xref: /openbmc/linux/fs/udf/inode.c (revision bf74b964)
1 /*
2  * inode.c
3  *
4  * PURPOSE
5  *  Inode handling routines for the OSTA-UDF(tm) filesystem.
6  *
7  * COPYRIGHT
8  *  This file is distributed under the terms of the GNU General Public
9  *  License (GPL). Copies of the GPL can be obtained from:
10  *    ftp://prep.ai.mit.edu/pub/gnu/GPL
11  *  Each contributing author retains all rights to their own work.
12  *
13  *  (C) 1998 Dave Boynton
14  *  (C) 1998-2004 Ben Fennema
15  *  (C) 1999-2000 Stelias Computing Inc
16  *
17  * HISTORY
18  *
19  *  10/04/98 dgb  Added rudimentary directory functions
20  *  10/07/98      Fully working udf_block_map! It works!
21  *  11/25/98      bmap altered to better support extents
22  *  12/06/98 blf  partition support in udf_iget, udf_block_map and udf_read_inode
23  *  12/12/98      rewrote udf_block_map to handle next extents and descs across
24  *                block boundaries (which is not actually allowed)
25  *  12/20/98      added support for strategy 4096
26  *  03/07/99      rewrote udf_block_map (again)
27  *                New funcs, inode_bmap, udf_next_aext
28  *  04/19/99      Support for writing device EA's for major/minor #
29  */
30 
31 #include "udfdecl.h"
32 #include <linux/mm.h>
33 #include <linux/smp_lock.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 #include <linux/slab.h>
39 
40 #include "udf_i.h"
41 #include "udf_sb.h"
42 
43 MODULE_AUTHOR("Ben Fennema");
44 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
45 MODULE_LICENSE("GPL");
46 
47 #define EXTENT_MERGE_SIZE 5
48 
49 static mode_t udf_convert_permissions(struct fileEntry *);
50 static int udf_update_inode(struct inode *, int);
51 static void udf_fill_inode(struct inode *, struct buffer_head *);
52 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
53 	long *, int *);
54 static int8_t udf_insert_aext(struct inode *, struct extent_position,
55 	kernel_lb_addr, uint32_t);
56 static void udf_split_extents(struct inode *, int *, int, int,
57 	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
58 static void udf_prealloc_extents(struct inode *, int, int,
59 	 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
60 static void udf_merge_extents(struct inode *,
61 	 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
62 static void udf_update_extents(struct inode *,
63 	kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
64 	struct extent_position *);
65 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
66 
67 /*
68  * udf_delete_inode
69  *
70  * PURPOSE
71  *	Clean-up before the specified inode is destroyed.
72  *
73  * DESCRIPTION
74  *	This routine is called when the kernel destroys an inode structure
75  *	i.e. when iput() finds i_count == 0.
76  *
77  * HISTORY
78  *	July 1, 1997 - Andrew E. Mileski
79  *	Written, tested, and released.
80  *
81  *  Called at the last iput() if i_nlink is zero.
82  */
83 void udf_delete_inode(struct inode * inode)
84 {
85 	truncate_inode_pages(&inode->i_data, 0);
86 
87 	if (is_bad_inode(inode))
88 		goto no_delete;
89 
90 	inode->i_size = 0;
91 	udf_truncate(inode);
92 	lock_kernel();
93 
94 	udf_update_inode(inode, IS_SYNC(inode));
95 	udf_free_inode(inode);
96 
97 	unlock_kernel();
98 	return;
99 no_delete:
100 	clear_inode(inode);
101 }
102 
103 /*
104  * If we are going to release the inode from memory, we discard preallocation
105  * and truncate the last inode extent to its proper length. We could use
106  * drop_inode() but it's called under inode_lock and thus we cannot mark the
107  * inode dirty there. We use clear_inode() instead, but then we have to make
108  * sure the inode gets written out, as that does not happen automatically.
109  */
110 void udf_clear_inode(struct inode *inode)
111 {
112 	if (!(inode->i_sb->s_flags & MS_RDONLY)) {
113 		lock_kernel();
114 		/* Discard preallocation for directories, symlinks, etc. */
115 		udf_discard_prealloc(inode);
116 		udf_truncate_tail_extent(inode);
117 		unlock_kernel();
118 		write_inode_now(inode, 1);
119 	}
120 	kfree(UDF_I_DATA(inode));
121 	UDF_I_DATA(inode) = NULL;
122 }
123 
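/*
 * Address space operations: thin wrappers around the generic buffer_head
 * based helpers, all using udf_get_block() for the block mapping.
 */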
124 static int udf_writepage(struct page *page, struct writeback_control *wbc)
125 {
126 	return block_write_full_page(page, udf_get_block, wbc);
127 }
128 
129 static int udf_readpage(struct file *file, struct page *page)
130 {
131 	return block_read_full_page(page, udf_get_block);
132 }
133 
134 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
135 {
136 	return block_prepare_write(page, from, to, udf_get_block);
137 }
138 
139 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
140 {
141 	return generic_block_bmap(mapping,block,udf_get_block);
142 }
143 
144 const struct address_space_operations udf_aops = {
145 	.readpage		= udf_readpage,
146 	.writepage		= udf_writepage,
147 	.sync_page		= block_sync_page,
148 	.prepare_write		= udf_prepare_write,
149 	.commit_write		= generic_commit_write,
150 	.bmap			= udf_bmap,
151 };
152 
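/*
 * udf_expand_file_adinicb
 *
 * Convert a file whose data is embedded in the ICB into a normal file
 * backed by allocation extents: copy the embedded data into page 0 of the
 * page cache, switch the allocation type to short/long ADs and write the
 * page back through the regular address_space operations.
 */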
153 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
154 {
155 	struct page *page;
156 	char *kaddr;
157 	struct writeback_control udf_wbc = {
158 		.sync_mode = WB_SYNC_NONE,
159 		.nr_to_write = 1,
160 	};
161 
162 	/* from now on we have normal address_space methods */
163 	inode->i_data.a_ops = &udf_aops;
164 
165 	if (!UDF_I_LENALLOC(inode))
166 	{
167 		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
168 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
169 		else
170 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
171 		mark_inode_dirty(inode);
172 		return;
173 	}
174 
175 	page = grab_cache_page(inode->i_mapping, 0);
176 	BUG_ON(!PageLocked(page));
177 
178 	if (!PageUptodate(page))
179 	{
180 		kaddr = kmap(page);
181 		memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
182 			PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
183 		memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
184 			UDF_I_LENALLOC(inode));
185 		flush_dcache_page(page);
186 		SetPageUptodate(page);
187 		kunmap(page);
188 	}
189 	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
190 		UDF_I_LENALLOC(inode));
191 	UDF_I_LENALLOC(inode) = 0;
192 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
193 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
194 	else
195 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
196 
197 	inode->i_data.a_ops->writepage(page, &udf_wbc);
198 	page_cache_release(page);
199 
200 	mark_inode_dirty(inode);
201 }
202 
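/*
 * udf_expand_dir_adinicb
 *
 * Convert a directory stored in the ICB into a one-block directory:
 * allocate a new block, copy every file identifier into it and record a
 * single extent covering the directory data. Returns the buffer_head of
 * the new directory block, or NULL on failure.
 */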
203 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
204 {
205 	int newblock;
206 	struct buffer_head *dbh = NULL;
207 	kernel_lb_addr eloc;
208 	uint32_t elen;
209 	uint8_t alloctype;
210 	struct extent_position epos;
211 
212 	struct udf_fileident_bh sfibh, dfibh;
213 	loff_t f_pos = udf_ext0_offset(inode) >> 2;
214 	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
215 	struct fileIdentDesc cfi, *sfi, *dfi;
216 
217 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
218 		alloctype = ICBTAG_FLAG_AD_SHORT;
219 	else
220 		alloctype = ICBTAG_FLAG_AD_LONG;
221 
222 	if (!inode->i_size)
223 	{
224 		UDF_I_ALLOCTYPE(inode) = alloctype;
225 		mark_inode_dirty(inode);
226 		return NULL;
227 	}
228 
229 	/* alloc block, and copy data to it */
230 	*block = udf_new_block(inode->i_sb, inode,
231 		UDF_I_LOCATION(inode).partitionReferenceNum,
232 		UDF_I_LOCATION(inode).logicalBlockNum, err);
233 
234 	if (!(*block))
235 		return NULL;
236 	newblock = udf_get_pblock(inode->i_sb, *block,
237 		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
238 	if (!newblock)
239 		return NULL;
240 	dbh = udf_tgetblk(inode->i_sb, newblock);
241 	if (!dbh)
242 		return NULL;
243 	lock_buffer(dbh);
244 	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
245 	set_buffer_uptodate(dbh);
246 	unlock_buffer(dbh);
247 	mark_buffer_dirty_inode(dbh, inode);
248 
249 	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
250 	sfibh.sbh = sfibh.ebh = NULL;
251 	dfibh.soffset = dfibh.eoffset = 0;
252 	dfibh.sbh = dfibh.ebh = dbh;
253 	while ( (f_pos < size) )
254 	{
255 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
256 		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL);
257 		if (!sfi)
258 		{
259 			brelse(dbh);
260 			return NULL;
261 		}
262 		UDF_I_ALLOCTYPE(inode) = alloctype;
263 		sfi->descTag.tagLocation = cpu_to_le32(*block);
264 		dfibh.soffset = dfibh.eoffset;
265 		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
266 		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
267 		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
268 			sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
269 		{
270 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
271 			brelse(dbh);
272 			return NULL;
273 		}
274 	}
275 	mark_buffer_dirty_inode(dbh, inode);
276 
277 	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
278 	UDF_I_LENALLOC(inode) = 0;
279 	eloc.logicalBlockNum = *block;
280 	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
281 	elen = inode->i_size;
282 	UDF_I_LENEXTENTS(inode) = elen;
283 	epos.bh = NULL;
284 	epos.block = UDF_I_LOCATION(inode);
285 	epos.offset = udf_file_entry_alloc_offset(inode);
286 	udf_add_aext(inode, &epos, eloc, elen, 0);
287 	/* UniqueID stuff */
288 
289 	brelse(epos.bh);
290 	mark_inode_dirty(inode);
291 	return dbh;
292 }
293 
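/*
 * udf_get_block
 *
 * Standard get_block callback: map a file block to a physical block,
 * allocating new space through inode_getblk() when 'create' is set.
 */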
294 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
295 {
296 	int err, new;
297 	struct buffer_head *bh;
298 	unsigned long phys;
299 
300 	if (!create)
301 	{
302 		phys = udf_block_map(inode, block);
303 		if (phys)
304 			map_bh(bh_result, inode->i_sb, phys);
305 		return 0;
306 	}
307 
308 	err = -EIO;
309 	new = 0;
310 	bh = NULL;
311 
312 	lock_kernel();
313 
314 	if (block < 0)
315 		goto abort_negative;
316 
317 	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
318 	{
319 		UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
320 		UDF_I_NEXT_ALLOC_GOAL(inode) ++;
321 	}
322 
323 	err = 0;
324 
325 	bh = inode_getblk(inode, block, &err, &phys, &new);
326 	BUG_ON(bh);
327 	if (err)
328 		goto abort;
329 	BUG_ON(!phys);
330 
331 	if (new)
332 		set_buffer_new(bh_result);
333 	map_bh(bh_result, inode->i_sb, phys);
334 abort:
335 	unlock_kernel();
336 	return err;
337 
338 abort_negative:
339 	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
340 	goto abort;
341 }
342 
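/*
 * Map (and possibly allocate) a file block and return a buffer_head for
 * it; newly allocated blocks are zeroed and marked uptodate.
 */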
343 static struct buffer_head *
344 udf_getblk(struct inode *inode, long block, int create, int *err)
345 {
346 	struct buffer_head dummy;
347 
348 	dummy.b_state = 0;
349 	dummy.b_blocknr = -1000;
350 	*err = udf_get_block(inode, block, &dummy, create);
351 	if (!*err && buffer_mapped(&dummy))
352 	{
353 		struct buffer_head *bh;
354 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
355 		if (buffer_new(&dummy))
356 		{
357 			lock_buffer(bh);
358 			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
359 			set_buffer_uptodate(bh);
360 			unlock_buffer(bh);
361 			mark_buffer_dirty_inode(bh, inode);
362 		}
363 		return bh;
364 	}
365 	return NULL;
366 }
367 
368 /* Extend the file by 'blocks' blocks, return the number of extents added */
369 int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
370 	kernel_long_ad *last_ext, sector_t blocks)
371 {
372 	sector_t add;
373 	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
374 	struct super_block *sb = inode->i_sb;
375 	kernel_lb_addr prealloc_loc = {0, 0};
376 	int prealloc_len = 0;
377 
378 	/* The previous extent is fake and we should not extend by anything
379 	 * - there's nothing to do... */
380 	if (!blocks && fake)
381 		return 0;
382 	/* Round the last extent up to a multiple of block size */
383 	if (last_ext->extLength & (sb->s_blocksize - 1)) {
384 		last_ext->extLength =
385 			(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
386 			(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
387 				sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
388 		UDF_I_LENEXTENTS(inode) =
389 			(UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) &
390 				~(sb->s_blocksize - 1);
391 	}
392 	/* Is the last extent just preallocated blocks? */
393 	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) {
394 		/* Save the extent so that we can reattach it to the end */
395 		prealloc_loc = last_ext->extLocation;
396 		prealloc_len = last_ext->extLength;
397 		/* Mark the extent as a hole */
398 		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
399 			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
400 		last_ext->extLocation.logicalBlockNum = 0;
401 		last_ext->extLocation.partitionReferenceNum = 0;
402 	}
403 	/* Can we merge with the previous extent? */
404 	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) {
405 		add = ((1<<30) - sb->s_blocksize - (last_ext->extLength &
406 			UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits;
407 		if (add > blocks)
408 			add = blocks;
409 		blocks -= add;
410 		last_ext->extLength += add << sb->s_blocksize_bits;
411 	}
412 
413 	if (fake) {
414 		udf_add_aext(inode, last_pos, last_ext->extLocation,
415 			last_ext->extLength, 1);
416 		count++;
417 	}
418 	else
419 		udf_write_aext(inode, last_pos, last_ext->extLocation, last_ext->extLength, 1);
420 	/* Managed to do everything necessary? */
421 	if (!blocks)
422 		goto out;
423 
424 	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
425 	last_ext->extLocation.logicalBlockNum = 0;
426 	last_ext->extLocation.partitionReferenceNum = 0;
427 	add = (1 << (30-sb->s_blocksize_bits)) - 1;
428 	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits);
429 	/* Create enough extents to cover the whole hole */
430 	while (blocks > add) {
431 		blocks -= add;
432 		if (udf_add_aext(inode, last_pos, last_ext->extLocation,
433 			last_ext->extLength, 1) == -1)
434 			return -1;
435 		count++;
436 	}
437 	if (blocks) {
438 		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
439 			(blocks << sb->s_blocksize_bits);
440 		if (udf_add_aext(inode, last_pos, last_ext->extLocation,
441 			last_ext->extLength, 1) == -1)
442 			return -1;
443 		count++;
444 	}
445 out:
446 	/* Do we have some preallocated blocks saved? */
447 	if (prealloc_len) {
448 		if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1) == -1)
449 			return -1;
450 		last_ext->extLocation = prealloc_loc;
451 		last_ext->extLength = prealloc_len;
452 		count++;
453 	}
454 	/* last_pos should point to the last written extent... */
455 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
456 		last_pos->offset -= sizeof(short_ad);
457 	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
458 		last_pos->offset -= sizeof(long_ad);
459 	else
460 		return -1;
461 	return count;
462 }
463 
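/*
 * inode_getblk
 *
 * Find or allocate the physical block backing the given file block: walk
 * the extent list to the extent containing the block, extend the file
 * over any hole in front of it, allocate a new block if necessary, then
 * split, preallocate and merge extents before writing the updated extent
 * list back. The physical block is returned through *phys; the function
 * itself always returns NULL.
 */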
464 static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
465 	int *err, long *phys, int *new)
466 {
467 	static sector_t last_block;
468 	struct buffer_head *result = NULL;
469 	kernel_long_ad laarr[EXTENT_MERGE_SIZE];
470 	struct extent_position prev_epos, cur_epos, next_epos;
471 	int count = 0, startnum = 0, endnum = 0;
472 	uint32_t elen = 0, tmpelen;
473 	kernel_lb_addr eloc, tmpeloc;
474 	int c = 1;
475 	loff_t lbcount = 0, b_off = 0;
476 	uint32_t newblocknum, newblock;
477 	sector_t offset = 0;
478 	int8_t etype;
479 	int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
480 	int lastblock = 0;
481 
482 	prev_epos.offset = udf_file_entry_alloc_offset(inode);
483 	prev_epos.block = UDF_I_LOCATION(inode);
484 	prev_epos.bh = NULL;
485 	cur_epos = next_epos = prev_epos;
486 	b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
487 
488 	/* find the extent which contains the block we are looking for.
489 	   alternate between laarr[0] and laarr[1] for locations of the
490 	   current extent, and the previous extent */
491 	do
492 	{
493 		if (prev_epos.bh != cur_epos.bh)
494 		{
495 			brelse(prev_epos.bh);
496 			get_bh(cur_epos.bh);
497 			prev_epos.bh = cur_epos.bh;
498 		}
499 		if (cur_epos.bh != next_epos.bh)
500 		{
501 			brelse(cur_epos.bh);
502 			get_bh(next_epos.bh);
503 			cur_epos.bh = next_epos.bh;
504 		}
505 
506 		lbcount += elen;
507 
508 		prev_epos.block = cur_epos.block;
509 		cur_epos.block = next_epos.block;
510 
511 		prev_epos.offset = cur_epos.offset;
512 		cur_epos.offset = next_epos.offset;
513 
514 		if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
515 			break;
516 
517 		c = !c;
518 
519 		laarr[c].extLength = (etype << 30) | elen;
520 		laarr[c].extLocation = eloc;
521 
522 		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
523 			pgoal = eloc.logicalBlockNum +
524 				((elen + inode->i_sb->s_blocksize - 1) >>
525 				inode->i_sb->s_blocksize_bits);
526 
527 		count ++;
528 	} while (lbcount + elen <= b_off);
529 
530 	b_off -= lbcount;
531 	offset = b_off >> inode->i_sb->s_blocksize_bits;
532 	/*
533 	 * Move prev_epos and cur_epos into indirect extent if we are at
534 	 * the pointer to it
535 	 */
536 	udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
537 	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
538 
539 	/* if the extent is allocated and recorded, return the block;
540 	   if the extent is not a multiple of the blocksize, round up */
541 
542 	if (etype == (EXT_RECORDED_ALLOCATED >> 30))
543 	{
544 		if (elen & (inode->i_sb->s_blocksize - 1))
545 		{
546 			elen = EXT_RECORDED_ALLOCATED |
547 				((elen + inode->i_sb->s_blocksize - 1) &
548 				~(inode->i_sb->s_blocksize - 1));
549 			etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
550 		}
551 		brelse(prev_epos.bh);
552 		brelse(cur_epos.bh);
553 		brelse(next_epos.bh);
554 		newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
555 		*phys = newblock;
556 		return NULL;
557 	}
558 
559 	last_block = block;
560 	/* Are we beyond EOF? */
561 	if (etype == -1)
562 	{
563 		int ret;
564 
565 		if (count) {
566 			if (c)
567 				laarr[0] = laarr[1];
568 			startnum = 1;
569 		}
570 		else {
571 			/* Create a fake extent when there's not one */
572 			memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr));
573 			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
574 			/* Will udf_extend_file() create real extent from a fake one? */
575 			/* Will udf_extend_file() create a real extent from a fake one? */
576 		}
577 		/* Create extents for the hole between EOF and offset */
578 		ret = udf_extend_file(inode, &prev_epos, laarr, offset);
579 		if (ret == -1) {
580 			brelse(prev_epos.bh);
581 			brelse(cur_epos.bh);
582 			brelse(next_epos.bh);
583 			/* We don't really know the error here so we just make
584 			 * something up */
585 			*err = -ENOSPC;
586 			return NULL;
587 		}
588 		c = 0;
589 		offset = 0;
590 		count += ret;
591 		/* We are not covered by a preallocated extent? */
592 		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) {
593 			/* Is there any real extent? - otherwise we overwrite
594 			 * the fake one... */
595 			if (count)
596 				c = !c;
597 			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
598 				inode->i_sb->s_blocksize;
599 			memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
600 			count ++;
601 			endnum ++;
602 		}
603 		endnum = c+1;
604 		lastblock = 1;
605 	}
606 	else {
607 		endnum = startnum = ((count > 2) ? 2 : count);
608 
609 		/* if the current extent is in position 0, swap it with the previous */
610 		if (!c && count != 1)
611 		{
612 			laarr[2] = laarr[0];
613 			laarr[0] = laarr[1];
614 			laarr[1] = laarr[2];
615 			c = 1;
616 		}
617 
618 		/* if the current block is located in an extent, read the next extent */
619 		if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1)
620 		{
621 			laarr[c+1].extLength = (etype << 30) | elen;
622 			laarr[c+1].extLocation = eloc;
623 			count ++;
624 			startnum ++;
625 			endnum ++;
626 		}
627 		else {
628 			lastblock = 1;
629 		}
630 	}
631 
632 	/* if the current extent is not recorded but allocated, get the
633 		block in the extent corresponding to the requested block */
634 	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
635 		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
636 	else /* otherwise, allocate a new block */
637 	{
638 		if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
639 			goal = UDF_I_NEXT_ALLOC_GOAL(inode);
640 
641 		if (!goal)
642 		{
643 			if (!(goal = pgoal))
644 				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
645 		}
646 
647 		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
648 			UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
649 		{
650 			brelse(prev_epos.bh);
651 			*err = -ENOSPC;
652 			return NULL;
653 		}
654 		UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
655 	}
656 
657 	/* if the extent the requested block is located in contains multiple
658 	   blocks, split the extent into (at most) three extents: blocks prior
659 	   to the requested block, the requested block, and blocks after it */
660 	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
661 
662 #ifdef UDF_PREALLOCATE
663 	/* preallocate blocks */
664 	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
665 #endif
666 
667 	/* merge any continuous blocks in laarr */
668 	/* merge any contiguous extents in laarr */
669 
670 	/* write back the new extents, inserting new extents if the new number
671 	   of extents is greater than the old number, and deleting extents if
672 	   the new number of extents is less than the old number */
673 	udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
674 
675 	brelse(prev_epos.bh);
676 
677 	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
678 		UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
679 	{
680 		return NULL;
681 	}
682 	*phys = newblock;
683 	*err = 0;
684 	*new = 1;
685 	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
686 	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
687 	inode->i_ctime = current_fs_time(inode->i_sb);
688 
689 	if (IS_SYNC(inode))
690 		udf_sync_inode(inode);
691 	else
692 		mark_inode_dirty(inode);
693 	return result;
694 }
695 
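/*
 * Split the extent containing the newly allocated block into up to three
 * extents: the blocks before the requested block, the requested block
 * itself (now recorded and allocated) and the blocks after it.
 */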
696 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
697 	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
698 {
699 	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
700 		(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
701 	{
702 		int curr = *c;
703 		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
704 			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
705 		int8_t etype = (laarr[curr].extLength >> 30);
706 
707 		if (blen == 1)
708 			;
709 		else if (!offset || blen == offset + 1)
710 		{
711 			laarr[curr+2] = laarr[curr+1];
712 			laarr[curr+1] = laarr[curr];
713 		}
714 		else
715 		{
716 			laarr[curr+3] = laarr[curr+1];
717 			laarr[curr+2] = laarr[curr+1] = laarr[curr];
718 		}
719 
720 		if (offset)
721 		{
722 			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
723 			{
724 				udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
725 				laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
726 					(offset << inode->i_sb->s_blocksize_bits);
727 				laarr[curr].extLocation.logicalBlockNum = 0;
728 				laarr[curr].extLocation.partitionReferenceNum = 0;
729 			}
730 			else
731 				laarr[curr].extLength = (etype << 30) |
732 					(offset << inode->i_sb->s_blocksize_bits);
733 			curr ++;
734 			(*c) ++;
735 			(*endnum) ++;
736 		}
737 
738 		laarr[curr].extLocation.logicalBlockNum = newblocknum;
739 		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
740 			laarr[curr].extLocation.partitionReferenceNum =
741 				UDF_I_LOCATION(inode).partitionReferenceNum;
742 		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
743 			inode->i_sb->s_blocksize;
744 		curr ++;
745 
746 		if (blen != offset + 1)
747 		{
748 			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
749 				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
750 			laarr[curr].extLength = (etype << 30) |
751 				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
752 			curr ++;
753 			(*endnum) ++;
754 		}
755 	}
756 }
757 
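/*
 * Try to preallocate blocks following the one just allocated (up to
 * UDF_DEFAULT_PREALLOC_BLOCKS) and fold the preallocation into the
 * extent array, shrinking or removing the unrecorded extents it covers.
 */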
758 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
759 	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
760 {
761 	int start, length = 0, currlength = 0, i;
762 
763 	if (*endnum >= (c+1))
764 	{
765 		if (!lastblock)
766 			return;
767 		else
768 			start = c;
769 	}
770 	else
771 	{
772 		if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
773 		{
774 			start = c+1;
775 			length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
776 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
777 		}
778 		else
779 			start = c;
780 	}
781 
782 	for (i=start+1; i<=*endnum; i++)
783 	{
784 		if (i == *endnum)
785 		{
786 			if (lastblock)
787 				length += UDF_DEFAULT_PREALLOC_BLOCKS;
788 		}
789 		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
790 			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
791 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
792 		else
793 			break;
794 	}
795 
796 	if (length)
797 	{
798 		int next = laarr[start].extLocation.logicalBlockNum +
799 			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
800 			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
801 		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
802 			laarr[start].extLocation.partitionReferenceNum,
803 			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
804 				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
805 
806 		if (numalloc)
807 		{
808 			if (start == (c+1))
809 				laarr[start].extLength +=
810 					(numalloc << inode->i_sb->s_blocksize_bits);
811 			else
812 			{
813 				memmove(&laarr[c+2], &laarr[c+1],
814 					sizeof(long_ad) * (*endnum - (c+1)));
815 				(*endnum) ++;
816 				laarr[c+1].extLocation.logicalBlockNum = next;
817 				laarr[c+1].extLocation.partitionReferenceNum =
818 					laarr[c].extLocation.partitionReferenceNum;
819 				laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
820 					(numalloc << inode->i_sb->s_blocksize_bits);
821 				start = c+1;
822 			}
823 
824 			for (i=start+1; numalloc && i<*endnum; i++)
825 			{
826 				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
827 					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
828 
829 				if (elen > numalloc)
830 				{
831 					laarr[i].extLength -=
832 						(numalloc << inode->i_sb->s_blocksize_bits);
833 					numalloc = 0;
834 				}
835 				else
836 				{
837 					numalloc -= elen;
838 					if (*endnum > (i+1))
839 						memmove(&laarr[i], &laarr[i+1],
840 							sizeof(long_ad) * (*endnum - (i+1)));
841 					i --;
842 					(*endnum) --;
843 				}
844 			}
845 			UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
846 		}
847 	}
848 }
849 
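/*
 * Merge neighbouring extents in laarr[] where the on-disc format allows
 * it, freeing preallocated blocks that are no longer needed.
 */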
850 static void udf_merge_extents(struct inode *inode,
851 	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
852 {
853 	int i;
854 
855 	for (i=0; i<(*endnum-1); i++)
856 	{
857 		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
858 		{
859 			if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
860 				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
861 				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
862 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
863 			{
864 				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
865 					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
866 					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
867 				{
868 					laarr[i+1].extLength = (laarr[i+1].extLength -
869 						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
870 						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
871 					laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
872 						(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
873 					laarr[i+1].extLocation.logicalBlockNum =
874 						laarr[i].extLocation.logicalBlockNum +
875 						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
876 							inode->i_sb->s_blocksize_bits);
877 				}
878 				else
879 				{
880 					laarr[i].extLength = laarr[i+1].extLength +
881 						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
882 						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
883 					if (*endnum > (i+2))
884 						memmove(&laarr[i+1], &laarr[i+2],
885 							sizeof(long_ad) * (*endnum - (i+2)));
886 					i --;
887 					(*endnum) --;
888 				}
889 			}
890 		}
891 		else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
892 			((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
893 		{
894 			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
895 				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
896 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
897 			laarr[i].extLocation.logicalBlockNum = 0;
898 			laarr[i].extLocation.partitionReferenceNum = 0;
899 
900 			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
901 				(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
902 				inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
903 			{
904 				laarr[i+1].extLength = (laarr[i+1].extLength -
905 					(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
906 					UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
907 				laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
908 					(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
909 			}
910 			else
911 			{
912 				laarr[i].extLength = laarr[i+1].extLength +
913 					(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
914 					inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
915 				if (*endnum > (i+2))
916 					memmove(&laarr[i+1], &laarr[i+2],
917 						sizeof(long_ad) * (*endnum - (i+2)));
918 				i --;
919 				(*endnum) --;
920 			}
921 		}
922 		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
923 		{
924 			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
925 				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
926 			       inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
927 			laarr[i].extLocation.logicalBlockNum = 0;
928 			laarr[i].extLocation.partitionReferenceNum = 0;
929 			laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
930 				EXT_NOT_RECORDED_NOT_ALLOCATED;
931 		}
932 	}
933 }
934 
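/*
 * Write the in-memory extent array laarr[] back to the on-disc allocation
 * descriptors starting at epos, deleting descriptors when the number of
 * extents shrank and inserting new ones when it grew.
 */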
935 static void udf_update_extents(struct inode *inode,
936 	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
937 	struct extent_position *epos)
938 {
939 	int start = 0, i;
940 	kernel_lb_addr tmploc;
941 	uint32_t tmplen;
942 
943 	if (startnum > endnum)
944 	{
945 		for (i=0; i<(startnum-endnum); i++)
946 			udf_delete_aext(inode, *epos, laarr[i].extLocation,
947 				laarr[i].extLength);
948 	}
949 	else if (startnum < endnum)
950 	{
951 		for (i=0; i<(endnum-startnum); i++)
952 		{
953 			udf_insert_aext(inode, *epos, laarr[i].extLocation,
954 				laarr[i].extLength);
955 			udf_next_aext(inode, epos, &laarr[i].extLocation,
956 				&laarr[i].extLength, 1);
957 			start ++;
958 		}
959 	}
960 
961 	for (i=start; i<endnum; i++)
962 	{
963 		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
964 		udf_write_aext(inode, epos, laarr[i].extLocation,
965 			laarr[i].extLength, 1);
966 	}
967 }
968 
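/*
 * udf_bread
 *
 * Map (and possibly allocate) a file block and read it from disc,
 * returning an uptodate buffer_head or NULL on error.
 */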
969 struct buffer_head * udf_bread(struct inode * inode, int block,
970 	int create, int * err)
971 {
972 	struct buffer_head * bh = NULL;
973 
974 	bh = udf_getblk(inode, block, create, err);
975 	if (!bh)
976 		return NULL;
977 
978 	if (buffer_uptodate(bh))
979 		return bh;
980 	ll_rw_block(READ, 1, &bh);
981 	wait_on_buffer(bh);
982 	if (buffer_uptodate(bh))
983 		return bh;
984 	brelse(bh);
985 	*err = -EIO;
986 	return NULL;
987 }
988 
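/*
 * udf_truncate
 *
 * Truncate the inode to the current i_size: for data embedded in the ICB
 * the tail is cleared in place (or the file is expanded to extent-based
 * storage first if the new size no longer fits); for extent-based files
 * the final partial block is zeroed and the extents beyond i_size freed.
 */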
989 void udf_truncate(struct inode * inode)
990 {
991 	int offset;
992 	int err;
993 
994 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
995 			S_ISLNK(inode->i_mode)))
996 		return;
997 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
998 		return;
999 
1000 	lock_kernel();
1001 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1002 	{
1003 		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
1004 			inode->i_size))
1005 		{
1006 			udf_expand_file_adinicb(inode, inode->i_size, &err);
1007 			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1008 			{
1009 				inode->i_size = UDF_I_LENALLOC(inode);
1010 				unlock_kernel();
1011 				return;
1012 			}
1013 			else
1014 				udf_truncate_extents(inode);
1015 		}
1016 		else
1017 		{
1018 			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
1019 			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
1020 			UDF_I_LENALLOC(inode) = inode->i_size;
1021 		}
1022 	}
1023 	else
1024 	{
1025 		block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
1026 		udf_truncate_extents(inode);
1027 	}
1028 
1029 	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
1030 	if (IS_SYNC(inode))
1031 		udf_sync_inode (inode);
1032 	else
1033 		mark_inode_dirty(inode);
1034 	unlock_kernel();
1035 }
1036 
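/*
 * __udf_read_inode
 *
 * Read the file entry describing this inode from disc, following a
 * strategy 4096 indirect entry if present, and hand it to
 * udf_fill_inode(); the inode is marked bad if the descriptor cannot be
 * read or carries an unexpected tag.
 */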
1037 static void
1038 __udf_read_inode(struct inode *inode)
1039 {
1040 	struct buffer_head *bh = NULL;
1041 	struct fileEntry *fe;
1042 	uint16_t ident;
1043 
1044 	/*
1045 	 * Set defaults, but the inode is still incomplete!
1046 	 * Note: get_new_inode() sets the following on a new inode:
1047 	 *      i_sb = sb
1048 	 *      i_ino = ino
1049 	 *      i_flags = sb->s_flags
1050 	 *      i_state = 0
1051 	 * clean_inode(): zero fills and sets
1052 	 *      i_count = 1
1053 	 *      i_nlink = 1
1054 	 *      i_op = NULL;
1055 	 */
1056 	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
1057 
1058 	if (!bh)
1059 	{
1060 		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
1061 			inode->i_ino);
1062 		make_bad_inode(inode);
1063 		return;
1064 	}
1065 
1066 	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1067 		ident != TAG_IDENT_USE)
1068 	{
1069 		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
1070 			inode->i_ino, ident);
1071 		brelse(bh);
1072 		make_bad_inode(inode);
1073 		return;
1074 	}
1075 
1076 	fe = (struct fileEntry *)bh->b_data;
1077 
1078 	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
1079 	{
1080 		struct buffer_head *ibh = NULL, *nbh = NULL;
1081 		struct indirectEntry *ie;
1082 
1083 		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
1084 		if (ident == TAG_IDENT_IE)
1085 		{
1086 			if (ibh)
1087 			{
1088 				kernel_lb_addr loc;
1089 				ie = (struct indirectEntry *)ibh->b_data;
1090 
1091 				loc = lelb_to_cpu(ie->indirectICB.extLocation);
1092 
1093 				if (ie->indirectICB.extLength &&
1094 					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
1095 				{
1096 					if (ident == TAG_IDENT_FE ||
1097 						ident == TAG_IDENT_EFE)
1098 					{
1099 						memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
1100 						brelse(bh);
1101 						brelse(ibh);
1102 						brelse(nbh);
1103 						__udf_read_inode(inode);
1104 						return;
1105 					}
1106 					else
1107 					{
1108 						brelse(nbh);
1109 						brelse(ibh);
1110 					}
1111 				}
1112 				else
1113 					brelse(ibh);
1114 			}
1115 		}
1116 		else
1117 			brelse(ibh);
1118 	}
1119 	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
1120 	{
1121 		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1122 			le16_to_cpu(fe->icbTag.strategyType));
1123 		brelse(bh);
1124 		make_bad_inode(inode);
1125 		return;
1126 	}
1127 	udf_fill_inode(inode, bh);
1128 
1129 	brelse(bh);
1130 }
1131 
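/*
 * Decode an on-disc fileEntry / extendedFileEntry into the in-core inode:
 * ownership, permissions, link count, size, timestamps and the allocation
 * descriptor area, and set up the inode operations according to the UDF
 * file type.
 */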
1132 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1133 {
1134 	struct fileEntry *fe;
1135 	struct extendedFileEntry *efe;
1136 	time_t convtime;
1137 	long convtime_usec;
1138 	int offset;
1139 
1140 	fe = (struct fileEntry *)bh->b_data;
1141 	efe = (struct extendedFileEntry *)bh->b_data;
1142 
1143 	if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1144 		UDF_I_STRAT4096(inode) = 0;
1145 	else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1146 		UDF_I_STRAT4096(inode) = 1;
1147 
1148 	UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1149 	UDF_I_UNIQUE(inode) = 0;
1150 	UDF_I_LENEATTR(inode) = 0;
1151 	UDF_I_LENEXTENTS(inode) = 0;
1152 	UDF_I_LENALLOC(inode) = 0;
1153 	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1154 	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1155 	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1156 	{
1157 		UDF_I_EFE(inode) = 1;
1158 		UDF_I_USE(inode) = 0;
1159 		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1160 		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1161 	}
1162 	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1163 	{
1164 		UDF_I_EFE(inode) = 0;
1165 		UDF_I_USE(inode) = 0;
1166 		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1167 		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1168 	}
1169 	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1170 	{
1171 		UDF_I_EFE(inode) = 0;
1172 		UDF_I_USE(inode) = 1;
1173 		UDF_I_LENALLOC(inode) =
1174 			le32_to_cpu(
1175 				((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1176 		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1177 		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1178 		return;
1179 	}
1180 
1181 	inode->i_uid = le32_to_cpu(fe->uid);
1182 	if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1183 					UDF_FLAG_UID_IGNORE))
1184 		inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1185 
1186 	inode->i_gid = le32_to_cpu(fe->gid);
1187 	if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1188 					UDF_FLAG_GID_IGNORE))
1189 		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1190 
1191 	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1192 	if (!inode->i_nlink)
1193 		inode->i_nlink = 1;
1194 
1195 	inode->i_size = le64_to_cpu(fe->informationLength);
1196 	UDF_I_LENEXTENTS(inode) = inode->i_size;
1197 
1198 	inode->i_mode = udf_convert_permissions(fe);
1199 	inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1200 
1201 	if (UDF_I_EFE(inode) == 0)
1202 	{
1203 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1204 			(inode->i_sb->s_blocksize_bits - 9);
1205 
1206 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1207 			lets_to_cpu(fe->accessTime)) )
1208 		{
1209 			inode->i_atime.tv_sec = convtime;
1210 			inode->i_atime.tv_nsec = convtime_usec * 1000;
1211 		}
1212 		else
1213 		{
1214 			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1215 		}
1216 
1217 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1218 			lets_to_cpu(fe->modificationTime)) )
1219 		{
1220 			inode->i_mtime.tv_sec = convtime;
1221 			inode->i_mtime.tv_nsec = convtime_usec * 1000;
1222 		}
1223 		else
1224 		{
1225 			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1226 		}
1227 
1228 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1229 			lets_to_cpu(fe->attrTime)) )
1230 		{
1231 			inode->i_ctime.tv_sec = convtime;
1232 			inode->i_ctime.tv_nsec = convtime_usec * 1000;
1233 		}
1234 		else
1235 		{
1236 			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1237 		}
1238 
1239 		UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1240 		UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1241 		UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1242 		offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1243 	}
1244 	else
1245 	{
1246 		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1247 			(inode->i_sb->s_blocksize_bits - 9);
1248 
1249 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1250 			lets_to_cpu(efe->accessTime)) )
1251 		{
1252 			inode->i_atime.tv_sec = convtime;
1253 			inode->i_atime.tv_nsec = convtime_usec * 1000;
1254 		}
1255 		else
1256 		{
1257 			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1258 		}
1259 
1260 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1261 			lets_to_cpu(efe->modificationTime)) )
1262 		{
1263 			inode->i_mtime.tv_sec = convtime;
1264 			inode->i_mtime.tv_nsec = convtime_usec * 1000;
1265 		}
1266 		else
1267 		{
1268 			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1269 		}
1270 
1271 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1272 			lets_to_cpu(efe->createTime)) )
1273 		{
1274 			UDF_I_CRTIME(inode).tv_sec = convtime;
1275 			UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1276 		}
1277 		else
1278 		{
1279 			UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1280 		}
1281 
1282 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1283 			lets_to_cpu(efe->attrTime)) )
1284 		{
1285 			inode->i_ctime.tv_sec = convtime;
1286 			inode->i_ctime.tv_nsec = convtime_usec * 1000;
1287 		}
1288 		else
1289 		{
1290 			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1291 		}
1292 
1293 		UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1294 		UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1295 		UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1296 		offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1297 	}
1298 
1299 	switch (fe->icbTag.fileType)
1300 	{
1301 		case ICBTAG_FILE_TYPE_DIRECTORY:
1302 		{
1303 			inode->i_op = &udf_dir_inode_operations;
1304 			inode->i_fop = &udf_dir_operations;
1305 			inode->i_mode |= S_IFDIR;
1306 			inc_nlink(inode);
1307 			break;
1308 		}
1309 		case ICBTAG_FILE_TYPE_REALTIME:
1310 		case ICBTAG_FILE_TYPE_REGULAR:
1311 		case ICBTAG_FILE_TYPE_UNDEF:
1312 		{
1313 			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1314 				inode->i_data.a_ops = &udf_adinicb_aops;
1315 			else
1316 				inode->i_data.a_ops = &udf_aops;
1317 			inode->i_op = &udf_file_inode_operations;
1318 			inode->i_fop = &udf_file_operations;
1319 			inode->i_mode |= S_IFREG;
1320 			break;
1321 		}
1322 		case ICBTAG_FILE_TYPE_BLOCK:
1323 		{
1324 			inode->i_mode |= S_IFBLK;
1325 			break;
1326 		}
1327 		case ICBTAG_FILE_TYPE_CHAR:
1328 		{
1329 			inode->i_mode |= S_IFCHR;
1330 			break;
1331 		}
1332 		case ICBTAG_FILE_TYPE_FIFO:
1333 		{
1334 			init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1335 			break;
1336 		}
1337 		case ICBTAG_FILE_TYPE_SOCKET:
1338 		{
1339 			init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1340 			break;
1341 		}
1342 		case ICBTAG_FILE_TYPE_SYMLINK:
1343 		{
1344 			inode->i_data.a_ops = &udf_symlink_aops;
1345 			inode->i_op = &page_symlink_inode_operations;
1346 			inode->i_mode = S_IFLNK|S_IRWXUGO;
1347 			break;
1348 		}
1349 		default:
1350 		{
1351 			printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1352 				inode->i_ino, fe->icbTag.fileType);
1353 			make_bad_inode(inode);
1354 			return;
1355 		}
1356 	}
1357 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1358 	{
1359 		struct deviceSpec *dsea =
1360 			(struct deviceSpec *)
1361 				udf_get_extendedattr(inode, 12, 1);
1362 
1363 		if (dsea)
1364 		{
1365 			init_special_inode(inode, inode->i_mode, MKDEV(
1366 				le32_to_cpu(dsea->majorDeviceIdent),
1367 				le32_to_cpu(dsea->minorDeviceIdent)));
1368 			/* Developer ID ??? */
1369 		}
1370 		else
1371 		{
1372 			make_bad_inode(inode);
1373 		}
1374 	}
1375 }
1376 
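/*
 * Translate the UDF on-disc permission bits and ICB flags into a POSIX
 * file mode.
 */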
1377 static mode_t
1378 udf_convert_permissions(struct fileEntry *fe)
1379 {
1380 	mode_t mode;
1381 	uint32_t permissions;
1382 	uint32_t flags;
1383 
1384 	permissions = le32_to_cpu(fe->permissions);
1385 	flags = le16_to_cpu(fe->icbTag.flags);
1386 
1387 	mode =	(( permissions      ) & S_IRWXO) |
1388 		(( permissions >> 2 ) & S_IRWXG) |
1389 		(( permissions >> 4 ) & S_IRWXU) |
1390 		(( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1391 		(( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1392 		(( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1393 
1394 	return mode;
1395 }
1396 
1397 /*
1398  * udf_write_inode
1399  *
1400  * PURPOSE
1401  *	Write out the specified inode.
1402  *
1403  * DESCRIPTION
1404  *	This routine is called whenever an inode is synced.
1405  *	It writes the in-core inode back to disc via udf_update_inode().
1406  *
1407  * HISTORY
1408  *	July 1, 1997 - Andrew E. Mileski
1409  *	Written, tested, and released.
1410  */
1411 
1412 int udf_write_inode(struct inode * inode, int sync)
1413 {
1414 	int ret;
1415 	lock_kernel();
1416 	ret = udf_update_inode(inode, sync);
1417 	unlock_kernel();
1418 	return ret;
1419 }
1420 
1421 int udf_sync_inode(struct inode * inode)
1422 {
1423 	return udf_update_inode(inode, 1);
1424 }
1425 
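/*
 * udf_update_inode
 *
 * Write the in-core inode back into its on-disc (extended) file entry or
 * unallocated space entry, recomputing the descriptor CRC and tag
 * checksum; when do_sync is set, wait for the buffer to reach the disc.
 */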
1426 static int
1427 udf_update_inode(struct inode *inode, int do_sync)
1428 {
1429 	struct buffer_head *bh = NULL;
1430 	struct fileEntry *fe;
1431 	struct extendedFileEntry *efe;
1432 	uint32_t udfperms;
1433 	uint16_t icbflags;
1434 	uint16_t crclen;
1435 	int i;
1436 	kernel_timestamp cpu_time;
1437 	int err = 0;
1438 
1439 	bh = udf_tread(inode->i_sb,
1440 		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1441 
1442 	if (!bh)
1443 	{
1444 		udf_debug("bread failure\n");
1445 		return -EIO;
1446 	}
1447 
1448 	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1449 
1450 	fe = (struct fileEntry *)bh->b_data;
1451 	efe = (struct extendedFileEntry *)bh->b_data;
1452 
1453 	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1454 	{
1455 		struct unallocSpaceEntry *use =
1456 			(struct unallocSpaceEntry *)bh->b_data;
1457 
1458 		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1459 		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1460 		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1461 			sizeof(tag);
1462 		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1463 		use->descTag.descCRCLength = cpu_to_le16(crclen);
1464 		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1465 
1466 		use->descTag.tagChecksum = 0;
1467 		for (i=0; i<16; i++)
1468 			if (i != 4)
1469 				use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1470 
1471 		mark_buffer_dirty(bh);
1472 		brelse(bh);
1473 		return err;
1474 	}
1475 
1476 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1477 		fe->uid = cpu_to_le32(-1);
1478 	else fe->uid = cpu_to_le32(inode->i_uid);
1479 
1480 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1481 		fe->gid = cpu_to_le32(-1);
1482 	else fe->gid = cpu_to_le32(inode->i_gid);
1483 
1484 	udfperms =	((inode->i_mode & S_IRWXO)     ) |
1485 			((inode->i_mode & S_IRWXG) << 2) |
1486 			((inode->i_mode & S_IRWXU) << 4);
1487 
1488 	udfperms |=	(le32_to_cpu(fe->permissions) &
1489 			(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1490 			 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1491 			 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1492 	fe->permissions = cpu_to_le32(udfperms);
1493 
1494 	if (S_ISDIR(inode->i_mode))
1495 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1496 	else
1497 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1498 
1499 	fe->informationLength = cpu_to_le64(inode->i_size);
1500 
1501 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1502 	{
1503 		regid *eid;
1504 		struct deviceSpec *dsea =
1505 			(struct deviceSpec *)
1506 				udf_get_extendedattr(inode, 12, 1);
1507 
1508 		if (!dsea)
1509 		{
1510 			dsea = (struct deviceSpec *)
1511 				udf_add_extendedattr(inode,
1512 					sizeof(struct deviceSpec) +
1513 					sizeof(regid), 12, 0x3);
1514 			dsea->attrType = cpu_to_le32(12);
1515 			dsea->attrSubtype = 1;
1516 			dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1517 				sizeof(regid));
1518 			dsea->impUseLength = cpu_to_le32(sizeof(regid));
1519 		}
1520 		eid = (regid *)dsea->impUse;
1521 		memset(eid, 0, sizeof(regid));
1522 		strcpy(eid->ident, UDF_ID_DEVELOPER);
1523 		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1524 		eid->identSuffix[1] = UDF_OS_ID_LINUX;
1525 		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1526 		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1527 	}
1528 
1529 	if (UDF_I_EFE(inode) == 0)
1530 	{
1531 		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1532 		fe->logicalBlocksRecorded = cpu_to_le64(
1533 			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1534 			(inode->i_sb->s_blocksize_bits - 9));
1535 
1536 		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1537 			fe->accessTime = cpu_to_lets(cpu_time);
1538 		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1539 			fe->modificationTime = cpu_to_lets(cpu_time);
1540 		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1541 			fe->attrTime = cpu_to_lets(cpu_time);
1542 		memset(&(fe->impIdent), 0, sizeof(regid));
1543 		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1544 		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1545 		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1546 		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1547 		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1548 		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1549 		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1550 		crclen = sizeof(struct fileEntry);
1551 	}
1552 	else
1553 	{
1554 		memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1555 		efe->objectSize = cpu_to_le64(inode->i_size);
1556 		efe->logicalBlocksRecorded = cpu_to_le64(
1557 			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1558 			(inode->i_sb->s_blocksize_bits - 9));
1559 
1560 		if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1561 			(UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1562 			 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1563 		{
1564 			UDF_I_CRTIME(inode) = inode->i_atime;
1565 		}
1566 		if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1567 			(UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1568 			 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1569 		{
1570 			UDF_I_CRTIME(inode) = inode->i_mtime;
1571 		}
1572 		if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1573 			(UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1574 			 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1575 		{
1576 			UDF_I_CRTIME(inode) = inode->i_ctime;
1577 		}
1578 
1579 		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1580 			efe->accessTime = cpu_to_lets(cpu_time);
1581 		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1582 			efe->modificationTime = cpu_to_lets(cpu_time);
1583 		if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1584 			efe->createTime = cpu_to_lets(cpu_time);
1585 		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1586 			efe->attrTime = cpu_to_lets(cpu_time);
1587 
1588 		memset(&(efe->impIdent), 0, sizeof(regid));
1589 		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1590 		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1591 		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1592 		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1593 		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1594 		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1595 		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1596 		crclen = sizeof(struct extendedFileEntry);
1597 	}
1598 	if (UDF_I_STRAT4096(inode))
1599 	{
1600 		fe->icbTag.strategyType = cpu_to_le16(4096);
1601 		fe->icbTag.strategyParameter = cpu_to_le16(1);
1602 		fe->icbTag.numEntries = cpu_to_le16(2);
1603 	}
1604 	else
1605 	{
1606 		fe->icbTag.strategyType = cpu_to_le16(4);
1607 		fe->icbTag.numEntries = cpu_to_le16(1);
1608 	}
1609 
1610 	if (S_ISDIR(inode->i_mode))
1611 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1612 	else if (S_ISREG(inode->i_mode))
1613 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1614 	else if (S_ISLNK(inode->i_mode))
1615 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1616 	else if (S_ISBLK(inode->i_mode))
1617 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1618 	else if (S_ISCHR(inode->i_mode))
1619 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1620 	else if (S_ISFIFO(inode->i_mode))
1621 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1622 	else if (S_ISSOCK(inode->i_mode))
1623 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1624 
1625 	icbflags =	UDF_I_ALLOCTYPE(inode) |
1626 			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1627 			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1628 			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1629 			(le16_to_cpu(fe->icbTag.flags) &
1630 				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1631 				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1632 
1633 	fe->icbTag.flags = cpu_to_le16(icbflags);
1634 	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1635 		fe->descTag.descVersion = cpu_to_le16(3);
1636 	else
1637 		fe->descTag.descVersion = cpu_to_le16(2);
1638 	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1639 	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1640 	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1641 	fe->descTag.descCRCLength = cpu_to_le16(crclen);
1642 	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1643 
1644 	fe->descTag.tagChecksum = 0;
1645 	for (i=0; i<16; i++)
1646 		if (i != 4)
1647 			fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1648 
1649 	/* write out the updated file entry */
1650 	mark_buffer_dirty(bh);
1651 	if (do_sync)
1652 	{
1653 		sync_dirty_buffer(bh);
1654 		if (buffer_req(bh) && !buffer_uptodate(bh))
1655 		{
1656 			printk(KERN_ERR "IO error syncing udf inode [%s:%08lx]\n",
1657 				inode->i_sb->s_id, inode->i_ino);
1658 			err = -EIO;
1659 		}
1660 	}
1661 	brelse(bh);
1662 	return err;
1663 }
1664 
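/*
 * udf_iget
 *
 * Return the in-core inode for the given logical block address, reading
 * it from disc if it is not already cached; returns NULL on failure.
 */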
1665 struct inode *
1666 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1667 {
1668 	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1669 	struct inode *inode = iget_locked(sb, block);
1670 
1671 	if (!inode)
1672 		return NULL;
1673 
1674 	if (inode->i_state & I_NEW) {
1675 		memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1676 		__udf_read_inode(inode);
1677 		unlock_new_inode(inode);
1678 	}
1679 
1680 	if (is_bad_inode(inode))
1681 		goto out_iput;
1682 
1683 	if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1684 		udf_debug("block=%d, partition=%d out of range\n",
1685 			ino.logicalBlockNum, ino.partitionReferenceNum);
1686 		make_bad_inode(inode);
1687 		goto out_iput;
1688 	}
1689 
1690 	return inode;
1691 
1692  out_iput:
1693 	iput(inode);
1694 	return NULL;
1695 }
1696 
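/*
 * udf_add_aext
 *
 * Append the extent (eloc, elen) at position epos. If the current
 * descriptor area is full, allocate a new allocation extent descriptor
 * block, chain it in with an EXT_NEXT_EXTENT_ALLOCDECS pointer and
 * continue there.
 */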
1697 int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1698 	kernel_lb_addr eloc, uint32_t elen, int inc)
1699 {
1700 	int adsize;
1701 	short_ad *sad = NULL;
1702 	long_ad *lad = NULL;
1703 	struct allocExtDesc *aed;
1704 	int8_t etype;
1705 	uint8_t *ptr;
1706 
1707 	if (!epos->bh)
1708 		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1709 	else
1710 		ptr = epos->bh->b_data + epos->offset;
1711 
1712 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1713 		adsize = sizeof(short_ad);
1714 	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1715 		adsize = sizeof(long_ad);
1716 	else
1717 		return -1;
1718 
1719 	if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize)
1720 	{
1721 		char *sptr, *dptr;
1722 		struct buffer_head *nbh;
1723 		int err, loffset;
1724 		kernel_lb_addr obloc = epos->block;
1725 
1726 		if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1727 			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1728 		{
1729 			return -1;
1730 		}
1731 		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1732 			epos->block, 0))))
1733 		{
1734 			return -1;
1735 		}
1736 		lock_buffer(nbh);
1737 		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1738 		set_buffer_uptodate(nbh);
1739 		unlock_buffer(nbh);
1740 		mark_buffer_dirty_inode(nbh, inode);
1741 
1742 		aed = (struct allocExtDesc *)(nbh->b_data);
1743 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1744 			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
1745 		if (epos->offset + adsize > inode->i_sb->s_blocksize)
1746 		{
1747 			loffset = epos->offset;
1748 			aed->lengthAllocDescs = cpu_to_le32(adsize);
1749 			sptr = ptr - adsize;
1750 			dptr = nbh->b_data + sizeof(struct allocExtDesc);
1751 			memcpy(dptr, sptr, adsize);
1752 			epos->offset = sizeof(struct allocExtDesc) + adsize;
1753 		}
1754 		else
1755 		{
1756 			loffset = epos->offset + adsize;
1757 			aed->lengthAllocDescs = cpu_to_le32(0);
1758 			sptr = ptr;
1759 			epos->offset = sizeof(struct allocExtDesc);
1760 
1761 			if (epos->bh)
1762 			{
1763 				aed = (struct allocExtDesc *)epos->bh->b_data;
1764 				aed->lengthAllocDescs =
1765 					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1766 			}
1767 			else
1768 			{
1769 				UDF_I_LENALLOC(inode) += adsize;
1770 				mark_inode_dirty(inode);
1771 			}
1772 		}
1773 		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1774 			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1775 				epos->block.logicalBlockNum, sizeof(tag));
1776 		else
1777 			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1778 				epos->block.logicalBlockNum, sizeof(tag));
1779 		switch (UDF_I_ALLOCTYPE(inode))
1780 		{
1781 			case ICBTAG_FLAG_AD_SHORT:
1782 			{
1783 				sad = (short_ad *)sptr;
1784 				sad->extLength = cpu_to_le32(
1785 					EXT_NEXT_EXTENT_ALLOCDECS |
1786 					inode->i_sb->s_blocksize);
1787 				sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum);
1788 				break;
1789 			}
1790 			case ICBTAG_FLAG_AD_LONG:
1791 			{
1792 				lad = (long_ad *)sptr;
1793 				lad->extLength = cpu_to_le32(
1794 					EXT_NEXT_EXTENT_ALLOCDECS |
1795 					inode->i_sb->s_blocksize);
1796 				lad->extLocation = cpu_to_lelb(epos->block);
1797 				memset(lad->impUse, 0x00, sizeof(lad->impUse));
1798 				break;
1799 			}
1800 		}
1801 		if (epos->bh)
1802 		{
1803 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1804 				udf_update_tag(epos->bh->b_data, loffset);
1805 			else
1806 				udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1807 			mark_buffer_dirty_inode(epos->bh, inode);
1808 			brelse(epos->bh);
1809 		}
1810 		else
1811 			mark_inode_dirty(inode);
1812 		epos->bh = nbh;
1813 	}
1814 
1815 	etype = udf_write_aext(inode, epos, eloc, elen, inc);
1816 
1817 	if (!epos->bh)
1818 	{
1819 		UDF_I_LENALLOC(inode) += adsize;
1820 		mark_inode_dirty(inode);
1821 	}
1822 	else
1823 	{
1824 		aed = (struct allocExtDesc *)epos->bh->b_data;
1825 		aed->lengthAllocDescs =
1826 			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1827 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1828 			udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize));
1829 		else
1830 			udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1831 		mark_buffer_dirty_inode(epos->bh, inode);
1832 	}
1833 
1834 	return etype;
1835 }
1836 
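/*
 * udf_write_aext
 *
 * PURPOSE
 *	Write a single allocation descriptor at the given extent position.
 *
 * DESCRIPTION
 *	Stores eloc/elen as a short_ad or long_ad depending on the inode's
 *	allocation type, marks the containing buffer or inode dirty, and
 *	advances the position by one descriptor when inc is set.  Returns the
 *	extent type encoded in the top two bits of elen.
 */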
1837 int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
1838     kernel_lb_addr eloc, uint32_t elen, int inc)
1839 {
1840 	int adsize;
1841 	uint8_t *ptr;
1842 
1843 	if (!epos->bh)
1844 		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1845 	else
1846 		ptr = epos->bh->b_data + epos->offset;
1847 
1848 	switch (UDF_I_ALLOCTYPE(inode))
1849 	{
1850 		case ICBTAG_FLAG_AD_SHORT:
1851 		{
1852 			short_ad *sad = (short_ad *)ptr;
1853 			sad->extLength = cpu_to_le32(elen);
1854 			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1855 			adsize = sizeof(short_ad);
1856 			break;
1857 		}
1858 		case ICBTAG_FLAG_AD_LONG:
1859 		{
1860 			long_ad *lad = (long_ad *)ptr;
1861 			lad->extLength = cpu_to_le32(elen);
1862 			lad->extLocation = cpu_to_lelb(eloc);
1863 			memset(lad->impUse, 0x00, sizeof(lad->impUse));
1864 			adsize = sizeof(long_ad);
1865 			break;
1866 		}
1867 		default:
1868 			return -1;
1869 	}
1870 
1871 	if (epos->bh)
1872 	{
1873 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1874 		{
1875 			struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data;
1876 			udf_update_tag(epos->bh->b_data,
1877 				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1878 		}
1879 		mark_buffer_dirty_inode(epos->bh, inode);
1880 	}
1881 	else
1882 		mark_inode_dirty(inode);
1883 
1884 	if (inc)
1885 		epos->offset += adsize;
1886 	return (elen >> 30);
1887 }
1888 
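/*
 * udf_next_aext
 *
 * PURPOSE
 *	Return the next extent of the inode, following continuation blocks.
 *
 * DESCRIPTION
 *	Wraps udf_current_aext() and transparently follows
 *	EXT_NEXT_EXTENT_ALLOCDECS descriptors into chained allocation extent
 *	blocks, so callers only ever see real extents.  Returns -1 at the end
 *	of the list or on a read error.
 */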
1889 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
1890 	kernel_lb_addr *eloc, uint32_t *elen, int inc)
1891 {
1892 	int8_t etype;
1893 
1894 	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
1895 		(EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1896 	{
1897 		epos->block = *eloc;
1898 		epos->offset = sizeof(struct allocExtDesc);
1899 		brelse(epos->bh);
1900 		if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0))))
1901 		{
1902 			udf_debug("reading block %d failed!\n",
1903 				udf_get_lb_pblock(inode->i_sb, epos->block, 0));
1904 			return -1;
1905 		}
1906 	}
1907 
1908 	return etype;
1909 }
1910 
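/*
 * udf_current_aext
 *
 * PURPOSE
 *	Decode the allocation descriptor at the current extent position.
 *
 * DESCRIPTION
 *	Reads a short_ad or long_ad from the inode's own descriptor area or
 *	from an allocation extent block, fills in *eloc and *elen, and advances
 *	epos when inc is set.  Returns the extent type, or -1 when there are no
 *	more descriptors.
 */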
1911 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
1912 	kernel_lb_addr *eloc, uint32_t *elen, int inc)
1913 {
1914 	int alen;
1915 	int8_t etype;
1916 	uint8_t *ptr;
1917 
1918 	if (!epos->bh)
1919 	{
1920 		if (!epos->offset)
1921 			epos->offset = udf_file_entry_alloc_offset(inode);
1922 		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1923 		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
1924 	}
1925 	else
1926 	{
1927 		if (!epos->offset)
1928 			epos->offset = sizeof(struct allocExtDesc);
1929 		ptr = epos->bh->b_data + epos->offset;
1930 		alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs);
1931 	}
1932 
1933 	switch (UDF_I_ALLOCTYPE(inode))
1934 	{
1935 		case ICBTAG_FLAG_AD_SHORT:
1936 		{
1937 			short_ad *sad;
1938 
1939 			if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc)))
1940 				return -1;
1941 
1942 			etype = le32_to_cpu(sad->extLength) >> 30;
1943 			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1944 			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1945 			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1946 			break;
1947 		}
1948 		case ICBTAG_FLAG_AD_LONG:
1949 		{
1950 			long_ad *lad;
1951 
1952 			if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc)))
1953 				return -1;
1954 
1955 			etype = le32_to_cpu(lad->extLength) >> 30;
1956 			*eloc = lelb_to_cpu(lad->extLocation);
1957 			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1958 			break;
1959 		}
1960 		default:
1961 		{
1962 			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1963 			return -1;
1964 		}
1965 	}
1966 
1967 	return etype;
1968 }
1969 
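/*
 * udf_insert_aext
 *
 * PURPOSE
 *	Insert a new extent at the given position.
 *
 * DESCRIPTION
 *	Writes the new extent at epos and shifts each following extent one
 *	descriptor forward; the extent pushed off the end is appended with
 *	udf_add_aext().  Returns the type of the last extent written.
 */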
1970 static int8_t
1971 udf_insert_aext(struct inode *inode, struct extent_position epos,
1972 		kernel_lb_addr neloc, uint32_t nelen)
1973 {
1974 	kernel_lb_addr oeloc;
1975 	uint32_t oelen;
1976 	int8_t etype;
1977 
1978 	if (epos.bh)
1979 		get_bh(epos.bh);
1980 
1981 	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1)
1982 	{
1983 		udf_write_aext(inode, &epos, neloc, nelen, 1);
1984 
1985 		neloc = oeloc;
1986 		nelen = (etype << 30) | oelen;
1987 	}
1988 	udf_add_aext(inode, &epos, neloc, nelen, 1);
1989 	brelse(epos.bh);
1990 	return (nelen >> 30);
1991 }
1992 
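/*
 * udf_delete_aext
 *
 * PURPOSE
 *	Delete the extent at the given position.
 *
 * DESCRIPTION
 *	Copies every following extent one slot back and clears the slot(s)
 *	left over at the end.  If the shift empties the last allocation extent
 *	block, that block is freed and the descriptor pointing to it is
 *	cleared as well.
 */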
1993 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
1994 	kernel_lb_addr eloc, uint32_t elen)
1995 {
1996 	struct extent_position oepos;
1997 	int adsize;
1998 	int8_t etype;
1999 	struct allocExtDesc *aed;
2000 
2001 	if (epos.bh)
2002 	{
2003 		get_bh(epos.bh);	/* one reference for epos ... */
2004 		get_bh(epos.bh);	/* ... and one for oepos, which is copied from epos below */
2005 	}
2006 
2007 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
2008 		adsize = sizeof(short_ad);
2009 	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
2010 		adsize = sizeof(long_ad);
2011 	else
2012 		adsize = 0;
2013 
2014 	oepos = epos;
2015 	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
2016 		return -1;
2017 
2018 	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
2019 	{
2020 		udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
2021 		if (oepos.bh != epos.bh)
2022 		{
2023 			oepos.block = epos.block;
2024 			brelse(oepos.bh);
2025 			get_bh(epos.bh);
2026 			oepos.bh = epos.bh;
2027 			oepos.offset = epos.offset - adsize;
2028 		}
2029 	}
2030 	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
2031 	elen = 0;
2032 
2033 	if (epos.bh != oepos.bh)
2034 	{
2035 		udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
2036 		udf_write_aext(inode, &oepos, eloc, elen, 1);	/* clear the stale copy of the last extent ... */
2037 		udf_write_aext(inode, &oepos, eloc, elen, 1);	/* ... and the descriptor that pointed to the freed block */
2038 		if (!oepos.bh)
2039 		{
2040 			UDF_I_LENALLOC(inode) -= (adsize * 2);
2041 			mark_inode_dirty(inode);
2042 		}
2043 		else
2044 		{
2045 			aed = (struct allocExtDesc *)oepos.bh->b_data;
2046 			aed->lengthAllocDescs =
2047 				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
2048 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2049 				udf_update_tag(oepos.bh->b_data, oepos.offset - (2*adsize));
2050 			else
2051 				udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2052 			mark_buffer_dirty_inode(oepos.bh, inode);
2053 		}
2054 	}
2055 	else
2056 	{
2057 		udf_write_aext(inode, &oepos, eloc, elen, 1);
2058 		if (!oepos.bh)
2059 		{
2060 			UDF_I_LENALLOC(inode) -= adsize;
2061 			mark_inode_dirty(inode);
2062 		}
2063 		else
2064 		{
2065 			aed = (struct allocExtDesc *)oepos.bh->b_data;
2066 			aed->lengthAllocDescs =
2067 				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
2068 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2069 				udf_update_tag(oepos.bh->b_data, epos.offset - adsize);
2070 			else
2071 				udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2072 			mark_buffer_dirty_inode(oepos.bh, inode);
2073 		}
2074 	}
2075 
2076 	brelse(epos.bh);
2077 	brelse(oepos.bh);
2078 	return (elen >> 30);
2079 }
2080 
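/*
 * inode_bmap
 *
 * PURPOSE
 *	Find the extent that covers a given file block.
 *
 * DESCRIPTION
 *	Walks the extent list from the start of the inode until the running
 *	byte count passes the requested block, returning the extent type and
 *	setting *offset to the block's offset within that extent.  Returns -1
 *	if the block lies beyond the last recorded extent.
 */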
2081 int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos,
2082 	kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset)
2083 {
2084 	loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
2085 	int8_t etype;
2086 
2087 	if (block < 0)	/* never true: sector_t is unsigned */
2088 	{
2089 		printk(KERN_ERR "udf: inode_bmap: block < 0\n");
2090 		return -1;
2091 	}
2092 
2093 	pos->offset = 0;
2094 	pos->block = UDF_I_LOCATION(inode);
2095 	pos->bh = NULL;
2096 	*elen = 0;
2097 
2098 	do
2099 	{
2100 		if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1)
2101 		{
2102 			*offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
2103 			UDF_I_LENEXTENTS(inode) = lbcount;
2104 			return -1;
2105 		}
2106 		lbcount += *elen;
2107 	} while (lbcount <= bcount);
2108 
2109 	*offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits;
2110 
2111 	return etype;
2112 }
2113 
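/*
 * udf_block_map
 *
 * PURPOSE
 *	Map a file block to its physical block number.
 *
 * DESCRIPTION
 *	Uses inode_bmap() to locate the extent covering the block and
 *	translates the result with udf_get_lb_pblock().  Returns 0 when the
 *	block is not recorded and allocated; the result is converted with
 *	udf_fixed_to_variable() on VARCONV mounts.
 */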
2114 long udf_block_map(struct inode *inode, sector_t block)
2115 {
2116 	kernel_lb_addr eloc;
2117 	uint32_t elen;
2118 	sector_t offset;
2119 	struct extent_position epos = { NULL, 0, { 0, 0}};
2120 	int ret;
2121 
2122 	lock_kernel();
2123 
2124 	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
2125 		ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
2126 	else
2127 		ret = 0;
2128 
2129 	unlock_kernel();
2130 	brelse(epos.bh);
2131 
2132 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2133 		return udf_fixed_to_variable(ret);
2134 	else
2135 		return ret;
2136 }
2137