1 /*
2  * inode.c
3  *
4  * PURPOSE
5  *  Inode handling routines for the OSTA-UDF(tm) filesystem.
6  *
7  * COPYRIGHT
8  *  This file is distributed under the terms of the GNU General Public
9  *  License (GPL). Copies of the GPL can be obtained from:
10  *    ftp://prep.ai.mit.edu/pub/gnu/GPL
11  *  Each contributing author retains all rights to their own work.
12  *
13  *  (C) 1998 Dave Boynton
14  *  (C) 1998-2004 Ben Fennema
15  *  (C) 1999-2000 Stelias Computing Inc
16  *
17  * HISTORY
18  *
19  *  10/04/98 dgb  Added rudimentary directory functions
20  *  10/07/98      Fully working udf_block_map! It works!
21  *  11/25/98      bmap altered to better support extents
22  *  12/06/98 blf  partition support in udf_iget, udf_block_map
23  *                and udf_read_inode
24  *  12/12/98      rewrote udf_block_map to handle next extents and descs across
25  *                block boundaries (which is not actually allowed)
26  *  12/20/98      added support for strategy 4096
27  *  03/07/99      rewrote udf_block_map (again)
28  *                New funcs, inode_bmap, udf_next_aext
29  *  04/19/99      Support for writing device EA's for major/minor #
30  */
31 
32 #include "udfdecl.h"
33 #include <linux/mm.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/writeback.h>
37 #include <linux/slab.h>
38 #include <linux/crc-itu-t.h>
39 #include <linux/mpage.h>
40 #include <linux/uio.h>
41 #include <linux/bio.h>
42 
43 #include "udf_i.h"
44 #include "udf_sb.h"
45 
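/*
 * Size of the scratch extent array (laarr[]) that inode_getblk() uses while
 * splitting, preallocating and merging extents around a newly mapped block.
 */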
46 #define EXTENT_MERGE_SIZE 5
47 
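/*
 * UDF permission bits that map directly onto the POSIX rwx bits (see
 * udf_convert_permissions()). The UDF-specific delete bits are not part of
 * the POSIX mode; they are kept in i_extraPerms and refreshed from the mode
 * by udf_update_extra_perms().
 */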
48 #define FE_MAPPED_PERMS	(FE_PERM_U_READ | FE_PERM_U_WRITE | FE_PERM_U_EXEC | \
49 			 FE_PERM_G_READ | FE_PERM_G_WRITE | FE_PERM_G_EXEC | \
50 			 FE_PERM_O_READ | FE_PERM_O_WRITE | FE_PERM_O_EXEC)
51 
52 #define FE_DELETE_PERMS	(FE_PERM_U_DELETE | FE_PERM_G_DELETE | \
53 			 FE_PERM_O_DELETE)
54 
55 struct udf_map_rq;
56 
57 static umode_t udf_convert_permissions(struct fileEntry *);
58 static int udf_update_inode(struct inode *, int);
59 static int udf_sync_inode(struct inode *inode);
60 static int udf_alloc_i_data(struct inode *inode, size_t size);
61 static int inode_getblk(struct inode *inode, struct udf_map_rq *map);
62 static int udf_insert_aext(struct inode *, struct extent_position,
63 			   struct kernel_lb_addr, uint32_t);
64 static void udf_split_extents(struct inode *, int *, int, udf_pblk_t,
65 			      struct kernel_long_ad *, int *);
66 static void udf_prealloc_extents(struct inode *, int, int,
67 				 struct kernel_long_ad *, int *);
68 static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *);
69 static int udf_update_extents(struct inode *, struct kernel_long_ad *, int,
70 			      int, struct extent_position *);
71 static int udf_get_block_wb(struct inode *inode, sector_t block,
72 			    struct buffer_head *bh_result, int create);
73 
74 static void __udf_clear_extent_cache(struct inode *inode)
75 {
76 	struct udf_inode_info *iinfo = UDF_I(inode);
77 
78 	if (iinfo->cached_extent.lstart != -1) {
79 		brelse(iinfo->cached_extent.epos.bh);
80 		iinfo->cached_extent.lstart = -1;
81 	}
82 }
83 
84 /* Invalidate extent cache */
85 static void udf_clear_extent_cache(struct inode *inode)
86 {
87 	struct udf_inode_info *iinfo = UDF_I(inode);
88 
89 	spin_lock(&iinfo->i_extent_cache_lock);
90 	__udf_clear_extent_cache(inode);
91 	spin_unlock(&iinfo->i_extent_cache_lock);
92 }
93 
94 /* Return contents of extent cache */
95 static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
96 				 loff_t *lbcount, struct extent_position *pos)
97 {
98 	struct udf_inode_info *iinfo = UDF_I(inode);
99 	int ret = 0;
100 
101 	spin_lock(&iinfo->i_extent_cache_lock);
102 	if ((iinfo->cached_extent.lstart <= bcount) &&
103 	    (iinfo->cached_extent.lstart != -1)) {
104 		/* Cache hit */
105 		*lbcount = iinfo->cached_extent.lstart;
106 		memcpy(pos, &iinfo->cached_extent.epos,
107 		       sizeof(struct extent_position));
108 		if (pos->bh)
109 			get_bh(pos->bh);
110 		ret = 1;
111 	}
112 	spin_unlock(&iinfo->i_extent_cache_lock);
113 	return ret;
114 }
115 
116 /* Add extent to extent cache */
117 static void udf_update_extent_cache(struct inode *inode, loff_t estart,
118 				    struct extent_position *pos)
119 {
120 	struct udf_inode_info *iinfo = UDF_I(inode);
121 
122 	spin_lock(&iinfo->i_extent_cache_lock);
123 	/* Invalidate previously cached extent */
124 	__udf_clear_extent_cache(inode);
125 	if (pos->bh)
126 		get_bh(pos->bh);
127 	memcpy(&iinfo->cached_extent.epos, pos, sizeof(*pos));
128 	iinfo->cached_extent.lstart = estart;
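	/*
	 * Rewind the cached position by one allocation descriptor so that a
	 * subsequent udf_next_aext() from the cached position returns the
	 * extent starting at @estart rather than the one following it.
	 */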
129 	switch (iinfo->i_alloc_type) {
130 	case ICBTAG_FLAG_AD_SHORT:
131 		iinfo->cached_extent.epos.offset -= sizeof(struct short_ad);
132 		break;
133 	case ICBTAG_FLAG_AD_LONG:
134 		iinfo->cached_extent.epos.offset -= sizeof(struct long_ad);
135 		break;
136 	}
137 	spin_unlock(&iinfo->i_extent_cache_lock);
138 }
139 
140 void udf_evict_inode(struct inode *inode)
141 {
142 	struct udf_inode_info *iinfo = UDF_I(inode);
143 	int want_delete = 0;
144 
145 	if (!is_bad_inode(inode)) {
146 		if (!inode->i_nlink) {
147 			want_delete = 1;
148 			udf_setsize(inode, 0);
149 			udf_update_inode(inode, IS_SYNC(inode));
150 		}
151 		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
152 		    inode->i_size != iinfo->i_lenExtents) {
153 			udf_warn(inode->i_sb,
154 				 "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
155 				 inode->i_ino, inode->i_mode,
156 				 (unsigned long long)inode->i_size,
157 				 (unsigned long long)iinfo->i_lenExtents);
158 		}
159 	}
160 	truncate_inode_pages_final(&inode->i_data);
161 	invalidate_inode_buffers(inode);
162 	clear_inode(inode);
163 	kfree(iinfo->i_data);
164 	iinfo->i_data = NULL;
165 	udf_clear_extent_cache(inode);
166 	if (want_delete) {
167 		udf_free_inode(inode);
168 	}
169 }
170 
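/*
 * A write past i_size failed: drop the pagecache beyond i_size and trim any
 * extents that were instantiated for the failed portion so that the extent
 * list matches i_size again.
 */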
171 static void udf_write_failed(struct address_space *mapping, loff_t to)
172 {
173 	struct inode *inode = mapping->host;
174 	struct udf_inode_info *iinfo = UDF_I(inode);
175 	loff_t isize = inode->i_size;
176 
177 	if (to > isize) {
178 		truncate_pagecache(inode, isize);
179 		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
180 			down_write(&iinfo->i_data_sem);
181 			udf_clear_extent_cache(inode);
182 			udf_truncate_extents(inode);
183 			up_write(&iinfo->i_data_sem);
184 		}
185 	}
186 }
187 
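/*
 * Writeback of a file whose data is embedded in the ICB: copy the dirty page
 * contents back into iinfo->i_data (just past the extended attributes) and
 * mark the inode dirty so that udf_update_inode() writes the data out as
 * part of the file entry.
 */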
188 static int udf_adinicb_writepage(struct page *page,
189 				 struct writeback_control *wbc, void *data)
190 {
191 	struct inode *inode = page->mapping->host;
192 	struct udf_inode_info *iinfo = UDF_I(inode);
193 
194 	BUG_ON(!PageLocked(page));
	memcpy_from_page(iinfo->i_data + iinfo->i_lenEAttr, page, 0,
			 i_size_read(inode));
197 	unlock_page(page);
198 	mark_inode_dirty(inode);
199 
200 	return 0;
201 }
202 
203 static int udf_writepages(struct address_space *mapping,
204 			  struct writeback_control *wbc)
205 {
206 	struct inode *inode = mapping->host;
207 	struct udf_inode_info *iinfo = UDF_I(inode);
208 
209 	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
210 		return mpage_writepages(mapping, wbc, udf_get_block_wb);
211 	return write_cache_pages(mapping, wbc, udf_adinicb_writepage, NULL);
212 }
213 
214 static void udf_adinicb_readpage(struct page *page)
215 {
216 	struct inode *inode = page->mapping->host;
217 	char *kaddr;
218 	struct udf_inode_info *iinfo = UDF_I(inode);
219 	loff_t isize = i_size_read(inode);
220 
221 	kaddr = kmap_local_page(page);
222 	memcpy(kaddr, iinfo->i_data + iinfo->i_lenEAttr, isize);
223 	memset(kaddr + isize, 0, PAGE_SIZE - isize);
224 	flush_dcache_page(page);
225 	SetPageUptodate(page);
226 	kunmap_local(kaddr);
227 }
228 
229 static int udf_read_folio(struct file *file, struct folio *folio)
230 {
231 	struct udf_inode_info *iinfo = UDF_I(file_inode(file));
232 
233 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
234 		udf_adinicb_readpage(&folio->page);
235 		folio_unlock(folio);
236 		return 0;
237 	}
238 	return mpage_read_folio(folio, udf_get_block);
239 }
240 
241 static void udf_readahead(struct readahead_control *rac)
242 {
243 	mpage_readahead(rac, udf_get_block);
244 }
245 
246 static int udf_write_begin(struct file *file, struct address_space *mapping,
247 			   loff_t pos, unsigned len,
248 			   struct page **pagep, void **fsdata)
249 {
250 	struct udf_inode_info *iinfo = UDF_I(file_inode(file));
251 	struct page *page;
252 	int ret;
253 
254 	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
255 		ret = block_write_begin(mapping, pos, len, pagep,
256 					udf_get_block);
257 		if (unlikely(ret))
258 			udf_write_failed(mapping, pos + len);
259 		return ret;
260 	}
261 	if (WARN_ON_ONCE(pos >= PAGE_SIZE))
262 		return -EIO;
263 	page = grab_cache_page_write_begin(mapping, 0);
264 	if (!page)
265 		return -ENOMEM;
266 	*pagep = page;
267 	if (!PageUptodate(page))
268 		udf_adinicb_readpage(page);
269 	return 0;
270 }
271 
272 static int udf_write_end(struct file *file, struct address_space *mapping,
273 			 loff_t pos, unsigned len, unsigned copied,
274 			 struct page *page, void *fsdata)
275 {
276 	struct inode *inode = file_inode(file);
277 	loff_t last_pos;
278 
279 	if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
280 		return generic_write_end(file, mapping, pos, len, copied, page,
281 					 fsdata);
282 	last_pos = pos + copied;
283 	if (last_pos > inode->i_size)
284 		i_size_write(inode, last_pos);
285 	set_page_dirty(page);
286 	unlock_page(page);
287 	put_page(page);
288 
289 	return copied;
290 }
291 
292 static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
293 {
294 	struct file *file = iocb->ki_filp;
295 	struct address_space *mapping = file->f_mapping;
296 	struct inode *inode = mapping->host;
297 	size_t count = iov_iter_count(iter);
298 	ssize_t ret;
299 
300 	/* Fallback to buffered IO for in-ICB files */
301 	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
302 		return 0;
303 	ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
304 	if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
305 		udf_write_failed(mapping, iocb->ki_pos + count);
306 	return ret;
307 }
308 
309 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
310 {
311 	struct udf_inode_info *iinfo = UDF_I(mapping->host);
312 
313 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
314 		return -EINVAL;
315 	return generic_block_bmap(mapping, block, udf_get_block);
316 }
317 
318 const struct address_space_operations udf_aops = {
319 	.dirty_folio	= block_dirty_folio,
320 	.invalidate_folio = block_invalidate_folio,
321 	.read_folio	= udf_read_folio,
322 	.readahead	= udf_readahead,
323 	.writepages	= udf_writepages,
324 	.write_begin	= udf_write_begin,
325 	.write_end	= udf_write_end,
326 	.direct_IO	= udf_direct_IO,
327 	.bmap		= udf_bmap,
328 	.migrate_folio	= buffer_migrate_folio,
329 };
330 
331 /*
332  * Expand file stored in ICB to a normal one-block-file
333  *
 * This function requires the inode lock (i_rwsem) held
335  */
336 int udf_expand_file_adinicb(struct inode *inode)
337 {
338 	struct page *page;
339 	struct udf_inode_info *iinfo = UDF_I(inode);
340 	int err;
341 
342 	WARN_ON_ONCE(!inode_is_locked(inode));
343 	if (!iinfo->i_lenAlloc) {
344 		down_write(&iinfo->i_data_sem);
345 		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
346 			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
347 		else
348 			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
349 		/* from now on we have normal address_space methods */
350 		inode->i_data.a_ops = &udf_aops;
351 		up_write(&iinfo->i_data_sem);
352 		mark_inode_dirty(inode);
353 		return 0;
354 	}
355 
356 	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
357 	if (!page)
358 		return -ENOMEM;
359 
360 	if (!PageUptodate(page))
361 		udf_adinicb_readpage(page);
362 	down_write(&iinfo->i_data_sem);
363 	memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00,
364 	       iinfo->i_lenAlloc);
365 	iinfo->i_lenAlloc = 0;
366 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
367 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
368 	else
369 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
370 	set_page_dirty(page);
371 	unlock_page(page);
372 	up_write(&iinfo->i_data_sem);
373 	err = filemap_fdatawrite(inode->i_mapping);
374 	if (err) {
375 		/* Restore everything back so that we don't lose data... */
376 		lock_page(page);
377 		down_write(&iinfo->i_data_sem);
		memcpy_from_page(iinfo->i_data + iinfo->i_lenEAttr, page, 0,
				 inode->i_size);
380 		unlock_page(page);
381 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
382 		iinfo->i_lenAlloc = inode->i_size;
383 		up_write(&iinfo->i_data_sem);
384 	}
385 	put_page(page);
386 	mark_inode_dirty(inode);
387 
388 	return err;
389 }
390 
391 #define UDF_MAP_CREATE		0x01	/* Mapping can allocate new blocks */
392 #define UDF_MAP_NOPREALLOC	0x02	/* Do not preallocate blocks */
393 
394 #define UDF_BLK_MAPPED	0x01	/* Block was successfully mapped */
395 #define UDF_BLK_NEW	0x02	/* Block was freshly allocated */
396 
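/*
 * Block mapping request: the caller fills in the logical block (lblk) and
 * the UDF_MAP_ flags, udf_map_block() fills in the physical block (pblk)
 * and the UDF_BLK_ result flags.
 */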
397 struct udf_map_rq {
398 	sector_t lblk;
399 	udf_pblk_t pblk;
400 	int iflags;		/* UDF_MAP_ flags determining behavior */
401 	int oflags;		/* UDF_BLK_ flags reporting results */
402 };
403 
404 static int udf_map_block(struct inode *inode, struct udf_map_rq *map)
405 {
406 	int err;
407 	struct udf_inode_info *iinfo = UDF_I(inode);
408 
409 	map->oflags = 0;
410 	if (!(map->iflags & UDF_MAP_CREATE)) {
411 		struct kernel_lb_addr eloc;
412 		uint32_t elen;
413 		sector_t offset;
414 		struct extent_position epos = {};
415 
416 		down_read(&iinfo->i_data_sem);
417 		if (inode_bmap(inode, map->lblk, &epos, &eloc, &elen, &offset)
418 				== (EXT_RECORDED_ALLOCATED >> 30)) {
419 			map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc,
420 							offset);
421 			map->oflags |= UDF_BLK_MAPPED;
422 		}
423 		up_read(&iinfo->i_data_sem);
424 		brelse(epos.bh);
425 
426 		return 0;
427 	}
428 
429 	down_write(&iinfo->i_data_sem);
430 	/*
431 	 * Block beyond EOF and prealloc extents? Just discard preallocation
432 	 * as it is not useful and complicates things.
433 	 */
434 	if (((loff_t)map->lblk) << inode->i_blkbits >= iinfo->i_lenExtents)
435 		udf_discard_prealloc(inode);
436 	udf_clear_extent_cache(inode);
437 	err = inode_getblk(inode, map);
438 	up_write(&iinfo->i_data_sem);
439 	return err;
440 }
441 
442 static int __udf_get_block(struct inode *inode, sector_t block,
443 			   struct buffer_head *bh_result, int flags)
444 {
445 	int err;
446 	struct udf_map_rq map = {
447 		.lblk = block,
448 		.iflags = flags,
449 	};
450 
451 	err = udf_map_block(inode, &map);
452 	if (err < 0)
453 		return err;
454 	if (map.oflags & UDF_BLK_MAPPED) {
455 		map_bh(bh_result, inode->i_sb, map.pblk);
456 		if (map.oflags & UDF_BLK_NEW)
457 			set_buffer_new(bh_result);
458 	}
459 	return 0;
460 }
461 
462 int udf_get_block(struct inode *inode, sector_t block,
463 		  struct buffer_head *bh_result, int create)
464 {
465 	int flags = create ? UDF_MAP_CREATE : 0;
466 
467 	/*
468 	 * We preallocate blocks only for regular files. It also makes sense
	 * for directories but it is not clear when to drop the
	 * preallocation. We might use some delayed work for that but it feels
	 * like overengineering for a filesystem like UDF.
472 	 */
473 	if (!S_ISREG(inode->i_mode))
474 		flags |= UDF_MAP_NOPREALLOC;
475 	return __udf_get_block(inode, block, bh_result, flags);
476 }
477 
478 /*
479  * We shouldn't be allocating blocks on page writeback since we allocate them
 * on page fault. We can still see dirty buffers without allocated blocks
 * when truncate expands the file; these don't have valid data, so we can
 * safely ignore them. So never allocate blocks from page writeback.
483  */
484 static int udf_get_block_wb(struct inode *inode, sector_t block,
485 			    struct buffer_head *bh_result, int create)
486 {
487 	return __udf_get_block(inode, block, bh_result, 0);
488 }
489 
490 /* Extend the file with new blocks totaling 'new_block_bytes',
491  * return the number of extents added
492  */
493 static int udf_do_extend_file(struct inode *inode,
494 			      struct extent_position *last_pos,
495 			      struct kernel_long_ad *last_ext,
496 			      loff_t new_block_bytes)
497 {
498 	uint32_t add;
499 	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
500 	struct super_block *sb = inode->i_sb;
501 	struct udf_inode_info *iinfo;
502 	int err;
503 
504 	/* The previous extent is fake and we should not extend by anything
505 	 * - there's nothing to do... */
506 	if (!new_block_bytes && fake)
507 		return 0;
508 
509 	iinfo = UDF_I(inode);
510 	/* Round the last extent up to a multiple of block size */
511 	if (last_ext->extLength & (sb->s_blocksize - 1)) {
512 		last_ext->extLength =
513 			(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
514 			(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
515 			  sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
516 		iinfo->i_lenExtents =
517 			(iinfo->i_lenExtents + sb->s_blocksize - 1) &
518 			~(sb->s_blocksize - 1);
519 	}
520 
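	/*
	 * The extent length field is 30 bits (the top two bits of extLength
	 * carry the extent type), so a single extent can cover at most
	 * (1 << 30) - blocksize bytes once rounded to block size; anything
	 * beyond that is covered by additional extents added below.
	 */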
521 	add = 0;
522 	/* Can we merge with the previous extent? */
523 	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
524 					EXT_NOT_RECORDED_NOT_ALLOCATED) {
525 		add = (1 << 30) - sb->s_blocksize -
526 			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
527 		if (add > new_block_bytes)
528 			add = new_block_bytes;
529 		new_block_bytes -= add;
530 		last_ext->extLength += add;
531 	}
532 
533 	if (fake) {
534 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
535 				   last_ext->extLength, 1);
536 		if (err < 0)
537 			goto out_err;
538 		count++;
539 	} else {
540 		struct kernel_lb_addr tmploc;
541 		uint32_t tmplen;
542 
543 		udf_write_aext(inode, last_pos, &last_ext->extLocation,
544 				last_ext->extLength, 1);
545 
546 		/*
547 		 * We've rewritten the last extent. If we are going to add
548 		 * more extents, we may need to enter possible following
549 		 * empty indirect extent.
550 		 */
551 		if (new_block_bytes)
552 			udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
553 	}
554 	iinfo->i_lenExtents += add;
555 
556 	/* Managed to do everything necessary? */
557 	if (!new_block_bytes)
558 		goto out;
559 
560 	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
561 	last_ext->extLocation.logicalBlockNum = 0;
562 	last_ext->extLocation.partitionReferenceNum = 0;
563 	add = (1 << 30) - sb->s_blocksize;
564 	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;
565 
566 	/* Create enough extents to cover the whole hole */
567 	while (new_block_bytes > add) {
568 		new_block_bytes -= add;
569 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
570 				   last_ext->extLength, 1);
571 		if (err)
572 			goto out_err;
573 		iinfo->i_lenExtents += add;
574 		count++;
575 	}
576 	if (new_block_bytes) {
577 		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
578 			new_block_bytes;
579 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
580 				   last_ext->extLength, 1);
581 		if (err)
582 			goto out_err;
583 		iinfo->i_lenExtents += new_block_bytes;
584 		count++;
585 	}
586 
587 out:
588 	/* last_pos should point to the last written extent... */
589 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
590 		last_pos->offset -= sizeof(struct short_ad);
591 	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
592 		last_pos->offset -= sizeof(struct long_ad);
593 	else
594 		return -EIO;
595 
596 	return count;
597 out_err:
598 	/* Remove extents we've created so far */
599 	udf_clear_extent_cache(inode);
600 	udf_truncate_extents(inode);
601 	return err;
602 }
603 
604 /* Extend the final block of the file to final_block_len bytes */
605 static void udf_do_extend_final_block(struct inode *inode,
606 				      struct extent_position *last_pos,
607 				      struct kernel_long_ad *last_ext,
608 				      uint32_t new_elen)
609 {
610 	uint32_t added_bytes;
611 
612 	/*
613 	 * Extent already large enough? It may be already rounded up to block
614 	 * size...
615 	 */
616 	if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
617 		return;
618 	added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
619 	last_ext->extLength += added_bytes;
620 	UDF_I(inode)->i_lenExtents += added_bytes;
621 
622 	udf_write_aext(inode, last_pos, &last_ext->extLocation,
623 			last_ext->extLength, 1);
624 }
625 
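/*
 * Extend the extent list of @inode so that it covers @newsize bytes, either
 * by growing the last extent within its final block or by appending
 * not-recorded-not-allocated extents for the new hole.
 */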
626 static int udf_extend_file(struct inode *inode, loff_t newsize)
627 {
628 
629 	struct extent_position epos;
630 	struct kernel_lb_addr eloc;
631 	uint32_t elen;
632 	int8_t etype;
633 	struct super_block *sb = inode->i_sb;
634 	sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
635 	loff_t new_elen;
636 	int adsize;
637 	struct udf_inode_info *iinfo = UDF_I(inode);
638 	struct kernel_long_ad extent;
639 	int err = 0;
640 	bool within_last_ext;
641 
642 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
643 		adsize = sizeof(struct short_ad);
644 	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
645 		adsize = sizeof(struct long_ad);
646 	else
647 		BUG();
648 
649 	down_write(&iinfo->i_data_sem);
650 	/*
651 	 * When creating hole in file, just don't bother with preserving
652 	 * preallocation. It likely won't be very useful anyway.
653 	 */
654 	udf_discard_prealloc(inode);
655 
656 	etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
657 	within_last_ext = (etype != -1);
658 	/* We don't expect extents past EOF... */
659 	WARN_ON_ONCE(within_last_ext &&
660 		     elen > ((loff_t)offset + 1) << inode->i_blkbits);
661 
662 	if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
663 	    (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
664 		/* File has no extents at all or has empty last
665 		 * indirect extent! Create a fake extent... */
666 		extent.extLocation.logicalBlockNum = 0;
667 		extent.extLocation.partitionReferenceNum = 0;
668 		extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
669 	} else {
670 		epos.offset -= adsize;
671 		etype = udf_next_aext(inode, &epos, &extent.extLocation,
672 				      &extent.extLength, 0);
673 		extent.extLength |= etype << 30;
674 	}
675 
676 	new_elen = ((loff_t)offset << inode->i_blkbits) |
677 					(newsize & (sb->s_blocksize - 1));
678 
679 	/* File has extent covering the new size (could happen when extending
680 	 * inside a block)?
681 	 */
682 	if (within_last_ext) {
683 		/* Extending file within the last file block */
684 		udf_do_extend_final_block(inode, &epos, &extent, new_elen);
685 	} else {
686 		err = udf_do_extend_file(inode, &epos, &extent, new_elen);
687 	}
688 
689 	if (err < 0)
690 		goto out;
691 	err = 0;
692 out:
693 	brelse(epos.bh);
694 	up_write(&iinfo->i_data_sem);
695 	return err;
696 }
697 
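/*
 * Find or allocate the physical block backing map->lblk. Called with
 * iinfo->i_data_sem held for writing; on success fills in map->pblk and
 * map->oflags.
 */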
698 static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
699 {
700 	struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
701 	struct extent_position prev_epos, cur_epos, next_epos;
702 	int count = 0, startnum = 0, endnum = 0;
703 	uint32_t elen = 0, tmpelen;
704 	struct kernel_lb_addr eloc, tmpeloc;
705 	int c = 1;
706 	loff_t lbcount = 0, b_off = 0;
707 	udf_pblk_t newblocknum;
708 	sector_t offset = 0;
709 	int8_t etype;
710 	struct udf_inode_info *iinfo = UDF_I(inode);
711 	udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
712 	int lastblock = 0;
713 	bool isBeyondEOF;
714 	int ret = 0;
715 
716 	prev_epos.offset = udf_file_entry_alloc_offset(inode);
717 	prev_epos.block = iinfo->i_location;
718 	prev_epos.bh = NULL;
719 	cur_epos = next_epos = prev_epos;
720 	b_off = (loff_t)map->lblk << inode->i_sb->s_blocksize_bits;
721 
	/*
	 * Find the extent which contains the block we are looking for.
	 * Alternate between laarr[0] and laarr[1] for locations of the
	 * current extent and the previous extent.
	 */
725 	do {
726 		if (prev_epos.bh != cur_epos.bh) {
727 			brelse(prev_epos.bh);
728 			get_bh(cur_epos.bh);
729 			prev_epos.bh = cur_epos.bh;
730 		}
731 		if (cur_epos.bh != next_epos.bh) {
732 			brelse(cur_epos.bh);
733 			get_bh(next_epos.bh);
734 			cur_epos.bh = next_epos.bh;
735 		}
736 
737 		lbcount += elen;
738 
739 		prev_epos.block = cur_epos.block;
740 		cur_epos.block = next_epos.block;
741 
742 		prev_epos.offset = cur_epos.offset;
743 		cur_epos.offset = next_epos.offset;
744 
745 		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
746 		if (etype == -1)
747 			break;
748 
749 		c = !c;
750 
751 		laarr[c].extLength = (etype << 30) | elen;
752 		laarr[c].extLocation = eloc;
753 
754 		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
755 			pgoal = eloc.logicalBlockNum +
756 				((elen + inode->i_sb->s_blocksize - 1) >>
757 				 inode->i_sb->s_blocksize_bits);
758 
759 		count++;
760 	} while (lbcount + elen <= b_off);
761 
762 	b_off -= lbcount;
763 	offset = b_off >> inode->i_sb->s_blocksize_bits;
764 	/*
765 	 * Move prev_epos and cur_epos into indirect extent if we are at
766 	 * the pointer to it
767 	 */
768 	udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
769 	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
770 
	/* if the extent is allocated and recorded, return the block;
	   if the extent is not a multiple of the blocksize, round up */
773 
774 	if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
775 		if (elen & (inode->i_sb->s_blocksize - 1)) {
776 			elen = EXT_RECORDED_ALLOCATED |
777 				((elen + inode->i_sb->s_blocksize - 1) &
778 				 ~(inode->i_sb->s_blocksize - 1));
779 			iinfo->i_lenExtents =
780 				ALIGN(iinfo->i_lenExtents,
781 				      inode->i_sb->s_blocksize);
782 			udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
783 		}
784 		map->oflags = UDF_BLK_MAPPED;
785 		map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
786 		goto out_free;
787 	}
788 
789 	/* Are we beyond EOF and preallocated extent? */
790 	if (etype == -1) {
791 		loff_t hole_len;
792 
793 		isBeyondEOF = true;
794 		if (count) {
795 			if (c)
796 				laarr[0] = laarr[1];
797 			startnum = 1;
798 		} else {
799 			/* Create a fake extent when there's not one */
800 			memset(&laarr[0].extLocation, 0x00,
801 				sizeof(struct kernel_lb_addr));
802 			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
803 			/* Will udf_do_extend_file() create real extent from
804 			   a fake one? */
805 			startnum = (offset > 0);
806 		}
807 		/* Create extents for the hole between EOF and offset */
808 		hole_len = (loff_t)offset << inode->i_blkbits;
809 		ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
810 		if (ret < 0)
811 			goto out_free;
812 		c = 0;
813 		offset = 0;
814 		count += ret;
815 		/*
816 		 * Is there any real extent? - otherwise we overwrite the fake
817 		 * one...
818 		 */
819 		if (count)
820 			c = !c;
821 		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
822 			inode->i_sb->s_blocksize;
823 		memset(&laarr[c].extLocation, 0x00,
824 			sizeof(struct kernel_lb_addr));
825 		count++;
826 		endnum = c + 1;
827 		lastblock = 1;
828 	} else {
829 		isBeyondEOF = false;
830 		endnum = startnum = ((count > 2) ? 2 : count);
831 
832 		/* if the current extent is in position 0,
833 		   swap it with the previous */
834 		if (!c && count != 1) {
835 			laarr[2] = laarr[0];
836 			laarr[0] = laarr[1];
837 			laarr[1] = laarr[2];
838 			c = 1;
839 		}
840 
841 		/* if the current block is located in an extent,
842 		   read the next extent */
843 		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
844 		if (etype != -1) {
845 			laarr[c + 1].extLength = (etype << 30) | elen;
846 			laarr[c + 1].extLocation = eloc;
847 			count++;
848 			startnum++;
849 			endnum++;
850 		} else
851 			lastblock = 1;
852 	}
853 
854 	/* if the current extent is not recorded but allocated, get the
855 	 * block in the extent corresponding to the requested block */
856 	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
857 		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
858 	else { /* otherwise, allocate a new block */
859 		if (iinfo->i_next_alloc_block == map->lblk)
860 			goal = iinfo->i_next_alloc_goal;
861 
862 		if (!goal) {
863 			if (!(goal = pgoal)) /* XXX: what was intended here? */
864 				goal = iinfo->i_location.logicalBlockNum + 1;
865 		}
866 
867 		newblocknum = udf_new_block(inode->i_sb, inode,
868 				iinfo->i_location.partitionReferenceNum,
869 				goal, &ret);
870 		if (!newblocknum)
871 			goto out_free;
872 		if (isBeyondEOF)
873 			iinfo->i_lenExtents += inode->i_sb->s_blocksize;
874 	}
875 
	/* if the extent the requested block is located in contains multiple
	 * blocks, split the extent into at most three extents: blocks prior
	 * to the requested block, the requested block, and blocks after the
	 * requested block */
880 	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
881 
882 	if (!(map->iflags & UDF_MAP_NOPREALLOC))
883 		udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
884 
	/* merge any contiguous extents in laarr */
886 	udf_merge_extents(inode, laarr, &endnum);
887 
888 	/* write back the new extents, inserting new extents if the new number
889 	 * of extents is greater than the old number, and deleting extents if
890 	 * the new number of extents is less than the old number */
891 	ret = udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
892 	if (ret < 0)
893 		goto out_free;
894 
895 	map->pblk = udf_get_pblock(inode->i_sb, newblocknum,
896 				iinfo->i_location.partitionReferenceNum, 0);
897 	if (!map->pblk) {
898 		ret = -EFSCORRUPTED;
899 		goto out_free;
900 	}
901 	map->oflags = UDF_BLK_NEW | UDF_BLK_MAPPED;
902 	iinfo->i_next_alloc_block = map->lblk + 1;
903 	iinfo->i_next_alloc_goal = newblocknum + 1;
904 	inode->i_ctime = current_time(inode);
905 
906 	if (IS_SYNC(inode))
907 		udf_sync_inode(inode);
908 	else
909 		mark_inode_dirty(inode);
910 	ret = 0;
911 out_free:
912 	brelse(prev_epos.bh);
913 	brelse(cur_epos.bh);
914 	brelse(next_epos.bh);
915 	return ret;
916 }
917 
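/*
 * Split the extent laarr[*c] (which covers the requested block) into up to
 * three pieces: the blocks before the requested one, the newly recorded
 * block itself, and the blocks after it. *c and *endnum are updated so that
 * laarr[*c] keeps pointing at the extent holding the requested block and
 * *endnum reflects the number of valid entries.
 */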
918 static void udf_split_extents(struct inode *inode, int *c, int offset,
919 			       udf_pblk_t newblocknum,
920 			       struct kernel_long_ad *laarr, int *endnum)
921 {
922 	unsigned long blocksize = inode->i_sb->s_blocksize;
923 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
924 
925 	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
926 	    (laarr[*c].extLength >> 30) ==
927 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
928 		int curr = *c;
929 		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
930 			    blocksize - 1) >> blocksize_bits;
931 		int8_t etype = (laarr[curr].extLength >> 30);
932 
933 		if (blen == 1)
934 			;
935 		else if (!offset || blen == offset + 1) {
936 			laarr[curr + 2] = laarr[curr + 1];
937 			laarr[curr + 1] = laarr[curr];
938 		} else {
939 			laarr[curr + 3] = laarr[curr + 1];
940 			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
941 		}
942 
943 		if (offset) {
944 			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
945 				udf_free_blocks(inode->i_sb, inode,
946 						&laarr[curr].extLocation,
947 						0, offset);
948 				laarr[curr].extLength =
949 					EXT_NOT_RECORDED_NOT_ALLOCATED |
950 					(offset << blocksize_bits);
951 				laarr[curr].extLocation.logicalBlockNum = 0;
952 				laarr[curr].extLocation.
953 						partitionReferenceNum = 0;
954 			} else
955 				laarr[curr].extLength = (etype << 30) |
956 					(offset << blocksize_bits);
957 			curr++;
958 			(*c)++;
959 			(*endnum)++;
960 		}
961 
962 		laarr[curr].extLocation.logicalBlockNum = newblocknum;
963 		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
964 			laarr[curr].extLocation.partitionReferenceNum =
965 				UDF_I(inode)->i_location.partitionReferenceNum;
966 		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
967 			blocksize;
968 		curr++;
969 
970 		if (blen != offset + 1) {
971 			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
972 				laarr[curr].extLocation.logicalBlockNum +=
973 								offset + 1;
974 			laarr[curr].extLength = (etype << 30) |
975 				((blen - (offset + 1)) << blocksize_bits);
976 			curr++;
977 			(*endnum)++;
978 		}
979 	}
980 }
981 
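/*
 * Try to preallocate blocks following the current extent (up to
 * UDF_DEFAULT_PREALLOC_BLOCKS) so that sequential writes stay contiguous.
 * Not-recorded extents following the current one are extended, shortened or
 * removed to account for the newly preallocated blocks.
 */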
982 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
983 				 struct kernel_long_ad *laarr,
984 				 int *endnum)
985 {
986 	int start, length = 0, currlength = 0, i;
987 
988 	if (*endnum >= (c + 1)) {
989 		if (!lastblock)
990 			return;
991 		else
992 			start = c;
993 	} else {
994 		if ((laarr[c + 1].extLength >> 30) ==
995 					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
996 			start = c + 1;
997 			length = currlength =
998 				(((laarr[c + 1].extLength &
999 					UDF_EXTENT_LENGTH_MASK) +
1000 				inode->i_sb->s_blocksize - 1) >>
1001 				inode->i_sb->s_blocksize_bits);
1002 		} else
1003 			start = c;
1004 	}
1005 
1006 	for (i = start + 1; i <= *endnum; i++) {
1007 		if (i == *endnum) {
1008 			if (lastblock)
1009 				length += UDF_DEFAULT_PREALLOC_BLOCKS;
1010 		} else if ((laarr[i].extLength >> 30) ==
1011 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
1012 			length += (((laarr[i].extLength &
1013 						UDF_EXTENT_LENGTH_MASK) +
1014 				    inode->i_sb->s_blocksize - 1) >>
1015 				    inode->i_sb->s_blocksize_bits);
1016 		} else
1017 			break;
1018 	}
1019 
1020 	if (length) {
1021 		int next = laarr[start].extLocation.logicalBlockNum +
1022 			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
1023 			  inode->i_sb->s_blocksize - 1) >>
1024 			  inode->i_sb->s_blocksize_bits);
1025 		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
1026 				laarr[start].extLocation.partitionReferenceNum,
1027 				next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
1028 				length : UDF_DEFAULT_PREALLOC_BLOCKS) -
1029 				currlength);
		if (numalloc) {
1031 			if (start == (c + 1))
1032 				laarr[start].extLength +=
1033 					(numalloc <<
1034 					 inode->i_sb->s_blocksize_bits);
1035 			else {
1036 				memmove(&laarr[c + 2], &laarr[c + 1],
1037 					sizeof(struct long_ad) * (*endnum - (c + 1)));
1038 				(*endnum)++;
1039 				laarr[c + 1].extLocation.logicalBlockNum = next;
1040 				laarr[c + 1].extLocation.partitionReferenceNum =
1041 					laarr[c].extLocation.
1042 							partitionReferenceNum;
1043 				laarr[c + 1].extLength =
1044 					EXT_NOT_RECORDED_ALLOCATED |
1045 					(numalloc <<
1046 					 inode->i_sb->s_blocksize_bits);
1047 				start = c + 1;
1048 			}
1049 
1050 			for (i = start + 1; numalloc && i < *endnum; i++) {
1051 				int elen = ((laarr[i].extLength &
1052 						UDF_EXTENT_LENGTH_MASK) +
1053 					    inode->i_sb->s_blocksize - 1) >>
1054 					    inode->i_sb->s_blocksize_bits;
1055 
1056 				if (elen > numalloc) {
1057 					laarr[i].extLength -=
1058 						(numalloc <<
1059 						 inode->i_sb->s_blocksize_bits);
1060 					numalloc = 0;
1061 				} else {
1062 					numalloc -= elen;
1063 					if (*endnum > (i + 1))
1064 						memmove(&laarr[i],
1065 							&laarr[i + 1],
1066 							sizeof(struct long_ad) *
1067 							(*endnum - (i + 1)));
1068 					i--;
1069 					(*endnum)--;
1070 				}
1071 			}
1072 			UDF_I(inode)->i_lenExtents +=
1073 				numalloc << inode->i_sb->s_blocksize_bits;
1074 		}
1075 	}
1076 }
1077 
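/*
 * Merge neighbouring extents in laarr[] when they are of the same type,
 * physically adjacent (for allocated extents) and their combined length
 * still fits in the 30-bit extent length field. Not-recorded-allocated
 * extents that end up unneeded have their blocks freed and are turned into
 * not-allocated extents.
 */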
1078 static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
1079 			      int *endnum)
1080 {
1081 	int i;
1082 	unsigned long blocksize = inode->i_sb->s_blocksize;
1083 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1084 
1085 	for (i = 0; i < (*endnum - 1); i++) {
1086 		struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
1087 		struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
1088 
1089 		if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
1090 			(((li->extLength >> 30) ==
1091 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
1092 			((lip1->extLocation.logicalBlockNum -
1093 			  li->extLocation.logicalBlockNum) ==
1094 			(((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1095 			blocksize - 1) >> blocksize_bits)))) {
1096 
1097 			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1098 			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
1099 			     blocksize - 1) <= UDF_EXTENT_LENGTH_MASK) {
1100 				li->extLength = lip1->extLength +
1101 					(((li->extLength &
1102 						UDF_EXTENT_LENGTH_MASK) +
1103 					 blocksize - 1) & ~(blocksize - 1));
1104 				if (*endnum > (i + 2))
1105 					memmove(&laarr[i + 1], &laarr[i + 2],
1106 						sizeof(struct long_ad) *
1107 						(*endnum - (i + 2)));
1108 				i--;
1109 				(*endnum)--;
1110 			}
1111 		} else if (((li->extLength >> 30) ==
1112 				(EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
1113 			   ((lip1->extLength >> 30) ==
1114 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
1115 			udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
1116 					((li->extLength &
1117 					  UDF_EXTENT_LENGTH_MASK) +
1118 					 blocksize - 1) >> blocksize_bits);
1119 			li->extLocation.logicalBlockNum = 0;
1120 			li->extLocation.partitionReferenceNum = 0;
1121 
1122 			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1123 			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
1124 			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
1125 				lip1->extLength = (lip1->extLength -
1126 						   (li->extLength &
1127 						   UDF_EXTENT_LENGTH_MASK) +
1128 						   UDF_EXTENT_LENGTH_MASK) &
1129 						   ~(blocksize - 1);
1130 				li->extLength = (li->extLength &
1131 						 UDF_EXTENT_FLAG_MASK) +
1132 						(UDF_EXTENT_LENGTH_MASK + 1) -
1133 						blocksize;
1134 			} else {
1135 				li->extLength = lip1->extLength +
1136 					(((li->extLength &
1137 						UDF_EXTENT_LENGTH_MASK) +
1138 					  blocksize - 1) & ~(blocksize - 1));
1139 				if (*endnum > (i + 2))
1140 					memmove(&laarr[i + 1], &laarr[i + 2],
1141 						sizeof(struct long_ad) *
1142 						(*endnum - (i + 2)));
1143 				i--;
1144 				(*endnum)--;
1145 			}
1146 		} else if ((li->extLength >> 30) ==
1147 					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
1148 			udf_free_blocks(inode->i_sb, inode,
1149 					&li->extLocation, 0,
1150 					((li->extLength &
1151 						UDF_EXTENT_LENGTH_MASK) +
1152 					 blocksize - 1) >> blocksize_bits);
1153 			li->extLocation.logicalBlockNum = 0;
1154 			li->extLocation.partitionReferenceNum = 0;
1155 			li->extLength = (li->extLength &
1156 						UDF_EXTENT_LENGTH_MASK) |
1157 						EXT_NOT_RECORDED_NOT_ALLOCATED;
1158 		}
1159 	}
1160 }
1161 
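/*
 * Write the first @endnum extents of laarr[] back to the allocation
 * descriptors at @epos, deleting surplus on-disk descriptors when the new
 * count is smaller than @startnum and inserting new ones when it is larger.
 */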
1162 static int udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
1163 			      int startnum, int endnum,
1164 			      struct extent_position *epos)
1165 {
1166 	int start = 0, i;
1167 	struct kernel_lb_addr tmploc;
1168 	uint32_t tmplen;
1169 	int err;
1170 
1171 	if (startnum > endnum) {
1172 		for (i = 0; i < (startnum - endnum); i++)
1173 			udf_delete_aext(inode, *epos);
1174 	} else if (startnum < endnum) {
1175 		for (i = 0; i < (endnum - startnum); i++) {
1176 			err = udf_insert_aext(inode, *epos,
1177 					      laarr[i].extLocation,
1178 					      laarr[i].extLength);
1179 			/*
1180 			 * If we fail here, we are likely corrupting the extent
1181 			 * list and leaking blocks. At least stop early to
1182 			 * limit the damage.
1183 			 */
1184 			if (err < 0)
1185 				return err;
1186 			udf_next_aext(inode, epos, &laarr[i].extLocation,
1187 				      &laarr[i].extLength, 1);
1188 			start++;
1189 		}
1190 	}
1191 
1192 	for (i = start; i < endnum; i++) {
1193 		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
1194 		udf_write_aext(inode, epos, &laarr[i].extLocation,
1195 			       laarr[i].extLength, 1);
1196 	}
1197 	return 0;
1198 }
1199 
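/*
 * Map @block of @inode (allocating it when @create is set) and return an
 * uptodate buffer_head for it. Returns NULL if the block is not mapped or on
 * error (in which case *err is set). Freshly allocated blocks are zeroed and
 * marked dirty.
 */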
1200 struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
1201 			      int create, int *err)
1202 {
1203 	struct buffer_head *bh = NULL;
1204 	struct udf_map_rq map = {
1205 		.lblk = block,
1206 		.iflags = UDF_MAP_NOPREALLOC | (create ? UDF_MAP_CREATE : 0),
1207 	};
1208 
1209 	*err = udf_map_block(inode, &map);
1210 	if (*err || !(map.oflags & UDF_BLK_MAPPED))
1211 		return NULL;
1212 
1213 	bh = sb_getblk(inode->i_sb, map.pblk);
1214 	if (!bh) {
1215 		*err = -ENOMEM;
1216 		return NULL;
1217 	}
1218 	if (map.oflags & UDF_BLK_NEW) {
1219 		lock_buffer(bh);
1220 		memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1221 		set_buffer_uptodate(bh);
1222 		unlock_buffer(bh);
1223 		mark_buffer_dirty_inode(bh, inode);
1224 		return bh;
1225 	}
1226 
1227 	if (bh_read(bh, 0) >= 0)
1228 		return bh;
1229 
1230 	brelse(bh);
1231 	*err = -EIO;
1232 	return NULL;
1233 }
1234 
1235 int udf_setsize(struct inode *inode, loff_t newsize)
1236 {
1237 	int err = 0;
1238 	struct udf_inode_info *iinfo;
1239 	unsigned int bsize = i_blocksize(inode);
1240 
1241 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1242 	      S_ISLNK(inode->i_mode)))
1243 		return -EINVAL;
1244 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1245 		return -EPERM;
1246 
1247 	filemap_invalidate_lock(inode->i_mapping);
1248 	iinfo = UDF_I(inode);
1249 	if (newsize > inode->i_size) {
1250 		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1251 			if (bsize >=
1252 			    (udf_file_entry_alloc_offset(inode) + newsize)) {
1253 				down_write(&iinfo->i_data_sem);
1254 				iinfo->i_lenAlloc = newsize;
1255 				up_write(&iinfo->i_data_sem);
1256 				goto set_size;
1257 			}
1258 			err = udf_expand_file_adinicb(inode);
1259 			if (err)
1260 				goto out_unlock;
1261 		}
1262 		err = udf_extend_file(inode, newsize);
1263 		if (err)
1264 			goto out_unlock;
1265 set_size:
1266 		truncate_setsize(inode, newsize);
1267 	} else {
1268 		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1269 			down_write(&iinfo->i_data_sem);
1270 			udf_clear_extent_cache(inode);
1271 			memset(iinfo->i_data + iinfo->i_lenEAttr + newsize,
1272 			       0x00, bsize - newsize -
1273 			       udf_file_entry_alloc_offset(inode));
1274 			iinfo->i_lenAlloc = newsize;
1275 			truncate_setsize(inode, newsize);
1276 			up_write(&iinfo->i_data_sem);
1277 			goto update_time;
1278 		}
1279 		err = block_truncate_page(inode->i_mapping, newsize,
1280 					  udf_get_block);
1281 		if (err)
1282 			goto out_unlock;
1283 		truncate_setsize(inode, newsize);
1284 		down_write(&iinfo->i_data_sem);
1285 		udf_clear_extent_cache(inode);
1286 		err = udf_truncate_extents(inode);
1287 		up_write(&iinfo->i_data_sem);
1288 		if (err)
1289 			goto out_unlock;
1290 	}
1291 update_time:
1292 	inode->i_mtime = inode->i_ctime = current_time(inode);
1293 	if (IS_SYNC(inode))
1294 		udf_sync_inode(inode);
1295 	else
1296 		mark_inode_dirty(inode);
1297 out_unlock:
1298 	filemap_invalidate_unlock(inode->i_mapping);
1299 	return err;
1300 }
1301 
1302 /*
1303  * Maximum length of linked list formed by ICB hierarchy. The chosen number is
 * arbitrary: large enough not to limit any real use of rewritten inodes on
 * write-once media, yet small enough to avoid looping for too long on
 * corrupted media.
1306  */
1307 #define UDF_MAX_ICB_NESTING 1024
1308 
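/*
 * Read the (extended) file entry at iinfo->i_location and set up the in-core
 * inode from it. For strategy 4096 the chain of indirect ICBs is followed
 * (up to UDF_MAX_ICB_NESTING) to find the current file entry; the tail of
 * the descriptor (extended attributes plus allocation descriptors) is copied
 * into iinfo->i_data.
 */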
1309 static int udf_read_inode(struct inode *inode, bool hidden_inode)
1310 {
1311 	struct buffer_head *bh = NULL;
1312 	struct fileEntry *fe;
1313 	struct extendedFileEntry *efe;
1314 	uint16_t ident;
1315 	struct udf_inode_info *iinfo = UDF_I(inode);
1316 	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1317 	struct kernel_lb_addr *iloc = &iinfo->i_location;
1318 	unsigned int link_count;
1319 	unsigned int indirections = 0;
1320 	int bs = inode->i_sb->s_blocksize;
1321 	int ret = -EIO;
1322 	uint32_t uid, gid;
1323 
1324 reread:
1325 	if (iloc->partitionReferenceNum >= sbi->s_partitions) {
1326 		udf_debug("partition reference: %u > logical volume partitions: %u\n",
1327 			  iloc->partitionReferenceNum, sbi->s_partitions);
1328 		return -EIO;
1329 	}
1330 
1331 	if (iloc->logicalBlockNum >=
1332 	    sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
1333 		udf_debug("block=%u, partition=%u out of range\n",
1334 			  iloc->logicalBlockNum, iloc->partitionReferenceNum);
1335 		return -EIO;
1336 	}
1337 
1338 	/*
1339 	 * Set defaults, but the inode is still incomplete!
1340 	 * Note: get_new_inode() sets the following on a new inode:
1341 	 *      i_sb = sb
1342 	 *      i_no = ino
1343 	 *      i_flags = sb->s_flags
1344 	 *      i_state = 0
1345 	 * clean_inode(): zero fills and sets
1346 	 *      i_count = 1
1347 	 *      i_nlink = 1
1348 	 *      i_op = NULL;
1349 	 */
1350 	bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
1351 	if (!bh) {
1352 		udf_err(inode->i_sb, "(ino %lu) failed !bh\n", inode->i_ino);
1353 		return -EIO;
1354 	}
1355 
1356 	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1357 	    ident != TAG_IDENT_USE) {
1358 		udf_err(inode->i_sb, "(ino %lu) failed ident=%u\n",
1359 			inode->i_ino, ident);
1360 		goto out;
1361 	}
1362 
1363 	fe = (struct fileEntry *)bh->b_data;
1364 	efe = (struct extendedFileEntry *)bh->b_data;
1365 
1366 	if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
1367 		struct buffer_head *ibh;
1368 
1369 		ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
1370 		if (ident == TAG_IDENT_IE && ibh) {
1371 			struct kernel_lb_addr loc;
1372 			struct indirectEntry *ie;
1373 
1374 			ie = (struct indirectEntry *)ibh->b_data;
1375 			loc = lelb_to_cpu(ie->indirectICB.extLocation);
1376 
1377 			if (ie->indirectICB.extLength) {
1378 				brelse(ibh);
1379 				memcpy(&iinfo->i_location, &loc,
1380 				       sizeof(struct kernel_lb_addr));
1381 				if (++indirections > UDF_MAX_ICB_NESTING) {
1382 					udf_err(inode->i_sb,
1383 						"too many ICBs in ICB hierarchy"
1384 						" (max %d supported)\n",
1385 						UDF_MAX_ICB_NESTING);
1386 					goto out;
1387 				}
1388 				brelse(bh);
1389 				goto reread;
1390 			}
1391 		}
1392 		brelse(ibh);
1393 	} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
1394 		udf_err(inode->i_sb, "unsupported strategy type: %u\n",
1395 			le16_to_cpu(fe->icbTag.strategyType));
1396 		goto out;
1397 	}
1398 	if (fe->icbTag.strategyType == cpu_to_le16(4))
1399 		iinfo->i_strat4096 = 0;
1400 	else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1401 		iinfo->i_strat4096 = 1;
1402 
1403 	iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
1404 							ICBTAG_FLAG_AD_MASK;
1405 	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
1406 	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
1407 	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1408 		ret = -EIO;
1409 		goto out;
1410 	}
1411 	iinfo->i_hidden = hidden_inode;
1412 	iinfo->i_unique = 0;
1413 	iinfo->i_lenEAttr = 0;
1414 	iinfo->i_lenExtents = 0;
1415 	iinfo->i_lenAlloc = 0;
1416 	iinfo->i_next_alloc_block = 0;
1417 	iinfo->i_next_alloc_goal = 0;
1418 	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
1419 		iinfo->i_efe = 1;
1420 		iinfo->i_use = 0;
1421 		ret = udf_alloc_i_data(inode, bs -
1422 					sizeof(struct extendedFileEntry));
1423 		if (ret)
1424 			goto out;
1425 		memcpy(iinfo->i_data,
1426 		       bh->b_data + sizeof(struct extendedFileEntry),
1427 		       bs - sizeof(struct extendedFileEntry));
1428 	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
1429 		iinfo->i_efe = 0;
1430 		iinfo->i_use = 0;
1431 		ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
1432 		if (ret)
1433 			goto out;
1434 		memcpy(iinfo->i_data,
1435 		       bh->b_data + sizeof(struct fileEntry),
1436 		       bs - sizeof(struct fileEntry));
1437 	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1438 		iinfo->i_efe = 0;
1439 		iinfo->i_use = 1;
1440 		iinfo->i_lenAlloc = le32_to_cpu(
1441 				((struct unallocSpaceEntry *)bh->b_data)->
1442 				 lengthAllocDescs);
1443 		ret = udf_alloc_i_data(inode, bs -
1444 					sizeof(struct unallocSpaceEntry));
1445 		if (ret)
1446 			goto out;
1447 		memcpy(iinfo->i_data,
1448 		       bh->b_data + sizeof(struct unallocSpaceEntry),
1449 		       bs - sizeof(struct unallocSpaceEntry));
		/* ret is 0 here; go through 'out' so the buffer head is released */
		goto out;
1451 	}
1452 
1453 	ret = -EIO;
1454 	read_lock(&sbi->s_cred_lock);
1455 	uid = le32_to_cpu(fe->uid);
1456 	if (uid == UDF_INVALID_ID ||
1457 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
1458 		inode->i_uid = sbi->s_uid;
1459 	else
1460 		i_uid_write(inode, uid);
1461 
1462 	gid = le32_to_cpu(fe->gid);
1463 	if (gid == UDF_INVALID_ID ||
1464 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
1465 		inode->i_gid = sbi->s_gid;
1466 	else
1467 		i_gid_write(inode, gid);
1468 
1469 	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
1470 			sbi->s_fmode != UDF_INVALID_MODE)
1471 		inode->i_mode = sbi->s_fmode;
1472 	else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
1473 			sbi->s_dmode != UDF_INVALID_MODE)
1474 		inode->i_mode = sbi->s_dmode;
1475 	else
1476 		inode->i_mode = udf_convert_permissions(fe);
1477 	inode->i_mode &= ~sbi->s_umask;
1478 	iinfo->i_extraPerms = le32_to_cpu(fe->permissions) & ~FE_MAPPED_PERMS;
1479 
1480 	read_unlock(&sbi->s_cred_lock);
1481 
1482 	link_count = le16_to_cpu(fe->fileLinkCount);
1483 	if (!link_count) {
1484 		if (!hidden_inode) {
1485 			ret = -ESTALE;
1486 			goto out;
1487 		}
1488 		link_count = 1;
1489 	}
1490 	set_nlink(inode, link_count);
1491 
1492 	inode->i_size = le64_to_cpu(fe->informationLength);
1493 	iinfo->i_lenExtents = inode->i_size;
1494 
1495 	if (iinfo->i_efe == 0) {
1496 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1497 			(inode->i_sb->s_blocksize_bits - 9);
1498 
1499 		udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
1500 		udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime);
1501 		udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime);
1502 
1503 		iinfo->i_unique = le64_to_cpu(fe->uniqueID);
1504 		iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
1505 		iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
1506 		iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
1507 		iinfo->i_streamdir = 0;
1508 		iinfo->i_lenStreams = 0;
1509 	} else {
1510 		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1511 		    (inode->i_sb->s_blocksize_bits - 9);
1512 
1513 		udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime);
1514 		udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime);
1515 		udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);
1516 		udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime);
1517 
1518 		iinfo->i_unique = le64_to_cpu(efe->uniqueID);
1519 		iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
1520 		iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
1521 		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
1522 
1523 		/* Named streams */
1524 		iinfo->i_streamdir = (efe->streamDirectoryICB.extLength != 0);
1525 		iinfo->i_locStreamdir =
1526 			lelb_to_cpu(efe->streamDirectoryICB.extLocation);
1527 		iinfo->i_lenStreams = le64_to_cpu(efe->objectSize);
1528 		if (iinfo->i_lenStreams >= inode->i_size)
1529 			iinfo->i_lenStreams -= inode->i_size;
1530 		else
1531 			iinfo->i_lenStreams = 0;
1532 	}
1533 	inode->i_generation = iinfo->i_unique;
1534 
1535 	/*
1536 	 * Sanity check length of allocation descriptors and extended attrs to
1537 	 * avoid integer overflows
1538 	 */
1539 	if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
1540 		goto out;
1541 	/* Now do exact checks */
1542 	if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
1543 		goto out;
1544 	/* Sanity checks for files in ICB so that we don't get confused later */
1545 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1546 		/*
		 * For a file in ICB the data is stored in the allocation
		 * descriptor area, so the sizes should match
1549 		 */
1550 		if (iinfo->i_lenAlloc != inode->i_size)
1551 			goto out;
1552 		/* File in ICB has to fit in there... */
1553 		if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
1554 			goto out;
1555 	}
1556 
1557 	switch (fe->icbTag.fileType) {
1558 	case ICBTAG_FILE_TYPE_DIRECTORY:
1559 		inode->i_op = &udf_dir_inode_operations;
1560 		inode->i_fop = &udf_dir_operations;
1561 		inode->i_mode |= S_IFDIR;
1562 		inc_nlink(inode);
1563 		break;
1564 	case ICBTAG_FILE_TYPE_REALTIME:
1565 	case ICBTAG_FILE_TYPE_REGULAR:
1566 	case ICBTAG_FILE_TYPE_UNDEF:
1567 	case ICBTAG_FILE_TYPE_VAT20:
1568 		inode->i_data.a_ops = &udf_aops;
1569 		inode->i_op = &udf_file_inode_operations;
1570 		inode->i_fop = &udf_file_operations;
1571 		inode->i_mode |= S_IFREG;
1572 		break;
1573 	case ICBTAG_FILE_TYPE_BLOCK:
1574 		inode->i_mode |= S_IFBLK;
1575 		break;
1576 	case ICBTAG_FILE_TYPE_CHAR:
1577 		inode->i_mode |= S_IFCHR;
1578 		break;
1579 	case ICBTAG_FILE_TYPE_FIFO:
1580 		init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1581 		break;
1582 	case ICBTAG_FILE_TYPE_SOCKET:
1583 		init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1584 		break;
1585 	case ICBTAG_FILE_TYPE_SYMLINK:
1586 		inode->i_data.a_ops = &udf_symlink_aops;
1587 		inode->i_op = &udf_symlink_inode_operations;
1588 		inode_nohighmem(inode);
1589 		inode->i_mode = S_IFLNK | 0777;
1590 		break;
1591 	case ICBTAG_FILE_TYPE_MAIN:
1592 		udf_debug("METADATA FILE-----\n");
1593 		break;
1594 	case ICBTAG_FILE_TYPE_MIRROR:
1595 		udf_debug("METADATA MIRROR FILE-----\n");
1596 		break;
1597 	case ICBTAG_FILE_TYPE_BITMAP:
1598 		udf_debug("METADATA BITMAP FILE-----\n");
1599 		break;
1600 	default:
1601 		udf_err(inode->i_sb, "(ino %lu) failed unknown file type=%u\n",
1602 			inode->i_ino, fe->icbTag.fileType);
1603 		goto out;
1604 	}
1605 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1606 		struct deviceSpec *dsea =
1607 			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1608 		if (dsea) {
1609 			init_special_inode(inode, inode->i_mode,
1610 				MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
1611 				      le32_to_cpu(dsea->minorDeviceIdent)));
1612 			/* Developer ID ??? */
1613 		} else
1614 			goto out;
1615 	}
1616 	ret = 0;
1617 out:
1618 	brelse(bh);
1619 	return ret;
1620 }
1621 
1622 static int udf_alloc_i_data(struct inode *inode, size_t size)
1623 {
1624 	struct udf_inode_info *iinfo = UDF_I(inode);
1625 	iinfo->i_data = kmalloc(size, GFP_KERNEL);
1626 	if (!iinfo->i_data)
1627 		return -ENOMEM;
1628 	return 0;
1629 }
1630 
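/*
 * Convert UDF file entry permissions to a POSIX mode. In the UDF permissions
 * field each class (other, group, owner) occupies five bits; the shifts
 * below pick out the execute/write/read bits of each class and drop the
 * UDF-specific change-attribute and delete bits.
 */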
1631 static umode_t udf_convert_permissions(struct fileEntry *fe)
1632 {
1633 	umode_t mode;
1634 	uint32_t permissions;
1635 	uint32_t flags;
1636 
1637 	permissions = le32_to_cpu(fe->permissions);
1638 	flags = le16_to_cpu(fe->icbTag.flags);
1639 
1640 	mode =	((permissions) & 0007) |
1641 		((permissions >> 2) & 0070) |
1642 		((permissions >> 4) & 0700) |
1643 		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1644 		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1645 		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1646 
1647 	return mode;
1648 }
1649 
1650 void udf_update_extra_perms(struct inode *inode, umode_t mode)
1651 {
1652 	struct udf_inode_info *iinfo = UDF_I(inode);
1653 
1654 	/*
1655 	 * UDF 2.01 sec. 3.3.3.3 Note 2:
1656 	 * In Unix, delete permission tracks write
1657 	 */
1658 	iinfo->i_extraPerms &= ~FE_DELETE_PERMS;
1659 	if (mode & 0200)
1660 		iinfo->i_extraPerms |= FE_PERM_U_DELETE;
1661 	if (mode & 0020)
1662 		iinfo->i_extraPerms |= FE_PERM_G_DELETE;
1663 	if (mode & 0002)
1664 		iinfo->i_extraPerms |= FE_PERM_O_DELETE;
1665 }
1666 
1667 int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
1668 {
1669 	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1670 }
1671 
1672 static int udf_sync_inode(struct inode *inode)
1673 {
1674 	return udf_update_inode(inode, 1);
1675 }
1676 
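/* Make sure the recorded creation time is never later than @time. */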
1677 static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec64 time)
1678 {
1679 	if (iinfo->i_crtime.tv_sec > time.tv_sec ||
1680 	    (iinfo->i_crtime.tv_sec == time.tv_sec &&
1681 	     iinfo->i_crtime.tv_nsec > time.tv_nsec))
1682 		iinfo->i_crtime = time;
1683 }
1684 
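/*
 * Write the in-core inode back into its on-disk (extended) file entry or
 * unallocated space entry. When @do_sync is set the updated descriptor is
 * written out synchronously.
 */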
1685 static int udf_update_inode(struct inode *inode, int do_sync)
1686 {
1687 	struct buffer_head *bh = NULL;
1688 	struct fileEntry *fe;
1689 	struct extendedFileEntry *efe;
1690 	uint64_t lb_recorded;
1691 	uint32_t udfperms;
1692 	uint16_t icbflags;
1693 	uint16_t crclen;
1694 	int err = 0;
1695 	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1696 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1697 	struct udf_inode_info *iinfo = UDF_I(inode);
1698 
1699 	bh = sb_getblk(inode->i_sb,
1700 			udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
1701 	if (!bh) {
1702 		udf_debug("getblk failure\n");
1703 		return -EIO;
1704 	}
1705 
1706 	lock_buffer(bh);
1707 	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1708 	fe = (struct fileEntry *)bh->b_data;
1709 	efe = (struct extendedFileEntry *)bh->b_data;
1710 
1711 	if (iinfo->i_use) {
1712 		struct unallocSpaceEntry *use =
1713 			(struct unallocSpaceEntry *)bh->b_data;
1714 
1715 		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1716 		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
1717 		       iinfo->i_data, inode->i_sb->s_blocksize -
1718 					sizeof(struct unallocSpaceEntry));
1719 		use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
1720 		crclen = sizeof(struct unallocSpaceEntry);
1721 
1722 		goto finish;
1723 	}
1724 
1725 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1726 		fe->uid = cpu_to_le32(UDF_INVALID_ID);
1727 	else
1728 		fe->uid = cpu_to_le32(i_uid_read(inode));
1729 
1730 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1731 		fe->gid = cpu_to_le32(UDF_INVALID_ID);
1732 	else
1733 		fe->gid = cpu_to_le32(i_gid_read(inode));
1734 
1735 	udfperms = ((inode->i_mode & 0007)) |
1736 		   ((inode->i_mode & 0070) << 2) |
1737 		   ((inode->i_mode & 0700) << 4);
1738 
1739 	udfperms |= iinfo->i_extraPerms;
1740 	fe->permissions = cpu_to_le32(udfperms);
1741 
1742 	if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
1743 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1744 	else {
1745 		if (iinfo->i_hidden)
1746 			fe->fileLinkCount = cpu_to_le16(0);
1747 		else
1748 			fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1749 	}
1750 
1751 	fe->informationLength = cpu_to_le64(inode->i_size);
1752 
1753 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1754 		struct regid *eid;
1755 		struct deviceSpec *dsea =
1756 			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1757 		if (!dsea) {
1758 			dsea = (struct deviceSpec *)
1759 				udf_add_extendedattr(inode,
1760 						     sizeof(struct deviceSpec) +
1761 						     sizeof(struct regid), 12, 0x3);
1762 			dsea->attrType = cpu_to_le32(12);
1763 			dsea->attrSubtype = 1;
1764 			dsea->attrLength = cpu_to_le32(
1765 						sizeof(struct deviceSpec) +
1766 						sizeof(struct regid));
1767 			dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
1768 		}
1769 		eid = (struct regid *)dsea->impUse;
1770 		memset(eid, 0, sizeof(*eid));
1771 		strcpy(eid->ident, UDF_ID_DEVELOPER);
1772 		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1773 		eid->identSuffix[1] = UDF_OS_ID_LINUX;
1774 		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1775 		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1776 	}
1777 
1778 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1779 		lb_recorded = 0; /* No extents => no blocks! */
1780 	else
1781 		lb_recorded =
1782 			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1783 			(blocksize_bits - 9);
1784 
1785 	if (iinfo->i_efe == 0) {
1786 		memcpy(bh->b_data + sizeof(struct fileEntry),
1787 		       iinfo->i_data,
1788 		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1789 		fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
1790 
1791 		udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
1792 		udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
1793 		udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
1794 		memset(&(fe->impIdent), 0, sizeof(struct regid));
1795 		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1796 		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1797 		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1798 		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
1799 		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1800 		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1801 		fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
1802 		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1803 		crclen = sizeof(struct fileEntry);
1804 	} else {
1805 		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
1806 		       iinfo->i_data,
1807 		       inode->i_sb->s_blocksize -
1808 					sizeof(struct extendedFileEntry));
1809 		efe->objectSize =
1810 			cpu_to_le64(inode->i_size + iinfo->i_lenStreams);
1811 		efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
1812 
1813 		if (iinfo->i_streamdir) {
1814 			struct long_ad *icb_lad = &efe->streamDirectoryICB;
1815 
1816 			icb_lad->extLocation =
1817 				cpu_to_lelb(iinfo->i_locStreamdir);
1818 			icb_lad->extLength =
1819 				cpu_to_le32(inode->i_sb->s_blocksize);
1820 		}
1821 
1822 		udf_adjust_time(iinfo, inode->i_atime);
1823 		udf_adjust_time(iinfo, inode->i_mtime);
1824 		udf_adjust_time(iinfo, inode->i_ctime);
1825 
1826 		udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
1827 		udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
1828 		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
1829 		udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
1830 
1831 		memset(&(efe->impIdent), 0, sizeof(efe->impIdent));
1832 		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1833 		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1834 		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1835 		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
1836 		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1837 		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1838 		efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
1839 		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1840 		crclen = sizeof(struct extendedFileEntry);
1841 	}
1842 
1843 finish:
1844 	if (iinfo->i_strat4096) {
1845 		fe->icbTag.strategyType = cpu_to_le16(4096);
1846 		fe->icbTag.strategyParameter = cpu_to_le16(1);
1847 		fe->icbTag.numEntries = cpu_to_le16(2);
1848 	} else {
1849 		fe->icbTag.strategyType = cpu_to_le16(4);
1850 		fe->icbTag.numEntries = cpu_to_le16(1);
1851 	}
1852 
1853 	if (iinfo->i_use)
1854 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
1855 	else if (S_ISDIR(inode->i_mode))
1856 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1857 	else if (S_ISREG(inode->i_mode))
1858 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1859 	else if (S_ISLNK(inode->i_mode))
1860 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1861 	else if (S_ISBLK(inode->i_mode))
1862 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1863 	else if (S_ISCHR(inode->i_mode))
1864 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1865 	else if (S_ISFIFO(inode->i_mode))
1866 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1867 	else if (S_ISSOCK(inode->i_mode))
1868 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1869 
1870 	icbflags =	iinfo->i_alloc_type |
1871 			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1872 			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1873 			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1874 			(le16_to_cpu(fe->icbTag.flags) &
1875 				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1876 				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1877 
1878 	fe->icbTag.flags = cpu_to_le16(icbflags);
1879 	if (sbi->s_udfrev >= 0x0200)
1880 		fe->descTag.descVersion = cpu_to_le16(3);
1881 	else
1882 		fe->descTag.descVersion = cpu_to_le16(2);
1883 	fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
1884 	fe->descTag.tagLocation = cpu_to_le32(
1885 					iinfo->i_location.logicalBlockNum);
1886 	crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
1887 	fe->descTag.descCRCLength = cpu_to_le16(crclen);
1888 	fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
1889 						  crclen));
1890 	fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1891 
1892 	set_buffer_uptodate(bh);
1893 	unlock_buffer(bh);
1894 
1895 	/* write out the inode descriptor block */
1896 	mark_buffer_dirty(bh);
1897 	if (do_sync) {
1898 		sync_dirty_buffer(bh);
1899 		if (buffer_write_io_error(bh)) {
1900 			udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
1901 				 inode->i_ino);
1902 			err = -EIO;
1903 		}
1904 	}
1905 	brelse(bh);
1906 
1907 	return err;
1908 }
1909 
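/*
 * Get the inode at the given ICB location from the inode cache, reading it
 * in from disc if it is not cached yet. Returns an ERR_PTR() on failure.
 */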
1910 struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
1911 			 bool hidden_inode)
1912 {
1913 	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1914 	struct inode *inode = iget_locked(sb, block);
1915 	int err;
1916 
1917 	if (!inode)
1918 		return ERR_PTR(-ENOMEM);
1919 
1920 	if (!(inode->i_state & I_NEW)) {
1921 		if (UDF_I(inode)->i_hidden != hidden_inode) {
1922 			iput(inode);
1923 			return ERR_PTR(-EFSCORRUPTED);
1924 		}
1925 		return inode;
1926 	}
1927 
1928 	memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
1929 	err = udf_read_inode(inode, hidden_inode);
1930 	if (err < 0) {
1931 		iget_failed(inode);
1932 		return ERR_PTR(err);
1933 	}
1934 	unlock_new_inode(inode);
1935 
1936 	return inode;
1937 }
1938 
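/*
 * Set up a new allocation extent descriptor in @block and chain it from the
 * current extent position. If there is no room left for the chaining extent,
 * the last allocation descriptor is moved into the new block first. On
 * success *epos points into the new block.
 */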
1939 int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
1940 			    struct extent_position *epos)
1941 {
1942 	struct super_block *sb = inode->i_sb;
1943 	struct buffer_head *bh;
1944 	struct allocExtDesc *aed;
1945 	struct extent_position nepos;
1946 	struct kernel_lb_addr neloc;
1947 	int ver, adsize;
1948 
1949 	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1950 		adsize = sizeof(struct short_ad);
1951 	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1952 		adsize = sizeof(struct long_ad);
1953 	else
1954 		return -EIO;
1955 
1956 	neloc.logicalBlockNum = block;
1957 	neloc.partitionReferenceNum = epos->block.partitionReferenceNum;
1958 
1959 	bh = sb_getblk(sb, udf_get_lb_pblock(sb, &neloc, 0));
1960 	if (!bh)
1961 		return -EIO;
1962 	lock_buffer(bh);
1963 	memset(bh->b_data, 0x00, sb->s_blocksize);
1964 	set_buffer_uptodate(bh);
1965 	unlock_buffer(bh);
1966 	mark_buffer_dirty_inode(bh, inode);
1967 
1968 	aed = (struct allocExtDesc *)(bh->b_data);
1969 	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) {
1970 		aed->previousAllocExtLocation =
1971 				cpu_to_le32(epos->block.logicalBlockNum);
1972 	}
1973 	aed->lengthAllocDescs = cpu_to_le32(0);
1974 	if (UDF_SB(sb)->s_udfrev >= 0x0200)
1975 		ver = 3;
1976 	else
1977 		ver = 2;
1978 	udf_new_tag(bh->b_data, TAG_IDENT_AED, ver, 1, block,
1979 		    sizeof(struct tag));
1980 
1981 	nepos.block = neloc;
1982 	nepos.offset = sizeof(struct allocExtDesc);
1983 	nepos.bh = bh;
1984 
1985 	/*
1986 	 * Do we have to copy current last extent to make space for indirect
1987 	 * one?
1988 	 */
1989 	if (epos->offset + adsize > sb->s_blocksize) {
1990 		struct kernel_lb_addr cp_loc;
1991 		uint32_t cp_len;
1992 		int cp_type;
1993 
1994 		epos->offset -= adsize;
1995 		cp_type = udf_current_aext(inode, epos, &cp_loc, &cp_len, 0);
1996 		cp_len |= ((uint32_t)cp_type) << 30;
1997 
1998 		__udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
1999 		udf_write_aext(inode, epos, &nepos.block,
2000 			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
2001 	} else {
2002 		__udf_add_aext(inode, epos, &nepos.block,
2003 			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
2004 	}
2005 
2006 	brelse(epos->bh);
2007 	*epos = nepos;
2008 
2009 	return 0;
2010 }
2011 
2012 /*
2013  * Append extent at the given position - should be the first free one in inode
2014  * / indirect extent. This function assumes there is enough space in the inode
2015  * or indirect extent. Use udf_add_aext() if you didn't check for this before.
2016  */
2017 int __udf_add_aext(struct inode *inode, struct extent_position *epos,
2018 		   struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2019 {
2020 	struct udf_inode_info *iinfo = UDF_I(inode);
2021 	struct allocExtDesc *aed;
2022 	int adsize;
2023 
2024 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2025 		adsize = sizeof(struct short_ad);
2026 	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2027 		adsize = sizeof(struct long_ad);
2028 	else
2029 		return -EIO;
2030 
2031 	if (!epos->bh) {
2032 		WARN_ON(iinfo->i_lenAlloc !=
2033 			epos->offset - udf_file_entry_alloc_offset(inode));
2034 	} else {
2035 		aed = (struct allocExtDesc *)epos->bh->b_data;
2036 		WARN_ON(le32_to_cpu(aed->lengthAllocDescs) !=
2037 			epos->offset - sizeof(struct allocExtDesc));
2038 		WARN_ON(epos->offset + adsize > inode->i_sb->s_blocksize);
2039 	}
2040 
2041 	udf_write_aext(inode, epos, eloc, elen, inc);
2042 
2043 	if (!epos->bh) {
2044 		iinfo->i_lenAlloc += adsize;
2045 		mark_inode_dirty(inode);
2046 	} else {
2047 		aed = (struct allocExtDesc *)epos->bh->b_data;
2048 		le32_add_cpu(&aed->lengthAllocDescs, adsize);
2049 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2050 				UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2051 			udf_update_tag(epos->bh->b_data,
2052 					epos->offset + (inc ? 0 : adsize));
2053 		else
2054 			udf_update_tag(epos->bh->b_data,
2055 					sizeof(struct allocExtDesc));
2056 		mark_buffer_dirty_inode(epos->bh, inode);
2057 	}
2058 
2059 	return 0;
2060 }
2061 
2062 /*
2063  * Append extent at given position - should be the first free one in inode
2064  * / indirect extent. Takes care of allocating and linking indirect blocks.
2065  */
2066 int udf_add_aext(struct inode *inode, struct extent_position *epos,
2067 		 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2068 {
2069 	int adsize;
2070 	struct super_block *sb = inode->i_sb;
2071 
2072 	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2073 		adsize = sizeof(struct short_ad);
2074 	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2075 		adsize = sizeof(struct long_ad);
2076 	else
2077 		return -EIO;
2078 
2079 	if (epos->offset + (2 * adsize) > sb->s_blocksize) {
2080 		int err;
2081 		udf_pblk_t new_block;
2082 
2083 		new_block = udf_new_block(sb, NULL,
2084 					  epos->block.partitionReferenceNum,
2085 					  epos->block.logicalBlockNum, &err);
2086 		if (!new_block)
2087 			return -ENOSPC;
2088 
2089 		err = udf_setup_indirect_aext(inode, new_block, epos);
2090 		if (err)
2091 			return err;
2092 	}
2093 
2094 	return __udf_add_aext(inode, epos, eloc, elen, inc);
2095 }
2096 
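/*
 * Write a single allocation descriptor (short_ad or long_ad, depending on
 * the inode's allocation type) describing @eloc / @elen at @epos and mark
 * the ICB or allocation extent block dirty. When @inc is set, the position
 * is advanced past the written descriptor.
 */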
2097 void udf_write_aext(struct inode *inode, struct extent_position *epos,
2098 		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2099 {
2100 	int adsize;
2101 	uint8_t *ptr;
2102 	struct short_ad *sad;
2103 	struct long_ad *lad;
2104 	struct udf_inode_info *iinfo = UDF_I(inode);
2105 
2106 	if (!epos->bh)
2107 		ptr = iinfo->i_data + epos->offset -
2108 			udf_file_entry_alloc_offset(inode) +
2109 			iinfo->i_lenEAttr;
2110 	else
2111 		ptr = epos->bh->b_data + epos->offset;
2112 
2113 	switch (iinfo->i_alloc_type) {
2114 	case ICBTAG_FLAG_AD_SHORT:
2115 		sad = (struct short_ad *)ptr;
2116 		sad->extLength = cpu_to_le32(elen);
2117 		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
2118 		adsize = sizeof(struct short_ad);
2119 		break;
2120 	case ICBTAG_FLAG_AD_LONG:
2121 		lad = (struct long_ad *)ptr;
2122 		lad->extLength = cpu_to_le32(elen);
2123 		lad->extLocation = cpu_to_lelb(*eloc);
2124 		memset(lad->impUse, 0x00, sizeof(lad->impUse));
2125 		adsize = sizeof(struct long_ad);
2126 		break;
2127 	default:
2128 		return;
2129 	}
2130 
2131 	if (epos->bh) {
2132 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2133 		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
2134 			struct allocExtDesc *aed =
2135 				(struct allocExtDesc *)epos->bh->b_data;
2136 			udf_update_tag(epos->bh->b_data,
2137 				       le32_to_cpu(aed->lengthAllocDescs) +
2138 				       sizeof(struct allocExtDesc));
2139 		}
2140 		mark_buffer_dirty_inode(epos->bh, inode);
2141 	} else {
2142 		mark_inode_dirty(inode);
2143 	}
2144 
2145 	if (inc)
2146 		epos->offset += adsize;
2147 }
2148 
2149 /*
2150  * Only 1 indirect extent in a row really makes sense but allow up to 16 in case
2151  * someone does some weird stuff.
2152  */
2153 #define UDF_MAX_INDIR_EXTS 16
2154 
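/*
 * Return the type, location and length of the extent at @epos, transparently
 * following "next extent of allocation descriptors" extents into their
 * allocation extent blocks. Returns -1 on error or when the end of the
 * descriptor area is reached.
 */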
2155 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
2156 		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
2157 {
2158 	int8_t etype;
2159 	unsigned int indirections = 0;
2160 
2161 	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
2162 	       (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
2163 		udf_pblk_t block;
2164 
2165 		if (++indirections > UDF_MAX_INDIR_EXTS) {
2166 			udf_err(inode->i_sb,
2167 				"too many indirect extents in inode %lu\n",
2168 				inode->i_ino);
2169 			return -1;
2170 		}
2171 
2172 		epos->block = *eloc;
2173 		epos->offset = sizeof(struct allocExtDesc);
2174 		brelse(epos->bh);
2175 		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
2176 		epos->bh = sb_bread(inode->i_sb, block);
2177 		if (!epos->bh) {
2178 			udf_debug("reading block %u failed!\n", block);
2179 			return -1;
2180 		}
2181 	}
2182 
2183 	return etype;
2184 }
2185 
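/*
 * Decode the allocation descriptor at @epos without following indirect
 * extents. Fills in @eloc and @elen and returns the extent type, or -1 when
 * the descriptor area is exhausted.
 */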
2186 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
2187 			struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
2188 {
2189 	int alen;
2190 	int8_t etype;
2191 	uint8_t *ptr;
2192 	struct short_ad *sad;
2193 	struct long_ad *lad;
2194 	struct udf_inode_info *iinfo = UDF_I(inode);
2195 
2196 	if (!epos->bh) {
2197 		if (!epos->offset)
2198 			epos->offset = udf_file_entry_alloc_offset(inode);
2199 		ptr = iinfo->i_data + epos->offset -
2200 			udf_file_entry_alloc_offset(inode) +
2201 			iinfo->i_lenEAttr;
2202 		alen = udf_file_entry_alloc_offset(inode) +
2203 							iinfo->i_lenAlloc;
2204 	} else {
2205 		if (!epos->offset)
2206 			epos->offset = sizeof(struct allocExtDesc);
2207 		ptr = epos->bh->b_data + epos->offset;
2208 		alen = sizeof(struct allocExtDesc) +
2209 			le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
2210 							lengthAllocDescs);
2211 	}
2212 
2213 	switch (iinfo->i_alloc_type) {
2214 	case ICBTAG_FLAG_AD_SHORT:
2215 		sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
2216 		if (!sad)
2217 			return -1;
2218 		etype = le32_to_cpu(sad->extLength) >> 30;
2219 		eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
2220 		eloc->partitionReferenceNum =
2221 				iinfo->i_location.partitionReferenceNum;
2222 		*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
2223 		break;
2224 	case ICBTAG_FLAG_AD_LONG:
2225 		lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
2226 		if (!lad)
2227 			return -1;
2228 		etype = le32_to_cpu(lad->extLength) >> 30;
2229 		*eloc = lelb_to_cpu(lad->extLocation);
2230 		*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
2231 		break;
2232 	default:
2233 		udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
2234 		return -1;
2235 	}
2236 
2237 	return etype;
2238 }
2239 
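/*
 * Insert the extent @neloc / @nelen at @epos, shifting every following
 * extent one descriptor forward; the last extent is re-appended with
 * udf_add_aext(), which may allocate a new allocation extent block.
 */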
2240 static int udf_insert_aext(struct inode *inode, struct extent_position epos,
2241 			   struct kernel_lb_addr neloc, uint32_t nelen)
2242 {
2243 	struct kernel_lb_addr oeloc;
2244 	uint32_t oelen;
2245 	int8_t etype;
2246 	int err;
2247 
2248 	if (epos.bh)
2249 		get_bh(epos.bh);
2250 
2251 	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
2252 		udf_write_aext(inode, &epos, &neloc, nelen, 1);
2253 		neloc = oeloc;
2254 		nelen = (etype << 30) | oelen;
2255 	}
2256 	err = udf_add_aext(inode, &epos, &neloc, nelen, 1);
2257 	brelse(epos.bh);
2258 
2259 	return err;
2260 }
2261 
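/*
 * Delete the extent at @epos by copying every following extent one
 * descriptor back and clearing the freed slot. A trailing allocation extent
 * block that is no longer needed is freed and the recorded length of the
 * allocation descriptors is adjusted.
 */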
2262 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
2263 {
2264 	struct extent_position oepos;
2265 	int adsize;
2266 	int8_t etype;
2267 	struct allocExtDesc *aed;
2268 	struct udf_inode_info *iinfo;
2269 	struct kernel_lb_addr eloc;
2270 	uint32_t elen;
2271 
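	/*
	 * Take two extra references: both epos.bh and oepos.bh get released
	 * at the end of this function while the caller keeps its own
	 * reference.
	 */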
2272 	if (epos.bh) {
2273 		get_bh(epos.bh);
2274 		get_bh(epos.bh);
2275 	}
2276 
2277 	iinfo = UDF_I(inode);
2278 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2279 		adsize = sizeof(struct short_ad);
2280 	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2281 		adsize = sizeof(struct long_ad);
2282 	else
2283 		adsize = 0;
2284 
2285 	oepos = epos;
2286 	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
2287 		return -1;
2288 
2289 	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
2290 		udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
2291 		if (oepos.bh != epos.bh) {
2292 			oepos.block = epos.block;
2293 			brelse(oepos.bh);
2294 			get_bh(epos.bh);
2295 			oepos.bh = epos.bh;
2296 			oepos.offset = epos.offset - adsize;
2297 		}
2298 	}
2299 	memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
2300 	elen = 0;
2301 
2302 	if (epos.bh != oepos.bh) {
2303 		udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
2304 		udf_write_aext(inode, &oepos, &eloc, elen, 1);
2305 		udf_write_aext(inode, &oepos, &eloc, elen, 1);
2306 		if (!oepos.bh) {
2307 			iinfo->i_lenAlloc -= (adsize * 2);
2308 			mark_inode_dirty(inode);
2309 		} else {
2310 			aed = (struct allocExtDesc *)oepos.bh->b_data;
2311 			le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
2312 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2313 			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2314 				udf_update_tag(oepos.bh->b_data,
2315 						oepos.offset - (2 * adsize));
2316 			else
2317 				udf_update_tag(oepos.bh->b_data,
2318 						sizeof(struct allocExtDesc));
2319 			mark_buffer_dirty_inode(oepos.bh, inode);
2320 		}
2321 	} else {
2322 		udf_write_aext(inode, &oepos, &eloc, elen, 1);
2323 		if (!oepos.bh) {
2324 			iinfo->i_lenAlloc -= adsize;
2325 			mark_inode_dirty(inode);
2326 		} else {
2327 			aed = (struct allocExtDesc *)oepos.bh->b_data;
2328 			le32_add_cpu(&aed->lengthAllocDescs, -adsize);
2329 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2330 			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2331 				udf_update_tag(oepos.bh->b_data,
2332 						epos.offset - adsize);
2333 			else
2334 				udf_update_tag(oepos.bh->b_data,
2335 						sizeof(struct allocExtDesc));
2336 			mark_buffer_dirty_inode(oepos.bh, inode);
2337 		}
2338 	}
2339 
2340 	brelse(epos.bh);
2341 	brelse(oepos.bh);
2342 
2343 	return (elen >> 30);
2344 }
2345 
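/*
 * Find the extent containing logical file block @block. Returns the extent
 * type and fills in @eloc, @elen and the block offset within the extent in
 * @offset; returns -1 when @block lies beyond the recorded extents. The
 * search starts from the cached extent position when it covers @block and
 * the cache is updated on success.
 */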
2346 int8_t inode_bmap(struct inode *inode, sector_t block,
2347 		  struct extent_position *pos, struct kernel_lb_addr *eloc,
2348 		  uint32_t *elen, sector_t *offset)
2349 {
2350 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
2351 	loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
2352 	int8_t etype;
2353 	struct udf_inode_info *iinfo;
2354 
2355 	iinfo = UDF_I(inode);
2356 	if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
2357 		pos->offset = 0;
2358 		pos->block = iinfo->i_location;
2359 		pos->bh = NULL;
2360 	}
2361 	*elen = 0;
2362 	do {
2363 		etype = udf_next_aext(inode, pos, eloc, elen, 1);
2364 		if (etype == -1) {
2365 			*offset = (bcount - lbcount) >> blocksize_bits;
2366 			iinfo->i_lenExtents = lbcount;
2367 			return -1;
2368 		}
2369 		lbcount += *elen;
2370 	} while (lbcount <= bcount);
2371 	/* update extent cache */
2372 	udf_update_extent_cache(inode, lbcount - *elen, pos);
2373 	*offset = (bcount + *elen - lbcount) >> blocksize_bits;
2374 
2375 	return etype;
2376 }
2377