xref: /openbmc/linux/fs/udf/inode.c (revision 8b46168c)
1 /*
2  * inode.c
3  *
4  * PURPOSE
5  *  Inode handling routines for the OSTA-UDF(tm) filesystem.
6  *
7  * COPYRIGHT
8  *  This file is distributed under the terms of the GNU General Public
9  *  License (GPL). Copies of the GPL can be obtained from:
10  *    ftp://prep.ai.mit.edu/pub/gnu/GPL
11  *  Each contributing author retains all rights to their own work.
12  *
13  *  (C) 1998 Dave Boynton
14  *  (C) 1998-2004 Ben Fennema
15  *  (C) 1999-2000 Stelias Computing Inc
16  *
17  * HISTORY
18  *
19  *  10/04/98 dgb  Added rudimentary directory functions
20  *  10/07/98      Fully working udf_block_map! It works!
21  *  11/25/98      bmap altered to better support extents
22  *  12/06/98 blf  partition support in udf_iget, udf_block_map
23  *                and udf_read_inode
24  *  12/12/98      rewrote udf_block_map to handle next extents and descs across
25  *                block boundaries (which is not actually allowed)
26  *  12/20/98      added support for strategy 4096
27  *  03/07/99      rewrote udf_block_map (again)
28  *                New funcs, inode_bmap, udf_next_aext
29  *  04/19/99      Support for writing device EA's for major/minor #
30  */
31 
32 #include "udfdecl.h"
33 #include <linux/mm.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/writeback.h>
37 #include <linux/slab.h>
38 #include <linux/crc-itu-t.h>
39 #include <linux/mpage.h>
40 #include <linux/uio.h>
41 #include <linux/bio.h>
42 
43 #include "udf_i.h"
44 #include "udf_sb.h"
45 
46 #define EXTENT_MERGE_SIZE 5
47 
48 #define FE_MAPPED_PERMS	(FE_PERM_U_READ | FE_PERM_U_WRITE | FE_PERM_U_EXEC | \
49 			 FE_PERM_G_READ | FE_PERM_G_WRITE | FE_PERM_G_EXEC | \
50 			 FE_PERM_O_READ | FE_PERM_O_WRITE | FE_PERM_O_EXEC)
51 
52 #define FE_DELETE_PERMS	(FE_PERM_U_DELETE | FE_PERM_G_DELETE | \
53 			 FE_PERM_O_DELETE)
54 
55 struct udf_map_rq;
56 
57 static umode_t udf_convert_permissions(struct fileEntry *);
58 static int udf_update_inode(struct inode *, int);
59 static int udf_sync_inode(struct inode *inode);
60 static int udf_alloc_i_data(struct inode *inode, size_t size);
61 static int inode_getblk(struct inode *inode, struct udf_map_rq *map);
62 static int udf_insert_aext(struct inode *, struct extent_position,
63 			   struct kernel_lb_addr, uint32_t);
64 static void udf_split_extents(struct inode *, int *, int, udf_pblk_t,
65 			      struct kernel_long_ad *, int *);
66 static void udf_prealloc_extents(struct inode *, int, int,
67 				 struct kernel_long_ad *, int *);
68 static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *);
69 static int udf_update_extents(struct inode *, struct kernel_long_ad *, int,
70 			      int, struct extent_position *);
71 static int udf_get_block_wb(struct inode *inode, sector_t block,
72 			    struct buffer_head *bh_result, int create);
73 
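/* Drop the cached extent position; the caller must hold i_extent_cache_lock. */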
74 static void __udf_clear_extent_cache(struct inode *inode)
75 {
76 	struct udf_inode_info *iinfo = UDF_I(inode);
77 
78 	if (iinfo->cached_extent.lstart != -1) {
79 		brelse(iinfo->cached_extent.epos.bh);
80 		iinfo->cached_extent.lstart = -1;
81 	}
82 }
83 
84 /* Invalidate extent cache */
85 static void udf_clear_extent_cache(struct inode *inode)
86 {
87 	struct udf_inode_info *iinfo = UDF_I(inode);
88 
89 	spin_lock(&iinfo->i_extent_cache_lock);
90 	__udf_clear_extent_cache(inode);
91 	spin_unlock(&iinfo->i_extent_cache_lock);
92 }
93 
94 /* Return contents of extent cache */
95 static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
96 				 loff_t *lbcount, struct extent_position *pos)
97 {
98 	struct udf_inode_info *iinfo = UDF_I(inode);
99 	int ret = 0;
100 
101 	spin_lock(&iinfo->i_extent_cache_lock);
102 	if ((iinfo->cached_extent.lstart <= bcount) &&
103 	    (iinfo->cached_extent.lstart != -1)) {
104 		/* Cache hit */
105 		*lbcount = iinfo->cached_extent.lstart;
106 		memcpy(pos, &iinfo->cached_extent.epos,
107 		       sizeof(struct extent_position));
108 		if (pos->bh)
109 			get_bh(pos->bh);
110 		ret = 1;
111 	}
112 	spin_unlock(&iinfo->i_extent_cache_lock);
113 	return ret;
114 }
115 
116 /* Add extent to extent cache */
117 static void udf_update_extent_cache(struct inode *inode, loff_t estart,
118 				    struct extent_position *pos)
119 {
120 	struct udf_inode_info *iinfo = UDF_I(inode);
121 
122 	spin_lock(&iinfo->i_extent_cache_lock);
123 	/* Invalidate previously cached extent */
124 	__udf_clear_extent_cache(inode);
125 	if (pos->bh)
126 		get_bh(pos->bh);
127 	memcpy(&iinfo->cached_extent.epos, pos, sizeof(*pos));
128 	iinfo->cached_extent.lstart = estart;
129 	switch (iinfo->i_alloc_type) {
130 	case ICBTAG_FLAG_AD_SHORT:
131 		iinfo->cached_extent.epos.offset -= sizeof(struct short_ad);
132 		break;
133 	case ICBTAG_FLAG_AD_LONG:
134 		iinfo->cached_extent.epos.offset -= sizeof(struct long_ad);
135 		break;
136 	}
137 	spin_unlock(&iinfo->i_extent_cache_lock);
138 }
139 
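/*
 * evict_inode callback: when the link count has dropped to zero, truncate the
 * file and write out its final state, then release the page cache, the
 * in-memory inode data and, for deleted inodes, the on-disk file entry.
 */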
140 void udf_evict_inode(struct inode *inode)
141 {
142 	struct udf_inode_info *iinfo = UDF_I(inode);
143 	int want_delete = 0;
144 
145 	if (!is_bad_inode(inode)) {
146 		if (!inode->i_nlink) {
147 			want_delete = 1;
148 			udf_setsize(inode, 0);
149 			udf_update_inode(inode, IS_SYNC(inode));
150 		}
151 		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
152 		    inode->i_size != iinfo->i_lenExtents) {
153 			udf_warn(inode->i_sb,
154 				 "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
155 				 inode->i_ino, inode->i_mode,
156 				 (unsigned long long)inode->i_size,
157 				 (unsigned long long)iinfo->i_lenExtents);
158 		}
159 	}
160 	truncate_inode_pages_final(&inode->i_data);
161 	invalidate_inode_buffers(inode);
162 	clear_inode(inode);
163 	kfree(iinfo->i_data);
164 	iinfo->i_data = NULL;
165 	udf_clear_extent_cache(inode);
166 	if (want_delete) {
167 		udf_free_inode(inode);
168 	}
169 }
170 
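/*
 * Clean up after a failed (or short) buffered write: drop page cache pages
 * and truncate extents that were instantiated beyond the current i_size.
 */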
171 static void udf_write_failed(struct address_space *mapping, loff_t to)
172 {
173 	struct inode *inode = mapping->host;
174 	struct udf_inode_info *iinfo = UDF_I(inode);
175 	loff_t isize = inode->i_size;
176 
177 	if (to > isize) {
178 		truncate_pagecache(inode, isize);
179 		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
180 			down_write(&iinfo->i_data_sem);
181 			udf_clear_extent_cache(inode);
182 			udf_truncate_extents(inode);
183 			up_write(&iinfo->i_data_sem);
184 		}
185 	}
186 }
187 
188 static int udf_adinicb_writepage(struct folio *folio,
189 				 struct writeback_control *wbc, void *data)
190 {
191 	struct page *page = &folio->page;
192 	struct inode *inode = page->mapping->host;
193 	struct udf_inode_info *iinfo = UDF_I(inode);
194 
195 	BUG_ON(!PageLocked(page));
196 	memcpy_from_page(iinfo->i_data + iinfo->i_lenEAttr, page, 0,
197 			 i_size_read(inode));
198 	unlock_page(page);
199 	mark_inode_dirty(inode);
200 
201 	return 0;
202 }
203 
204 static int udf_writepages(struct address_space *mapping,
205 			  struct writeback_control *wbc)
206 {
207 	struct inode *inode = mapping->host;
208 	struct udf_inode_info *iinfo = UDF_I(inode);
209 
210 	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
211 		return mpage_writepages(mapping, wbc, udf_get_block_wb);
212 	return write_cache_pages(mapping, wbc, udf_adinicb_writepage, NULL);
213 }
214 
215 static void udf_adinicb_readpage(struct page *page)
216 {
217 	struct inode *inode = page->mapping->host;
218 	char *kaddr;
219 	struct udf_inode_info *iinfo = UDF_I(inode);
220 	loff_t isize = i_size_read(inode);
221 
222 	kaddr = kmap_local_page(page);
223 	memcpy(kaddr, iinfo->i_data + iinfo->i_lenEAttr, isize);
224 	memset(kaddr + isize, 0, PAGE_SIZE - isize);
225 	flush_dcache_page(page);
226 	SetPageUptodate(page);
227 	kunmap_local(kaddr);
228 }
229 
230 static int udf_read_folio(struct file *file, struct folio *folio)
231 {
232 	struct udf_inode_info *iinfo = UDF_I(file_inode(file));
233 
234 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
235 		udf_adinicb_readpage(&folio->page);
236 		folio_unlock(folio);
237 		return 0;
238 	}
239 	return mpage_read_folio(folio, udf_get_block);
240 }
241 
242 static void udf_readahead(struct readahead_control *rac)
243 {
244 	mpage_readahead(rac, udf_get_block);
245 }
246 
247 static int udf_write_begin(struct file *file, struct address_space *mapping,
248 			   loff_t pos, unsigned len,
249 			   struct page **pagep, void **fsdata)
250 {
251 	struct udf_inode_info *iinfo = UDF_I(file_inode(file));
252 	struct page *page;
253 	int ret;
254 
255 	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
256 		ret = block_write_begin(mapping, pos, len, pagep,
257 					udf_get_block);
258 		if (unlikely(ret))
259 			udf_write_failed(mapping, pos + len);
260 		return ret;
261 	}
262 	if (WARN_ON_ONCE(pos >= PAGE_SIZE))
263 		return -EIO;
264 	page = grab_cache_page_write_begin(mapping, 0);
265 	if (!page)
266 		return -ENOMEM;
267 	*pagep = page;
268 	if (!PageUptodate(page))
269 		udf_adinicb_readpage(page);
270 	return 0;
271 }
272 
273 static int udf_write_end(struct file *file, struct address_space *mapping,
274 			 loff_t pos, unsigned len, unsigned copied,
275 			 struct page *page, void *fsdata)
276 {
277 	struct inode *inode = file_inode(file);
278 	loff_t last_pos;
279 
280 	if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
281 		return generic_write_end(file, mapping, pos, len, copied, page,
282 					 fsdata);
283 	last_pos = pos + copied;
284 	if (last_pos > inode->i_size)
285 		i_size_write(inode, last_pos);
286 	set_page_dirty(page);
287 	unlock_page(page);
288 	put_page(page);
289 
290 	return copied;
291 }
292 
293 static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
294 {
295 	struct file *file = iocb->ki_filp;
296 	struct address_space *mapping = file->f_mapping;
297 	struct inode *inode = mapping->host;
298 	size_t count = iov_iter_count(iter);
299 	ssize_t ret;
300 
301 	/* Fallback to buffered IO for in-ICB files */
302 	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
303 		return 0;
304 	ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
305 	if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
306 		udf_write_failed(mapping, iocb->ki_pos + count);
307 	return ret;
308 }
309 
310 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
311 {
312 	struct udf_inode_info *iinfo = UDF_I(mapping->host);
313 
314 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
315 		return -EINVAL;
316 	return generic_block_bmap(mapping, block, udf_get_block);
317 }
318 
319 const struct address_space_operations udf_aops = {
320 	.dirty_folio	= block_dirty_folio,
321 	.invalidate_folio = block_invalidate_folio,
322 	.read_folio	= udf_read_folio,
323 	.readahead	= udf_readahead,
324 	.writepages	= udf_writepages,
325 	.write_begin	= udf_write_begin,
326 	.write_end	= udf_write_end,
327 	.direct_IO	= udf_direct_IO,
328 	.bmap		= udf_bmap,
329 	.migrate_folio	= buffer_migrate_folio,
330 };
331 
332 /*
333  * Expand file stored in ICB to a normal one-block-file
334  *
335  * This function requires the inode lock (i_rwsem) to be held
336  */
337 int udf_expand_file_adinicb(struct inode *inode)
338 {
339 	struct page *page;
340 	struct udf_inode_info *iinfo = UDF_I(inode);
341 	int err;
342 
343 	WARN_ON_ONCE(!inode_is_locked(inode));
344 	if (!iinfo->i_lenAlloc) {
345 		down_write(&iinfo->i_data_sem);
346 		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
347 			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
348 		else
349 			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
350 		/* from now on we have normal address_space methods */
351 		inode->i_data.a_ops = &udf_aops;
352 		up_write(&iinfo->i_data_sem);
353 		mark_inode_dirty(inode);
354 		return 0;
355 	}
356 
357 	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
358 	if (!page)
359 		return -ENOMEM;
360 
361 	if (!PageUptodate(page))
362 		udf_adinicb_readpage(page);
363 	down_write(&iinfo->i_data_sem);
364 	memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00,
365 	       iinfo->i_lenAlloc);
366 	iinfo->i_lenAlloc = 0;
367 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
368 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
369 	else
370 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
371 	set_page_dirty(page);
372 	unlock_page(page);
373 	up_write(&iinfo->i_data_sem);
374 	err = filemap_fdatawrite(inode->i_mapping);
375 	if (err) {
376 		/* Restore everything back so that we don't lose data... */
377 		lock_page(page);
378 		down_write(&iinfo->i_data_sem);
379 		memcpy_from_page(iinfo->i_data + iinfo->i_lenEAttr, page, 0,
380 				 inode->i_size);
381 		unlock_page(page);
382 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
383 		iinfo->i_lenAlloc = inode->i_size;
384 		up_write(&iinfo->i_data_sem);
385 	}
386 	put_page(page);
387 	mark_inode_dirty(inode);
388 
389 	return err;
390 }
391 
392 #define UDF_MAP_CREATE		0x01	/* Mapping can allocate new blocks */
393 #define UDF_MAP_NOPREALLOC	0x02	/* Do not preallocate blocks */
394 
395 #define UDF_BLK_MAPPED	0x01	/* Block was successfully mapped */
396 #define UDF_BLK_NEW	0x02	/* Block was freshly allocated */
397 
398 struct udf_map_rq {
399 	sector_t lblk;
400 	udf_pblk_t pblk;
401 	int iflags;		/* UDF_MAP_ flags determining behavior */
402 	int oflags;		/* UDF_BLK_ flags reporting results */
403 };
404 
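/*
 * Map the logical file block map->lblk to a block on disk. Without
 * UDF_MAP_CREATE this is a pure lookup; with it, a missing block is allocated
 * (after discarding stale preallocation beyond EOF) and the outcome is
 * reported through map->oflags and map->pblk.
 */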
405 static int udf_map_block(struct inode *inode, struct udf_map_rq *map)
406 {
407 	int err;
408 	struct udf_inode_info *iinfo = UDF_I(inode);
409 
410 	map->oflags = 0;
411 	if (!(map->iflags & UDF_MAP_CREATE)) {
412 		struct kernel_lb_addr eloc;
413 		uint32_t elen;
414 		sector_t offset;
415 		struct extent_position epos = {};
416 
417 		down_read(&iinfo->i_data_sem);
418 		if (inode_bmap(inode, map->lblk, &epos, &eloc, &elen, &offset)
419 				== (EXT_RECORDED_ALLOCATED >> 30)) {
420 			map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc,
421 							offset);
422 			map->oflags |= UDF_BLK_MAPPED;
423 		}
424 		up_read(&iinfo->i_data_sem);
425 		brelse(epos.bh);
426 
427 		return 0;
428 	}
429 
430 	down_write(&iinfo->i_data_sem);
431 	/*
432 	 * Block beyond EOF and prealloc extents? Just discard preallocation
433 	 * as it is not useful and complicates things.
434 	 */
435 	if (((loff_t)map->lblk) << inode->i_blkbits >= iinfo->i_lenExtents)
436 		udf_discard_prealloc(inode);
437 	udf_clear_extent_cache(inode);
438 	err = inode_getblk(inode, map);
439 	up_write(&iinfo->i_data_sem);
440 	return err;
441 }
442 
443 static int __udf_get_block(struct inode *inode, sector_t block,
444 			   struct buffer_head *bh_result, int flags)
445 {
446 	int err;
447 	struct udf_map_rq map = {
448 		.lblk = block,
449 		.iflags = flags,
450 	};
451 
452 	err = udf_map_block(inode, &map);
453 	if (err < 0)
454 		return err;
455 	if (map.oflags & UDF_BLK_MAPPED) {
456 		map_bh(bh_result, inode->i_sb, map.pblk);
457 		if (map.oflags & UDF_BLK_NEW)
458 			set_buffer_new(bh_result);
459 	}
460 	return 0;
461 }
462 
463 int udf_get_block(struct inode *inode, sector_t block,
464 		  struct buffer_head *bh_result, int create)
465 {
466 	int flags = create ? UDF_MAP_CREATE : 0;
467 
468 	/*
469 	 * We preallocate blocks only for regular files. It also makes sense
470 	 * for directories but there's a problem of when to drop the
471 	 * preallocation. We might use some delayed work for that but I feel
472 	 * it's overengineering for a filesystem like UDF.
473 	 */
474 	if (!S_ISREG(inode->i_mode))
475 		flags |= UDF_MAP_NOPREALLOC;
476 	return __udf_get_block(inode, block, bh_result, flags);
477 }
478 
479 /*
480  * We shouldn't be allocating blocks on page writeback since we allocate them
481  * on page fault. We can still spot dirty buffers without allocated blocks
482  * when truncate expands the file. These however don't have valid data so we
483  * can safely ignore them. So never allocate blocks from page writeback.
484  */
485 static int udf_get_block_wb(struct inode *inode, sector_t block,
486 			    struct buffer_head *bh_result, int create)
487 {
488 	return __udf_get_block(inode, block, bh_result, 0);
489 }
490 
491 /* Extend the file with new blocks totaling 'new_block_bytes',
492  * return the number of extents added
493  */
494 static int udf_do_extend_file(struct inode *inode,
495 			      struct extent_position *last_pos,
496 			      struct kernel_long_ad *last_ext,
497 			      loff_t new_block_bytes)
498 {
499 	uint32_t add;
500 	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
501 	struct super_block *sb = inode->i_sb;
502 	struct udf_inode_info *iinfo;
503 	int err;
504 
505 	/* The previous extent is fake and we should not extend by anything
506 	 * - there's nothing to do... */
507 	if (!new_block_bytes && fake)
508 		return 0;
509 
510 	iinfo = UDF_I(inode);
511 	/* Round the last extent up to a multiple of block size */
512 	if (last_ext->extLength & (sb->s_blocksize - 1)) {
513 		last_ext->extLength =
514 			(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
515 			(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
516 			  sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
517 		iinfo->i_lenExtents =
518 			(iinfo->i_lenExtents + sb->s_blocksize - 1) &
519 			~(sb->s_blocksize - 1);
520 	}
521 
522 	add = 0;
523 	/* Can we merge with the previous extent? */
524 	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
525 					EXT_NOT_RECORDED_NOT_ALLOCATED) {
526 		add = (1 << 30) - sb->s_blocksize -
527 			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
528 		if (add > new_block_bytes)
529 			add = new_block_bytes;
530 		new_block_bytes -= add;
531 		last_ext->extLength += add;
532 	}
533 
534 	if (fake) {
535 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
536 				   last_ext->extLength, 1);
537 		if (err < 0)
538 			goto out_err;
539 		count++;
540 	} else {
541 		struct kernel_lb_addr tmploc;
542 		uint32_t tmplen;
543 
544 		udf_write_aext(inode, last_pos, &last_ext->extLocation,
545 				last_ext->extLength, 1);
546 
547 		/*
548 		 * We've rewritten the last extent. If we are going to add
549 		 * more extents, we may need to enter possible following
550 		 * empty indirect extent.
551 		 */
552 		if (new_block_bytes)
553 			udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
554 	}
555 	iinfo->i_lenExtents += add;
556 
557 	/* Managed to do everything necessary? */
558 	if (!new_block_bytes)
559 		goto out;
560 
561 	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
562 	last_ext->extLocation.logicalBlockNum = 0;
563 	last_ext->extLocation.partitionReferenceNum = 0;
564 	add = (1 << 30) - sb->s_blocksize;
565 	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;
566 
567 	/* Create enough extents to cover the whole hole */
568 	while (new_block_bytes > add) {
569 		new_block_bytes -= add;
570 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
571 				   last_ext->extLength, 1);
572 		if (err)
573 			goto out_err;
574 		iinfo->i_lenExtents += add;
575 		count++;
576 	}
577 	if (new_block_bytes) {
578 		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
579 			new_block_bytes;
580 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
581 				   last_ext->extLength, 1);
582 		if (err)
583 			goto out_err;
584 		iinfo->i_lenExtents += new_block_bytes;
585 		count++;
586 	}
587 
588 out:
589 	/* last_pos should point to the last written extent... */
590 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
591 		last_pos->offset -= sizeof(struct short_ad);
592 	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
593 		last_pos->offset -= sizeof(struct long_ad);
594 	else
595 		return -EIO;
596 
597 	return count;
598 out_err:
599 	/* Remove extents we've created so far */
600 	udf_clear_extent_cache(inode);
601 	udf_truncate_extents(inode);
602 	return err;
603 }
604 
605 /* Extend the final extent of the file so that it covers new_elen bytes */
606 static void udf_do_extend_final_block(struct inode *inode,
607 				      struct extent_position *last_pos,
608 				      struct kernel_long_ad *last_ext,
609 				      uint32_t new_elen)
610 {
611 	uint32_t added_bytes;
612 
613 	/*
614 	 * Extent already large enough? It may be already rounded up to block
615 	 * size...
616 	 */
617 	if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
618 		return;
619 	added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
620 	last_ext->extLength += added_bytes;
621 	UDF_I(inode)->i_lenExtents += added_bytes;
622 
623 	udf_write_aext(inode, last_pos, &last_ext->extLocation,
624 			last_ext->extLength, 1);
625 }
626 
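/*
 * Grow the extent list so that it covers 'newsize' bytes, either by extending
 * the final extent within its last block or by appending not-recorded extents
 * over the newly created hole. Takes i_data_sem itself.
 */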
627 static int udf_extend_file(struct inode *inode, loff_t newsize)
628 {
629 
630 	struct extent_position epos;
631 	struct kernel_lb_addr eloc;
632 	uint32_t elen;
633 	int8_t etype;
634 	struct super_block *sb = inode->i_sb;
635 	sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
636 	loff_t new_elen;
637 	int adsize;
638 	struct udf_inode_info *iinfo = UDF_I(inode);
639 	struct kernel_long_ad extent;
640 	int err = 0;
641 	bool within_last_ext;
642 
643 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
644 		adsize = sizeof(struct short_ad);
645 	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
646 		adsize = sizeof(struct long_ad);
647 	else
648 		BUG();
649 
650 	down_write(&iinfo->i_data_sem);
651 	/*
652 	 * When creating a hole in the file, just don't bother with preserving
653 	 * preallocation. It likely won't be very useful anyway.
654 	 */
655 	udf_discard_prealloc(inode);
656 
657 	etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
658 	within_last_ext = (etype != -1);
659 	/* We don't expect extents past EOF... */
660 	WARN_ON_ONCE(within_last_ext &&
661 		     elen > ((loff_t)offset + 1) << inode->i_blkbits);
662 
663 	if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
664 	    (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
665 		/* File has no extents at all or has an empty last
666 		 * indirect extent! Create a fake extent... */
667 		extent.extLocation.logicalBlockNum = 0;
668 		extent.extLocation.partitionReferenceNum = 0;
669 		extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
670 	} else {
671 		epos.offset -= adsize;
672 		etype = udf_next_aext(inode, &epos, &extent.extLocation,
673 				      &extent.extLength, 0);
674 		extent.extLength |= etype << 30;
675 	}
676 
677 	new_elen = ((loff_t)offset << inode->i_blkbits) |
678 					(newsize & (sb->s_blocksize - 1));
679 
680 	/* File has an extent covering the new size (could happen when extending
681 	 * inside a block)?
682 	 */
683 	if (within_last_ext) {
684 		/* Extending file within the last file block */
685 		udf_do_extend_final_block(inode, &epos, &extent, new_elen);
686 	} else {
687 		err = udf_do_extend_file(inode, &epos, &extent, new_elen);
688 	}
689 
690 	if (err < 0)
691 		goto out;
692 	err = 0;
693 out:
694 	brelse(epos.bh);
695 	up_write(&iinfo->i_data_sem);
696 	return err;
697 }
698 
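/*
 * Core of block mapping with allocation: walk the extent list to the extent
 * containing map->lblk, return the block if it is already recorded, otherwise
 * allocate a new block (creating not-recorded extents for any hole up to it),
 * then split, optionally preallocate, merge and write back the affected
 * extents. Called with i_data_sem held for writing.
 */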
699 static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
700 {
701 	struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
702 	struct extent_position prev_epos, cur_epos, next_epos;
703 	int count = 0, startnum = 0, endnum = 0;
704 	uint32_t elen = 0, tmpelen;
705 	struct kernel_lb_addr eloc, tmpeloc;
706 	int c = 1;
707 	loff_t lbcount = 0, b_off = 0;
708 	udf_pblk_t newblocknum;
709 	sector_t offset = 0;
710 	int8_t etype;
711 	struct udf_inode_info *iinfo = UDF_I(inode);
712 	udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
713 	int lastblock = 0;
714 	bool isBeyondEOF;
715 	int ret = 0;
716 
717 	prev_epos.offset = udf_file_entry_alloc_offset(inode);
718 	prev_epos.block = iinfo->i_location;
719 	prev_epos.bh = NULL;
720 	cur_epos = next_epos = prev_epos;
721 	b_off = (loff_t)map->lblk << inode->i_sb->s_blocksize_bits;
722 
723 	/* find the extent which contains the block we are looking for.
724 	   alternate between laarr[0] and laarr[1] for locations of the
725 	   current extent, and the previous extent */
726 	do {
727 		if (prev_epos.bh != cur_epos.bh) {
728 			brelse(prev_epos.bh);
729 			get_bh(cur_epos.bh);
730 			prev_epos.bh = cur_epos.bh;
731 		}
732 		if (cur_epos.bh != next_epos.bh) {
733 			brelse(cur_epos.bh);
734 			get_bh(next_epos.bh);
735 			cur_epos.bh = next_epos.bh;
736 		}
737 
738 		lbcount += elen;
739 
740 		prev_epos.block = cur_epos.block;
741 		cur_epos.block = next_epos.block;
742 
743 		prev_epos.offset = cur_epos.offset;
744 		cur_epos.offset = next_epos.offset;
745 
746 		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
747 		if (etype == -1)
748 			break;
749 
750 		c = !c;
751 
752 		laarr[c].extLength = (etype << 30) | elen;
753 		laarr[c].extLocation = eloc;
754 
755 		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
756 			pgoal = eloc.logicalBlockNum +
757 				((elen + inode->i_sb->s_blocksize - 1) >>
758 				 inode->i_sb->s_blocksize_bits);
759 
760 		count++;
761 	} while (lbcount + elen <= b_off);
762 
763 	b_off -= lbcount;
764 	offset = b_off >> inode->i_sb->s_blocksize_bits;
765 	/*
766 	 * Move prev_epos and cur_epos into indirect extent if we are at
767 	 * the pointer to it
768 	 */
769 	udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
770 	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
771 
772 	/* If the extent is allocated and recorded, return the block.
773 	   If the extent length is not a multiple of the blocksize, round it up. */
774 
775 	if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
776 		if (elen & (inode->i_sb->s_blocksize - 1)) {
777 			elen = EXT_RECORDED_ALLOCATED |
778 				((elen + inode->i_sb->s_blocksize - 1) &
779 				 ~(inode->i_sb->s_blocksize - 1));
780 			iinfo->i_lenExtents =
781 				ALIGN(iinfo->i_lenExtents,
782 				      inode->i_sb->s_blocksize);
783 			udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
784 		}
785 		map->oflags = UDF_BLK_MAPPED;
786 		map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
787 		goto out_free;
788 	}
789 
790 	/* Are we beyond EOF and preallocated extent? */
791 	if (etype == -1) {
792 		loff_t hole_len;
793 
794 		isBeyondEOF = true;
795 		if (count) {
796 			if (c)
797 				laarr[0] = laarr[1];
798 			startnum = 1;
799 		} else {
800 			/* Create a fake extent when there's not one */
801 			memset(&laarr[0].extLocation, 0x00,
802 				sizeof(struct kernel_lb_addr));
803 			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
804 			/* Will udf_do_extend_file() create real extent from
805 			   a fake one? */
806 			startnum = (offset > 0);
807 		}
808 		/* Create extents for the hole between EOF and offset */
809 		hole_len = (loff_t)offset << inode->i_blkbits;
810 		ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
811 		if (ret < 0)
812 			goto out_free;
813 		c = 0;
814 		offset = 0;
815 		count += ret;
816 		/*
817 		 * Is there any real extent? - otherwise we overwrite the fake
818 		 * one...
819 		 */
820 		if (count)
821 			c = !c;
822 		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
823 			inode->i_sb->s_blocksize;
824 		memset(&laarr[c].extLocation, 0x00,
825 			sizeof(struct kernel_lb_addr));
826 		count++;
827 		endnum = c + 1;
828 		lastblock = 1;
829 	} else {
830 		isBeyondEOF = false;
831 		endnum = startnum = ((count > 2) ? 2 : count);
832 
833 		/* if the current extent is in position 0,
834 		   swap it with the previous */
835 		if (!c && count != 1) {
836 			laarr[2] = laarr[0];
837 			laarr[0] = laarr[1];
838 			laarr[1] = laarr[2];
839 			c = 1;
840 		}
841 
842 		/* if the current block is located in an extent,
843 		   read the next extent */
844 		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
845 		if (etype != -1) {
846 			laarr[c + 1].extLength = (etype << 30) | elen;
847 			laarr[c + 1].extLocation = eloc;
848 			count++;
849 			startnum++;
850 			endnum++;
851 		} else
852 			lastblock = 1;
853 	}
854 
855 	/* if the current extent is not recorded but allocated, get the
856 	 * block in the extent corresponding to the requested block */
857 	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
858 		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
859 	else { /* otherwise, allocate a new block */
860 		if (iinfo->i_next_alloc_block == map->lblk)
861 			goal = iinfo->i_next_alloc_goal;
862 
863 		if (!goal) {
864 			if (!(goal = pgoal)) /* XXX: what was intended here? */
865 				goal = iinfo->i_location.logicalBlockNum + 1;
866 		}
867 
868 		newblocknum = udf_new_block(inode->i_sb, inode,
869 				iinfo->i_location.partitionReferenceNum,
870 				goal, &ret);
871 		if (!newblocknum)
872 			goto out_free;
873 		if (isBeyondEOF)
874 			iinfo->i_lenExtents += inode->i_sb->s_blocksize;
875 	}
876 
877 	/* If the extent the requested block is located in contains multiple
878 	 * blocks, split the extent into at most three extents: blocks prior
879 	 * to the requested block, the requested block itself, and blocks after
880 	 * the requested block */
881 	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
882 
883 	if (!(map->iflags & UDF_MAP_NOPREALLOC))
884 		udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
885 
886 	/* merge any contiguous extents in laarr */
887 	udf_merge_extents(inode, laarr, &endnum);
888 
889 	/* write back the new extents, inserting new extents if the new number
890 	 * of extents is greater than the old number, and deleting extents if
891 	 * the new number of extents is less than the old number */
892 	ret = udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
893 	if (ret < 0)
894 		goto out_free;
895 
896 	map->pblk = udf_get_pblock(inode->i_sb, newblocknum,
897 				iinfo->i_location.partitionReferenceNum, 0);
898 	if (!map->pblk) {
899 		ret = -EFSCORRUPTED;
900 		goto out_free;
901 	}
902 	map->oflags = UDF_BLK_NEW | UDF_BLK_MAPPED;
903 	iinfo->i_next_alloc_block = map->lblk + 1;
904 	iinfo->i_next_alloc_goal = newblocknum + 1;
905 	inode->i_ctime = current_time(inode);
906 
907 	if (IS_SYNC(inode))
908 		udf_sync_inode(inode);
909 	else
910 		mark_inode_dirty(inode);
911 	ret = 0;
912 out_free:
913 	brelse(prev_epos.bh);
914 	brelse(cur_epos.bh);
915 	brelse(next_epos.bh);
916 	return ret;
917 }
918 
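/*
 * Split the not-recorded extent laarr[*c] so that the block at 'offset' within
 * it becomes a single recorded one-block extent located at newblocknum,
 * keeping whatever lies before and after it as separate extents.
 */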
919 static void udf_split_extents(struct inode *inode, int *c, int offset,
920 			       udf_pblk_t newblocknum,
921 			       struct kernel_long_ad *laarr, int *endnum)
922 {
923 	unsigned long blocksize = inode->i_sb->s_blocksize;
924 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
925 
926 	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
927 	    (laarr[*c].extLength >> 30) ==
928 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
929 		int curr = *c;
930 		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
931 			    blocksize - 1) >> blocksize_bits;
932 		int8_t etype = (laarr[curr].extLength >> 30);
933 
934 		if (blen == 1)
935 			;
936 		else if (!offset || blen == offset + 1) {
937 			laarr[curr + 2] = laarr[curr + 1];
938 			laarr[curr + 1] = laarr[curr];
939 		} else {
940 			laarr[curr + 3] = laarr[curr + 1];
941 			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
942 		}
943 
944 		if (offset) {
945 			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
946 				udf_free_blocks(inode->i_sb, inode,
947 						&laarr[curr].extLocation,
948 						0, offset);
949 				laarr[curr].extLength =
950 					EXT_NOT_RECORDED_NOT_ALLOCATED |
951 					(offset << blocksize_bits);
952 				laarr[curr].extLocation.logicalBlockNum = 0;
953 				laarr[curr].extLocation.
954 						partitionReferenceNum = 0;
955 			} else
956 				laarr[curr].extLength = (etype << 30) |
957 					(offset << blocksize_bits);
958 			curr++;
959 			(*c)++;
960 			(*endnum)++;
961 		}
962 
963 		laarr[curr].extLocation.logicalBlockNum = newblocknum;
964 		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
965 			laarr[curr].extLocation.partitionReferenceNum =
966 				UDF_I(inode)->i_location.partitionReferenceNum;
967 		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
968 			blocksize;
969 		curr++;
970 
971 		if (blen != offset + 1) {
972 			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
973 				laarr[curr].extLocation.logicalBlockNum +=
974 								offset + 1;
975 			laarr[curr].extLength = (etype << 30) |
976 				((blen - (offset + 1)) << blocksize_bits);
977 			curr++;
978 			(*endnum)++;
979 		}
980 	}
981 }
982 
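/*
 * Try to preallocate blocks (up to UDF_DEFAULT_PREALLOC_BLOCKS) right after
 * the newly mapped block and describe them as an allocated-but-not-recorded
 * extent, shrinking or dropping the unallocated extents they replace.
 */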
983 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
984 				 struct kernel_long_ad *laarr,
985 				 int *endnum)
986 {
987 	int start, length = 0, currlength = 0, i;
988 
989 	if (*endnum >= (c + 1)) {
990 		if (!lastblock)
991 			return;
992 		else
993 			start = c;
994 	} else {
995 		if ((laarr[c + 1].extLength >> 30) ==
996 					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
997 			start = c + 1;
998 			length = currlength =
999 				(((laarr[c + 1].extLength &
1000 					UDF_EXTENT_LENGTH_MASK) +
1001 				inode->i_sb->s_blocksize - 1) >>
1002 				inode->i_sb->s_blocksize_bits);
1003 		} else
1004 			start = c;
1005 	}
1006 
1007 	for (i = start + 1; i <= *endnum; i++) {
1008 		if (i == *endnum) {
1009 			if (lastblock)
1010 				length += UDF_DEFAULT_PREALLOC_BLOCKS;
1011 		} else if ((laarr[i].extLength >> 30) ==
1012 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
1013 			length += (((laarr[i].extLength &
1014 						UDF_EXTENT_LENGTH_MASK) +
1015 				    inode->i_sb->s_blocksize - 1) >>
1016 				    inode->i_sb->s_blocksize_bits);
1017 		} else
1018 			break;
1019 	}
1020 
1021 	if (length) {
1022 		int next = laarr[start].extLocation.logicalBlockNum +
1023 			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
1024 			  inode->i_sb->s_blocksize - 1) >>
1025 			  inode->i_sb->s_blocksize_bits);
1026 		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
1027 				laarr[start].extLocation.partitionReferenceNum,
1028 				next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
1029 				length : UDF_DEFAULT_PREALLOC_BLOCKS) -
1030 				currlength);
1031 		if (numalloc) {
1032 			if (start == (c + 1))
1033 				laarr[start].extLength +=
1034 					(numalloc <<
1035 					 inode->i_sb->s_blocksize_bits);
1036 			else {
1037 				memmove(&laarr[c + 2], &laarr[c + 1],
1038 					sizeof(struct long_ad) * (*endnum - (c + 1)));
1039 				(*endnum)++;
1040 				laarr[c + 1].extLocation.logicalBlockNum = next;
1041 				laarr[c + 1].extLocation.partitionReferenceNum =
1042 					laarr[c].extLocation.
1043 							partitionReferenceNum;
1044 				laarr[c + 1].extLength =
1045 					EXT_NOT_RECORDED_ALLOCATED |
1046 					(numalloc <<
1047 					 inode->i_sb->s_blocksize_bits);
1048 				start = c + 1;
1049 			}
1050 
1051 			for (i = start + 1; numalloc && i < *endnum; i++) {
1052 				int elen = ((laarr[i].extLength &
1053 						UDF_EXTENT_LENGTH_MASK) +
1054 					    inode->i_sb->s_blocksize - 1) >>
1055 					    inode->i_sb->s_blocksize_bits;
1056 
1057 				if (elen > numalloc) {
1058 					laarr[i].extLength -=
1059 						(numalloc <<
1060 						 inode->i_sb->s_blocksize_bits);
1061 					numalloc = 0;
1062 				} else {
1063 					numalloc -= elen;
1064 					if (*endnum > (i + 1))
1065 						memmove(&laarr[i],
1066 							&laarr[i + 1],
1067 							sizeof(struct long_ad) *
1068 							(*endnum - (i + 1)));
1069 					i--;
1070 					(*endnum)--;
1071 				}
1072 			}
1073 			UDF_I(inode)->i_lenExtents +=
1074 				numalloc << inode->i_sb->s_blocksize_bits;
1075 		}
1076 	}
1077 }
1078 
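/*
 * Merge neighbouring extents in laarr[] where their types and block ranges
 * allow it, freeing the on-disk blocks of allocated-but-unrecorded extents
 * that get converted to (or merged with) unallocated ones.
 */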
1079 static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
1080 			      int *endnum)
1081 {
1082 	int i;
1083 	unsigned long blocksize = inode->i_sb->s_blocksize;
1084 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1085 
1086 	for (i = 0; i < (*endnum - 1); i++) {
1087 		struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
1088 		struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
1089 
1090 		if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
1091 			(((li->extLength >> 30) ==
1092 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
1093 			((lip1->extLocation.logicalBlockNum -
1094 			  li->extLocation.logicalBlockNum) ==
1095 			(((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1096 			blocksize - 1) >> blocksize_bits)))) {
1097 
1098 			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1099 			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
1100 			     blocksize - 1) <= UDF_EXTENT_LENGTH_MASK) {
1101 				li->extLength = lip1->extLength +
1102 					(((li->extLength &
1103 						UDF_EXTENT_LENGTH_MASK) +
1104 					 blocksize - 1) & ~(blocksize - 1));
1105 				if (*endnum > (i + 2))
1106 					memmove(&laarr[i + 1], &laarr[i + 2],
1107 						sizeof(struct long_ad) *
1108 						(*endnum - (i + 2)));
1109 				i--;
1110 				(*endnum)--;
1111 			}
1112 		} else if (((li->extLength >> 30) ==
1113 				(EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
1114 			   ((lip1->extLength >> 30) ==
1115 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
1116 			udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
1117 					((li->extLength &
1118 					  UDF_EXTENT_LENGTH_MASK) +
1119 					 blocksize - 1) >> blocksize_bits);
1120 			li->extLocation.logicalBlockNum = 0;
1121 			li->extLocation.partitionReferenceNum = 0;
1122 
1123 			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1124 			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
1125 			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
1126 				lip1->extLength = (lip1->extLength -
1127 						   (li->extLength &
1128 						   UDF_EXTENT_LENGTH_MASK) +
1129 						   UDF_EXTENT_LENGTH_MASK) &
1130 						   ~(blocksize - 1);
1131 				li->extLength = (li->extLength &
1132 						 UDF_EXTENT_FLAG_MASK) +
1133 						(UDF_EXTENT_LENGTH_MASK + 1) -
1134 						blocksize;
1135 			} else {
1136 				li->extLength = lip1->extLength +
1137 					(((li->extLength &
1138 						UDF_EXTENT_LENGTH_MASK) +
1139 					  blocksize - 1) & ~(blocksize - 1));
1140 				if (*endnum > (i + 2))
1141 					memmove(&laarr[i + 1], &laarr[i + 2],
1142 						sizeof(struct long_ad) *
1143 						(*endnum - (i + 2)));
1144 				i--;
1145 				(*endnum)--;
1146 			}
1147 		} else if ((li->extLength >> 30) ==
1148 					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
1149 			udf_free_blocks(inode->i_sb, inode,
1150 					&li->extLocation, 0,
1151 					((li->extLength &
1152 						UDF_EXTENT_LENGTH_MASK) +
1153 					 blocksize - 1) >> blocksize_bits);
1154 			li->extLocation.logicalBlockNum = 0;
1155 			li->extLocation.partitionReferenceNum = 0;
1156 			li->extLength = (li->extLength &
1157 						UDF_EXTENT_LENGTH_MASK) |
1158 						EXT_NOT_RECORDED_NOT_ALLOCATED;
1159 		}
1160 	}
1161 }
1162 
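/*
 * Write the extents laarr[0..endnum-1] back into the inode's allocation
 * descriptors at 'epos', first inserting or deleting descriptors when the
 * number of extents grew or shrank (from 'startnum' to 'endnum').
 */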
1163 static int udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
1164 			      int startnum, int endnum,
1165 			      struct extent_position *epos)
1166 {
1167 	int start = 0, i;
1168 	struct kernel_lb_addr tmploc;
1169 	uint32_t tmplen;
1170 	int err;
1171 
1172 	if (startnum > endnum) {
1173 		for (i = 0; i < (startnum - endnum); i++)
1174 			udf_delete_aext(inode, *epos);
1175 	} else if (startnum < endnum) {
1176 		for (i = 0; i < (endnum - startnum); i++) {
1177 			err = udf_insert_aext(inode, *epos,
1178 					      laarr[i].extLocation,
1179 					      laarr[i].extLength);
1180 			/*
1181 			 * If we fail here, we are likely corrupting the extent
1182 			 * list and leaking blocks. At least stop early to
1183 			 * limit the damage.
1184 			 */
1185 			if (err < 0)
1186 				return err;
1187 			udf_next_aext(inode, epos, &laarr[i].extLocation,
1188 				      &laarr[i].extLength, 1);
1189 			start++;
1190 		}
1191 	}
1192 
1193 	for (i = start; i < endnum; i++) {
1194 		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
1195 		udf_write_aext(inode, epos, &laarr[i].extLocation,
1196 			       laarr[i].extLength, 1);
1197 	}
1198 	return 0;
1199 }
1200 
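/*
 * Return a buffer_head for the given file block, optionally allocating it.
 * A freshly allocated block is returned zeroed and marked dirty, an existing
 * one is read from disk. Returns NULL on failure or when the block is not
 * mapped; *err holds the error code (0 for a plain unmapped block).
 */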
1201 struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
1202 			      int create, int *err)
1203 {
1204 	struct buffer_head *bh = NULL;
1205 	struct udf_map_rq map = {
1206 		.lblk = block,
1207 		.iflags = UDF_MAP_NOPREALLOC | (create ? UDF_MAP_CREATE : 0),
1208 	};
1209 
1210 	*err = udf_map_block(inode, &map);
1211 	if (*err || !(map.oflags & UDF_BLK_MAPPED))
1212 		return NULL;
1213 
1214 	bh = sb_getblk(inode->i_sb, map.pblk);
1215 	if (!bh) {
1216 		*err = -ENOMEM;
1217 		return NULL;
1218 	}
1219 	if (map.oflags & UDF_BLK_NEW) {
1220 		lock_buffer(bh);
1221 		memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1222 		set_buffer_uptodate(bh);
1223 		unlock_buffer(bh);
1224 		mark_buffer_dirty_inode(bh, inode);
1225 		return bh;
1226 	}
1227 
1228 	if (bh_read(bh, 0) >= 0)
1229 		return bh;
1230 
1231 	brelse(bh);
1232 	*err = -EIO;
1233 	return NULL;
1234 }
1235 
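/*
 * Change the file size. Growing a file expands the in-ICB area or the extent
 * list as needed; shrinking truncates the page cache and the extents beyond
 * the new size. Timestamps are updated and the inode written out if needed.
 */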
1236 int udf_setsize(struct inode *inode, loff_t newsize)
1237 {
1238 	int err = 0;
1239 	struct udf_inode_info *iinfo;
1240 	unsigned int bsize = i_blocksize(inode);
1241 
1242 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1243 	      S_ISLNK(inode->i_mode)))
1244 		return -EINVAL;
1245 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1246 		return -EPERM;
1247 
1248 	filemap_invalidate_lock(inode->i_mapping);
1249 	iinfo = UDF_I(inode);
1250 	if (newsize > inode->i_size) {
1251 		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1252 			if (bsize >=
1253 			    (udf_file_entry_alloc_offset(inode) + newsize)) {
1254 				down_write(&iinfo->i_data_sem);
1255 				iinfo->i_lenAlloc = newsize;
1256 				up_write(&iinfo->i_data_sem);
1257 				goto set_size;
1258 			}
1259 			err = udf_expand_file_adinicb(inode);
1260 			if (err)
1261 				goto out_unlock;
1262 		}
1263 		err = udf_extend_file(inode, newsize);
1264 		if (err)
1265 			goto out_unlock;
1266 set_size:
1267 		truncate_setsize(inode, newsize);
1268 	} else {
1269 		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1270 			down_write(&iinfo->i_data_sem);
1271 			udf_clear_extent_cache(inode);
1272 			memset(iinfo->i_data + iinfo->i_lenEAttr + newsize,
1273 			       0x00, bsize - newsize -
1274 			       udf_file_entry_alloc_offset(inode));
1275 			iinfo->i_lenAlloc = newsize;
1276 			truncate_setsize(inode, newsize);
1277 			up_write(&iinfo->i_data_sem);
1278 			goto update_time;
1279 		}
1280 		err = block_truncate_page(inode->i_mapping, newsize,
1281 					  udf_get_block);
1282 		if (err)
1283 			goto out_unlock;
1284 		truncate_setsize(inode, newsize);
1285 		down_write(&iinfo->i_data_sem);
1286 		udf_clear_extent_cache(inode);
1287 		err = udf_truncate_extents(inode);
1288 		up_write(&iinfo->i_data_sem);
1289 		if (err)
1290 			goto out_unlock;
1291 	}
1292 update_time:
1293 	inode->i_mtime = inode->i_ctime = current_time(inode);
1294 	if (IS_SYNC(inode))
1295 		udf_sync_inode(inode);
1296 	else
1297 		mark_inode_dirty(inode);
1298 out_unlock:
1299 	filemap_invalidate_unlock(inode->i_mapping);
1300 	return err;
1301 }
1302 
1303 /*
1304  * Maximum length of the linked list formed by an ICB hierarchy. The number is
1305  * arbitrary - high enough not to limit any real use of rewritten inodes on
1306  * write-once media, yet low enough to avoid looping too long on corrupted media.
1307  */
1308 #define UDF_MAX_ICB_NESTING 1024
1309 
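/*
 * Read the file entry (or extended file entry) for 'inode' from disk,
 * following strategy 4096 indirect ICBs, sanity check its allocation
 * descriptors and set up the in-core inode (mode, owner, times, size and the
 * file/dir/symlink/device operations) from it.
 */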
1310 static int udf_read_inode(struct inode *inode, bool hidden_inode)
1311 {
1312 	struct buffer_head *bh = NULL;
1313 	struct fileEntry *fe;
1314 	struct extendedFileEntry *efe;
1315 	uint16_t ident;
1316 	struct udf_inode_info *iinfo = UDF_I(inode);
1317 	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1318 	struct kernel_lb_addr *iloc = &iinfo->i_location;
1319 	unsigned int link_count;
1320 	unsigned int indirections = 0;
1321 	int bs = inode->i_sb->s_blocksize;
1322 	int ret = -EIO;
1323 	uint32_t uid, gid;
1324 
1325 reread:
1326 	if (iloc->partitionReferenceNum >= sbi->s_partitions) {
1327 		udf_debug("partition reference: %u > logical volume partitions: %u\n",
1328 			  iloc->partitionReferenceNum, sbi->s_partitions);
1329 		return -EIO;
1330 	}
1331 
1332 	if (iloc->logicalBlockNum >=
1333 	    sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
1334 		udf_debug("block=%u, partition=%u out of range\n",
1335 			  iloc->logicalBlockNum, iloc->partitionReferenceNum);
1336 		return -EIO;
1337 	}
1338 
1339 	/*
1340 	 * Set defaults, but the inode is still incomplete!
1341 	 * Note: get_new_inode() sets the following on a new inode:
1342 	 *      i_sb = sb
1343  *      i_ino = ino
1344 	 *      i_flags = sb->s_flags
1345 	 *      i_state = 0
1346 	 * clean_inode(): zero fills and sets
1347 	 *      i_count = 1
1348 	 *      i_nlink = 1
1349 	 *      i_op = NULL;
1350 	 */
1351 	bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
1352 	if (!bh) {
1353 		udf_err(inode->i_sb, "(ino %lu) failed !bh\n", inode->i_ino);
1354 		return -EIO;
1355 	}
1356 
1357 	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1358 	    ident != TAG_IDENT_USE) {
1359 		udf_err(inode->i_sb, "(ino %lu) failed ident=%u\n",
1360 			inode->i_ino, ident);
1361 		goto out;
1362 	}
1363 
1364 	fe = (struct fileEntry *)bh->b_data;
1365 	efe = (struct extendedFileEntry *)bh->b_data;
1366 
1367 	if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
1368 		struct buffer_head *ibh;
1369 
1370 		ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
1371 		if (ident == TAG_IDENT_IE && ibh) {
1372 			struct kernel_lb_addr loc;
1373 			struct indirectEntry *ie;
1374 
1375 			ie = (struct indirectEntry *)ibh->b_data;
1376 			loc = lelb_to_cpu(ie->indirectICB.extLocation);
1377 
1378 			if (ie->indirectICB.extLength) {
1379 				brelse(ibh);
1380 				memcpy(&iinfo->i_location, &loc,
1381 				       sizeof(struct kernel_lb_addr));
1382 				if (++indirections > UDF_MAX_ICB_NESTING) {
1383 					udf_err(inode->i_sb,
1384 						"too many ICBs in ICB hierarchy"
1385 						" (max %d supported)\n",
1386 						UDF_MAX_ICB_NESTING);
1387 					goto out;
1388 				}
1389 				brelse(bh);
1390 				goto reread;
1391 			}
1392 		}
1393 		brelse(ibh);
1394 	} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
1395 		udf_err(inode->i_sb, "unsupported strategy type: %u\n",
1396 			le16_to_cpu(fe->icbTag.strategyType));
1397 		goto out;
1398 	}
1399 	if (fe->icbTag.strategyType == cpu_to_le16(4))
1400 		iinfo->i_strat4096 = 0;
1401 	else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1402 		iinfo->i_strat4096 = 1;
1403 
1404 	iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
1405 							ICBTAG_FLAG_AD_MASK;
1406 	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
1407 	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
1408 	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1409 		ret = -EIO;
1410 		goto out;
1411 	}
1412 	iinfo->i_hidden = hidden_inode;
1413 	iinfo->i_unique = 0;
1414 	iinfo->i_lenEAttr = 0;
1415 	iinfo->i_lenExtents = 0;
1416 	iinfo->i_lenAlloc = 0;
1417 	iinfo->i_next_alloc_block = 0;
1418 	iinfo->i_next_alloc_goal = 0;
1419 	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
1420 		iinfo->i_efe = 1;
1421 		iinfo->i_use = 0;
1422 		ret = udf_alloc_i_data(inode, bs -
1423 					sizeof(struct extendedFileEntry));
1424 		if (ret)
1425 			goto out;
1426 		memcpy(iinfo->i_data,
1427 		       bh->b_data + sizeof(struct extendedFileEntry),
1428 		       bs - sizeof(struct extendedFileEntry));
1429 	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
1430 		iinfo->i_efe = 0;
1431 		iinfo->i_use = 0;
1432 		ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
1433 		if (ret)
1434 			goto out;
1435 		memcpy(iinfo->i_data,
1436 		       bh->b_data + sizeof(struct fileEntry),
1437 		       bs - sizeof(struct fileEntry));
1438 	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1439 		iinfo->i_efe = 0;
1440 		iinfo->i_use = 1;
1441 		iinfo->i_lenAlloc = le32_to_cpu(
1442 				((struct unallocSpaceEntry *)bh->b_data)->
1443 				 lengthAllocDescs);
1444 		ret = udf_alloc_i_data(inode, bs -
1445 					sizeof(struct unallocSpaceEntry));
1446 		if (ret)
1447 			goto out;
1448 		memcpy(iinfo->i_data,
1449 		       bh->b_data + sizeof(struct unallocSpaceEntry),
1450 		       bs - sizeof(struct unallocSpaceEntry));
1451 		goto out;	/* ret == 0 here; release bh at the common exit */
1452 	}
1453 
1454 	ret = -EIO;
1455 	read_lock(&sbi->s_cred_lock);
1456 	uid = le32_to_cpu(fe->uid);
1457 	if (uid == UDF_INVALID_ID ||
1458 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
1459 		inode->i_uid = sbi->s_uid;
1460 	else
1461 		i_uid_write(inode, uid);
1462 
1463 	gid = le32_to_cpu(fe->gid);
1464 	if (gid == UDF_INVALID_ID ||
1465 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
1466 		inode->i_gid = sbi->s_gid;
1467 	else
1468 		i_gid_write(inode, gid);
1469 
1470 	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
1471 			sbi->s_fmode != UDF_INVALID_MODE)
1472 		inode->i_mode = sbi->s_fmode;
1473 	else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
1474 			sbi->s_dmode != UDF_INVALID_MODE)
1475 		inode->i_mode = sbi->s_dmode;
1476 	else
1477 		inode->i_mode = udf_convert_permissions(fe);
1478 	inode->i_mode &= ~sbi->s_umask;
1479 	iinfo->i_extraPerms = le32_to_cpu(fe->permissions) & ~FE_MAPPED_PERMS;
1480 
1481 	read_unlock(&sbi->s_cred_lock);
1482 
1483 	link_count = le16_to_cpu(fe->fileLinkCount);
1484 	if (!link_count) {
1485 		if (!hidden_inode) {
1486 			ret = -ESTALE;
1487 			goto out;
1488 		}
1489 		link_count = 1;
1490 	}
1491 	set_nlink(inode, link_count);
1492 
1493 	inode->i_size = le64_to_cpu(fe->informationLength);
1494 	iinfo->i_lenExtents = inode->i_size;
1495 
1496 	if (iinfo->i_efe == 0) {
1497 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1498 			(inode->i_sb->s_blocksize_bits - 9);
1499 
1500 		udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
1501 		udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime);
1502 		udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime);
1503 
1504 		iinfo->i_unique = le64_to_cpu(fe->uniqueID);
1505 		iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
1506 		iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
1507 		iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
1508 		iinfo->i_streamdir = 0;
1509 		iinfo->i_lenStreams = 0;
1510 	} else {
1511 		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1512 		    (inode->i_sb->s_blocksize_bits - 9);
1513 
1514 		udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime);
1515 		udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime);
1516 		udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);
1517 		udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime);
1518 
1519 		iinfo->i_unique = le64_to_cpu(efe->uniqueID);
1520 		iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
1521 		iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
1522 		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
1523 
1524 		/* Named streams */
1525 		iinfo->i_streamdir = (efe->streamDirectoryICB.extLength != 0);
1526 		iinfo->i_locStreamdir =
1527 			lelb_to_cpu(efe->streamDirectoryICB.extLocation);
1528 		iinfo->i_lenStreams = le64_to_cpu(efe->objectSize);
1529 		if (iinfo->i_lenStreams >= inode->i_size)
1530 			iinfo->i_lenStreams -= inode->i_size;
1531 		else
1532 			iinfo->i_lenStreams = 0;
1533 	}
1534 	inode->i_generation = iinfo->i_unique;
1535 
1536 	/*
1537 	 * Sanity check length of allocation descriptors and extended attrs to
1538 	 * avoid integer overflows
1539 	 */
1540 	if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
1541 		goto out;
1542 	/* Now do exact checks */
1543 	if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
1544 		goto out;
1545 	/* Sanity checks for files in ICB so that we don't get confused later */
1546 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1547 		/*
1548 		 * For a file stored in the ICB the data lives in the allocation
1549 		 * descriptors area, so the sizes should match
1550 		 */
1551 		if (iinfo->i_lenAlloc != inode->i_size)
1552 			goto out;
1553 		/* File in ICB has to fit in there... */
1554 		if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
1555 			goto out;
1556 	}
1557 
1558 	switch (fe->icbTag.fileType) {
1559 	case ICBTAG_FILE_TYPE_DIRECTORY:
1560 		inode->i_op = &udf_dir_inode_operations;
1561 		inode->i_fop = &udf_dir_operations;
1562 		inode->i_mode |= S_IFDIR;
1563 		inc_nlink(inode);
1564 		break;
1565 	case ICBTAG_FILE_TYPE_REALTIME:
1566 	case ICBTAG_FILE_TYPE_REGULAR:
1567 	case ICBTAG_FILE_TYPE_UNDEF:
1568 	case ICBTAG_FILE_TYPE_VAT20:
1569 		inode->i_data.a_ops = &udf_aops;
1570 		inode->i_op = &udf_file_inode_operations;
1571 		inode->i_fop = &udf_file_operations;
1572 		inode->i_mode |= S_IFREG;
1573 		break;
1574 	case ICBTAG_FILE_TYPE_BLOCK:
1575 		inode->i_mode |= S_IFBLK;
1576 		break;
1577 	case ICBTAG_FILE_TYPE_CHAR:
1578 		inode->i_mode |= S_IFCHR;
1579 		break;
1580 	case ICBTAG_FILE_TYPE_FIFO:
1581 		init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1582 		break;
1583 	case ICBTAG_FILE_TYPE_SOCKET:
1584 		init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1585 		break;
1586 	case ICBTAG_FILE_TYPE_SYMLINK:
1587 		inode->i_data.a_ops = &udf_symlink_aops;
1588 		inode->i_op = &udf_symlink_inode_operations;
1589 		inode_nohighmem(inode);
1590 		inode->i_mode = S_IFLNK | 0777;
1591 		break;
1592 	case ICBTAG_FILE_TYPE_MAIN:
1593 		udf_debug("METADATA FILE-----\n");
1594 		break;
1595 	case ICBTAG_FILE_TYPE_MIRROR:
1596 		udf_debug("METADATA MIRROR FILE-----\n");
1597 		break;
1598 	case ICBTAG_FILE_TYPE_BITMAP:
1599 		udf_debug("METADATA BITMAP FILE-----\n");
1600 		break;
1601 	default:
1602 		udf_err(inode->i_sb, "(ino %lu) failed unknown file type=%u\n",
1603 			inode->i_ino, fe->icbTag.fileType);
1604 		goto out;
1605 	}
1606 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1607 		struct deviceSpec *dsea =
1608 			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1609 		if (dsea) {
1610 			init_special_inode(inode, inode->i_mode,
1611 				MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
1612 				      le32_to_cpu(dsea->minorDeviceIdent)));
1613 			/* Developer ID ??? */
1614 		} else
1615 			goto out;
1616 	}
1617 	ret = 0;
1618 out:
1619 	brelse(bh);
1620 	return ret;
1621 }
1622 
1623 static int udf_alloc_i_data(struct inode *inode, size_t size)
1624 {
1625 	struct udf_inode_info *iinfo = UDF_I(inode);
1626 	iinfo->i_data = kmalloc(size, GFP_KERNEL);
1627 	if (!iinfo->i_data)
1628 		return -ENOMEM;
1629 	return 0;
1630 }
1631 
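/* Translate on-disk UDF permission bits and ICB flags into a POSIX mode. */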
1632 static umode_t udf_convert_permissions(struct fileEntry *fe)
1633 {
1634 	umode_t mode;
1635 	uint32_t permissions;
1636 	uint32_t flags;
1637 
1638 	permissions = le32_to_cpu(fe->permissions);
1639 	flags = le16_to_cpu(fe->icbTag.flags);
1640 
1641 	mode =	((permissions) & 0007) |
1642 		((permissions >> 2) & 0070) |
1643 		((permissions >> 4) & 0700) |
1644 		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1645 		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1646 		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1647 
1648 	return mode;
1649 }
1650 
1651 void udf_update_extra_perms(struct inode *inode, umode_t mode)
1652 {
1653 	struct udf_inode_info *iinfo = UDF_I(inode);
1654 
1655 	/*
1656 	 * UDF 2.01 sec. 3.3.3.3 Note 2:
1657 	 * In Unix, delete permission tracks write
1658 	 */
1659 	iinfo->i_extraPerms &= ~FE_DELETE_PERMS;
1660 	if (mode & 0200)
1661 		iinfo->i_extraPerms |= FE_PERM_U_DELETE;
1662 	if (mode & 0020)
1663 		iinfo->i_extraPerms |= FE_PERM_G_DELETE;
1664 	if (mode & 0002)
1665 		iinfo->i_extraPerms |= FE_PERM_O_DELETE;
1666 }
1667 
1668 int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
1669 {
1670 	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1671 }
1672 
1673 static int udf_sync_inode(struct inode *inode)
1674 {
1675 	return udf_update_inode(inode, 1);
1676 }
1677 
1678 static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec64 time)
1679 {
1680 	if (iinfo->i_crtime.tv_sec > time.tv_sec ||
1681 	    (iinfo->i_crtime.tv_sec == time.tv_sec &&
1682 	     iinfo->i_crtime.tv_nsec > time.tv_nsec))
1683 		iinfo->i_crtime = time;
1684 }
1685 
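/*
 * Write the in-core inode back into its on-disk file entry, extended file
 * entry or unallocated space entry; when 'do_sync' is set the update is
 * pushed to disk before returning.
 */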
1686 static int udf_update_inode(struct inode *inode, int do_sync)
1687 {
1688 	struct buffer_head *bh = NULL;
1689 	struct fileEntry *fe;
1690 	struct extendedFileEntry *efe;
1691 	uint64_t lb_recorded;
1692 	uint32_t udfperms;
1693 	uint16_t icbflags;
1694 	uint16_t crclen;
1695 	int err = 0;
1696 	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1697 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1698 	struct udf_inode_info *iinfo = UDF_I(inode);
1699 
1700 	bh = sb_getblk(inode->i_sb,
1701 			udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
1702 	if (!bh) {
1703 		udf_debug("getblk failure\n");
1704 		return -EIO;
1705 	}
1706 
1707 	lock_buffer(bh);
1708 	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1709 	fe = (struct fileEntry *)bh->b_data;
1710 	efe = (struct extendedFileEntry *)bh->b_data;
1711 
1712 	if (iinfo->i_use) {
1713 		struct unallocSpaceEntry *use =
1714 			(struct unallocSpaceEntry *)bh->b_data;
1715 
1716 		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1717 		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
1718 		       iinfo->i_data, inode->i_sb->s_blocksize -
1719 					sizeof(struct unallocSpaceEntry));
1720 		use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
1721 		crclen = sizeof(struct unallocSpaceEntry);
1722 
1723 		goto finish;
1724 	}
1725 
1726 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1727 		fe->uid = cpu_to_le32(UDF_INVALID_ID);
1728 	else
1729 		fe->uid = cpu_to_le32(i_uid_read(inode));
1730 
1731 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1732 		fe->gid = cpu_to_le32(UDF_INVALID_ID);
1733 	else
1734 		fe->gid = cpu_to_le32(i_gid_read(inode));
1735 
1736 	udfperms = ((inode->i_mode & 0007)) |
1737 		   ((inode->i_mode & 0070) << 2) |
1738 		   ((inode->i_mode & 0700) << 4);
1739 
1740 	udfperms |= iinfo->i_extraPerms;
1741 	fe->permissions = cpu_to_le32(udfperms);
1742 
1743 	if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
1744 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1745 	else {
1746 		if (iinfo->i_hidden)
1747 			fe->fileLinkCount = cpu_to_le16(0);
1748 		else
1749 			fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1750 	}
1751 
1752 	fe->informationLength = cpu_to_le64(inode->i_size);
1753 
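	/*
	 * Character and block devices keep their major/minor numbers in a
	 * Device Specification extended attribute (type 12); create one if
	 * the inode does not have it yet.
	 */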
1754 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1755 		struct regid *eid;
1756 		struct deviceSpec *dsea =
1757 			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1758 		if (!dsea) {
1759 			dsea = (struct deviceSpec *)
1760 				udf_add_extendedattr(inode,
1761 						     sizeof(struct deviceSpec) +
1762 						     sizeof(struct regid), 12, 0x3);
1763 			dsea->attrType = cpu_to_le32(12);
1764 			dsea->attrSubtype = 1;
1765 			dsea->attrLength = cpu_to_le32(
1766 						sizeof(struct deviceSpec) +
1767 						sizeof(struct regid));
1768 			dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
1769 		}
1770 		eid = (struct regid *)dsea->impUse;
1771 		memset(eid, 0, sizeof(*eid));
1772 		strcpy(eid->ident, UDF_ID_DEVELOPER);
1773 		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1774 		eid->identSuffix[1] = UDF_OS_ID_LINUX;
1775 		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1776 		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1777 	}
1778 
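	/*
	 * i_blocks counts 512-byte sectors; convert to filesystem blocks,
	 * rounding up.
	 */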
1779 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1780 		lb_recorded = 0; /* No extents => no blocks! */
1781 	else
1782 		lb_recorded =
1783 			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1784 			(blocksize_bits - 9);
1785 
1786 	if (iinfo->i_efe == 0) {
1787 		memcpy(bh->b_data + sizeof(struct fileEntry),
1788 		       iinfo->i_data,
1789 		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1790 		fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
1791 
1792 		udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
1793 		udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
1794 		udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
1795 		memset(&(fe->impIdent), 0, sizeof(struct regid));
1796 		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1797 		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1798 		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1799 		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
1800 		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1801 		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1802 		fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
1803 		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1804 		crclen = sizeof(struct fileEntry);
1805 	} else {
1806 		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
1807 		       iinfo->i_data,
1808 		       inode->i_sb->s_blocksize -
1809 					sizeof(struct extendedFileEntry));
1810 		efe->objectSize =
1811 			cpu_to_le64(inode->i_size + iinfo->i_lenStreams);
1812 		efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
1813 
1814 		if (iinfo->i_streamdir) {
1815 			struct long_ad *icb_lad = &efe->streamDirectoryICB;
1816 
1817 			icb_lad->extLocation =
1818 				cpu_to_lelb(iinfo->i_locStreamdir);
1819 			icb_lad->extLength =
1820 				cpu_to_le32(inode->i_sb->s_blocksize);
1821 		}
1822 
1823 		udf_adjust_time(iinfo, inode->i_atime);
1824 		udf_adjust_time(iinfo, inode->i_mtime);
1825 		udf_adjust_time(iinfo, inode->i_ctime);
1826 
1827 		udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
1828 		udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
1829 		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
1830 		udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
1831 
1832 		memset(&(efe->impIdent), 0, sizeof(efe->impIdent));
1833 		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1834 		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1835 		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1836 		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
1837 		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1838 		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1839 		efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
1840 		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1841 		crclen = sizeof(struct extendedFileEntry);
1842 	}
1843 
1844 finish:
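	/* Strategy 4096 records two ICB entries, plain strategy 4 just one. */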
1845 	if (iinfo->i_strat4096) {
1846 		fe->icbTag.strategyType = cpu_to_le16(4096);
1847 		fe->icbTag.strategyParameter = cpu_to_le16(1);
1848 		fe->icbTag.numEntries = cpu_to_le16(2);
1849 	} else {
1850 		fe->icbTag.strategyType = cpu_to_le16(4);
1851 		fe->icbTag.numEntries = cpu_to_le16(1);
1852 	}
1853 
1854 	if (iinfo->i_use)
1855 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
1856 	else if (S_ISDIR(inode->i_mode))
1857 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1858 	else if (S_ISREG(inode->i_mode))
1859 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1860 	else if (S_ISLNK(inode->i_mode))
1861 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1862 	else if (S_ISBLK(inode->i_mode))
1863 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1864 	else if (S_ISCHR(inode->i_mode))
1865 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1866 	else if (S_ISFIFO(inode->i_mode))
1867 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1868 	else if (S_ISSOCK(inode->i_mode))
1869 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1870 
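	/*
	 * Rebuild the allocation-type and setuid/setgid/sticky bits from the
	 * in-core state while preserving any other ICB tag flags already set.
	 */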
1871 	icbflags =	iinfo->i_alloc_type |
1872 			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1873 			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1874 			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1875 			(le16_to_cpu(fe->icbTag.flags) &
1876 				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1877 				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1878 
1879 	fe->icbTag.flags = cpu_to_le16(icbflags);
1880 	if (sbi->s_udfrev >= 0x0200)
1881 		fe->descTag.descVersion = cpu_to_le16(3);
1882 	else
1883 		fe->descTag.descVersion = cpu_to_le16(2);
1884 	fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
1885 	fe->descTag.tagLocation = cpu_to_le32(
1886 					iinfo->i_location.logicalBlockNum);
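	/*
	 * The descriptor CRC covers everything after the tag: the fixed part
	 * of the entry plus extended attributes and allocation descriptors.
	 */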
1887 	crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
1888 	fe->descTag.descCRCLength = cpu_to_le16(crclen);
1889 	fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
1890 						  crclen));
1891 	fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1892 
1893 	set_buffer_uptodate(bh);
1894 	unlock_buffer(bh);
1895 
1896 	/* write out the inode's on-disk descriptor block */
1897 	mark_buffer_dirty(bh);
1898 	if (do_sync) {
1899 		sync_dirty_buffer(bh);
1900 		if (buffer_write_io_error(bh)) {
1901 			udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
1902 				 inode->i_ino);
1903 			err = -EIO;
1904 		}
1905 	}
1906 	brelse(bh);
1907 
1908 	return err;
1909 }
1910 
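/*
 * Look up (or read in) the inode located at the given logical block
 * address.  A cached inode whose hidden flag disagrees with what the
 * caller expects is treated as filesystem corruption.
 */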
1911 struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
1912 			 bool hidden_inode)
1913 {
1914 	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1915 	struct inode *inode = iget_locked(sb, block);
1916 	int err;
1917 
1918 	if (!inode)
1919 		return ERR_PTR(-ENOMEM);
1920 
1921 	if (!(inode->i_state & I_NEW)) {
1922 		if (UDF_I(inode)->i_hidden != hidden_inode) {
1923 			iput(inode);
1924 			return ERR_PTR(-EFSCORRUPTED);
1925 		}
1926 		return inode;
1927 	}
1928 
1929 	memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
1930 	err = udf_read_inode(inode, hidden_inode);
1931 	if (err < 0) {
1932 		iget_failed(inode);
1933 		return ERR_PTR(err);
1934 	}
1935 	unlock_new_inode(inode);
1936 
1937 	return inode;
1938 }
1939 
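/*
 * Turn a freshly allocated block into an Allocation Extent Descriptor and
 * chain it from the current extent position, moving the last extent into
 * it when the old location has no room left; *epos is updated to point
 * into the new block.
 */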
1940 int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
1941 			    struct extent_position *epos)
1942 {
1943 	struct super_block *sb = inode->i_sb;
1944 	struct buffer_head *bh;
1945 	struct allocExtDesc *aed;
1946 	struct extent_position nepos;
1947 	struct kernel_lb_addr neloc;
1948 	int ver, adsize;
1949 
1950 	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1951 		adsize = sizeof(struct short_ad);
1952 	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1953 		adsize = sizeof(struct long_ad);
1954 	else
1955 		return -EIO;
1956 
1957 	neloc.logicalBlockNum = block;
1958 	neloc.partitionReferenceNum = epos->block.partitionReferenceNum;
1959 
1960 	bh = sb_getblk(sb, udf_get_lb_pblock(sb, &neloc, 0));
1961 	if (!bh)
1962 		return -EIO;
1963 	lock_buffer(bh);
1964 	memset(bh->b_data, 0x00, sb->s_blocksize);
1965 	set_buffer_uptodate(bh);
1966 	unlock_buffer(bh);
1967 	mark_buffer_dirty_inode(bh, inode);
1968 
1969 	aed = (struct allocExtDesc *)(bh->b_data);
1970 	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) {
1971 		aed->previousAllocExtLocation =
1972 				cpu_to_le32(epos->block.logicalBlockNum);
1973 	}
1974 	aed->lengthAllocDescs = cpu_to_le32(0);
1975 	if (UDF_SB(sb)->s_udfrev >= 0x0200)
1976 		ver = 3;
1977 	else
1978 		ver = 2;
1979 	udf_new_tag(bh->b_data, TAG_IDENT_AED, ver, 1, block,
1980 		    sizeof(struct tag));
1981 
1982 	nepos.block = neloc;
1983 	nepos.offset = sizeof(struct allocExtDesc);
1984 	nepos.bh = bh;
1985 
1986 	/*
1987 	 * Do we have to copy current last extent to make space for indirect
1988 	 * Do we have to copy the current last extent to make space for the
1989 	 * indirect one?
1990 	if (epos->offset + adsize > sb->s_blocksize) {
1991 		struct kernel_lb_addr cp_loc;
1992 		uint32_t cp_len;
1993 		int cp_type;
1994 
1995 		epos->offset -= adsize;
1996 		cp_type = udf_current_aext(inode, epos, &cp_loc, &cp_len, 0);
1997 		cp_len |= ((uint32_t)cp_type) << 30;
1998 
1999 		__udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
2000 		udf_write_aext(inode, epos, &nepos.block,
2001 			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
2002 	} else {
2003 		__udf_add_aext(inode, epos, &nepos.block,
2004 			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
2005 	}
2006 
2007 	brelse(epos->bh);
2008 	*epos = nepos;
2009 
2010 	return 0;
2011 }
2012 
2013 /*
2014  * Append extent at the given position - should be the first free one in inode
2015  * / indirect extent. This function assumes there is enough space in the inode
2016  * or indirect extent. Use udf_add_aext() if you didn't check for this before.
2017  */
2018 int __udf_add_aext(struct inode *inode, struct extent_position *epos,
2019 		   struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2020 {
2021 	struct udf_inode_info *iinfo = UDF_I(inode);
2022 	struct allocExtDesc *aed;
2023 	int adsize;
2024 
2025 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2026 		adsize = sizeof(struct short_ad);
2027 	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2028 		adsize = sizeof(struct long_ad);
2029 	else
2030 		return -EIO;
2031 
2032 	if (!epos->bh) {
2033 		WARN_ON(iinfo->i_lenAlloc !=
2034 			epos->offset - udf_file_entry_alloc_offset(inode));
2035 	} else {
2036 		aed = (struct allocExtDesc *)epos->bh->b_data;
2037 		WARN_ON(le32_to_cpu(aed->lengthAllocDescs) !=
2038 			epos->offset - sizeof(struct allocExtDesc));
2039 		WARN_ON(epos->offset + adsize > inode->i_sb->s_blocksize);
2040 	}
2041 
2042 	udf_write_aext(inode, epos, eloc, elen, inc);
2043 
2044 	if (!epos->bh) {
2045 		iinfo->i_lenAlloc += adsize;
2046 		mark_inode_dirty(inode);
2047 	} else {
2048 		aed = (struct allocExtDesc *)epos->bh->b_data;
2049 		le32_add_cpu(&aed->lengthAllocDescs, adsize);
2050 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2051 				UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2052 			udf_update_tag(epos->bh->b_data,
2053 					epos->offset + (inc ? 0 : adsize));
2054 		else
2055 			udf_update_tag(epos->bh->b_data,
2056 					sizeof(struct allocExtDesc));
2057 		mark_buffer_dirty_inode(epos->bh, inode);
2058 	}
2059 
2060 	return 0;
2061 }
2062 
2063 /*
2064  * Append extent at given position - should be the first free one in inode
2065  * / indirect extent. Takes care of allocating and linking indirect blocks.
2066  */
2067 int udf_add_aext(struct inode *inode, struct extent_position *epos,
2068 		 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2069 {
2070 	int adsize;
2071 	struct super_block *sb = inode->i_sb;
2072 
2073 	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2074 		adsize = sizeof(struct short_ad);
2075 	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2076 		adsize = sizeof(struct long_ad);
2077 	else
2078 		return -EIO;
2079 
2080 	if (epos->offset + (2 * adsize) > sb->s_blocksize) {
2081 		int err;
2082 		udf_pblk_t new_block;
2083 
2084 		new_block = udf_new_block(sb, NULL,
2085 					  epos->block.partitionReferenceNum,
2086 					  epos->block.logicalBlockNum, &err);
2087 		if (!new_block)
2088 			return -ENOSPC;
2089 
2090 		err = udf_setup_indirect_aext(inode, new_block, epos);
2091 		if (err)
2092 			return err;
2093 	}
2094 
2095 	return __udf_add_aext(inode, epos, eloc, elen, inc);
2096 }
2097 
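/*
 * Write one allocation descriptor (extent location plus length, with the
 * extent type in the top two bits of the length) at the current position,
 * using short_ad or long_ad form as dictated by the inode, and optionally
 * advance the position past it.
 */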
2098 void udf_write_aext(struct inode *inode, struct extent_position *epos,
2099 		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2100 {
2101 	int adsize;
2102 	uint8_t *ptr;
2103 	struct short_ad *sad;
2104 	struct long_ad *lad;
2105 	struct udf_inode_info *iinfo = UDF_I(inode);
2106 
2107 	if (!epos->bh)
2108 		ptr = iinfo->i_data + epos->offset -
2109 			udf_file_entry_alloc_offset(inode) +
2110 			iinfo->i_lenEAttr;
2111 	else
2112 		ptr = epos->bh->b_data + epos->offset;
2113 
2114 	switch (iinfo->i_alloc_type) {
2115 	case ICBTAG_FLAG_AD_SHORT:
2116 		sad = (struct short_ad *)ptr;
2117 		sad->extLength = cpu_to_le32(elen);
2118 		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
2119 		adsize = sizeof(struct short_ad);
2120 		break;
2121 	case ICBTAG_FLAG_AD_LONG:
2122 		lad = (struct long_ad *)ptr;
2123 		lad->extLength = cpu_to_le32(elen);
2124 		lad->extLocation = cpu_to_lelb(*eloc);
2125 		memset(lad->impUse, 0x00, sizeof(lad->impUse));
2126 		adsize = sizeof(struct long_ad);
2127 		break;
2128 	default:
2129 		return;
2130 	}
2131 
2132 	if (epos->bh) {
2133 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2134 		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
2135 			struct allocExtDesc *aed =
2136 				(struct allocExtDesc *)epos->bh->b_data;
2137 			udf_update_tag(epos->bh->b_data,
2138 				       le32_to_cpu(aed->lengthAllocDescs) +
2139 				       sizeof(struct allocExtDesc));
2140 		}
2141 		mark_buffer_dirty_inode(epos->bh, inode);
2142 	} else {
2143 		mark_inode_dirty(inode);
2144 	}
2145 
2146 	if (inc)
2147 		epos->offset += adsize;
2148 }
2149 
2150 /*
2151  * Only 1 indirect extent in a row really makes sense but allow up to 16 in case
2152  * someone does some weird stuff.
2153  */
2154 #define UDF_MAX_INDIR_EXTS 16
2155 
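/*
 * Return the type of the next extent at *epos and fill in its location and
 * length, transparently following pointers to continuation allocation
 * extent blocks (bounded by UDF_MAX_INDIR_EXTS).
 */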
2156 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
2157 		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
2158 {
2159 	int8_t etype;
2160 	unsigned int indirections = 0;
2161 
2162 	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
2163 	       (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
2164 		udf_pblk_t block;
2165 
2166 		if (++indirections > UDF_MAX_INDIR_EXTS) {
2167 			udf_err(inode->i_sb,
2168 				"too many indirect extents in inode %lu\n",
2169 				inode->i_ino);
2170 			return -1;
2171 		}
2172 
2173 		epos->block = *eloc;
2174 		epos->offset = sizeof(struct allocExtDesc);
2175 		brelse(epos->bh);
2176 		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
2177 		epos->bh = sb_bread(inode->i_sb, block);
2178 		if (!epos->bh) {
2179 			udf_debug("reading block %u failed!\n", block);
2180 			return -1;
2181 		}
2182 	}
2183 
2184 	return etype;
2185 }
2186 
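/*
 * Decode the allocation descriptor at the current position without
 * following indirections: the extent type is the top two bits of
 * extLength, the remaining bits the byte length.  Returns -1 when the
 * position runs past the end of the descriptor area.
 */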
2187 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
2188 			struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
2189 {
2190 	int alen;
2191 	int8_t etype;
2192 	uint8_t *ptr;
2193 	struct short_ad *sad;
2194 	struct long_ad *lad;
2195 	struct udf_inode_info *iinfo = UDF_I(inode);
2196 
2197 	if (!epos->bh) {
2198 		if (!epos->offset)
2199 			epos->offset = udf_file_entry_alloc_offset(inode);
2200 		ptr = iinfo->i_data + epos->offset -
2201 			udf_file_entry_alloc_offset(inode) +
2202 			iinfo->i_lenEAttr;
2203 		alen = udf_file_entry_alloc_offset(inode) +
2204 							iinfo->i_lenAlloc;
2205 	} else {
2206 		if (!epos->offset)
2207 			epos->offset = sizeof(struct allocExtDesc);
2208 		ptr = epos->bh->b_data + epos->offset;
2209 		alen = sizeof(struct allocExtDesc) +
2210 			le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
2211 							lengthAllocDescs);
2212 	}
2213 
2214 	switch (iinfo->i_alloc_type) {
2215 	case ICBTAG_FLAG_AD_SHORT:
2216 		sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
2217 		if (!sad)
2218 			return -1;
2219 		etype = le32_to_cpu(sad->extLength) >> 30;
2220 		eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
2221 		eloc->partitionReferenceNum =
2222 				iinfo->i_location.partitionReferenceNum;
2223 		*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
2224 		break;
2225 	case ICBTAG_FLAG_AD_LONG:
2226 		lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
2227 		if (!lad)
2228 			return -1;
2229 		etype = le32_to_cpu(lad->extLength) >> 30;
2230 		*eloc = lelb_to_cpu(lad->extLocation);
2231 		*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
2232 		break;
2233 	default:
2234 		udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
2235 		return -1;
2236 	}
2237 
2238 	return etype;
2239 }
2240 
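/*
 * Insert a new extent at epos by rippling each following extent one slot
 * forward and appending the displaced last extent at the end.
 */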
2241 static int udf_insert_aext(struct inode *inode, struct extent_position epos,
2242 			   struct kernel_lb_addr neloc, uint32_t nelen)
2243 {
2244 	struct kernel_lb_addr oeloc;
2245 	uint32_t oelen;
2246 	int8_t etype;
2247 	int err;
2248 
2249 	if (epos.bh)
2250 		get_bh(epos.bh);
2251 
2252 	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
2253 		udf_write_aext(inode, &epos, &neloc, nelen, 1);
2254 		neloc = oeloc;
2255 		nelen = (etype << 30) | oelen;
2256 	}
2257 	err = udf_add_aext(inode, &epos, &neloc, nelen, 1);
2258 	brelse(epos.bh);
2259 
2260 	return err;
2261 }
2262 
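/*
 * Delete the extent at epos by shifting all following extents back one
 * slot, freeing the trailing allocation extent block if it becomes unused.
 * Returns -1 when there is no extent at epos.
 */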
2263 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
2264 {
2265 	struct extent_position oepos;
2266 	int adsize;
2267 	int8_t etype;
2268 	struct allocExtDesc *aed;
2269 	struct udf_inode_info *iinfo;
2270 	struct kernel_lb_addr eloc;
2271 	uint32_t elen;
2272 
2273 	if (epos.bh) {
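		/*
		 * Both epos.bh and oepos.bh are brelse()d at the end while the
		 * caller keeps its own reference, so take two extra references
		 * here.
		 */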
2274 		get_bh(epos.bh);
2275 		get_bh(epos.bh);
2276 	}
2277 
2278 	iinfo = UDF_I(inode);
2279 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2280 		adsize = sizeof(struct short_ad);
2281 	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2282 		adsize = sizeof(struct long_ad);
2283 	else
2284 		adsize = 0;
2285 
2286 	oepos = epos;
2287 	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
2288 		return -1;
2289 
2290 	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
2291 		udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
2292 		if (oepos.bh != epos.bh) {
2293 			oepos.block = epos.block;
2294 			brelse(oepos.bh);
2295 			get_bh(epos.bh);
2296 			oepos.bh = epos.bh;
2297 			oepos.offset = epos.offset - adsize;
2298 		}
2299 	}
2300 	memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
2301 	elen = 0;
2302 
2303 	if (epos.bh != oepos.bh) {
2304 		udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
2305 		udf_write_aext(inode, &oepos, &eloc, elen, 1);
2306 		udf_write_aext(inode, &oepos, &eloc, elen, 1);
2307 		if (!oepos.bh) {
2308 			iinfo->i_lenAlloc -= (adsize * 2);
2309 			mark_inode_dirty(inode);
2310 		} else {
2311 			aed = (struct allocExtDesc *)oepos.bh->b_data;
2312 			le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
2313 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2314 			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2315 				udf_update_tag(oepos.bh->b_data,
2316 						oepos.offset - (2 * adsize));
2317 			else
2318 				udf_update_tag(oepos.bh->b_data,
2319 						sizeof(struct allocExtDesc));
2320 			mark_buffer_dirty_inode(oepos.bh, inode);
2321 		}
2322 	} else {
2323 		udf_write_aext(inode, &oepos, &eloc, elen, 1);
2324 		if (!oepos.bh) {
2325 			iinfo->i_lenAlloc -= adsize;
2326 			mark_inode_dirty(inode);
2327 		} else {
2328 			aed = (struct allocExtDesc *)oepos.bh->b_data;
2329 			le32_add_cpu(&aed->lengthAllocDescs, -adsize);
2330 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2331 			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2332 				udf_update_tag(oepos.bh->b_data,
2333 						epos.offset - adsize);
2334 			else
2335 				udf_update_tag(oepos.bh->b_data,
2336 						sizeof(struct allocExtDesc));
2337 			mark_buffer_dirty_inode(oepos.bh, inode);
2338 		}
2339 	}
2340 
2341 	brelse(epos.bh);
2342 	brelse(oepos.bh);
2343 
2344 	return (elen >> 30);
2345 }
2346 
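/*
 * Map a file block number to the extent that contains it: on return *pos,
 * *eloc and *elen describe the extent, *offset is the block offset within
 * it, and the return value is the extent type (-1 when the block lies
 * beyond the last extent).  The per-inode extent cache is used as a
 * starting point when it covers the requested offset.
 */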
2347 int8_t inode_bmap(struct inode *inode, sector_t block,
2348 		  struct extent_position *pos, struct kernel_lb_addr *eloc,
2349 		  uint32_t *elen, sector_t *offset)
2350 {
2351 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
2352 	loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
2353 	int8_t etype;
2354 	struct udf_inode_info *iinfo;
2355 
2356 	iinfo = UDF_I(inode);
2357 	if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
2358 		pos->offset = 0;
2359 		pos->block = iinfo->i_location;
2360 		pos->bh = NULL;
2361 	}
2362 	*elen = 0;
2363 	do {
2364 		etype = udf_next_aext(inode, pos, eloc, elen, 1);
2365 		if (etype == -1) {
2366 			*offset = (bcount - lbcount) >> blocksize_bits;
2367 			iinfo->i_lenExtents = lbcount;
2368 			return -1;
2369 		}
2370 		lbcount += *elen;
2371 	} while (lbcount <= bcount);
2372 	/* update extent cache */
2373 	udf_update_extent_cache(inode, lbcount - *elen, pos);
2374 	*offset = (bcount + *elen - lbcount) >> blocksize_bits;
2375 
2376 	return etype;
2377 }
2378