xref: /openbmc/linux/fs/udf/super.c (revision c4ee0af3)
1 /*
2  * super.c
3  *
4  * PURPOSE
5  *  Super block routines for the OSTA-UDF(tm) filesystem.
6  *
7  * DESCRIPTION
8  *  OSTA-UDF(tm) = Optical Storage Technology Association
9  *  Universal Disk Format.
10  *
11  *  This code is based on version 2.00 of the UDF specification,
12  *  and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
13  *    http://www.osta.org/
14  *    http://www.ecma.ch/
15  *    http://www.iso.org/
16  *
17  * COPYRIGHT
18  *  This file is distributed under the terms of the GNU General Public
19  *  License (GPL). Copies of the GPL can be obtained from:
20  *    ftp://prep.ai.mit.edu/pub/gnu/GPL
21  *  Each contributing author retains all rights to their own work.
22  *
23  *  (C) 1998 Dave Boynton
24  *  (C) 1998-2004 Ben Fennema
25  *  (C) 2000 Stelias Computing Inc
26  *
27  * HISTORY
28  *
29  *  09/24/98 dgb  changed to allow compiling outside of kernel, and
30  *                added some debugging.
31  *  10/01/98 dgb  updated to allow (some) possibility of compiling w/2.0.34
32  *  10/16/98      attempting some multi-session support
33  *  10/17/98      added freespace count for "df"
34  *  11/11/98 gr   added novrs option
35  *  11/26/98 dgb  added fileset,anchor mount options
36  *  12/06/98 blf  really hosed things royally. vat/sparing support. sequenced
37  *                vol descs. rewrote option handling based on isofs
38  *  12/20/98      find the free space bitmap (if it exists)
39  */
40 
41 #include "udfdecl.h"
42 
43 #include <linux/blkdev.h>
44 #include <linux/slab.h>
45 #include <linux/kernel.h>
46 #include <linux/module.h>
47 #include <linux/parser.h>
48 #include <linux/stat.h>
49 #include <linux/cdrom.h>
50 #include <linux/nls.h>
51 #include <linux/buffer_head.h>
52 #include <linux/vfs.h>
53 #include <linux/vmalloc.h>
54 #include <linux/errno.h>
55 #include <linux/mount.h>
56 #include <linux/seq_file.h>
57 #include <linux/bitmap.h>
58 #include <linux/crc-itu-t.h>
59 #include <linux/log2.h>
60 #include <asm/byteorder.h>
61 
62 #include "udf_sb.h"
63 #include "udf_i.h"
64 
65 #include <linux/init.h>
66 #include <asm/uaccess.h>
67 
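/*
 * Indices into the vds[] array that udf_process_sequence() uses to remember
 * where each kind of descriptor was seen while walking a Volume Descriptor
 * Sequence.
 */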
68 #define VDS_POS_PRIMARY_VOL_DESC	0
69 #define VDS_POS_UNALLOC_SPACE_DESC	1
70 #define VDS_POS_LOGICAL_VOL_DESC	2
71 #define VDS_POS_PARTITION_DESC		3
72 #define VDS_POS_IMP_USE_VOL_DESC	4
73 #define VDS_POS_VOL_DESC_PTR		5
74 #define VDS_POS_TERMINATING_DESC	6
75 #define VDS_POS_LENGTH			7
76 
77 #define UDF_DEFAULT_BLOCKSIZE 2048
78 
79 #define VSD_FIRST_SECTOR_OFFSET		32768
80 #define VSD_MAX_SECTOR_OFFSET		0x800000
81 
82 enum { UDF_MAX_LINKS = 0xffff };
83 
84 /* These are the "meat" - everything else is stuffing */
85 static int udf_fill_super(struct super_block *, void *, int);
86 static void udf_put_super(struct super_block *);
87 static int udf_sync_fs(struct super_block *, int);
88 static int udf_remount_fs(struct super_block *, int *, char *);
89 static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
90 static int udf_find_fileset(struct super_block *, struct kernel_lb_addr *,
91 			    struct kernel_lb_addr *);
92 static void udf_load_fileset(struct super_block *, struct buffer_head *,
93 			     struct kernel_lb_addr *);
94 static void udf_open_lvid(struct super_block *);
95 static void udf_close_lvid(struct super_block *);
96 static unsigned int udf_count_free(struct super_block *);
97 static int udf_statfs(struct dentry *, struct kstatfs *);
98 static int udf_show_options(struct seq_file *, struct dentry *);
99 
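/*
 * Return a pointer to the Implementation Use area of the cached Logical
 * Volume Integrity Descriptor, or NULL if there is no LVID or it looks
 * corrupted.  The impUse area is preceded by two arrays of numOfPartitions
 * 32-bit values each (freeSpaceTable and sizeTable), which is why the
 * returned pointer is offset by partnum * 2 * sizeof(uint32_t).
 */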
100 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
101 {
102 	struct logicalVolIntegrityDesc *lvid;
103 	unsigned int partnum;
104 	unsigned int offset;
105 
106 	if (!UDF_SB(sb)->s_lvid_bh)
107 		return NULL;
108 	lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
109 	partnum = le32_to_cpu(lvid->numOfPartitions);
110 	if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
111 	     offsetof(struct logicalVolIntegrityDesc, impUse)) /
112 	     (2 * sizeof(uint32_t)) < partnum) {
113 		udf_err(sb, "Logical volume integrity descriptor corrupted "
114 			"(numOfPartitions = %u)!\n", partnum);
115 		return NULL;
116 	}
117 	/* The offset is to skip freeSpaceTable and sizeTable arrays */
118 	offset = partnum * 2 * sizeof(uint32_t);
119 	return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
120 }
121 
122 /* UDF filesystem type */
123 static struct dentry *udf_mount(struct file_system_type *fs_type,
124 		      int flags, const char *dev_name, void *data)
125 {
126 	return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super);
127 }
128 
129 static struct file_system_type udf_fstype = {
130 	.owner		= THIS_MODULE,
131 	.name		= "udf",
132 	.mount		= udf_mount,
133 	.kill_sb	= kill_block_super,
134 	.fs_flags	= FS_REQUIRES_DEV,
135 };
136 MODULE_ALIAS_FS("udf");
137 
138 static struct kmem_cache *udf_inode_cachep;
139 
140 static struct inode *udf_alloc_inode(struct super_block *sb)
141 {
142 	struct udf_inode_info *ei;
143 	ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
144 	if (!ei)
145 		return NULL;
146 
147 	ei->i_unique = 0;
148 	ei->i_lenExtents = 0;
149 	ei->i_next_alloc_block = 0;
150 	ei->i_next_alloc_goal = 0;
151 	ei->i_strat4096 = 0;
152 	init_rwsem(&ei->i_data_sem);
153 	ei->cached_extent.lstart = -1;
154 	spin_lock_init(&ei->i_extent_cache_lock);
155 
156 	return &ei->vfs_inode;
157 }
158 
159 static void udf_i_callback(struct rcu_head *head)
160 {
161 	struct inode *inode = container_of(head, struct inode, i_rcu);
162 	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
163 }
164 
165 static void udf_destroy_inode(struct inode *inode)
166 {
167 	call_rcu(&inode->i_rcu, udf_i_callback);
168 }
169 
170 static void init_once(void *foo)
171 {
172 	struct udf_inode_info *ei = (struct udf_inode_info *)foo;
173 
174 	ei->i_ext.i_data = NULL;
175 	inode_init_once(&ei->vfs_inode);
176 }
177 
178 static int init_inodecache(void)
179 {
180 	udf_inode_cachep = kmem_cache_create("udf_inode_cache",
181 					     sizeof(struct udf_inode_info),
182 					     0, (SLAB_RECLAIM_ACCOUNT |
183 						 SLAB_MEM_SPREAD),
184 					     init_once);
185 	if (!udf_inode_cachep)
186 		return -ENOMEM;
187 	return 0;
188 }
189 
190 static void destroy_inodecache(void)
191 {
192 	/*
193 	 * Make sure all delayed rcu free inodes are flushed before we
194 	 * destroy cache.
195 	 */
196 	rcu_barrier();
197 	kmem_cache_destroy(udf_inode_cachep);
198 }
199 
200 /* Superblock operations */
201 static const struct super_operations udf_sb_ops = {
202 	.alloc_inode	= udf_alloc_inode,
203 	.destroy_inode	= udf_destroy_inode,
204 	.write_inode	= udf_write_inode,
205 	.evict_inode	= udf_evict_inode,
206 	.put_super	= udf_put_super,
207 	.sync_fs	= udf_sync_fs,
208 	.statfs		= udf_statfs,
209 	.remount_fs	= udf_remount_fs,
210 	.show_options	= udf_show_options,
211 };
212 
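/*
 * Scratch copy of the mount options; udf_parse_options() fills it in and the
 * relevant fields are then copied into struct udf_sb_info (see for example
 * udf_remount_fs() below).
 */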
213 struct udf_options {
214 	unsigned char novrs;
215 	unsigned int blocksize;
216 	unsigned int session;
217 	unsigned int lastblock;
218 	unsigned int anchor;
219 	unsigned int volume;
220 	unsigned short partition;
221 	unsigned int fileset;
222 	unsigned int rootdir;
223 	unsigned int flags;
224 	umode_t umask;
225 	kgid_t gid;
226 	kuid_t uid;
227 	umode_t fmode;
228 	umode_t dmode;
229 	struct nls_table *nls_map;
230 };
231 
232 static int __init init_udf_fs(void)
233 {
234 	int err;
235 
236 	err = init_inodecache();
237 	if (err)
238 		goto out1;
239 	err = register_filesystem(&udf_fstype);
240 	if (err)
241 		goto out;
242 
243 	return 0;
244 
245 out:
246 	destroy_inodecache();
247 
248 out1:
249 	return err;
250 }
251 
252 static void __exit exit_udf_fs(void)
253 {
254 	unregister_filesystem(&udf_fstype);
255 	destroy_inodecache();
256 }
257 
258 module_init(init_udf_fs)
259 module_exit(exit_udf_fs)
260 
261 static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
262 {
263 	struct udf_sb_info *sbi = UDF_SB(sb);
264 
265 	sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map),
266 				  GFP_KERNEL);
267 	if (!sbi->s_partmaps) {
268 		udf_err(sb, "Unable to allocate space for %d partition maps\n",
269 			count);
270 		sbi->s_partitions = 0;
271 		return -ENOMEM;
272 	}
273 
274 	sbi->s_partitions = count;
275 	return 0;
276 }
277 
278 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
279 {
280 	int i;
281 	int nr_groups = bitmap->s_nr_groups;
282 	int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) *
283 						nr_groups);
284 
285 	for (i = 0; i < nr_groups; i++)
286 		if (bitmap->s_block_bitmap[i])
287 			brelse(bitmap->s_block_bitmap[i]);
288 
289 	if (size <= PAGE_SIZE)
290 		kfree(bitmap);
291 	else
292 		vfree(bitmap);
293 }
294 
295 static void udf_free_partition(struct udf_part_map *map)
296 {
297 	int i;
298 	struct udf_meta_data *mdata;
299 
300 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
301 		iput(map->s_uspace.s_table);
302 	if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
303 		iput(map->s_fspace.s_table);
304 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
305 		udf_sb_free_bitmap(map->s_uspace.s_bitmap);
306 	if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
307 		udf_sb_free_bitmap(map->s_fspace.s_bitmap);
308 	if (map->s_partition_type == UDF_SPARABLE_MAP15)
309 		for (i = 0; i < 4; i++)
310 			brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
311 	else if (map->s_partition_type == UDF_METADATA_MAP25) {
312 		mdata = &map->s_type_specific.s_metadata;
313 		iput(mdata->s_metadata_fe);
314 		mdata->s_metadata_fe = NULL;
315 
316 		iput(mdata->s_mirror_fe);
317 		mdata->s_mirror_fe = NULL;
318 
319 		iput(mdata->s_bitmap_fe);
320 		mdata->s_bitmap_fe = NULL;
321 	}
322 }
323 
324 static void udf_sb_free_partitions(struct super_block *sb)
325 {
326 	struct udf_sb_info *sbi = UDF_SB(sb);
327 	int i;
328 	if (sbi->s_partmaps == NULL)
329 		return;
330 	for (i = 0; i < sbi->s_partitions; i++)
331 		udf_free_partition(&sbi->s_partmaps[i]);
332 	kfree(sbi->s_partmaps);
333 	sbi->s_partmaps = NULL;
334 }
335 
336 static int udf_show_options(struct seq_file *seq, struct dentry *root)
337 {
338 	struct super_block *sb = root->d_sb;
339 	struct udf_sb_info *sbi = UDF_SB(sb);
340 
341 	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
342 		seq_puts(seq, ",nostrict");
343 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
344 		seq_printf(seq, ",bs=%lu", sb->s_blocksize);
345 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
346 		seq_puts(seq, ",unhide");
347 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
348 		seq_puts(seq, ",undelete");
349 	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
350 		seq_puts(seq, ",noadinicb");
351 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
352 		seq_puts(seq, ",shortad");
353 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
354 		seq_puts(seq, ",uid=forget");
355 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_IGNORE))
356 		seq_puts(seq, ",uid=ignore");
357 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
358 		seq_puts(seq, ",gid=forget");
359 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE))
360 		seq_puts(seq, ",gid=ignore");
361 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
362 		seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
363 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
364 		seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
365 	if (sbi->s_umask != 0)
366 		seq_printf(seq, ",umask=%ho", sbi->s_umask);
367 	if (sbi->s_fmode != UDF_INVALID_MODE)
368 		seq_printf(seq, ",mode=%ho", sbi->s_fmode);
369 	if (sbi->s_dmode != UDF_INVALID_MODE)
370 		seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
371 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
372 		seq_printf(seq, ",session=%u", sbi->s_session);
373 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
374 		seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
375 	if (sbi->s_anchor != 0)
376 		seq_printf(seq, ",anchor=%u", sbi->s_anchor);
377 	/*
378 	 * volume, partition, fileset and rootdir seem to be ignored
379 	 * currently
380 	 */
381 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
382 		seq_puts(seq, ",utf8");
383 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
384 		seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
385 
386 	return 0;
387 }
388 
389 /*
390  * udf_parse_options
391  *
392  * PURPOSE
393  *	Parse mount options.
394  *
395  * DESCRIPTION
396  *	The following mount options are supported:
397  *
398  *	gid=		Set the default group.
399  *	umask=		Set the default umask.
400  *	mode=		Set the default file permissions.
401  *	dmode=		Set the default directory permissions.
402  *	uid=		Set the default user.
403  *	bs=		Set the block size.
404  *	unhide		Show otherwise hidden files.
405  *	undelete	Show deleted files in lists.
406  *	adinicb		Embed data in the inode (default)
407  *	noadinicb	Don't embed data in the inode
408  *	shortad		Use short allocation descriptors
409  *	longad		Use long allocation descriptors (default)
410  *	nostrict	Unset strict conformance
411  *	iocharset=	Set the NLS character set
412  *
413  *	The remaining are for debugging and disaster recovery:
414  *
415  *	novrs		Skip the volume recognition sequence check
416  *
417  *	The following expect an offset from 0.
418  *
419  *	session=	Set the CDROM session (default= last session)
420  *	anchor=		Override standard anchor location. (default= 256)
421  *	volume=		Override the VolumeDesc location. (unused)
422  *	partition=	Override the PartitionDesc location. (unused)
423  *	lastblock=	Set the last block of the filesystem.
424  *
425  *	The following expect an offset from the partition root.
426  *
427  *	fileset=	Override the fileset block location. (unused)
428  *	rootdir=	Override the root directory location. (unused)
429  *		WARNING: overriding the rootdir to a non-directory may
430  *		yield highly unpredictable results.
431  *
432  * PRE-CONDITIONS
433  *	options		Pointer to mount options string.
434  *	uopts		Pointer to mount options variable.
435  *
436  * POST-CONDITIONS
437  *	<return>	1	Mount options parsed okay.
438  *	<return>	0	Error parsing mount options.
439  *
440  * HISTORY
441  *	July 1, 1997 - Andrew E. Mileski
442  *	Written, tested, and released.
443  */
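/*
 * Illustrative example (not from the original sources; the device path and
 * mount point are arbitrary):
 *
 *	mount -t udf -o uid=1000,gid=1000,umask=022 /dev/sr0 /mnt
 *
 * which exercises the uid=, gid= and umask= options documented above.
 */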
444 
445 enum {
446 	Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
447 	Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
448 	Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
449 	Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
450 	Opt_rootdir, Opt_utf8, Opt_iocharset,
451 	Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
452 	Opt_fmode, Opt_dmode
453 };
454 
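/*
 * Option strings recognised by udf_parse_options().  The %u, %o and %s
 * patterns are consumed by match_token()/match_int()/match_octal() from
 * <linux/parser.h> in the switch statement below.
 */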
455 static const match_table_t tokens = {
456 	{Opt_novrs,	"novrs"},
457 	{Opt_nostrict,	"nostrict"},
458 	{Opt_bs,	"bs=%u"},
459 	{Opt_unhide,	"unhide"},
460 	{Opt_undelete,	"undelete"},
461 	{Opt_noadinicb,	"noadinicb"},
462 	{Opt_adinicb,	"adinicb"},
463 	{Opt_shortad,	"shortad"},
464 	{Opt_longad,	"longad"},
465 	{Opt_uforget,	"uid=forget"},
466 	{Opt_uignore,	"uid=ignore"},
467 	{Opt_gforget,	"gid=forget"},
468 	{Opt_gignore,	"gid=ignore"},
469 	{Opt_gid,	"gid=%u"},
470 	{Opt_uid,	"uid=%u"},
471 	{Opt_umask,	"umask=%o"},
472 	{Opt_session,	"session=%u"},
473 	{Opt_lastblock,	"lastblock=%u"},
474 	{Opt_anchor,	"anchor=%u"},
475 	{Opt_volume,	"volume=%u"},
476 	{Opt_partition,	"partition=%u"},
477 	{Opt_fileset,	"fileset=%u"},
478 	{Opt_rootdir,	"rootdir=%u"},
479 	{Opt_utf8,	"utf8"},
480 	{Opt_iocharset,	"iocharset=%s"},
481 	{Opt_fmode,     "mode=%o"},
482 	{Opt_dmode,     "dmode=%o"},
483 	{Opt_err,	NULL}
484 };
485 
486 static int udf_parse_options(char *options, struct udf_options *uopt,
487 			     bool remount)
488 {
489 	char *p;
490 	int option;
491 
492 	uopt->novrs = 0;
493 	uopt->partition = 0xFFFF;
494 	uopt->session = 0xFFFFFFFF;
495 	uopt->lastblock = 0;
496 	uopt->anchor = 0;
497 	uopt->volume = 0xFFFFFFFF;
498 	uopt->rootdir = 0xFFFFFFFF;
499 	uopt->fileset = 0xFFFFFFFF;
500 	uopt->nls_map = NULL;
501 
502 	if (!options)
503 		return 1;
504 
505 	while ((p = strsep(&options, ",")) != NULL) {
506 		substring_t args[MAX_OPT_ARGS];
507 		int token;
508 		if (!*p)
509 			continue;
510 
511 		token = match_token(p, tokens, args);
512 		switch (token) {
513 		case Opt_novrs:
514 			uopt->novrs = 1;
515 			break;
516 		case Opt_bs:
517 			if (match_int(&args[0], &option))
518 				return 0;
519 			uopt->blocksize = option;
520 			uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
521 			break;
522 		case Opt_unhide:
523 			uopt->flags |= (1 << UDF_FLAG_UNHIDE);
524 			break;
525 		case Opt_undelete:
526 			uopt->flags |= (1 << UDF_FLAG_UNDELETE);
527 			break;
528 		case Opt_noadinicb:
529 			uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
530 			break;
531 		case Opt_adinicb:
532 			uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
533 			break;
534 		case Opt_shortad:
535 			uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
536 			break;
537 		case Opt_longad:
538 			uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
539 			break;
540 		case Opt_gid:
541 			if (match_int(args, &option))
542 				return 0;
543 			uopt->gid = make_kgid(current_user_ns(), option);
544 			if (!gid_valid(uopt->gid))
545 				return 0;
546 			uopt->flags |= (1 << UDF_FLAG_GID_SET);
547 			break;
548 		case Opt_uid:
549 			if (match_int(args, &option))
550 				return 0;
551 			uopt->uid = make_kuid(current_user_ns(), option);
552 			if (!uid_valid(uopt->uid))
553 				return 0;
554 			uopt->flags |= (1 << UDF_FLAG_UID_SET);
555 			break;
556 		case Opt_umask:
557 			if (match_octal(args, &option))
558 				return 0;
559 			uopt->umask = option;
560 			break;
561 		case Opt_nostrict:
562 			uopt->flags &= ~(1 << UDF_FLAG_STRICT);
563 			break;
564 		case Opt_session:
565 			if (match_int(args, &option))
566 				return 0;
567 			uopt->session = option;
568 			if (!remount)
569 				uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
570 			break;
571 		case Opt_lastblock:
572 			if (match_int(args, &option))
573 				return 0;
574 			uopt->lastblock = option;
575 			if (!remount)
576 				uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
577 			break;
578 		case Opt_anchor:
579 			if (match_int(args, &option))
580 				return 0;
581 			uopt->anchor = option;
582 			break;
583 		case Opt_volume:
584 			if (match_int(args, &option))
585 				return 0;
586 			uopt->volume = option;
587 			break;
588 		case Opt_partition:
589 			if (match_int(args, &option))
590 				return 0;
591 			uopt->partition = option;
592 			break;
593 		case Opt_fileset:
594 			if (match_int(args, &option))
595 				return 0;
596 			uopt->fileset = option;
597 			break;
598 		case Opt_rootdir:
599 			if (match_int(args, &option))
600 				return 0;
601 			uopt->rootdir = option;
602 			break;
603 		case Opt_utf8:
604 			uopt->flags |= (1 << UDF_FLAG_UTF8);
605 			break;
606 #ifdef CONFIG_UDF_NLS
607 		case Opt_iocharset:
608 			uopt->nls_map = load_nls(args[0].from);
609 			uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
610 			break;
611 #endif
612 		case Opt_uignore:
613 			uopt->flags |= (1 << UDF_FLAG_UID_IGNORE);
614 			break;
615 		case Opt_uforget:
616 			uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
617 			break;
618 		case Opt_gignore:
619 			uopt->flags |= (1 << UDF_FLAG_GID_IGNORE);
620 			break;
621 		case Opt_gforget:
622 			uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
623 			break;
624 		case Opt_fmode:
625 			if (match_octal(args, &option))
626 				return 0;
627 			uopt->fmode = option & 0777;
628 			break;
629 		case Opt_dmode:
630 			if (match_octal(args, &option))
631 				return 0;
632 			uopt->dmode = option & 0777;
633 			break;
634 		default:
635 			pr_err("bad mount option \"%s\" or missing value\n", p);
636 			return 0;
637 		}
638 	}
639 	return 1;
640 }
641 
642 static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
643 {
644 	struct udf_options uopt;
645 	struct udf_sb_info *sbi = UDF_SB(sb);
646 	int error = 0;
647 	struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
648 
649 	if (lvidiu) {
650 		int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
651 		if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
652 			return -EACCES;
653 	}
654 
655 	uopt.flags = sbi->s_flags;
656 	uopt.uid   = sbi->s_uid;
657 	uopt.gid   = sbi->s_gid;
658 	uopt.umask = sbi->s_umask;
659 	uopt.fmode = sbi->s_fmode;
660 	uopt.dmode = sbi->s_dmode;
661 
662 	if (!udf_parse_options(options, &uopt, true))
663 		return -EINVAL;
664 
665 	write_lock(&sbi->s_cred_lock);
666 	sbi->s_flags = uopt.flags;
667 	sbi->s_uid   = uopt.uid;
668 	sbi->s_gid   = uopt.gid;
669 	sbi->s_umask = uopt.umask;
670 	sbi->s_fmode = uopt.fmode;
671 	sbi->s_dmode = uopt.dmode;
672 	write_unlock(&sbi->s_cred_lock);
673 
674 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
675 		goto out_unlock;
676 
677 	if (*flags & MS_RDONLY)
678 		udf_close_lvid(sb);
679 	else
680 		udf_open_lvid(sb);
681 
682 out_unlock:
683 	return error;
684 }
685 
686 /* Check Volume Structure Descriptors (ECMA 167 2/9.1) */
687 /* We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
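/*
 * The Volume Recognition Sequence starts 32768 bytes into the session
 * (VSD_FIRST_SECTOR_OFFSET) and is a run of Volume Structure Descriptors
 * such as BEA01, NSR02/NSR03, TEA01 and the ISO9660 CD001 descriptors.
 * We scan it until a TEA01, an unknown identifier, a read error or the
 * VSD_MAX_SECTOR_OFFSET safety limit, remembering where NSR02/NSR03 (the
 * markers of an ECMA 167 / UDF volume) were seen.
 */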
688 static loff_t udf_check_vsd(struct super_block *sb)
689 {
690 	struct volStructDesc *vsd = NULL;
691 	loff_t sector = VSD_FIRST_SECTOR_OFFSET;
692 	int sectorsize;
693 	struct buffer_head *bh = NULL;
694 	int nsr02 = 0;
695 	int nsr03 = 0;
696 	struct udf_sb_info *sbi;
697 
698 	sbi = UDF_SB(sb);
699 	if (sb->s_blocksize < sizeof(struct volStructDesc))
700 		sectorsize = sizeof(struct volStructDesc);
701 	else
702 		sectorsize = sb->s_blocksize;
703 
704 	sector += (sbi->s_session << sb->s_blocksize_bits);
705 
706 	udf_debug("Starting at sector %u (%ld byte sectors)\n",
707 		  (unsigned int)(sector >> sb->s_blocksize_bits),
708 		  sb->s_blocksize);
709 	/* Process the sequence (if applicable). The hard limit on the sector
710 	 * offset is arbitrary, hopefully large enough so that all valid UDF
711 	 * filesystems will be recognised. There is no mention of an upper
712 	 * bound to the size of the volume recognition area in the standard.
713 	 * The limit prevents the code from reading every sector of a
714 	 * specially crafted image (like a Blu-ray disc full of CD001 sectors),
715 	 * which could otherwise cause minutes or even hours of uninterruptible
716 	 * I/O activity. This actually happened with uninitialised SSD
717 	 * partitions (all 0xFF) before the limit check and all valid IDs
718 	 * were added. */
719 	for (; !nsr02 && !nsr03 && sector < VSD_MAX_SECTOR_OFFSET;
720 	     sector += sectorsize) {
721 		/* Read a block */
722 		bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
723 		if (!bh)
724 			break;
725 
726 		/* Look for ISO descriptors */
727 		vsd = (struct volStructDesc *)(bh->b_data +
728 					      (sector & (sb->s_blocksize - 1)));
729 
730 		if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
731 				    VSD_STD_ID_LEN)) {
732 			switch (vsd->structType) {
733 			case 0:
734 				udf_debug("ISO9660 Boot Record found\n");
735 				break;
736 			case 1:
737 				udf_debug("ISO9660 Primary Volume Descriptor found\n");
738 				break;
739 			case 2:
740 				udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
741 				break;
742 			case 3:
743 				udf_debug("ISO9660 Volume Partition Descriptor found\n");
744 				break;
745 			case 255:
746 				udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
747 				break;
748 			default:
749 				udf_debug("ISO9660 VRS (%u) found\n",
750 					  vsd->structType);
751 				break;
752 			}
753 		} else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01,
754 				    VSD_STD_ID_LEN))
755 			; /* nothing */
756 		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01,
757 				    VSD_STD_ID_LEN)) {
758 			brelse(bh);
759 			break;
760 		} else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02,
761 				    VSD_STD_ID_LEN))
762 			nsr02 = sector;
763 		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03,
764 				    VSD_STD_ID_LEN))
765 			nsr03 = sector;
766 		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BOOT2,
767 				    VSD_STD_ID_LEN))
768 			; /* nothing */
769 		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CDW02,
770 				    VSD_STD_ID_LEN))
771 			; /* nothing */
772 		else {
773 			/* invalid id : end of volume recognition area */
774 			brelse(bh);
775 			break;
776 		}
777 		brelse(bh);
778 	}
779 
780 	if (nsr03)
781 		return nsr03;
782 	else if (nsr02)
783 		return nsr02;
784 	else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
785 			VSD_FIRST_SECTOR_OFFSET)
786 		return -1;
787 	else
788 		return 0;
789 }
790 
791 static int udf_find_fileset(struct super_block *sb,
792 			    struct kernel_lb_addr *fileset,
793 			    struct kernel_lb_addr *root)
794 {
795 	struct buffer_head *bh = NULL;
796 	long lastblock;
797 	uint16_t ident;
798 	struct udf_sb_info *sbi;
799 
800 	if (fileset->logicalBlockNum != 0xFFFFFFFF ||
801 	    fileset->partitionReferenceNum != 0xFFFF) {
802 		bh = udf_read_ptagged(sb, fileset, 0, &ident);
803 
804 		if (!bh) {
805 			return 1;
806 		} else if (ident != TAG_IDENT_FSD) {
807 			brelse(bh);
808 			return 1;
809 		}
810 
811 	}
812 
813 	sbi = UDF_SB(sb);
814 	if (!bh) {
815 		/* Search backwards through the partitions */
816 		struct kernel_lb_addr newfileset;
817 
818 /* --> cvg: FIXME - is it reasonable? */
819 		return 1;
820 
821 		for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
822 		     (newfileset.partitionReferenceNum != 0xFFFF &&
823 		      fileset->logicalBlockNum == 0xFFFFFFFF &&
824 		      fileset->partitionReferenceNum == 0xFFFF);
825 		     newfileset.partitionReferenceNum--) {
826 			lastblock = sbi->s_partmaps
827 					[newfileset.partitionReferenceNum]
828 						.s_partition_len;
829 			newfileset.logicalBlockNum = 0;
830 
831 			do {
832 				bh = udf_read_ptagged(sb, &newfileset, 0,
833 						      &ident);
834 				if (!bh) {
835 					newfileset.logicalBlockNum++;
836 					continue;
837 				}
838 
839 				switch (ident) {
840 				case TAG_IDENT_SBD:
841 				{
842 					struct spaceBitmapDesc *sp;
843 					sp = (struct spaceBitmapDesc *)
844 								bh->b_data;
845 					newfileset.logicalBlockNum += 1 +
846 						((le32_to_cpu(sp->numOfBytes) +
847 						  sizeof(struct spaceBitmapDesc)
848 						  - 1) >> sb->s_blocksize_bits);
849 					brelse(bh);
850 					break;
851 				}
852 				case TAG_IDENT_FSD:
853 					*fileset = newfileset;
854 					break;
855 				default:
856 					newfileset.logicalBlockNum++;
857 					brelse(bh);
858 					bh = NULL;
859 					break;
860 				}
861 			} while (newfileset.logicalBlockNum < lastblock &&
862 				 fileset->logicalBlockNum == 0xFFFFFFFF &&
863 				 fileset->partitionReferenceNum == 0xFFFF);
864 		}
865 	}
866 
867 	if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
868 	     fileset->partitionReferenceNum != 0xFFFF) && bh) {
869 		udf_debug("Fileset at block=%d, partition=%d\n",
870 			  fileset->logicalBlockNum,
871 			  fileset->partitionReferenceNum);
872 
873 		sbi->s_partition = fileset->partitionReferenceNum;
874 		udf_load_fileset(sb, bh, root);
875 		brelse(bh);
876 		return 0;
877 	}
878 	return 1;
879 }
880 
881 /*
882  * Load primary Volume Descriptor Sequence
883  *
884  * Return <0 on error, 0 on success. -EAGAIN has the special meaning that the
885  * next sequence should be tried.
886  */
887 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
888 {
889 	struct primaryVolDesc *pvoldesc;
890 	struct ustr *instr, *outstr;
891 	struct buffer_head *bh;
892 	uint16_t ident;
893 	int ret = -ENOMEM;
894 
895 	instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
896 	if (!instr)
897 		return -ENOMEM;
898 
899 	outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
900 	if (!outstr)
901 		goto out1;
902 
903 	bh = udf_read_tagged(sb, block, block, &ident);
904 	if (!bh) {
905 		ret = -EAGAIN;
906 		goto out2;
907 	}
908 
909 	if (ident != TAG_IDENT_PVD) {
910 		ret = -EIO;
911 		goto out_bh;
912 	}
913 
914 	pvoldesc = (struct primaryVolDesc *)bh->b_data;
915 
916 	if (udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
917 			      pvoldesc->recordingDateAndTime)) {
918 #ifdef UDFFS_DEBUG
919 		struct timestamp *ts = &pvoldesc->recordingDateAndTime;
920 		udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
921 			  le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
922 			  ts->minute, le16_to_cpu(ts->typeAndTimezone));
923 #endif
924 	}
925 
926 	if (!udf_build_ustr(instr, pvoldesc->volIdent, 32))
927 		if (udf_CS0toUTF8(outstr, instr)) {
928 			strncpy(UDF_SB(sb)->s_volume_ident, outstr->u_name,
929 				outstr->u_len > 31 ? 31 : outstr->u_len);
930 			udf_debug("volIdent[] = '%s'\n",
931 				  UDF_SB(sb)->s_volume_ident);
932 		}
933 
934 	if (!udf_build_ustr(instr, pvoldesc->volSetIdent, 128))
935 		if (udf_CS0toUTF8(outstr, instr))
936 			udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
937 
938 	ret = 0;
939 out_bh:
940 	brelse(bh);
941 out2:
942 	kfree(outstr);
943 out1:
944 	kfree(instr);
945 	return ret;
946 }
947 
948 struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
949 					u32 meta_file_loc, u32 partition_num)
950 {
951 	struct kernel_lb_addr addr;
952 	struct inode *metadata_fe;
953 
954 	addr.logicalBlockNum = meta_file_loc;
955 	addr.partitionReferenceNum = partition_num;
956 
957 	metadata_fe = udf_iget(sb, &addr);
958 
959 	if (metadata_fe == NULL)
960 		udf_warn(sb, "metadata inode efe not found\n");
961 	else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
962 		udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
963 		iput(metadata_fe);
964 		metadata_fe = NULL;
965 	}
966 
967 	return metadata_fe;
968 }
969 
970 static int udf_load_metadata_files(struct super_block *sb, int partition)
971 {
972 	struct udf_sb_info *sbi = UDF_SB(sb);
973 	struct udf_part_map *map;
974 	struct udf_meta_data *mdata;
975 	struct kernel_lb_addr addr;
976 
977 	map = &sbi->s_partmaps[partition];
978 	mdata = &map->s_type_specific.s_metadata;
979 
980 	/* metadata address */
981 	udf_debug("Metadata file location: block = %d part = %d\n",
982 		  mdata->s_meta_file_loc, map->s_partition_num);
983 
984 	mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb,
985 		mdata->s_meta_file_loc, map->s_partition_num);
986 
987 	if (mdata->s_metadata_fe == NULL) {
988 		/* mirror file entry */
989 		udf_debug("Mirror metadata file location: block = %d part = %d\n",
990 			  mdata->s_mirror_file_loc, map->s_partition_num);
991 
992 		mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
993 			mdata->s_mirror_file_loc, map->s_partition_num);
994 
995 		if (mdata->s_mirror_fe == NULL) {
996 			udf_err(sb, "Neither metadata nor mirror metadata inode efe could be found\n");
997 			return -EIO;
998 		}
999 	}
1000 
1001 	/*
1002 	 * bitmap file entry
1003 	 * Note:
1004 	 * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
1005 	 */
1006 	if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
1007 		addr.logicalBlockNum = mdata->s_bitmap_file_loc;
1008 		addr.partitionReferenceNum = map->s_partition_num;
1009 
1010 		udf_debug("Bitmap file location: block = %d part = %d\n",
1011 			  addr.logicalBlockNum, addr.partitionReferenceNum);
1012 
1013 		mdata->s_bitmap_fe = udf_iget(sb, &addr);
1014 		if (mdata->s_bitmap_fe == NULL) {
1015 			if (sb->s_flags & MS_RDONLY)
1016 				udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
1017 			else {
1018 				udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
1019 				return -EIO;
1020 			}
1021 		}
1022 	}
1023 
1024 	udf_debug("udf_load_metadata_files Ok\n");
1025 	return 0;
1026 }
1027 
1028 static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
1029 			     struct kernel_lb_addr *root)
1030 {
1031 	struct fileSetDesc *fset;
1032 
1033 	fset = (struct fileSetDesc *)bh->b_data;
1034 
1035 	*root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
1036 
1037 	UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
1038 
1039 	udf_debug("Rootdir at block=%d, partition=%d\n",
1040 		  root->logicalBlockNum, root->partitionReferenceNum);
1041 }
1042 
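/*
 * Number of block groups needed for the free-space bitmap of a partition:
 * each group is one filesystem block holding sb->s_blocksize * 8 bits, and
 * the bitmap is preceded on disk by a struct spaceBitmapDesc header, hence
 * the extra (sizeof(struct spaceBitmapDesc) << 3) bits.  For example, with
 * 2048-byte blocks one group covers 16384 partition blocks.
 */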
1043 int udf_compute_nr_groups(struct super_block *sb, u32 partition)
1044 {
1045 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
1046 	return DIV_ROUND_UP(map->s_partition_len +
1047 			    (sizeof(struct spaceBitmapDesc) << 3),
1048 			    sb->s_blocksize * 8);
1049 }
1050 
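/*
 * The bitmap structure carries one buffer_head pointer per block group, so
 * it can get large; fall back to vzalloc() when it no longer fits in a
 * page.  udf_sb_free_bitmap() repeats the same size test to pick kfree()
 * or vfree().
 */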
1051 static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
1052 {
1053 	struct udf_bitmap *bitmap;
1054 	int nr_groups;
1055 	int size;
1056 
1057 	nr_groups = udf_compute_nr_groups(sb, index);
1058 	size = sizeof(struct udf_bitmap) +
1059 		(sizeof(struct buffer_head *) * nr_groups);
1060 
1061 	if (size <= PAGE_SIZE)
1062 		bitmap = kzalloc(size, GFP_KERNEL);
1063 	else
1064 		bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
1065 
1066 	if (bitmap == NULL)
1067 		return NULL;
1068 
1069 	bitmap->s_nr_groups = nr_groups;
1070 	return bitmap;
1071 }
1072 
1073 static int udf_fill_partdesc_info(struct super_block *sb,
1074 		struct partitionDesc *p, int p_index)
1075 {
1076 	struct udf_part_map *map;
1077 	struct udf_sb_info *sbi = UDF_SB(sb);
1078 	struct partitionHeaderDesc *phd;
1079 
1080 	map = &sbi->s_partmaps[p_index];
1081 
1082 	map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
1083 	map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
1084 
1085 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
1086 		map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
1087 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
1088 		map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
1089 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
1090 		map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
1091 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
1092 		map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
1093 
1094 	udf_debug("Partition (%d type %x) starts at physical %d, block length %d\n",
1095 		  p_index, map->s_partition_type,
1096 		  map->s_partition_root, map->s_partition_len);
1097 
1098 	if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
1099 	    strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
1100 		return 0;
1101 
1102 	phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1103 	if (phd->unallocSpaceTable.extLength) {
1104 		struct kernel_lb_addr loc = {
1105 			.logicalBlockNum = le32_to_cpu(
1106 				phd->unallocSpaceTable.extPosition),
1107 			.partitionReferenceNum = p_index,
1108 		};
1109 
1110 		map->s_uspace.s_table = udf_iget(sb, &loc);
1111 		if (!map->s_uspace.s_table) {
1112 			udf_debug("cannot load unallocSpaceTable (part %d)\n",
1113 				  p_index);
1114 			return -EIO;
1115 		}
1116 		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
1117 		udf_debug("unallocSpaceTable (part %d) @ %ld\n",
1118 			  p_index, map->s_uspace.s_table->i_ino);
1119 	}
1120 
1121 	if (phd->unallocSpaceBitmap.extLength) {
1122 		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1123 		if (!bitmap)
1124 			return -ENOMEM;
1125 		map->s_uspace.s_bitmap = bitmap;
1126 		bitmap->s_extPosition = le32_to_cpu(
1127 				phd->unallocSpaceBitmap.extPosition);
1128 		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
1129 		udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
1130 			  p_index, bitmap->s_extPosition);
1131 	}
1132 
1133 	if (phd->partitionIntegrityTable.extLength)
1134 		udf_debug("partitionIntegrityTable (part %d)\n", p_index);
1135 
1136 	if (phd->freedSpaceTable.extLength) {
1137 		struct kernel_lb_addr loc = {
1138 			.logicalBlockNum = le32_to_cpu(
1139 				phd->freedSpaceTable.extPosition),
1140 			.partitionReferenceNum = p_index,
1141 		};
1142 
1143 		map->s_fspace.s_table = udf_iget(sb, &loc);
1144 		if (!map->s_fspace.s_table) {
1145 			udf_debug("cannot load freedSpaceTable (part %d)\n",
1146 				  p_index);
1147 			return -EIO;
1148 		}
1149 
1150 		map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
1151 		udf_debug("freedSpaceTable (part %d) @ %ld\n",
1152 			  p_index, map->s_fspace.s_table->i_ino);
1153 	}
1154 
1155 	if (phd->freedSpaceBitmap.extLength) {
1156 		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1157 		if (!bitmap)
1158 			return -ENOMEM;
1159 		map->s_fspace.s_bitmap = bitmap;
1160 		bitmap->s_extPosition = le32_to_cpu(
1161 				phd->freedSpaceBitmap.extPosition);
1162 		map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
1163 		udf_debug("freedSpaceBitmap (part %d) @ %d\n",
1164 			  p_index, bitmap->s_extPosition);
1165 	}
1166 	return 0;
1167 }
1168 
1169 static void udf_find_vat_block(struct super_block *sb, int p_index,
1170 			       int type1_index, sector_t start_block)
1171 {
1172 	struct udf_sb_info *sbi = UDF_SB(sb);
1173 	struct udf_part_map *map = &sbi->s_partmaps[p_index];
1174 	sector_t vat_block;
1175 	struct kernel_lb_addr ino;
1176 
1177 	/*
1178 	 * VAT file entry is in the last recorded block. Some broken disks have
1179 	 * it a few blocks before so try a bit harder...
1180 	 */
1181 	ino.partitionReferenceNum = type1_index;
1182 	for (vat_block = start_block;
1183 	     vat_block >= map->s_partition_root &&
1184 	     vat_block >= start_block - 3 &&
1185 	     !sbi->s_vat_inode; vat_block--) {
1186 		ino.logicalBlockNum = vat_block - map->s_partition_root;
1187 		sbi->s_vat_inode = udf_iget(sb, &ino);
1188 	}
1189 }
1190 
1191 static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1192 {
1193 	struct udf_sb_info *sbi = UDF_SB(sb);
1194 	struct udf_part_map *map = &sbi->s_partmaps[p_index];
1195 	struct buffer_head *bh = NULL;
1196 	struct udf_inode_info *vati;
1197 	uint32_t pos;
1198 	struct virtualAllocationTable20 *vat20;
1199 	sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
1200 
1201 	udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
1202 	if (!sbi->s_vat_inode &&
1203 	    sbi->s_last_block != blocks - 1) {
1204 		pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
1205 			  (unsigned long)sbi->s_last_block,
1206 			  (unsigned long)blocks - 1);
1207 		udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
1208 	}
1209 	if (!sbi->s_vat_inode)
1210 		return -EIO;
1211 
1212 	if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1213 		map->s_type_specific.s_virtual.s_start_offset = 0;
1214 		map->s_type_specific.s_virtual.s_num_entries =
1215 			(sbi->s_vat_inode->i_size - 36) >> 2;
1216 	} else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
1217 		vati = UDF_I(sbi->s_vat_inode);
1218 		if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1219 			pos = udf_block_map(sbi->s_vat_inode, 0);
1220 			bh = sb_bread(sb, pos);
1221 			if (!bh)
1222 				return -EIO;
1223 			vat20 = (struct virtualAllocationTable20 *)bh->b_data;
1224 		} else {
1225 			vat20 = (struct virtualAllocationTable20 *)
1226 							vati->i_ext.i_data;
1227 		}
1228 
1229 		map->s_type_specific.s_virtual.s_start_offset =
1230 			le16_to_cpu(vat20->lengthHeader);
1231 		map->s_type_specific.s_virtual.s_num_entries =
1232 			(sbi->s_vat_inode->i_size -
1233 				map->s_type_specific.s_virtual.
1234 					s_start_offset) >> 2;
1235 		brelse(bh);
1236 	}
1237 	return 0;
1238 }
1239 
1240 /*
1241  * Load partition descriptor block
1242  *
1243  * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
1244  * sequence.
1245  */
1246 static int udf_load_partdesc(struct super_block *sb, sector_t block)
1247 {
1248 	struct buffer_head *bh;
1249 	struct partitionDesc *p;
1250 	struct udf_part_map *map;
1251 	struct udf_sb_info *sbi = UDF_SB(sb);
1252 	int i, type1_idx;
1253 	uint16_t partitionNumber;
1254 	uint16_t ident;
1255 	int ret;
1256 
1257 	bh = udf_read_tagged(sb, block, block, &ident);
1258 	if (!bh)
1259 		return -EAGAIN;
1260 	if (ident != TAG_IDENT_PD) {
1261 		ret = 0;
1262 		goto out_bh;
1263 	}
1264 
1265 	p = (struct partitionDesc *)bh->b_data;
1266 	partitionNumber = le16_to_cpu(p->partitionNumber);
1267 
1268 	/* First scan for TYPE1, SPARABLE and METADATA partitions */
1269 	for (i = 0; i < sbi->s_partitions; i++) {
1270 		map = &sbi->s_partmaps[i];
1271 		udf_debug("Searching map: (%d == %d)\n",
1272 			  map->s_partition_num, partitionNumber);
1273 		if (map->s_partition_num == partitionNumber &&
1274 		    (map->s_partition_type == UDF_TYPE1_MAP15 ||
1275 		     map->s_partition_type == UDF_SPARABLE_MAP15))
1276 			break;
1277 	}
1278 
1279 	if (i >= sbi->s_partitions) {
1280 		udf_debug("Partition (%d) not found in partition map\n",
1281 			  partitionNumber);
1282 		ret = 0;
1283 		goto out_bh;
1284 	}
1285 
1286 	ret = udf_fill_partdesc_info(sb, p, i);
1287 	if (ret < 0)
1288 		goto out_bh;
1289 
1290 	/*
1291 	 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
1292 	 * PHYSICAL partitions are already set up
1293 	 */
1294 	type1_idx = i;
1295 #ifdef UDFFS_DEBUG
1296 	map = NULL; /* suppress 'maybe used uninitialized' warning */
1297 #endif
1298 	for (i = 0; i < sbi->s_partitions; i++) {
1299 		map = &sbi->s_partmaps[i];
1300 
1301 		if (map->s_partition_num == partitionNumber &&
1302 		    (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1303 		     map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1304 		     map->s_partition_type == UDF_METADATA_MAP25))
1305 			break;
1306 	}
1307 
1308 	if (i >= sbi->s_partitions) {
1309 		ret = 0;
1310 		goto out_bh;
1311 	}
1312 
1313 	ret = udf_fill_partdesc_info(sb, p, i);
1314 	if (ret < 0)
1315 		goto out_bh;
1316 
1317 	if (map->s_partition_type == UDF_METADATA_MAP25) {
1318 		ret = udf_load_metadata_files(sb, i);
1319 		if (ret < 0) {
1320 			udf_err(sb, "error loading MetaData partition map %d\n",
1321 				i);
1322 			goto out_bh;
1323 		}
1324 	} else {
1325 		/*
1326 		 * If we have a partition with virtual map, we don't handle
1327 		 * writing to it (we overwrite blocks instead of relocating
1328 		 * them).
1329 		 */
1330 		if (!(sb->s_flags & MS_RDONLY)) {
1331 			ret = -EACCES;
1332 			goto out_bh;
1333 		}
1334 		ret = udf_load_vat(sb, i, type1_idx);
1335 		if (ret < 0)
1336 			goto out_bh;
1337 	}
1338 	ret = 0;
1339 out_bh:
1340 	/* In case loading failed, we handle cleanup in udf_fill_super */
1341 	brelse(bh);
1342 	return ret;
1343 }
1344 
1345 static int udf_load_sparable_map(struct super_block *sb,
1346 				 struct udf_part_map *map,
1347 				 struct sparablePartitionMap *spm)
1348 {
1349 	uint32_t loc;
1350 	uint16_t ident;
1351 	struct sparingTable *st;
1352 	struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
1353 	int i;
1354 	struct buffer_head *bh;
1355 
1356 	map->s_partition_type = UDF_SPARABLE_MAP15;
1357 	sdata->s_packet_len = le16_to_cpu(spm->packetLength);
1358 	if (!is_power_of_2(sdata->s_packet_len)) {
1359 		udf_err(sb, "error loading logical volume descriptor: "
1360 			"Invalid packet length %u\n",
1361 			(unsigned)sdata->s_packet_len);
1362 		return -EIO;
1363 	}
1364 	if (spm->numSparingTables > 4) {
1365 		udf_err(sb, "error loading logical volume descriptor: "
1366 			"Too many sparing tables (%d)\n",
1367 			(int)spm->numSparingTables);
1368 		return -EIO;
1369 	}
1370 
1371 	for (i = 0; i < spm->numSparingTables; i++) {
1372 		loc = le32_to_cpu(spm->locSparingTable[i]);
1373 		bh = udf_read_tagged(sb, loc, loc, &ident);
1374 		if (!bh)
1375 			continue;
1376 
1377 		st = (struct sparingTable *)bh->b_data;
1378 		if (ident != 0 ||
1379 		    strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
1380 			    strlen(UDF_ID_SPARING)) ||
1381 		    sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
1382 							sb->s_blocksize) {
1383 			brelse(bh);
1384 			continue;
1385 		}
1386 
1387 		sdata->s_spar_map[i] = bh;
1388 	}
1389 	map->s_partition_func = udf_get_pblock_spar15;
1390 	return 0;
1391 }
1392 
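/*
 * Parse the Logical Volume Descriptor and set up sbi->s_partmaps from its
 * partition maps.  Type 1 maps describe plain physical partitions; type 2
 * maps are told apart by their ident string (virtual, sparable or metadata
 * partitions) and get the matching s_partition_func for block translation.
 */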
1393 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1394 			       struct kernel_lb_addr *fileset)
1395 {
1396 	struct logicalVolDesc *lvd;
1397 	int i, offset;
1398 	uint8_t type;
1399 	struct udf_sb_info *sbi = UDF_SB(sb);
1400 	struct genericPartitionMap *gpm;
1401 	uint16_t ident;
1402 	struct buffer_head *bh;
1403 	unsigned int table_len;
1404 	int ret;
1405 
1406 	bh = udf_read_tagged(sb, block, block, &ident);
1407 	if (!bh)
1408 		return -EAGAIN;
1409 	BUG_ON(ident != TAG_IDENT_LVD);
1410 	lvd = (struct logicalVolDesc *)bh->b_data;
1411 	table_len = le32_to_cpu(lvd->mapTableLength);
1412 	if (table_len > sb->s_blocksize - sizeof(*lvd)) {
1413 		udf_err(sb, "error loading logical volume descriptor: "
1414 			"Partition table too long (%u > %lu)\n", table_len,
1415 			sb->s_blocksize - sizeof(*lvd));
1416 		ret = -EIO;
1417 		goto out_bh;
1418 	}
1419 
1420 	ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
1421 	if (ret)
1422 		goto out_bh;
1423 
1424 	for (i = 0, offset = 0;
1425 	     i < sbi->s_partitions && offset < table_len;
1426 	     i++, offset += gpm->partitionMapLength) {
1427 		struct udf_part_map *map = &sbi->s_partmaps[i];
1428 		gpm = (struct genericPartitionMap *)
1429 				&(lvd->partitionMaps[offset]);
1430 		type = gpm->partitionMapType;
1431 		if (type == 1) {
1432 			struct genericPartitionMap1 *gpm1 =
1433 				(struct genericPartitionMap1 *)gpm;
1434 			map->s_partition_type = UDF_TYPE1_MAP15;
1435 			map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
1436 			map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
1437 			map->s_partition_func = NULL;
1438 		} else if (type == 2) {
1439 			struct udfPartitionMap2 *upm2 =
1440 						(struct udfPartitionMap2 *)gpm;
1441 			if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
1442 						strlen(UDF_ID_VIRTUAL))) {
1443 				u16 suf =
1444 					le16_to_cpu(((__le16 *)upm2->partIdent.
1445 							identSuffix)[0]);
1446 				if (suf < 0x0200) {
1447 					map->s_partition_type =
1448 							UDF_VIRTUAL_MAP15;
1449 					map->s_partition_func =
1450 							udf_get_pblock_virt15;
1451 				} else {
1452 					map->s_partition_type =
1453 							UDF_VIRTUAL_MAP20;
1454 					map->s_partition_func =
1455 							udf_get_pblock_virt20;
1456 				}
1457 			} else if (!strncmp(upm2->partIdent.ident,
1458 						UDF_ID_SPARABLE,
1459 						strlen(UDF_ID_SPARABLE))) {
1460 				ret = udf_load_sparable_map(sb, map,
1461 					(struct sparablePartitionMap *)gpm);
1462 				if (ret < 0)
1463 					goto out_bh;
1464 			} else if (!strncmp(upm2->partIdent.ident,
1465 						UDF_ID_METADATA,
1466 						strlen(UDF_ID_METADATA))) {
1467 				struct udf_meta_data *mdata =
1468 					&map->s_type_specific.s_metadata;
1469 				struct metadataPartitionMap *mdm =
1470 						(struct metadataPartitionMap *)
1471 						&(lvd->partitionMaps[offset]);
1472 				udf_debug("Parsing Logical vol part %d type %d  id=%s\n",
1473 					  i, type, UDF_ID_METADATA);
1474 
1475 				map->s_partition_type = UDF_METADATA_MAP25;
1476 				map->s_partition_func = udf_get_pblock_meta25;
1477 
1478 				mdata->s_meta_file_loc   =
1479 					le32_to_cpu(mdm->metadataFileLoc);
1480 				mdata->s_mirror_file_loc =
1481 					le32_to_cpu(mdm->metadataMirrorFileLoc);
1482 				mdata->s_bitmap_file_loc =
1483 					le32_to_cpu(mdm->metadataBitmapFileLoc);
1484 				mdata->s_alloc_unit_size =
1485 					le32_to_cpu(mdm->allocUnitSize);
1486 				mdata->s_align_unit_size =
1487 					le16_to_cpu(mdm->alignUnitSize);
1488 				if (mdm->flags & 0x01)
1489 					mdata->s_flags |= MF_DUPLICATE_MD;
1490 
1491 				udf_debug("Metadata Ident suffix=0x%x\n",
1492 					  le16_to_cpu(*(__le16 *)
1493 						      mdm->partIdent.identSuffix));
1494 				udf_debug("Metadata part num=%d\n",
1495 					  le16_to_cpu(mdm->partitionNum));
1496 				udf_debug("Metadata part alloc unit size=%d\n",
1497 					  le32_to_cpu(mdm->allocUnitSize));
1498 				udf_debug("Metadata file loc=%d\n",
1499 					  le32_to_cpu(mdm->metadataFileLoc));
1500 				udf_debug("Mirror file loc=%d\n",
1501 					  le32_to_cpu(mdm->metadataMirrorFileLoc));
1502 				udf_debug("Bitmap file loc=%d\n",
1503 					  le32_to_cpu(mdm->metadataBitmapFileLoc));
1504 				udf_debug("Flags: %d %d\n",
1505 					  mdata->s_flags, mdm->flags);
1506 			} else {
1507 				udf_debug("Unknown ident: %s\n",
1508 					  upm2->partIdent.ident);
1509 				continue;
1510 			}
1511 			map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
1512 			map->s_partition_num = le16_to_cpu(upm2->partitionNum);
1513 		}
1514 		udf_debug("Partition (%d:%d) type %d on volume %d\n",
1515 			  i, map->s_partition_num, type, map->s_volumeseqnum);
1516 	}
1517 
1518 	if (fileset) {
1519 		struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
1520 
1521 		*fileset = lelb_to_cpu(la->extLocation);
1522 		udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n",
1523 			  fileset->logicalBlockNum,
1524 			  fileset->partitionReferenceNum);
1525 	}
1526 	if (lvd->integritySeqExt.extLength)
1527 		udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1528 	ret = 0;
1529 out_bh:
1530 	brelse(bh);
1531 	return ret;
1532 }
1533 
1534 /*
1535  * udf_load_logicalvolint - load the Logical Volume Integrity Descriptor
1536  * sequence described by @loc and cache it in sbi->s_lvid_bh.
1537  */
1538 static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
1539 {
1540 	struct buffer_head *bh = NULL;
1541 	uint16_t ident;
1542 	struct udf_sb_info *sbi = UDF_SB(sb);
1543 	struct logicalVolIntegrityDesc *lvid;
1544 
1545 	while (loc.extLength > 0 &&
1546 	       (bh = udf_read_tagged(sb, loc.extLocation,
1547 				     loc.extLocation, &ident)) &&
1548 	       ident == TAG_IDENT_LVID) {
1549 		sbi->s_lvid_bh = bh;
1550 		lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1551 
1552 		if (lvid->nextIntegrityExt.extLength)
1553 			udf_load_logicalvolint(sb,
1554 				leea_to_cpu(lvid->nextIntegrityExt));
1555 
1556 		if (sbi->s_lvid_bh != bh)
1557 			brelse(bh);
1558 		loc.extLength -= sb->s_blocksize;
1559 		loc.extLocation++;
1560 	}
1561 	if (sbi->s_lvid_bh != bh)
1562 		brelse(bh);
1563 }
1564 
1565 /*
1566  * Process a main/reserve volume descriptor sequence.
1567  *   @block		First block of first extent of the sequence.
1568  *   @lastblock		Last block of first extent of the sequence.
1569  *   @fileset		Where we store the extent containing the root fileset
1570  *
1571  * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor
1572  * sequence
1573  */
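/*
 * The sequence is walked twice: the first pass only records where each
 * descriptor type was seen, keeping the copy with the highest volDescSeqNum
 * where several occur, and follows any Volume Descriptor Pointer to the
 * next extent; the second pass then loads the Primary Volume Descriptor,
 * the Logical Volume Descriptor and finally the Partition Descriptors.
 */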
1574 static noinline int udf_process_sequence(
1575 		struct super_block *sb,
1576 		sector_t block, sector_t lastblock,
1577 		struct kernel_lb_addr *fileset)
1578 {
1579 	struct buffer_head *bh = NULL;
1580 	struct udf_vds_record vds[VDS_POS_LENGTH];
1581 	struct udf_vds_record *curr;
1582 	struct generic_desc *gd;
1583 	struct volDescPtr *vdp;
1584 	int done = 0;
1585 	uint32_t vdsn;
1586 	uint16_t ident;
1587 	long next_s = 0, next_e = 0;
1588 	int ret;
1589 
1590 	memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1591 
1592 	/*
1593 	 * Read the main descriptor sequence and find which descriptors
1594 	 * are in it.
1595 	 */
1596 	for (; (!done && block <= lastblock); block++) {
1597 
1598 		bh = udf_read_tagged(sb, block, block, &ident);
1599 		if (!bh) {
1600 			udf_err(sb,
1601 				"Block %llu of volume descriptor sequence is corrupted or we could not read it\n",
1602 				(unsigned long long)block);
1603 			return -EAGAIN;
1604 		}
1605 
1606 		/* Process each descriptor (ISO 13346 3/8.3-8.4) */
1607 		gd = (struct generic_desc *)bh->b_data;
1608 		vdsn = le32_to_cpu(gd->volDescSeqNum);
1609 		switch (ident) {
1610 		case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1611 			curr = &vds[VDS_POS_PRIMARY_VOL_DESC];
1612 			if (vdsn >= curr->volDescSeqNum) {
1613 				curr->volDescSeqNum = vdsn;
1614 				curr->block = block;
1615 			}
1616 			break;
1617 		case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1618 			curr = &vds[VDS_POS_VOL_DESC_PTR];
1619 			if (vdsn >= curr->volDescSeqNum) {
1620 				curr->volDescSeqNum = vdsn;
1621 				curr->block = block;
1622 
1623 				vdp = (struct volDescPtr *)bh->b_data;
1624 				next_s = le32_to_cpu(
1625 					vdp->nextVolDescSeqExt.extLocation);
1626 				next_e = le32_to_cpu(
1627 					vdp->nextVolDescSeqExt.extLength);
1628 				next_e = next_e >> sb->s_blocksize_bits;
1629 				next_e += next_s;
1630 			}
1631 			break;
1632 		case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1633 			curr = &vds[VDS_POS_IMP_USE_VOL_DESC];
1634 			if (vdsn >= curr->volDescSeqNum) {
1635 				curr->volDescSeqNum = vdsn;
1636 				curr->block = block;
1637 			}
1638 			break;
1639 		case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1640 			curr = &vds[VDS_POS_PARTITION_DESC];
1641 			if (!curr->block)
1642 				curr->block = block;
1643 			break;
1644 		case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1645 			curr = &vds[VDS_POS_LOGICAL_VOL_DESC];
1646 			if (vdsn >= curr->volDescSeqNum) {
1647 				curr->volDescSeqNum = vdsn;
1648 				curr->block = block;
1649 			}
1650 			break;
1651 		case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1652 			curr = &vds[VDS_POS_UNALLOC_SPACE_DESC];
1653 			if (vdsn >= curr->volDescSeqNum) {
1654 				curr->volDescSeqNum = vdsn;
1655 				curr->block = block;
1656 			}
1657 			break;
1658 		case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
1659 			vds[VDS_POS_TERMINATING_DESC].block = block;
1660 			if (next_e) {
1661 				block = next_s;
1662 				lastblock = next_e;
1663 				next_s = next_e = 0;
1664 			} else
1665 				done = 1;
1666 			break;
1667 		}
1668 		brelse(bh);
1669 	}
1670 	/*
1671 	 * Now read interesting descriptors again and process them
1672 	 * in a suitable order
1673 	 */
1674 	if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
1675 		udf_err(sb, "Primary Volume Descriptor not found!\n");
1676 		return -EAGAIN;
1677 	}
1678 	ret = udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block);
1679 	if (ret < 0)
1680 		return ret;
1681 
1682 	if (vds[VDS_POS_LOGICAL_VOL_DESC].block) {
1683 		ret = udf_load_logicalvol(sb,
1684 					  vds[VDS_POS_LOGICAL_VOL_DESC].block,
1685 					  fileset);
1686 		if (ret < 0)
1687 			return ret;
1688 	}
1689 
1690 	if (vds[VDS_POS_PARTITION_DESC].block) {
1691 		/*
1692 		 * We rescan the whole descriptor sequence to find
1693 		 * partition descriptor blocks and process them.
1694 		 */
1695 		for (block = vds[VDS_POS_PARTITION_DESC].block;
1696 		     block < vds[VDS_POS_TERMINATING_DESC].block;
1697 		     block++) {
1698 			ret = udf_load_partdesc(sb, block);
1699 			if (ret < 0)
1700 				return ret;
1701 		}
1702 	}
1703 
1704 	return 0;
1705 }
1706 
1707 /*
1708  * Load Volume Descriptor Sequence described by anchor in bh
1709  *
1710  * Returns <0 on error, 0 on success
1711  */
1712 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1713 			     struct kernel_lb_addr *fileset)
1714 {
1715 	struct anchorVolDescPtr *anchor;
1716 	sector_t main_s, main_e, reserve_s, reserve_e;
1717 	int ret;
1718 
1719 	anchor = (struct anchorVolDescPtr *)bh->b_data;
1720 
1721 	/* Locate the main sequence */
1722 	main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1723 	main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1724 	main_e = main_e >> sb->s_blocksize_bits;
1725 	main_e += main_s;
1726 
1727 	/* Locate the reserve sequence */
1728 	reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
1729 	reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1730 	reserve_e = reserve_e >> sb->s_blocksize_bits;
1731 	reserve_e += reserve_s;
1732 
1733 	/* Process the main & reserve sequences */
1734 	/* responsible for finding the PartitionDesc(s) */
1735 	ret = udf_process_sequence(sb, main_s, main_e, fileset);
1736 	if (ret != -EAGAIN)
1737 		return ret;
1738 	udf_sb_free_partitions(sb);
1739 	ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
1740 	if (ret < 0) {
1741 		udf_sb_free_partitions(sb);
1742 		/* No sequence was OK, return -EIO */
1743 		if (ret == -EAGAIN)
1744 			ret = -EIO;
1745 	}
1746 	return ret;
1747 }
1748 
1749 /*
1750  * Check whether there is an anchor block in the given block and
1751  * load Volume Descriptor Sequence if so.
1752  *
1753  * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
1754  * block
1755  */
1756 static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1757 				  struct kernel_lb_addr *fileset)
1758 {
1759 	struct buffer_head *bh;
1760 	uint16_t ident;
1761 	int ret;
1762 
1763 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
1764 	    udf_fixed_to_variable(block) >=
1765 	    sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
1766 		return -EAGAIN;
1767 
1768 	bh = udf_read_tagged(sb, block, block, &ident);
1769 	if (!bh)
1770 		return -EAGAIN;
1771 	if (ident != TAG_IDENT_AVDP) {
1772 		brelse(bh);
1773 		return -EAGAIN;
1774 	}
1775 	ret = udf_load_sequence(sb, bh, fileset);
1776 	brelse(bh);
1777 	return ret;
1778 }
1779 
1780 /*
1781  * Search for an anchor volume descriptor pointer.
1782  *
1783  * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
1784  * of anchors.
1785  */
1786 static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
1787 			    struct kernel_lb_addr *fileset)
1788 {
1789 	sector_t last[6];
1790 	int i;
1791 	struct udf_sb_info *sbi = UDF_SB(sb);
1792 	int last_count = 0;
1793 	int ret;
1794 
1795 	/* First try user provided anchor */
1796 	if (sbi->s_anchor) {
1797 		ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
1798 		if (ret != -EAGAIN)
1799 			return ret;
1800 	}
	/*
	 * According to the specification, the anchor is stored in one of
	 * these locations:
	 *     block 256
	 *     lastblock - 256
	 *     lastblock
	 * However, if the disc isn't closed, it may also be at block 512.
	 */
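	/*
	 * Block 256 (and block 512 at the end) is probed relative to the
	 * session start so that multisession media work as well.
	 */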
1808 	ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
1809 	if (ret != -EAGAIN)
1810 		return ret;
1811 	/*
1812 	 * The trouble is which block is the last one. Drives often misreport
1813 	 * this so we try various possibilities.
1814 	 */
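	/*
	 * The -150/-152 candidates are meant to cope with CD drives that
	 * include the post-gap / run-out sectors in the reported size.
	 */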
1815 	last[last_count++] = *lastblock;
1816 	if (*lastblock >= 1)
1817 		last[last_count++] = *lastblock - 1;
1818 	last[last_count++] = *lastblock + 1;
1819 	if (*lastblock >= 2)
1820 		last[last_count++] = *lastblock - 2;
1821 	if (*lastblock >= 150)
1822 		last[last_count++] = *lastblock - 150;
1823 	if (*lastblock >= 152)
1824 		last[last_count++] = *lastblock - 152;
1825 
1826 	for (i = 0; i < last_count; i++) {
1827 		if (last[i] >= sb->s_bdev->bd_inode->i_size >>
1828 				sb->s_blocksize_bits)
1829 			continue;
1830 		ret = udf_check_anchor_block(sb, last[i], fileset);
1831 		if (ret != -EAGAIN) {
1832 			if (!ret)
1833 				*lastblock = last[i];
1834 			return ret;
1835 		}
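		/* An anchor at last[i] - 256 cannot exist on media this small */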
1836 		if (last[i] < 256)
1837 			continue;
1838 		ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
1839 		if (ret != -EAGAIN) {
1840 			if (!ret)
1841 				*lastblock = last[i];
1842 			return ret;
1843 		}
1844 	}
1845 
1846 	/* Finally try block 512 in case media is open */
1847 	return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
1848 }
1849 
/*
 * Find an anchor volume descriptor and load the Volume Descriptor Sequence
 * from the area it describes. The function expects sbi->s_last_block to be
 * the last block on the media.
 *
 * Returns <0 on error, 0 if an anchor was found. -EAGAIN is special, meaning
 * no anchor was found.
 */
1858 static int udf_find_anchor(struct super_block *sb,
1859 			   struct kernel_lb_addr *fileset)
1860 {
1861 	struct udf_sb_info *sbi = UDF_SB(sb);
1862 	sector_t lastblock = sbi->s_last_block;
1863 	int ret;
1864 
1865 	ret = udf_scan_anchors(sb, &lastblock, fileset);
1866 	if (ret != -EAGAIN)
1867 		goto out;
1868 
	/* No anchor found? Try again with VARCONV remapping of block numbers */
	UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
	/*
	 * First try with the last block number effectively unconverted:
	 * pre-applying udf_variable_to_fixed() cancels out the remapping
	 * done at read time now that UDF_FLAG_VARCONV is set.
	 */
	lastblock = udf_variable_to_fixed(sbi->s_last_block);
1873 	ret = udf_scan_anchors(sb, &lastblock, fileset);
1874 	if (ret != -EAGAIN)
1875 		goto out;
1876 
	/* Then try the drive-reported last block, remapped at read time */
	lastblock = sbi->s_last_block;
1879 	ret = udf_scan_anchors(sb, &lastblock, fileset);
1880 	if (ret < 0) {
1881 		/* VARCONV didn't help. Clear it. */
1882 		UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
1883 	}
1884 out:
1885 	if (ret == 0)
1886 		sbi->s_last_block = lastblock;
1887 	return ret;
1888 }
1889 
1890 /*
1891  * Check Volume Structure Descriptor, find Anchor block and load Volume
1892  * Descriptor Sequence.
1893  *
1894  * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
1895  * block was not found.
1896  */
1897 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
1898 			int silent, struct kernel_lb_addr *fileset)
1899 {
1900 	struct udf_sb_info *sbi = UDF_SB(sb);
1901 	loff_t nsr_off;
1902 	int ret;
1903 
1904 	if (!sb_set_blocksize(sb, uopt->blocksize)) {
1905 		if (!silent)
1906 			udf_warn(sb, "Bad block size\n");
1907 		return -EINVAL;
1908 	}
1909 	sbi->s_last_block = uopt->lastblock;
1910 	if (!uopt->novrs) {
1911 		/* Check that it is NSR02 compliant */
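		/*
		 * udf_check_vsd() returns the byte offset of the NSR
		 * descriptor, 0 when no Volume Recognition Sequence was
		 * found, and -1 when the first VSD sector could not be read.
		 */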
1912 		nsr_off = udf_check_vsd(sb);
		if (!nsr_off) {
			if (!silent)
				udf_warn(sb, "No VRS found\n");
			return -EINVAL;
		}
		if (nsr_off == -1)
			udf_debug("Failed to read sector at offset %d. Assuming open disc. Skipping validity check\n",
				  VSD_FIRST_SECTOR_OFFSET);
1922 		if (!sbi->s_last_block)
1923 			sbi->s_last_block = udf_get_last_block(sb);
1924 	} else {
1925 		udf_debug("Validity check skipped because of novrs option\n");
1926 	}
1927 
1928 	/* Look for anchor block and load Volume Descriptor Sequence */
1929 	sbi->s_anchor = uopt->anchor;
1930 	ret = udf_find_anchor(sb, fileset);
1931 	if (ret < 0) {
1932 		if (!silent && ret == -EAGAIN)
1933 			udf_warn(sb, "No anchor found\n");
1934 		return ret;
1935 	}
1936 	return 0;
1937 }
1938 
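/*
 * Mark the Logical Volume Integrity Descriptor as OPEN so that an unclean
 * shutdown can be detected later, and flush it to the media right away.
 */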
1939 static void udf_open_lvid(struct super_block *sb)
1940 {
1941 	struct udf_sb_info *sbi = UDF_SB(sb);
1942 	struct buffer_head *bh = sbi->s_lvid_bh;
1943 	struct logicalVolIntegrityDesc *lvid;
1944 	struct logicalVolIntegrityDescImpUse *lvidiu;
1945 
1946 	if (!bh)
1947 		return;
1948 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1949 	lvidiu = udf_sb_lvidiu(sb);
1950 	if (!lvidiu)
1951 		return;
1952 
1953 	mutex_lock(&sbi->s_alloc_mutex);
1954 	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1955 	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1956 	udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
1957 				CURRENT_TIME);
1958 	lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
1959 
1960 	lvid->descTag.descCRC = cpu_to_le16(
1961 		crc_itu_t(0, (char *)lvid + sizeof(struct tag),
1962 			le16_to_cpu(lvid->descTag.descCRCLength)));
1963 
1964 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
1965 	mark_buffer_dirty(bh);
1966 	sbi->s_lvid_dirty = 0;
1967 	mutex_unlock(&sbi->s_alloc_mutex);
1968 	/* Make opening of filesystem visible on the media immediately */
1969 	sync_dirty_buffer(bh);
1970 }
1971 
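/*
 * Mark the LVID as CLOSED again on a clean shutdown of the filesystem and
 * record the highest UDF revision that may have been written to the media.
 */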
1972 static void udf_close_lvid(struct super_block *sb)
1973 {
1974 	struct udf_sb_info *sbi = UDF_SB(sb);
1975 	struct buffer_head *bh = sbi->s_lvid_bh;
1976 	struct logicalVolIntegrityDesc *lvid;
1977 	struct logicalVolIntegrityDescImpUse *lvidiu;
1978 
1979 	if (!bh)
1980 		return;
1981 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1982 	lvidiu = udf_sb_lvidiu(sb);
1983 	if (!lvidiu)
1984 		return;
1985 
1986 	mutex_lock(&sbi->s_alloc_mutex);
1987 	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1988 	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1989 	udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
1990 	if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
1991 		lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
1992 	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
1993 		lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
1994 	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
1995 		lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
1996 	lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
1997 
1998 	lvid->descTag.descCRC = cpu_to_le16(
1999 			crc_itu_t(0, (char *)lvid + sizeof(struct tag),
2000 				le16_to_cpu(lvid->descTag.descCRCLength)));
2001 
2002 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
2003 	/*
2004 	 * We set buffer uptodate unconditionally here to avoid spurious
2005 	 * warnings from mark_buffer_dirty() when previous EIO has marked
2006 	 * the buffer as !uptodate
2007 	 */
2008 	set_buffer_uptodate(bh);
2009 	mark_buffer_dirty(bh);
2010 	sbi->s_lvid_dirty = 0;
2011 	mutex_unlock(&sbi->s_alloc_mutex);
2012 	/* Make closing of filesystem visible on the media immediately */
2013 	sync_dirty_buffer(bh);
2014 }
2015 
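/*
 * Hand out the next unique ID from the LVID header and advance the stored
 * counter, skipping the values 0-15 in the low 32 bits that UDF reserves.
 */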
2016 u64 lvid_get_unique_id(struct super_block *sb)
2017 {
2018 	struct buffer_head *bh;
2019 	struct udf_sb_info *sbi = UDF_SB(sb);
2020 	struct logicalVolIntegrityDesc *lvid;
2021 	struct logicalVolHeaderDesc *lvhd;
2022 	u64 uniqueID;
2023 	u64 ret;
2024 
2025 	bh = sbi->s_lvid_bh;
2026 	if (!bh)
2027 		return 0;
2028 
2029 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2030 	lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
2031 
2032 	mutex_lock(&sbi->s_alloc_mutex);
2033 	ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
2034 	if (!(++uniqueID & 0xFFFFFFFF))
2035 		uniqueID += 16;
2036 	lvhd->uniqueID = cpu_to_le64(uniqueID);
2037 	mutex_unlock(&sbi->s_alloc_mutex);
2038 	mark_buffer_dirty(bh);
2039 
2040 	return ret;
2041 }
2042 
2043 static int udf_fill_super(struct super_block *sb, void *options, int silent)
2044 {
2045 	int ret = -EINVAL;
2046 	struct inode *inode = NULL;
2047 	struct udf_options uopt;
2048 	struct kernel_lb_addr rootdir, fileset;
2049 	struct udf_sb_info *sbi;
2050 
2051 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
2052 	uopt.uid = INVALID_UID;
2053 	uopt.gid = INVALID_GID;
2054 	uopt.umask = 0;
2055 	uopt.fmode = UDF_INVALID_MODE;
2056 	uopt.dmode = UDF_INVALID_MODE;
2057 
2058 	sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
2059 	if (!sbi)
2060 		return -ENOMEM;
2061 
2062 	sb->s_fs_info = sbi;
2063 
2064 	mutex_init(&sbi->s_alloc_mutex);
2065 
2066 	if (!udf_parse_options((char *)options, &uopt, false))
2067 		goto error_out;
2068 
2069 	if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
2070 	    uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
2071 		udf_err(sb, "utf8 cannot be combined with iocharset\n");
2072 		goto error_out;
2073 	}
2074 #ifdef CONFIG_UDF_NLS
2075 	if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
2076 		uopt.nls_map = load_nls_default();
2077 		if (!uopt.nls_map)
2078 			uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
2079 		else
2080 			udf_debug("Using default NLS map\n");
2081 	}
2082 #endif
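	/* Default to UTF-8 name translation when no NLS mapping is in use */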
2083 	if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
2084 		uopt.flags |= (1 << UDF_FLAG_UTF8);
2085 
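	/* Mark the fileset location as unknown until the VDS provides it */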
2086 	fileset.logicalBlockNum = 0xFFFFFFFF;
2087 	fileset.partitionReferenceNum = 0xFFFF;
2088 
2089 	sbi->s_flags = uopt.flags;
2090 	sbi->s_uid = uopt.uid;
2091 	sbi->s_gid = uopt.gid;
2092 	sbi->s_umask = uopt.umask;
2093 	sbi->s_fmode = uopt.fmode;
2094 	sbi->s_dmode = uopt.dmode;
2095 	sbi->s_nls_map = uopt.nls_map;
2096 	rwlock_init(&sbi->s_cred_lock);
2097 
2098 	if (uopt.session == 0xFFFFFFFF)
2099 		sbi->s_session = udf_get_last_session(sb);
2100 	else
2101 		sbi->s_session = uopt.session;
2102 
2103 	udf_debug("Multi-session=%d\n", sbi->s_session);
2104 
2105 	/* Fill in the rest of the superblock */
2106 	sb->s_op = &udf_sb_ops;
2107 	sb->s_export_op = &udf_export_ops;
2108 
2109 	sb->s_magic = UDF_SUPER_MAGIC;
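	/* UDF timestamps have microsecond resolution */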
2110 	sb->s_time_gran = 1000;
2111 
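	/*
	 * If no block size was given, probe with the device's logical block
	 * size first and fall back to the UDF default of 2048 bytes, which
	 * is what optical media typically use.
	 */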
2112 	if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
2113 		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2114 	} else {
2115 		uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
2116 		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2117 		if (ret == -EAGAIN && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
2118 			if (!silent)
2119 				pr_notice("Rescanning with blocksize %d\n",
2120 					  UDF_DEFAULT_BLOCKSIZE);
2121 			brelse(sbi->s_lvid_bh);
2122 			sbi->s_lvid_bh = NULL;
2123 			uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;
2124 			ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2125 		}
2126 	}
2127 	if (ret < 0) {
2128 		if (ret == -EAGAIN) {
2129 			udf_warn(sb, "No partition found (1)\n");
2130 			ret = -EINVAL;
2131 		}
2132 		goto error_out;
2133 	}
2134 
2135 	udf_debug("Lastblock=%d\n", sbi->s_last_block);
2136 
2137 	if (sbi->s_lvid_bh) {
2138 		struct logicalVolIntegrityDescImpUse *lvidiu =
2139 							udf_sb_lvidiu(sb);
2140 		uint16_t minUDFReadRev;
2141 		uint16_t minUDFWriteRev;
2142 
2143 		if (!lvidiu) {
2144 			ret = -EINVAL;
2145 			goto error_out;
2146 		}
2147 		minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2148 		minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2149 		if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2150 			udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2151 				minUDFReadRev,
2152 				UDF_MAX_READ_VERSION);
2153 			ret = -EINVAL;
2154 			goto error_out;
2155 		} else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
2156 			   !(sb->s_flags & MS_RDONLY)) {
2157 			ret = -EACCES;
2158 			goto error_out;
2159 		}
2160 
2161 		sbi->s_udfrev = minUDFWriteRev;
2162 
2163 		if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
2164 			UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
2165 		if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
2166 			UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
2167 	}
2168 
2169 	if (!sbi->s_partitions) {
2170 		udf_warn(sb, "No partition found (2)\n");
2171 		ret = -EINVAL;
2172 		goto error_out;
2173 	}
2174 
2175 	if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
2176 			UDF_PART_FLAG_READ_ONLY &&
2177 	    !(sb->s_flags & MS_RDONLY)) {
2178 		ret = -EACCES;
2179 		goto error_out;
2180 	}
2181 
2182 	if (udf_find_fileset(sb, &fileset, &rootdir)) {
2183 		udf_warn(sb, "No fileset found\n");
2184 		ret = -EINVAL;
2185 		goto error_out;
2186 	}
2187 
2188 	if (!silent) {
2189 		struct timestamp ts;
2190 		udf_time_to_disk_stamp(&ts, sbi->s_record_time);
2191 		udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
2192 			 sbi->s_volume_ident,
2193 			 le16_to_cpu(ts.year), ts.month, ts.day,
2194 			 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
2195 	}
2196 	if (!(sb->s_flags & MS_RDONLY))
2197 		udf_open_lvid(sb);
2198 
	/*
	 * Assign the root inode. Inodes are looked up by physical block
	 * number; perhaps that's not extensible enough, but it works for now.
	 */
2202 	inode = udf_iget(sb, &rootdir);
2203 	if (!inode) {
2204 		udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
2205 		       rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
2206 		ret = -EIO;
2207 		goto error_out;
2208 	}
2209 
2210 	/* Allocate a dentry for the root inode */
2211 	sb->s_root = d_make_root(inode);
2212 	if (!sb->s_root) {
2213 		udf_err(sb, "Couldn't allocate root dentry\n");
2214 		ret = -ENOMEM;
2215 		goto error_out;
2216 	}
2217 	sb->s_maxbytes = MAX_LFS_FILESIZE;
2218 	sb->s_max_links = UDF_MAX_LINKS;
2219 	return 0;
2220 
2221 error_out:
2222 	if (sbi->s_vat_inode)
2223 		iput(sbi->s_vat_inode);
2224 #ifdef CONFIG_UDF_NLS
2225 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
2226 		unload_nls(sbi->s_nls_map);
2227 #endif
2228 	if (!(sb->s_flags & MS_RDONLY))
2229 		udf_close_lvid(sb);
2230 	brelse(sbi->s_lvid_bh);
2231 	udf_sb_free_partitions(sb);
2232 	kfree(sbi);
2233 	sb->s_fs_info = NULL;
2234 
2235 	return ret;
2236 }
2237 
2238 void _udf_err(struct super_block *sb, const char *function,
2239 	      const char *fmt, ...)
2240 {
2241 	struct va_format vaf;
2242 	va_list args;
2243 
2244 	va_start(args, fmt);
2245 
2246 	vaf.fmt = fmt;
2247 	vaf.va = &args;
2248 
2249 	pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);
2250 
2251 	va_end(args);
2252 }
2253 
2254 void _udf_warn(struct super_block *sb, const char *function,
2255 	       const char *fmt, ...)
2256 {
2257 	struct va_format vaf;
2258 	va_list args;
2259 
2260 	va_start(args, fmt);
2261 
2262 	vaf.fmt = fmt;
2263 	vaf.va = &args;
2264 
2265 	pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);
2266 
2267 	va_end(args);
2268 }
2269 
2270 static void udf_put_super(struct super_block *sb)
2271 {
2272 	struct udf_sb_info *sbi;
2273 
2274 	sbi = UDF_SB(sb);
2275 
2276 	if (sbi->s_vat_inode)
2277 		iput(sbi->s_vat_inode);
2278 #ifdef CONFIG_UDF_NLS
2279 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
2280 		unload_nls(sbi->s_nls_map);
2281 #endif
2282 	if (!(sb->s_flags & MS_RDONLY))
2283 		udf_close_lvid(sb);
2284 	brelse(sbi->s_lvid_bh);
2285 	udf_sb_free_partitions(sb);
2286 	kfree(sb->s_fs_info);
2287 	sb->s_fs_info = NULL;
2288 }
2289 
2290 static int udf_sync_fs(struct super_block *sb, int wait)
2291 {
2292 	struct udf_sb_info *sbi = UDF_SB(sb);
2293 
2294 	mutex_lock(&sbi->s_alloc_mutex);
2295 	if (sbi->s_lvid_dirty) {
2296 		/*
2297 		 * Blockdevice will be synced later so we don't have to submit
2298 		 * the buffer for IO
2299 		 */
2300 		mark_buffer_dirty(sbi->s_lvid_bh);
2301 		sbi->s_lvid_dirty = 0;
2302 	}
2303 	mutex_unlock(&sbi->s_alloc_mutex);
2304 
2305 	return 0;
2306 }
2307 
2308 static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2309 {
2310 	struct super_block *sb = dentry->d_sb;
2311 	struct udf_sb_info *sbi = UDF_SB(sb);
2312 	struct logicalVolIntegrityDescImpUse *lvidiu;
2313 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2314 
2315 	lvidiu = udf_sb_lvidiu(sb);
2316 	buf->f_type = UDF_SUPER_MAGIC;
2317 	buf->f_bsize = sb->s_blocksize;
2318 	buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
2319 	buf->f_bfree = udf_count_free(sb);
2320 	buf->f_bavail = buf->f_bfree;
2321 	buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
2322 					  le32_to_cpu(lvidiu->numDirs)) : 0)
2323 			+ buf->f_bfree;
2324 	buf->f_ffree = buf->f_bfree;
2325 	buf->f_namelen = UDF_NAME_LEN - 2;
2326 	buf->f_fsid.val[0] = (u32)id;
2327 	buf->f_fsid.val[1] = (u32)(id >> 32);
2328 
2329 	return 0;
2330 }
2331 
2332 static unsigned int udf_count_free_bitmap(struct super_block *sb,
2333 					  struct udf_bitmap *bitmap)
2334 {
2335 	struct buffer_head *bh = NULL;
2336 	unsigned int accum = 0;
2337 	int index;
2338 	int block = 0, newblock;
2339 	struct kernel_lb_addr loc;
2340 	uint32_t bytes;
2341 	uint8_t *ptr;
2342 	uint16_t ident;
2343 	struct spaceBitmapDesc *bm;
2344 
2345 	loc.logicalBlockNum = bitmap->s_extPosition;
2346 	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
2347 	bh = udf_read_ptagged(sb, &loc, 0, &ident);
2348 
2349 	if (!bh) {
2350 		udf_err(sb, "udf_count_free failed\n");
2351 		goto out;
2352 	} else if (ident != TAG_IDENT_SBD) {
2353 		brelse(bh);
2354 		udf_err(sb, "udf_count_free failed\n");
2355 		goto out;
2356 	}
2357 
2358 	bm = (struct spaceBitmapDesc *)bh->b_data;
2359 	bytes = le32_to_cpu(bm->numOfBytes);
2360 	index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
2361 	ptr = (uint8_t *)bh->b_data;
2362 
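	/* A set bit in the space bitmap marks a free block; sum them up */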
2363 	while (bytes > 0) {
2364 		u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
2365 		accum += bitmap_weight((const unsigned long *)(ptr + index),
2366 					cur_bytes * 8);
2367 		bytes -= cur_bytes;
2368 		if (bytes) {
2369 			brelse(bh);
2370 			newblock = udf_get_lb_pblock(sb, &loc, ++block);
2371 			bh = udf_tread(sb, newblock);
2372 			if (!bh) {
2373 				udf_debug("read failed\n");
2374 				goto out;
2375 			}
2376 			index = 0;
2377 			ptr = (uint8_t *)bh->b_data;
2378 		}
2379 	}
2380 	brelse(bh);
2381 out:
2382 	return accum;
2383 }
2384 
2385 static unsigned int udf_count_free_table(struct super_block *sb,
2386 					 struct inode *table)
2387 {
2388 	unsigned int accum = 0;
2389 	uint32_t elen;
2390 	struct kernel_lb_addr eloc;
2391 	int8_t etype;
2392 	struct extent_position epos;
2393 
2394 	mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
2395 	epos.block = UDF_I(table)->i_location;
2396 	epos.offset = sizeof(struct unallocSpaceEntry);
2397 	epos.bh = NULL;
2398 
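	/*
	 * Each extent in the free-space table describes a run of free
	 * blocks; elen is in bytes, so convert it to a block count.
	 */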
2399 	while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
2400 		accum += (elen >> table->i_sb->s_blocksize_bits);
2401 
2402 	brelse(epos.bh);
2403 	mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
2404 
2405 	return accum;
2406 }
2407 
2408 static unsigned int udf_count_free(struct super_block *sb)
2409 {
2410 	unsigned int accum = 0;
2411 	struct udf_sb_info *sbi;
2412 	struct udf_part_map *map;
2413 
2414 	sbi = UDF_SB(sb);
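	/*
	 * Prefer the free space count cached in the LVID; 0xFFFFFFFF there
	 * means the value is not maintained, so fall back to counting the
	 * space bitmap or table below.
	 */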
2415 	if (sbi->s_lvid_bh) {
2416 		struct logicalVolIntegrityDesc *lvid =
2417 			(struct logicalVolIntegrityDesc *)
2418 			sbi->s_lvid_bh->b_data;
2419 		if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
2420 			accum = le32_to_cpu(
2421 					lvid->freeSpaceTable[sbi->s_partition]);
2422 			if (accum == 0xFFFFFFFF)
2423 				accum = 0;
2424 		}
2425 	}
2426 
2427 	if (accum)
2428 		return accum;
2429 
2430 	map = &sbi->s_partmaps[sbi->s_partition];
2431 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
2432 		accum += udf_count_free_bitmap(sb,
2433 					       map->s_uspace.s_bitmap);
2434 	}
2435 	if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
2436 		accum += udf_count_free_bitmap(sb,
2437 					       map->s_fspace.s_bitmap);
2438 	}
2439 	if (accum)
2440 		return accum;
2441 
2442 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
2443 		accum += udf_count_free_table(sb,
2444 					      map->s_uspace.s_table);
2445 	}
2446 	if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
2447 		accum += udf_count_free_table(sb,
2448 					      map->s_fspace.s_table);
2449 	}
2450 
2451 	return accum;
2452 }
2453