1  /*
2   * super.c
3   *
4   * PURPOSE
5   *  Super block routines for the OSTA-UDF(tm) filesystem.
6   *
7   * DESCRIPTION
8   *  OSTA-UDF(tm) = Optical Storage Technology Association
9   *  Universal Disk Format.
10   *
11   *  This code is based on version 2.00 of the UDF specification,
12   *  and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
13   *    http://www.osta.org/
14   *    http://www.ecma.ch/
15   *    http://www.iso.org/
16   *
17   * COPYRIGHT
18   *  This file is distributed under the terms of the GNU General Public
19   *  License (GPL). Copies of the GPL can be obtained from:
20   *    ftp://prep.ai.mit.edu/pub/gnu/GPL
21   *  Each contributing author retains all rights to their own work.
22   *
23   *  (C) 1998 Dave Boynton
24   *  (C) 1998-2004 Ben Fennema
25   *  (C) 2000 Stelias Computing Inc
26   *
27   * HISTORY
28   *
29   *  09/24/98 dgb  changed to allow compiling outside of kernel, and
30   *                added some debugging.
31   *  10/01/98 dgb  updated to allow (some) possibility of compiling w/2.0.34
32   *  10/16/98      attempting some multi-session support
33   *  10/17/98      added freespace count for "df"
34   *  11/11/98 gr   added novrs option
35   *  11/26/98 dgb  added fileset,anchor mount options
36   *  12/06/98 blf  really hosed things royally. vat/sparing support. sequenced
37   *                vol descs. rewrote option handling based on isofs
38   *  12/20/98      find the free space bitmap (if it exists)
39   */
40  
41  #include "udfdecl.h"
42  
43  #include <linux/blkdev.h>
44  #include <linux/slab.h>
45  #include <linux/kernel.h>
46  #include <linux/module.h>
47  #include <linux/parser.h>
48  #include <linux/stat.h>
49  #include <linux/cdrom.h>
50  #include <linux/nls.h>
51  #include <linux/vfs.h>
52  #include <linux/vmalloc.h>
53  #include <linux/errno.h>
54  #include <linux/mount.h>
55  #include <linux/seq_file.h>
56  #include <linux/bitmap.h>
57  #include <linux/crc-itu-t.h>
58  #include <linux/log2.h>
59  #include <asm/byteorder.h>
60  
61  #include "udf_sb.h"
62  #include "udf_i.h"
63  
64  #include <linux/init.h>
65  #include <linux/uaccess.h>
66  
67  enum {
68  	VDS_POS_PRIMARY_VOL_DESC,
69  	VDS_POS_UNALLOC_SPACE_DESC,
70  	VDS_POS_LOGICAL_VOL_DESC,
71  	VDS_POS_IMP_USE_VOL_DESC,
72  	VDS_POS_LENGTH
73  };
74  
75  #define VSD_FIRST_SECTOR_OFFSET		32768
76  #define VSD_MAX_SECTOR_OFFSET		0x800000
77  
78  /*
79   * Maximum number of Terminating Descriptor / Logical Volume Integrity
 * Descriptor redirections. The chosen numbers are arbitrary: large enough
 * that we hopefully do not limit any real use of rewritten inodes on
 * write-once media, yet small enough to avoid looping for too long on
 * corrupted media.
83   */
84  #define UDF_MAX_TD_NESTING 64
85  #define UDF_MAX_LVID_NESTING 1000
86  
87  enum { UDF_MAX_LINKS = 0xffff };
88  
89  /* These are the "meat" - everything else is stuffing */
90  static int udf_fill_super(struct super_block *, void *, int);
91  static void udf_put_super(struct super_block *);
92  static int udf_sync_fs(struct super_block *, int);
93  static int udf_remount_fs(struct super_block *, int *, char *);
94  static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
95  static int udf_find_fileset(struct super_block *, struct kernel_lb_addr *,
96  			    struct kernel_lb_addr *);
97  static void udf_load_fileset(struct super_block *, struct buffer_head *,
98  			     struct kernel_lb_addr *);
99  static void udf_open_lvid(struct super_block *);
100  static void udf_close_lvid(struct super_block *);
101  static unsigned int udf_count_free(struct super_block *);
102  static int udf_statfs(struct dentry *, struct kstatfs *);
103  static int udf_show_options(struct seq_file *, struct dentry *);
104  
105  struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
106  {
107  	struct logicalVolIntegrityDesc *lvid;
108  	unsigned int partnum;
109  	unsigned int offset;
110  
111  	if (!UDF_SB(sb)->s_lvid_bh)
112  		return NULL;
113  	lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
114  	partnum = le32_to_cpu(lvid->numOfPartitions);
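	/*
	 * Sanity check: the implementation use area must still fit in the
	 * block behind the freeSpaceTable and sizeTable arrays that precede
	 * it, each holding partnum 32-bit entries.
	 */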
115  	if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
116  	     offsetof(struct logicalVolIntegrityDesc, impUse)) /
117  	     (2 * sizeof(uint32_t)) < partnum) {
118  		udf_err(sb, "Logical volume integrity descriptor corrupted "
119  			"(numOfPartitions = %u)!\n", partnum);
120  		return NULL;
121  	}
122  	/* The offset is to skip freeSpaceTable and sizeTable arrays */
123  	offset = partnum * 2 * sizeof(uint32_t);
124  	return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
125  }
126  
127  /* UDF filesystem type */
128  static struct dentry *udf_mount(struct file_system_type *fs_type,
129  		      int flags, const char *dev_name, void *data)
130  {
131  	return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super);
132  }
133  
134  static struct file_system_type udf_fstype = {
135  	.owner		= THIS_MODULE,
136  	.name		= "udf",
137  	.mount		= udf_mount,
138  	.kill_sb	= kill_block_super,
139  	.fs_flags	= FS_REQUIRES_DEV,
140  };
141  MODULE_ALIAS_FS("udf");
142  
143  static struct kmem_cache *udf_inode_cachep;
144  
145  static struct inode *udf_alloc_inode(struct super_block *sb)
146  {
147  	struct udf_inode_info *ei;
148  	ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
149  	if (!ei)
150  		return NULL;
151  
152  	ei->i_unique = 0;
153  	ei->i_lenExtents = 0;
154  	ei->i_next_alloc_block = 0;
155  	ei->i_next_alloc_goal = 0;
156  	ei->i_strat4096 = 0;
157  	init_rwsem(&ei->i_data_sem);
158  	ei->cached_extent.lstart = -1;
159  	spin_lock_init(&ei->i_extent_cache_lock);
160  
161  	return &ei->vfs_inode;
162  }
163  
164  static void udf_i_callback(struct rcu_head *head)
165  {
166  	struct inode *inode = container_of(head, struct inode, i_rcu);
167  	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
168  }
169  
170  static void udf_destroy_inode(struct inode *inode)
171  {
172  	call_rcu(&inode->i_rcu, udf_i_callback);
173  }
174  
175  static void init_once(void *foo)
176  {
177  	struct udf_inode_info *ei = (struct udf_inode_info *)foo;
178  
179  	ei->i_ext.i_data = NULL;
180  	inode_init_once(&ei->vfs_inode);
181  }
182  
183  static int __init init_inodecache(void)
184  {
185  	udf_inode_cachep = kmem_cache_create("udf_inode_cache",
186  					     sizeof(struct udf_inode_info),
187  					     0, (SLAB_RECLAIM_ACCOUNT |
188  						 SLAB_MEM_SPREAD |
189  						 SLAB_ACCOUNT),
190  					     init_once);
191  	if (!udf_inode_cachep)
192  		return -ENOMEM;
193  	return 0;
194  }
195  
196  static void destroy_inodecache(void)
197  {
198  	/*
199  	 * Make sure all delayed rcu free inodes are flushed before we
200  	 * destroy cache.
201  	 */
202  	rcu_barrier();
203  	kmem_cache_destroy(udf_inode_cachep);
204  }
205  
206  /* Superblock operations */
207  static const struct super_operations udf_sb_ops = {
208  	.alloc_inode	= udf_alloc_inode,
209  	.destroy_inode	= udf_destroy_inode,
210  	.write_inode	= udf_write_inode,
211  	.evict_inode	= udf_evict_inode,
212  	.put_super	= udf_put_super,
213  	.sync_fs	= udf_sync_fs,
214  	.statfs		= udf_statfs,
215  	.remount_fs	= udf_remount_fs,
216  	.show_options	= udf_show_options,
217  };
218  
219  struct udf_options {
220  	unsigned char novrs;
221  	unsigned int blocksize;
222  	unsigned int session;
223  	unsigned int lastblock;
224  	unsigned int anchor;
225  	unsigned int flags;
226  	umode_t umask;
227  	kgid_t gid;
228  	kuid_t uid;
229  	umode_t fmode;
230  	umode_t dmode;
231  	struct nls_table *nls_map;
232  };
233  
234  static int __init init_udf_fs(void)
235  {
236  	int err;
237  
238  	err = init_inodecache();
239  	if (err)
240  		goto out1;
241  	err = register_filesystem(&udf_fstype);
242  	if (err)
243  		goto out;
244  
245  	return 0;
246  
247  out:
248  	destroy_inodecache();
249  
250  out1:
251  	return err;
252  }
253  
254  static void __exit exit_udf_fs(void)
255  {
256  	unregister_filesystem(&udf_fstype);
257  	destroy_inodecache();
258  }
259  
260  static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
261  {
262  	struct udf_sb_info *sbi = UDF_SB(sb);
263  
264  	sbi->s_partmaps = kcalloc(count, sizeof(*sbi->s_partmaps), GFP_KERNEL);
265  	if (!sbi->s_partmaps) {
266  		sbi->s_partitions = 0;
267  		return -ENOMEM;
268  	}
269  
270  	sbi->s_partitions = count;
271  	return 0;
272  }
273  
274  static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
275  {
276  	int i;
277  	int nr_groups = bitmap->s_nr_groups;
278  
279  	for (i = 0; i < nr_groups; i++)
280  		if (bitmap->s_block_bitmap[i])
281  			brelse(bitmap->s_block_bitmap[i]);
282  
283  	kvfree(bitmap);
284  }
285  
286  static void udf_free_partition(struct udf_part_map *map)
287  {
288  	int i;
289  	struct udf_meta_data *mdata;
290  
291  	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
292  		iput(map->s_uspace.s_table);
293  	if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
294  		iput(map->s_fspace.s_table);
295  	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
296  		udf_sb_free_bitmap(map->s_uspace.s_bitmap);
297  	if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
298  		udf_sb_free_bitmap(map->s_fspace.s_bitmap);
299  	if (map->s_partition_type == UDF_SPARABLE_MAP15)
300  		for (i = 0; i < 4; i++)
301  			brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
302  	else if (map->s_partition_type == UDF_METADATA_MAP25) {
303  		mdata = &map->s_type_specific.s_metadata;
304  		iput(mdata->s_metadata_fe);
305  		mdata->s_metadata_fe = NULL;
306  
307  		iput(mdata->s_mirror_fe);
308  		mdata->s_mirror_fe = NULL;
309  
310  		iput(mdata->s_bitmap_fe);
311  		mdata->s_bitmap_fe = NULL;
312  	}
313  }
314  
315  static void udf_sb_free_partitions(struct super_block *sb)
316  {
317  	struct udf_sb_info *sbi = UDF_SB(sb);
318  	int i;
319  
320  	if (!sbi->s_partmaps)
321  		return;
322  	for (i = 0; i < sbi->s_partitions; i++)
323  		udf_free_partition(&sbi->s_partmaps[i]);
324  	kfree(sbi->s_partmaps);
325  	sbi->s_partmaps = NULL;
326  }
327  
328  static int udf_show_options(struct seq_file *seq, struct dentry *root)
329  {
330  	struct super_block *sb = root->d_sb;
331  	struct udf_sb_info *sbi = UDF_SB(sb);
332  
333  	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
334  		seq_puts(seq, ",nostrict");
335  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
336  		seq_printf(seq, ",bs=%lu", sb->s_blocksize);
337  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
338  		seq_puts(seq, ",unhide");
339  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
340  		seq_puts(seq, ",undelete");
341  	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
342  		seq_puts(seq, ",noadinicb");
343  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
344  		seq_puts(seq, ",shortad");
345  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
346  		seq_puts(seq, ",uid=forget");
347  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
348  		seq_puts(seq, ",gid=forget");
349  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
350  		seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
351  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
352  		seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
353  	if (sbi->s_umask != 0)
354  		seq_printf(seq, ",umask=%ho", sbi->s_umask);
355  	if (sbi->s_fmode != UDF_INVALID_MODE)
356  		seq_printf(seq, ",mode=%ho", sbi->s_fmode);
357  	if (sbi->s_dmode != UDF_INVALID_MODE)
358  		seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
359  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
360  		seq_printf(seq, ",session=%d", sbi->s_session);
361  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
362  		seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
363  	if (sbi->s_anchor != 0)
364  		seq_printf(seq, ",anchor=%u", sbi->s_anchor);
365  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
366  		seq_puts(seq, ",utf8");
367  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
368  		seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
369  
370  	return 0;
371  }
372  
373  /*
374   * udf_parse_options
375   *
376   * PURPOSE
377   *	Parse mount options.
378   *
379   * DESCRIPTION
380   *	The following mount options are supported:
381   *
382   *	gid=		Set the default group.
383   *	umask=		Set the default umask.
384   *	mode=		Set the default file permissions.
385   *	dmode=		Set the default directory permissions.
386   *	uid=		Set the default user.
387   *	bs=		Set the block size.
388   *	unhide		Show otherwise hidden files.
389   *	undelete	Show deleted files in lists.
390   *	adinicb		Embed data in the inode (default)
391   *	noadinicb	Don't embed data in the inode
392   *	shortad		Use short ad's
393   *	longad		Use long ad's (default)
394   *	nostrict	Unset strict conformance
395   *	iocharset=	Set the NLS character set
396   *
397   *	The remaining are for debugging and disaster recovery:
398   *
 *	novrs		Skip the volume recognition sequence
400   *
 *	The following expect an offset from 0.
402   *
403   *	session=	Set the CDROM session (default= last session)
404   *	anchor=		Override standard anchor location. (default= 256)
405   *	volume=		Override the VolumeDesc location. (unused)
406   *	partition=	Override the PartitionDesc location. (unused)
 *	lastblock=	Set the last block of the filesystem.
408   *
 *	The following expect an offset from the partition root.
410   *
411   *	fileset=	Override the fileset block location. (unused)
412   *	rootdir=	Override the root directory location. (unused)
413   *		WARNING: overriding the rootdir to a non-directory may
414   *		yield highly unpredictable results.
415   *
416   * PRE-CONDITIONS
417   *	options		Pointer to mount options string.
418   *	uopts		Pointer to mount options variable.
419   *
420   * POST-CONDITIONS
421   *	<return>	1	Mount options parsed okay.
422   *	<return>	0	Error parsing mount options.
423   *
424   * HISTORY
425   *	July 1, 1997 - Andrew E. Mileski
426   *	Written, tested, and released.
427   */
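
/*
 * Illustrative example (not from this file; the device path and option
 * values are assumptions): a UDF disc mounted with explicit credentials and
 * character set might use
 *
 *	mount -t udf -o uid=1000,gid=1000,umask=022,iocharset=utf8 \
 *		/dev/sr0 /mnt/udf
 *
 * A malformed or unknown option makes udf_parse_options() return 0, and the
 * mount (or remount, see udf_remount_fs() below) is rejected.
 */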
428  
429  enum {
430  	Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
431  	Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
432  	Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
433  	Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
434  	Opt_rootdir, Opt_utf8, Opt_iocharset,
435  	Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
436  	Opt_fmode, Opt_dmode
437  };
438  
439  static const match_table_t tokens = {
440  	{Opt_novrs,	"novrs"},
441  	{Opt_nostrict,	"nostrict"},
442  	{Opt_bs,	"bs=%u"},
443  	{Opt_unhide,	"unhide"},
444  	{Opt_undelete,	"undelete"},
445  	{Opt_noadinicb,	"noadinicb"},
446  	{Opt_adinicb,	"adinicb"},
447  	{Opt_shortad,	"shortad"},
448  	{Opt_longad,	"longad"},
449  	{Opt_uforget,	"uid=forget"},
450  	{Opt_uignore,	"uid=ignore"},
451  	{Opt_gforget,	"gid=forget"},
452  	{Opt_gignore,	"gid=ignore"},
453  	{Opt_gid,	"gid=%u"},
454  	{Opt_uid,	"uid=%u"},
455  	{Opt_umask,	"umask=%o"},
456  	{Opt_session,	"session=%u"},
457  	{Opt_lastblock,	"lastblock=%u"},
458  	{Opt_anchor,	"anchor=%u"},
459  	{Opt_volume,	"volume=%u"},
460  	{Opt_partition,	"partition=%u"},
461  	{Opt_fileset,	"fileset=%u"},
462  	{Opt_rootdir,	"rootdir=%u"},
463  	{Opt_utf8,	"utf8"},
464  	{Opt_iocharset,	"iocharset=%s"},
465  	{Opt_fmode,     "mode=%o"},
466  	{Opt_dmode,     "dmode=%o"},
467  	{Opt_err,	NULL}
468  };
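
/*
 * Each comma-separated option is matched against the table above with
 * match_token(); e.g. "uid=1000" maps to Opt_uid with args[0] spanning
 * "1000", which match_int() then converts.
 */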
469  
470  static int udf_parse_options(char *options, struct udf_options *uopt,
471  			     bool remount)
472  {
473  	char *p;
474  	int option;
475  
476  	uopt->novrs = 0;
477  	uopt->session = 0xFFFFFFFF;
478  	uopt->lastblock = 0;
479  	uopt->anchor = 0;
480  
481  	if (!options)
482  		return 1;
483  
484  	while ((p = strsep(&options, ",")) != NULL) {
485  		substring_t args[MAX_OPT_ARGS];
486  		int token;
487  		unsigned n;
488  		if (!*p)
489  			continue;
490  
491  		token = match_token(p, tokens, args);
492  		switch (token) {
493  		case Opt_novrs:
494  			uopt->novrs = 1;
495  			break;
496  		case Opt_bs:
497  			if (match_int(&args[0], &option))
498  				return 0;
499  			n = option;
500  			if (n != 512 && n != 1024 && n != 2048 && n != 4096)
501  				return 0;
502  			uopt->blocksize = n;
503  			uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
504  			break;
505  		case Opt_unhide:
506  			uopt->flags |= (1 << UDF_FLAG_UNHIDE);
507  			break;
508  		case Opt_undelete:
509  			uopt->flags |= (1 << UDF_FLAG_UNDELETE);
510  			break;
511  		case Opt_noadinicb:
512  			uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
513  			break;
514  		case Opt_adinicb:
515  			uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
516  			break;
517  		case Opt_shortad:
518  			uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
519  			break;
520  		case Opt_longad:
521  			uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
522  			break;
523  		case Opt_gid:
524  			if (match_int(args, &option))
525  				return 0;
526  			uopt->gid = make_kgid(current_user_ns(), option);
527  			if (!gid_valid(uopt->gid))
528  				return 0;
529  			uopt->flags |= (1 << UDF_FLAG_GID_SET);
530  			break;
531  		case Opt_uid:
532  			if (match_int(args, &option))
533  				return 0;
534  			uopt->uid = make_kuid(current_user_ns(), option);
535  			if (!uid_valid(uopt->uid))
536  				return 0;
537  			uopt->flags |= (1 << UDF_FLAG_UID_SET);
538  			break;
539  		case Opt_umask:
540  			if (match_octal(args, &option))
541  				return 0;
542  			uopt->umask = option;
543  			break;
544  		case Opt_nostrict:
545  			uopt->flags &= ~(1 << UDF_FLAG_STRICT);
546  			break;
547  		case Opt_session:
548  			if (match_int(args, &option))
549  				return 0;
550  			uopt->session = option;
551  			if (!remount)
552  				uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
553  			break;
554  		case Opt_lastblock:
555  			if (match_int(args, &option))
556  				return 0;
557  			uopt->lastblock = option;
558  			if (!remount)
559  				uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
560  			break;
561  		case Opt_anchor:
562  			if (match_int(args, &option))
563  				return 0;
564  			uopt->anchor = option;
565  			break;
566  		case Opt_volume:
567  		case Opt_partition:
568  		case Opt_fileset:
569  		case Opt_rootdir:
570  			/* Ignored (never implemented properly) */
571  			break;
572  		case Opt_utf8:
573  			uopt->flags |= (1 << UDF_FLAG_UTF8);
574  			break;
575  		case Opt_iocharset:
576  			if (!remount) {
577  				if (uopt->nls_map)
578  					unload_nls(uopt->nls_map);
579  				uopt->nls_map = load_nls(args[0].from);
580  				uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
581  			}
582  			break;
583  		case Opt_uforget:
584  			uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
585  			break;
586  		case Opt_uignore:
587  		case Opt_gignore:
			/* These options are superseded by uid=<number> and gid=<number> */
589  			break;
590  		case Opt_gforget:
591  			uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
592  			break;
593  		case Opt_fmode:
594  			if (match_octal(args, &option))
595  				return 0;
596  			uopt->fmode = option & 0777;
597  			break;
598  		case Opt_dmode:
599  			if (match_octal(args, &option))
600  				return 0;
601  			uopt->dmode = option & 0777;
602  			break;
603  		default:
604  			pr_err("bad mount option \"%s\" or missing value\n", p);
605  			return 0;
606  		}
607  	}
608  	return 1;
609  }
610  
611  static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
612  {
613  	struct udf_options uopt;
614  	struct udf_sb_info *sbi = UDF_SB(sb);
615  	int error = 0;
616  	struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
617  
618  	sync_filesystem(sb);
619  	if (lvidiu) {
620  		int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
621  		if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & SB_RDONLY))
622  			return -EACCES;
623  	}
624  
625  	uopt.flags = sbi->s_flags;
626  	uopt.uid   = sbi->s_uid;
627  	uopt.gid   = sbi->s_gid;
628  	uopt.umask = sbi->s_umask;
629  	uopt.fmode = sbi->s_fmode;
630  	uopt.dmode = sbi->s_dmode;
631  	uopt.nls_map = NULL;
632  
633  	if (!udf_parse_options(options, &uopt, true))
634  		return -EINVAL;
635  
636  	write_lock(&sbi->s_cred_lock);
637  	sbi->s_flags = uopt.flags;
638  	sbi->s_uid   = uopt.uid;
639  	sbi->s_gid   = uopt.gid;
640  	sbi->s_umask = uopt.umask;
641  	sbi->s_fmode = uopt.fmode;
642  	sbi->s_dmode = uopt.dmode;
643  	write_unlock(&sbi->s_cred_lock);
644  
645  	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
646  		goto out_unlock;
647  
648  	if (*flags & SB_RDONLY)
649  		udf_close_lvid(sb);
650  	else
651  		udf_open_lvid(sb);
652  
653  out_unlock:
654  	return error;
655  }
656  
657  /* Check Volume Structure Descriptors (ECMA 167 2/9.1) */
658  /* We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
659  static loff_t udf_check_vsd(struct super_block *sb)
660  {
661  	struct volStructDesc *vsd = NULL;
662  	loff_t sector = VSD_FIRST_SECTOR_OFFSET;
663  	int sectorsize;
664  	struct buffer_head *bh = NULL;
665  	int nsr02 = 0;
666  	int nsr03 = 0;
667  	struct udf_sb_info *sbi;
668  
669  	sbi = UDF_SB(sb);
670  	if (sb->s_blocksize < sizeof(struct volStructDesc))
671  		sectorsize = sizeof(struct volStructDesc);
672  	else
673  		sectorsize = sb->s_blocksize;
674  
675  	sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
676  
677  	udf_debug("Starting at sector %u (%lu byte sectors)\n",
678  		  (unsigned int)(sector >> sb->s_blocksize_bits),
679  		  sb->s_blocksize);
680  	/* Process the sequence (if applicable). The hard limit on the sector
681  	 * offset is arbitrary, hopefully large enough so that all valid UDF
682  	 * filesystems will be recognised. There is no mention of an upper
683  	 * bound to the size of the volume recognition area in the standard.
	 * The limit prevents the code from reading every sector of a
	 * specially crafted image (like a Blu-ray disc full of CD001
	 * sectors), which could cause minutes or even hours of
	 * uninterruptible I/O. This actually happened with uninitialised
	 * SSD partitions (all 0xFF) before the limit and the checks for all
	 * valid IDs were added. */
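	/*
	 * A UDF volume recognition sequence normally brackets an NSR02 or
	 * NSR03 descriptor between BEA01 and TEA01; CD001 descriptors from a
	 * coexisting ISO 9660 bridge layout are only reported and skipped.
	 */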
690  	for (; !nsr02 && !nsr03 && sector < VSD_MAX_SECTOR_OFFSET;
691  	     sector += sectorsize) {
692  		/* Read a block */
693  		bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
694  		if (!bh)
695  			break;
696  
		/* Look for ISO descriptors */
698  		vsd = (struct volStructDesc *)(bh->b_data +
699  					      (sector & (sb->s_blocksize - 1)));
700  
701  		if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
702  				    VSD_STD_ID_LEN)) {
703  			switch (vsd->structType) {
704  			case 0:
705  				udf_debug("ISO9660 Boot Record found\n");
706  				break;
707  			case 1:
708  				udf_debug("ISO9660 Primary Volume Descriptor found\n");
709  				break;
710  			case 2:
711  				udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
712  				break;
713  			case 3:
714  				udf_debug("ISO9660 Volume Partition Descriptor found\n");
715  				break;
716  			case 255:
717  				udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
718  				break;
719  			default:
720  				udf_debug("ISO9660 VRS (%u) found\n",
721  					  vsd->structType);
722  				break;
723  			}
724  		} else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01,
725  				    VSD_STD_ID_LEN))
726  			; /* nothing */
727  		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01,
728  				    VSD_STD_ID_LEN)) {
729  			brelse(bh);
730  			break;
731  		} else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02,
732  				    VSD_STD_ID_LEN))
733  			nsr02 = sector;
734  		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03,
735  				    VSD_STD_ID_LEN))
736  			nsr03 = sector;
737  		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BOOT2,
738  				    VSD_STD_ID_LEN))
739  			; /* nothing */
740  		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CDW02,
741  				    VSD_STD_ID_LEN))
742  			; /* nothing */
743  		else {
744  			/* invalid id : end of volume recognition area */
745  			brelse(bh);
746  			break;
747  		}
748  		brelse(bh);
749  	}
750  
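	/*
	 * Return the sector of the NSR descriptor that was found (NSR03 wins
	 * over NSR02), -1 if not even the first sector of the recognition
	 * area could be read, or 0 if no NSR descriptor exists.
	 */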
751  	if (nsr03)
752  		return nsr03;
753  	else if (nsr02)
754  		return nsr02;
755  	else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
756  			VSD_FIRST_SECTOR_OFFSET)
757  		return -1;
758  	else
759  		return 0;
760  }
761  
762  static int udf_find_fileset(struct super_block *sb,
763  			    struct kernel_lb_addr *fileset,
764  			    struct kernel_lb_addr *root)
765  {
766  	struct buffer_head *bh = NULL;
767  	uint16_t ident;
768  
769  	if (fileset->logicalBlockNum != 0xFFFFFFFF ||
770  	    fileset->partitionReferenceNum != 0xFFFF) {
771  		bh = udf_read_ptagged(sb, fileset, 0, &ident);
772  
773  		if (!bh) {
774  			return 1;
775  		} else if (ident != TAG_IDENT_FSD) {
776  			brelse(bh);
777  			return 1;
778  		}
779  
780  		udf_debug("Fileset at block=%u, partition=%u\n",
781  			  fileset->logicalBlockNum,
782  			  fileset->partitionReferenceNum);
783  
784  		UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
785  		udf_load_fileset(sb, bh, root);
786  		brelse(bh);
787  		return 0;
788  	}
789  	return 1;
790  }
791  
792  /*
 * Load the Primary Volume Descriptor.
 *
 * Returns <0 on error, 0 on success. -EAGAIN is special, meaning the next
 * descriptor sequence should be tried.
797   */
798  static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
799  {
800  	struct primaryVolDesc *pvoldesc;
801  	uint8_t *outstr;
802  	struct buffer_head *bh;
803  	uint16_t ident;
804  	int ret = -ENOMEM;
805  #ifdef UDFFS_DEBUG
806  	struct timestamp *ts;
807  #endif
808  
809  	outstr = kmalloc(128, GFP_NOFS);
810  	if (!outstr)
811  		return -ENOMEM;
812  
813  	bh = udf_read_tagged(sb, block, block, &ident);
814  	if (!bh) {
815  		ret = -EAGAIN;
816  		goto out2;
817  	}
818  
819  	if (ident != TAG_IDENT_PVD) {
820  		ret = -EIO;
821  		goto out_bh;
822  	}
823  
824  	pvoldesc = (struct primaryVolDesc *)bh->b_data;
825  
826  	udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
827  			      pvoldesc->recordingDateAndTime);
828  #ifdef UDFFS_DEBUG
829  	ts = &pvoldesc->recordingDateAndTime;
830  	udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
831  		  le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
832  		  ts->minute, le16_to_cpu(ts->typeAndTimezone));
833  #endif
834  
835  
836  	ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
837  	if (ret < 0)
838  		goto out_bh;
839  
840  	strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
841  	udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
842  
843  	ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
844  	if (ret < 0)
845  		goto out_bh;
846  
847  	outstr[ret] = 0;
848  	udf_debug("volSetIdent[] = '%s'\n", outstr);
849  
850  	ret = 0;
851  out_bh:
852  	brelse(bh);
853  out2:
854  	kfree(outstr);
855  	return ret;
856  }
857  
858  struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
859  					u32 meta_file_loc, u32 partition_ref)
860  {
861  	struct kernel_lb_addr addr;
862  	struct inode *metadata_fe;
863  
864  	addr.logicalBlockNum = meta_file_loc;
865  	addr.partitionReferenceNum = partition_ref;
866  
867  	metadata_fe = udf_iget_special(sb, &addr);
868  
869  	if (IS_ERR(metadata_fe)) {
870  		udf_warn(sb, "metadata inode efe not found\n");
871  		return metadata_fe;
872  	}
873  	if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
874  		udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
875  		iput(metadata_fe);
876  		return ERR_PTR(-EIO);
877  	}
878  
879  	return metadata_fe;
880  }
881  
882  static int udf_load_metadata_files(struct super_block *sb, int partition,
883  				   int type1_index)
884  {
885  	struct udf_sb_info *sbi = UDF_SB(sb);
886  	struct udf_part_map *map;
887  	struct udf_meta_data *mdata;
888  	struct kernel_lb_addr addr;
889  	struct inode *fe;
890  
891  	map = &sbi->s_partmaps[partition];
892  	mdata = &map->s_type_specific.s_metadata;
893  	mdata->s_phys_partition_ref = type1_index;
894  
895  	/* metadata address */
896  	udf_debug("Metadata file location: block = %u part = %u\n",
897  		  mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
898  
899  	fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
900  					 mdata->s_phys_partition_ref);
901  	if (IS_ERR(fe)) {
902  		/* mirror file entry */
903  		udf_debug("Mirror metadata file location: block = %u part = %u\n",
904  			  mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
905  
906  		fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
907  						 mdata->s_phys_partition_ref);
908  
909  		if (IS_ERR(fe)) {
			udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n");
911  			return PTR_ERR(fe);
912  		}
913  		mdata->s_mirror_fe = fe;
914  	} else
915  		mdata->s_metadata_fe = fe;
916  
917  
918  	/*
919  	 * bitmap file entry
920  	 * Note:
921  	 * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
922  	*/
923  	if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
924  		addr.logicalBlockNum = mdata->s_bitmap_file_loc;
925  		addr.partitionReferenceNum = mdata->s_phys_partition_ref;
926  
927  		udf_debug("Bitmap file location: block = %u part = %u\n",
928  			  addr.logicalBlockNum, addr.partitionReferenceNum);
929  
930  		fe = udf_iget_special(sb, &addr);
931  		if (IS_ERR(fe)) {
932  			if (sb_rdonly(sb))
933  				udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
934  			else {
935  				udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
936  				return PTR_ERR(fe);
937  			}
938  		} else
939  			mdata->s_bitmap_fe = fe;
940  	}
941  
942  	udf_debug("udf_load_metadata_files Ok\n");
943  	return 0;
944  }
945  
946  static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
947  			     struct kernel_lb_addr *root)
948  {
949  	struct fileSetDesc *fset;
950  
951  	fset = (struct fileSetDesc *)bh->b_data;
952  
953  	*root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
954  
955  	UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
956  
957  	udf_debug("Rootdir at block=%u, partition=%u\n",
958  		  root->logicalBlockNum, root->partitionReferenceNum);
959  }
960  
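/*
 * One bitmap "group" is one block of the space bitmap and covers
 * sb->s_blocksize * 8 partition blocks (e.g. 16384 blocks for a 2048-byte
 * block size); the (sizeof(struct spaceBitmapDesc) << 3) term accounts, in
 * bits, for the descriptor header stored at the start of the bitmap.
 */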
961  int udf_compute_nr_groups(struct super_block *sb, u32 partition)
962  {
963  	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
964  	return DIV_ROUND_UP(map->s_partition_len +
965  			    (sizeof(struct spaceBitmapDesc) << 3),
966  			    sb->s_blocksize * 8);
967  }
968  
969  static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
970  {
971  	struct udf_bitmap *bitmap;
972  	int nr_groups;
973  	int size;
974  
975  	nr_groups = udf_compute_nr_groups(sb, index);
976  	size = sizeof(struct udf_bitmap) +
977  		(sizeof(struct buffer_head *) * nr_groups);
978  
979  	if (size <= PAGE_SIZE)
980  		bitmap = kzalloc(size, GFP_KERNEL);
981  	else
982  		bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
983  
984  	if (!bitmap)
985  		return NULL;
986  
987  	bitmap->s_nr_groups = nr_groups;
988  	return bitmap;
989  }
990  
991  static int udf_fill_partdesc_info(struct super_block *sb,
992  		struct partitionDesc *p, int p_index)
993  {
994  	struct udf_part_map *map;
995  	struct udf_sb_info *sbi = UDF_SB(sb);
996  	struct partitionHeaderDesc *phd;
997  
998  	map = &sbi->s_partmaps[p_index];
999  
1000  	map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
1001  	map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
1002  
1003  	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
1004  		map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
1005  	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
1006  		map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
1007  	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
1008  		map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
1009  	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
1010  		map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
1011  
1012  	udf_debug("Partition (%d type %x) starts at physical %u, block length %u\n",
1013  		  p_index, map->s_partition_type,
1014  		  map->s_partition_root, map->s_partition_len);
1015  
1016  	if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
1017  	    strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
1018  		return 0;
1019  
1020  	phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1021  	if (phd->unallocSpaceTable.extLength) {
1022  		struct kernel_lb_addr loc = {
1023  			.logicalBlockNum = le32_to_cpu(
1024  				phd->unallocSpaceTable.extPosition),
1025  			.partitionReferenceNum = p_index,
1026  		};
1027  		struct inode *inode;
1028  
1029  		inode = udf_iget_special(sb, &loc);
1030  		if (IS_ERR(inode)) {
1031  			udf_debug("cannot load unallocSpaceTable (part %d)\n",
1032  				  p_index);
1033  			return PTR_ERR(inode);
1034  		}
1035  		map->s_uspace.s_table = inode;
1036  		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
1037  		udf_debug("unallocSpaceTable (part %d) @ %lu\n",
1038  			  p_index, map->s_uspace.s_table->i_ino);
1039  	}
1040  
1041  	if (phd->unallocSpaceBitmap.extLength) {
1042  		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1043  		if (!bitmap)
1044  			return -ENOMEM;
1045  		map->s_uspace.s_bitmap = bitmap;
1046  		bitmap->s_extPosition = le32_to_cpu(
1047  				phd->unallocSpaceBitmap.extPosition);
1048  		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
1049  		udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
1050  			  p_index, bitmap->s_extPosition);
1051  	}
1052  
1053  	if (phd->partitionIntegrityTable.extLength)
1054  		udf_debug("partitionIntegrityTable (part %d)\n", p_index);
1055  
1056  	if (phd->freedSpaceTable.extLength) {
1057  		struct kernel_lb_addr loc = {
1058  			.logicalBlockNum = le32_to_cpu(
1059  				phd->freedSpaceTable.extPosition),
1060  			.partitionReferenceNum = p_index,
1061  		};
1062  		struct inode *inode;
1063  
1064  		inode = udf_iget_special(sb, &loc);
1065  		if (IS_ERR(inode)) {
1066  			udf_debug("cannot load freedSpaceTable (part %d)\n",
1067  				  p_index);
1068  			return PTR_ERR(inode);
1069  		}
1070  		map->s_fspace.s_table = inode;
1071  		map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
1072  		udf_debug("freedSpaceTable (part %d) @ %lu\n",
1073  			  p_index, map->s_fspace.s_table->i_ino);
1074  	}
1075  
1076  	if (phd->freedSpaceBitmap.extLength) {
1077  		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1078  		if (!bitmap)
1079  			return -ENOMEM;
1080  		map->s_fspace.s_bitmap = bitmap;
1081  		bitmap->s_extPosition = le32_to_cpu(
1082  				phd->freedSpaceBitmap.extPosition);
1083  		map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
1084  		udf_debug("freedSpaceBitmap (part %d) @ %u\n",
1085  			  p_index, bitmap->s_extPosition);
1086  	}
1087  	return 0;
1088  }
1089  
1090  static void udf_find_vat_block(struct super_block *sb, int p_index,
1091  			       int type1_index, sector_t start_block)
1092  {
1093  	struct udf_sb_info *sbi = UDF_SB(sb);
1094  	struct udf_part_map *map = &sbi->s_partmaps[p_index];
1095  	sector_t vat_block;
1096  	struct kernel_lb_addr ino;
1097  	struct inode *inode;
1098  
1099  	/*
1100  	 * VAT file entry is in the last recorded block. Some broken disks have
1101  	 * it a few blocks before so try a bit harder...
1102  	 */
1103  	ino.partitionReferenceNum = type1_index;
1104  	for (vat_block = start_block;
1105  	     vat_block >= map->s_partition_root &&
1106  	     vat_block >= start_block - 3; vat_block--) {
1107  		ino.logicalBlockNum = vat_block - map->s_partition_root;
1108  		inode = udf_iget_special(sb, &ino);
1109  		if (!IS_ERR(inode)) {
1110  			sbi->s_vat_inode = inode;
1111  			break;
1112  		}
1113  	}
1114  }
1115  
1116  static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1117  {
1118  	struct udf_sb_info *sbi = UDF_SB(sb);
1119  	struct udf_part_map *map = &sbi->s_partmaps[p_index];
1120  	struct buffer_head *bh = NULL;
1121  	struct udf_inode_info *vati;
1122  	uint32_t pos;
1123  	struct virtualAllocationTable20 *vat20;
1124  	sector_t blocks = i_size_read(sb->s_bdev->bd_inode) >>
1125  			  sb->s_blocksize_bits;
1126  
1127  	udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
1128  	if (!sbi->s_vat_inode &&
1129  	    sbi->s_last_block != blocks - 1) {
1130  		pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
1131  			  (unsigned long)sbi->s_last_block,
1132  			  (unsigned long)blocks - 1);
1133  		udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
1134  	}
1135  	if (!sbi->s_vat_inode)
1136  		return -EIO;
1137  
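	/*
	 * UDF 1.50 virtual partitions store the VAT as a bare array of
	 * 32-bit entries followed by a 36-byte identification trailer, hence
	 * the (i_size - 36) >> 2 entry count below. UDF 2.00 prefixes the
	 * table with a virtualAllocationTable20 header whose lengthHeader
	 * field gives the offset of the first entry.
	 */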
1138  	if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1139  		map->s_type_specific.s_virtual.s_start_offset = 0;
1140  		map->s_type_specific.s_virtual.s_num_entries =
1141  			(sbi->s_vat_inode->i_size - 36) >> 2;
1142  	} else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
1143  		vati = UDF_I(sbi->s_vat_inode);
1144  		if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1145  			pos = udf_block_map(sbi->s_vat_inode, 0);
1146  			bh = sb_bread(sb, pos);
1147  			if (!bh)
1148  				return -EIO;
1149  			vat20 = (struct virtualAllocationTable20 *)bh->b_data;
1150  		} else {
1151  			vat20 = (struct virtualAllocationTable20 *)
1152  							vati->i_ext.i_data;
1153  		}
1154  
1155  		map->s_type_specific.s_virtual.s_start_offset =
1156  			le16_to_cpu(vat20->lengthHeader);
1157  		map->s_type_specific.s_virtual.s_num_entries =
1158  			(sbi->s_vat_inode->i_size -
1159  				map->s_type_specific.s_virtual.
1160  					s_start_offset) >> 2;
1161  		brelse(bh);
1162  	}
1163  	return 0;
1164  }
1165  
1166  /*
1167   * Load partition descriptor block
1168   *
1169   * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
1170   * sequence.
1171   */
1172  static int udf_load_partdesc(struct super_block *sb, sector_t block)
1173  {
1174  	struct buffer_head *bh;
1175  	struct partitionDesc *p;
1176  	struct udf_part_map *map;
1177  	struct udf_sb_info *sbi = UDF_SB(sb);
1178  	int i, type1_idx;
1179  	uint16_t partitionNumber;
1180  	uint16_t ident;
1181  	int ret;
1182  
1183  	bh = udf_read_tagged(sb, block, block, &ident);
1184  	if (!bh)
1185  		return -EAGAIN;
1186  	if (ident != TAG_IDENT_PD) {
1187  		ret = 0;
1188  		goto out_bh;
1189  	}
1190  
1191  	p = (struct partitionDesc *)bh->b_data;
1192  	partitionNumber = le16_to_cpu(p->partitionNumber);
1193  
1194  	/* First scan for TYPE1 and SPARABLE partitions */
1195  	for (i = 0; i < sbi->s_partitions; i++) {
1196  		map = &sbi->s_partmaps[i];
1197  		udf_debug("Searching map: (%u == %u)\n",
1198  			  map->s_partition_num, partitionNumber);
1199  		if (map->s_partition_num == partitionNumber &&
1200  		    (map->s_partition_type == UDF_TYPE1_MAP15 ||
1201  		     map->s_partition_type == UDF_SPARABLE_MAP15))
1202  			break;
1203  	}
1204  
1205  	if (i >= sbi->s_partitions) {
1206  		udf_debug("Partition (%u) not found in partition map\n",
1207  			  partitionNumber);
1208  		ret = 0;
1209  		goto out_bh;
1210  	}
1211  
1212  	ret = udf_fill_partdesc_info(sb, p, i);
1213  	if (ret < 0)
1214  		goto out_bh;
1215  
1216  	/*
1217  	 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
1218  	 * PHYSICAL partitions are already set up
1219  	 */
1220  	type1_idx = i;
1221  #ifdef UDFFS_DEBUG
	map = NULL; /* suppress 'maybe used uninitialized' warning */
1223  #endif
1224  	for (i = 0; i < sbi->s_partitions; i++) {
1225  		map = &sbi->s_partmaps[i];
1226  
1227  		if (map->s_partition_num == partitionNumber &&
1228  		    (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1229  		     map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1230  		     map->s_partition_type == UDF_METADATA_MAP25))
1231  			break;
1232  	}
1233  
1234  	if (i >= sbi->s_partitions) {
1235  		ret = 0;
1236  		goto out_bh;
1237  	}
1238  
1239  	ret = udf_fill_partdesc_info(sb, p, i);
1240  	if (ret < 0)
1241  		goto out_bh;
1242  
1243  	if (map->s_partition_type == UDF_METADATA_MAP25) {
1244  		ret = udf_load_metadata_files(sb, i, type1_idx);
1245  		if (ret < 0) {
1246  			udf_err(sb, "error loading MetaData partition map %d\n",
1247  				i);
1248  			goto out_bh;
1249  		}
1250  	} else {
1251  		/*
1252  		 * If we have a partition with virtual map, we don't handle
1253  		 * writing to it (we overwrite blocks instead of relocating
1254  		 * them).
1255  		 */
1256  		if (!sb_rdonly(sb)) {
1257  			ret = -EACCES;
1258  			goto out_bh;
1259  		}
1260  		ret = udf_load_vat(sb, i, type1_idx);
1261  		if (ret < 0)
1262  			goto out_bh;
1263  	}
1264  	ret = 0;
1265  out_bh:
1266  	/* In case loading failed, we handle cleanup in udf_fill_super */
1267  	brelse(bh);
1268  	return ret;
1269  }
1270  
1271  static int udf_load_sparable_map(struct super_block *sb,
1272  				 struct udf_part_map *map,
1273  				 struct sparablePartitionMap *spm)
1274  {
1275  	uint32_t loc;
1276  	uint16_t ident;
1277  	struct sparingTable *st;
1278  	struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
1279  	int i;
1280  	struct buffer_head *bh;
1281  
1282  	map->s_partition_type = UDF_SPARABLE_MAP15;
1283  	sdata->s_packet_len = le16_to_cpu(spm->packetLength);
1284  	if (!is_power_of_2(sdata->s_packet_len)) {
1285  		udf_err(sb, "error loading logical volume descriptor: "
1286  			"Invalid packet length %u\n",
1287  			(unsigned)sdata->s_packet_len);
1288  		return -EIO;
1289  	}
1290  	if (spm->numSparingTables > 4) {
1291  		udf_err(sb, "error loading logical volume descriptor: "
1292  			"Too many sparing tables (%d)\n",
1293  			(int)spm->numSparingTables);
1294  		return -EIO;
1295  	}
1296  
1297  	for (i = 0; i < spm->numSparingTables; i++) {
1298  		loc = le32_to_cpu(spm->locSparingTable[i]);
1299  		bh = udf_read_tagged(sb, loc, loc, &ident);
1300  		if (!bh)
1301  			continue;
1302  
1303  		st = (struct sparingTable *)bh->b_data;
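		/*
		 * Sparing tables carry descriptor tag identifier 0, so they
		 * are recognised by their UDF_ID_SPARING regid; also make
		 * sure the reallocation table fits in a single block.
		 */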
1304  		if (ident != 0 ||
1305  		    strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
1306  			    strlen(UDF_ID_SPARING)) ||
1307  		    sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
1308  							sb->s_blocksize) {
1309  			brelse(bh);
1310  			continue;
1311  		}
1312  
1313  		sdata->s_spar_map[i] = bh;
1314  	}
1315  	map->s_partition_func = udf_get_pblock_spar15;
1316  	return 0;
1317  }
1318  
1319  static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1320  			       struct kernel_lb_addr *fileset)
1321  {
1322  	struct logicalVolDesc *lvd;
1323  	int i, offset;
1324  	uint8_t type;
1325  	struct udf_sb_info *sbi = UDF_SB(sb);
1326  	struct genericPartitionMap *gpm;
1327  	uint16_t ident;
1328  	struct buffer_head *bh;
1329  	unsigned int table_len;
1330  	int ret;
1331  
1332  	bh = udf_read_tagged(sb, block, block, &ident);
1333  	if (!bh)
1334  		return -EAGAIN;
1335  	BUG_ON(ident != TAG_IDENT_LVD);
1336  	lvd = (struct logicalVolDesc *)bh->b_data;
1337  	table_len = le32_to_cpu(lvd->mapTableLength);
1338  	if (table_len > sb->s_blocksize - sizeof(*lvd)) {
1339  		udf_err(sb, "error loading logical volume descriptor: "
1340  			"Partition table too long (%u > %lu)\n", table_len,
1341  			sb->s_blocksize - sizeof(*lvd));
1342  		ret = -EIO;
1343  		goto out_bh;
1344  	}
1345  
1346  	ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
1347  	if (ret)
1348  		goto out_bh;
1349  
1350  	for (i = 0, offset = 0;
1351  	     i < sbi->s_partitions && offset < table_len;
1352  	     i++, offset += gpm->partitionMapLength) {
1353  		struct udf_part_map *map = &sbi->s_partmaps[i];
1354  		gpm = (struct genericPartitionMap *)
1355  				&(lvd->partitionMaps[offset]);
1356  		type = gpm->partitionMapType;
1357  		if (type == 1) {
1358  			struct genericPartitionMap1 *gpm1 =
1359  				(struct genericPartitionMap1 *)gpm;
1360  			map->s_partition_type = UDF_TYPE1_MAP15;
1361  			map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
1362  			map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
1363  			map->s_partition_func = NULL;
1364  		} else if (type == 2) {
1365  			struct udfPartitionMap2 *upm2 =
1366  						(struct udfPartitionMap2 *)gpm;
1367  			if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
1368  						strlen(UDF_ID_VIRTUAL))) {
1369  				u16 suf =
1370  					le16_to_cpu(((__le16 *)upm2->partIdent.
1371  							identSuffix)[0]);
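				/*
				 * The first word of the identifier suffix
				 * holds the UDF revision, so anything below
				 * 0x0200 gets the UDF 1.50 virtual map.
				 */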
1372  				if (suf < 0x0200) {
1373  					map->s_partition_type =
1374  							UDF_VIRTUAL_MAP15;
1375  					map->s_partition_func =
1376  							udf_get_pblock_virt15;
1377  				} else {
1378  					map->s_partition_type =
1379  							UDF_VIRTUAL_MAP20;
1380  					map->s_partition_func =
1381  							udf_get_pblock_virt20;
1382  				}
1383  			} else if (!strncmp(upm2->partIdent.ident,
1384  						UDF_ID_SPARABLE,
1385  						strlen(UDF_ID_SPARABLE))) {
1386  				ret = udf_load_sparable_map(sb, map,
1387  					(struct sparablePartitionMap *)gpm);
1388  				if (ret < 0)
1389  					goto out_bh;
1390  			} else if (!strncmp(upm2->partIdent.ident,
1391  						UDF_ID_METADATA,
1392  						strlen(UDF_ID_METADATA))) {
1393  				struct udf_meta_data *mdata =
1394  					&map->s_type_specific.s_metadata;
1395  				struct metadataPartitionMap *mdm =
1396  						(struct metadataPartitionMap *)
1397  						&(lvd->partitionMaps[offset]);
1398  				udf_debug("Parsing Logical vol part %d type %u  id=%s\n",
1399  					  i, type, UDF_ID_METADATA);
1400  
1401  				map->s_partition_type = UDF_METADATA_MAP25;
1402  				map->s_partition_func = udf_get_pblock_meta25;
1403  
1404  				mdata->s_meta_file_loc   =
1405  					le32_to_cpu(mdm->metadataFileLoc);
1406  				mdata->s_mirror_file_loc =
1407  					le32_to_cpu(mdm->metadataMirrorFileLoc);
1408  				mdata->s_bitmap_file_loc =
1409  					le32_to_cpu(mdm->metadataBitmapFileLoc);
1410  				mdata->s_alloc_unit_size =
1411  					le32_to_cpu(mdm->allocUnitSize);
1412  				mdata->s_align_unit_size =
1413  					le16_to_cpu(mdm->alignUnitSize);
1414  				if (mdm->flags & 0x01)
1415  					mdata->s_flags |= MF_DUPLICATE_MD;
1416  
1417  				udf_debug("Metadata Ident suffix=0x%x\n",
1418  					  le16_to_cpu(*(__le16 *)
1419  						      mdm->partIdent.identSuffix));
1420  				udf_debug("Metadata part num=%u\n",
1421  					  le16_to_cpu(mdm->partitionNum));
1422  				udf_debug("Metadata part alloc unit size=%u\n",
1423  					  le32_to_cpu(mdm->allocUnitSize));
1424  				udf_debug("Metadata file loc=%u\n",
1425  					  le32_to_cpu(mdm->metadataFileLoc));
1426  				udf_debug("Mirror file loc=%u\n",
1427  					  le32_to_cpu(mdm->metadataMirrorFileLoc));
1428  				udf_debug("Bitmap file loc=%u\n",
1429  					  le32_to_cpu(mdm->metadataBitmapFileLoc));
1430  				udf_debug("Flags: %d %u\n",
1431  					  mdata->s_flags, mdm->flags);
1432  			} else {
1433  				udf_debug("Unknown ident: %s\n",
1434  					  upm2->partIdent.ident);
1435  				continue;
1436  			}
1437  			map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
1438  			map->s_partition_num = le16_to_cpu(upm2->partitionNum);
1439  		}
1440  		udf_debug("Partition (%d:%u) type %u on volume %u\n",
1441  			  i, map->s_partition_num, type, map->s_volumeseqnum);
1442  	}
1443  
1444  	if (fileset) {
1445  		struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
1446  
1447  		*fileset = lelb_to_cpu(la->extLocation);
1448  		udf_debug("FileSet found in LogicalVolDesc at block=%u, partition=%u\n",
1449  			  fileset->logicalBlockNum,
1450  			  fileset->partitionReferenceNum);
1451  	}
1452  	if (lvd->integritySeqExt.extLength)
1453  		udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1454  	ret = 0;
1455  out_bh:
1456  	brelse(bh);
1457  	return ret;
1458  }
1459  
1460  /*
1461   * Find the prevailing Logical Volume Integrity Descriptor.
1462   */
1463  static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
1464  {
1465  	struct buffer_head *bh, *final_bh;
1466  	uint16_t ident;
1467  	struct udf_sb_info *sbi = UDF_SB(sb);
1468  	struct logicalVolIntegrityDesc *lvid;
1469  	int indirections = 0;
1470  
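	/*
	 * Within one integrity extent the last readable LVID prevails;
	 * nextIntegrityExt then continues the chain in a new extent.
	 * UDF_MAX_LVID_NESTING bounds the walk on corrupted media.
	 */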
1471  	while (++indirections <= UDF_MAX_LVID_NESTING) {
1472  		final_bh = NULL;
1473  		while (loc.extLength > 0 &&
1474  			(bh = udf_read_tagged(sb, loc.extLocation,
1475  					loc.extLocation, &ident))) {
1476  			if (ident != TAG_IDENT_LVID) {
1477  				brelse(bh);
1478  				break;
1479  			}
1480  
1481  			brelse(final_bh);
1482  			final_bh = bh;
1483  
1484  			loc.extLength -= sb->s_blocksize;
1485  			loc.extLocation++;
1486  		}
1487  
1488  		if (!final_bh)
1489  			return;
1490  
1491  		brelse(sbi->s_lvid_bh);
1492  		sbi->s_lvid_bh = final_bh;
1493  
1494  		lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
1495  		if (lvid->nextIntegrityExt.extLength == 0)
1496  			return;
1497  
1498  		loc = leea_to_cpu(lvid->nextIntegrityExt);
1499  	}
1500  
1501  	udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
1502  		UDF_MAX_LVID_NESTING);
1503  	brelse(sbi->s_lvid_bh);
1504  	sbi->s_lvid_bh = NULL;
1505  }
1506  
1507  /*
1508   * Step for reallocation of table of partition descriptor sequence numbers.
1509   * Must be power of 2.
1510   */
1511  #define PART_DESC_ALLOC_STEP 32
1512  
1513  struct part_desc_seq_scan_data {
1514  	struct udf_vds_record rec;
1515  	u32 partnum;
1516  };
1517  
1518  struct desc_seq_scan_data {
1519  	struct udf_vds_record vds[VDS_POS_LENGTH];
1520  	unsigned int size_part_descs;
1521  	unsigned int num_part_descs;
1522  	struct part_desc_seq_scan_data *part_descs_loc;
1523  };
1524  
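/*
 * Return the scan record tracking the Partition Descriptor with the given
 * partition number, growing the part_descs_loc array when a new partition
 * number arrives and all allocated slots are in use.
 */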
1525  static struct udf_vds_record *handle_partition_descriptor(
1526  				struct buffer_head *bh,
1527  				struct desc_seq_scan_data *data)
1528  {
1529  	struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
1530  	int partnum;
1531  	int i;
1532  
1533  	partnum = le16_to_cpu(desc->partitionNumber);
1534  	for (i = 0; i < data->num_part_descs; i++)
1535  		if (partnum == data->part_descs_loc[i].partnum)
1536  			return &(data->part_descs_loc[i].rec);
1537  	if (data->num_part_descs >= data->size_part_descs) {
1538  		struct part_desc_seq_scan_data *new_loc;
		/* Grow from the record count so the new slot always fits. */
		unsigned int new_size = ALIGN(data->num_part_descs + 1,
					      PART_DESC_ALLOC_STEP);
1540  
1541  		new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
1542  		if (!new_loc)
1543  			return ERR_PTR(-ENOMEM);
1544  		memcpy(new_loc, data->part_descs_loc,
1545  		       data->size_part_descs * sizeof(*new_loc));
1546  		kfree(data->part_descs_loc);
1547  		data->part_descs_loc = new_loc;
1548  		data->size_part_descs = new_size;
1549  	}
1550  	return &(data->part_descs_loc[data->num_part_descs++].rec);
1551  }
1552  
1553  
1554  static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident,
1555  		struct buffer_head *bh, struct desc_seq_scan_data *data)
1556  {
1557  	switch (ident) {
1558  	case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1559  		return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]);
1560  	case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1561  		return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]);
1562  	case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1563  		return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]);
1564  	case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1565  		return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]);
1566  	case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1567  		return handle_partition_descriptor(bh, data);
1568  	}
1569  	return NULL;
1570  }
1571  
1572  /*
1573   * Process a main/reserve volume descriptor sequence.
1574   *   @block		First block of first extent of the sequence.
 *   @lastblock		Last block of the first extent of the sequence.
 *   @fileset		Where we store the extent containing the root fileset.
1577   *
1578   * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor
1579   * sequence
1580   */
1581  static noinline int udf_process_sequence(
1582  		struct super_block *sb,
1583  		sector_t block, sector_t lastblock,
1584  		struct kernel_lb_addr *fileset)
1585  {
1586  	struct buffer_head *bh = NULL;
1587  	struct udf_vds_record *curr;
1588  	struct generic_desc *gd;
1589  	struct volDescPtr *vdp;
1590  	bool done = false;
1591  	uint32_t vdsn;
1592  	uint16_t ident;
1593  	int ret;
1594  	unsigned int indirections = 0;
1595  	struct desc_seq_scan_data data;
1596  	unsigned int i;
1597  
1598  	memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1599  	data.size_part_descs = PART_DESC_ALLOC_STEP;
1600  	data.num_part_descs = 0;
1601  	data.part_descs_loc = kcalloc(data.size_part_descs,
1602  				      sizeof(*data.part_descs_loc),
1603  				      GFP_KERNEL);
1604  	if (!data.part_descs_loc)
1605  		return -ENOMEM;
1606  
1607  	/*
1608  	 * Read the main descriptor sequence and find which descriptors
1609  	 * are in it.
1610  	 */
1611  	for (; (!done && block <= lastblock); block++) {
1612  		bh = udf_read_tagged(sb, block, block, &ident);
1613  		if (!bh)
1614  			break;
1615  
1616  		/* Process each descriptor (ISO 13346 3/8.3-8.4) */
1617  		gd = (struct generic_desc *)bh->b_data;
1618  		vdsn = le32_to_cpu(gd->volDescSeqNum);
1619  		switch (ident) {
1620  		case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1621  			if (++indirections > UDF_MAX_TD_NESTING) {
1622  				udf_err(sb, "too many Volume Descriptor "
1623  					"Pointers (max %u supported)\n",
1624  					UDF_MAX_TD_NESTING);
				brelse(bh);
				ret = -EIO;
				goto out;
1627  			}
1628  
1629  			vdp = (struct volDescPtr *)bh->b_data;
1630  			block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
1631  			lastblock = le32_to_cpu(
1632  				vdp->nextVolDescSeqExt.extLength) >>
1633  				sb->s_blocksize_bits;
1634  			lastblock += block - 1;
1635  			/* For loop is going to increment 'block' again */
1636  			block--;
1637  			break;
1638  		case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1639  		case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1640  		case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1641  		case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1642  		case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1643  			curr = get_volume_descriptor_record(ident, bh, &data);
1644  			if (IS_ERR(curr)) {
				brelse(bh);
				ret = PTR_ERR(curr);
				goto out;
1647  			}
1648  			/* Descriptor we don't care about? */
1649  			if (!curr)
1650  				break;
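			/*
			 * Per ECMA 167 the copy with the highest Volume
			 * Descriptor Sequence Number prevails, so remember
			 * the latest (>=) occurrence of each descriptor.
			 */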
1651  			if (vdsn >= curr->volDescSeqNum) {
1652  				curr->volDescSeqNum = vdsn;
1653  				curr->block = block;
1654  			}
1655  			break;
1656  		case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
1657  			done = true;
1658  			break;
1659  		}
1660  		brelse(bh);
1661  	}
1662  	/*
1663  	 * Now read interesting descriptors again and process them
1664  	 * in a suitable order
1665  	 */
	if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
		udf_err(sb, "Primary Volume Descriptor not found!\n");
		ret = -EAGAIN;
		goto out;
	}
	ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
	if (ret < 0)
		goto out;

	if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
		ret = udf_load_logicalvol(sb,
				data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
				fileset);
		if (ret < 0)
			goto out;
	}

	/* Now handle prevailing Partition Descriptors */
	for (i = 0; i < data.num_part_descs; i++) {
		ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	kfree(data.part_descs_loc);
	return ret;
1690  }
1691  
1692  /*
1693   * Load Volume Descriptor Sequence described by anchor in bh
1694   *
1695   * Returns <0 on error, 0 on success
1696   */
1697  static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1698  			     struct kernel_lb_addr *fileset)
1699  {
1700  	struct anchorVolDescPtr *anchor;
1701  	sector_t main_s, main_e, reserve_s, reserve_e;
1702  	int ret;
1703  
1704  	anchor = (struct anchorVolDescPtr *)bh->b_data;
1705  
1706  	/* Locate the main sequence */
1707  	main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1708  	main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1709  	main_e = main_e >> sb->s_blocksize_bits;
1710  	main_e += main_s - 1;
1711  
1712  	/* Locate the reserve sequence */
1713  	reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
1714  	reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1715  	reserve_e = reserve_e >> sb->s_blocksize_bits;
1716  	reserve_e += reserve_s - 1;
1717  
1718  	/* Process the main & reserve sequences */
1719  	/* responsible for finding the PartitionDesc(s) */
1720  	ret = udf_process_sequence(sb, main_s, main_e, fileset);
1721  	if (ret != -EAGAIN)
1722  		return ret;
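	/*
	 * The main sequence was unusable (-EAGAIN); drop anything that may
	 * have been set up from it and retry with the reserve sequence.
	 */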
1723  	udf_sb_free_partitions(sb);
1724  	ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
1725  	if (ret < 0) {
1726  		udf_sb_free_partitions(sb);
1727  		/* No sequence was OK, return -EIO */
1728  		if (ret == -EAGAIN)
1729  			ret = -EIO;
1730  	}
1731  	return ret;
1732  }
1733  
1734  /*
1735   * Check whether the given block contains an Anchor Volume Descriptor
1736   * Pointer and load the Volume Descriptor Sequence if so.
1737   *
1738   * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
1739   * block
1740   */
1741  static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1742  				  struct kernel_lb_addr *fileset)
1743  {
1744  	struct buffer_head *bh;
1745  	uint16_t ident;
1746  	int ret;
1747  
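	/*
	 * In VARCONV mode the block number gets remapped before it is read;
	 * skip candidates whose remapped location would fall beyond the end
	 * of the device.
	 */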
1748  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
1749  	    udf_fixed_to_variable(block) >=
1750  	    i_size_read(sb->s_bdev->bd_inode) >> sb->s_blocksize_bits)
1751  		return -EAGAIN;
1752  
1753  	bh = udf_read_tagged(sb, block, block, &ident);
1754  	if (!bh)
1755  		return -EAGAIN;
1756  	if (ident != TAG_IDENT_AVDP) {
1757  		brelse(bh);
1758  		return -EAGAIN;
1759  	}
1760  	ret = udf_load_sequence(sb, bh, fileset);
1761  	brelse(bh);
1762  	return ret;
1763  }
1764  
1765  /*
1766   * Search for an anchor volume descriptor pointer.
1767   *
1768   * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
1769   * of anchors.
1770   */
1771  static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
1772  			    struct kernel_lb_addr *fileset)
1773  {
1774  	sector_t last[6];
1775  	int i;
1776  	struct udf_sb_info *sbi = UDF_SB(sb);
1777  	int last_count = 0;
1778  	int ret;
1779  
1780  	/* First try user provided anchor */
1781  	if (sbi->s_anchor) {
1782  		ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
1783  		if (ret != -EAGAIN)
1784  			return ret;
1785  	}
1786  	/*
1787  	 * According to the specification, an anchor is recorded at one of:
1788  	 *     block 256
1789  	 *     lastblock - 256
1790  	 *     lastblock
1791  	 * However, if the disc isn't closed, it may instead be at block 512.
1792  	 */
1793  	ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
1794  	if (ret != -EAGAIN)
1795  		return ret;
1796  	/*
1797  	 * The trouble is which block is the last one. Drives often misreport
1798  	 * this so we try various possibilities.
1799  	 */
1800  	last[last_count++] = *lastblock;
1801  	if (*lastblock >= 1)
1802  		last[last_count++] = *lastblock - 1;
1803  	last[last_count++] = *lastblock + 1;
1804  	if (*lastblock >= 2)
1805  		last[last_count++] = *lastblock - 2;
1806  	if (*lastblock >= 150)
1807  		last[last_count++] = *lastblock - 150;
1808  	if (*lastblock >= 152)
1809  		last[last_count++] = *lastblock - 152;
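	/*
	 * The +1/-1/-2 offsets cover drives that are off by a block or two;
	 * the -150/-152 offsets presumably account for the 150-sector
	 * (2 second) run-out some CD drives include in the reported size.
	 */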
1810  
1811  	for (i = 0; i < last_count; i++) {
1812  		if (last[i] >= i_size_read(sb->s_bdev->bd_inode) >>
1813  				sb->s_blocksize_bits)
1814  			continue;
1815  		ret = udf_check_anchor_block(sb, last[i], fileset);
1816  		if (ret != -EAGAIN) {
1817  			if (!ret)
1818  				*lastblock = last[i];
1819  			return ret;
1820  		}
1821  		if (last[i] < 256)
1822  			continue;
1823  		ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
1824  		if (ret != -EAGAIN) {
1825  			if (!ret)
1826  				*lastblock = last[i];
1827  			return ret;
1828  		}
1829  	}
1830  
1831  	/* Finally try block 512 in case media is open */
1832  	return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
1833  }
1834  
1835  /*
1836   * Find an anchor volume descriptor and load Volume Descriptor Sequence from
1837   * area specified by it. The function expects sbi->s_last_block to be the last
1838   * block on the media.
1839   *
1840   * Return <0 on error, 0 if anchor found. -EAGAIN is special meaning anchor
1841   * was not found.
1842   */
1843  static int udf_find_anchor(struct super_block *sb,
1844  			   struct kernel_lb_addr *fileset)
1845  {
1846  	struct udf_sb_info *sbi = UDF_SB(sb);
1847  	sector_t lastblock = sbi->s_last_block;
1848  	int ret;
1849  
1850  	ret = udf_scan_anchors(sb, &lastblock, fileset);
1851  	if (ret != -EAGAIN)
1852  		goto out;
1853  
1854  	/* No anchor found? Try VARCONV conversion of block numbers */
1855  	UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
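	/*
	 * With UDF_FLAG_VARCONV set, reads remap block numbers via
	 * udf_fixed_to_variable() (variable packet written media). The two
	 * scans below therefore probe the reported last block both with and
	 * without that remapping effectively applied.
	 */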
1856  	lastblock = udf_variable_to_fixed(sbi->s_last_block);
1857  	/* Firstly, we try not to convert the number of the last block */
1858  	ret = udf_scan_anchors(sb, &lastblock, fileset);
1859  	if (ret != -EAGAIN)
1860  		goto out;
1861  
1862  	lastblock = sbi->s_last_block;
1863  	/* Secondly, we try with the converted number of the last block */
1864  	ret = udf_scan_anchors(sb, &lastblock, fileset);
1865  	if (ret < 0) {
1866  		/* VARCONV didn't help. Clear it. */
1867  		UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
1868  	}
1869  out:
1870  	if (ret == 0)
1871  		sbi->s_last_block = lastblock;
1872  	return ret;
1873  }
1874  
1875  /*
1876   * Check Volume Structure Descriptor, find Anchor block and load Volume
1877   * Descriptor Sequence.
1878   *
1879   * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
1880   * block was not found.
1881   */
1882  static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
1883  			int silent, struct kernel_lb_addr *fileset)
1884  {
1885  	struct udf_sb_info *sbi = UDF_SB(sb);
1886  	loff_t nsr_off;
1887  	int ret;
1888  
1889  	if (!sb_set_blocksize(sb, uopt->blocksize)) {
1890  		if (!silent)
1891  			udf_warn(sb, "Bad block size\n");
1892  		return -EINVAL;
1893  	}
1894  	sbi->s_last_block = uopt->lastblock;
1895  	if (!uopt->novrs) {
1896  		/* Check that it is NSR02 compliant */
1897  		nsr_off = udf_check_vsd(sb);
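		/*
		 * udf_check_vsd() returns a positive offset when an NSR
		 * descriptor was found, 0 when no Volume Recognition Sequence
		 * is present, and -1 when the first VSD sector could not be
		 * read at all.
		 */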
1898  		if (!nsr_off) {
1899  			if (!silent)
1900  				udf_warn(sb, "No VRS found\n");
1901  			return -EINVAL;
1902  		}
1903  		if (nsr_off == -1)
1904  			udf_debug("Failed to read sector at offset %d. "
1905  				  "Assuming open disc. Skipping validity "
1906  				  "check\n", VSD_FIRST_SECTOR_OFFSET);
1907  		if (!sbi->s_last_block)
1908  			sbi->s_last_block = udf_get_last_block(sb);
1909  	} else {
1910  		udf_debug("Validity check skipped because of novrs option\n");
1911  	}
1912  
1913  	/* Look for anchor block and load Volume Descriptor Sequence */
1914  	sbi->s_anchor = uopt->anchor;
1915  	ret = udf_find_anchor(sb, fileset);
1916  	if (ret < 0) {
1917  		if (!silent && ret == -EAGAIN)
1918  			udf_warn(sb, "No anchor found\n");
1919  		return ret;
1920  	}
1921  	return 0;
1922  }
1923  
1924  static void udf_open_lvid(struct super_block *sb)
1925  {
1926  	struct udf_sb_info *sbi = UDF_SB(sb);
1927  	struct buffer_head *bh = sbi->s_lvid_bh;
1928  	struct logicalVolIntegrityDesc *lvid;
1929  	struct logicalVolIntegrityDescImpUse *lvidiu;
1930  	struct timespec64 ts;
1931  
1932  	if (!bh)
1933  		return;
1934  	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1935  	lvidiu = udf_sb_lvidiu(sb);
1936  	if (!lvidiu)
1937  		return;
1938  
1939  	mutex_lock(&sbi->s_alloc_mutex);
1940  	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1941  	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1942  	ktime_get_real_ts64(&ts);
1943  	udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
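	/*
	 * Flip a cleanly closed volume to OPEN. If it was not marked CLOSE,
	 * it was not cleanly closed before; remember that so that
	 * udf_close_lvid() never writes CLOSE back on unmount.
	 */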
1944  	if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE)
1945  		lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
1946  	else
1947  		UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT);
1948  
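	/*
	 * Regenerate the descriptor tag: the CRC covers descCRCLength bytes
	 * following the tag, while tagChecksum protects the tag itself.
	 */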
1949  	lvid->descTag.descCRC = cpu_to_le16(
1950  		crc_itu_t(0, (char *)lvid + sizeof(struct tag),
1951  			le16_to_cpu(lvid->descTag.descCRCLength)));
1952  
1953  	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
1954  	mark_buffer_dirty(bh);
1955  	sbi->s_lvid_dirty = 0;
1956  	mutex_unlock(&sbi->s_alloc_mutex);
1957  	/* Make opening of filesystem visible on the media immediately */
1958  	sync_dirty_buffer(bh);
1959  }
1960  
1961  static void udf_close_lvid(struct super_block *sb)
1962  {
1963  	struct udf_sb_info *sbi = UDF_SB(sb);
1964  	struct buffer_head *bh = sbi->s_lvid_bh;
1965  	struct logicalVolIntegrityDesc *lvid;
1966  	struct logicalVolIntegrityDescImpUse *lvidiu;
1967  	struct timespec64 ts;
1968  
1969  	if (!bh)
1970  		return;
1971  	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1972  	lvidiu = udf_sb_lvidiu(sb);
1973  	if (!lvidiu)
1974  		return;
1975  
1976  	mutex_lock(&sbi->s_alloc_mutex);
1977  	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1978  	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1979  	ktime_get_real_ts64(&ts);
1980  	udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
1981  	if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
1982  		lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
1983  	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
1984  		lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
1985  	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
1986  		lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
1987  	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT))
1988  		lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
1989  
1990  	lvid->descTag.descCRC = cpu_to_le16(
1991  			crc_itu_t(0, (char *)lvid + sizeof(struct tag),
1992  				le16_to_cpu(lvid->descTag.descCRCLength)));
1993  
1994  	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
1995  	/*
1996  	 * We set buffer uptodate unconditionally here to avoid spurious
1997  	 * warnings from mark_buffer_dirty() when a previous EIO has marked
1998  	 * the buffer as !uptodate
1999  	 */
2000  	set_buffer_uptodate(bh);
2001  	mark_buffer_dirty(bh);
2002  	sbi->s_lvid_dirty = 0;
2003  	mutex_unlock(&sbi->s_alloc_mutex);
2004  	/* Make closing of filesystem visible on the media immediately */
2005  	sync_dirty_buffer(bh);
2006  }
2007  
2008  u64 lvid_get_unique_id(struct super_block *sb)
2009  {
2010  	struct buffer_head *bh;
2011  	struct udf_sb_info *sbi = UDF_SB(sb);
2012  	struct logicalVolIntegrityDesc *lvid;
2013  	struct logicalVolHeaderDesc *lvhd;
2014  	u64 uniqueID;
2015  	u64 ret;
2016  
2017  	bh = sbi->s_lvid_bh;
2018  	if (!bh)
2019  		return 0;
2020  
2021  	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2022  	lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
2023  
2024  	mutex_lock(&sbi->s_alloc_mutex);
2025  	ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
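	/* uniqueIDs with the lower 32 bits in 0-15 are reserved by UDF - skip them */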
2026  	if (!(++uniqueID & 0xFFFFFFFF))
2027  		uniqueID += 16;
2028  	lvhd->uniqueID = cpu_to_le64(uniqueID);
2029  	mutex_unlock(&sbi->s_alloc_mutex);
2030  	mark_buffer_dirty(bh);
2031  
2032  	return ret;
2033  }
2034  
2035  static int udf_fill_super(struct super_block *sb, void *options, int silent)
2036  {
2037  	int ret = -EINVAL;
2038  	struct inode *inode = NULL;
2039  	struct udf_options uopt;
2040  	struct kernel_lb_addr rootdir, fileset;
2041  	struct udf_sb_info *sbi;
2042  	bool lvid_open = false;
2043  
2044  	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
2045  	/* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
2046  	uopt.uid = make_kuid(current_user_ns(), overflowuid);
2047  	uopt.gid = make_kgid(current_user_ns(), overflowgid);
2048  	uopt.umask = 0;
2049  	uopt.fmode = UDF_INVALID_MODE;
2050  	uopt.dmode = UDF_INVALID_MODE;
2051  	uopt.nls_map = NULL;
2052  
2053  	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
2054  	if (!sbi)
2055  		return -ENOMEM;
2056  
2057  	sb->s_fs_info = sbi;
2058  
2059  	mutex_init(&sbi->s_alloc_mutex);
2060  
2061  	if (!udf_parse_options((char *)options, &uopt, false))
2062  		goto parse_options_failure;
2063  
2064  	if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
2065  	    uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
2066  		udf_err(sb, "utf8 cannot be combined with iocharset\n");
2067  		goto parse_options_failure;
2068  	}
2069  	if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
2070  		uopt.nls_map = load_nls_default();
2071  		if (!uopt.nls_map)
2072  			uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
2073  		else
2074  			udf_debug("Using default NLS map\n");
2075  	}
2076  	if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
2077  		uopt.flags |= (1 << UDF_FLAG_UTF8);
2078  
2079  	fileset.logicalBlockNum = 0xFFFFFFFF;
2080  	fileset.partitionReferenceNum = 0xFFFF;
2081  
2082  	sbi->s_flags = uopt.flags;
2083  	sbi->s_uid = uopt.uid;
2084  	sbi->s_gid = uopt.gid;
2085  	sbi->s_umask = uopt.umask;
2086  	sbi->s_fmode = uopt.fmode;
2087  	sbi->s_dmode = uopt.dmode;
2088  	sbi->s_nls_map = uopt.nls_map;
2089  	rwlock_init(&sbi->s_cred_lock);
2090  
2091  	if (uopt.session == 0xFFFFFFFF)
2092  		sbi->s_session = udf_get_last_session(sb);
2093  	else
2094  		sbi->s_session = uopt.session;
2095  
2096  	udf_debug("Multi-session=%d\n", sbi->s_session);
2097  
2098  	/* Fill in the rest of the superblock */
2099  	sb->s_op = &udf_sb_ops;
2100  	sb->s_export_op = &udf_export_ops;
2101  
2102  	sb->s_magic = UDF_SUPER_MAGIC;
2103  	sb->s_time_gran = 1000;
2104  
2105  	if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
2106  		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2107  	} else {
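		/*
		 * No blocksize= option was given: probe every power-of-two
		 * block size from the device logical block size up to 4096
		 * until a valid volume descriptor sequence is found.
		 */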
2108  		uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
2109  		while (uopt.blocksize <= 4096) {
2110  			ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2111  			if (ret < 0) {
2112  				if (!silent && ret != -EACCES) {
2113  					pr_notice("Scanning with blocksize %u failed\n",
2114  						  uopt.blocksize);
2115  				}
2116  				brelse(sbi->s_lvid_bh);
2117  				sbi->s_lvid_bh = NULL;
2118  				/*
2119  				 * EACCES is special - we want to propagate to
2120  				 * upper layers that we cannot handle RW mount.
2121  				 */
2122  				if (ret == -EACCES)
2123  					break;
2124  			} else
2125  				break;
2126  
2127  			uopt.blocksize <<= 1;
2128  		}
2129  	}
2130  	if (ret < 0) {
2131  		if (ret == -EAGAIN) {
2132  			udf_warn(sb, "No partition found (1)\n");
2133  			ret = -EINVAL;
2134  		}
2135  		goto error_out;
2136  	}
2137  
2138  	udf_debug("Lastblock=%u\n", sbi->s_last_block);
2139  
2140  	if (sbi->s_lvid_bh) {
2141  		struct logicalVolIntegrityDescImpUse *lvidiu =
2142  							udf_sb_lvidiu(sb);
2143  		uint16_t minUDFReadRev;
2144  		uint16_t minUDFWriteRev;
2145  
2146  		if (!lvidiu) {
2147  			ret = -EINVAL;
2148  			goto error_out;
2149  		}
2150  		minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2151  		minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
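		/*
		 * Refuse the mount if the volume requires a newer revision to
		 * be read; refuse only read-write access (-EACCES) if it
		 * merely requires a newer revision to be written.
		 */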
2152  		if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2153  			udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2154  				minUDFReadRev,
2155  				UDF_MAX_READ_VERSION);
2156  			ret = -EINVAL;
2157  			goto error_out;
2158  		} else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
2159  			   !sb_rdonly(sb)) {
2160  			ret = -EACCES;
2161  			goto error_out;
2162  		}
2163  
2164  		sbi->s_udfrev = minUDFWriteRev;
2165  
2166  		if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
2167  			UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
2168  		if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
2169  			UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
2170  	}
2171  
2172  	if (!sbi->s_partitions) {
2173  		udf_warn(sb, "No partition found (2)\n");
2174  		ret = -EINVAL;
2175  		goto error_out;
2176  	}
2177  
2178  	if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
2179  			UDF_PART_FLAG_READ_ONLY &&
2180  	    !sb_rdonly(sb)) {
2181  		ret = -EACCES;
2182  		goto error_out;
2183  	}
2184  
2185  	if (udf_find_fileset(sb, &fileset, &rootdir)) {
2186  		udf_warn(sb, "No fileset found\n");
2187  		ret = -EINVAL;
2188  		goto error_out;
2189  	}
2190  
2191  	if (!silent) {
2192  		struct timestamp ts;
2193  		udf_time_to_disk_stamp(&ts, sbi->s_record_time);
2194  		udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
2195  			 sbi->s_volume_ident,
2196  			 le16_to_cpu(ts.year), ts.month, ts.day,
2197  			 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
2198  	}
2199  	if (!sb_rdonly(sb)) {
2200  		udf_open_lvid(sb);
2201  		lvid_open = true;
2202  	}
2203  
2204  	/* Assign the root inode */
2205  	/* Inodes are assigned by physical block number; perhaps that's not */
2206  	/* extensible enough, but it works for now */
2207  	inode = udf_iget(sb, &rootdir);
2208  	if (IS_ERR(inode)) {
2209  		udf_err(sb, "Error in udf_iget, block=%u, partition=%u\n",
2210  		       rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
2211  		ret = PTR_ERR(inode);
2212  		goto error_out;
2213  	}
2214  
2215  	/* Allocate a dentry for the root inode */
2216  	sb->s_root = d_make_root(inode);
2217  	if (!sb->s_root) {
2218  		udf_err(sb, "Couldn't allocate root dentry\n");
2219  		ret = -ENOMEM;
2220  		goto error_out;
2221  	}
2222  	sb->s_maxbytes = MAX_LFS_FILESIZE;
2223  	sb->s_max_links = UDF_MAX_LINKS;
2224  	return 0;
2225  
2226  error_out:
2227  	iput(sbi->s_vat_inode);
2228  parse_options_failure:
2229  	if (uopt.nls_map)
2230  		unload_nls(uopt.nls_map);
2231  	if (lvid_open)
2232  		udf_close_lvid(sb);
2233  	brelse(sbi->s_lvid_bh);
2234  	udf_sb_free_partitions(sb);
2235  	kfree(sbi);
2236  	sb->s_fs_info = NULL;
2237  
2238  	return ret;
2239  }
2240  
2241  void _udf_err(struct super_block *sb, const char *function,
2242  	      const char *fmt, ...)
2243  {
2244  	struct va_format vaf;
2245  	va_list args;
2246  
2247  	va_start(args, fmt);
2248  
2249  	vaf.fmt = fmt;
2250  	vaf.va = &args;
2251  
2252  	pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);
2253  
2254  	va_end(args);
2255  }
2256  
2257  void _udf_warn(struct super_block *sb, const char *function,
2258  	       const char *fmt, ...)
2259  {
2260  	struct va_format vaf;
2261  	va_list args;
2262  
2263  	va_start(args, fmt);
2264  
2265  	vaf.fmt = fmt;
2266  	vaf.va = &args;
2267  
2268  	pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);
2269  
2270  	va_end(args);
2271  }
2272  
2273  static void udf_put_super(struct super_block *sb)
2274  {
2275  	struct udf_sb_info *sbi;
2276  
2277  	sbi = UDF_SB(sb);
2278  
2279  	iput(sbi->s_vat_inode);
2280  	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
2281  		unload_nls(sbi->s_nls_map);
2282  	if (!sb_rdonly(sb))
2283  		udf_close_lvid(sb);
2284  	brelse(sbi->s_lvid_bh);
2285  	udf_sb_free_partitions(sb);
2286  	mutex_destroy(&sbi->s_alloc_mutex);
2287  	kfree(sb->s_fs_info);
2288  	sb->s_fs_info = NULL;
2289  }
2290  
2291  static int udf_sync_fs(struct super_block *sb, int wait)
2292  {
2293  	struct udf_sb_info *sbi = UDF_SB(sb);
2294  
2295  	mutex_lock(&sbi->s_alloc_mutex);
2296  	if (sbi->s_lvid_dirty) {
2297  		/*
2298  		 * Blockdevice will be synced later so we don't have to submit
2299  		 * the buffer for IO
2300  		 */
2301  		mark_buffer_dirty(sbi->s_lvid_bh);
2302  		sbi->s_lvid_dirty = 0;
2303  	}
2304  	mutex_unlock(&sbi->s_alloc_mutex);
2305  
2306  	return 0;
2307  }
2308  
2309  static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2310  {
2311  	struct super_block *sb = dentry->d_sb;
2312  	struct udf_sb_info *sbi = UDF_SB(sb);
2313  	struct logicalVolIntegrityDescImpUse *lvidiu;
2314  	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2315  
2316  	lvidiu = udf_sb_lvidiu(sb);
2317  	buf->f_type = UDF_SUPER_MAGIC;
2318  	buf->f_bsize = sb->s_blocksize;
2319  	buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
2320  	buf->f_bfree = udf_count_free(sb);
2321  	buf->f_bavail = buf->f_bfree;
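	/*
	 * UDF has no static inode table: f_files is approximated as the
	 * LVID's numFiles + numDirs plus the free block count, and f_ffree
	 * as the free block count itself.
	 */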
2322  	buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
2323  					  le32_to_cpu(lvidiu->numDirs)) : 0)
2324  			+ buf->f_bfree;
2325  	buf->f_ffree = buf->f_bfree;
2326  	buf->f_namelen = UDF_NAME_LEN;
2327  	buf->f_fsid.val[0] = (u32)id;
2328  	buf->f_fsid.val[1] = (u32)(id >> 32);
2329  
2330  	return 0;
2331  }
2332  
2333  static unsigned int udf_count_free_bitmap(struct super_block *sb,
2334  					  struct udf_bitmap *bitmap)
2335  {
2336  	struct buffer_head *bh = NULL;
2337  	unsigned int accum = 0;
2338  	int index;
2339  	udf_pblk_t block = 0, newblock;
2340  	struct kernel_lb_addr loc;
2341  	uint32_t bytes;
2342  	uint8_t *ptr;
2343  	uint16_t ident;
2344  	struct spaceBitmapDesc *bm;
2345  
2346  	loc.logicalBlockNum = bitmap->s_extPosition;
2347  	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
2348  	bh = udf_read_ptagged(sb, &loc, 0, &ident);
2349  
2350  	if (!bh) {
2351  		udf_err(sb, "udf_count_free failed\n");
2352  		goto out;
2353  	} else if (ident != TAG_IDENT_SBD) {
2354  		brelse(bh);
2355  		udf_err(sb, "udf_count_free failed\n");
2356  		goto out;
2357  	}
2358  
2359  	bm = (struct spaceBitmapDesc *)bh->b_data;
2360  	bytes = le32_to_cpu(bm->numOfBytes);
2361  	index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
2362  	ptr = (uint8_t *)bh->b_data;
2363  
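	/*
	 * Only the first block carries the Space Bitmap Descriptor header;
	 * walk the remaining blocks from offset 0 and count the set bits,
	 * which mark unallocated (free) blocks.
	 */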
2364  	while (bytes > 0) {
2365  		u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
2366  		accum += bitmap_weight((const unsigned long *)(ptr + index),
2367  					cur_bytes * 8);
2368  		bytes -= cur_bytes;
2369  		if (bytes) {
2370  			brelse(bh);
2371  			newblock = udf_get_lb_pblock(sb, &loc, ++block);
2372  			bh = udf_tread(sb, newblock);
2373  			if (!bh) {
2374  				udf_debug("read failed\n");
2375  				goto out;
2376  			}
2377  			index = 0;
2378  			ptr = (uint8_t *)bh->b_data;
2379  		}
2380  	}
2381  	brelse(bh);
2382  out:
2383  	return accum;
2384  }
2385  
2386  static unsigned int udf_count_free_table(struct super_block *sb,
2387  					 struct inode *table)
2388  {
2389  	unsigned int accum = 0;
2390  	uint32_t elen;
2391  	struct kernel_lb_addr eloc;
2392  	int8_t etype;
2393  	struct extent_position epos;
2394  
2395  	mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
2396  	epos.block = UDF_I(table)->i_location;
2397  	epos.offset = sizeof(struct unallocSpaceEntry);
2398  	epos.bh = NULL;
2399  
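	/*
	 * The free space table is an inode whose allocation extents describe
	 * unallocated areas; summing their lengths in blocks gives the free
	 * block count.
	 */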
2400  	while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
2401  		accum += (elen >> table->i_sb->s_blocksize_bits);
2402  
2403  	brelse(epos.bh);
2404  	mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
2405  
2406  	return accum;
2407  }
2408  
2409  static unsigned int udf_count_free(struct super_block *sb)
2410  {
2411  	unsigned int accum = 0;
2412  	struct udf_sb_info *sbi;
2413  	struct udf_part_map *map;
2414  
2415  	sbi = UDF_SB(sb);
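	/*
	 * Prefer the free space count cached in the LVID free space table;
	 * 0xFFFFFFFF there is treated as "not maintained", in which case the
	 * bitmap or table has to be counted directly.
	 */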
2416  	if (sbi->s_lvid_bh) {
2417  		struct logicalVolIntegrityDesc *lvid =
2418  			(struct logicalVolIntegrityDesc *)
2419  			sbi->s_lvid_bh->b_data;
2420  		if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
2421  			accum = le32_to_cpu(
2422  					lvid->freeSpaceTable[sbi->s_partition]);
2423  			if (accum == 0xFFFFFFFF)
2424  				accum = 0;
2425  		}
2426  	}
2427  
2428  	if (accum)
2429  		return accum;
2430  
2431  	map = &sbi->s_partmaps[sbi->s_partition];
2432  	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
2433  		accum += udf_count_free_bitmap(sb,
2434  					       map->s_uspace.s_bitmap);
2435  	}
2436  	if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
2437  		accum += udf_count_free_bitmap(sb,
2438  					       map->s_fspace.s_bitmap);
2439  	}
2440  	if (accum)
2441  		return accum;
2442  
2443  	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
2444  		accum += udf_count_free_table(sb,
2445  					      map->s_uspace.s_table);
2446  	}
2447  	if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
2448  		accum += udf_count_free_table(sb,
2449  					      map->s_fspace.s_table);
2450  	}
2451  
2452  	return accum;
2453  }
2454  
2455  MODULE_AUTHOR("Ben Fennema");
2456  MODULE_DESCRIPTION("Universal Disk Format Filesystem");
2457  MODULE_LICENSE("GPL");
2458  module_init(init_udf_fs)
2459  module_exit(exit_udf_fs)
2460