/*
 * ialloc.c
 *
 * PURPOSE
 *	Inode allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1998-2001 Ben Fennema
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/udf_fs.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "udf_i.h"
#include "udf_sb.h"

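/*
 * udf_free_inode - release an on-disk inode
 *
 * Drops the inode's quota, clears the in-core inode, decrements the
 * directory/file count kept in the Logical Volume Integrity Descriptor
 * (when one is present) and frees the block that held the inode's
 * (extended) file entry.
 */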
void udf_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	clear_inode(inode);

	mutex_lock(&sbi->s_alloc_mutex);
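	/*
	 * If the volume keeps a Logical Volume Integrity Descriptor,
	 * account for the removed file or directory in its
	 * implementation-use area.
	 */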
	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDescImpUse *lvidiu =
							udf_sb_lvidiu(sbi);
		if (S_ISDIR(inode->i_mode))
			lvidiu->numDirs =
				cpu_to_le32(le32_to_cpu(lvidiu->numDirs) - 1);
		else
			lvidiu->numFiles =
				cpu_to_le32(le32_to_cpu(lvidiu->numFiles) - 1);

		mark_buffer_dirty(sbi->s_lvid_bh);
	}
	mutex_unlock(&sbi->s_alloc_mutex);

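	/* Give back the block that held the inode's (extended) file entry. */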
	udf_free_blocks(sb, NULL, UDF_I(inode)->i_location, 0, 1);
}

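/*
 * udf_new_inode - allocate a new inode under @dir
 *
 * Allocates a block for the inode's file entry in the same partition as
 * the parent directory, updates the file/directory counts and UniqueID in
 * the Logical Volume Integrity Descriptor, initialises ownership, the
 * in-ICB data buffer and the allocation descriptor type, and charges the
 * inode to quota.  Returns the new in-core inode on success, or NULL with
 * *err set on failure.
 */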
struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
{
	struct super_block *sb = dir->i_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct inode *inode;
	int block;
	uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
	struct udf_inode_info *iinfo;
	struct udf_inode_info *dinfo = UDF_I(dir);

	inode = new_inode(sb);

	if (!inode) {
		*err = -ENOMEM;
		return NULL;
	}
	*err = -ENOSPC;

	iinfo = UDF_I(inode);
	iinfo->i_unique = 0;
	iinfo->i_lenExtents = 0;
	iinfo->i_next_alloc_block = 0;
	iinfo->i_next_alloc_goal = 0;
	iinfo->i_strat4096 = 0;

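	/*
	 * Allocate the block that will hold the new inode's file entry,
	 * searching from the parent directory's location so the entry
	 * lands close to it.
	 */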
	block = udf_new_block(dir->i_sb, NULL,
			      dinfo->i_location.partitionReferenceNum,
			      start, err);
	if (*err) {
		iput(inode);
		return NULL;
	}

	mutex_lock(&sbi->s_alloc_mutex);
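	/*
	 * Bump the file/directory count in the LVID and take the next
	 * UniqueID from the Logical Volume Header Descriptor stored in
	 * its logicalVolContentsUse area.
	 */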
	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDesc *lvid =
			(struct logicalVolIntegrityDesc *)
			sbi->s_lvid_bh->b_data;
		struct logicalVolIntegrityDescImpUse *lvidiu =
							udf_sb_lvidiu(sbi);
		struct logicalVolHeaderDesc *lvhd;
		uint64_t uniqueID;
		lvhd = (struct logicalVolHeaderDesc *)
				(lvid->logicalVolContentsUse);
		if (S_ISDIR(mode))
			lvidiu->numDirs =
				cpu_to_le32(le32_to_cpu(lvidiu->numDirs) + 1);
		else
			lvidiu->numFiles =
				cpu_to_le32(le32_to_cpu(lvidiu->numFiles) + 1);
		iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
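		/*
		 * Per the OSTA UDF spec, UniqueID values whose lower 32 bits
		 * are 0-15 are reserved (0 is the root directory), so jump
		 * past them when the low word wraps around.
		 */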
		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
			uniqueID += 16;
		lvhd->uniqueID = cpu_to_le64(uniqueID);
		mark_buffer_dirty(sbi->s_lvid_bh);
	}
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		/* New directories inherit the setgid bit from their parent. */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		inode->i_gid = current->fsgid;
	}
	inode->i_mode = mode;

	iinfo->i_location.logicalBlockNum = block;
	iinfo->i_location.partitionReferenceNum =
				dinfo->i_location.partitionReferenceNum;
	inode->i_ino = udf_get_lb_pblock(sb, iinfo->i_location, 0);
	inode->i_blocks = 0;
	iinfo->i_lenEAttr = 0;
	iinfo->i_lenAlloc = 0;
	iinfo->i_use = 0;
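	/*
	 * Reserve the in-core buffer for data stored in the ICB: one block
	 * minus the on-disk (extended) file entry header.
	 */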
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
		iinfo->i_efe = 1;
		if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev)
			sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE;
		iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
					    sizeof(struct extendedFileEntry),
					    GFP_KERNEL);
	} else {
		iinfo->i_efe = 0;
		iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
					    sizeof(struct fileEntry),
					    GFP_KERNEL);
	}
	if (!iinfo->i_ext.i_data) {
		iput(inode);
		*err = -ENOMEM;
		mutex_unlock(&sbi->s_alloc_mutex);
		return NULL;
	}
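	/*
	 * Choose how file data will be mapped: embedded in the ICB, or via
	 * short/long allocation descriptors, depending on mount options.
	 */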
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
	else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	inode->i_mtime = inode->i_atime = inode->i_ctime =
		iinfo->i_crtime = current_fs_time(inode->i_sb);
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	mutex_unlock(&sbi->s_alloc_mutex);

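	/*
	 * Charge the new inode to quota; if that fails, drop the quota
	 * reference and destroy the half-built inode.
	 */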
	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		*err = -EDQUOT;
		return NULL;
	}

	*err = 0;
	return inode;
}