// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2008, Christoph Hellwig
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_acl.h"
#include "xfs_trans.h"
#include "xfs_xattr.h"

#include <linux/posix_acl_xattr.h>

/*
 * Locking scheme:
 *  - all ACL updates are protected by inode->i_mutex, which is taken before
 *    calling into this file.
 */

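/*
 * Convert an on-disk (big-endian) struct xfs_acl into an in-core
 * posix_acl, rejecting buffers whose length or entry count does not
 * match the on-disk format.
 */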
STATIC struct posix_acl *
xfs_acl_from_disk(
	struct xfs_mount *mp,
	const struct xfs_acl *aclp,
	int len,
	int max_entries)
{
	struct posix_acl_entry *acl_e;
	struct posix_acl *acl;
	const struct xfs_acl_entry *ace;
	unsigned int count, i;

	if (len < sizeof(*aclp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
				len);
		return ERR_PTR(-EFSCORRUPTED);
	}

	count = be32_to_cpu(aclp->acl_cnt);
	if (count > max_entries || XFS_ACL_SIZE(count) != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
				len);
		return ERR_PTR(-EFSCORRUPTED);
	}

	acl = posix_acl_alloc(count, GFP_KERNEL);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		acl_e = &acl->a_entries[i];
		ace = &aclp->acl_entry[i];

		/*
		 * The tag is 32 bits on disk and 16 bits in core.
		 *
		 * Because every access to it goes through the core
		 * format first this is not a problem.
		 */
		acl_e->e_tag = be32_to_cpu(ace->ae_tag);
		acl_e->e_perm = be16_to_cpu(ace->ae_perm);

		switch (acl_e->e_tag) {
		case ACL_USER:
			acl_e->e_uid = make_kuid(&init_user_ns,
					be32_to_cpu(ace->ae_id));
			break;
		case ACL_GROUP:
			acl_e->e_gid = make_kgid(&init_user_ns,
					be32_to_cpu(ace->ae_id));
			break;
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			break;
		default:
			goto fail;
		}
	}
	return acl;

fail:
	posix_acl_release(acl);
	return ERR_PTR(-EINVAL);
}

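/*
 * Convert an in-core posix_acl into the big-endian on-disk xfs_acl
 * layout.  Only ACL_USER and ACL_GROUP entries carry a meaningful id;
 * all other tags get ACL_UNDEFINED_ID.
 */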
STATIC void
xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
{
	const struct posix_acl_entry *acl_e;
	struct xfs_acl_entry *ace;
	int i;

	aclp->acl_cnt = cpu_to_be32(acl->a_count);
	for (i = 0; i < acl->a_count; i++) {
		ace = &aclp->acl_entry[i];
		acl_e = &acl->a_entries[i];

		ace->ae_tag = cpu_to_be32(acl_e->e_tag);
		switch (acl_e->e_tag) {
		case ACL_USER:
			ace->ae_id = cpu_to_be32(
					from_kuid(&init_user_ns, acl_e->e_uid));
			break;
		case ACL_GROUP:
			ace->ae_id = cpu_to_be32(
					from_kgid(&init_user_ns, acl_e->e_gid));
			break;
		default:
			ace->ae_id = cpu_to_be32(ACL_UNDEFINED_ID);
			break;
		}

		ace->ae_perm = cpu_to_be16(acl_e->e_perm);
	}
}

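/*
 * Read the ACL of the given type from the SGI_ACL_FILE or
 * SGI_ACL_DEFAULT "root" extended attribute.  A missing attribute
 * returns NULL so the VFS caches the negative result; any other error
 * is passed back via ERR_PTR().
 */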
struct posix_acl *
xfs_get_acl(struct inode *inode, int type, bool rcu)
{
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	struct posix_acl *acl = NULL;
	struct xfs_da_args args = {
		.dp = ip,
		.attr_filter = XFS_ATTR_ROOT,
		.valuelen = XFS_ACL_MAX_SIZE(mp),
	};
	int error;

	if (rcu)
		return ERR_PTR(-ECHILD);

	trace_xfs_get_acl(ip);

	switch (type) {
	case ACL_TYPE_ACCESS:
		args.name = SGI_ACL_FILE;
		break;
	case ACL_TYPE_DEFAULT:
		args.name = SGI_ACL_DEFAULT;
		break;
	default:
		BUG();
	}
	args.namelen = strlen(args.name);

	/*
	 * If the attribute doesn't exist, make sure we have a negative cache
	 * entry; for any other error, assume it is transient.
	 */
	error = xfs_attr_get(&args);
	if (!error) {
		acl = xfs_acl_from_disk(mp, args.value, args.valuelen,
					XFS_ACL_MAX_ENTRIES(mp));
	} else if (error != -ENOATTR) {
		acl = ERR_PTR(error);
	}

	kmem_free(args.value);
	return acl;
}

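/*
 * Write the ACL as a "root" extended attribute, or remove the
 * attribute when @acl is NULL, and update the VFS ACL cache on
 * success.  Per the locking scheme above, the caller holds
 * inode->i_mutex.
 */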
int
__xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_da_args args = {
		.dp = ip,
		.attr_filter = XFS_ATTR_ROOT,
	};
	int error;

	switch (type) {
	case ACL_TYPE_ACCESS:
		args.name = SGI_ACL_FILE;
		break;
	case ACL_TYPE_DEFAULT:
		if (!S_ISDIR(inode->i_mode))
			return acl ? -EACCES : 0;
		args.name = SGI_ACL_DEFAULT;
		break;
	default:
		return -EINVAL;
	}
	args.namelen = strlen(args.name);

	if (acl) {
		args.valuelen = XFS_ACL_SIZE(acl->a_count);
		args.value = kvzalloc(args.valuelen, GFP_KERNEL);
		if (!args.value)
			return -ENOMEM;
		xfs_acl_to_disk(args.value, acl);
	}

	error = xfs_attr_change(&args);
	kmem_free(args.value);

	/*
	 * If the attribute didn't exist to start with, that's fine.
	 */
	if (!acl && error == -ENOATTR)
		error = 0;
	if (!error)
		set_cached_acl(inode, type, acl);
	return error;
}

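/*
 * Set the file mode in its own transaction.  Keeping this separate
 * from __xfs_set_acl() lets xfs_set_acl() change the mode only after
 * the ACL xattr update has succeeded.
 */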
static int
xfs_acl_set_mode(
	struct inode *inode,
	umode_t mode)
{
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_trans *tp;
	int error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	inode->i_mode = mode;
	inode_set_ctime_current(inode);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

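/*
 * VFS ->set_acl entry point: for access ACLs recompute the mode via
 * posix_acl_update_mode(), write the ACL xattr, and only then apply
 * the new mode.
 */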
int
xfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
	    struct posix_acl *acl, int type)
{
	umode_t mode;
	bool set_mode = false;
	int error = 0;
	struct inode *inode = d_inode(dentry);

	if (!acl)
		goto set_acl;

	error = -E2BIG;
	if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
		return error;

	if (type == ACL_TYPE_ACCESS) {
		error = posix_acl_update_mode(idmap, inode, &mode, &acl);
		if (error)
			return error;
		set_mode = true;
	}

 set_acl:
	/*
	 * We set the mode after successfully updating the ACL xattr because
	 * the xattr update can fail at ENOSPC and we don't want to change the
	 * mode if the ACL update hasn't been applied.
	 */
	error = __xfs_set_acl(inode, acl, type);
	if (!error && set_mode && mode != inode->i_mode)
		error = xfs_acl_set_mode(inode, mode);
	return error;
}

/*
 * Invalidate any cached ACLs if the user has bypassed the ACL interface.
 * We don't validate the content whatsoever, so it is the caller's
 * responsibility to provide data in a valid format and ensure i_mode is
 * consistent.
 */
void
xfs_forget_acl(
	struct inode *inode,
	const char *name)
{
	if (!strcmp(name, SGI_ACL_FILE))
		forget_cached_acl(inode, ACL_TYPE_ACCESS);
	else if (!strcmp(name, SGI_ACL_DEFAULT))
		forget_cached_acl(inode, ACL_TYPE_DEFAULT);
}