// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Filesystem management and hooks
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lsm_hooks.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <uapi/linux/landlock.h>

#include "common.h"
#include "cred.h"
#include "fs.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
#include "setup.h"

/* Underlying object management */

static void release_inode(struct landlock_object *const object)
	__releases(object->lock)
{
	struct inode *const inode = object->underobj;
	struct super_block *sb;

	if (!inode) {
		spin_unlock(&object->lock);
		return;
	}

	/*
	 * Protects against concurrent use by hook_sb_delete() of the reference
	 * to the underlying inode.
	 */
	object->underobj = NULL;
	/*
	 * Makes sure that if the filesystem is concurrently unmounted,
	 * hook_sb_delete() will wait for us to finish iput().
	 */
	sb = inode->i_sb;
	atomic_long_inc(&landlock_superblock(sb)->inode_refs);
	spin_unlock(&object->lock);
	/*
	 * Because object->underobj was not NULL, hook_sb_delete() and
	 * get_inode_object() guarantee that it is safe to reset
	 * landlock_inode(inode)->object while it is not NULL.  It is therefore
	 * not necessary to lock inode->i_lock.
	 */
	rcu_assign_pointer(landlock_inode(inode)->object, NULL);
	/*
	 * Now, new rules can safely be tied to @inode with get_inode_object().
	 */

	iput(inode);
	if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
		wake_up_var(&landlock_superblock(sb)->inode_refs);
}

static const struct landlock_object_underops landlock_fs_underops = {
	.release = release_inode
};

/* Ruleset management */

static struct landlock_object *get_inode_object(struct inode *const inode)
{
	struct landlock_object *object, *new_object;
	struct landlock_inode_security *inode_sec = landlock_inode(inode);

	rcu_read_lock();
retry:
	object = rcu_dereference(inode_sec->object);
	if (object) {
		if (likely(refcount_inc_not_zero(&object->usage))) {
			rcu_read_unlock();
			return object;
		}
		/*
		 * We are racing with release_inode(), the object is going
		 * away.  Wait for release_inode(), then retry.
		 */
		spin_lock(&object->lock);
		spin_unlock(&object->lock);
		goto retry;
	}
	rcu_read_unlock();

	/*
	 * If there is no object tied to @inode, then create a new one (without
	 * holding any locks).
	 */
	new_object = landlock_create_object(&landlock_fs_underops, inode);
	if (IS_ERR(new_object))
		return new_object;

	/*
	 * Protects against concurrent calls to get_inode_object() or
	 * hook_sb_delete().
	 */
	spin_lock(&inode->i_lock);
	if (unlikely(rcu_access_pointer(inode_sec->object))) {
		/* Someone else just created the object, bail out and retry. */
		spin_unlock(&inode->i_lock);
		kfree(new_object);

		rcu_read_lock();
		goto retry;
	}

	/*
	 * @inode will be released by hook_sb_delete() on its superblock
	 * shutdown, or by release_inode() when no more ruleset references the
	 * related object.
	 */
	ihold(inode);
	rcu_assign_pointer(inode_sec->object, new_object);
	spin_unlock(&inode->i_lock);
	return new_object;
}
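
/*
 * Illustrative timeline of the race between get_inode_object() and
 * release_inode() that the retry loop above handles (a sketch for readers,
 * not compiled code; it assumes the final landlock_put_object() enters
 * release_inode() with object->lock held, as the __releases() annotation
 * indicates):
 *
 *	CPU0: get_inode_object()	CPU1: last landlock_put_object()
 *	object = rcu_dereference();
 *					object->usage drops to zero;
 *					release_inode() runs, holding
 *					object->lock
 *	refcount_inc_not_zero() fails
 *	spin_lock(&object->lock);
 *	  ...blocks...			spin_unlock(&object->lock);
 *					rcu_assign_pointer(..., NULL);
 *	spin_lock() returns; unlock;
 *	goto retry: the pointer is now (or shortly becomes) NULL, so a
 *	fresh object is allocated with landlock_create_object().
 */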

/* All access rights that can be tied to files. */
/* clang-format off */
#define ACCESS_FILE ( \
	LANDLOCK_ACCESS_FS_EXECUTE | \
	LANDLOCK_ACCESS_FS_WRITE_FILE | \
	LANDLOCK_ACCESS_FS_READ_FILE)
/* clang-format on */

/*
 * @path: Should have been checked by get_path_from_fd().
 */
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
			    const struct path *const path,
			    access_mask_t access_rights)
{
	int err;
	struct landlock_object *object;

	/* Files only get access rights that make sense. */
	if (!d_is_dir(path->dentry) &&
	    (access_rights | ACCESS_FILE) != ACCESS_FILE)
		return -EINVAL;
	if (WARN_ON_ONCE(ruleset->num_layers != 1))
		return -EINVAL;

	/* Transforms relative access rights to absolute ones. */
	access_rights |= LANDLOCK_MASK_ACCESS_FS & ~ruleset->fs_access_masks[0];
	object = get_inode_object(d_backing_inode(path->dentry));
	if (IS_ERR(object))
		return PTR_ERR(object);
	mutex_lock(&ruleset->lock);
	err = landlock_insert_rule(ruleset, object, access_rights);
	mutex_unlock(&ruleset->lock);
	/*
	 * No need to check for an error because landlock_insert_rule()
	 * increments the refcount for the new object if needed.
	 */
	landlock_put_object(object);
	return err;
}
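
/*
 * For context, a minimal user space sketch of how a rule reaches
 * landlock_append_fs_rule() (illustrative only, error handling elided;
 * raw syscall(2) is used because libc wrappers may not exist, and the
 * SYS_landlock_* numbers require recent kernel headers):
 *
 *	#include <fcntl.h>
 *	#include <linux/landlock.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct landlock_ruleset_attr ruleset_attr = {
 *		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
 *				     LANDLOCK_ACCESS_FS_READ_DIR,
 *	};
 *	struct landlock_path_beneath_attr path_beneath = {
 *		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE,
 *	};
 *	int ruleset_fd = syscall(SYS_landlock_create_ruleset,
 *				 &ruleset_attr, sizeof(ruleset_attr), 0);
 *	path_beneath.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
 *	syscall(SYS_landlock_add_rule, ruleset_fd,
 *		LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0);
 *
 * The landlock_add_rule(2) call is what ends up here, with @access_rights
 * derived from path_beneath.allowed_access and @path resolved from
 * path_beneath.parent_fd by get_path_from_fd().
 */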

/* Access-control management */

/*
 * The lifetime of the returned rule is tied to @domain.
 *
 * Returns NULL if no rule is found or if @dentry is negative.
 */
static inline const struct landlock_rule *
find_rule(const struct landlock_ruleset *const domain,
	  const struct dentry *const dentry)
{
	const struct landlock_rule *rule;
	const struct inode *inode;

	/* Ignores nonexistent leaves. */
	if (d_is_negative(dentry))
		return NULL;

	inode = d_backing_inode(dentry);
	rcu_read_lock();
	rule = landlock_find_rule(
		domain, rcu_dereference(landlock_inode(inode)->object));
	rcu_read_unlock();
	return rule;
}

/*
 * @layer_masks is read and may be updated according to the access request and
 * the matching rule.
 *
 * Returns true if the request is allowed (i.e. relevant layer masks for the
 * request are empty).
 */
static inline bool
unmask_layers(const struct landlock_rule *const rule,
	      const access_mask_t access_request,
	      layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
	size_t layer_level;

	if (!access_request || !layer_masks)
		return true;
	if (!rule)
		return false;

	/*
	 * An access is granted if, for each policy layer, at least one rule
	 * encountered on the pathwalk grants the requested access,
	 * regardless of its position in the layer stack.  We must then check
	 * the remaining layers for each inode, from the first added layer to
	 * the last one.  When there are multiple requested accesses, for each
	 * policy layer, the full set of requested accesses may not be granted
	 * by only one rule, but by the union (binary OR) of multiple rules.
	 * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
	 */
	for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
		const struct landlock_layer *const layer =
			&rule->layers[layer_level];
		const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
		const unsigned long access_req = access_request;
		unsigned long access_bit;
		bool is_empty;

		/*
		 * Records in @layer_masks which layer grants access to each
		 * requested access.
		 */
		is_empty = true;
		for_each_set_bit(access_bit, &access_req,
				 ARRAY_SIZE(*layer_masks)) {
			if (layer->access & BIT_ULL(access_bit))
				(*layer_masks)[access_bit] &= ~layer_bit;
			is_empty = is_empty && !(*layer_masks)[access_bit];
		}
		if (is_empty)
			return true;
	}
	return false;
}
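
/*
 * Worked example of the mask bookkeeping above (a sketch with made-up
 * layers): consider a domain with two layers, where layer 1 handles
 * READ_FILE and EXECUTE, and layer 2 handles only READ_FILE.  For an
 * EXECUTE | READ_FILE request, check_access_path() seeds:
 *
 *	layer_masks[READ_FILE] = 0b11	(layers 1 and 2 must grant it)
 *	layer_masks[EXECUTE]   = 0b01	(only layer 1 must grant it)
 *
 * While walking up from /a/b, a layer-1 rule on /a/b granting EXECUTE
 * clears bit 0 of layer_masks[EXECUTE], and a rule on /a granting
 * READ_FILE in both layers clears layer_masks[READ_FILE].  Once every
 * requested mask is zero, unmask_layers() returns true and the walk in
 * check_access_path() stops.
 */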

/*
 * Allows access to pseudo filesystems that will never be mountable (e.g.
 * sockfs, pipefs), but may still be reachable through
 * /proc/<pid>/fd/<file-descriptor>.
 */
static inline bool is_nouser_or_private(const struct dentry *dentry)
{
	return (dentry->d_sb->s_flags & SB_NOUSER) ||
	       (d_is_positive(dentry) &&
		unlikely(IS_PRIVATE(d_backing_inode(dentry))));
}

static int check_access_path(const struct landlock_ruleset *const domain,
			     const struct path *const path,
			     const access_mask_t access_request)
{
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
	bool allowed = false, has_access = false;
	struct path walker_path;
	size_t i;

	if (!access_request)
		return 0;
	if (WARN_ON_ONCE(!domain || !path))
		return 0;
	if (is_nouser_or_private(path->dentry))
		return 0;
	if (WARN_ON_ONCE(domain->num_layers < 1))
		return -EACCES;

	/* Saves all layers handling a subset of requested accesses. */
	for (i = 0; i < domain->num_layers; i++) {
		const unsigned long access_req = access_request;
		unsigned long access_bit;

		for_each_set_bit(access_bit, &access_req,
				 ARRAY_SIZE(layer_masks)) {
			if (domain->fs_access_masks[i] & BIT_ULL(access_bit)) {
				layer_masks[access_bit] |= BIT_ULL(i);
				has_access = true;
			}
		}
	}
	/* An access request not handled by the domain is allowed. */
	if (!has_access)
		return 0;

	walker_path = *path;
	path_get(&walker_path);
	/*
	 * We need to walk up the whole hierarchy so as not to miss any
	 * relevant restriction.
	 */
	while (true) {
		struct dentry *parent_dentry;

		allowed = unmask_layers(find_rule(domain, walker_path.dentry),
					access_request, &layer_masks);
		if (allowed)
			/* Stops when a rule from each layer grants access. */
			break;

jump_up:
		if (walker_path.dentry == walker_path.mnt->mnt_root) {
			if (follow_up(&walker_path)) {
				/* Ignores hidden mount points. */
				goto jump_up;
			} else {
				/*
				 * Stops at the real root.  Denies access
				 * because not all layers have granted access.
				 */
				allowed = false;
				break;
			}
		}
		if (unlikely(IS_ROOT(walker_path.dentry))) {
			/*
			 * Stops at disconnected root directories.  Only allows
			 * access to internal filesystems (e.g. nsfs, which is
			 * reachable through /proc/<pid>/ns/<namespace>).
			 */
			allowed = !!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
			break;
		}
		parent_dentry = dget_parent(walker_path.dentry);
		dput(walker_path.dentry);
		walker_path.dentry = parent_dentry;
	}
	path_put(&walker_path);
	return allowed ? 0 : -EACCES;
}
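
/*
 * Example walk (illustrative, with a hypothetical layout): opening
 * /mnt/data/report for reading, where a filesystem is mounted on
 * /mnt/data, checks rules on the following dentries in order:
 *
 *	report -> the mounted root (follow_up() then crosses to the
 *	parent mount) -> /mnt -> / (real root, walk stops)
 *
 * The request is granted only if the seeded layer masks are fully
 * cleared by rules met along the way (e.g. a READ_FILE rule tied to
 * /mnt/data); reaching the real root with bits still set returns
 * -EACCES.
 */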

static inline int current_check_access_path(const struct path *const path,
					    const access_mask_t access_request)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	return check_access_path(dom, path, access_request);
}

static inline access_mask_t get_mode_access(const umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFLNK:
		return LANDLOCK_ACCESS_FS_MAKE_SYM;
	case 0:
		/* A zero mode translates to S_IFREG. */
	case S_IFREG:
		return LANDLOCK_ACCESS_FS_MAKE_REG;
	case S_IFDIR:
		return LANDLOCK_ACCESS_FS_MAKE_DIR;
	case S_IFCHR:
		return LANDLOCK_ACCESS_FS_MAKE_CHAR;
	case S_IFBLK:
		return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
	case S_IFIFO:
		return LANDLOCK_ACCESS_FS_MAKE_FIFO;
	case S_IFSOCK:
		return LANDLOCK_ACCESS_FS_MAKE_SOCK;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static inline access_mask_t maybe_remove(const struct dentry *const dentry)
{
	if (d_is_negative(dentry))
		return 0;
	return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
				  LANDLOCK_ACCESS_FS_REMOVE_FILE;
}

/* Inode hooks */

static void hook_inode_free_security(struct inode *const inode)
{
	/*
	 * All inodes must already have been untied from their object by
	 * release_inode() or hook_sb_delete().
	 */
	WARN_ON_ONCE(landlock_inode(inode)->object);
}

/* Super-block hooks */

/*
 * Release the inodes used in a security policy.
 *
 * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
 */
static void hook_sb_delete(struct super_block *const sb)
{
	struct inode *inode, *prev_inode = NULL;

	if (!landlock_initialized)
		return;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct landlock_object *object;

		/* Only handles referenced inodes. */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * Protects against concurrent modification of inode (e.g.
		 * from get_inode_object()).
		 */
		spin_lock(&inode->i_lock);
		/*
		 * Checks I_FREEING and I_WILL_FREE to protect against a race
		 * condition when release_inode() just called iput(), which
		 * could lead to a NULL dereference of inode->security or a
		 * second call to iput() for the same Landlock object.  Also
		 * checks I_NEW because such an inode cannot be tied to an
		 * object.
		 */
		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		rcu_read_lock();
		object = rcu_dereference(landlock_inode(inode)->object);
		if (!object) {
			rcu_read_unlock();
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Keeps a reference to this inode until the next loop walk. */
		__iget(inode);
		spin_unlock(&inode->i_lock);

		/*
		 * If there is no concurrent release_inode() ongoing, then we
		 * are in charge of calling iput() on this inode, otherwise we
		 * will just wait for it to finish.
		 */
		spin_lock(&object->lock);
		if (object->underobj == inode) {
			object->underobj = NULL;
			spin_unlock(&object->lock);
			rcu_read_unlock();

			/*
			 * Because object->underobj was not NULL,
			 * release_inode() and get_inode_object() guarantee
			 * that it is safe to reset
			 * landlock_inode(inode)->object while it is not NULL.
			 * It is therefore not necessary to lock inode->i_lock.
			 */
			rcu_assign_pointer(landlock_inode(inode)->object, NULL);
			/*
			 * At this point, we own the ihold() reference that was
			 * originally set up by get_inode_object() and the
			 * __iget() reference that we just set in this loop
			 * walk.  Therefore the following call to iput() will
			 * not sleep nor drop the inode because there are now
			 * at least two references to it.
			 */
			iput(inode);
		} else {
			spin_unlock(&object->lock);
			rcu_read_unlock();
		}

		if (prev_inode) {
			/*
			 * At this point, we still own the __iget() reference
			 * that we just set in this loop walk.  Therefore we
			 * can drop the list lock and know that the inode won't
			 * disappear from under us until the next loop walk.
			 */
			spin_unlock(&sb->s_inode_list_lock);
			/*
			 * We can now actually put the inode reference from the
			 * previous loop walk, which is not needed anymore.
			 */
			iput(prev_inode);
			cond_resched();
			spin_lock(&sb->s_inode_list_lock);
		}
		prev_inode = inode;
	}
	spin_unlock(&sb->s_inode_list_lock);

	/* Puts the inode reference from the last loop walk, if any. */
	if (prev_inode)
		iput(prev_inode);
	/* Waits for pending iput() in release_inode(). */
	wait_var_event(&landlock_superblock(sb)->inode_refs,
		       !atomic_long_read(&landlock_superblock(sb)->inode_refs));
}
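
/*
 * Sketch of the resulting umount handshake (timeline only, not compiled
 * code):
 *
 *	release_inode()			hook_sb_delete()
 *	inode_refs++;
 *	spin_unlock(&object->lock);	walks sb->s_inodes and puts the
 *	...				references it owns
 *	iput(inode);
 *	if (--inode_refs == 0)
 *		wake_up_var();		wait_var_event(inode_refs == 0);
 *
 * The superblock shutdown thus waits for every pending iput() issued by
 * a concurrent release_inode() before the inode list is torn down.
 */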

/*
 * Because a Landlock security policy is defined according to the filesystem
 * topology (i.e. the mount namespace), changing it may grant access to files
 * not previously allowed.
 *
 * To make it simple, deny any filesystem topology modification by landlocked
 * processes.  Non-landlocked processes may still change the namespace of a
 * landlocked process, but this kind of threat must be handled by a system-wide
 * access-control security policy.
 *
 * This could be lifted in the future if Landlock can safely handle mount
 * namespace updates requested by a landlocked process.  Indeed, we could
 * update the current domain (which is currently read-only) by taking into
 * account the accesses of the source and the destination of a new mount point.
 * However, it would also require making all the child domains dynamically
 * inherit these new constraints.  Anyway, for backward compatibility reasons,
 * a dedicated user space option would be required (e.g. as a ruleset flag).
 */
static int hook_sb_mount(const char *const dev_name,
			 const struct path *const path, const char *const type,
			 const unsigned long flags, void *const data)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

static int hook_move_mount(const struct path *const from_path,
			   const struct path *const to_path)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

/*
 * Removing a mount point may reveal a previously hidden file hierarchy, which
 * may then grant access to files that were previously forbidden.
 */
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

/*
 * pivot_root(2), like mount(2), changes the current mount namespace.  It must
 * then be forbidden for a landlocked process.
 *
 * However, chroot(2) may be allowed because it only changes the relative root
 * directory of the current process.  Moreover, it can be used to restrict the
 * view of the filesystem.
 */
static int hook_sb_pivotroot(const struct path *const old_path,
			     const struct path *const new_path)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}
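
/*
 * From user space, the topology hooks above show up as a blanket EPERM
 * once a process is landlocked (illustrative sketch, error handling
 * elided):
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(SYS_landlock_restrict_self, ruleset_fd, 0);
 *	mount("none", "/tmp", "tmpfs", 0, NULL);	// EPERM
 *	umount("/mnt");					// EPERM
 *	chroot("/jail");	// not denied by Landlock (but still
 *				// requires CAP_SYS_CHROOT)
 */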

/* Path hooks */

/*
 * Creating multiple links or renaming may lead to privilege escalations if not
 * handled properly.  Indeed, we must be sure that the source doesn't gain more
 * privileges by being accessible from the destination.  This is getting more
 * complex when dealing with multiple layers.  The whole picture can be seen as
 * a multilayer partial ordering problem.  A future version of Landlock will
 * deal with that.
 */
static int hook_path_link(struct dentry *const old_dentry,
			  const struct path *const new_dir,
			  struct dentry *const new_dentry)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	/* The mount points are the same for old and new paths, cf. EXDEV. */
	if (old_dentry->d_parent != new_dir->dentry)
		/* Gracefully forbids reparenting. */
		return -EXDEV;
	if (unlikely(d_is_negative(old_dentry)))
		return -ENOENT;
	return check_access_path(
		dom, new_dir,
		get_mode_access(d_backing_inode(old_dentry)->i_mode));
}

static int hook_path_rename(const struct path *const old_dir,
			    struct dentry *const old_dentry,
			    const struct path *const new_dir,
			    struct dentry *const new_dentry,
			    const unsigned int flags)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();
	u32 exchange_access = 0;

	if (!dom)
		return 0;
	/* The mount points are the same for old and new paths, cf. EXDEV. */
	if (old_dir->dentry != new_dir->dentry)
		/* Gracefully forbids reparenting. */
		return -EXDEV;
	if (flags & RENAME_EXCHANGE) {
		if (unlikely(d_is_negative(new_dentry)))
			return -ENOENT;
		exchange_access =
			get_mode_access(d_backing_inode(new_dentry)->i_mode);
	}
	if (unlikely(d_is_negative(old_dentry)))
		return -ENOENT;
	/* RENAME_EXCHANGE is handled because directories are the same. */
	return check_access_path(
		dom, old_dir,
		maybe_remove(old_dentry) | maybe_remove(new_dentry) |
			exchange_access |
			get_mode_access(d_backing_inode(old_dentry)->i_mode));
}
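
/*
 * Example of the resulting semantics (illustrative): for
 * rename("/tmp/a", "/tmp/b"), where "a" is a regular file and "b"
 * already exists, the domain must grant on /tmp:
 *
 *	LANDLOCK_ACCESS_FS_REMOVE_FILE	(unlinking "a" and overwriting "b")
 *	LANDLOCK_ACCESS_FS_MAKE_REG	(creating "b")
 *
 * whereas rename("/tmp/a", "/var/b") fails early with -EXDEV because
 * the parent directories differ, before any access check is performed.
 */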

static int hook_path_mkdir(const struct path *const dir,
			   struct dentry *const dentry, const umode_t mode)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
}

static int hook_path_mknod(const struct path *const dir,
			   struct dentry *const dentry, const umode_t mode,
			   const unsigned int dev)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	return check_access_path(dom, dir, get_mode_access(mode));
}

static int hook_path_symlink(const struct path *const dir,
			     struct dentry *const dentry,
			     const char *const old_name)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}

static int hook_path_unlink(const struct path *const dir,
			    struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}

static int hook_path_rmdir(const struct path *const dir,
			   struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}

/* File hooks */

static inline access_mask_t get_file_access(const struct file *const file)
{
	access_mask_t access = 0;

	if (file->f_mode & FMODE_READ) {
		/* A directory can only be opened in read mode. */
		if (S_ISDIR(file_inode(file)->i_mode))
			return LANDLOCK_ACCESS_FS_READ_DIR;
		access = LANDLOCK_ACCESS_FS_READ_FILE;
	}
	if (file->f_mode & FMODE_WRITE)
		access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
	/* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
	if (file->f_flags & __FMODE_EXEC)
		access |= LANDLOCK_ACCESS_FS_EXECUTE;
	return access;
}
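
/*
 * Mapping examples for get_file_access() (illustrative):
 *
 *	open("f", O_RDONLY)			=> READ_FILE
 *	open("f", O_RDWR)			=> READ_FILE | WRITE_FILE
 *	open("d", O_RDONLY | O_DIRECTORY)	=> READ_DIR
 *	execve("f", ...)			=> READ_FILE | EXECUTE
 *	open("f", O_PATH)			=> 0 (cf. hook_file_open())
 */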

static int hook_file_open(struct file *const file)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	/*
	 * Because a file may be opened with O_PATH, get_file_access() may
	 * return 0.  This case will be handled with a future Landlock
	 * evolution.
	 */
	return check_access_path(dom, &file->f_path, get_file_access(file));
}

static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(inode_free_security, hook_inode_free_security),

	LSM_HOOK_INIT(sb_delete, hook_sb_delete),
	LSM_HOOK_INIT(sb_mount, hook_sb_mount),
	LSM_HOOK_INIT(move_mount, hook_move_mount),
	LSM_HOOK_INIT(sb_umount, hook_sb_umount),
	LSM_HOOK_INIT(sb_remount, hook_sb_remount),
	LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),

	LSM_HOOK_INIT(path_link, hook_path_link),
	LSM_HOOK_INIT(path_rename, hook_path_rename),
	LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
	LSM_HOOK_INIT(path_mknod, hook_path_mknod),
	LSM_HOOK_INIT(path_symlink, hook_path_symlink),
	LSM_HOOK_INIT(path_unlink, hook_path_unlink),
	LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),

	LSM_HOOK_INIT(file_open, hook_file_open),
};

__init void landlock_add_fs_hooks(void)
{
	security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
			   LANDLOCK_NAME);
}