xref: /openbmc/linux/security/security.c (revision 3098f5eb)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Security plug functions
4  *
5  * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
6  * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
7  * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
8  * Copyright (C) 2016 Mellanox Technologies
9  */
10 
11 #define pr_fmt(fmt) "LSM: " fmt
12 
13 #include <linux/bpf.h>
14 #include <linux/capability.h>
15 #include <linux/dcache.h>
16 #include <linux/export.h>
17 #include <linux/init.h>
18 #include <linux/kernel.h>
19 #include <linux/lsm_hooks.h>
20 #include <linux/integrity.h>
21 #include <linux/ima.h>
22 #include <linux/evm.h>
23 #include <linux/fsnotify.h>
24 #include <linux/mman.h>
25 #include <linux/mount.h>
26 #include <linux/personality.h>
27 #include <linux/backing-dev.h>
28 #include <linux/string.h>
29 #include <linux/msg.h>
30 #include <net/flow.h>
31 
32 #define MAX_LSM_EVM_XATTR	2
33 
34 /* How many LSMs were built into the kernel? */
35 #define LSM_COUNT (__end_lsm_info - __start_lsm_info)
36 #define EARLY_LSM_COUNT (__end_early_lsm_info - __start_early_lsm_info)
37 
38 struct security_hook_heads security_hook_heads __lsm_ro_after_init;
39 static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
40 
41 static struct kmem_cache *lsm_file_cache;
42 static struct kmem_cache *lsm_inode_cache;
43 
44 char *lsm_names;
45 static struct lsm_blob_sizes blob_sizes __lsm_ro_after_init;
46 
47 /* Boot-time LSM user choice */
48 static __initdata const char *chosen_lsm_order;
49 static __initdata const char *chosen_major_lsm;
50 
51 static __initconst const char * const builtin_lsm_order = CONFIG_LSM;
52 
53 /* Ordered list of LSMs to initialize. */
54 static __initdata struct lsm_info **ordered_lsms;
55 static __initdata struct lsm_info *exclusive;
56 
57 static __initdata bool debug;
58 #define init_debug(...)						\
59 	do {							\
60 		if (debug)					\
61 			pr_info(__VA_ARGS__);			\
62 	} while (0)
63 
64 static bool __init is_enabled(struct lsm_info *lsm)
65 {
66 	if (!lsm->enabled)
67 		return false;
68 
69 	return *lsm->enabled;
70 }
71 
72 /* Mark an LSM's enabled flag. */
73 static int lsm_enabled_true __initdata = 1;
74 static int lsm_enabled_false __initdata = 0;
75 static void __init set_enabled(struct lsm_info *lsm, bool enabled)
76 {
77 	/*
78 	 * When an LSM hasn't configured an enable variable, we can use
79 	 * a hard-coded location for storing the default enabled state.
80 	 */
81 	if (!lsm->enabled) {
82 		if (enabled)
83 			lsm->enabled = &lsm_enabled_true;
84 		else
85 			lsm->enabled = &lsm_enabled_false;
86 	} else if (lsm->enabled == &lsm_enabled_true) {
87 		if (!enabled)
88 			lsm->enabled = &lsm_enabled_false;
89 	} else if (lsm->enabled == &lsm_enabled_false) {
90 		if (enabled)
91 			lsm->enabled = &lsm_enabled_true;
92 	} else {
93 		*lsm->enabled = enabled;
94 	}
95 }
96 
97 /* Is an LSM already listed in the ordered LSMs list? */
98 static bool __init exists_ordered_lsm(struct lsm_info *lsm)
99 {
100 	struct lsm_info **check;
101 
102 	for (check = ordered_lsms; *check; check++)
103 		if (*check == lsm)
104 			return true;
105 
106 	return false;
107 }
108 
109 /* Append an LSM to the list of ordered LSMs to initialize. */
110 static int last_lsm __initdata;
111 static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from)
112 {
113 	/* Ignore duplicate selections. */
114 	if (exists_ordered_lsm(lsm))
115 		return;
116 
117 	if (WARN(last_lsm == LSM_COUNT, "%s: out of LSM slots!?\n", from))
118 		return;
119 
120 	/* Enable this LSM, if it is not already set. */
121 	if (!lsm->enabled)
122 		lsm->enabled = &lsm_enabled_true;
123 	ordered_lsms[last_lsm++] = lsm;
124 
125 	init_debug("%s ordering: %s (%sabled)\n", from, lsm->name,
126 		   is_enabled(lsm) ? "en" : "dis");
127 }
128 
129 /* Is an LSM allowed to be initialized? */
130 static bool __init lsm_allowed(struct lsm_info *lsm)
131 {
132 	/* Skip if the LSM is disabled. */
133 	if (!is_enabled(lsm))
134 		return false;
135 
136 	/* Not allowed if another exclusive LSM already initialized. */
137 	if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) {
138 		init_debug("exclusive disabled: %s\n", lsm->name);
139 		return false;
140 	}
141 
142 	return true;
143 }
144 
145 static void __init lsm_set_blob_size(int *need, int *lbs)
146 {
147 	int offset;
148 
149 	if (*need > 0) {
150 		offset = *lbs;
151 		*lbs += *need;
152 		*need = offset;
153 	}
154 }
155 
156 static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
157 {
158 	if (!needed)
159 		return;
160 
161 	lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred);
162 	lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file);
163 	/*
164 	 * The inode blob gets an rcu_head in addition to
165 	 * what the modules might need.
166 	 */
167 	if (needed->lbs_inode && blob_sizes.lbs_inode == 0)
168 		blob_sizes.lbs_inode = sizeof(struct rcu_head);
169 	lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
170 	lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
171 	lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
172 	lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
173 }
174 
175 /* Prepare LSM for initialization. */
176 static void __init prepare_lsm(struct lsm_info *lsm)
177 {
178 	int enabled = lsm_allowed(lsm);
179 
180 	/* Record enablement (to handle any following exclusive LSMs). */
181 	set_enabled(lsm, enabled);
182 
183 	/* If enabled, do pre-initialization work. */
184 	if (enabled) {
185 		if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) {
186 			exclusive = lsm;
187 			init_debug("exclusive chosen: %s\n", lsm->name);
188 		}
189 
190 		lsm_set_blob_sizes(lsm->blobs);
191 	}
192 }
193 
194 /* Initialize a given LSM, if it is enabled. */
195 static void __init initialize_lsm(struct lsm_info *lsm)
196 {
197 	if (is_enabled(lsm)) {
198 		int ret;
199 
200 		init_debug("initializing %s\n", lsm->name);
201 		ret = lsm->init();
202 		WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
203 	}
204 }
205 
206 /* Populate ordered LSMs list from comma-separated LSM name list. */
207 static void __init ordered_lsm_parse(const char *order, const char *origin)
208 {
209 	struct lsm_info *lsm;
210 	char *sep, *name, *next;
211 
212 	/* LSM_ORDER_FIRST is always first. */
213 	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
214 		if (lsm->order == LSM_ORDER_FIRST)
215 			append_ordered_lsm(lsm, "first");
216 	}
217 
218 	/* Process "security=", if given. */
219 	if (chosen_major_lsm) {
220 		struct lsm_info *major;
221 
222 		/*
223 		 * To match the original "security=" behavior, this
224 		 * explicitly does NOT fallback to another Legacy Major
225 		 * if the selected one was separately disabled: disable
226 		 * all non-matching Legacy Major LSMs.
227 		 */
228 		for (major = __start_lsm_info; major < __end_lsm_info;
229 		     major++) {
230 			if ((major->flags & LSM_FLAG_LEGACY_MAJOR) &&
231 			    strcmp(major->name, chosen_major_lsm) != 0) {
232 				set_enabled(major, false);
233 				init_debug("security=%s disabled: %s\n",
234 					   chosen_major_lsm, major->name);
235 			}
236 		}
237 	}
238 
239 	sep = kstrdup(order, GFP_KERNEL);
240 	next = sep;
241 	/* Walk the list, looking for matching LSMs. */
242 	while ((name = strsep(&next, ",")) != NULL) {
243 		bool found = false;
244 
245 		for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
246 			if (lsm->order == LSM_ORDER_MUTABLE &&
247 			    strcmp(lsm->name, name) == 0) {
248 				append_ordered_lsm(lsm, origin);
249 				found = true;
250 			}
251 		}
252 
253 		if (!found)
254 			init_debug("%s ignored: %s\n", origin, name);
255 	}
256 
257 	/* Process "security=", if given. */
258 	if (chosen_major_lsm) {
259 		for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
260 			if (exists_ordered_lsm(lsm))
261 				continue;
262 			if (strcmp(lsm->name, chosen_major_lsm) == 0)
263 				append_ordered_lsm(lsm, "security=");
264 		}
265 	}
266 
267 	/* Disable all LSMs not in the ordered list. */
268 	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
269 		if (exists_ordered_lsm(lsm))
270 			continue;
271 		set_enabled(lsm, false);
272 		init_debug("%s disabled: %s\n", origin, lsm->name);
273 	}
274 
275 	kfree(sep);
276 }
277 
278 static void __init lsm_early_cred(struct cred *cred);
279 static void __init lsm_early_task(struct task_struct *task);
280 
281 static int lsm_append(const char *new, char **result);
282 
283 static void __init ordered_lsm_init(void)
284 {
285 	struct lsm_info **lsm;
286 
287 	ordered_lsms = kcalloc(LSM_COUNT + 1, sizeof(*ordered_lsms),
288 				GFP_KERNEL);
289 
290 	if (chosen_lsm_order) {
291 		if (chosen_major_lsm) {
292 			pr_info("security= is ignored because it is superseded by lsm=\n");
293 			chosen_major_lsm = NULL;
294 		}
295 		ordered_lsm_parse(chosen_lsm_order, "cmdline");
296 	} else
297 		ordered_lsm_parse(builtin_lsm_order, "builtin");
298 
299 	for (lsm = ordered_lsms; *lsm; lsm++)
300 		prepare_lsm(*lsm);
301 
302 	init_debug("cred blob size     = %d\n", blob_sizes.lbs_cred);
303 	init_debug("file blob size     = %d\n", blob_sizes.lbs_file);
304 	init_debug("inode blob size    = %d\n", blob_sizes.lbs_inode);
305 	init_debug("ipc blob size      = %d\n", blob_sizes.lbs_ipc);
306 	init_debug("msg_msg blob size  = %d\n", blob_sizes.lbs_msg_msg);
307 	init_debug("task blob size     = %d\n", blob_sizes.lbs_task);
308 
309 	/*
310 	 * Create any kmem_caches needed for blobs
311 	 */
312 	if (blob_sizes.lbs_file)
313 		lsm_file_cache = kmem_cache_create("lsm_file_cache",
314 						   blob_sizes.lbs_file, 0,
315 						   SLAB_PANIC, NULL);
316 	if (blob_sizes.lbs_inode)
317 		lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
318 						    blob_sizes.lbs_inode, 0,
319 						    SLAB_PANIC, NULL);
320 
321 	lsm_early_cred((struct cred *) current->cred);
322 	lsm_early_task(current);
323 	for (lsm = ordered_lsms; *lsm; lsm++)
324 		initialize_lsm(*lsm);
325 
326 	kfree(ordered_lsms);
327 }
328 
329 int __init early_security_init(void)
330 {
331 	int i;
332 	struct hlist_head *list = (struct hlist_head *) &security_hook_heads;
333 	struct lsm_info *lsm;
334 
335 	for (i = 0; i < sizeof(security_hook_heads) / sizeof(struct hlist_head);
336 	     i++)
337 		INIT_HLIST_HEAD(&list[i]);
338 
339 	for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
340 		if (!lsm->enabled)
341 			lsm->enabled = &lsm_enabled_true;
342 		prepare_lsm(lsm);
343 		initialize_lsm(lsm);
344 	}
345 
346 	return 0;
347 }
348 
349 /**
350  * security_init - initializes the security framework
351  *
352  * This should be called early in the kernel initialization sequence.
353  */
354 int __init security_init(void)
355 {
356 	struct lsm_info *lsm;
357 
358 	pr_info("Security Framework initializing\n");
359 
360 	/*
361 	 * Append the names of the early LSM modules now that kmalloc() is
362 	 * available
363 	 */
364 	for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
365 		if (lsm->enabled)
366 			lsm_append(lsm->name, &lsm_names);
367 	}
368 
369 	/* Load LSMs in specified order. */
370 	ordered_lsm_init();
371 
372 	return 0;
373 }
374 
375 /* Save user chosen LSM */
376 static int __init choose_major_lsm(char *str)
377 {
378 	chosen_major_lsm = str;
379 	return 1;
380 }
381 __setup("security=", choose_major_lsm);
382 
383 /* Explicitly choose LSM initialization order. */
384 static int __init choose_lsm_order(char *str)
385 {
386 	chosen_lsm_order = str;
387 	return 1;
388 }
389 __setup("lsm=", choose_lsm_order);
390 
391 /* Enable LSM order debugging. */
392 static int __init enable_debug(char *str)
393 {
394 	debug = true;
395 	return 1;
396 }
397 __setup("lsm.debug", enable_debug);
398 
399 static bool match_last_lsm(const char *list, const char *lsm)
400 {
401 	const char *last;
402 
403 	if (WARN_ON(!list || !lsm))
404 		return false;
405 	last = strrchr(list, ',');
406 	if (last)
407 		/* Pass the comma, strcmp() will check for '\0' */
408 		last++;
409 	else
410 		last = list;
411 	return !strcmp(last, lsm);
412 }
413 
414 static int lsm_append(const char *new, char **result)
415 {
416 	char *cp;
417 
418 	if (*result == NULL) {
419 		*result = kstrdup(new, GFP_KERNEL);
420 		if (*result == NULL)
421 			return -ENOMEM;
422 	} else {
423 		/* Check if it is the last registered name */
424 		if (match_last_lsm(*result, new))
425 			return 0;
426 		cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new);
427 		if (cp == NULL)
428 			return -ENOMEM;
429 		kfree(*result);
430 		*result = cp;
431 	}
432 	return 0;
433 }
434 
435 /**
436  * security_add_hooks - Add a modules hooks to the hook lists.
437  * @hooks: the hooks to add
438  * @count: the number of hooks to add
439  * @lsm: the name of the security module
440  *
441  * Each LSM has to register its hooks with the infrastructure.
442  */
443 void __init security_add_hooks(struct security_hook_list *hooks, int count,
444 				char *lsm)
445 {
446 	int i;
447 
448 	for (i = 0; i < count; i++) {
449 		hooks[i].lsm = lsm;
450 		hlist_add_tail_rcu(&hooks[i].list, hooks[i].head);
451 	}
452 
453 	/*
454 	 * Don't try to append during early_security_init(), we'll come back
455 	 * and fix this up afterwards.
456 	 */
457 	if (slab_is_available()) {
458 		if (lsm_append(lsm, &lsm_names) < 0)
459 			panic("%s - Cannot get early memory.\n", __func__);
460 	}
461 }
462 
463 int call_blocking_lsm_notifier(enum lsm_event event, void *data)
464 {
465 	return blocking_notifier_call_chain(&blocking_lsm_notifier_chain,
466 					    event, data);
467 }
468 EXPORT_SYMBOL(call_blocking_lsm_notifier);
469 
470 int register_blocking_lsm_notifier(struct notifier_block *nb)
471 {
472 	return blocking_notifier_chain_register(&blocking_lsm_notifier_chain,
473 						nb);
474 }
475 EXPORT_SYMBOL(register_blocking_lsm_notifier);
476 
477 int unregister_blocking_lsm_notifier(struct notifier_block *nb)
478 {
479 	return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain,
480 						  nb);
481 }
482 EXPORT_SYMBOL(unregister_blocking_lsm_notifier);
483 
484 /**
485  * lsm_cred_alloc - allocate a composite cred blob
486  * @cred: the cred that needs a blob
487  * @gfp: allocation type
488  *
489  * Allocate the cred blob for all the modules
490  *
491  * Returns 0, or -ENOMEM if memory can't be allocated.
492  */
493 static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
494 {
495 	if (blob_sizes.lbs_cred == 0) {
496 		cred->security = NULL;
497 		return 0;
498 	}
499 
500 	cred->security = kzalloc(blob_sizes.lbs_cred, gfp);
501 	if (cred->security == NULL)
502 		return -ENOMEM;
503 	return 0;
504 }
505 
506 /**
507  * lsm_early_cred - during initialization allocate a composite cred blob
508  * @cred: the cred that needs a blob
509  *
510  * Allocate the cred blob for all the modules
511  */
512 static void __init lsm_early_cred(struct cred *cred)
513 {
514 	int rc = lsm_cred_alloc(cred, GFP_KERNEL);
515 
516 	if (rc)
517 		panic("%s: Early cred alloc failed.\n", __func__);
518 }
519 
520 /**
521  * lsm_file_alloc - allocate a composite file blob
522  * @file: the file that needs a blob
523  *
524  * Allocate the file blob for all the modules
525  *
526  * Returns 0, or -ENOMEM if memory can't be allocated.
527  */
528 static int lsm_file_alloc(struct file *file)
529 {
530 	if (!lsm_file_cache) {
531 		file->f_security = NULL;
532 		return 0;
533 	}
534 
535 	file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
536 	if (file->f_security == NULL)
537 		return -ENOMEM;
538 	return 0;
539 }
540 
541 /**
542  * lsm_inode_alloc - allocate a composite inode blob
543  * @inode: the inode that needs a blob
544  *
545  * Allocate the inode blob for all the modules
546  *
547  * Returns 0, or -ENOMEM if memory can't be allocated.
548  */
549 int lsm_inode_alloc(struct inode *inode)
550 {
551 	if (!lsm_inode_cache) {
552 		inode->i_security = NULL;
553 		return 0;
554 	}
555 
556 	inode->i_security = kmem_cache_zalloc(lsm_inode_cache, GFP_NOFS);
557 	if (inode->i_security == NULL)
558 		return -ENOMEM;
559 	return 0;
560 }
561 
562 /**
563  * lsm_task_alloc - allocate a composite task blob
564  * @task: the task that needs a blob
565  *
566  * Allocate the task blob for all the modules
567  *
568  * Returns 0, or -ENOMEM if memory can't be allocated.
569  */
570 static int lsm_task_alloc(struct task_struct *task)
571 {
572 	if (blob_sizes.lbs_task == 0) {
573 		task->security = NULL;
574 		return 0;
575 	}
576 
577 	task->security = kzalloc(blob_sizes.lbs_task, GFP_KERNEL);
578 	if (task->security == NULL)
579 		return -ENOMEM;
580 	return 0;
581 }
582 
583 /**
584  * lsm_ipc_alloc - allocate a composite ipc blob
585  * @kip: the ipc that needs a blob
586  *
587  * Allocate the ipc blob for all the modules
588  *
589  * Returns 0, or -ENOMEM if memory can't be allocated.
590  */
591 static int lsm_ipc_alloc(struct kern_ipc_perm *kip)
592 {
593 	if (blob_sizes.lbs_ipc == 0) {
594 		kip->security = NULL;
595 		return 0;
596 	}
597 
598 	kip->security = kzalloc(blob_sizes.lbs_ipc, GFP_KERNEL);
599 	if (kip->security == NULL)
600 		return -ENOMEM;
601 	return 0;
602 }
603 
604 /**
605  * lsm_msg_msg_alloc - allocate a composite msg_msg blob
606  * @mp: the msg_msg that needs a blob
607  *
608  * Allocate the ipc blob for all the modules
609  *
610  * Returns 0, or -ENOMEM if memory can't be allocated.
611  */
612 static int lsm_msg_msg_alloc(struct msg_msg *mp)
613 {
614 	if (blob_sizes.lbs_msg_msg == 0) {
615 		mp->security = NULL;
616 		return 0;
617 	}
618 
619 	mp->security = kzalloc(blob_sizes.lbs_msg_msg, GFP_KERNEL);
620 	if (mp->security == NULL)
621 		return -ENOMEM;
622 	return 0;
623 }
624 
625 /**
626  * lsm_early_task - during initialization allocate a composite task blob
627  * @task: the task that needs a blob
628  *
629  * Allocate the task blob for all the modules
630  */
631 static void __init lsm_early_task(struct task_struct *task)
632 {
633 	int rc = lsm_task_alloc(task);
634 
635 	if (rc)
636 		panic("%s: Early task alloc failed.\n", __func__);
637 }
638 
639 /*
640  * Hook list operation macros.
641  *
642  * call_void_hook:
643  *	This is a hook that does not return a value.
644  *
645  * call_int_hook:
646  *	This is a hook that returns a value.
647  */
648 
649 #define call_void_hook(FUNC, ...)				\
650 	do {							\
651 		struct security_hook_list *P;			\
652 								\
653 		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
654 			P->hook.FUNC(__VA_ARGS__);		\
655 	} while (0)
656 
657 #define call_int_hook(FUNC, IRC, ...) ({			\
658 	int RC = IRC;						\
659 	do {							\
660 		struct security_hook_list *P;			\
661 								\
662 		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
663 			RC = P->hook.FUNC(__VA_ARGS__);		\
664 			if (RC != 0)				\
665 				break;				\
666 		}						\
667 	} while (0);						\
668 	RC;							\
669 })
670 
671 /* Security operations */
672 
673 int security_binder_set_context_mgr(struct task_struct *mgr)
674 {
675 	return call_int_hook(binder_set_context_mgr, 0, mgr);
676 }
677 
678 int security_binder_transaction(struct task_struct *from,
679 				struct task_struct *to)
680 {
681 	return call_int_hook(binder_transaction, 0, from, to);
682 }
683 
684 int security_binder_transfer_binder(struct task_struct *from,
685 				    struct task_struct *to)
686 {
687 	return call_int_hook(binder_transfer_binder, 0, from, to);
688 }
689 
690 int security_binder_transfer_file(struct task_struct *from,
691 				  struct task_struct *to, struct file *file)
692 {
693 	return call_int_hook(binder_transfer_file, 0, from, to, file);
694 }
695 
696 int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
697 {
698 	return call_int_hook(ptrace_access_check, 0, child, mode);
699 }
700 
701 int security_ptrace_traceme(struct task_struct *parent)
702 {
703 	return call_int_hook(ptrace_traceme, 0, parent);
704 }
705 
706 int security_capget(struct task_struct *target,
707 		     kernel_cap_t *effective,
708 		     kernel_cap_t *inheritable,
709 		     kernel_cap_t *permitted)
710 {
711 	return call_int_hook(capget, 0, target,
712 				effective, inheritable, permitted);
713 }
714 
715 int security_capset(struct cred *new, const struct cred *old,
716 		    const kernel_cap_t *effective,
717 		    const kernel_cap_t *inheritable,
718 		    const kernel_cap_t *permitted)
719 {
720 	return call_int_hook(capset, 0, new, old,
721 				effective, inheritable, permitted);
722 }
723 
724 int security_capable(const struct cred *cred,
725 		     struct user_namespace *ns,
726 		     int cap,
727 		     unsigned int opts)
728 {
729 	return call_int_hook(capable, 0, cred, ns, cap, opts);
730 }
731 
732 int security_quotactl(int cmds, int type, int id, struct super_block *sb)
733 {
734 	return call_int_hook(quotactl, 0, cmds, type, id, sb);
735 }
736 
737 int security_quota_on(struct dentry *dentry)
738 {
739 	return call_int_hook(quota_on, 0, dentry);
740 }
741 
742 int security_syslog(int type)
743 {
744 	return call_int_hook(syslog, 0, type);
745 }
746 
747 int security_settime64(const struct timespec64 *ts, const struct timezone *tz)
748 {
749 	return call_int_hook(settime, 0, ts, tz);
750 }
751 
752 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
753 {
754 	struct security_hook_list *hp;
755 	int cap_sys_admin = 1;
756 	int rc;
757 
758 	/*
759 	 * The module will respond with a positive value if
760 	 * it thinks the __vm_enough_memory() call should be
761 	 * made with the cap_sys_admin set. If all of the modules
762 	 * agree that it should be set it will. If any module
763 	 * thinks it should not be set it won't.
764 	 */
765 	hlist_for_each_entry(hp, &security_hook_heads.vm_enough_memory, list) {
766 		rc = hp->hook.vm_enough_memory(mm, pages);
767 		if (rc <= 0) {
768 			cap_sys_admin = 0;
769 			break;
770 		}
771 	}
772 	return __vm_enough_memory(mm, pages, cap_sys_admin);
773 }
774 
775 int security_bprm_set_creds(struct linux_binprm *bprm)
776 {
777 	return call_int_hook(bprm_set_creds, 0, bprm);
778 }
779 
780 int security_bprm_check(struct linux_binprm *bprm)
781 {
782 	int ret;
783 
784 	ret = call_int_hook(bprm_check_security, 0, bprm);
785 	if (ret)
786 		return ret;
787 	return ima_bprm_check(bprm);
788 }
789 
790 void security_bprm_committing_creds(struct linux_binprm *bprm)
791 {
792 	call_void_hook(bprm_committing_creds, bprm);
793 }
794 
795 void security_bprm_committed_creds(struct linux_binprm *bprm)
796 {
797 	call_void_hook(bprm_committed_creds, bprm);
798 }
799 
800 int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
801 {
802 	return call_int_hook(fs_context_dup, 0, fc, src_fc);
803 }
804 
805 int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param)
806 {
807 	return call_int_hook(fs_context_parse_param, -ENOPARAM, fc, param);
808 }
809 
810 int security_sb_alloc(struct super_block *sb)
811 {
812 	return call_int_hook(sb_alloc_security, 0, sb);
813 }
814 
815 void security_sb_free(struct super_block *sb)
816 {
817 	call_void_hook(sb_free_security, sb);
818 }
819 
820 void security_free_mnt_opts(void **mnt_opts)
821 {
822 	if (!*mnt_opts)
823 		return;
824 	call_void_hook(sb_free_mnt_opts, *mnt_opts);
825 	*mnt_opts = NULL;
826 }
827 EXPORT_SYMBOL(security_free_mnt_opts);
828 
829 int security_sb_eat_lsm_opts(char *options, void **mnt_opts)
830 {
831 	return call_int_hook(sb_eat_lsm_opts, 0, options, mnt_opts);
832 }
833 EXPORT_SYMBOL(security_sb_eat_lsm_opts);
834 
835 int security_sb_remount(struct super_block *sb,
836 			void *mnt_opts)
837 {
838 	return call_int_hook(sb_remount, 0, sb, mnt_opts);
839 }
840 EXPORT_SYMBOL(security_sb_remount);
841 
842 int security_sb_kern_mount(struct super_block *sb)
843 {
844 	return call_int_hook(sb_kern_mount, 0, sb);
845 }
846 
847 int security_sb_show_options(struct seq_file *m, struct super_block *sb)
848 {
849 	return call_int_hook(sb_show_options, 0, m, sb);
850 }
851 
852 int security_sb_statfs(struct dentry *dentry)
853 {
854 	return call_int_hook(sb_statfs, 0, dentry);
855 }
856 
857 int security_sb_mount(const char *dev_name, const struct path *path,
858                        const char *type, unsigned long flags, void *data)
859 {
860 	return call_int_hook(sb_mount, 0, dev_name, path, type, flags, data);
861 }
862 
863 int security_sb_umount(struct vfsmount *mnt, int flags)
864 {
865 	return call_int_hook(sb_umount, 0, mnt, flags);
866 }
867 
868 int security_sb_pivotroot(const struct path *old_path, const struct path *new_path)
869 {
870 	return call_int_hook(sb_pivotroot, 0, old_path, new_path);
871 }
872 
873 int security_sb_set_mnt_opts(struct super_block *sb,
874 				void *mnt_opts,
875 				unsigned long kern_flags,
876 				unsigned long *set_kern_flags)
877 {
878 	return call_int_hook(sb_set_mnt_opts,
879 				mnt_opts ? -EOPNOTSUPP : 0, sb,
880 				mnt_opts, kern_flags, set_kern_flags);
881 }
882 EXPORT_SYMBOL(security_sb_set_mnt_opts);
883 
884 int security_sb_clone_mnt_opts(const struct super_block *oldsb,
885 				struct super_block *newsb,
886 				unsigned long kern_flags,
887 				unsigned long *set_kern_flags)
888 {
889 	return call_int_hook(sb_clone_mnt_opts, 0, oldsb, newsb,
890 				kern_flags, set_kern_flags);
891 }
892 EXPORT_SYMBOL(security_sb_clone_mnt_opts);
893 
894 int security_add_mnt_opt(const char *option, const char *val, int len,
895 			 void **mnt_opts)
896 {
897 	return call_int_hook(sb_add_mnt_opt, -EINVAL,
898 					option, val, len, mnt_opts);
899 }
900 EXPORT_SYMBOL(security_add_mnt_opt);
901 
902 int security_move_mount(const struct path *from_path, const struct path *to_path)
903 {
904 	return call_int_hook(move_mount, 0, from_path, to_path);
905 }
906 
907 int security_path_notify(const struct path *path, u64 mask,
908 				unsigned int obj_type)
909 {
910 	return call_int_hook(path_notify, 0, path, mask, obj_type);
911 }
912 
913 int security_inode_alloc(struct inode *inode)
914 {
915 	int rc = lsm_inode_alloc(inode);
916 
917 	if (unlikely(rc))
918 		return rc;
919 	rc = call_int_hook(inode_alloc_security, 0, inode);
920 	if (unlikely(rc))
921 		security_inode_free(inode);
922 	return rc;
923 }
924 
925 static void inode_free_by_rcu(struct rcu_head *head)
926 {
927 	/*
928 	 * The rcu head is at the start of the inode blob
929 	 */
930 	kmem_cache_free(lsm_inode_cache, head);
931 }
932 
933 void security_inode_free(struct inode *inode)
934 {
935 	integrity_inode_free(inode);
936 	call_void_hook(inode_free_security, inode);
937 	/*
938 	 * The inode may still be referenced in a path walk and
939 	 * a call to security_inode_permission() can be made
940 	 * after inode_free_security() is called. Ideally, the VFS
941 	 * wouldn't do this, but fixing that is a much harder
942 	 * job. For now, simply free the i_security via RCU, and
943 	 * leave the current inode->i_security pointer intact.
944 	 * The inode will be freed after the RCU grace period too.
945 	 */
946 	if (inode->i_security)
947 		call_rcu((struct rcu_head *)inode->i_security,
948 				inode_free_by_rcu);
949 }
950 
951 int security_dentry_init_security(struct dentry *dentry, int mode,
952 					const struct qstr *name, void **ctx,
953 					u32 *ctxlen)
954 {
955 	return call_int_hook(dentry_init_security, -EOPNOTSUPP, dentry, mode,
956 				name, ctx, ctxlen);
957 }
958 EXPORT_SYMBOL(security_dentry_init_security);
959 
960 int security_dentry_create_files_as(struct dentry *dentry, int mode,
961 				    struct qstr *name,
962 				    const struct cred *old, struct cred *new)
963 {
964 	return call_int_hook(dentry_create_files_as, 0, dentry, mode,
965 				name, old, new);
966 }
967 EXPORT_SYMBOL(security_dentry_create_files_as);
968 
969 int security_inode_init_security(struct inode *inode, struct inode *dir,
970 				 const struct qstr *qstr,
971 				 const initxattrs initxattrs, void *fs_data)
972 {
973 	struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1];
974 	struct xattr *lsm_xattr, *evm_xattr, *xattr;
975 	int ret;
976 
977 	if (unlikely(IS_PRIVATE(inode)))
978 		return 0;
979 
980 	if (!initxattrs)
981 		return call_int_hook(inode_init_security, -EOPNOTSUPP, inode,
982 				     dir, qstr, NULL, NULL, NULL);
983 	memset(new_xattrs, 0, sizeof(new_xattrs));
984 	lsm_xattr = new_xattrs;
985 	ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr,
986 						&lsm_xattr->name,
987 						&lsm_xattr->value,
988 						&lsm_xattr->value_len);
989 	if (ret)
990 		goto out;
991 
992 	evm_xattr = lsm_xattr + 1;
993 	ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr);
994 	if (ret)
995 		goto out;
996 	ret = initxattrs(inode, new_xattrs, fs_data);
997 out:
998 	for (xattr = new_xattrs; xattr->value != NULL; xattr++)
999 		kfree(xattr->value);
1000 	return (ret == -EOPNOTSUPP) ? 0 : ret;
1001 }
1002 EXPORT_SYMBOL(security_inode_init_security);
1003 
1004 int security_old_inode_init_security(struct inode *inode, struct inode *dir,
1005 				     const struct qstr *qstr, const char **name,
1006 				     void **value, size_t *len)
1007 {
1008 	if (unlikely(IS_PRIVATE(inode)))
1009 		return -EOPNOTSUPP;
1010 	return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir,
1011 			     qstr, name, value, len);
1012 }
1013 EXPORT_SYMBOL(security_old_inode_init_security);
1014 
1015 #ifdef CONFIG_SECURITY_PATH
1016 int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode,
1017 			unsigned int dev)
1018 {
1019 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1020 		return 0;
1021 	return call_int_hook(path_mknod, 0, dir, dentry, mode, dev);
1022 }
1023 EXPORT_SYMBOL(security_path_mknod);
1024 
1025 int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode)
1026 {
1027 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1028 		return 0;
1029 	return call_int_hook(path_mkdir, 0, dir, dentry, mode);
1030 }
1031 EXPORT_SYMBOL(security_path_mkdir);
1032 
1033 int security_path_rmdir(const struct path *dir, struct dentry *dentry)
1034 {
1035 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1036 		return 0;
1037 	return call_int_hook(path_rmdir, 0, dir, dentry);
1038 }
1039 
1040 int security_path_unlink(const struct path *dir, struct dentry *dentry)
1041 {
1042 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1043 		return 0;
1044 	return call_int_hook(path_unlink, 0, dir, dentry);
1045 }
1046 EXPORT_SYMBOL(security_path_unlink);
1047 
1048 int security_path_symlink(const struct path *dir, struct dentry *dentry,
1049 			  const char *old_name)
1050 {
1051 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1052 		return 0;
1053 	return call_int_hook(path_symlink, 0, dir, dentry, old_name);
1054 }
1055 
1056 int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
1057 		       struct dentry *new_dentry)
1058 {
1059 	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
1060 		return 0;
1061 	return call_int_hook(path_link, 0, old_dentry, new_dir, new_dentry);
1062 }
1063 
1064 int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
1065 			 const struct path *new_dir, struct dentry *new_dentry,
1066 			 unsigned int flags)
1067 {
1068 	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
1069 		     (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
1070 		return 0;
1071 
1072 	if (flags & RENAME_EXCHANGE) {
1073 		int err = call_int_hook(path_rename, 0, new_dir, new_dentry,
1074 					old_dir, old_dentry);
1075 		if (err)
1076 			return err;
1077 	}
1078 
1079 	return call_int_hook(path_rename, 0, old_dir, old_dentry, new_dir,
1080 				new_dentry);
1081 }
1082 EXPORT_SYMBOL(security_path_rename);
1083 
1084 int security_path_truncate(const struct path *path)
1085 {
1086 	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1087 		return 0;
1088 	return call_int_hook(path_truncate, 0, path);
1089 }
1090 
1091 int security_path_chmod(const struct path *path, umode_t mode)
1092 {
1093 	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1094 		return 0;
1095 	return call_int_hook(path_chmod, 0, path, mode);
1096 }
1097 
1098 int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
1099 {
1100 	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1101 		return 0;
1102 	return call_int_hook(path_chown, 0, path, uid, gid);
1103 }
1104 
1105 int security_path_chroot(const struct path *path)
1106 {
1107 	return call_int_hook(path_chroot, 0, path);
1108 }
1109 #endif
1110 
1111 int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
1112 {
1113 	if (unlikely(IS_PRIVATE(dir)))
1114 		return 0;
1115 	return call_int_hook(inode_create, 0, dir, dentry, mode);
1116 }
1117 EXPORT_SYMBOL_GPL(security_inode_create);
1118 
1119 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
1120 			 struct dentry *new_dentry)
1121 {
1122 	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
1123 		return 0;
1124 	return call_int_hook(inode_link, 0, old_dentry, dir, new_dentry);
1125 }
1126 
1127 int security_inode_unlink(struct inode *dir, struct dentry *dentry)
1128 {
1129 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1130 		return 0;
1131 	return call_int_hook(inode_unlink, 0, dir, dentry);
1132 }
1133 
1134 int security_inode_symlink(struct inode *dir, struct dentry *dentry,
1135 			    const char *old_name)
1136 {
1137 	if (unlikely(IS_PRIVATE(dir)))
1138 		return 0;
1139 	return call_int_hook(inode_symlink, 0, dir, dentry, old_name);
1140 }
1141 
1142 int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1143 {
1144 	if (unlikely(IS_PRIVATE(dir)))
1145 		return 0;
1146 	return call_int_hook(inode_mkdir, 0, dir, dentry, mode);
1147 }
1148 EXPORT_SYMBOL_GPL(security_inode_mkdir);
1149 
1150 int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
1151 {
1152 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1153 		return 0;
1154 	return call_int_hook(inode_rmdir, 0, dir, dentry);
1155 }
1156 
1157 int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
1158 {
1159 	if (unlikely(IS_PRIVATE(dir)))
1160 		return 0;
1161 	return call_int_hook(inode_mknod, 0, dir, dentry, mode, dev);
1162 }
1163 
1164 int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
1165 			   struct inode *new_dir, struct dentry *new_dentry,
1166 			   unsigned int flags)
1167 {
1168         if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
1169             (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
1170 		return 0;
1171 
1172 	if (flags & RENAME_EXCHANGE) {
1173 		int err = call_int_hook(inode_rename, 0, new_dir, new_dentry,
1174 						     old_dir, old_dentry);
1175 		if (err)
1176 			return err;
1177 	}
1178 
1179 	return call_int_hook(inode_rename, 0, old_dir, old_dentry,
1180 					   new_dir, new_dentry);
1181 }
1182 
1183 int security_inode_readlink(struct dentry *dentry)
1184 {
1185 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1186 		return 0;
1187 	return call_int_hook(inode_readlink, 0, dentry);
1188 }
1189 
1190 int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
1191 			       bool rcu)
1192 {
1193 	if (unlikely(IS_PRIVATE(inode)))
1194 		return 0;
1195 	return call_int_hook(inode_follow_link, 0, dentry, inode, rcu);
1196 }
1197 
1198 int security_inode_permission(struct inode *inode, int mask)
1199 {
1200 	if (unlikely(IS_PRIVATE(inode)))
1201 		return 0;
1202 	return call_int_hook(inode_permission, 0, inode, mask);
1203 }
1204 
1205 int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
1206 {
1207 	int ret;
1208 
1209 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1210 		return 0;
1211 	ret = call_int_hook(inode_setattr, 0, dentry, attr);
1212 	if (ret)
1213 		return ret;
1214 	return evm_inode_setattr(dentry, attr);
1215 }
1216 EXPORT_SYMBOL_GPL(security_inode_setattr);
1217 
1218 int security_inode_getattr(const struct path *path)
1219 {
1220 	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1221 		return 0;
1222 	return call_int_hook(inode_getattr, 0, path);
1223 }
1224 
1225 int security_inode_setxattr(struct dentry *dentry, const char *name,
1226 			    const void *value, size_t size, int flags)
1227 {
1228 	int ret;
1229 
1230 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1231 		return 0;
1232 	/*
1233 	 * SELinux and Smack integrate the cap call,
1234 	 * so assume that all LSMs supplying this call do so.
1235 	 */
1236 	ret = call_int_hook(inode_setxattr, 1, dentry, name, value, size,
1237 				flags);
1238 
1239 	if (ret == 1)
1240 		ret = cap_inode_setxattr(dentry, name, value, size, flags);
1241 	if (ret)
1242 		return ret;
1243 	ret = ima_inode_setxattr(dentry, name, value, size);
1244 	if (ret)
1245 		return ret;
1246 	return evm_inode_setxattr(dentry, name, value, size);
1247 }
1248 
1249 void security_inode_post_setxattr(struct dentry *dentry, const char *name,
1250 				  const void *value, size_t size, int flags)
1251 {
1252 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1253 		return;
1254 	call_void_hook(inode_post_setxattr, dentry, name, value, size, flags);
1255 	evm_inode_post_setxattr(dentry, name, value, size);
1256 }
1257 
1258 int security_inode_getxattr(struct dentry *dentry, const char *name)
1259 {
1260 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1261 		return 0;
1262 	return call_int_hook(inode_getxattr, 0, dentry, name);
1263 }
1264 
1265 int security_inode_listxattr(struct dentry *dentry)
1266 {
1267 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1268 		return 0;
1269 	return call_int_hook(inode_listxattr, 0, dentry);
1270 }
1271 
1272 int security_inode_removexattr(struct dentry *dentry, const char *name)
1273 {
1274 	int ret;
1275 
1276 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1277 		return 0;
1278 	/*
1279 	 * SELinux and Smack integrate the cap call,
1280 	 * so assume that all LSMs supplying this call do so.
1281 	 */
1282 	ret = call_int_hook(inode_removexattr, 1, dentry, name);
1283 	if (ret == 1)
1284 		ret = cap_inode_removexattr(dentry, name);
1285 	if (ret)
1286 		return ret;
1287 	ret = ima_inode_removexattr(dentry, name);
1288 	if (ret)
1289 		return ret;
1290 	return evm_inode_removexattr(dentry, name);
1291 }
1292 
1293 int security_inode_need_killpriv(struct dentry *dentry)
1294 {
1295 	return call_int_hook(inode_need_killpriv, 0, dentry);
1296 }
1297 
1298 int security_inode_killpriv(struct dentry *dentry)
1299 {
1300 	return call_int_hook(inode_killpriv, 0, dentry);
1301 }
1302 
1303 int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
1304 {
1305 	struct security_hook_list *hp;
1306 	int rc;
1307 
1308 	if (unlikely(IS_PRIVATE(inode)))
1309 		return -EOPNOTSUPP;
1310 	/*
1311 	 * Only one module will provide an attribute with a given name.
1312 	 */
1313 	hlist_for_each_entry(hp, &security_hook_heads.inode_getsecurity, list) {
1314 		rc = hp->hook.inode_getsecurity(inode, name, buffer, alloc);
1315 		if (rc != -EOPNOTSUPP)
1316 			return rc;
1317 	}
1318 	return -EOPNOTSUPP;
1319 }
1320 
1321 int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
1322 {
1323 	struct security_hook_list *hp;
1324 	int rc;
1325 
1326 	if (unlikely(IS_PRIVATE(inode)))
1327 		return -EOPNOTSUPP;
1328 	/*
1329 	 * Only one module will provide an attribute with a given name.
1330 	 */
1331 	hlist_for_each_entry(hp, &security_hook_heads.inode_setsecurity, list) {
1332 		rc = hp->hook.inode_setsecurity(inode, name, value, size,
1333 								flags);
1334 		if (rc != -EOPNOTSUPP)
1335 			return rc;
1336 	}
1337 	return -EOPNOTSUPP;
1338 }
1339 
1340 int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
1341 {
1342 	if (unlikely(IS_PRIVATE(inode)))
1343 		return 0;
1344 	return call_int_hook(inode_listsecurity, 0, inode, buffer, buffer_size);
1345 }
1346 EXPORT_SYMBOL(security_inode_listsecurity);
1347 
1348 void security_inode_getsecid(struct inode *inode, u32 *secid)
1349 {
1350 	call_void_hook(inode_getsecid, inode, secid);
1351 }
1352 
1353 int security_inode_copy_up(struct dentry *src, struct cred **new)
1354 {
1355 	return call_int_hook(inode_copy_up, 0, src, new);
1356 }
1357 EXPORT_SYMBOL(security_inode_copy_up);
1358 
1359 int security_inode_copy_up_xattr(const char *name)
1360 {
1361 	return call_int_hook(inode_copy_up_xattr, -EOPNOTSUPP, name);
1362 }
1363 EXPORT_SYMBOL(security_inode_copy_up_xattr);
1364 
1365 int security_kernfs_init_security(struct kernfs_node *kn_dir,
1366 				  struct kernfs_node *kn)
1367 {
1368 	return call_int_hook(kernfs_init_security, 0, kn_dir, kn);
1369 }
1370 
1371 int security_file_permission(struct file *file, int mask)
1372 {
1373 	int ret;
1374 
1375 	ret = call_int_hook(file_permission, 0, file, mask);
1376 	if (ret)
1377 		return ret;
1378 
1379 	return fsnotify_perm(file, mask);
1380 }
1381 
1382 int security_file_alloc(struct file *file)
1383 {
1384 	int rc = lsm_file_alloc(file);
1385 
1386 	if (rc)
1387 		return rc;
1388 	rc = call_int_hook(file_alloc_security, 0, file);
1389 	if (unlikely(rc))
1390 		security_file_free(file);
1391 	return rc;
1392 }
1393 
1394 void security_file_free(struct file *file)
1395 {
1396 	void *blob;
1397 
1398 	call_void_hook(file_free_security, file);
1399 
1400 	blob = file->f_security;
1401 	if (blob) {
1402 		file->f_security = NULL;
1403 		kmem_cache_free(lsm_file_cache, blob);
1404 	}
1405 }
1406 
1407 int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1408 {
1409 	return call_int_hook(file_ioctl, 0, file, cmd, arg);
1410 }
1411 
1412 static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
1413 {
1414 	/*
1415 	 * Does we have PROT_READ and does the application expect
1416 	 * it to imply PROT_EXEC?  If not, nothing to talk about...
1417 	 */
1418 	if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
1419 		return prot;
1420 	if (!(current->personality & READ_IMPLIES_EXEC))
1421 		return prot;
1422 	/*
1423 	 * if that's an anonymous mapping, let it.
1424 	 */
1425 	if (!file)
1426 		return prot | PROT_EXEC;
1427 	/*
1428 	 * ditto if it's not on noexec mount, except that on !MMU we need
1429 	 * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case
1430 	 */
1431 	if (!path_noexec(&file->f_path)) {
1432 #ifndef CONFIG_MMU
1433 		if (file->f_op->mmap_capabilities) {
1434 			unsigned caps = file->f_op->mmap_capabilities(file);
1435 			if (!(caps & NOMMU_MAP_EXEC))
1436 				return prot;
1437 		}
1438 #endif
1439 		return prot | PROT_EXEC;
1440 	}
1441 	/* anything on noexec mount won't get PROT_EXEC */
1442 	return prot;
1443 }
1444 
1445 int security_mmap_file(struct file *file, unsigned long prot,
1446 			unsigned long flags)
1447 {
1448 	int ret;
1449 	ret = call_int_hook(mmap_file, 0, file, prot,
1450 					mmap_prot(file, prot), flags);
1451 	if (ret)
1452 		return ret;
1453 	return ima_file_mmap(file, prot);
1454 }
1455 
1456 int security_mmap_addr(unsigned long addr)
1457 {
1458 	return call_int_hook(mmap_addr, 0, addr);
1459 }
1460 
1461 int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
1462 			    unsigned long prot)
1463 {
1464 	return call_int_hook(file_mprotect, 0, vma, reqprot, prot);
1465 }
1466 
1467 int security_file_lock(struct file *file, unsigned int cmd)
1468 {
1469 	return call_int_hook(file_lock, 0, file, cmd);
1470 }
1471 
1472 int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1473 {
1474 	return call_int_hook(file_fcntl, 0, file, cmd, arg);
1475 }
1476 
1477 void security_file_set_fowner(struct file *file)
1478 {
1479 	call_void_hook(file_set_fowner, file);
1480 }
1481 
1482 int security_file_send_sigiotask(struct task_struct *tsk,
1483 				  struct fown_struct *fown, int sig)
1484 {
1485 	return call_int_hook(file_send_sigiotask, 0, tsk, fown, sig);
1486 }
1487 
1488 int security_file_receive(struct file *file)
1489 {
1490 	return call_int_hook(file_receive, 0, file);
1491 }
1492 
1493 int security_file_open(struct file *file)
1494 {
1495 	int ret;
1496 
1497 	ret = call_int_hook(file_open, 0, file);
1498 	if (ret)
1499 		return ret;
1500 
1501 	return fsnotify_perm(file, MAY_OPEN);
1502 }
1503 
1504 int security_task_alloc(struct task_struct *task, unsigned long clone_flags)
1505 {
1506 	int rc = lsm_task_alloc(task);
1507 
1508 	if (rc)
1509 		return rc;
1510 	rc = call_int_hook(task_alloc, 0, task, clone_flags);
1511 	if (unlikely(rc))
1512 		security_task_free(task);
1513 	return rc;
1514 }
1515 
1516 void security_task_free(struct task_struct *task)
1517 {
1518 	call_void_hook(task_free, task);
1519 
1520 	kfree(task->security);
1521 	task->security = NULL;
1522 }
1523 
1524 int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
1525 {
1526 	int rc = lsm_cred_alloc(cred, gfp);
1527 
1528 	if (rc)
1529 		return rc;
1530 
1531 	rc = call_int_hook(cred_alloc_blank, 0, cred, gfp);
1532 	if (unlikely(rc))
1533 		security_cred_free(cred);
1534 	return rc;
1535 }
1536 
1537 void security_cred_free(struct cred *cred)
1538 {
1539 	/*
1540 	 * There is a failure case in prepare_creds() that
1541 	 * may result in a call here with ->security being NULL.
1542 	 */
1543 	if (unlikely(cred->security == NULL))
1544 		return;
1545 
1546 	call_void_hook(cred_free, cred);
1547 
1548 	kfree(cred->security);
1549 	cred->security = NULL;
1550 }
1551 
1552 int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
1553 {
1554 	int rc = lsm_cred_alloc(new, gfp);
1555 
1556 	if (rc)
1557 		return rc;
1558 
1559 	rc = call_int_hook(cred_prepare, 0, new, old, gfp);
1560 	if (unlikely(rc))
1561 		security_cred_free(new);
1562 	return rc;
1563 }
1564 
1565 void security_transfer_creds(struct cred *new, const struct cred *old)
1566 {
1567 	call_void_hook(cred_transfer, new, old);
1568 }
1569 
1570 void security_cred_getsecid(const struct cred *c, u32 *secid)
1571 {
1572 	*secid = 0;
1573 	call_void_hook(cred_getsecid, c, secid);
1574 }
1575 EXPORT_SYMBOL(security_cred_getsecid);
1576 
1577 int security_kernel_act_as(struct cred *new, u32 secid)
1578 {
1579 	return call_int_hook(kernel_act_as, 0, new, secid);
1580 }
1581 
1582 int security_kernel_create_files_as(struct cred *new, struct inode *inode)
1583 {
1584 	return call_int_hook(kernel_create_files_as, 0, new, inode);
1585 }
1586 
1587 int security_kernel_module_request(char *kmod_name)
1588 {
1589 	int ret;
1590 
1591 	ret = call_int_hook(kernel_module_request, 0, kmod_name);
1592 	if (ret)
1593 		return ret;
1594 	return integrity_kernel_module_request(kmod_name);
1595 }
1596 
1597 int security_kernel_read_file(struct file *file, enum kernel_read_file_id id)
1598 {
1599 	int ret;
1600 
1601 	ret = call_int_hook(kernel_read_file, 0, file, id);
1602 	if (ret)
1603 		return ret;
1604 	return ima_read_file(file, id);
1605 }
1606 EXPORT_SYMBOL_GPL(security_kernel_read_file);
1607 
1608 int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
1609 				   enum kernel_read_file_id id)
1610 {
1611 	int ret;
1612 
1613 	ret = call_int_hook(kernel_post_read_file, 0, file, buf, size, id);
1614 	if (ret)
1615 		return ret;
1616 	return ima_post_read_file(file, buf, size, id);
1617 }
1618 EXPORT_SYMBOL_GPL(security_kernel_post_read_file);
1619 
1620 int security_kernel_load_data(enum kernel_load_data_id id)
1621 {
1622 	int ret;
1623 
1624 	ret = call_int_hook(kernel_load_data, 0, id);
1625 	if (ret)
1626 		return ret;
1627 	return ima_load_data(id);
1628 }
1629 EXPORT_SYMBOL_GPL(security_kernel_load_data);
1630 
1631 int security_task_fix_setuid(struct cred *new, const struct cred *old,
1632 			     int flags)
1633 {
1634 	return call_int_hook(task_fix_setuid, 0, new, old, flags);
1635 }
1636 
1637 int security_task_setpgid(struct task_struct *p, pid_t pgid)
1638 {
1639 	return call_int_hook(task_setpgid, 0, p, pgid);
1640 }
1641 
1642 int security_task_getpgid(struct task_struct *p)
1643 {
1644 	return call_int_hook(task_getpgid, 0, p);
1645 }
1646 
1647 int security_task_getsid(struct task_struct *p)
1648 {
1649 	return call_int_hook(task_getsid, 0, p);
1650 }
1651 
1652 void security_task_getsecid(struct task_struct *p, u32 *secid)
1653 {
1654 	*secid = 0;
1655 	call_void_hook(task_getsecid, p, secid);
1656 }
1657 EXPORT_SYMBOL(security_task_getsecid);
1658 
1659 int security_task_setnice(struct task_struct *p, int nice)
1660 {
1661 	return call_int_hook(task_setnice, 0, p, nice);
1662 }
1663 
1664 int security_task_setioprio(struct task_struct *p, int ioprio)
1665 {
1666 	return call_int_hook(task_setioprio, 0, p, ioprio);
1667 }
1668 
1669 int security_task_getioprio(struct task_struct *p)
1670 {
1671 	return call_int_hook(task_getioprio, 0, p);
1672 }
1673 
1674 int security_task_prlimit(const struct cred *cred, const struct cred *tcred,
1675 			  unsigned int flags)
1676 {
1677 	return call_int_hook(task_prlimit, 0, cred, tcred, flags);
1678 }
1679 
1680 int security_task_setrlimit(struct task_struct *p, unsigned int resource,
1681 		struct rlimit *new_rlim)
1682 {
1683 	return call_int_hook(task_setrlimit, 0, p, resource, new_rlim);
1684 }
1685 
1686 int security_task_setscheduler(struct task_struct *p)
1687 {
1688 	return call_int_hook(task_setscheduler, 0, p);
1689 }
1690 
1691 int security_task_getscheduler(struct task_struct *p)
1692 {
1693 	return call_int_hook(task_getscheduler, 0, p);
1694 }
1695 
1696 int security_task_movememory(struct task_struct *p)
1697 {
1698 	return call_int_hook(task_movememory, 0, p);
1699 }
1700 
1701 int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
1702 			int sig, const struct cred *cred)
1703 {
1704 	return call_int_hook(task_kill, 0, p, info, sig, cred);
1705 }
1706 
1707 int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
1708 			 unsigned long arg4, unsigned long arg5)
1709 {
1710 	int thisrc;
1711 	int rc = -ENOSYS;
1712 	struct security_hook_list *hp;
1713 
1714 	hlist_for_each_entry(hp, &security_hook_heads.task_prctl, list) {
1715 		thisrc = hp->hook.task_prctl(option, arg2, arg3, arg4, arg5);
1716 		if (thisrc != -ENOSYS) {
1717 			rc = thisrc;
1718 			if (thisrc != 0)
1719 				break;
1720 		}
1721 	}
1722 	return rc;
1723 }
1724 
1725 void security_task_to_inode(struct task_struct *p, struct inode *inode)
1726 {
1727 	call_void_hook(task_to_inode, p, inode);
1728 }
1729 
1730 int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
1731 {
1732 	return call_int_hook(ipc_permission, 0, ipcp, flag);
1733 }
1734 
1735 void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
1736 {
1737 	*secid = 0;
1738 	call_void_hook(ipc_getsecid, ipcp, secid);
1739 }
1740 
1741 int security_msg_msg_alloc(struct msg_msg *msg)
1742 {
1743 	int rc = lsm_msg_msg_alloc(msg);
1744 
1745 	if (unlikely(rc))
1746 		return rc;
1747 	rc = call_int_hook(msg_msg_alloc_security, 0, msg);
1748 	if (unlikely(rc))
1749 		security_msg_msg_free(msg);
1750 	return rc;
1751 }
1752 
1753 void security_msg_msg_free(struct msg_msg *msg)
1754 {
1755 	call_void_hook(msg_msg_free_security, msg);
1756 	kfree(msg->security);
1757 	msg->security = NULL;
1758 }
1759 
1760 int security_msg_queue_alloc(struct kern_ipc_perm *msq)
1761 {
1762 	int rc = lsm_ipc_alloc(msq);
1763 
1764 	if (unlikely(rc))
1765 		return rc;
1766 	rc = call_int_hook(msg_queue_alloc_security, 0, msq);
1767 	if (unlikely(rc))
1768 		security_msg_queue_free(msq);
1769 	return rc;
1770 }
1771 
1772 void security_msg_queue_free(struct kern_ipc_perm *msq)
1773 {
1774 	call_void_hook(msg_queue_free_security, msq);
1775 	kfree(msq->security);
1776 	msq->security = NULL;
1777 }
1778 
1779 int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
1780 {
1781 	return call_int_hook(msg_queue_associate, 0, msq, msqflg);
1782 }
1783 
1784 int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd)
1785 {
1786 	return call_int_hook(msg_queue_msgctl, 0, msq, cmd);
1787 }
1788 
1789 int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
1790 			       struct msg_msg *msg, int msqflg)
1791 {
1792 	return call_int_hook(msg_queue_msgsnd, 0, msq, msg, msqflg);
1793 }
1794 
1795 int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg,
1796 			       struct task_struct *target, long type, int mode)
1797 {
1798 	return call_int_hook(msg_queue_msgrcv, 0, msq, msg, target, type, mode);
1799 }
1800 
1801 int security_shm_alloc(struct kern_ipc_perm *shp)
1802 {
1803 	int rc = lsm_ipc_alloc(shp);
1804 
1805 	if (unlikely(rc))
1806 		return rc;
1807 	rc = call_int_hook(shm_alloc_security, 0, shp);
1808 	if (unlikely(rc))
1809 		security_shm_free(shp);
1810 	return rc;
1811 }
1812 
1813 void security_shm_free(struct kern_ipc_perm *shp)
1814 {
1815 	call_void_hook(shm_free_security, shp);
1816 	kfree(shp->security);
1817 	shp->security = NULL;
1818 }
1819 
1820 int security_shm_associate(struct kern_ipc_perm *shp, int shmflg)
1821 {
1822 	return call_int_hook(shm_associate, 0, shp, shmflg);
1823 }
1824 
1825 int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
1826 {
1827 	return call_int_hook(shm_shmctl, 0, shp, cmd);
1828 }
1829 
1830 int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg)
1831 {
1832 	return call_int_hook(shm_shmat, 0, shp, shmaddr, shmflg);
1833 }
1834 
1835 int security_sem_alloc(struct kern_ipc_perm *sma)
1836 {
1837 	int rc = lsm_ipc_alloc(sma);
1838 
1839 	if (unlikely(rc))
1840 		return rc;
1841 	rc = call_int_hook(sem_alloc_security, 0, sma);
1842 	if (unlikely(rc))
1843 		security_sem_free(sma);
1844 	return rc;
1845 }
1846 
1847 void security_sem_free(struct kern_ipc_perm *sma)
1848 {
1849 	call_void_hook(sem_free_security, sma);
1850 	kfree(sma->security);
1851 	sma->security = NULL;
1852 }
1853 
1854 int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
1855 {
1856 	return call_int_hook(sem_associate, 0, sma, semflg);
1857 }
1858 
1859 int security_sem_semctl(struct kern_ipc_perm *sma, int cmd)
1860 {
1861 	return call_int_hook(sem_semctl, 0, sma, cmd);
1862 }
1863 
1864 int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
1865 			unsigned nsops, int alter)
1866 {
1867 	return call_int_hook(sem_semop, 0, sma, sops, nsops, alter);
1868 }
1869 
1870 void security_d_instantiate(struct dentry *dentry, struct inode *inode)
1871 {
1872 	if (unlikely(inode && IS_PRIVATE(inode)))
1873 		return;
1874 	call_void_hook(d_instantiate, dentry, inode);
1875 }
1876 EXPORT_SYMBOL(security_d_instantiate);
1877 
1878 int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
1879 				char **value)
1880 {
1881 	struct security_hook_list *hp;
1882 
1883 	hlist_for_each_entry(hp, &security_hook_heads.getprocattr, list) {
1884 		if (lsm != NULL && strcmp(lsm, hp->lsm))
1885 			continue;
1886 		return hp->hook.getprocattr(p, name, value);
1887 	}
1888 	return -EINVAL;
1889 }
1890 
1891 int security_setprocattr(const char *lsm, const char *name, void *value,
1892 			 size_t size)
1893 {
1894 	struct security_hook_list *hp;
1895 
1896 	hlist_for_each_entry(hp, &security_hook_heads.setprocattr, list) {
1897 		if (lsm != NULL && strcmp(lsm, hp->lsm))
1898 			continue;
1899 		return hp->hook.setprocattr(name, value, size);
1900 	}
1901 	return -EINVAL;
1902 }
1903 
1904 int security_netlink_send(struct sock *sk, struct sk_buff *skb)
1905 {
1906 	return call_int_hook(netlink_send, 0, sk, skb);
1907 }
1908 
1909 int security_ismaclabel(const char *name)
1910 {
1911 	return call_int_hook(ismaclabel, 0, name);
1912 }
1913 EXPORT_SYMBOL(security_ismaclabel);
1914 
1915 int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
1916 {
1917 	return call_int_hook(secid_to_secctx, -EOPNOTSUPP, secid, secdata,
1918 				seclen);
1919 }
1920 EXPORT_SYMBOL(security_secid_to_secctx);
1921 
1922 int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
1923 {
1924 	*secid = 0;
1925 	return call_int_hook(secctx_to_secid, 0, secdata, seclen, secid);
1926 }
1927 EXPORT_SYMBOL(security_secctx_to_secid);
1928 
1929 void security_release_secctx(char *secdata, u32 seclen)
1930 {
1931 	call_void_hook(release_secctx, secdata, seclen);
1932 }
1933 EXPORT_SYMBOL(security_release_secctx);
1934 
1935 void security_inode_invalidate_secctx(struct inode *inode)
1936 {
1937 	call_void_hook(inode_invalidate_secctx, inode);
1938 }
1939 EXPORT_SYMBOL(security_inode_invalidate_secctx);
1940 
1941 int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
1942 {
1943 	return call_int_hook(inode_notifysecctx, 0, inode, ctx, ctxlen);
1944 }
1945 EXPORT_SYMBOL(security_inode_notifysecctx);
1946 
1947 int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
1948 {
1949 	return call_int_hook(inode_setsecctx, 0, dentry, ctx, ctxlen);
1950 }
1951 EXPORT_SYMBOL(security_inode_setsecctx);
1952 
1953 int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
1954 {
1955 	return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
1956 }
1957 EXPORT_SYMBOL(security_inode_getsecctx);
1958 
1959 #ifdef CONFIG_SECURITY_NETWORK
1960 
1961 int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk)
1962 {
1963 	return call_int_hook(unix_stream_connect, 0, sock, other, newsk);
1964 }
1965 EXPORT_SYMBOL(security_unix_stream_connect);
1966 
1967 int security_unix_may_send(struct socket *sock,  struct socket *other)
1968 {
1969 	return call_int_hook(unix_may_send, 0, sock, other);
1970 }
1971 EXPORT_SYMBOL(security_unix_may_send);
1972 
1973 int security_socket_create(int family, int type, int protocol, int kern)
1974 {
1975 	return call_int_hook(socket_create, 0, family, type, protocol, kern);
1976 }
1977 
1978 int security_socket_post_create(struct socket *sock, int family,
1979 				int type, int protocol, int kern)
1980 {
1981 	return call_int_hook(socket_post_create, 0, sock, family, type,
1982 						protocol, kern);
1983 }
1984 
1985 int security_socket_socketpair(struct socket *socka, struct socket *sockb)
1986 {
1987 	return call_int_hook(socket_socketpair, 0, socka, sockb);
1988 }
1989 EXPORT_SYMBOL(security_socket_socketpair);
1990 
1991 int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
1992 {
1993 	return call_int_hook(socket_bind, 0, sock, address, addrlen);
1994 }
1995 
1996 int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen)
1997 {
1998 	return call_int_hook(socket_connect, 0, sock, address, addrlen);
1999 }
2000 
2001 int security_socket_listen(struct socket *sock, int backlog)
2002 {
2003 	return call_int_hook(socket_listen, 0, sock, backlog);
2004 }
2005 
2006 int security_socket_accept(struct socket *sock, struct socket *newsock)
2007 {
2008 	return call_int_hook(socket_accept, 0, sock, newsock);
2009 }
2010 
2011 int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
2012 {
2013 	return call_int_hook(socket_sendmsg, 0, sock, msg, size);
2014 }
2015 
2016 int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
2017 			    int size, int flags)
2018 {
2019 	return call_int_hook(socket_recvmsg, 0, sock, msg, size, flags);
2020 }
2021 
2022 int security_socket_getsockname(struct socket *sock)
2023 {
2024 	return call_int_hook(socket_getsockname, 0, sock);
2025 }
2026 
2027 int security_socket_getpeername(struct socket *sock)
2028 {
2029 	return call_int_hook(socket_getpeername, 0, sock);
2030 }
2031 
2032 int security_socket_getsockopt(struct socket *sock, int level, int optname)
2033 {
2034 	return call_int_hook(socket_getsockopt, 0, sock, level, optname);
2035 }
2036 
2037 int security_socket_setsockopt(struct socket *sock, int level, int optname)
2038 {
2039 	return call_int_hook(socket_setsockopt, 0, sock, level, optname);
2040 }
2041 
2042 int security_socket_shutdown(struct socket *sock, int how)
2043 {
2044 	return call_int_hook(socket_shutdown, 0, sock, how);
2045 }
2046 
2047 int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
2048 {
2049 	return call_int_hook(socket_sock_rcv_skb, 0, sk, skb);
2050 }
2051 EXPORT_SYMBOL(security_sock_rcv_skb);
2052 
2053 int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
2054 				      int __user *optlen, unsigned len)
2055 {
2056 	return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock,
2057 				optval, optlen, len);
2058 }
2059 
2060 int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
2061 {
2062 	return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
2063 			     skb, secid);
2064 }
2065 EXPORT_SYMBOL(security_socket_getpeersec_dgram);
2066 
2067 int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
2068 {
2069 	return call_int_hook(sk_alloc_security, 0, sk, family, priority);
2070 }
2071 
2072 void security_sk_free(struct sock *sk)
2073 {
2074 	call_void_hook(sk_free_security, sk);
2075 }
2076 
2077 void security_sk_clone(const struct sock *sk, struct sock *newsk)
2078 {
2079 	call_void_hook(sk_clone_security, sk, newsk);
2080 }
2081 EXPORT_SYMBOL(security_sk_clone);
2082 
2083 void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
2084 {
2085 	call_void_hook(sk_getsecid, sk, &fl->flowi_secid);
2086 }
2087 EXPORT_SYMBOL(security_sk_classify_flow);
2088 
2089 void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
2090 {
2091 	call_void_hook(req_classify_flow, req, fl);
2092 }
2093 EXPORT_SYMBOL(security_req_classify_flow);
2094 
2095 void security_sock_graft(struct sock *sk, struct socket *parent)
2096 {
2097 	call_void_hook(sock_graft, sk, parent);
2098 }
2099 EXPORT_SYMBOL(security_sock_graft);
2100 
2101 int security_inet_conn_request(struct sock *sk,
2102 			struct sk_buff *skb, struct request_sock *req)
2103 {
2104 	return call_int_hook(inet_conn_request, 0, sk, skb, req);
2105 }
2106 EXPORT_SYMBOL(security_inet_conn_request);
2107 
2108 void security_inet_csk_clone(struct sock *newsk,
2109 			const struct request_sock *req)
2110 {
2111 	call_void_hook(inet_csk_clone, newsk, req);
2112 }
2113 
2114 void security_inet_conn_established(struct sock *sk,
2115 			struct sk_buff *skb)
2116 {
2117 	call_void_hook(inet_conn_established, sk, skb);
2118 }
2119 EXPORT_SYMBOL(security_inet_conn_established);
2120 
2121 int security_secmark_relabel_packet(u32 secid)
2122 {
2123 	return call_int_hook(secmark_relabel_packet, 0, secid);
2124 }
2125 EXPORT_SYMBOL(security_secmark_relabel_packet);
2126 
2127 void security_secmark_refcount_inc(void)
2128 {
2129 	call_void_hook(secmark_refcount_inc);
2130 }
2131 EXPORT_SYMBOL(security_secmark_refcount_inc);
2132 
2133 void security_secmark_refcount_dec(void)
2134 {
2135 	call_void_hook(secmark_refcount_dec);
2136 }
2137 EXPORT_SYMBOL(security_secmark_refcount_dec);
2138 
2139 int security_tun_dev_alloc_security(void **security)
2140 {
2141 	return call_int_hook(tun_dev_alloc_security, 0, security);
2142 }
2143 EXPORT_SYMBOL(security_tun_dev_alloc_security);
2144 
2145 void security_tun_dev_free_security(void *security)
2146 {
2147 	call_void_hook(tun_dev_free_security, security);
2148 }
2149 EXPORT_SYMBOL(security_tun_dev_free_security);
2150 
2151 int security_tun_dev_create(void)
2152 {
2153 	return call_int_hook(tun_dev_create, 0);
2154 }
2155 EXPORT_SYMBOL(security_tun_dev_create);
2156 
2157 int security_tun_dev_attach_queue(void *security)
2158 {
2159 	return call_int_hook(tun_dev_attach_queue, 0, security);
2160 }
2161 EXPORT_SYMBOL(security_tun_dev_attach_queue);
2162 
2163 int security_tun_dev_attach(struct sock *sk, void *security)
2164 {
2165 	return call_int_hook(tun_dev_attach, 0, sk, security);
2166 }
2167 EXPORT_SYMBOL(security_tun_dev_attach);
2168 
2169 int security_tun_dev_open(void *security)
2170 {
2171 	return call_int_hook(tun_dev_open, 0, security);
2172 }
2173 EXPORT_SYMBOL(security_tun_dev_open);
2174 
2175 int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb)
2176 {
2177 	return call_int_hook(sctp_assoc_request, 0, ep, skb);
2178 }
2179 EXPORT_SYMBOL(security_sctp_assoc_request);
2180 
2181 int security_sctp_bind_connect(struct sock *sk, int optname,
2182 			       struct sockaddr *address, int addrlen)
2183 {
2184 	return call_int_hook(sctp_bind_connect, 0, sk, optname,
2185 			     address, addrlen);
2186 }
2187 EXPORT_SYMBOL(security_sctp_bind_connect);
2188 
2189 void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
2190 			    struct sock *newsk)
2191 {
2192 	call_void_hook(sctp_sk_clone, ep, sk, newsk);
2193 }
2194 EXPORT_SYMBOL(security_sctp_sk_clone);
2195 
2196 #endif	/* CONFIG_SECURITY_NETWORK */
2197 
2198 #ifdef CONFIG_SECURITY_INFINIBAND
2199 
2200 int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
2201 {
2202 	return call_int_hook(ib_pkey_access, 0, sec, subnet_prefix, pkey);
2203 }
2204 EXPORT_SYMBOL(security_ib_pkey_access);
2205 
2206 int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num)
2207 {
2208 	return call_int_hook(ib_endport_manage_subnet, 0, sec, dev_name, port_num);
2209 }
2210 EXPORT_SYMBOL(security_ib_endport_manage_subnet);
2211 
2212 int security_ib_alloc_security(void **sec)
2213 {
2214 	return call_int_hook(ib_alloc_security, 0, sec);
2215 }
2216 EXPORT_SYMBOL(security_ib_alloc_security);
2217 
2218 void security_ib_free_security(void *sec)
2219 {
2220 	call_void_hook(ib_free_security, sec);
2221 }
2222 EXPORT_SYMBOL(security_ib_free_security);
2223 #endif	/* CONFIG_SECURITY_INFINIBAND */
2224 
2225 #ifdef CONFIG_SECURITY_NETWORK_XFRM
2226 
2227 int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
2228 			       struct xfrm_user_sec_ctx *sec_ctx,
2229 			       gfp_t gfp)
2230 {
2231 	return call_int_hook(xfrm_policy_alloc_security, 0, ctxp, sec_ctx, gfp);
2232 }
2233 EXPORT_SYMBOL(security_xfrm_policy_alloc);
2234 
2235 int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
2236 			      struct xfrm_sec_ctx **new_ctxp)
2237 {
2238 	return call_int_hook(xfrm_policy_clone_security, 0, old_ctx, new_ctxp);
2239 }
2240 
2241 void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
2242 {
2243 	call_void_hook(xfrm_policy_free_security, ctx);
2244 }
2245 EXPORT_SYMBOL(security_xfrm_policy_free);
2246 
2247 int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
2248 {
2249 	return call_int_hook(xfrm_policy_delete_security, 0, ctx);
2250 }
2251 
2252 int security_xfrm_state_alloc(struct xfrm_state *x,
2253 			      struct xfrm_user_sec_ctx *sec_ctx)
2254 {
2255 	return call_int_hook(xfrm_state_alloc, 0, x, sec_ctx);
2256 }
2257 EXPORT_SYMBOL(security_xfrm_state_alloc);
2258 
2259 int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
2260 				      struct xfrm_sec_ctx *polsec, u32 secid)
2261 {
2262 	return call_int_hook(xfrm_state_alloc_acquire, 0, x, polsec, secid);
2263 }
2264 
2265 int security_xfrm_state_delete(struct xfrm_state *x)
2266 {
2267 	return call_int_hook(xfrm_state_delete_security, 0, x);
2268 }
2269 EXPORT_SYMBOL(security_xfrm_state_delete);
2270 
2271 void security_xfrm_state_free(struct xfrm_state *x)
2272 {
2273 	call_void_hook(xfrm_state_free_security, x);
2274 }
2275 
2276 int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
2277 {
2278 	return call_int_hook(xfrm_policy_lookup, 0, ctx, fl_secid, dir);
2279 }
2280 
2281 int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
2282 				       struct xfrm_policy *xp,
2283 				       const struct flowi *fl)
2284 {
2285 	struct security_hook_list *hp;
2286 	int rc = 1;
2287 
2288 	/*
2289 	 * Since this function is expected to return 0 or 1, the judgment
2290 	 * becomes difficult if multiple LSMs supply this call. Fortunately,
2291 	 * we can use the first LSM's judgment because currently only SELinux
2292 	 * supplies this call.
2293 	 *
2294 	 * For speed optimization, we explicitly break the loop rather than
2295 	 * using the macro
2296 	 */
2297 	hlist_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match,
2298 				list) {
2299 		rc = hp->hook.xfrm_state_pol_flow_match(x, xp, fl);
2300 		break;
2301 	}
2302 	return rc;
2303 }
2304 
2305 int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
2306 {
2307 	return call_int_hook(xfrm_decode_session, 0, skb, secid, 1);
2308 }
2309 
2310 void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
2311 {
2312 	int rc = call_int_hook(xfrm_decode_session, 0, skb, &fl->flowi_secid,
2313 				0);
2314 
2315 	BUG_ON(rc);
2316 }
2317 EXPORT_SYMBOL(security_skb_classify_flow);
2318 
2319 #endif	/* CONFIG_SECURITY_NETWORK_XFRM */
2320 
2321 #ifdef CONFIG_KEYS
2322 
2323 int security_key_alloc(struct key *key, const struct cred *cred,
2324 		       unsigned long flags)
2325 {
2326 	return call_int_hook(key_alloc, 0, key, cred, flags);
2327 }
2328 
2329 void security_key_free(struct key *key)
2330 {
2331 	call_void_hook(key_free, key);
2332 }
2333 
2334 int security_key_permission(key_ref_t key_ref,
2335 			    const struct cred *cred, unsigned perm)
2336 {
2337 	return call_int_hook(key_permission, 0, key_ref, cred, perm);
2338 }
2339 
2340 int security_key_getsecurity(struct key *key, char **_buffer)
2341 {
2342 	*_buffer = NULL;
2343 	return call_int_hook(key_getsecurity, 0, key, _buffer);
2344 }
2345 
2346 #endif	/* CONFIG_KEYS */
2347 
2348 #ifdef CONFIG_AUDIT
2349 
2350 int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
2351 {
2352 	return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule);
2353 }
2354 
2355 int security_audit_rule_known(struct audit_krule *krule)
2356 {
2357 	return call_int_hook(audit_rule_known, 0, krule);
2358 }
2359 
2360 void security_audit_rule_free(void *lsmrule)
2361 {
2362 	call_void_hook(audit_rule_free, lsmrule);
2363 }
2364 
2365 int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule)
2366 {
2367 	return call_int_hook(audit_rule_match, 0, secid, field, op, lsmrule);
2368 }
2369 #endif /* CONFIG_AUDIT */
2370 
2371 #ifdef CONFIG_BPF_SYSCALL
2372 int security_bpf(int cmd, union bpf_attr *attr, unsigned int size)
2373 {
2374 	return call_int_hook(bpf, 0, cmd, attr, size);
2375 }
2376 int security_bpf_map(struct bpf_map *map, fmode_t fmode)
2377 {
2378 	return call_int_hook(bpf_map, 0, map, fmode);
2379 }
2380 int security_bpf_prog(struct bpf_prog *prog)
2381 {
2382 	return call_int_hook(bpf_prog, 0, prog);
2383 }
2384 int security_bpf_map_alloc(struct bpf_map *map)
2385 {
2386 	return call_int_hook(bpf_map_alloc_security, 0, map);
2387 }
2388 int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
2389 {
2390 	return call_int_hook(bpf_prog_alloc_security, 0, aux);
2391 }
2392 void security_bpf_map_free(struct bpf_map *map)
2393 {
2394 	call_void_hook(bpf_map_free_security, map);
2395 }
2396 void security_bpf_prog_free(struct bpf_prog_aux *aux)
2397 {
2398 	call_void_hook(bpf_prog_free_security, aux);
2399 }
2400 #endif /* CONFIG_BPF_SYSCALL */
2401 
2402 int security_locked_down(enum lockdown_reason what)
2403 {
2404 	return call_int_hook(locked_down, 0, what);
2405 }
2406 EXPORT_SYMBOL(security_locked_down);
2407 
2408 #ifdef CONFIG_PERF_EVENTS
2409 int security_perf_event_open(struct perf_event_attr *attr, int type)
2410 {
2411 	return call_int_hook(perf_event_open, 0, attr, type);
2412 }
2413 
2414 int security_perf_event_alloc(struct perf_event *event)
2415 {
2416 	return call_int_hook(perf_event_alloc, 0, event);
2417 }
2418 
2419 void security_perf_event_free(struct perf_event *event)
2420 {
2421 	call_void_hook(perf_event_free, event);
2422 }
2423 
2424 int security_perf_event_read(struct perf_event *event)
2425 {
2426 	return call_int_hook(perf_event_read, 0, event);
2427 }
2428 
2429 int security_perf_event_write(struct perf_event *event)
2430 {
2431 	return call_int_hook(perf_event_write, 0, event);
2432 }
2433 #endif /* CONFIG_PERF_EVENTS */
2434