xref: /openbmc/linux/security/selinux/netif.c (revision 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2)
/*
 * Network interface table.
 *
 * Network interfaces (devices) do not have a security field, so we
 * maintain a table associating each interface with a SID.
 *
 * Author: James Morris <jmorris@redhat.com>
 *
 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/err.h>

#include "security.h"
#include "objsec.h"
#include "netif.h"

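/*
 * A fixed-size hash table of interface entries, keyed by ifindex.
 * SEL_NETIF_HASH_SIZE must remain a power of two, since the hash
 * function masks with (SEL_NETIF_HASH_SIZE - 1).  SEL_NETIF_HASH_MAX
 * bounds the total number of cached entries.
 */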
#define SEL_NETIF_HASH_SIZE	64
#define SEL_NETIF_HASH_MAX	1024

#undef DEBUG

#ifdef DEBUG
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif

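/*
 * A cached interface: the list linkage for its hash chain, the security
 * data (device pointer plus interface and default message SIDs), and an
 * rcu_head so the entry can be freed after an RCU grace period.
 */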
struct sel_netif {
	struct list_head list;
	struct netif_security_struct nsec;
	struct rcu_head rcu_head;
};

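/*
 * sel_netif_lock protects all updates to the table and to
 * sel_netif_total; lookups on the read side are lockless under RCU.
 */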
static u32 sel_netif_total;
static DEFINE_SPINLOCK(sel_netif_lock);
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];

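/*
 * Hash on the interface index; ifindex values are typically small
 * integers, and SEL_NETIF_HASH_SIZE is a power of two, so the mask
 * below is a cheap modulo.
 */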
static inline u32 sel_netif_hashfn(struct net_device *dev)
{
	return (dev->ifindex & (SEL_NETIF_HASH_SIZE - 1));
}

/*
 * All of the devices should normally fit in the hash table, so we
 * optimize for that case.  Must be called with rcu_read_lock() held
 * (readers) or with sel_netif_lock held (writers).
 */
static inline struct sel_netif *sel_netif_find(struct net_device *dev)
{
	struct sel_netif *netif;
	int idx = sel_netif_hashfn(dev);

	list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list) {
		if (likely(netif->nsec.dev == dev))
			return netif;
	}
	return NULL;
}

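/*
 * Add an entry to the table.  Caller must hold sel_netif_lock.  Fails
 * with -ENOSPC once SEL_NETIF_HASH_MAX entries are cached, which bounds
 * the memory consumed on behalf of (potentially many) interfaces.
 */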
static int sel_netif_insert(struct sel_netif *netif)
{
	int idx, ret = 0;

	if (sel_netif_total >= SEL_NETIF_HASH_MAX) {
		ret = -ENOSPC;
		goto out;
	}

	idx = sel_netif_hashfn(netif->nsec.dev);
	list_add_rcu(&netif->list, &sel_netif_hash[idx]);
	sel_netif_total++;
out:
	return ret;
}

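/*
 * RCU callback: actually free an entry, once no reader can still hold a
 * reference obtained before it was unlinked.
 */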
static void sel_netif_free(struct rcu_head *p)
{
	struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head);

	DEBUGP("%s: %s\n", __func__, netif->nsec.dev->name);
	kfree(netif);
}

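/*
 * Unlink an entry and schedule it for freeing.  Caller must hold
 * sel_netif_lock; the actual kfree() is deferred via call_rcu() so that
 * concurrent lockless readers are not left with a dangling pointer.
 */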
static void sel_netif_destroy(struct sel_netif *netif)
{
	DEBUGP("%s: %s\n", __func__, netif->nsec.dev->name);

	list_del_rcu(&netif->list);
	sel_netif_total--;
	call_rcu(&netif->rcu_head, sel_netif_free);
}

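/*
 * Find-or-create: return the cached entry for @dev, creating one from
 * policy on a miss.  The allocation and policy query are done outside
 * sel_netif_lock; the table is then re-checked under the lock, and the
 * new entry is discarded if another CPU inserted one in the meantime.
 * Called from the RCU read side, hence the GFP_ATOMIC allocation.
 */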
static struct sel_netif *sel_netif_lookup(struct net_device *dev)
{
	int ret;
	struct sel_netif *netif, *new;
	struct netif_security_struct *nsec;

	netif = sel_netif_find(dev);
	if (likely(netif != NULL))
		goto out;

	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (!new) {
		netif = ERR_PTR(-ENOMEM);
		goto out;
	}

	nsec = &new->nsec;

	/* Ask the security server for the SIDs this interface maps to. */
	ret = security_netif_sid(dev->name, &nsec->if_sid, &nsec->msg_sid);
	if (ret < 0) {
		kfree(new);
		netif = ERR_PTR(ret);
		goto out;
	}

	nsec->dev = dev;

	spin_lock_bh(&sel_netif_lock);

	/* Re-check under the lock: someone may have beaten us to it. */
	netif = sel_netif_find(dev);
	if (netif) {
		spin_unlock_bh(&sel_netif_lock);
		kfree(new);
		goto out;
	}

	ret = sel_netif_insert(new);
	spin_unlock_bh(&sel_netif_lock);

	if (ret) {
		kfree(new);
		netif = ERR_PTR(ret);
		goto out;
	}

	netif = new;

	DEBUGP("new: ifindex=%d name=%s if_sid=%u msg_sid=%u\n",
	       dev->ifindex, dev->name, nsec->if_sid, nsec->msg_sid);
out:
	return netif;
}

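/*
 * Copy SIDs to the caller's out-parameters, either of which may be
 * NULL if the caller is not interested in that SID.
 */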
static void sel_netif_assign_sids(u32 if_sid_in, u32 msg_sid_in,
				  u32 *if_sid_out, u32 *msg_sid_out)
{
	if (if_sid_out)
		*if_sid_out = if_sid_in;
	if (msg_sid_out)
		*msg_sid_out = msg_sid_in;
}

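/*
 * Slow path: query the security server directly, bypassing the cache.
 * Used when a cache entry could not be allocated or inserted.
 */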
static int sel_netif_sids_slow(struct net_device *dev, u32 *if_sid,
			       u32 *msg_sid)
{
	int ret;
	u32 tmp_if_sid, tmp_msg_sid;

	ret = security_netif_sid(dev->name, &tmp_if_sid, &tmp_msg_sid);
	if (!ret)
		sel_netif_assign_sids(tmp_if_sid, tmp_msg_sid, if_sid, msg_sid);
	return ret;
}

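/*
 * sel_netif_sids - look up the interface and default message SIDs for
 * @dev, using the cache when possible and falling back to a direct
 * policy query if a cache entry cannot be created.  Returns 0 on
 * success or a negative errno from the security server.
 *
 * A sketch of a hypothetical caller (for illustration only; the real
 * callers live in the SELinux networking hooks):
 *
 *	u32 if_sid, msg_sid;
 *	int err = sel_netif_sids(skb->dev, &if_sid, &msg_sid);
 *	if (err)
 *		return err;
 *	// ... use if_sid/msg_sid in an avc_has_perm() check ...
 */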
int sel_netif_sids(struct net_device *dev, u32 *if_sid, u32 *msg_sid)
{
	int ret = 0;
	struct sel_netif *netif;

	rcu_read_lock();
	netif = sel_netif_lookup(dev);
	if (IS_ERR(netif)) {
		rcu_read_unlock();
		ret = sel_netif_sids_slow(dev, if_sid, msg_sid);
		goto out;
	}
	sel_netif_assign_sids(netif->nsec.if_sid, netif->nsec.msg_sid,
			      if_sid, msg_sid);
	rcu_read_unlock();
out:
	return ret;
}

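/*
 * Remove the cache entry for @dev, if one exists.
 */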
static void sel_netif_kill(struct net_device *dev)
{
	struct sel_netif *netif;

	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(dev);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
}

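/*
 * Flush the entire table.  Plain list_for_each_entry() is safe here
 * even though entries are being unlinked: list_del_rcu() leaves the
 * victim's next pointer intact, and the memory is not freed until an
 * RCU grace period has elapsed, well after this loop has moved on.
 */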
static void sel_netif_flush(void)
{
	int idx;

	spin_lock_bh(&sel_netif_lock);
	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++) {
		struct sel_netif *netif;

		list_for_each_entry(netif, &sel_netif_hash[idx], list)
			sel_netif_destroy(netif);
	}
	spin_unlock_bh(&sel_netif_lock);
}

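/*
 * AVC callback: a policy load or access-vector-cache reset may change
 * the SIDs that interface names map to, so drop everything and let the
 * cache repopulate against the new policy.  synchronize_net() then
 * waits for any in-flight packet processing (RCU readers) to finish
 * with the old entries before returning.
 */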
static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
				  u16 class, u32 perms, u32 *retained)
{
	if (event == AVC_CALLBACK_RESET) {
		sel_netif_flush();
		synchronize_net();
	}
	return 0;
}

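/*
 * Netdevice notifier: when an interface goes down, evict its entry so
 * that a stale SID cannot be matched against a later incarnation of
 * the same ifindex.
 */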
static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_DOWN)
		sel_netif_kill(dev);

	return NOTIFY_DONE;
}

static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};

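/*
 * Initialize the hash buckets and register for netdevice and AVC reset
 * events.  A failure to register the AVC callback is fatal: without it
 * the table could serve stale SIDs after a policy reload.
 */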
static int __init sel_netif_init(void)
{
	int i, err = 0;

	if (!selinux_enabled)
		goto out;

	for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
		INIT_LIST_HEAD(&sel_netif_hash[i]);

	register_netdevice_notifier(&sel_netif_netdev_notifier);

	err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET,
			       SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
	if (err)
		panic("avc_add_callback() failed, error %d\n", err);

out:
	return err;
}

__initcall(sel_netif_init);