/*
 * Network interface table.
 *
 * Network interfaces (devices) do not have a security field, so we
 * maintain a table associating each interface with a SID.
 *
 * Author: James Morris <jmorris@redhat.com>
 *
 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include "security.h"
#include "objsec.h"
#include "netif.h"

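/*
 * SEL_NETIF_HASH_MAX bounds the number of cached entries; once the table
 * is full, lookups for additional interfaces fall back to querying the
 * policy directly.
 */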
#define SEL_NETIF_HASH_SIZE	64
#define SEL_NETIF_HASH_MAX	1024

#undef DEBUG

#ifdef DEBUG
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif

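/*
 * A single cached interface: the embedded security struct pairs the
 * device with its interface and default message SIDs, and the RCU head
 * lets readers traverse the table locklessly while entries are freed.
 */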
struct sel_netif {
	struct list_head list;
	struct netif_security_struct nsec;
	struct rcu_head rcu_head;
};

static u32 sel_netif_total;
static LIST_HEAD(sel_netif_list);
static DEFINE_SPINLOCK(sel_netif_lock);
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];

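/*
 * Hash an interface to a bucket by masking its ifindex; this is why
 * SEL_NETIF_HASH_SIZE must be a power of two.
 */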
static inline u32 sel_netif_hashfn(struct net_device *dev)
{
	return (dev->ifindex & (SEL_NETIF_HASH_SIZE - 1));
}

/*
 * All of the devices should normally fit in the hash, so we optimize
 * for that case.
 */
static inline struct sel_netif *sel_netif_find(struct net_device *dev)
{
	struct list_head *pos;
	int idx = sel_netif_hashfn(dev);

	__list_for_each_rcu(pos, &sel_netif_hash[idx]) {
		struct sel_netif *netif = list_entry(pos,
						     struct sel_netif, list);
		if (likely(netif->nsec.dev == dev))
			return netif;
	}
	return NULL;
}

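/*
 * Add an entry to the table.  The caller must hold sel_netif_lock;
 * fails with -ENOSPC once SEL_NETIF_HASH_MAX entries are cached.
 */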
static int sel_netif_insert(struct sel_netif *netif)
{
	int idx, ret = 0;

	if (sel_netif_total >= SEL_NETIF_HASH_MAX) {
		ret = -ENOSPC;
		goto out;
	}

	idx = sel_netif_hashfn(netif->nsec.dev);
	list_add_rcu(&netif->list, &sel_netif_hash[idx]);
	sel_netif_total++;
out:
	return ret;
}

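/*
 * RCU callback: actually free an entry once all readers that might
 * still see it have finished.
 */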
static void sel_netif_free(struct rcu_head *p)
{
	struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head);

	DEBUGP("%s: %s\n", __FUNCTION__, netif->nsec.dev->name);
	kfree(netif);
}

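/*
 * Unlink an entry and schedule it for freeing after an RCU grace
 * period.  The caller must hold sel_netif_lock.
 */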
static void sel_netif_destroy(struct sel_netif *netif)
{
	DEBUGP("%s: %s\n", __FUNCTION__, netif->nsec.dev->name);

	list_del_rcu(&netif->list);
	sel_netif_total--;
	call_rcu(&netif->rcu_head, sel_netif_free);
}

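/*
 * Look up (and, on a miss, create and cache) the entry for a device.
 * Called under rcu_read_lock(), so the new entry is allocated with
 * GFP_ATOMIC outside sel_netif_lock and the table is rechecked under
 * the lock to resolve races with concurrent inserters.
 */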
static struct sel_netif *sel_netif_lookup(struct net_device *dev)
{
	int ret;
	struct sel_netif *netif, *new;
	struct netif_security_struct *nsec;

	netif = sel_netif_find(dev);
	if (likely(netif != NULL))
		goto out;

	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (!new) {
		netif = ERR_PTR(-ENOMEM);
		goto out;
	}

	nsec = &new->nsec;

	ret = security_netif_sid(dev->name, &nsec->if_sid, &nsec->msg_sid);
	if (ret < 0) {
		kfree(new);
		netif = ERR_PTR(ret);
		goto out;
	}

	nsec->dev = dev;

	spin_lock_bh(&sel_netif_lock);

	netif = sel_netif_find(dev);
	if (netif) {
		spin_unlock_bh(&sel_netif_lock);
		kfree(new);
		goto out;
	}

	ret = sel_netif_insert(new);
	spin_unlock_bh(&sel_netif_lock);

	if (ret) {
		kfree(new);
		netif = ERR_PTR(ret);
		goto out;
	}

	netif = new;

	DEBUGP("new: ifindex=%u name=%s if_sid=%u msg_sid=%u\n",
	       dev->ifindex, dev->name, nsec->if_sid, nsec->msg_sid);
out:
	return netif;
}

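/* Copy the cached SIDs to whichever output pointers the caller supplied. */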
static void sel_netif_assign_sids(u32 if_sid_in, u32 msg_sid_in,
				  u32 *if_sid_out, u32 *msg_sid_out)
{
	if (if_sid_out)
		*if_sid_out = if_sid_in;
	if (msg_sid_out)
		*msg_sid_out = msg_sid_in;
}

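/*
 * Slow path: ask the security server directly, bypassing the cache.
 * Used when a table entry cannot be found or created.
 */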
static int sel_netif_sids_slow(struct net_device *dev, u32 *if_sid,
			       u32 *msg_sid)
{
	int ret;
	u32 tmp_if_sid, tmp_msg_sid;

	ret = security_netif_sid(dev->name, &tmp_if_sid, &tmp_msg_sid);
	if (!ret)
		sel_netif_assign_sids(tmp_if_sid, tmp_msg_sid, if_sid, msg_sid);
	return ret;
}

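/*
 * Return the interface and default message SIDs for a device, using
 * the cache when possible and falling back to a direct policy query
 * if a cache entry cannot be obtained.
 */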
int sel_netif_sids(struct net_device *dev, u32 *if_sid, u32 *msg_sid)
{
	int ret = 0;
	struct sel_netif *netif;

	rcu_read_lock();
	netif = sel_netif_lookup(dev);
	if (IS_ERR(netif)) {
		rcu_read_unlock();
		ret = sel_netif_sids_slow(dev, if_sid, msg_sid);
		goto out;
	}
	sel_netif_assign_sids(netif->nsec.if_sid, netif->nsec.msg_sid,
			      if_sid, msg_sid);
	rcu_read_unlock();
out:
	return ret;
}

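/* Remove the cache entry for a device, typically when it goes down. */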
static void sel_netif_kill(struct net_device *dev)
{
	struct sel_netif *netif;

	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(dev);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
}

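/*
 * Empty the entire table, e.g. after a policy change invalidates all
 * cached SIDs.  Entries are unlinked under the lock and freed via RCU.
 */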
static void sel_netif_flush(void)
{
	int idx;

	spin_lock_bh(&sel_netif_lock);
	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++) {
		struct sel_netif *netif, *tmp;

		list_for_each_entry_safe(netif, tmp,
					 &sel_netif_hash[idx], list)
			sel_netif_destroy(netif);
	}
	spin_unlock_bh(&sel_netif_lock);
}

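/*
 * AVC callback: when the access vector cache is reset (e.g. on policy
 * reload), drop all cached interface SIDs and wait for in-flight
 * networking RCU readers so stale entries are gone before new
 * decisions are made.
 */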
static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
				  u16 class, u32 perms, u32 *retained)
{
	if (event == AVC_CALLBACK_RESET) {
		sel_netif_flush();
		synchronize_net();
	}
	return 0;
}

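/* Netdevice notifier: evict the cache entry when an interface goes down. */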
static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_DOWN)
		sel_netif_kill(dev);

	return NOTIFY_DONE;
}

static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};

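/*
 * Initialize the hash buckets and register both the netdevice notifier
 * and the AVC reset callback; failing to register the latter would
 * leave stale SIDs cached across a policy change, hence the panic.
 */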
static __init int sel_netif_init(void)
{
	int i, err = 0;

	if (!selinux_enabled)
		goto out;

	for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
		INIT_LIST_HEAD(&sel_netif_hash[i]);

	register_netdevice_notifier(&sel_netif_netdev_notifier);

	err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET,
			       SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
	if (err)
		panic("avc_add_callback() failed, error %d\n", err);

out:
	return err;
}

__initcall(sel_netif_init);