xref: /openbmc/linux/net/caif/caif_dev.c (revision c72dfae2)
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to
 *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_dev.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>

MODULE_LICENSE("GPL");
#define TIMEOUT (HZ*5)

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	atomic_t in_use;
	atomic_t state;
	u16 phyid;
	struct net_device *netdev;
	wait_queue_head_t event;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in the list */
	spinlock_t lock;
};

struct caif_net {
	struct caif_device_entry_list caifdevs;
};

static int caif_net_id;
static struct cfcnfg *cfg;

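/* Return the per-namespace list of CAIF devices tracked for @net. */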
static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	BUG_ON(!net);
	caifn = net_generic(net, caif_net_id);
	BUG_ON(!caifn);
	return &caifn->caifdevs;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs;
	struct caif_device_entry *caifd;
	caifdevs = caif_device_list(dev_net(dev));
	BUG_ON(!caifdevs);
	caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
	if (!caifd)
		return NULL;
	caifd->netdev = dev;
	list_add(&caifd->list, &caifdevs->list);
	init_waitqueue_head(&caifd->event);
	return caifd;
}

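/* Look up the tracking entry for @dev, or NULL if the device is unknown. */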
static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;
	BUG_ON(!caifdevs);
	list_for_each_entry(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

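/* Unlink @dev's tracking entry from the per-namespace list and free it. */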
static void caif_device_destroy(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;
	ASSERT_RTNL();
	if (dev->type != ARPHRD_CAIF)
		return;

	spin_lock_bh(&caifdevs->lock);
	caifd = caif_get(dev);
	if (caifd == NULL) {
		spin_unlock_bh(&caifdevs->lock);
		return;
	}

	list_del(&caifd->list);
	spin_unlock_bh(&caifdevs->lock);

	kfree(caifd);
}

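/*
 * Transmit a CAIF packet on the underlying network device. An extra
 * reference is taken on the skb so that a failed transmit does not
 * destroy the packet; -EAGAIN tells the caller to resend it.
 */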
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb, *skb2;
	int ret = -EINVAL;
	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	/*
	 * Don't allow the SKB to be destroyed upon error, but signal a resend
	 * notification to clients. We can't rely on the return value as
	 * congestion (NET_XMIT_CN) sometimes drops the packet and sometimes
	 * doesn't.
	 */
	if (netif_queue_stopped(caifd->netdev))
		return -EAGAIN;
	skb2 = skb_get(skb);

	ret = dev_queue_xmit(skb2);

	if (!ret)
		kfree_skb(skb);
	else
		return -EAGAIN;

	return 0;
}

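/*
 * Handle "PHY in use" signalling from the CAIF stack:
 * _CAIF_MODEMCMD_PHYIF_USEFULL marks the device busy and
 * _CAIF_MODEMCMD_PHYIF_USELESS marks it idle; both wake anyone
 * waiting in caif_device_notify() for the device to become idle.
 */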
static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
{
	struct caif_device_entry *caifd;
	struct caif_dev_common *caifdev;
	caifd = container_of(layr, struct caif_device_entry, layer);
	caifdev = netdev_priv(caifd->netdev);
	if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
		atomic_set(&caifd->in_use, 1);
		wake_up_interruptible(&caifd->event);

	} else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
		atomic_set(&caifd->in_use, 0);
		wake_up_interruptible(&caifd->event);
	}
	return 0;
}

/*
 * Deliver received packets to the CAIF stack bound to the device.
 * Returns NET_RX_DROP if no CAIF stack is attached to the device or
 * the upper layer rejects the packet.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
		return NET_RX_DROP;

	if (caifd->layer.up->receive(caifd->layer.up, pkt))
		return NET_RX_DROP;

	return 0;
}

static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

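/* Propagate flow-control on/off events from the netdevice to the CAIF stack. */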
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
		return;

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
}

/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *arg)
{
	struct net_device *dev = arg;
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	enum cfcnfg_phy_preference pref;
	int res = -EINVAL;
	enum cfcnfg_phy_type phy_type;

	if (dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		pr_info("CAIF: %s():register %s\n", __func__, dev->name);
		caifd = caif_device_alloc(dev);
		if (caifd == NULL)
			break;
		caifdev = netdev_priv(dev);
		caifdev->flowctrl = dev_flowctrl;
		atomic_set(&caifd->state, what);
		res = 0;
		break;

	case NETDEV_UP:
		pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
		caifd = caif_get(dev);
		if (caifd == NULL)
			break;
		caifdev = netdev_priv(dev);
		if (atomic_read(&caifd->state) == NETDEV_UP) {
			pr_info("CAIF: %s():%s already up\n",
				__func__, dev->name);
			break;
		}
		atomic_set(&caifd->state, what);
		caifd->layer.transmit = transmit;
		caifd->layer.modemcmd = modemcmd;

		if (caifdev->use_frag)
			phy_type = CFPHYTYPE_FRAG;
		else
			phy_type = CFPHYTYPE_CAIF;

		switch (caifdev->link_select) {
		case CAIF_LINK_HIGH_BANDW:
			pref = CFPHYPREF_HIGH_BW;
			break;
		case CAIF_LINK_LOW_LATENCY:
			pref = CFPHYPREF_LOW_LAT;
			break;
		default:
			pref = CFPHYPREF_HIGH_BW;
			break;
		}

		cfcnfg_add_phy_layer(get_caif_conf(),
				     phy_type,
				     dev,
				     &caifd->layer,
				     &caifd->phyid,
				     pref,
				     caifdev->use_fcs,
				     caifdev->use_stx);
		strncpy(caifd->layer.name, dev->name,
			sizeof(caifd->layer.name) - 1);
		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
		break;

	case NETDEV_GOING_DOWN:
		caifd = caif_get(dev);
		if (caifd == NULL)
			break;
		pr_info("CAIF: %s():going down %s\n", __func__, dev->name);

		if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
			atomic_read(&caifd->state) == NETDEV_DOWN)
			break;

		atomic_set(&caifd->state, what);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
			return -EINVAL;
		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);
		res = wait_event_interruptible_timeout(caifd->event,
					atomic_read(&caifd->in_use) == 0,
					TIMEOUT);
		break;

	case NETDEV_DOWN:
		caifd = caif_get(dev);
		if (caifd == NULL)
			break;
		pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
		if (atomic_read(&caifd->in_use))
			pr_warning("CAIF: %s(): "
				   "Unregistering an active CAIF device: %s\n",
				   __func__, dev->name);
		cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
		atomic_set(&caifd->state, what);
		break;

	case NETDEV_UNREGISTER:
		caifd = caif_get(dev);
		if (caifd == NULL)
			break;
		pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
		atomic_set(&caifd->state, what);
		caif_device_destroy(dev);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

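/* Return the global CAIF configuration object created at module init. */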
struct cfcnfg *get_caif_conf(void)
{
	return cfg;
}
EXPORT_SYMBOL(get_caif_conf);

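/*
 * caif_connect_client() - hook a client layer into the CAIF stack.
 *
 * Translates @conn_req into link-setup parameters and, on success, adds
 * @client_layer as an adaptation layer; returns -EINVAL if the request
 * cannot be translated.
 *
 * Illustrative sketch only (the request fields shown are assumptions
 * based on caif_dev.h, not taken from this file); a client would
 * typically do something like:
 *
 *	struct caif_connect_request req;
 *	memset(&req, 0, sizeof(req));
 *	req.protocol = CAIFPROTO_DATAGRAM;
 *	err = caif_connect_client(&req, &client->layer);
 *
 * where client->layer is a struct cflayer with its receive() and
 * ctrlcmd() callbacks filled in before the call.
 */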
int caif_connect_client(struct caif_connect_request *conn_req,
			struct cflayer *client_layer)
{
	struct cfctrl_link_param param;
	if (connect_req_to_link_param(get_caif_conf(), conn_req, &param) == 0)
		/* Hook up the adaptation layer. */
		return cfcnfg_add_adaptation_layer(get_caif_conf(),
						   &param, client_layer);

	return -EINVAL;
}
EXPORT_SYMBOL(caif_connect_client);

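/* Tear down the adaptation layer set up by caif_connect_client(). */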
int caif_disconnect_client(struct cflayer *adap_layer)
{
	return cfcnfg_del_adapt_layer(get_caif_conf(), adap_layer);
}
EXPORT_SYMBOL(caif_disconnect_client);

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	spin_lock_init(&caifn->caifdevs.lock);
	return 0;
}

static void caif_exit_net(struct net *net)
{
	struct net_device *dev;
	rtnl_lock();
	for_each_netdev(net, dev) {
		if (dev->type != ARPHRD_CAIF)
			continue;
		dev_close(dev);
		caif_device_destroy(dev);
	}
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;
	cfg = cfcnfg_create();
	if (!cfg) {
		pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
		goto err_cfcnfg_create_failed;
	}
	result = register_pernet_device(&caif_net_ops);

	if (result) {
		kfree(cfg);
		cfg = NULL;
		return result;
	}
	dev_add_pack(&caif_packet_type);
	register_netdevice_notifier(&caif_device_notifier);

	return result;
err_cfcnfg_create_failed:
	return -ENODEV;
}

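/* Module unload: undo everything caif_device_init() set up. */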
static void __exit caif_device_exit(void)
{
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_device(&caif_net_ops);
	unregister_netdevice_notifier(&caif_device_notifier);
	cfcnfg_remove(cfg);
}

module_init(caif_device_init);
module_exit(caif_device_exit);