// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* raw.c - Raw sockets for protocol family CAN
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */
#include <linux/can/skb.h>
#include <linux/can/raw.h>
#include <net/sock.h>
#include <net/net_namespace.h>

MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS("can-proto-1");

#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

#define MASK_ALL 0

/* A raw socket has a list of can_filters attached to it, each receiving
 * the CAN frames matching that filter.  If the filter list is empty,
 * no CAN frames will be received by the socket.  The default after
 * opening the socket is to have one filter which receives all frames.
 * The filter list is allocated dynamically with the exception of the
 * list containing only one item.  This common case is optimized by
 * storing the single filter in dfilter, to avoid using dynamic memory.
 */
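
/* Example (user-space sketch, not part of this module): an application
 * narrows the default "receive everything" filter via setsockopt().
 * The filter values below are illustrative only:
 *
 *	struct can_filter rfilter[2] = {
 *		{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },
 *		{ .can_id = 0x200, .can_mask = 0x700 },
 *	};
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER,
 *		   &rfilter, sizeof(rfilter));
 *
 * Passing optlen 0 installs an empty filter list, which disables
 * reception entirely, as described above.
 */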

struct uniqframe {
	int skbcnt;
	const struct sk_buff *skb;
	unsigned int join_rx_count;
};

struct raw_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct list_head notifier;
	int loopback;
	int recv_own_msgs;
	int fd_frames;
	int xl_frames;
	int join_filters;
	int count;                 /* number of active filters */
	struct can_filter dfilter; /* default/single filter */
	struct can_filter *filter; /* pointer to filter(s) */
	can_err_mask_t err_mask;
	struct uniqframe __percpu *uniq;
};

static LIST_HEAD(raw_notifier_list);
static DEFINE_SPINLOCK(raw_notifier_lock);
static struct raw_sock *raw_busy_notifier;

/* Return pointer to store the extra msg flags for raw_recvmsg().
 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 * in skb->cb.
 */
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* return pointer after struct sockaddr_can */
	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}
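
/* Resulting skb->cb layout on the receive path (sketch):
 *
 *	+--------------------------+----------------------------+
 *	| struct sockaddr_can      | unsigned int               |
 *	| (ifindex for recvmsg())  | (extra msg_flags, see      |
 *	|                          |  raw_rcv() below)          |
 *	+--------------------------+----------------------------+
 */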

static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}

static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* make sure to not pass oversized frames to the socket */
	if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
	    (!ro->xl_frames && can_is_canxl_skb(oskb)))
		return;

	/* eliminate multiple filter matches for the same skb */
	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
		if (!ro->join_filters)
			return;

		this_cpu_inc(ro->uniq->join_rx_count);
		/* drop frame until all enabled filters matched */
		if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
			return;
	} else {
		this_cpu_ptr(ro->uniq)->skb = oskb;
		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
		/* drop first frame to check all enabled filters? */
		if (ro->join_filters && ro->count > 1)
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Put the datagram to the queue so that raw_recvmsg() can get
	 * it from there. We need to pass the interface index to
	 * raw_recvmsg(). We pass a whole struct sockaddr_can in
	 * skb->cb containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}
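
/* Example (user-space sketch): the flags recorded above surface in the
 * msg_flags member filled in by recvmsg(). With CAN_RAW_RECV_OWN_MSGS
 * enabled, a looped-back frame from this very socket carries MSG_CONFIRM;
 * MSG_DONTROUTE marks any frame generated by a local socket rather than
 * received from the bus:
 *
 *	struct msghdr msg = { ... };	... iovec points to a CAN frame ...
 *	ssize_t n = recvmsg(s, &msg, 0);
 *
 *	if (n > 0 && (msg.msg_flags & MSG_CONFIRM))
 *		... own TX frame was delivered to the bus ...
 */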

static int raw_enable_filters(struct net *net, struct net_device *dev,
			      struct sock *sk, struct can_filter *filter,
			      int count)
{
	int err = 0;
	int i;

	for (i = 0; i < count; i++) {
		err = can_rx_register(net, dev, filter[i].can_id,
				      filter[i].can_mask,
				      raw_rcv, sk, "raw", sk);
		if (err) {
			/* clean up successfully registered filters */
			while (--i >= 0)
				can_rx_unregister(net, dev, filter[i].can_id,
						  filter[i].can_mask,
						  raw_rcv, sk);
			break;
		}
	}

	return err;
}

static int raw_enable_errfilter(struct net *net, struct net_device *dev,
				struct sock *sk, can_err_mask_t err_mask)
{
	int err = 0;

	if (err_mask)
		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
				      raw_rcv, sk, "raw", sk);

	return err;
}

static void raw_disable_filters(struct net *net, struct net_device *dev,
				struct sock *sk, struct can_filter *filter,
				int count)
{
	int i;

	for (i = 0; i < count; i++)
		can_rx_unregister(net, dev, filter[i].can_id,
				  filter[i].can_mask, raw_rcv, sk);
}

static inline void raw_disable_errfilter(struct net *net,
					 struct net_device *dev,
					 struct sock *sk,
					 can_err_mask_t err_mask)
{
	if (err_mask)
		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
				  raw_rcv, sk);
}

static inline void raw_disable_allfilters(struct net *net,
					  struct net_device *dev,
					  struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
	raw_disable_errfilter(net, dev, sk, ro->err_mask);
}

static int raw_enable_allfilters(struct net *net, struct net_device *dev,
				 struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);
	int err;

	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
	if (!err) {
		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
		if (err)
			raw_disable_filters(net, dev, sk, ro->filter,
					    ro->count);
	}

	return err;
}

static void raw_notify(struct raw_sock *ro, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &ro->sk;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	if (ro->ifindex != dev->ifindex)
		return;

	switch (msg) {
	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound)
			raw_disable_allfilters(dev_net(dev), dev, sk);

		if (ro->count > 1)
			kfree(ro->filter);

		ro->ifindex = 0;
		ro->bound = 0;
		ro->count = 0;
		release_sock(sk);

		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;
	}
}

static int raw_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&raw_notifier_lock);
	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
		spin_unlock(&raw_notifier_lock);
		raw_notify(raw_busy_notifier, msg, dev);
		spin_lock(&raw_notifier_lock);
	}
	raw_busy_notifier = NULL;
	spin_unlock(&raw_notifier_lock);
	return NOTIFY_DONE;
}

static int raw_init(struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	ro->bound            = 0;
	ro->ifindex          = 0;

	/* set default filter to single entry dfilter */
	ro->dfilter.can_id   = 0;
	ro->dfilter.can_mask = MASK_ALL;
	ro->filter           = &ro->dfilter;
	ro->count            = 1;

	/* set default loopback behaviour */
	ro->loopback         = 1;
	ro->recv_own_msgs    = 0;
	ro->fd_frames        = 0;
	ro->xl_frames        = 0;
	ro->join_filters     = 0;

	/* alloc_percpu provides zero'ed memory */
	ro->uniq = alloc_percpu(struct uniqframe);
	if (unlikely(!ro->uniq))
		return -ENOMEM;

	/* set notifier */
	spin_lock(&raw_notifier_lock);
	list_add_tail(&ro->notifier, &raw_notifier_list);
	spin_unlock(&raw_notifier_lock);

	return 0;
}

static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	spin_lock(&raw_notifier_lock);
	while (raw_busy_notifier == ro) {
		spin_unlock(&raw_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&raw_notifier_lock);
	}
	list_del(&ro->notifier);
	spin_unlock(&raw_notifier_lock);

	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (dev) {
				raw_disable_allfilters(dev_net(dev), dev, sk);
				dev_put(dev);
			}
		} else {
			raw_disable_allfilters(sock_net(sk), NULL, sk);
		}
	}

	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->count = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < RAW_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;

	lock_sock(sk);

	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			err = -ENODEV;
			goto out;
		}
		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), dev, sk);
		dev_put(dev);
	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->ifindex) {
				struct net_device *dev;

				dev = dev_get_by_index(sock_net(sk),
						       ro->ifindex);
				if (dev) {
					raw_disable_allfilters(dev_net(dev),
							       dev, sk);
					dev_put(dev);
				}
			} else {
				raw_disable_allfilters(sock_net(sk), NULL, sk);
			}
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
	}

 out:
	release_sock(sk);

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
	}

	return err;
}
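
/* Example (user-space sketch): binding a raw socket to a single CAN
 * interface; a can_ifindex of zero binds to all CAN interfaces at once.
 * The interface name "can0" is an assumption for illustration:
 *
 *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *	struct sockaddr_can addr = {
 *		.can_family  = AF_CAN,
 *		.can_ifindex = if_nametoindex("can0"),
 *	};
 *
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 */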

static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
		       int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);

	if (peer)
		return -EOPNOTSUPP;

	memset(addr, 0, RAW_MIN_NAMELEN);
	addr->can_family  = AF_CAN;
	addr->can_ifindex = ro->ifindex;

	return RAW_MIN_NAMELEN;
}

static int raw_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
	struct can_filter sfilter;         /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int fd_frames;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_sockptr(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		rtnl_lock();
		lock_sock(sk);

		if (ro->bound && ro->ifindex) {
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (!dev) {
				if (count > 1)
					kfree(filter);
				err = -ENODEV;
				goto out_fil;
			}
		}

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 &sfilter, 1);
			else
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 filter, count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
					    ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count  = count;

 out_fil:
		dev_put(dev);
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_sockptr(&err_mask, optval, optlen))
			return -EFAULT;

		err_mask &= CAN_ERR_MASK;

		rtnl_lock();
		lock_sock(sk);

		if (ro->bound && ro->ifindex) {
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (!dev) {
				err = -ENODEV;
				goto out_err;
			}
		}

		/* replace the current error mask registration */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(sock_net(sk), dev, sk,
						   err_mask);

			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(sock_net(sk), dev, sk,
					      ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

 out_err:
		dev_put(dev);
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_sockptr(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_FD_FRAMES:
		if (optlen != sizeof(fd_frames))
			return -EINVAL;

		if (copy_from_sockptr(&fd_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames && !fd_frames)
			return -EINVAL;

		ro->fd_frames = fd_frames;
		break;

	case CAN_RAW_XL_FRAMES:
		if (optlen != sizeof(ro->xl_frames))
			return -EINVAL;

		if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames)
			ro->fd_frames = ro->xl_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (optlen != sizeof(ro->join_filters))
			return -EINVAL;

		if (copy_from_sockptr(&ro->join_filters, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}
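
/* Example (user-space sketch): typical option updates on a raw socket s;
 * the chosen error classes are illustrative:
 *
 *	can_err_mask_t err_mask = CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF;
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
 *		   &err_mask, sizeof(err_mask));
 *
 *	int enable = 1;
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
 *		   &enable, sizeof(enable));
 */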

static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int len;
	void *val;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);

			/* user space buffer too small for filter list? */
			if (len < fsize) {
				/* return -ERANGE and needed space in optlen */
				err = -ERANGE;
				if (put_user(fsize, optlen))
					err = -EFAULT;
			} else {
				if (len > fsize)
					len = fsize;
				if (copy_to_user(optval, ro->filter, len))
					err = -EFAULT;
			}
		} else {
			len = 0;
		}
		release_sock(sk);

		if (!err)
			err = put_user(len, optlen);
		return err;

	case CAN_RAW_ERR_FILTER:
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		val = &ro->err_mask;
		break;

	case CAN_RAW_LOOPBACK:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->loopback;
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->recv_own_msgs;
		break;

	case CAN_RAW_FD_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->fd_frames;
		break;

	case CAN_RAW_XL_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->xl_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->join_filters;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, val, len))
		return -EFAULT;
	return 0;
}
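
/* Example (user-space sketch): the -ERANGE convention above lets a caller
 * size the CAN_RAW_FILTER buffer in two steps; the copy_to_user() is never
 * reached on the probing call, so a NULL optval is safe here:
 *
 *	socklen_t optlen = 0;
 *
 *	if (getsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, &optlen) < 0 &&
 *	    errno == ERANGE) {
 *		struct can_filter *f = malloc(optlen);
 *
 *		getsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, f, &optlen);
 *	}
 */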

static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
{
	/* Classical CAN -> no checks for flags and device capabilities */
	if (can_is_can_skb(skb))
		return false;

	/* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
	if (ro->fd_frames && can_is_canfd_skb(skb) &&
	    (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
		return false;

	/* CAN XL -> needs to be enabled and a CAN XL device */
	if (ro->xl_frames && can_is_canxl_skb(skb) &&
	    can_is_canxl_dev_mtu(mtu))
		return false;

	return true;
}
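
/* Resulting TX admission matrix of raw_bad_txframe():
 *
 *	frame type	socket option		device MTU
 *	Classical CAN	(none)			any CAN device
 *	CAN FD		CAN_RAW_FD_FRAMES	CANFD_MTU or CAN XL MTU
 *	CAN XL		CAN_RAW_XL_FRAMES	CAN XL MTU only
 */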

static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sockcm_cookie sockc;
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err = -EINVAL;

	/* check for valid CAN frame sizes */
	if (size < CANXL_HDR_SIZE + CANXL_MIN_DLEN || size > CANXL_MTU)
		return -EINVAL;

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < RAW_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else {
		ifindex = ro->ifindex;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev)
		return -ENXIO;

	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto put_dev;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	/* fill the skb before testing for valid CAN frames */
	err = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (err < 0)
		goto free_skb;

	err = -EINVAL;
	if (raw_bad_txframe(ro, skb, dev->mtu))
		goto free_skb;

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto free_skb;
	}

	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb->tstamp = sockc.transmit_time;

	skb_setup_tx_timestamp(skb, sockc.tsflags);

	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}
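
/* Example (user-space sketch): a plain write() of a complete frame
 * structure is the canonical way to send on a bound raw socket; the
 * frame contents are illustrative:
 *
 *	struct can_frame cf = {
 *		.can_id = 0x123,
 *		.len    = 2,
 *		.data   = { 0xde, 0xad },
 *	};
 *
 *	write(s, &cf, sizeof(cf));	... returns sizeof(cf) on success ...
 */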

static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err = 0;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size,
					  SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(RAW_MIN_NAMELEN);
		msg->msg_namelen = RAW_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in raw_rcv() */
	msg->msg_flags |= *(raw_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}
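
/* Example (user-space sketch): with CAN_RAW_FD_FRAMES enabled, reading
 * into a struct canfd_frame fits both frame types; the returned length
 * tells them apart:
 *
 *	struct canfd_frame fd;
 *	ssize_t n = read(s, &fd, sizeof(fd));
 *
 *	if (n == CAN_MTU)
 *		... Classical CAN, usable as struct can_frame ...
 *	else if (n == CANFD_MTU)
 *		... CAN FD frame ...
 */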

static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}

static const struct proto_ops raw_ops = {
	.family        = PF_CAN,
	.release       = raw_release,
	.bind          = raw_bind,
	.connect       = sock_no_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = raw_getname,
	.poll          = datagram_poll,
	.ioctl         = raw_sock_no_ioctlcmd,
	.gettstamp     = sock_gettstamp,
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.setsockopt    = raw_setsockopt,
	.getsockopt    = raw_getsockopt,
	.sendmsg       = raw_sendmsg,
	.recvmsg       = raw_recvmsg,
	.mmap          = sock_no_mmap,
	.sendpage      = sock_no_sendpage,
};

static struct proto raw_proto __read_mostly = {
	.name       = "CAN_RAW",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct raw_sock),
	.init       = raw_init,
};

static const struct can_proto raw_can_proto = {
	.type       = SOCK_RAW,
	.protocol   = CAN_RAW,
	.ops        = &raw_ops,
	.prot       = &raw_proto,
};

static struct notifier_block canraw_notifier = {
	.notifier_call = raw_notifier
};

static __init int raw_module_init(void)
{
	int err;

	pr_info("can: raw protocol\n");

	err = register_netdevice_notifier(&canraw_notifier);
	if (err)
		return err;

	err = can_proto_register(&raw_can_proto);
	if (err < 0) {
		pr_err("can: registration of raw protocol failed\n");
		goto register_proto_failed;
	}

	return 0;

register_proto_failed:
	unregister_netdevice_notifier(&canraw_notifier);
	return err;
}

static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
	unregister_netdevice_notifier(&canraw_notifier);
}

module_init(raw_module_init);
module_exit(raw_module_exit);