net/can/raw.c (Linux v3.5.6)
  1/*
  2 * raw.c - Raw sockets for protocol family CAN
  3 *
  4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  5 * All rights reserved.
  6 *
  7 * Redistribution and use in source and binary forms, with or without
  8 * modification, are permitted provided that the following conditions
  9 * are met:
 10 * 1. Redistributions of source code must retain the above copyright
 11 *    notice, this list of conditions and the following disclaimer.
 12 * 2. Redistributions in binary form must reproduce the above copyright
 13 *    notice, this list of conditions and the following disclaimer in the
 14 *    documentation and/or other materials provided with the distribution.
 15 * 3. Neither the name of Volkswagen nor the names of its contributors
 16 *    may be used to endorse or promote products derived from this software
 17 *    without specific prior written permission.
 18 *
 19 * Alternatively, provided that this notice is retained in full, this
 20 * software may be distributed under the terms of the GNU General
 21 * Public License ("GPL") version 2, in which case the provisions of the
 22 * GPL apply INSTEAD OF those given above.
 23 *
 24 * The provided data structures and external interfaces from this code
 25 * are not restricted to be used by modules with a GPL compatible license.
 26 *
 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 38 * DAMAGE.
 39 *
 40 */
 41
 42#include <linux/module.h>
 43#include <linux/init.h>
 44#include <linux/uio.h>
 45#include <linux/net.h>
 46#include <linux/slab.h>
 47#include <linux/netdevice.h>
 48#include <linux/socket.h>
 49#include <linux/if_arp.h>
 50#include <linux/skbuff.h>
 51#include <linux/can.h>
 52#include <linux/can/core.h>
 53#include <linux/can/raw.h>
 54#include <net/sock.h>
 55#include <net/net_namespace.h>
 56
 57#define CAN_RAW_VERSION CAN_VERSION
 58static __initdata const char banner[] =
 59	KERN_INFO "can: raw protocol (rev " CAN_RAW_VERSION ")\n";
 60
 61MODULE_DESCRIPTION("PF_CAN raw protocol");
 62MODULE_LICENSE("Dual BSD/GPL");
 63MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 64MODULE_ALIAS("can-proto-1");
 65
 66#define MASK_ALL 0
 67
 68/*
 69 * A raw socket has a list of can_filters attached to it, each receiving
 70 * the CAN frames matching that filter.  If the filter list is empty,
 71 * no CAN frames will be received by the socket.  The default after
 72 * opening the socket, is to have one filter which receives all frames.
 73 * The filter list is allocated dynamically with the exception of the
 74 * list containing only one item.  This common case is optimized by
 75 * storing the single filter in dfilter, to avoid using dynamic memory.
 76 */
 77
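For context, here is a minimal userspace sketch (not part of raw.c) of how an application typically installs such a filter list through the documented SocketCAN interface (socket(PF_CAN, SOCK_RAW, CAN_RAW), bind(), setsockopt(SOL_CAN_RAW, CAN_RAW_FILTER, ...)). The helper name, the interface name "can0" and the chosen CAN IDs/masks are assumptions for illustration only:

#include <net/if.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int open_filtered_can_socket(void)
{
	/* two entries force the dynamically allocated filter list (count > 1);
	 * a single entry would be stored in dfilter instead */
	struct can_filter rfilter[2] = {
		{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },
		{ .can_id = 0x200, .can_mask = 0x700 },
	};
	struct sockaddr_can addr = { .can_family = AF_CAN };
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return -1;

	addr.can_ifindex = if_nametoindex("can0");	/* assumed interface */
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(s);
		return -1;
	}

	/* replaces the default receive-everything filter set up in raw_init() */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
	return s;
}

Passing an empty filter list (optlen 0) would leave the socket receiving nothing, matching the comment above.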
 78struct raw_sock {
 79	struct sock sk;
 80	int bound;
 81	int ifindex;
 82	struct notifier_block notifier;
 83	int loopback;
 84	int recv_own_msgs;
 85	int count;                 /* number of active filters */
 86	struct can_filter dfilter; /* default/single filter */
 87	struct can_filter *filter; /* pointer to filter(s) */
 88	can_err_mask_t err_mask;
 89};
 90
 91/*
 92 * Return pointer to store the extra msg flags for raw_recvmsg().
 93 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 94 * in skb->cb.
 95 */
 96static inline unsigned int *raw_flags(struct sk_buff *skb)
 97{
 98	BUILD_BUG_ON(sizeof(skb->cb) <= (sizeof(struct sockaddr_can) +
 99					 sizeof(unsigned int)));
100
101	/* return pointer after struct sockaddr_can */
102	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
103}
104
105static inline struct raw_sock *raw_sk(const struct sock *sk)
106{
107	return (struct raw_sock *)sk;
108}
109
110static void raw_rcv(struct sk_buff *oskb, void *data)
111{
112	struct sock *sk = (struct sock *)data;
113	struct raw_sock *ro = raw_sk(sk);
114	struct sockaddr_can *addr;
115	struct sk_buff *skb;
116	unsigned int *pflags;
117
118	/* check the received tx sock reference */
119	if (!ro->recv_own_msgs && oskb->sk == sk)
120		return;
121
122	/* clone the given skb to be able to enqueue it into the rcv queue */
123	skb = skb_clone(oskb, GFP_ATOMIC);
124	if (!skb)
125		return;
126
127	/*
128	 *  Put the datagram to the queue so that raw_recvmsg() can
129	 *  get it from there.  We need to pass the interface index to
130	 *  raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
131	 *  containing the interface index.
132	 */
133
134	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
135	addr = (struct sockaddr_can *)skb->cb;
136	memset(addr, 0, sizeof(*addr));
137	addr->can_family  = AF_CAN;
138	addr->can_ifindex = skb->dev->ifindex;
139
140	/* add CAN specific message flags for raw_recvmsg() */
141	pflags = raw_flags(skb);
142	*pflags = 0;
143	if (oskb->sk)
144		*pflags |= MSG_DONTROUTE;
145	if (oskb->sk == sk)
146		*pflags |= MSG_CONFIRM;
147
148	if (sock_queue_rcv_skb(sk, skb) < 0)
149		kfree_skb(skb);
150}
151
152static int raw_enable_filters(struct net_device *dev, struct sock *sk,
153			      struct can_filter *filter, int count)
154{
155	int err = 0;
156	int i;
157
158	for (i = 0; i < count; i++) {
159		err = can_rx_register(dev, filter[i].can_id,
160				      filter[i].can_mask,
161				      raw_rcv, sk, "raw");
162		if (err) {
163			/* clean up successfully registered filters */
164			while (--i >= 0)
165				can_rx_unregister(dev, filter[i].can_id,
166						  filter[i].can_mask,
167						  raw_rcv, sk);
168			break;
169		}
170	}
171
172	return err;
173}
174
175static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
176				can_err_mask_t err_mask)
177{
178	int err = 0;
179
180	if (err_mask)
181		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
182				      raw_rcv, sk, "raw");
183
184	return err;
185}
186
187static void raw_disable_filters(struct net_device *dev, struct sock *sk,
188			      struct can_filter *filter, int count)
189{
190	int i;
191
192	for (i = 0; i < count; i++)
193		can_rx_unregister(dev, filter[i].can_id, filter[i].can_mask,
194				  raw_rcv, sk);
195}
196
197static inline void raw_disable_errfilter(struct net_device *dev,
198					 struct sock *sk,
199					 can_err_mask_t err_mask)
200
201{
202	if (err_mask)
203		can_rx_unregister(dev, 0, err_mask | CAN_ERR_FLAG,
204				  raw_rcv, sk);
205}
206
207static inline void raw_disable_allfilters(struct net_device *dev,
208					  struct sock *sk)
209{
210	struct raw_sock *ro = raw_sk(sk);
211
212	raw_disable_filters(dev, sk, ro->filter, ro->count);
213	raw_disable_errfilter(dev, sk, ro->err_mask);
214}
215
216static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
217{
218	struct raw_sock *ro = raw_sk(sk);
219	int err;
220
221	err = raw_enable_filters(dev, sk, ro->filter, ro->count);
222	if (!err) {
223		err = raw_enable_errfilter(dev, sk, ro->err_mask);
224		if (err)
225			raw_disable_filters(dev, sk, ro->filter, ro->count);
226	}
227
228	return err;
229}
230
231static int raw_notifier(struct notifier_block *nb,
232			unsigned long msg, void *data)
233{
234	struct net_device *dev = (struct net_device *)data;
235	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
236	struct sock *sk = &ro->sk;
237
238	if (!net_eq(dev_net(dev), &init_net))
239		return NOTIFY_DONE;
240
241	if (dev->type != ARPHRD_CAN)
242		return NOTIFY_DONE;
243
244	if (ro->ifindex != dev->ifindex)
245		return NOTIFY_DONE;
246
247	switch (msg) {
248
249	case NETDEV_UNREGISTER:
250		lock_sock(sk);
251		/* remove current filters & unregister */
252		if (ro->bound)
253			raw_disable_allfilters(dev, sk);
254
255		if (ro->count > 1)
256			kfree(ro->filter);
257
258		ro->ifindex = 0;
259		ro->bound   = 0;
260		ro->count   = 0;
261		release_sock(sk);
262
263		sk->sk_err = ENODEV;
264		if (!sock_flag(sk, SOCK_DEAD))
265			sk->sk_error_report(sk);
266		break;
267
268	case NETDEV_DOWN:
269		sk->sk_err = ENETDOWN;
270		if (!sock_flag(sk, SOCK_DEAD))
271			sk->sk_error_report(sk);
272		break;
273	}
274
275	return NOTIFY_DONE;
276}
277
278static int raw_init(struct sock *sk)
279{
280	struct raw_sock *ro = raw_sk(sk);
281
282	ro->bound            = 0;
283	ro->ifindex          = 0;
284
285	/* set default filter to single entry dfilter */
286	ro->dfilter.can_id   = 0;
287	ro->dfilter.can_mask = MASK_ALL;
288	ro->filter           = &ro->dfilter;
289	ro->count            = 1;
290
291	/* set default loopback behaviour */
292	ro->loopback         = 1;
293	ro->recv_own_msgs    = 0;
294
295	/* set notifier */
296	ro->notifier.notifier_call = raw_notifier;
297
298	register_netdevice_notifier(&ro->notifier);
299
300	return 0;
301}
302
303static int raw_release(struct socket *sock)
304{
305	struct sock *sk = sock->sk;
306	struct raw_sock *ro;
307
308	if (!sk)
309		return 0;
310
311	ro = raw_sk(sk);
312
313	unregister_netdevice_notifier(&ro->notifier);
314
315	lock_sock(sk);
316
317	/* remove current filters & unregister */
318	if (ro->bound) {
319		if (ro->ifindex) {
320			struct net_device *dev;
321
322			dev = dev_get_by_index(&init_net, ro->ifindex);
323			if (dev) {
324				raw_disable_allfilters(dev, sk);
325				dev_put(dev);
326			}
327		} else
328			raw_disable_allfilters(NULL, sk);
329	}
330
331	if (ro->count > 1)
332		kfree(ro->filter);
333
334	ro->ifindex = 0;
335	ro->bound   = 0;
336	ro->count   = 0;
337
338	sock_orphan(sk);
339	sock->sk = NULL;
340
341	release_sock(sk);
342	sock_put(sk);
343
344	return 0;
345}
346
347static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
348{
349	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
350	struct sock *sk = sock->sk;
351	struct raw_sock *ro = raw_sk(sk);
352	int ifindex;
353	int err = 0;
354	int notify_enetdown = 0;
355
356	if (len < sizeof(*addr))
357		return -EINVAL;
358
359	lock_sock(sk);
360
361	if (ro->bound && addr->can_ifindex == ro->ifindex)
362		goto out;
363
364	if (addr->can_ifindex) {
365		struct net_device *dev;
366
367		dev = dev_get_by_index(&init_net, addr->can_ifindex);
368		if (!dev) {
369			err = -ENODEV;
370			goto out;
371		}
372		if (dev->type != ARPHRD_CAN) {
373			dev_put(dev);
374			err = -ENODEV;
375			goto out;
376		}
377		if (!(dev->flags & IFF_UP))
378			notify_enetdown = 1;
379
380		ifindex = dev->ifindex;
381
382		/* filters set by default/setsockopt */
383		err = raw_enable_allfilters(dev, sk);
384		dev_put(dev);
385	} else {
386		ifindex = 0;
387
388		/* filters set by default/setsockopt */
389		err = raw_enable_allfilters(NULL, sk);
390	}
391
392	if (!err) {
393		if (ro->bound) {
394			/* unregister old filters */
395			if (ro->ifindex) {
396				struct net_device *dev;
397
398				dev = dev_get_by_index(&init_net, ro->ifindex);
399				if (dev) {
400					raw_disable_allfilters(dev, sk);
401					dev_put(dev);
402				}
403			} else
404				raw_disable_allfilters(NULL, sk);
405		}
406		ro->ifindex = ifindex;
407		ro->bound = 1;
408	}
409
410 out:
411	release_sock(sk);
412
413	if (notify_enetdown) {
414		sk->sk_err = ENETDOWN;
415		if (!sock_flag(sk, SOCK_DEAD))
416			sk->sk_error_report(sk);
417	}
418
419	return err;
420}
421
422static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
423		       int *len, int peer)
424{
425	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
426	struct sock *sk = sock->sk;
427	struct raw_sock *ro = raw_sk(sk);
428
429	if (peer)
430		return -EOPNOTSUPP;
431
432	memset(addr, 0, sizeof(*addr));
433	addr->can_family  = AF_CAN;
434	addr->can_ifindex = ro->ifindex;
435
436	*len = sizeof(*addr);
437
438	return 0;
439}
440
441static int raw_setsockopt(struct socket *sock, int level, int optname,
442			  char __user *optval, unsigned int optlen)
443{
444	struct sock *sk = sock->sk;
445	struct raw_sock *ro = raw_sk(sk);
446	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
447	struct can_filter sfilter;         /* single filter */
448	struct net_device *dev = NULL;
449	can_err_mask_t err_mask = 0;
450	int count = 0;
451	int err = 0;
452
453	if (level != SOL_CAN_RAW)
454		return -EINVAL;
455
456	switch (optname) {
457
458	case CAN_RAW_FILTER:
459		if (optlen % sizeof(struct can_filter) != 0)
460			return -EINVAL;
461
462		count = optlen / sizeof(struct can_filter);
463
464		if (count > 1) {
465			/* filter does not fit into dfilter => alloc space */
466			filter = memdup_user(optval, optlen);
467			if (IS_ERR(filter))
468				return PTR_ERR(filter);
469		} else if (count == 1) {
470			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
471				return -EFAULT;
472		}
473
474		lock_sock(sk);
475
476		if (ro->bound && ro->ifindex)
477			dev = dev_get_by_index(&init_net, ro->ifindex);
478
479		if (ro->bound) {
480			/* (try to) register the new filters */
481			if (count == 1)
482				err = raw_enable_filters(dev, sk, &sfilter, 1);
483			else
484				err = raw_enable_filters(dev, sk, filter,
485							 count);
486			if (err) {
487				if (count > 1)
488					kfree(filter);
489				goto out_fil;
490			}
491
492			/* remove old filter registrations */
493			raw_disable_filters(dev, sk, ro->filter, ro->count);
494		}
495
496		/* remove old filter space */
497		if (ro->count > 1)
498			kfree(ro->filter);
499
500		/* link new filters to the socket */
501		if (count == 1) {
502			/* copy filter data for single filter */
503			ro->dfilter = sfilter;
504			filter = &ro->dfilter;
505		}
506		ro->filter = filter;
507		ro->count  = count;
508
509 out_fil:
510		if (dev)
511			dev_put(dev);
512
513		release_sock(sk);
514
515		break;
516
517	case CAN_RAW_ERR_FILTER:
518		if (optlen != sizeof(err_mask))
519			return -EINVAL;
520
521		if (copy_from_user(&err_mask, optval, optlen))
522			return -EFAULT;
523
524		err_mask &= CAN_ERR_MASK;
525
526		lock_sock(sk);
527
528		if (ro->bound && ro->ifindex)
529			dev = dev_get_by_index(&init_net, ro->ifindex);
530
531		/* remove current error mask */
532		if (ro->bound) {
533			/* (try to) register the new err_mask */
534			err = raw_enable_errfilter(dev, sk, err_mask);
535
536			if (err)
537				goto out_err;
538
539			/* remove old err_mask registration */
540			raw_disable_errfilter(dev, sk, ro->err_mask);
541		}
542
543		/* link new err_mask to the socket */
544		ro->err_mask = err_mask;
545
546 out_err:
547		if (dev)
548			dev_put(dev);
549
550		release_sock(sk);
551
552		break;
553
554	case CAN_RAW_LOOPBACK:
555		if (optlen != sizeof(ro->loopback))
556			return -EINVAL;
557
558		if (copy_from_user(&ro->loopback, optval, optlen))
559			return -EFAULT;
560
561		break;
562
563	case CAN_RAW_RECV_OWN_MSGS:
564		if (optlen != sizeof(ro->recv_own_msgs))
565			return -EINVAL;
566
567		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
568			return -EFAULT;
569
570		break;
571
572	default:
573		return -ENOPROTOOPT;
574	}
575	return err;
576}
577
578static int raw_getsockopt(struct socket *sock, int level, int optname,
579			  char __user *optval, int __user *optlen)
580{
581	struct sock *sk = sock->sk;
582	struct raw_sock *ro = raw_sk(sk);
583	int len;
584	void *val;
585	int err = 0;
586
587	if (level != SOL_CAN_RAW)
588		return -EINVAL;
589	if (get_user(len, optlen))
590		return -EFAULT;
591	if (len < 0)
592		return -EINVAL;
593
594	switch (optname) {
595
596	case CAN_RAW_FILTER:
597		lock_sock(sk);
598		if (ro->count > 0) {
599			int fsize = ro->count * sizeof(struct can_filter);
600			if (len > fsize)
601				len = fsize;
602			if (copy_to_user(optval, ro->filter, len))
603				err = -EFAULT;
604		} else
605			len = 0;
606		release_sock(sk);
607
608		if (!err)
609			err = put_user(len, optlen);
610		return err;
611
612	case CAN_RAW_ERR_FILTER:
613		if (len > sizeof(can_err_mask_t))
614			len = sizeof(can_err_mask_t);
615		val = &ro->err_mask;
616		break;
617
618	case CAN_RAW_LOOPBACK:
619		if (len > sizeof(int))
620			len = sizeof(int);
621		val = &ro->loopback;
622		break;
623
624	case CAN_RAW_RECV_OWN_MSGS:
625		if (len > sizeof(int))
626			len = sizeof(int);
627		val = &ro->recv_own_msgs;
628		break;
629
630	default:
631		return -ENOPROTOOPT;
632	}
633
634	if (put_user(len, optlen))
635		return -EFAULT;
636	if (copy_to_user(optval, val, len))
637		return -EFAULT;
638	return 0;
639}
640
641static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
642		       struct msghdr *msg, size_t size)
643{
644	struct sock *sk = sock->sk;
645	struct raw_sock *ro = raw_sk(sk);
646	struct sk_buff *skb;
647	struct net_device *dev;
648	int ifindex;
649	int err;
650
651	if (msg->msg_name) {
652		struct sockaddr_can *addr =
653			(struct sockaddr_can *)msg->msg_name;
654
655		if (msg->msg_namelen < sizeof(*addr))
656			return -EINVAL;
657
658		if (addr->can_family != AF_CAN)
659			return -EINVAL;
660
661		ifindex = addr->can_ifindex;
662	} else
663		ifindex = ro->ifindex;
664
665	if (size != sizeof(struct can_frame))
666		return -EINVAL;
667
668	dev = dev_get_by_index(&init_net, ifindex);
669	if (!dev)
670		return -ENXIO;
671
672	skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT,
673				  &err);
674	if (!skb)
675		goto put_dev;
676
677	err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
678	if (err < 0)
679		goto free_skb;
680	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
681	if (err < 0)
682		goto free_skb;
683
684	skb->dev = dev;
685	skb->sk  = sk;
686
687	err = can_send(skb, ro->loopback);
688
689	dev_put(dev);
690
691	if (err)
692		goto send_failed;
693
694	return size;
695
696free_skb:
697	kfree_skb(skb);
698put_dev:
699	dev_put(dev);
700send_failed:
701	return err;
702}
703
704static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
705		       struct msghdr *msg, size_t size, int flags)
706{
707	struct sock *sk = sock->sk;
708	struct sk_buff *skb;
709	int err = 0;
710	int noblock;
711
712	noblock =  flags & MSG_DONTWAIT;
713	flags   &= ~MSG_DONTWAIT;
714
715	skb = skb_recv_datagram(sk, flags, noblock, &err);
716	if (!skb)
717		return err;
718
719	if (size < skb->len)
720		msg->msg_flags |= MSG_TRUNC;
721	else
722		size = skb->len;
723
724	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
725	if (err < 0) {
726		skb_free_datagram(sk, skb);
727		return err;
728	}
729
730	sock_recv_ts_and_drops(msg, sk, skb);
731
732	if (msg->msg_name) {
733		msg->msg_namelen = sizeof(struct sockaddr_can);
734		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
735	}
736
737	/* assign the flags that have been recorded in raw_rcv() */
738	msg->msg_flags |= *(raw_flags(skb));
739
740	skb_free_datagram(sk, skb);
741
742	return size;
743}
744
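A hedged usage sketch of the flag handling above: when CAN_RAW_RECV_OWN_MSGS is enabled, frames looped back from the same socket are delivered with MSG_CONFIRM set in msg_flags, and frames sent by any local socket carry MSG_DONTROUTE, exactly as recorded in raw_rcv(). The bound CAN_RAW socket descriptor s is an assumption, and the headers from the filter example above are presumed included:

	struct can_frame frame;
	struct iovec iov = { .iov_base = &frame, .iov_len = sizeof(frame) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	int own = 1;

	/* also deliver frames that this very socket has sent (loopback) */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS, &own, sizeof(own));

	if (recvmsg(s, &msg, 0) > 0) {
		if (msg.msg_flags & MSG_CONFIRM) {
			/* frame was sent by this very socket */
		} else if (msg.msg_flags & MSG_DONTROUTE) {
			/* frame was sent by another socket on this host */
		}
	}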
745static const struct proto_ops raw_ops = {
746	.family        = PF_CAN,
747	.release       = raw_release,
748	.bind          = raw_bind,
749	.connect       = sock_no_connect,
750	.socketpair    = sock_no_socketpair,
751	.accept        = sock_no_accept,
752	.getname       = raw_getname,
753	.poll          = datagram_poll,
754	.ioctl         = can_ioctl,	/* use can_ioctl() from af_can.c */
755	.listen        = sock_no_listen,
756	.shutdown      = sock_no_shutdown,
757	.setsockopt    = raw_setsockopt,
758	.getsockopt    = raw_getsockopt,
759	.sendmsg       = raw_sendmsg,
760	.recvmsg       = raw_recvmsg,
761	.mmap          = sock_no_mmap,
762	.sendpage      = sock_no_sendpage,
763};
764
765static struct proto raw_proto __read_mostly = {
766	.name       = "CAN_RAW",
767	.owner      = THIS_MODULE,
768	.obj_size   = sizeof(struct raw_sock),
769	.init       = raw_init,
770};
771
772static const struct can_proto raw_can_proto = {
773	.type       = SOCK_RAW,
774	.protocol   = CAN_RAW,
775	.ops        = &raw_ops,
776	.prot       = &raw_proto,
777};
778
779static __init int raw_module_init(void)
780{
781	int err;
782
783	printk(banner);
784
785	err = can_proto_register(&raw_can_proto);
786	if (err < 0)
787		printk(KERN_ERR "can: registration of raw protocol failed\n");
788
789	return err;
790}
791
792static __exit void raw_module_exit(void)
793{
794	can_proto_unregister(&raw_can_proto);
795}
796
797module_init(raw_module_init);
798module_exit(raw_module_exit);
net/can/raw.c (Linux v6.8)
   1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
   2/* raw.c - Raw sockets for protocol family CAN
   3 *
   4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
   5 * All rights reserved.
   6 *
   7 * Redistribution and use in source and binary forms, with or without
   8 * modification, are permitted provided that the following conditions
   9 * are met:
  10 * 1. Redistributions of source code must retain the above copyright
  11 *    notice, this list of conditions and the following disclaimer.
  12 * 2. Redistributions in binary form must reproduce the above copyright
  13 *    notice, this list of conditions and the following disclaimer in the
  14 *    documentation and/or other materials provided with the distribution.
  15 * 3. Neither the name of Volkswagen nor the names of its contributors
  16 *    may be used to endorse or promote products derived from this software
  17 *    without specific prior written permission.
  18 *
  19 * Alternatively, provided that this notice is retained in full, this
  20 * software may be distributed under the terms of the GNU General
  21 * Public License ("GPL") version 2, in which case the provisions of the
  22 * GPL apply INSTEAD OF those given above.
  23 *
  24 * The provided data structures and external interfaces from this code
  25 * are not restricted to be used by modules with a GPL compatible license.
  26 *
  27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  38 * DAMAGE.
  39 *
  40 */
  41
  42#include <linux/module.h>
  43#include <linux/init.h>
  44#include <linux/uio.h>
  45#include <linux/net.h>
  46#include <linux/slab.h>
  47#include <linux/netdevice.h>
  48#include <linux/socket.h>
  49#include <linux/if_arp.h>
  50#include <linux/skbuff.h>
  51#include <linux/can.h>
  52#include <linux/can/core.h>
  53#include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */
  54#include <linux/can/skb.h>
  55#include <linux/can/raw.h>
  56#include <net/sock.h>
  57#include <net/net_namespace.h>
  58
  59MODULE_DESCRIPTION("PF_CAN raw protocol");
  60MODULE_LICENSE("Dual BSD/GPL");
  61MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
  62MODULE_ALIAS("can-proto-1");
  63
  64#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
  65
  66#define MASK_ALL 0
  67
  68/* A raw socket has a list of can_filters attached to it, each receiving
  69 * the CAN frames matching that filter.  If the filter list is empty,
  70 * no CAN frames will be received by the socket.  The default after
  71 * opening the socket, is to have one filter which receives all frames.
  72 * The filter list is allocated dynamically with the exception of the
  73 * list containing only one item.  This common case is optimized by
  74 * storing the single filter in dfilter, to avoid using dynamic memory.
  75 */
  76
  77struct uniqframe {
  78	int skbcnt;
  79	const struct sk_buff *skb;
  80	unsigned int join_rx_count;
  81};
  82
  83struct raw_sock {
  84	struct sock sk;
  85	int bound;
  86	int ifindex;
  87	struct net_device *dev;
  88	netdevice_tracker dev_tracker;
  89	struct list_head notifier;
  90	int loopback;
  91	int recv_own_msgs;
  92	int fd_frames;
  93	int xl_frames;
  94	int join_filters;
  95	int count;                 /* number of active filters */
  96	struct can_filter dfilter; /* default/single filter */
  97	struct can_filter *filter; /* pointer to filter(s) */
  98	can_err_mask_t err_mask;
  99	struct uniqframe __percpu *uniq;
 100};
 101
 102static LIST_HEAD(raw_notifier_list);
 103static DEFINE_SPINLOCK(raw_notifier_lock);
 104static struct raw_sock *raw_busy_notifier;
 105
 106/* Return pointer to store the extra msg flags for raw_recvmsg().
 107 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 108 * in skb->cb.
 109 */
 110static inline unsigned int *raw_flags(struct sk_buff *skb)
 111{
 112	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
 113			       sizeof(unsigned int));
 114
 115	/* return pointer after struct sockaddr_can */
 116	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
 117}
 118
 119static inline struct raw_sock *raw_sk(const struct sock *sk)
 120{
 121	return (struct raw_sock *)sk;
 122}
 123
 124static void raw_rcv(struct sk_buff *oskb, void *data)
 125{
 126	struct sock *sk = (struct sock *)data;
 127	struct raw_sock *ro = raw_sk(sk);
 128	struct sockaddr_can *addr;
 129	struct sk_buff *skb;
 130	unsigned int *pflags;
 131
 132	/* check the received tx sock reference */
 133	if (!ro->recv_own_msgs && oskb->sk == sk)
 134		return;
 135
 136	/* make sure to not pass oversized frames to the socket */
 137	if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
 138	    (!ro->xl_frames && can_is_canxl_skb(oskb)))
 139		return;
 140
 141	/* eliminate multiple filter matches for the same skb */
 142	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
 143	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
 144		if (!ro->join_filters)
 145			return;
 146
 147		this_cpu_inc(ro->uniq->join_rx_count);
 148		/* drop frame until all enabled filters matched */
 149		if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
 150			return;
 151	} else {
 152		this_cpu_ptr(ro->uniq)->skb = oskb;
 153		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
 154		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
 155		/* drop first frame to check all enabled filters? */
 156		if (ro->join_filters && ro->count > 1)
 157			return;
 158	}
 159
 160	/* clone the given skb to be able to enqueue it into the rcv queue */
 161	skb = skb_clone(oskb, GFP_ATOMIC);
 162	if (!skb)
 163		return;
 164
 165	/* Put the datagram to the queue so that raw_recvmsg() can get
 166	 * it from there. We need to pass the interface index to
 167	 * raw_recvmsg(). We pass a whole struct sockaddr_can in
 168	 * skb->cb containing the interface index.
 169	 */
 170
 171	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
 172	addr = (struct sockaddr_can *)skb->cb;
 173	memset(addr, 0, sizeof(*addr));
 174	addr->can_family = AF_CAN;
 175	addr->can_ifindex = skb->dev->ifindex;
 176
 177	/* add CAN specific message flags for raw_recvmsg() */
 178	pflags = raw_flags(skb);
 179	*pflags = 0;
 180	if (oskb->sk)
 181		*pflags |= MSG_DONTROUTE;
 182	if (oskb->sk == sk)
 183		*pflags |= MSG_CONFIRM;
 184
 185	if (sock_queue_rcv_skb(sk, skb) < 0)
 186		kfree_skb(skb);
 187}
 188
 189static int raw_enable_filters(struct net *net, struct net_device *dev,
 190			      struct sock *sk, struct can_filter *filter,
 191			      int count)
 192{
 193	int err = 0;
 194	int i;
 195
 196	for (i = 0; i < count; i++) {
 197		err = can_rx_register(net, dev, filter[i].can_id,
 198				      filter[i].can_mask,
 199				      raw_rcv, sk, "raw", sk);
 200		if (err) {
 201			/* clean up successfully registered filters */
 202			while (--i >= 0)
 203				can_rx_unregister(net, dev, filter[i].can_id,
 204						  filter[i].can_mask,
 205						  raw_rcv, sk);
 206			break;
 207		}
 208	}
 209
 210	return err;
 211}
 212
 213static int raw_enable_errfilter(struct net *net, struct net_device *dev,
 214				struct sock *sk, can_err_mask_t err_mask)
 215{
 216	int err = 0;
 217
 218	if (err_mask)
 219		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
 220				      raw_rcv, sk, "raw", sk);
 221
 222	return err;
 223}
 224
 225static void raw_disable_filters(struct net *net, struct net_device *dev,
 226				struct sock *sk, struct can_filter *filter,
 227				int count)
 228{
 229	int i;
 230
 231	for (i = 0; i < count; i++)
 232		can_rx_unregister(net, dev, filter[i].can_id,
 233				  filter[i].can_mask, raw_rcv, sk);
 234}
 235
 236static inline void raw_disable_errfilter(struct net *net,
 237					 struct net_device *dev,
 238					 struct sock *sk,
 239					 can_err_mask_t err_mask)
 240
 241{
 242	if (err_mask)
 243		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
 244				  raw_rcv, sk);
 245}
 246
 247static inline void raw_disable_allfilters(struct net *net,
 248					  struct net_device *dev,
 249					  struct sock *sk)
 250{
 251	struct raw_sock *ro = raw_sk(sk);
 252
 253	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
 254	raw_disable_errfilter(net, dev, sk, ro->err_mask);
 255}
 256
 257static int raw_enable_allfilters(struct net *net, struct net_device *dev,
 258				 struct sock *sk)
 259{
 260	struct raw_sock *ro = raw_sk(sk);
 261	int err;
 262
 263	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
 264	if (!err) {
 265		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
 266		if (err)
 267			raw_disable_filters(net, dev, sk, ro->filter,
 268					    ro->count);
 269	}
 270
 271	return err;
 272}
 273
 274static void raw_notify(struct raw_sock *ro, unsigned long msg,
 275		       struct net_device *dev)
 276{
 277	struct sock *sk = &ro->sk;
 278
 279	if (!net_eq(dev_net(dev), sock_net(sk)))
 280		return;
 281
 282	if (ro->dev != dev)
 283		return;
 284
 285	switch (msg) {
 286	case NETDEV_UNREGISTER:
 287		lock_sock(sk);
 288		/* remove current filters & unregister */
 289		if (ro->bound) {
 290			raw_disable_allfilters(dev_net(dev), dev, sk);
 291			netdev_put(dev, &ro->dev_tracker);
 292		}
 293
 294		if (ro->count > 1)
 295			kfree(ro->filter);
 296
 297		ro->ifindex = 0;
 298		ro->bound = 0;
 299		ro->dev = NULL;
 300		ro->count = 0;
 301		release_sock(sk);
 302
 303		sk->sk_err = ENODEV;
 304		if (!sock_flag(sk, SOCK_DEAD))
 305			sk_error_report(sk);
 306		break;
 307
 308	case NETDEV_DOWN:
 309		sk->sk_err = ENETDOWN;
 310		if (!sock_flag(sk, SOCK_DEAD))
 311			sk_error_report(sk);
 312		break;
 313	}
 314}
 315
 316static int raw_notifier(struct notifier_block *nb, unsigned long msg,
 317			void *ptr)
 318{
 319	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 320
 321	if (dev->type != ARPHRD_CAN)
 322		return NOTIFY_DONE;
 323	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
 324		return NOTIFY_DONE;
 325	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
 326		return NOTIFY_DONE;
 327
 328	spin_lock(&raw_notifier_lock);
 329	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
 330		spin_unlock(&raw_notifier_lock);
 331		raw_notify(raw_busy_notifier, msg, dev);
 332		spin_lock(&raw_notifier_lock);
 333	}
 334	raw_busy_notifier = NULL;
 335	spin_unlock(&raw_notifier_lock);
 336	return NOTIFY_DONE;
 337}
 338
 339static int raw_init(struct sock *sk)
 340{
 341	struct raw_sock *ro = raw_sk(sk);
 342
 343	ro->bound            = 0;
 344	ro->ifindex          = 0;
 345	ro->dev              = NULL;
 346
 347	/* set default filter to single entry dfilter */
 348	ro->dfilter.can_id   = 0;
 349	ro->dfilter.can_mask = MASK_ALL;
 350	ro->filter           = &ro->dfilter;
 351	ro->count            = 1;
 352
 353	/* set default loopback behaviour */
 354	ro->loopback         = 1;
 355	ro->recv_own_msgs    = 0;
 356	ro->fd_frames        = 0;
 357	ro->xl_frames        = 0;
 358	ro->join_filters     = 0;
 359
 360	/* alloc_percpu provides zero'ed memory */
 361	ro->uniq = alloc_percpu(struct uniqframe);
 362	if (unlikely(!ro->uniq))
 363		return -ENOMEM;
 364
 365	/* set notifier */
 366	spin_lock(&raw_notifier_lock);
 367	list_add_tail(&ro->notifier, &raw_notifier_list);
 368	spin_unlock(&raw_notifier_lock);
 369
 370	return 0;
 371}
 372
 373static int raw_release(struct socket *sock)
 374{
 375	struct sock *sk = sock->sk;
 376	struct raw_sock *ro;
 377
 378	if (!sk)
 379		return 0;
 380
 381	ro = raw_sk(sk);
 382
 383	spin_lock(&raw_notifier_lock);
 384	while (raw_busy_notifier == ro) {
 385		spin_unlock(&raw_notifier_lock);
 386		schedule_timeout_uninterruptible(1);
 387		spin_lock(&raw_notifier_lock);
 388	}
 389	list_del(&ro->notifier);
 390	spin_unlock(&raw_notifier_lock);
 391
 392	rtnl_lock();
 393	lock_sock(sk);
 394
 395	/* remove current filters & unregister */
 396	if (ro->bound) {
 397		if (ro->dev) {
 398			raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
 399			netdev_put(ro->dev, &ro->dev_tracker);
 400		} else {
 401			raw_disable_allfilters(sock_net(sk), NULL, sk);
 402		}
 403	}
 404
 405	if (ro->count > 1)
 406		kfree(ro->filter);
 407
 408	ro->ifindex = 0;
 409	ro->bound = 0;
 410	ro->dev = NULL;
 411	ro->count = 0;
 412	free_percpu(ro->uniq);
 413
 414	sock_orphan(sk);
 415	sock->sk = NULL;
 416
 417	release_sock(sk);
 418	rtnl_unlock();
 419
 420	sock_put(sk);
 421
 422	return 0;
 423}
 424
 425static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 426{
 427	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
 428	struct sock *sk = sock->sk;
 429	struct raw_sock *ro = raw_sk(sk);
 430	struct net_device *dev = NULL;
 431	int ifindex;
 432	int err = 0;
 433	int notify_enetdown = 0;
 434
 435	if (len < RAW_MIN_NAMELEN)
 436		return -EINVAL;
 437	if (addr->can_family != AF_CAN)
 438		return -EINVAL;
 439
 440	rtnl_lock();
 441	lock_sock(sk);
 442
 443	if (ro->bound && addr->can_ifindex == ro->ifindex)
 444		goto out;
 445
 446	if (addr->can_ifindex) {
 447		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
 448		if (!dev) {
 449			err = -ENODEV;
 450			goto out;
 451		}
 452		if (dev->type != ARPHRD_CAN) {
 453			err = -ENODEV;
 454			goto out_put_dev;
 455		}
 456
 457		if (!(dev->flags & IFF_UP))
 458			notify_enetdown = 1;
 459
 460		ifindex = dev->ifindex;
 461
 462		/* filters set by default/setsockopt */
 463		err = raw_enable_allfilters(sock_net(sk), dev, sk);
 464		if (err)
 465			goto out_put_dev;
 466
 467	} else {
 468		ifindex = 0;
 469
 470		/* filters set by default/setsockopt */
 471		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
 472	}
 473
 474	if (!err) {
 475		if (ro->bound) {
 476			/* unregister old filters */
 477			if (ro->dev) {
 478				raw_disable_allfilters(dev_net(ro->dev),
 479						       ro->dev, sk);
 480				/* drop reference to old ro->dev */
 481				netdev_put(ro->dev, &ro->dev_tracker);
 482			} else {
 483				raw_disable_allfilters(sock_net(sk), NULL, sk);
 484			}
 485		}
 486		ro->ifindex = ifindex;
 487		ro->bound = 1;
 488		/* bind() ok -> hold a reference for new ro->dev */
 489		ro->dev = dev;
 490		if (ro->dev)
 491			netdev_hold(ro->dev, &ro->dev_tracker, GFP_KERNEL);
 492	}
 493
 494out_put_dev:
 495	/* remove potential reference from dev_get_by_index() */
 496	dev_put(dev);
 497out:
 498	release_sock(sk);
 499	rtnl_unlock();
 500
 501	if (notify_enetdown) {
 502		sk->sk_err = ENETDOWN;
 503		if (!sock_flag(sk, SOCK_DEAD))
 504			sk_error_report(sk);
 505	}
 506
 507	return err;
 508}
 509
 510static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
 511		       int peer)
 512{
 513	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
 514	struct sock *sk = sock->sk;
 515	struct raw_sock *ro = raw_sk(sk);
 516
 517	if (peer)
 518		return -EOPNOTSUPP;
 519
 520	memset(addr, 0, RAW_MIN_NAMELEN);
 521	addr->can_family  = AF_CAN;
 522	addr->can_ifindex = ro->ifindex;
 523
 524	return RAW_MIN_NAMELEN;
 525}
 526
 527static int raw_setsockopt(struct socket *sock, int level, int optname,
 528			  sockptr_t optval, unsigned int optlen)
 529{
 530	struct sock *sk = sock->sk;
 531	struct raw_sock *ro = raw_sk(sk);
 532	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
 533	struct can_filter sfilter;         /* single filter */
 534	struct net_device *dev = NULL;
 535	can_err_mask_t err_mask = 0;
 536	int fd_frames;
 537	int count = 0;
 538	int err = 0;
 539
 540	if (level != SOL_CAN_RAW)
 541		return -EINVAL;
 542
 543	switch (optname) {
 544	case CAN_RAW_FILTER:
 545		if (optlen % sizeof(struct can_filter) != 0)
 546			return -EINVAL;
 547
 548		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
 549			return -EINVAL;
 550
 551		count = optlen / sizeof(struct can_filter);
 552
 553		if (count > 1) {
 554			/* filter does not fit into dfilter => alloc space */
 555			filter = memdup_sockptr(optval, optlen);
 556			if (IS_ERR(filter))
 557				return PTR_ERR(filter);
 558		} else if (count == 1) {
 559			if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
 560				return -EFAULT;
 561		}
 562
 563		rtnl_lock();
 564		lock_sock(sk);
 565
 566		dev = ro->dev;
 567		if (ro->bound && dev) {
 568			if (dev->reg_state != NETREG_REGISTERED) {
 569				if (count > 1)
 570					kfree(filter);
 571				err = -ENODEV;
 572				goto out_fil;
 573			}
 574		}
 575
 576		if (ro->bound) {
 577			/* (try to) register the new filters */
 578			if (count == 1)
 579				err = raw_enable_filters(sock_net(sk), dev, sk,
 580							 &sfilter, 1);
 581			else
 582				err = raw_enable_filters(sock_net(sk), dev, sk,
 583							 filter, count);
 584			if (err) {
 585				if (count > 1)
 586					kfree(filter);
 587				goto out_fil;
 588			}
 589
 590			/* remove old filter registrations */
 591			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
 592					    ro->count);
 593		}
 594
 595		/* remove old filter space */
 596		if (ro->count > 1)
 597			kfree(ro->filter);
 598
 599		/* link new filters to the socket */
 600		if (count == 1) {
 601			/* copy filter data for single filter */
 602			ro->dfilter = sfilter;
 603			filter = &ro->dfilter;
 604		}
 605		ro->filter = filter;
 606		ro->count  = count;
 607
 608 out_fil:
 609		release_sock(sk);
 610		rtnl_unlock();
 611
 612		break;
 613
 614	case CAN_RAW_ERR_FILTER:
 615		if (optlen != sizeof(err_mask))
 616			return -EINVAL;
 617
 618		if (copy_from_sockptr(&err_mask, optval, optlen))
 619			return -EFAULT;
 620
 621		err_mask &= CAN_ERR_MASK;
 622
 623		rtnl_lock();
 624		lock_sock(sk);
 625
 626		dev = ro->dev;
 627		if (ro->bound && dev) {
 628			if (dev->reg_state != NETREG_REGISTERED) {
 629				err = -ENODEV;
 630				goto out_err;
 631			}
 632		}
 633
 634		/* remove current error mask */
 635		if (ro->bound) {
 636			/* (try to) register the new err_mask */
 637			err = raw_enable_errfilter(sock_net(sk), dev, sk,
 638						   err_mask);
 639
 640			if (err)
 641				goto out_err;
 642
 643			/* remove old err_mask registration */
 644			raw_disable_errfilter(sock_net(sk), dev, sk,
 645					      ro->err_mask);
 646		}
 647
 648		/* link new err_mask to the socket */
 649		ro->err_mask = err_mask;
 650
 651 out_err:
 652		release_sock(sk);
 653		rtnl_unlock();
 654
 655		break;
 656
 657	case CAN_RAW_LOOPBACK:
 658		if (optlen != sizeof(ro->loopback))
 659			return -EINVAL;
 660
 661		if (copy_from_sockptr(&ro->loopback, optval, optlen))
 662			return -EFAULT;
 663
 664		break;
 665
 666	case CAN_RAW_RECV_OWN_MSGS:
 667		if (optlen != sizeof(ro->recv_own_msgs))
 668			return -EINVAL;
 669
 670		if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
 671			return -EFAULT;
 672
 673		break;
 674
 675	case CAN_RAW_FD_FRAMES:
 676		if (optlen != sizeof(fd_frames))
 677			return -EINVAL;
 678
 679		if (copy_from_sockptr(&fd_frames, optval, optlen))
 680			return -EFAULT;
 681
 682		/* Enabling CAN XL includes CAN FD */
 683		if (ro->xl_frames && !fd_frames)
 684			return -EINVAL;
 685
 686		ro->fd_frames = fd_frames;
 687		break;
 688
 689	case CAN_RAW_XL_FRAMES:
 690		if (optlen != sizeof(ro->xl_frames))
 691			return -EINVAL;
 692
 693		if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
 694			return -EFAULT;
 695
 696		/* Enabling CAN XL includes CAN FD */
 697		if (ro->xl_frames)
 698			ro->fd_frames = ro->xl_frames;
 699		break;
 700
 701	case CAN_RAW_JOIN_FILTERS:
 702		if (optlen != sizeof(ro->join_filters))
 703			return -EINVAL;
 704
 705		if (copy_from_sockptr(&ro->join_filters, optval, optlen))
 706			return -EFAULT;
 707
 708		break;
 709
 710	default:
 711		return -ENOPROTOOPT;
 712	}
 713	return err;
 714}
 715
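A short, hedged sketch of the CAN_RAW_ERR_FILTER option handled above: the application selects the error classes it is interested in, and matching error frames are then delivered like regular frames with CAN_ERR_FLAG set in can_id. The open CAN_RAW socket descriptor s and the chosen error classes are assumptions:

	/* error class constants come from <linux/can/error.h> */
	can_err_mask_t err_mask = CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF;

	setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
		   &err_mask, sizeof(err_mask));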
 716static int raw_getsockopt(struct socket *sock, int level, int optname,
 717			  char __user *optval, int __user *optlen)
 718{
 719	struct sock *sk = sock->sk;
 720	struct raw_sock *ro = raw_sk(sk);
 721	int len;
 722	void *val;
 723	int err = 0;
 724
 725	if (level != SOL_CAN_RAW)
 726		return -EINVAL;
 727	if (get_user(len, optlen))
 728		return -EFAULT;
 729	if (len < 0)
 730		return -EINVAL;
 731
 732	switch (optname) {
 733	case CAN_RAW_FILTER:
 734		lock_sock(sk);
 735		if (ro->count > 0) {
 736			int fsize = ro->count * sizeof(struct can_filter);
 737
 738			/* user space buffer to small for filter list? */
 739			if (len < fsize) {
 740				/* return -ERANGE and needed space in optlen */
 741				err = -ERANGE;
 742				if (put_user(fsize, optlen))
 743					err = -EFAULT;
 744			} else {
 745				if (len > fsize)
 746					len = fsize;
 747				if (copy_to_user(optval, ro->filter, len))
 748					err = -EFAULT;
 749			}
 750		} else {
 751			len = 0;
 752		}
 753		release_sock(sk);
 754
 755		if (!err)
 756			err = put_user(len, optlen);
 757		return err;
 758
 759	case CAN_RAW_ERR_FILTER:
 760		if (len > sizeof(can_err_mask_t))
 761			len = sizeof(can_err_mask_t);
 762		val = &ro->err_mask;
 763		break;
 764
 765	case CAN_RAW_LOOPBACK:
 766		if (len > sizeof(int))
 767			len = sizeof(int);
 768		val = &ro->loopback;
 769		break;
 770
 771	case CAN_RAW_RECV_OWN_MSGS:
 772		if (len > sizeof(int))
 773			len = sizeof(int);
 774		val = &ro->recv_own_msgs;
 775		break;
 776
 777	case CAN_RAW_FD_FRAMES:
 778		if (len > sizeof(int))
 779			len = sizeof(int);
 780		val = &ro->fd_frames;
 781		break;
 782
 783	case CAN_RAW_XL_FRAMES:
 784		if (len > sizeof(int))
 785			len = sizeof(int);
 786		val = &ro->xl_frames;
 787		break;
 788
 789	case CAN_RAW_JOIN_FILTERS:
 790		if (len > sizeof(int))
 791			len = sizeof(int);
 792		val = &ro->join_filters;
 793		break;
 794
 795	default:
 796		return -ENOPROTOOPT;
 797	}
 798
 799	if (put_user(len, optlen))
 800		return -EFAULT;
 801	if (copy_to_user(optval, val, len))
 802		return -EFAULT;
 803	return 0;
 804}
 805
 806static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
 807{
 808	/* Classical CAN -> no checks for flags and device capabilities */
 809	if (can_is_can_skb(skb))
 810		return false;
 811
 812	/* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
 813	if (ro->fd_frames && can_is_canfd_skb(skb) &&
 814	    (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
 815		return false;
 816
 817	/* CAN XL -> needs to be enabled and a CAN XL device */
 818	if (ro->xl_frames && can_is_canxl_skb(skb) &&
 819	    can_is_canxl_dev_mtu(mtu))
 820		return false;
 821
 822	return true;
 823}
 824
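For illustration, a hedged userspace counterpart to the check above: CAN FD payloads can only be sent once CAN_RAW_FD_FRAMES is enabled on the socket, and the frame type is selected by the write size (CAN_MTU for struct can_frame, CANFD_MTU for struct canfd_frame). The socket descriptor s and the chosen payload are assumptions:

	struct canfd_frame cfd = {
		.can_id = 0x123,
		.flags  = CANFD_BRS,	/* request bit rate switching */
		.len    = 64,		/* up to CANFD_MAX_DLEN data bytes */
	};
	int enable = 1;

	/* off by default; required before sending/receiving canfd_frame */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &enable, sizeof(enable));

	/* writing CANFD_MTU bytes marks this as a CAN FD frame */
	write(s, &cfd, sizeof(cfd));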
 825static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 826{
 827	struct sock *sk = sock->sk;
 828	struct raw_sock *ro = raw_sk(sk);
 829	struct sockcm_cookie sockc;
 830	struct sk_buff *skb;
 831	struct net_device *dev;
 832	int ifindex;
 833	int err = -EINVAL;
 834
 835	/* check for valid CAN frame sizes */
 836	if (size < CANXL_HDR_SIZE + CANXL_MIN_DLEN || size > CANXL_MTU)
 837		return -EINVAL;
 838
 839	if (msg->msg_name) {
 840		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
 841
 842		if (msg->msg_namelen < RAW_MIN_NAMELEN)
 843			return -EINVAL;
 844
 845		if (addr->can_family != AF_CAN)
 846			return -EINVAL;
 847
 848		ifindex = addr->can_ifindex;
 849	} else {
 850		ifindex = ro->ifindex;
 851	}
 852
 853	dev = dev_get_by_index(sock_net(sk), ifindex);
 854	if (!dev)
 855		return -ENXIO;
 856
 857	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
 858				  msg->msg_flags & MSG_DONTWAIT, &err);
 859	if (!skb)
 860		goto put_dev;
 861
 862	can_skb_reserve(skb);
 863	can_skb_prv(skb)->ifindex = dev->ifindex;
 864	can_skb_prv(skb)->skbcnt = 0;
 865
 866	/* fill the skb before testing for valid CAN frames */
 867	err = memcpy_from_msg(skb_put(skb, size), msg, size);
 868	if (err < 0)
 869		goto free_skb;
 870
 871	err = -EINVAL;
 872	if (raw_bad_txframe(ro, skb, dev->mtu))
 873		goto free_skb;
 874
 875	sockcm_init(&sockc, sk);
 876	if (msg->msg_controllen) {
 877		err = sock_cmsg_send(sk, msg, &sockc);
 878		if (unlikely(err))
 879			goto free_skb;
 880	}
 881
 882	skb->dev = dev;
 883	skb->priority = READ_ONCE(sk->sk_priority);
 884	skb->mark = READ_ONCE(sk->sk_mark);
 885	skb->tstamp = sockc.transmit_time;
 886
 887	skb_setup_tx_timestamp(skb, sockc.tsflags);
 888
 889	err = can_send(skb, ro->loopback);
 890
 891	dev_put(dev);
 892
 893	if (err)
 894		goto send_failed;
 895
 896	return size;
 897
 898free_skb:
 899	kfree_skb(skb);
 900put_dev:
 901	dev_put(dev);
 902send_failed:
 903	return err;
 904}
 905
 906static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 907		       int flags)
 908{
 909	struct sock *sk = sock->sk;
 910	struct sk_buff *skb;
 911	int err = 0;
 912
 913	if (flags & MSG_ERRQUEUE)
 914		return sock_recv_errqueue(sk, msg, size,
 915					  SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);
 916
 917	skb = skb_recv_datagram(sk, flags, &err);
 918	if (!skb)
 919		return err;
 920
 921	if (size < skb->len)
 922		msg->msg_flags |= MSG_TRUNC;
 923	else
 924		size = skb->len;
 925
 926	err = memcpy_to_msg(msg, skb->data, size);
 927	if (err < 0) {
 928		skb_free_datagram(sk, skb);
 929		return err;
 930	}
 931
 932	sock_recv_cmsgs(msg, sk, skb);
 933
 934	if (msg->msg_name) {
 935		__sockaddr_check_size(RAW_MIN_NAMELEN);
 936		msg->msg_namelen = RAW_MIN_NAMELEN;
 937		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
 938	}
 939
 940	/* assign the flags that have been recorded in raw_rcv() */
 941	msg->msg_flags |= *(raw_flags(skb));
 942
 943	skb_free_datagram(sk, skb);
 944
 945	return size;
 946}
 947
 948static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
 949				unsigned long arg)
 950{
 951	/* no ioctls for socket layer -> hand it down to NIC layer */
 952	return -ENOIOCTLCMD;
 953}
 954
 955static const struct proto_ops raw_ops = {
 956	.family        = PF_CAN,
 957	.release       = raw_release,
 958	.bind          = raw_bind,
 959	.connect       = sock_no_connect,
 960	.socketpair    = sock_no_socketpair,
 961	.accept        = sock_no_accept,
 962	.getname       = raw_getname,
 963	.poll          = datagram_poll,
 964	.ioctl         = raw_sock_no_ioctlcmd,
 965	.gettstamp     = sock_gettstamp,
 966	.listen        = sock_no_listen,
 967	.shutdown      = sock_no_shutdown,
 968	.setsockopt    = raw_setsockopt,
 969	.getsockopt    = raw_getsockopt,
 970	.sendmsg       = raw_sendmsg,
 971	.recvmsg       = raw_recvmsg,
 972	.mmap          = sock_no_mmap,
 973};
 974
 975static struct proto raw_proto __read_mostly = {
 976	.name       = "CAN_RAW",
 977	.owner      = THIS_MODULE,
 978	.obj_size   = sizeof(struct raw_sock),
 979	.init       = raw_init,
 980};
 981
 982static const struct can_proto raw_can_proto = {
 983	.type       = SOCK_RAW,
 984	.protocol   = CAN_RAW,
 985	.ops        = &raw_ops,
 986	.prot       = &raw_proto,
 987};
 988
 989static struct notifier_block canraw_notifier = {
 990	.notifier_call = raw_notifier
 991};
 992
 993static __init int raw_module_init(void)
 994{
 995	int err;
 996
 997	pr_info("can: raw protocol\n");
 998
 999	err = register_netdevice_notifier(&canraw_notifier);
1000	if (err)
1001		return err;
1002
1003	err = can_proto_register(&raw_can_proto);
1004	if (err < 0) {
1005		pr_err("can: registration of raw protocol failed\n");
1006		goto register_proto_failed;
1007	}
1008
1009	return 0;
1010
1011register_proto_failed:
1012	unregister_netdevice_notifier(&canraw_notifier);
1013	return err;
1014}
1015
1016static __exit void raw_module_exit(void)
1017{
1018	can_proto_unregister(&raw_can_proto);
1019	unregister_netdevice_notifier(&canraw_notifier);
1020}
1021
1022module_init(raw_module_init);
1023module_exit(raw_module_exit);