net/can/raw.c in Linux v3.5.6:
  1/*
  2 * raw.c - Raw sockets for protocol family CAN
  3 *
  4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  5 * All rights reserved.
  6 *
  7 * Redistribution and use in source and binary forms, with or without
  8 * modification, are permitted provided that the following conditions
  9 * are met:
 10 * 1. Redistributions of source code must retain the above copyright
 11 *    notice, this list of conditions and the following disclaimer.
 12 * 2. Redistributions in binary form must reproduce the above copyright
 13 *    notice, this list of conditions and the following disclaimer in the
 14 *    documentation and/or other materials provided with the distribution.
 15 * 3. Neither the name of Volkswagen nor the names of its contributors
 16 *    may be used to endorse or promote products derived from this software
 17 *    without specific prior written permission.
 18 *
 19 * Alternatively, provided that this notice is retained in full, this
 20 * software may be distributed under the terms of the GNU General
 21 * Public License ("GPL") version 2, in which case the provisions of the
 22 * GPL apply INSTEAD OF those given above.
 23 *
 24 * The provided data structures and external interfaces from this code
 25 * are not restricted to be used by modules with a GPL compatible license.
 26 *
 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 38 * DAMAGE.
 39 *
 40 */
 41
 42#include <linux/module.h>
 43#include <linux/init.h>
 44#include <linux/uio.h>
 45#include <linux/net.h>
 46#include <linux/slab.h>
 47#include <linux/netdevice.h>
 48#include <linux/socket.h>
 49#include <linux/if_arp.h>
 50#include <linux/skbuff.h>
 51#include <linux/can.h>
 52#include <linux/can/core.h>
 53#include <linux/can/raw.h>
 54#include <net/sock.h>
 55#include <net/net_namespace.h>
 56
 57#define CAN_RAW_VERSION CAN_VERSION
 58static __initdata const char banner[] =
 59	KERN_INFO "can: raw protocol (rev " CAN_RAW_VERSION ")\n";
 60
 61MODULE_DESCRIPTION("PF_CAN raw protocol");
 62MODULE_LICENSE("Dual BSD/GPL");
 63MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 64MODULE_ALIAS("can-proto-1");
 65
 66#define MASK_ALL 0
 67
 68/*
 69 * A raw socket has a list of can_filters attached to it, each receiving
 70 * the CAN frames matching that filter.  If the filter list is empty,
 71 * no CAN frames will be received by the socket.  The default after
 72 * opening the socket, is to have one filter which receives all frames.
 73 * The filter list is allocated dynamically with the exception of the
 74 * list containing only one item.  This common case is optimized by
 75 * storing the single filter in dfilter, to avoid using dynamic memory.
 76 */
 77
 78struct raw_sock {
 79	struct sock sk;
 80	int bound;
 81	int ifindex;
 82	struct notifier_block notifier;
 83	int loopback;
 84	int recv_own_msgs;
 85	int count;                 /* number of active filters */
 86	struct can_filter dfilter; /* default/single filter */
 87	struct can_filter *filter; /* pointer to filter(s) */
 88	can_err_mask_t err_mask;
 89};
 90
 91/*
 92 * Return pointer to store the extra msg flags for raw_recvmsg().
 93 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 94 * in skb->cb.
 95 */
 96static inline unsigned int *raw_flags(struct sk_buff *skb)
 97{
 98	BUILD_BUG_ON(sizeof(skb->cb) <= (sizeof(struct sockaddr_can) +
 99					 sizeof(unsigned int)));
100
101	/* return pointer after struct sockaddr_can */
102	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
103}
104
105static inline struct raw_sock *raw_sk(const struct sock *sk)
106{
107	return (struct raw_sock *)sk;
108}
109
110static void raw_rcv(struct sk_buff *oskb, void *data)
111{
112	struct sock *sk = (struct sock *)data;
113	struct raw_sock *ro = raw_sk(sk);
114	struct sockaddr_can *addr;
115	struct sk_buff *skb;
116	unsigned int *pflags;
117
118	/* check the received tx sock reference */
119	if (!ro->recv_own_msgs && oskb->sk == sk)
120		return;
121
122	/* clone the given skb to be able to enqueue it into the rcv queue */
123	skb = skb_clone(oskb, GFP_ATOMIC);
124	if (!skb)
125		return;
126
127	/*
128	 *  Put the datagram to the queue so that raw_recvmsg() can
129	 *  get it from there.  We need to pass the interface index to
130	 *  raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
131	 *  containing the interface index.
132	 */
133
134	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
135	addr = (struct sockaddr_can *)skb->cb;
136	memset(addr, 0, sizeof(*addr));
137	addr->can_family  = AF_CAN;
138	addr->can_ifindex = skb->dev->ifindex;
139
140	/* add CAN specific message flags for raw_recvmsg() */
141	pflags = raw_flags(skb);
142	*pflags = 0;
143	if (oskb->sk)
144		*pflags |= MSG_DONTROUTE;
145	if (oskb->sk == sk)
146		*pflags |= MSG_CONFIRM;
147
148	if (sock_queue_rcv_skb(sk, skb) < 0)
149		kfree_skb(skb);
150}
151
152static int raw_enable_filters(struct net_device *dev, struct sock *sk,
153			      struct can_filter *filter, int count)
154{
155	int err = 0;
156	int i;
157
158	for (i = 0; i < count; i++) {
159		err = can_rx_register(dev, filter[i].can_id,
160				      filter[i].can_mask,
161				      raw_rcv, sk, "raw");
162		if (err) {
163			/* clean up successfully registered filters */
164			while (--i >= 0)
165				can_rx_unregister(dev, filter[i].can_id,
166						  filter[i].can_mask,
167						  raw_rcv, sk);
168			break;
169		}
170	}
171
172	return err;
173}
174
175static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
176				can_err_mask_t err_mask)
177{
178	int err = 0;
179
180	if (err_mask)
181		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
182				      raw_rcv, sk, "raw");
183
184	return err;
185}
186
187static void raw_disable_filters(struct net_device *dev, struct sock *sk,
188			      struct can_filter *filter, int count)
189{
190	int i;
191
192	for (i = 0; i < count; i++)
193		can_rx_unregister(dev, filter[i].can_id, filter[i].can_mask,
194				  raw_rcv, sk);
195}
196
197static inline void raw_disable_errfilter(struct net_device *dev,
198					 struct sock *sk,
199					 can_err_mask_t err_mask)
200
201{
202	if (err_mask)
203		can_rx_unregister(dev, 0, err_mask | CAN_ERR_FLAG,
204				  raw_rcv, sk);
205}
206
207static inline void raw_disable_allfilters(struct net_device *dev,
208					  struct sock *sk)
209{
210	struct raw_sock *ro = raw_sk(sk);
211
212	raw_disable_filters(dev, sk, ro->filter, ro->count);
213	raw_disable_errfilter(dev, sk, ro->err_mask);
214}
215
216static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
217{
218	struct raw_sock *ro = raw_sk(sk);
219	int err;
220
221	err = raw_enable_filters(dev, sk, ro->filter, ro->count);
222	if (!err) {
223		err = raw_enable_errfilter(dev, sk, ro->err_mask);
224		if (err)
225			raw_disable_filters(dev, sk, ro->filter, ro->count);
226	}
227
228	return err;
229}
230
231static int raw_notifier(struct notifier_block *nb,
232			unsigned long msg, void *data)
233{
234	struct net_device *dev = (struct net_device *)data;
235	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
236	struct sock *sk = &ro->sk;
237
238	if (!net_eq(dev_net(dev), &init_net))
239		return NOTIFY_DONE;
240
241	if (dev->type != ARPHRD_CAN)
242		return NOTIFY_DONE;
243
244	if (ro->ifindex != dev->ifindex)
245		return NOTIFY_DONE;
246
247	switch (msg) {
248
249	case NETDEV_UNREGISTER:
250		lock_sock(sk);
251		/* remove current filters & unregister */
252		if (ro->bound)
253			raw_disable_allfilters(dev, sk);
254
255		if (ro->count > 1)
256			kfree(ro->filter);
257
258		ro->ifindex = 0;
259		ro->bound   = 0;
260		ro->count   = 0;
261		release_sock(sk);
262
263		sk->sk_err = ENODEV;
264		if (!sock_flag(sk, SOCK_DEAD))
265			sk->sk_error_report(sk);
266		break;
267
268	case NETDEV_DOWN:
269		sk->sk_err = ENETDOWN;
270		if (!sock_flag(sk, SOCK_DEAD))
271			sk->sk_error_report(sk);
272		break;
273	}
274
275	return NOTIFY_DONE;
276}
277
278static int raw_init(struct sock *sk)
279{
280	struct raw_sock *ro = raw_sk(sk);
281
282	ro->bound            = 0;
283	ro->ifindex          = 0;
284
285	/* set default filter to single entry dfilter */
286	ro->dfilter.can_id   = 0;
287	ro->dfilter.can_mask = MASK_ALL;
288	ro->filter           = &ro->dfilter;
289	ro->count            = 1;
290
291	/* set default loopback behaviour */
292	ro->loopback         = 1;
293	ro->recv_own_msgs    = 0;
294
295	/* set notifier */
296	ro->notifier.notifier_call = raw_notifier;
297
298	register_netdevice_notifier(&ro->notifier);
299
300	return 0;
301}
302
303static int raw_release(struct socket *sock)
304{
305	struct sock *sk = sock->sk;
306	struct raw_sock *ro;
307
308	if (!sk)
309		return 0;
310
311	ro = raw_sk(sk);
312
313	unregister_netdevice_notifier(&ro->notifier);
314
315	lock_sock(sk);
316
317	/* remove current filters & unregister */
318	if (ro->bound) {
319		if (ro->ifindex) {
320			struct net_device *dev;
321
322			dev = dev_get_by_index(&init_net, ro->ifindex);
323			if (dev) {
324				raw_disable_allfilters(dev, sk);
325				dev_put(dev);
326			}
327		} else
328			raw_disable_allfilters(NULL, sk);
329	}
330
331	if (ro->count > 1)
332		kfree(ro->filter);
333
334	ro->ifindex = 0;
335	ro->bound   = 0;
336	ro->count   = 0;
337
338	sock_orphan(sk);
339	sock->sk = NULL;
340
341	release_sock(sk);
342	sock_put(sk);
343
344	return 0;
345}
346
347static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
348{
349	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
350	struct sock *sk = sock->sk;
351	struct raw_sock *ro = raw_sk(sk);
352	int ifindex;
353	int err = 0;
354	int notify_enetdown = 0;
355
356	if (len < sizeof(*addr))
357		return -EINVAL;
358
359	lock_sock(sk);
360
361	if (ro->bound && addr->can_ifindex == ro->ifindex)
362		goto out;
363
364	if (addr->can_ifindex) {
365		struct net_device *dev;
366
367		dev = dev_get_by_index(&init_net, addr->can_ifindex);
368		if (!dev) {
369			err = -ENODEV;
370			goto out;
371		}
372		if (dev->type != ARPHRD_CAN) {
373			dev_put(dev);
374			err = -ENODEV;
375			goto out;
376		}
377		if (!(dev->flags & IFF_UP))
378			notify_enetdown = 1;
379
380		ifindex = dev->ifindex;
381
382		/* filters set by default/setsockopt */
383		err = raw_enable_allfilters(dev, sk);
384		dev_put(dev);
385	} else {
386		ifindex = 0;
387
388		/* filters set by default/setsockopt */
389		err = raw_enable_allfilters(NULL, sk);
390	}
391
392	if (!err) {
393		if (ro->bound) {
394			/* unregister old filters */
395			if (ro->ifindex) {
396				struct net_device *dev;
397
398				dev = dev_get_by_index(&init_net, ro->ifindex);
399				if (dev) {
400					raw_disable_allfilters(dev, sk);
401					dev_put(dev);
402				}
403			} else
404				raw_disable_allfilters(NULL, sk);
405		}
406		ro->ifindex = ifindex;
407		ro->bound = 1;
408	}
409
410 out:
411	release_sock(sk);
412
413	if (notify_enetdown) {
414		sk->sk_err = ENETDOWN;
415		if (!sock_flag(sk, SOCK_DEAD))
416			sk->sk_error_report(sk);
417	}
418
419	return err;
420}
421
422static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
423		       int *len, int peer)
424{
425	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
426	struct sock *sk = sock->sk;
427	struct raw_sock *ro = raw_sk(sk);
428
429	if (peer)
430		return -EOPNOTSUPP;
431
432	memset(addr, 0, sizeof(*addr));
433	addr->can_family  = AF_CAN;
434	addr->can_ifindex = ro->ifindex;
435
436	*len = sizeof(*addr);
437
438	return 0;
439}
440
441static int raw_setsockopt(struct socket *sock, int level, int optname,
442			  char __user *optval, unsigned int optlen)
443{
444	struct sock *sk = sock->sk;
445	struct raw_sock *ro = raw_sk(sk);
446	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
447	struct can_filter sfilter;         /* single filter */
448	struct net_device *dev = NULL;
449	can_err_mask_t err_mask = 0;
450	int count = 0;
451	int err = 0;
452
453	if (level != SOL_CAN_RAW)
454		return -EINVAL;
455
456	switch (optname) {
457
458	case CAN_RAW_FILTER:
459		if (optlen % sizeof(struct can_filter) != 0)
460			return -EINVAL;
461
462		count = optlen / sizeof(struct can_filter);
463
464		if (count > 1) {
465			/* filter does not fit into dfilter => alloc space */
466			filter = memdup_user(optval, optlen);
467			if (IS_ERR(filter))
468				return PTR_ERR(filter);
469		} else if (count == 1) {
470			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
471				return -EFAULT;
472		}
473
474		lock_sock(sk);
475
476		if (ro->bound && ro->ifindex)
477			dev = dev_get_by_index(&init_net, ro->ifindex);
478
479		if (ro->bound) {
480			/* (try to) register the new filters */
481			if (count == 1)
482				err = raw_enable_filters(dev, sk, &sfilter, 1);
483			else
484				err = raw_enable_filters(dev, sk, filter,
485							 count);
486			if (err) {
487				if (count > 1)
488					kfree(filter);
489				goto out_fil;
490			}
491
492			/* remove old filter registrations */
493			raw_disable_filters(dev, sk, ro->filter, ro->count);
494		}
495
496		/* remove old filter space */
497		if (ro->count > 1)
498			kfree(ro->filter);
499
500		/* link new filters to the socket */
501		if (count == 1) {
502			/* copy filter data for single filter */
503			ro->dfilter = sfilter;
504			filter = &ro->dfilter;
505		}
506		ro->filter = filter;
507		ro->count  = count;
508
509 out_fil:
510		if (dev)
511			dev_put(dev);
512
513		release_sock(sk);
514
515		break;
516
517	case CAN_RAW_ERR_FILTER:
518		if (optlen != sizeof(err_mask))
519			return -EINVAL;
520
521		if (copy_from_user(&err_mask, optval, optlen))
522			return -EFAULT;
523
524		err_mask &= CAN_ERR_MASK;
525
526		lock_sock(sk);
527
528		if (ro->bound && ro->ifindex)
529			dev = dev_get_by_index(&init_net, ro->ifindex);
530
531		/* remove current error mask */
532		if (ro->bound) {
533			/* (try to) register the new err_mask */
534			err = raw_enable_errfilter(dev, sk, err_mask);
535
536			if (err)
537				goto out_err;
538
539			/* remove old err_mask registration */
540			raw_disable_errfilter(dev, sk, ro->err_mask);
541		}
542
543		/* link new err_mask to the socket */
544		ro->err_mask = err_mask;
545
546 out_err:
547		if (dev)
548			dev_put(dev);
549
550		release_sock(sk);
551
552		break;
553
554	case CAN_RAW_LOOPBACK:
555		if (optlen != sizeof(ro->loopback))
556			return -EINVAL;
557
558		if (copy_from_user(&ro->loopback, optval, optlen))
559			return -EFAULT;
560
561		break;
562
563	case CAN_RAW_RECV_OWN_MSGS:
564		if (optlen != sizeof(ro->recv_own_msgs))
565			return -EINVAL;
566
567		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
568			return -EFAULT;
569
570		break;
571
572	default:
573		return -ENOPROTOOPT;
574	}
575	return err;
576}
577
578static int raw_getsockopt(struct socket *sock, int level, int optname,
579			  char __user *optval, int __user *optlen)
580{
581	struct sock *sk = sock->sk;
582	struct raw_sock *ro = raw_sk(sk);
583	int len;
584	void *val;
585	int err = 0;
586
587	if (level != SOL_CAN_RAW)
588		return -EINVAL;
589	if (get_user(len, optlen))
590		return -EFAULT;
591	if (len < 0)
592		return -EINVAL;
593
594	switch (optname) {
595
596	case CAN_RAW_FILTER:
597		lock_sock(sk);
598		if (ro->count > 0) {
599			int fsize = ro->count * sizeof(struct can_filter);
600			if (len > fsize)
601				len = fsize;
602			if (copy_to_user(optval, ro->filter, len))
603				err = -EFAULT;
604		} else
605			len = 0;
606		release_sock(sk);
607
608		if (!err)
609			err = put_user(len, optlen);
610		return err;
611
612	case CAN_RAW_ERR_FILTER:
613		if (len > sizeof(can_err_mask_t))
614			len = sizeof(can_err_mask_t);
615		val = &ro->err_mask;
616		break;
617
618	case CAN_RAW_LOOPBACK:
619		if (len > sizeof(int))
620			len = sizeof(int);
621		val = &ro->loopback;
622		break;
623
624	case CAN_RAW_RECV_OWN_MSGS:
625		if (len > sizeof(int))
626			len = sizeof(int);
627		val = &ro->recv_own_msgs;
628		break;
629
630	default:
631		return -ENOPROTOOPT;
632	}
633
634	if (put_user(len, optlen))
635		return -EFAULT;
636	if (copy_to_user(optval, val, len))
637		return -EFAULT;
638	return 0;
639}
640
641static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
642		       struct msghdr *msg, size_t size)
643{
644	struct sock *sk = sock->sk;
645	struct raw_sock *ro = raw_sk(sk);
646	struct sk_buff *skb;
647	struct net_device *dev;
648	int ifindex;
649	int err;
650
651	if (msg->msg_name) {
652		struct sockaddr_can *addr =
653			(struct sockaddr_can *)msg->msg_name;
654
655		if (msg->msg_namelen < sizeof(*addr))
656			return -EINVAL;
657
658		if (addr->can_family != AF_CAN)
659			return -EINVAL;
660
661		ifindex = addr->can_ifindex;
662	} else
663		ifindex = ro->ifindex;
664
665	if (size != sizeof(struct can_frame))
666		return -EINVAL;
667
668	dev = dev_get_by_index(&init_net, ifindex);
669	if (!dev)
670		return -ENXIO;
671
672	skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT,
673				  &err);
674	if (!skb)
675		goto put_dev;
676
677	err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
678	if (err < 0)
679		goto free_skb;
680	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
681	if (err < 0)
682		goto free_skb;
683
684	skb->dev = dev;
685	skb->sk  = sk;
686
687	err = can_send(skb, ro->loopback);
688
689	dev_put(dev);
690
691	if (err)
692		goto send_failed;
693
694	return size;
695
696free_skb:
697	kfree_skb(skb);
698put_dev:
699	dev_put(dev);
700send_failed:
701	return err;
702}
703
704static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
705		       struct msghdr *msg, size_t size, int flags)
706{
707	struct sock *sk = sock->sk;
708	struct sk_buff *skb;
709	int err = 0;
710	int noblock;
711
712	noblock =  flags & MSG_DONTWAIT;
713	flags   &= ~MSG_DONTWAIT;
714
715	skb = skb_recv_datagram(sk, flags, noblock, &err);
716	if (!skb)
717		return err;
718
719	if (size < skb->len)
720		msg->msg_flags |= MSG_TRUNC;
721	else
722		size = skb->len;
723
724	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
725	if (err < 0) {
726		skb_free_datagram(sk, skb);
727		return err;
728	}
729
730	sock_recv_ts_and_drops(msg, sk, skb);
731
732	if (msg->msg_name) {
733		msg->msg_namelen = sizeof(struct sockaddr_can);
734		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
735	}
736
737	/* assign the flags that have been recorded in raw_rcv() */
738	msg->msg_flags |= *(raw_flags(skb));
739
740	skb_free_datagram(sk, skb);
741
742	return size;
743}
744
745static const struct proto_ops raw_ops = {
746	.family        = PF_CAN,
747	.release       = raw_release,
748	.bind          = raw_bind,
749	.connect       = sock_no_connect,
750	.socketpair    = sock_no_socketpair,
751	.accept        = sock_no_accept,
752	.getname       = raw_getname,
753	.poll          = datagram_poll,
754	.ioctl         = can_ioctl,	/* use can_ioctl() from af_can.c */
755	.listen        = sock_no_listen,
756	.shutdown      = sock_no_shutdown,
757	.setsockopt    = raw_setsockopt,
758	.getsockopt    = raw_getsockopt,
759	.sendmsg       = raw_sendmsg,
760	.recvmsg       = raw_recvmsg,
761	.mmap          = sock_no_mmap,
762	.sendpage      = sock_no_sendpage,
763};
764
765static struct proto raw_proto __read_mostly = {
766	.name       = "CAN_RAW",
767	.owner      = THIS_MODULE,
768	.obj_size   = sizeof(struct raw_sock),
769	.init       = raw_init,
770};
771
772static const struct can_proto raw_can_proto = {
773	.type       = SOCK_RAW,
774	.protocol   = CAN_RAW,
775	.ops        = &raw_ops,
776	.prot       = &raw_proto,
777};
778
779static __init int raw_module_init(void)
780{
781	int err;
782
783	printk(banner);
784
785	err = can_proto_register(&raw_can_proto);
786	if (err < 0)
787		printk(KERN_ERR "can: registration of raw protocol failed\n");
788
789	return err;
790}
791
792static __exit void raw_module_exit(void)
793{
794	can_proto_unregister(&raw_can_proto);
795}
796
797module_init(raw_module_init);
798module_exit(raw_module_exit);
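
The listing above is the kernel side of the protocol; user space reaches it through ordinary BSD socket calls. What follows is a minimal, illustrative sketch (not part of raw.c) of that API as implemented in this version: it opens a CAN_RAW socket, binds it to an interface, installs two receive filters with CAN_RAW_FILTER and reads one classic struct can_frame. The interface name "can0" is an assumption.

/* User-space sketch for the CAN_RAW API implemented above.
 * Assumption: a CAN interface named "can0" exists and is up.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { 0 };
	struct can_filter rfilter[2];
	struct ifreq ifr;
	struct can_frame frame;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return 1;

	/* resolve "can0" to an interface index for bind() */
	strcpy(ifr.ifr_name, "can0");
	ioctl(s, SIOCGIFINDEX, &ifr);

	addr.can_family  = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* two filters: ID 0x123 only, and the range 0x200-0x2ff */
	rfilter[0].can_id   = 0x123;
	rfilter[0].can_mask = CAN_SFF_MASK;
	rfilter[1].can_id   = 0x200;
	rfilter[1].can_mask = 0x700;
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));

	/* classic CAN frames are fixed-size datagrams on this socket */
	if (read(s, &frame, sizeof(frame)) == sizeof(frame))
		printf("ID 0x%X dlc %d\n", frame.can_id, frame.can_dlc);

	close(s);
	return 0;
}
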
net/can/raw.c in Linux v5.9:
  1// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
  2/* raw.c - Raw sockets for protocol family CAN
  3 *
  4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  5 * All rights reserved.
  6 *
  7 * Redistribution and use in source and binary forms, with or without
  8 * modification, are permitted provided that the following conditions
  9 * are met:
 10 * 1. Redistributions of source code must retain the above copyright
 11 *    notice, this list of conditions and the following disclaimer.
 12 * 2. Redistributions in binary form must reproduce the above copyright
 13 *    notice, this list of conditions and the following disclaimer in the
 14 *    documentation and/or other materials provided with the distribution.
 15 * 3. Neither the name of Volkswagen nor the names of its contributors
 16 *    may be used to endorse or promote products derived from this software
 17 *    without specific prior written permission.
 18 *
 19 * Alternatively, provided that this notice is retained in full, this
 20 * software may be distributed under the terms of the GNU General
 21 * Public License ("GPL") version 2, in which case the provisions of the
 22 * GPL apply INSTEAD OF those given above.
 23 *
 24 * The provided data structures and external interfaces from this code
 25 * are not restricted to be used by modules with a GPL compatible license.
 26 *
 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 38 * DAMAGE.
 39 *
 40 */
 41
 42#include <linux/module.h>
 43#include <linux/init.h>
 44#include <linux/uio.h>
 45#include <linux/net.h>
 46#include <linux/slab.h>
 47#include <linux/netdevice.h>
 48#include <linux/socket.h>
 49#include <linux/if_arp.h>
 50#include <linux/skbuff.h>
 51#include <linux/can.h>
 52#include <linux/can/core.h>
 53#include <linux/can/skb.h>
 54#include <linux/can/raw.h>
 55#include <net/sock.h>
 56#include <net/net_namespace.h>
 57
 58#define CAN_RAW_VERSION CAN_VERSION
 59
 60MODULE_DESCRIPTION("PF_CAN raw protocol");
 61MODULE_LICENSE("Dual BSD/GPL");
 62MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 63MODULE_ALIAS("can-proto-1");
 64
 65#define MASK_ALL 0
 66
 67/* A raw socket has a list of can_filters attached to it, each receiving
 68 * the CAN frames matching that filter.  If the filter list is empty,
 69 * no CAN frames will be received by the socket.  The default after
 70 * opening the socket, is to have one filter which receives all frames.
 71 * The filter list is allocated dynamically with the exception of the
 72 * list containing only one item.  This common case is optimized by
 73 * storing the single filter in dfilter, to avoid using dynamic memory.
 74 */
 75
 76struct uniqframe {
 77	int skbcnt;
 78	const struct sk_buff *skb;
 79	unsigned int join_rx_count;
 80};
 81
 82struct raw_sock {
 83	struct sock sk;
 84	int bound;
 85	int ifindex;
 86	struct notifier_block notifier;
 87	int loopback;
 88	int recv_own_msgs;
 89	int fd_frames;
 90	int join_filters;
 91	int count;                 /* number of active filters */
 92	struct can_filter dfilter; /* default/single filter */
 93	struct can_filter *filter; /* pointer to filter(s) */
 94	can_err_mask_t err_mask;
 95	struct uniqframe __percpu *uniq;
 96};
 97
 98/* Return pointer to store the extra msg flags for raw_recvmsg().
 99 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
100 * in skb->cb.
101 */
102static inline unsigned int *raw_flags(struct sk_buff *skb)
103{
104	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
105			       sizeof(unsigned int));
106
107	/* return pointer after struct sockaddr_can */
108	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
109}
110
111static inline struct raw_sock *raw_sk(const struct sock *sk)
112{
113	return (struct raw_sock *)sk;
114}
115
116static void raw_rcv(struct sk_buff *oskb, void *data)
117{
118	struct sock *sk = (struct sock *)data;
119	struct raw_sock *ro = raw_sk(sk);
120	struct sockaddr_can *addr;
121	struct sk_buff *skb;
122	unsigned int *pflags;
123
124	/* check the received tx sock reference */
125	if (!ro->recv_own_msgs && oskb->sk == sk)
126		return;
127
128	/* do not pass non-CAN2.0 frames to a legacy socket */
129	if (!ro->fd_frames && oskb->len != CAN_MTU)
130		return;
131
132	/* eliminate multiple filter matches for the same skb */
133	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
134	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
135		if (ro->join_filters) {
136			this_cpu_inc(ro->uniq->join_rx_count);
137			/* drop frame until all enabled filters matched */
138			if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
139				return;
140		} else {
141			return;
142		}
143	} else {
144		this_cpu_ptr(ro->uniq)->skb = oskb;
145		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
146		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
147		/* drop first frame to check all enabled filters? */
148		if (ro->join_filters && ro->count > 1)
149			return;
150	}
151
152	/* clone the given skb to be able to enqueue it into the rcv queue */
153	skb = skb_clone(oskb, GFP_ATOMIC);
154	if (!skb)
155		return;
156
157	/*  Put the datagram to the queue so that raw_recvmsg() can
158	 *  get it from there.  We need to pass the interface index to
159	 *  raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
160	 *  containing the interface index.
161	 */
162
163	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
164	addr = (struct sockaddr_can *)skb->cb;
165	memset(addr, 0, sizeof(*addr));
166	addr->can_family  = AF_CAN;
167	addr->can_ifindex = skb->dev->ifindex;
168
169	/* add CAN specific message flags for raw_recvmsg() */
170	pflags = raw_flags(skb);
171	*pflags = 0;
172	if (oskb->sk)
173		*pflags |= MSG_DONTROUTE;
174	if (oskb->sk == sk)
175		*pflags |= MSG_CONFIRM;
176
177	if (sock_queue_rcv_skb(sk, skb) < 0)
178		kfree_skb(skb);
179}
180
181static int raw_enable_filters(struct net *net, struct net_device *dev,
182			      struct sock *sk, struct can_filter *filter,
183			      int count)
184{
185	int err = 0;
186	int i;
187
188	for (i = 0; i < count; i++) {
189		err = can_rx_register(net, dev, filter[i].can_id,
190				      filter[i].can_mask,
191				      raw_rcv, sk, "raw", sk);
192		if (err) {
193			/* clean up successfully registered filters */
194			while (--i >= 0)
195				can_rx_unregister(net, dev, filter[i].can_id,
196						  filter[i].can_mask,
197						  raw_rcv, sk);
198			break;
199		}
200	}
201
202	return err;
203}
204
205static int raw_enable_errfilter(struct net *net, struct net_device *dev,
206				struct sock *sk, can_err_mask_t err_mask)
207{
208	int err = 0;
209
210	if (err_mask)
211		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
212				      raw_rcv, sk, "raw", sk);
213
214	return err;
215}
216
217static void raw_disable_filters(struct net *net, struct net_device *dev,
218				struct sock *sk, struct can_filter *filter,
219				int count)
220{
221	int i;
222
223	for (i = 0; i < count; i++)
224		can_rx_unregister(net, dev, filter[i].can_id,
225				  filter[i].can_mask, raw_rcv, sk);
226}
227
228static inline void raw_disable_errfilter(struct net *net,
229					 struct net_device *dev,
230					 struct sock *sk,
231					 can_err_mask_t err_mask)
232
233{
234	if (err_mask)
235		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
236				  raw_rcv, sk);
237}
238
239static inline void raw_disable_allfilters(struct net *net,
240					  struct net_device *dev,
241					  struct sock *sk)
242{
243	struct raw_sock *ro = raw_sk(sk);
244
245	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
246	raw_disable_errfilter(net, dev, sk, ro->err_mask);
247}
248
249static int raw_enable_allfilters(struct net *net, struct net_device *dev,
250				 struct sock *sk)
251{
252	struct raw_sock *ro = raw_sk(sk);
253	int err;
254
255	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
256	if (!err) {
257		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
258		if (err)
259			raw_disable_filters(net, dev, sk, ro->filter,
260					    ro->count);
261	}
262
263	return err;
264}
265
266static int raw_notifier(struct notifier_block *nb,
267			unsigned long msg, void *ptr)
268{
269	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
270	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
271	struct sock *sk = &ro->sk;
272
273	if (!net_eq(dev_net(dev), sock_net(sk)))
274		return NOTIFY_DONE;
275
276	if (dev->type != ARPHRD_CAN)
277		return NOTIFY_DONE;
278
279	if (ro->ifindex != dev->ifindex)
280		return NOTIFY_DONE;
281
282	switch (msg) {
283	case NETDEV_UNREGISTER:
284		lock_sock(sk);
285		/* remove current filters & unregister */
286		if (ro->bound)
287			raw_disable_allfilters(dev_net(dev), dev, sk);
288
289		if (ro->count > 1)
290			kfree(ro->filter);
291
292		ro->ifindex = 0;
293		ro->bound   = 0;
294		ro->count   = 0;
295		release_sock(sk);
296
297		sk->sk_err = ENODEV;
298		if (!sock_flag(sk, SOCK_DEAD))
299			sk->sk_error_report(sk);
300		break;
301
302	case NETDEV_DOWN:
303		sk->sk_err = ENETDOWN;
304		if (!sock_flag(sk, SOCK_DEAD))
305			sk->sk_error_report(sk);
306		break;
307	}
308
309	return NOTIFY_DONE;
310}
311
312static int raw_init(struct sock *sk)
313{
314	struct raw_sock *ro = raw_sk(sk);
315
316	ro->bound            = 0;
317	ro->ifindex          = 0;
318
319	/* set default filter to single entry dfilter */
320	ro->dfilter.can_id   = 0;
321	ro->dfilter.can_mask = MASK_ALL;
322	ro->filter           = &ro->dfilter;
323	ro->count            = 1;
324
325	/* set default loopback behaviour */
326	ro->loopback         = 1;
327	ro->recv_own_msgs    = 0;
328	ro->fd_frames        = 0;
329	ro->join_filters     = 0;
330
331	/* alloc_percpu provides zero'ed memory */
332	ro->uniq = alloc_percpu(struct uniqframe);
333	if (unlikely(!ro->uniq))
334		return -ENOMEM;
335
336	/* set notifier */
337	ro->notifier.notifier_call = raw_notifier;
338
339	register_netdevice_notifier(&ro->notifier);
340
341	return 0;
342}
343
344static int raw_release(struct socket *sock)
345{
346	struct sock *sk = sock->sk;
347	struct raw_sock *ro;
348
349	if (!sk)
350		return 0;
351
352	ro = raw_sk(sk);
353
354	unregister_netdevice_notifier(&ro->notifier);
355
356	lock_sock(sk);
357
358	/* remove current filters & unregister */
359	if (ro->bound) {
360		if (ro->ifindex) {
361			struct net_device *dev;
362
363			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
364			if (dev) {
365				raw_disable_allfilters(dev_net(dev), dev, sk);
366				dev_put(dev);
367			}
368		} else {
369			raw_disable_allfilters(sock_net(sk), NULL, sk);
370		}
371	}
372
373	if (ro->count > 1)
374		kfree(ro->filter);
375
376	ro->ifindex = 0;
377	ro->bound   = 0;
378	ro->count   = 0;
379	free_percpu(ro->uniq);
380
381	sock_orphan(sk);
382	sock->sk = NULL;
383
384	release_sock(sk);
385	sock_put(sk);
386
387	return 0;
388}
389
390static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
391{
392	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
393	struct sock *sk = sock->sk;
394	struct raw_sock *ro = raw_sk(sk);
395	int ifindex;
396	int err = 0;
397	int notify_enetdown = 0;
398
399	if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
400		return -EINVAL;
401	if (addr->can_family != AF_CAN)
402		return -EINVAL;
403
404	lock_sock(sk);
405
406	if (ro->bound && addr->can_ifindex == ro->ifindex)
407		goto out;
408
409	if (addr->can_ifindex) {
410		struct net_device *dev;
411
412		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
413		if (!dev) {
414			err = -ENODEV;
415			goto out;
416		}
417		if (dev->type != ARPHRD_CAN) {
418			dev_put(dev);
419			err = -ENODEV;
420			goto out;
421		}
422		if (!(dev->flags & IFF_UP))
423			notify_enetdown = 1;
424
425		ifindex = dev->ifindex;
426
427		/* filters set by default/setsockopt */
428		err = raw_enable_allfilters(sock_net(sk), dev, sk);
429		dev_put(dev);
430	} else {
431		ifindex = 0;
432
433		/* filters set by default/setsockopt */
434		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
435	}
436
437	if (!err) {
438		if (ro->bound) {
439			/* unregister old filters */
440			if (ro->ifindex) {
441				struct net_device *dev;
442
443				dev = dev_get_by_index(sock_net(sk),
444						       ro->ifindex);
445				if (dev) {
446					raw_disable_allfilters(dev_net(dev),
447							       dev, sk);
448					dev_put(dev);
449				}
450			} else {
451				raw_disable_allfilters(sock_net(sk), NULL, sk);
452			}
453		}
454		ro->ifindex = ifindex;
455		ro->bound = 1;
456	}
457
458 out:
459	release_sock(sk);
460
461	if (notify_enetdown) {
462		sk->sk_err = ENETDOWN;
463		if (!sock_flag(sk, SOCK_DEAD))
464			sk->sk_error_report(sk);
465	}
466
467	return err;
468}
469
470static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
471		       int peer)
472{
473	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
474	struct sock *sk = sock->sk;
475	struct raw_sock *ro = raw_sk(sk);
476
477	if (peer)
478		return -EOPNOTSUPP;
479
480	memset(addr, 0, sizeof(*addr));
481	addr->can_family  = AF_CAN;
482	addr->can_ifindex = ro->ifindex;
483
484	return sizeof(*addr);
485}
486
487static int raw_setsockopt(struct socket *sock, int level, int optname,
488			  sockptr_t optval, unsigned int optlen)
489{
490	struct sock *sk = sock->sk;
491	struct raw_sock *ro = raw_sk(sk);
492	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
493	struct can_filter sfilter;         /* single filter */
494	struct net_device *dev = NULL;
495	can_err_mask_t err_mask = 0;
496	int count = 0;
497	int err = 0;
498
499	if (level != SOL_CAN_RAW)
500		return -EINVAL;
501
502	switch (optname) {
503	case CAN_RAW_FILTER:
504		if (optlen % sizeof(struct can_filter) != 0)
505			return -EINVAL;
506
507		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
508			return -EINVAL;
509
510		count = optlen / sizeof(struct can_filter);
511
512		if (count > 1) {
513			/* filter does not fit into dfilter => alloc space */
514			filter = memdup_sockptr(optval, optlen);
515			if (IS_ERR(filter))
516				return PTR_ERR(filter);
517		} else if (count == 1) {
518			if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
519				return -EFAULT;
520		}
521
522		lock_sock(sk);
523
524		if (ro->bound && ro->ifindex)
525			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
526
527		if (ro->bound) {
528			/* (try to) register the new filters */
529			if (count == 1)
530				err = raw_enable_filters(sock_net(sk), dev, sk,
531							 &sfilter, 1);
532			else
533				err = raw_enable_filters(sock_net(sk), dev, sk,
534							 filter, count);
535			if (err) {
536				if (count > 1)
537					kfree(filter);
538				goto out_fil;
539			}
540
541			/* remove old filter registrations */
542			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
543					    ro->count);
544		}
545
546		/* remove old filter space */
547		if (ro->count > 1)
548			kfree(ro->filter);
549
550		/* link new filters to the socket */
551		if (count == 1) {
552			/* copy filter data for single filter */
553			ro->dfilter = sfilter;
554			filter = &ro->dfilter;
555		}
556		ro->filter = filter;
557		ro->count  = count;
558
559 out_fil:
560		if (dev)
561			dev_put(dev);
562
563		release_sock(sk);
564
565		break;
566
567	case CAN_RAW_ERR_FILTER:
568		if (optlen != sizeof(err_mask))
569			return -EINVAL;
570
571		if (copy_from_sockptr(&err_mask, optval, optlen))
572			return -EFAULT;
573
574		err_mask &= CAN_ERR_MASK;
575
576		lock_sock(sk);
577
578		if (ro->bound && ro->ifindex)
579			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
580
581		/* remove current error mask */
582		if (ro->bound) {
583			/* (try to) register the new err_mask */
584			err = raw_enable_errfilter(sock_net(sk), dev, sk,
585						   err_mask);
586
587			if (err)
588				goto out_err;
589
590			/* remove old err_mask registration */
591			raw_disable_errfilter(sock_net(sk), dev, sk,
592					      ro->err_mask);
593		}
594
595		/* link new err_mask to the socket */
596		ro->err_mask = err_mask;
597
598 out_err:
599		if (dev)
600			dev_put(dev);
601
602		release_sock(sk);
603
604		break;
605
606	case CAN_RAW_LOOPBACK:
607		if (optlen != sizeof(ro->loopback))
608			return -EINVAL;
609
610		if (copy_from_sockptr(&ro->loopback, optval, optlen))
611			return -EFAULT;
612
613		break;
614
615	case CAN_RAW_RECV_OWN_MSGS:
616		if (optlen != sizeof(ro->recv_own_msgs))
617			return -EINVAL;
618
619		if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
620			return -EFAULT;
621
622		break;
623
624	case CAN_RAW_FD_FRAMES:
625		if (optlen != sizeof(ro->fd_frames))
626			return -EINVAL;
627
628		if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
629			return -EFAULT;
630
631		break;
632
633	case CAN_RAW_JOIN_FILTERS:
634		if (optlen != sizeof(ro->join_filters))
635			return -EINVAL;
636
637		if (copy_from_sockptr(&ro->join_filters, optval, optlen))
638			return -EFAULT;
639
640		break;
641
642	default:
643		return -ENOPROTOOPT;
644	}
645	return err;
646}
647
648static int raw_getsockopt(struct socket *sock, int level, int optname,
649			  char __user *optval, int __user *optlen)
650{
651	struct sock *sk = sock->sk;
652	struct raw_sock *ro = raw_sk(sk);
653	int len;
654	void *val;
655	int err = 0;
656
657	if (level != SOL_CAN_RAW)
658		return -EINVAL;
659	if (get_user(len, optlen))
660		return -EFAULT;
661	if (len < 0)
662		return -EINVAL;
663
664	switch (optname) {
665	case CAN_RAW_FILTER:
666		lock_sock(sk);
667		if (ro->count > 0) {
668			int fsize = ro->count * sizeof(struct can_filter);
669
670			if (len > fsize)
671				len = fsize;
672			if (copy_to_user(optval, ro->filter, len))
673				err = -EFAULT;
674		} else {
675			len = 0;
676		}
677		release_sock(sk);
678
679		if (!err)
680			err = put_user(len, optlen);
681		return err;
682
683	case CAN_RAW_ERR_FILTER:
684		if (len > sizeof(can_err_mask_t))
685			len = sizeof(can_err_mask_t);
686		val = &ro->err_mask;
687		break;
688
689	case CAN_RAW_LOOPBACK:
690		if (len > sizeof(int))
691			len = sizeof(int);
692		val = &ro->loopback;
693		break;
694
695	case CAN_RAW_RECV_OWN_MSGS:
696		if (len > sizeof(int))
697			len = sizeof(int);
698		val = &ro->recv_own_msgs;
699		break;
700
701	case CAN_RAW_FD_FRAMES:
702		if (len > sizeof(int))
703			len = sizeof(int);
704		val = &ro->fd_frames;
705		break;
706
707	case CAN_RAW_JOIN_FILTERS:
708		if (len > sizeof(int))
709			len = sizeof(int);
710		val = &ro->join_filters;
711		break;
712
713	default:
714		return -ENOPROTOOPT;
715	}
716
717	if (put_user(len, optlen))
718		return -EFAULT;
719	if (copy_to_user(optval, val, len))
720		return -EFAULT;
721	return 0;
722}
723
724static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
725{
726	struct sock *sk = sock->sk;
727	struct raw_sock *ro = raw_sk(sk);
728	struct sk_buff *skb;
729	struct net_device *dev;
730	int ifindex;
731	int err;
732
733	if (msg->msg_name) {
734		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
735
736		if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
737			return -EINVAL;
738
739		if (addr->can_family != AF_CAN)
740			return -EINVAL;
741
742		ifindex = addr->can_ifindex;
743	} else {
744		ifindex = ro->ifindex;
745	}
746
747	dev = dev_get_by_index(sock_net(sk), ifindex);
748	if (!dev)
749		return -ENXIO;
750
751	err = -EINVAL;
752	if (ro->fd_frames && dev->mtu == CANFD_MTU) {
753		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
754			goto put_dev;
755	} else {
756		if (unlikely(size != CAN_MTU))
757			goto put_dev;
758	}
759
760	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
761				  msg->msg_flags & MSG_DONTWAIT, &err);
762	if (!skb)
763		goto put_dev;
764
765	can_skb_reserve(skb);
766	can_skb_prv(skb)->ifindex = dev->ifindex;
767	can_skb_prv(skb)->skbcnt = 0;
768
769	err = memcpy_from_msg(skb_put(skb, size), msg, size);
770	if (err < 0)
771		goto free_skb;
772
773	skb_setup_tx_timestamp(skb, sk->sk_tsflags);
774
775	skb->dev = dev;
776	skb->sk  = sk;
777	skb->priority = sk->sk_priority;
778
779	err = can_send(skb, ro->loopback);
780
781	dev_put(dev);
782
783	if (err)
784		goto send_failed;
785
786	return size;
787
788free_skb:
789	kfree_skb(skb);
790put_dev:
791	dev_put(dev);
792send_failed:
793	return err;
794}
795
796static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
797		       int flags)
798{
799	struct sock *sk = sock->sk;
800	struct sk_buff *skb;
801	int err = 0;
802	int noblock;
803
804	noblock =  flags & MSG_DONTWAIT;
805	flags   &= ~MSG_DONTWAIT;
806
807	skb = skb_recv_datagram(sk, flags, noblock, &err);
808	if (!skb)
809		return err;
810
811	if (size < skb->len)
812		msg->msg_flags |= MSG_TRUNC;
813	else
814		size = skb->len;
815
816	err = memcpy_to_msg(msg, skb->data, size);
817	if (err < 0) {
818		skb_free_datagram(sk, skb);
819		return err;
820	}
821
822	sock_recv_ts_and_drops(msg, sk, skb);
823
824	if (msg->msg_name) {
825		__sockaddr_check_size(sizeof(struct sockaddr_can));
826		msg->msg_namelen = sizeof(struct sockaddr_can);
827		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
828	}
829
830	/* assign the flags that have been recorded in raw_rcv() */
831	msg->msg_flags |= *(raw_flags(skb));
832
833	skb_free_datagram(sk, skb);
834
835	return size;
836}
837
838static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
839				unsigned long arg)
840{
841	/* no ioctls for socket layer -> hand it down to NIC layer */
842	return -ENOIOCTLCMD;
843}
844
845static const struct proto_ops raw_ops = {
846	.family        = PF_CAN,
847	.release       = raw_release,
848	.bind          = raw_bind,
849	.connect       = sock_no_connect,
850	.socketpair    = sock_no_socketpair,
851	.accept        = sock_no_accept,
852	.getname       = raw_getname,
853	.poll          = datagram_poll,
854	.ioctl         = raw_sock_no_ioctlcmd,
855	.gettstamp     = sock_gettstamp,
856	.listen        = sock_no_listen,
857	.shutdown      = sock_no_shutdown,
858	.setsockopt    = raw_setsockopt,
859	.getsockopt    = raw_getsockopt,
860	.sendmsg       = raw_sendmsg,
861	.recvmsg       = raw_recvmsg,
862	.mmap          = sock_no_mmap,
863	.sendpage      = sock_no_sendpage,
864};
865
866static struct proto raw_proto __read_mostly = {
867	.name       = "CAN_RAW",
868	.owner      = THIS_MODULE,
869	.obj_size   = sizeof(struct raw_sock),
870	.init       = raw_init,
871};
872
873static const struct can_proto raw_can_proto = {
874	.type       = SOCK_RAW,
875	.protocol   = CAN_RAW,
876	.ops        = &raw_ops,
877	.prot       = &raw_proto,
878};
879
880static __init int raw_module_init(void)
881{
882	int err;
883
884	pr_info("can: raw protocol (rev " CAN_RAW_VERSION ")\n");
885
886	err = can_proto_register(&raw_can_proto);
887	if (err < 0)
888		pr_err("can: registration of raw protocol failed\n");
889
890	return err;
891}
892
893static __exit void raw_module_exit(void)
894{
895	can_proto_unregister(&raw_can_proto);
896}
897
898module_init(raw_module_init);
899module_exit(raw_module_exit);
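
Compared with the v3.5.6 listing, this version adds two socket options visible above: CAN_RAW_FD_FRAMES (accept struct canfd_frame on the socket, checked against the device MTU in raw_sendmsg()) and CAN_RAW_JOIN_FILTERS (deliver a frame only if all configured filters match, tracked per CPU in raw_rcv()). Below is a hedged user-space sketch of enabling both and writing one CAN FD frame; again the interface name "can0" is an assumption, and the device must already be configured for CAN FD.

/* Illustrative sketch of the options added in the newer code:
 * CAN_RAW_FD_FRAMES and CAN_RAW_JOIN_FILTERS.
 * Assumption: "can0" is a CAN FD capable interface that is up.
 */
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { 0 };
	struct canfd_frame fdf = { 0 };
	struct ifreq ifr;
	int enable = 1;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return 1;

	strcpy(ifr.ifr_name, "can0");
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_family  = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* allow struct canfd_frame in addition to struct can_frame */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &enable, sizeof(enable));

	/* deliver a frame only when ALL configured filters match it */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS, &enable, sizeof(enable));

	/* send one 12-byte CAN FD frame; the write size (CANFD_MTU)
	 * tells raw_sendmsg() that this is an FD frame */
	fdf.can_id = 0x123;
	fdf.len    = 12;
	memset(fdf.data, 0xAA, fdf.len);
	write(s, &fdf, sizeof(fdf));

	close(s);
	return 0;
}
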