raw.c (Linux v3.5.6)
  1/*
  2 * raw.c - Raw sockets for protocol family CAN
  3 *
  4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  5 * All rights reserved.
  6 *
  7 * Redistribution and use in source and binary forms, with or without
  8 * modification, are permitted provided that the following conditions
  9 * are met:
 10 * 1. Redistributions of source code must retain the above copyright
 11 *    notice, this list of conditions and the following disclaimer.
 12 * 2. Redistributions in binary form must reproduce the above copyright
 13 *    notice, this list of conditions and the following disclaimer in the
 14 *    documentation and/or other materials provided with the distribution.
 15 * 3. Neither the name of Volkswagen nor the names of its contributors
 16 *    may be used to endorse or promote products derived from this software
 17 *    without specific prior written permission.
 18 *
 19 * Alternatively, provided that this notice is retained in full, this
 20 * software may be distributed under the terms of the GNU General
 21 * Public License ("GPL") version 2, in which case the provisions of the
 22 * GPL apply INSTEAD OF those given above.
 23 *
 24 * The provided data structures and external interfaces from this code
 25 * are not restricted to be used by modules with a GPL compatible license.
 26 *
 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 38 * DAMAGE.
 39 *
 40 */
 41
 42#include <linux/module.h>
 43#include <linux/init.h>
 44#include <linux/uio.h>
 45#include <linux/net.h>
 46#include <linux/slab.h>
 47#include <linux/netdevice.h>
 48#include <linux/socket.h>
 49#include <linux/if_arp.h>
 50#include <linux/skbuff.h>
 51#include <linux/can.h>
 52#include <linux/can/core.h>
 53#include <linux/can/raw.h>
 54#include <net/sock.h>
 55#include <net/net_namespace.h>
 56
 57#define CAN_RAW_VERSION CAN_VERSION
 58static __initdata const char banner[] =
 59	KERN_INFO "can: raw protocol (rev " CAN_RAW_VERSION ")\n";
 60
 61MODULE_DESCRIPTION("PF_CAN raw protocol");
 62MODULE_LICENSE("Dual BSD/GPL");
 63MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 64MODULE_ALIAS("can-proto-1");
 65
 66#define MASK_ALL 0
 67
 68/*
 69 * A raw socket has a list of can_filters attached to it, each receiving
 70 * the CAN frames matching that filter.  If the filter list is empty,
 71 * no CAN frames will be received by the socket.  The default after
 72 * opening the socket, is to have one filter which receives all frames.
 73 * The filter list is allocated dynamically with the exception of the
 74 * list containing only one item.  This common case is optimized by
 75 * storing the single filter in dfilter, to avoid using dynamic memory.
 76 */
 77
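/*
 * Illustrative userspace sketch -- not part of this kernel file and kept
 * out of any build by the "#if 0" guard.  It shows how a filter list
 * might be attached with setsockopt(), which is what ends up in
 * raw_setsockopt() and raw_enable_filters() below.  The descriptor 's'
 * and the filter values are assumptions made for the example.
 */
#if 0
	struct can_filter rfilter[2] = {
		{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },
		{ .can_id = 0x200, .can_mask = 0x700 },
	};

	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
#endif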
 78struct raw_sock {
 79	struct sock sk;
 80	int bound;
 81	int ifindex;
 82	struct notifier_block notifier;
 83	int loopback;
 84	int recv_own_msgs;
 85	int count;                 /* number of active filters */
 86	struct can_filter dfilter; /* default/single filter */
 87	struct can_filter *filter; /* pointer to filter(s) */
 88	can_err_mask_t err_mask;
 89};
 90
 91/*
 92 * Return pointer to store the extra msg flags for raw_recvmsg().
 93 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 94 * in skb->cb.
 95 */
 96static inline unsigned int *raw_flags(struct sk_buff *skb)
 97{
 98	BUILD_BUG_ON(sizeof(skb->cb) <= (sizeof(struct sockaddr_can) +
 99					 sizeof(unsigned int)));
100
101	/* return pointer after struct sockaddr_can */
102	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
103}
104
105static inline struct raw_sock *raw_sk(const struct sock *sk)
106{
107	return (struct raw_sock *)sk;
108}
109
110static void raw_rcv(struct sk_buff *oskb, void *data)
111{
112	struct sock *sk = (struct sock *)data;
113	struct raw_sock *ro = raw_sk(sk);
114	struct sockaddr_can *addr;
115	struct sk_buff *skb;
116	unsigned int *pflags;
117
118	/* check the received tx sock reference */
119	if (!ro->recv_own_msgs && oskb->sk == sk)
120		return;
121
122	/* clone the given skb to be able to enqueue it into the rcv queue */
123	skb = skb_clone(oskb, GFP_ATOMIC);
124	if (!skb)
125		return;
126
127	/*
128	 *  Put the datagram to the queue so that raw_recvmsg() can
129	 *  get it from there.  We need to pass the interface index to
130	 *  raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
131	 *  containing the interface index.
132	 */
133
134	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
135	addr = (struct sockaddr_can *)skb->cb;
136	memset(addr, 0, sizeof(*addr));
137	addr->can_family  = AF_CAN;
138	addr->can_ifindex = skb->dev->ifindex;
139
140	/* add CAN specific message flags for raw_recvmsg() */
141	pflags = raw_flags(skb);
142	*pflags = 0;
143	if (oskb->sk)
144		*pflags |= MSG_DONTROUTE;
145	if (oskb->sk == sk)
146		*pflags |= MSG_CONFIRM;
147
148	if (sock_queue_rcv_skb(sk, skb) < 0)
149		kfree_skb(skb);
150}
151
152static int raw_enable_filters(struct net_device *dev, struct sock *sk,
153			      struct can_filter *filter, int count)
154{
155	int err = 0;
156	int i;
157
158	for (i = 0; i < count; i++) {
159		err = can_rx_register(dev, filter[i].can_id,
160				      filter[i].can_mask,
161				      raw_rcv, sk, "raw");
162		if (err) {
163			/* clean up successfully registered filters */
164			while (--i >= 0)
165				can_rx_unregister(dev, filter[i].can_id,
166						  filter[i].can_mask,
167						  raw_rcv, sk);
168			break;
169		}
170	}
171
172	return err;
173}
174
175static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
176				can_err_mask_t err_mask)
177{
178	int err = 0;
179
180	if (err_mask)
181		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
182				      raw_rcv, sk, "raw");
183
184	return err;
185}
186
187static void raw_disable_filters(struct net_device *dev, struct sock *sk,
188			      struct can_filter *filter, int count)
189{
190	int i;
191
192	for (i = 0; i < count; i++)
193		can_rx_unregister(dev, filter[i].can_id, filter[i].can_mask,
194				  raw_rcv, sk);
195}
196
197static inline void raw_disable_errfilter(struct net_device *dev,
198					 struct sock *sk,
199					 can_err_mask_t err_mask)
200
201{
202	if (err_mask)
203		can_rx_unregister(dev, 0, err_mask | CAN_ERR_FLAG,
204				  raw_rcv, sk);
205}
206
207static inline void raw_disable_allfilters(struct net_device *dev,
208					  struct sock *sk)
209{
210	struct raw_sock *ro = raw_sk(sk);
211
212	raw_disable_filters(dev, sk, ro->filter, ro->count);
213	raw_disable_errfilter(dev, sk, ro->err_mask);
214}
215
216static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
217{
218	struct raw_sock *ro = raw_sk(sk);
219	int err;
220
221	err = raw_enable_filters(dev, sk, ro->filter, ro->count);
222	if (!err) {
223		err = raw_enable_errfilter(dev, sk, ro->err_mask);
224		if (err)
225			raw_disable_filters(dev, sk, ro->filter, ro->count);
226	}
227
228	return err;
229}
230
231static int raw_notifier(struct notifier_block *nb,
232			unsigned long msg, void *data)
233{
234	struct net_device *dev = (struct net_device *)data;
235	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
236	struct sock *sk = &ro->sk;
237
238	if (!net_eq(dev_net(dev), &init_net))
239		return NOTIFY_DONE;
240
241	if (dev->type != ARPHRD_CAN)
242		return NOTIFY_DONE;
243
244	if (ro->ifindex != dev->ifindex)
245		return NOTIFY_DONE;
246
247	switch (msg) {
248
249	case NETDEV_UNREGISTER:
250		lock_sock(sk);
251		/* remove current filters & unregister */
252		if (ro->bound)
253			raw_disable_allfilters(dev, sk);
254
255		if (ro->count > 1)
256			kfree(ro->filter);
257
258		ro->ifindex = 0;
259		ro->bound   = 0;
260		ro->count   = 0;
261		release_sock(sk);
262
263		sk->sk_err = ENODEV;
264		if (!sock_flag(sk, SOCK_DEAD))
265			sk->sk_error_report(sk);
266		break;
267
268	case NETDEV_DOWN:
269		sk->sk_err = ENETDOWN;
270		if (!sock_flag(sk, SOCK_DEAD))
271			sk->sk_error_report(sk);
272		break;
273	}
274
275	return NOTIFY_DONE;
276}
277
278static int raw_init(struct sock *sk)
279{
280	struct raw_sock *ro = raw_sk(sk);
281
282	ro->bound            = 0;
283	ro->ifindex          = 0;
284
285	/* set default filter to single entry dfilter */
286	ro->dfilter.can_id   = 0;
287	ro->dfilter.can_mask = MASK_ALL;
288	ro->filter           = &ro->dfilter;
289	ro->count            = 1;
290
291	/* set default loopback behaviour */
292	ro->loopback         = 1;
293	ro->recv_own_msgs    = 0;
294
295	/* set notifier */
296	ro->notifier.notifier_call = raw_notifier;
297
298	register_netdevice_notifier(&ro->notifier);
299
300	return 0;
301}
302
303static int raw_release(struct socket *sock)
304{
305	struct sock *sk = sock->sk;
306	struct raw_sock *ro;
307
308	if (!sk)
309		return 0;
310
311	ro = raw_sk(sk);
312
313	unregister_netdevice_notifier(&ro->notifier);
314
315	lock_sock(sk);
316
317	/* remove current filters & unregister */
318	if (ro->bound) {
319		if (ro->ifindex) {
320			struct net_device *dev;
321
322			dev = dev_get_by_index(&init_net, ro->ifindex);
323			if (dev) {
324				raw_disable_allfilters(dev, sk);
325				dev_put(dev);
326			}
327		} else
328			raw_disable_allfilters(NULL, sk);
329	}
330
331	if (ro->count > 1)
332		kfree(ro->filter);
333
334	ro->ifindex = 0;
335	ro->bound   = 0;
336	ro->count   = 0;
337
338	sock_orphan(sk);
339	sock->sk = NULL;
340
341	release_sock(sk);
342	sock_put(sk);
343
344	return 0;
345}
346
347static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
348{
349	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
350	struct sock *sk = sock->sk;
351	struct raw_sock *ro = raw_sk(sk);
352	int ifindex;
353	int err = 0;
354	int notify_enetdown = 0;
355
356	if (len < sizeof(*addr))
357		return -EINVAL;
358
359	lock_sock(sk);
360
361	if (ro->bound && addr->can_ifindex == ro->ifindex)
362		goto out;
363
364	if (addr->can_ifindex) {
365		struct net_device *dev;
366
367		dev = dev_get_by_index(&init_net, addr->can_ifindex);
368		if (!dev) {
369			err = -ENODEV;
370			goto out;
371		}
372		if (dev->type != ARPHRD_CAN) {
373			dev_put(dev);
374			err = -ENODEV;
375			goto out;
376		}
377		if (!(dev->flags & IFF_UP))
378			notify_enetdown = 1;
379
380		ifindex = dev->ifindex;
381
382		/* filters set by default/setsockopt */
383		err = raw_enable_allfilters(dev, sk);
384		dev_put(dev);
385	} else {
386		ifindex = 0;
387
388		/* filters set by default/setsockopt */
389		err = raw_enable_allfilters(NULL, sk);
390	}
391
392	if (!err) {
393		if (ro->bound) {
394			/* unregister old filters */
395			if (ro->ifindex) {
396				struct net_device *dev;
397
398				dev = dev_get_by_index(&init_net, ro->ifindex);
399				if (dev) {
400					raw_disable_allfilters(dev, sk);
401					dev_put(dev);
402				}
403			} else
404				raw_disable_allfilters(NULL, sk);
405		}
406		ro->ifindex = ifindex;
407		ro->bound = 1;
408	}
409
410 out:
411	release_sock(sk);
412
413	if (notify_enetdown) {
414		sk->sk_err = ENETDOWN;
415		if (!sock_flag(sk, SOCK_DEAD))
416			sk->sk_error_report(sk);
417	}
418
419	return err;
420}
421
422static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
423		       int *len, int peer)
424{
425	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
426	struct sock *sk = sock->sk;
427	struct raw_sock *ro = raw_sk(sk);
428
429	if (peer)
430		return -EOPNOTSUPP;
431
432	memset(addr, 0, sizeof(*addr));
433	addr->can_family  = AF_CAN;
434	addr->can_ifindex = ro->ifindex;
435
436	*len = sizeof(*addr);
437
438	return 0;
439}
440
441static int raw_setsockopt(struct socket *sock, int level, int optname,
442			  char __user *optval, unsigned int optlen)
443{
444	struct sock *sk = sock->sk;
445	struct raw_sock *ro = raw_sk(sk);
446	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
447	struct can_filter sfilter;         /* single filter */
448	struct net_device *dev = NULL;
449	can_err_mask_t err_mask = 0;
450	int count = 0;
451	int err = 0;
452
453	if (level != SOL_CAN_RAW)
454		return -EINVAL;
455
456	switch (optname) {
457
458	case CAN_RAW_FILTER:
459		if (optlen % sizeof(struct can_filter) != 0)
460			return -EINVAL;
461
462		count = optlen / sizeof(struct can_filter);
463
464		if (count > 1) {
465			/* filter does not fit into dfilter => alloc space */
466			filter = memdup_user(optval, optlen);
467			if (IS_ERR(filter))
468				return PTR_ERR(filter);
469		} else if (count == 1) {
470			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
471				return -EFAULT;
472		}
473
474		lock_sock(sk);
475
476		if (ro->bound && ro->ifindex)
477			dev = dev_get_by_index(&init_net, ro->ifindex);
478
479		if (ro->bound) {
480			/* (try to) register the new filters */
481			if (count == 1)
482				err = raw_enable_filters(dev, sk, &sfilter, 1);
483			else
484				err = raw_enable_filters(dev, sk, filter,
485							 count);
486			if (err) {
487				if (count > 1)
488					kfree(filter);
489				goto out_fil;
490			}
491
492			/* remove old filter registrations */
493			raw_disable_filters(dev, sk, ro->filter, ro->count);
494		}
495
496		/* remove old filter space */
497		if (ro->count > 1)
498			kfree(ro->filter);
499
500		/* link new filters to the socket */
501		if (count == 1) {
502			/* copy filter data for single filter */
503			ro->dfilter = sfilter;
504			filter = &ro->dfilter;
505		}
506		ro->filter = filter;
507		ro->count  = count;
508
509 out_fil:
510		if (dev)
511			dev_put(dev);
512
513		release_sock(sk);
514
515		break;
516
517	case CAN_RAW_ERR_FILTER:
518		if (optlen != sizeof(err_mask))
519			return -EINVAL;
520
521		if (copy_from_user(&err_mask, optval, optlen))
522			return -EFAULT;
523
524		err_mask &= CAN_ERR_MASK;
525
526		lock_sock(sk);
527
528		if (ro->bound && ro->ifindex)
529			dev = dev_get_by_index(&init_net, ro->ifindex);
530
531		/* remove current error mask */
532		if (ro->bound) {
533			/* (try to) register the new err_mask */
534			err = raw_enable_errfilter(dev, sk, err_mask);
535
536			if (err)
537				goto out_err;
538
539			/* remove old err_mask registration */
540			raw_disable_errfilter(dev, sk, ro->err_mask);
541		}
542
543		/* link new err_mask to the socket */
544		ro->err_mask = err_mask;
545
546 out_err:
547		if (dev)
548			dev_put(dev);
549
550		release_sock(sk);
551
552		break;
553
554	case CAN_RAW_LOOPBACK:
555		if (optlen != sizeof(ro->loopback))
556			return -EINVAL;
557
558		if (copy_from_user(&ro->loopback, optval, optlen))
559			return -EFAULT;
560
561		break;
562
563	case CAN_RAW_RECV_OWN_MSGS:
564		if (optlen != sizeof(ro->recv_own_msgs))
565			return -EINVAL;
566
567		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
568			return -EFAULT;
569
570		break;
571
572	default:
573		return -ENOPROTOOPT;
574	}
575	return err;
576}
577
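/*
 * Illustrative userspace sketch -- not part of this kernel file and kept
 * out of any build by the "#if 0" guard.  It exercises the remaining
 * options handled by raw_setsockopt() above; 's' is an assumed CAN_RAW
 * socket and the error class constants come from <linux/can/error.h>.
 */
#if 0
	int loopback = 0;        /* default is 1 (local loopback enabled) */
	int recv_own_msgs = 1;   /* default is 0 (own frames not received) */
	can_err_mask_t err_mask = CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF;

	setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK,
		   &loopback, sizeof(loopback));
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS,
		   &recv_own_msgs, sizeof(recv_own_msgs));
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
		   &err_mask, sizeof(err_mask));
#endif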
578static int raw_getsockopt(struct socket *sock, int level, int optname,
579			  char __user *optval, int __user *optlen)
580{
581	struct sock *sk = sock->sk;
582	struct raw_sock *ro = raw_sk(sk);
583	int len;
584	void *val;
585	int err = 0;
586
587	if (level != SOL_CAN_RAW)
588		return -EINVAL;
589	if (get_user(len, optlen))
590		return -EFAULT;
591	if (len < 0)
592		return -EINVAL;
593
594	switch (optname) {
595
596	case CAN_RAW_FILTER:
597		lock_sock(sk);
598		if (ro->count > 0) {
599			int fsize = ro->count * sizeof(struct can_filter);
600			if (len > fsize)
601				len = fsize;
602			if (copy_to_user(optval, ro->filter, len))
603				err = -EFAULT;
604		} else
605			len = 0;
606		release_sock(sk);
607
608		if (!err)
609			err = put_user(len, optlen);
610		return err;
611
612	case CAN_RAW_ERR_FILTER:
613		if (len > sizeof(can_err_mask_t))
614			len = sizeof(can_err_mask_t);
615		val = &ro->err_mask;
616		break;
617
618	case CAN_RAW_LOOPBACK:
619		if (len > sizeof(int))
620			len = sizeof(int);
621		val = &ro->loopback;
622		break;
623
624	case CAN_RAW_RECV_OWN_MSGS:
625		if (len > sizeof(int))
626			len = sizeof(int);
627		val = &ro->recv_own_msgs;
628		break;
629
630	default:
631		return -ENOPROTOOPT;
632	}
633
634	if (put_user(len, optlen))
635		return -EFAULT;
636	if (copy_to_user(optval, val, len))
637		return -EFAULT;
638	return 0;
639}
640
641static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
642		       struct msghdr *msg, size_t size)
643{
644	struct sock *sk = sock->sk;
645	struct raw_sock *ro = raw_sk(sk);
646	struct sk_buff *skb;
647	struct net_device *dev;
648	int ifindex;
649	int err;
650
651	if (msg->msg_name) {
652		struct sockaddr_can *addr =
653			(struct sockaddr_can *)msg->msg_name;
654
655		if (msg->msg_namelen < sizeof(*addr))
656			return -EINVAL;
657
658		if (addr->can_family != AF_CAN)
659			return -EINVAL;
660
661		ifindex = addr->can_ifindex;
662	} else
663		ifindex = ro->ifindex;
664
665	if (size != sizeof(struct can_frame))
666		return -EINVAL;
667
668	dev = dev_get_by_index(&init_net, ifindex);
669	if (!dev)
670		return -ENXIO;
671
672	skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT,
673				  &err);
674	if (!skb)
675		goto put_dev;
676
677	err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
678	if (err < 0)
679		goto free_skb;
680	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
681	if (err < 0)
682		goto free_skb;
683
684	skb->dev = dev;
685	skb->sk  = sk;
686
687	err = can_send(skb, ro->loopback);
688
689	dev_put(dev);
690
691	if (err)
692		goto send_failed;
693
694	return size;
695
696free_skb:
697	kfree_skb(skb);
698put_dev:
699	dev_put(dev);
700send_failed:
701	return err;
702}
703
704static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
705		       struct msghdr *msg, size_t size, int flags)
706{
707	struct sock *sk = sock->sk;
708	struct sk_buff *skb;
709	int err = 0;
710	int noblock;
711
712	noblock =  flags & MSG_DONTWAIT;
713	flags   &= ~MSG_DONTWAIT;
714
715	skb = skb_recv_datagram(sk, flags, noblock, &err);
716	if (!skb)
717		return err;
718
719	if (size < skb->len)
720		msg->msg_flags |= MSG_TRUNC;
721	else
722		size = skb->len;
723
724	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
725	if (err < 0) {
726		skb_free_datagram(sk, skb);
727		return err;
728	}
729
730	sock_recv_ts_and_drops(msg, sk, skb);
731
732	if (msg->msg_name) {
733		msg->msg_namelen = sizeof(struct sockaddr_can);
734		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
735	}
736
737	/* assign the flags that have been recorded in raw_rcv() */
738	msg->msg_flags |= *(raw_flags(skb));
739
740	skb_free_datagram(sk, skb);
741
742	return size;
743}
744
745static const struct proto_ops raw_ops = {
746	.family        = PF_CAN,
747	.release       = raw_release,
748	.bind          = raw_bind,
749	.connect       = sock_no_connect,
750	.socketpair    = sock_no_socketpair,
751	.accept        = sock_no_accept,
752	.getname       = raw_getname,
753	.poll          = datagram_poll,
754	.ioctl         = can_ioctl,	/* use can_ioctl() from af_can.c */
755	.listen        = sock_no_listen,
756	.shutdown      = sock_no_shutdown,
757	.setsockopt    = raw_setsockopt,
758	.getsockopt    = raw_getsockopt,
759	.sendmsg       = raw_sendmsg,
760	.recvmsg       = raw_recvmsg,
761	.mmap          = sock_no_mmap,
762	.sendpage      = sock_no_sendpage,
763};
764
765static struct proto raw_proto __read_mostly = {
766	.name       = "CAN_RAW",
767	.owner      = THIS_MODULE,
768	.obj_size   = sizeof(struct raw_sock),
769	.init       = raw_init,
770};
771
772static const struct can_proto raw_can_proto = {
773	.type       = SOCK_RAW,
774	.protocol   = CAN_RAW,
775	.ops        = &raw_ops,
776	.prot       = &raw_proto,
777};
778
779static __init int raw_module_init(void)
780{
781	int err;
782
783	printk(banner);
784
785	err = can_proto_register(&raw_can_proto);
786	if (err < 0)
787		printk(KERN_ERR "can: registration of raw protocol failed\n");
788
789	return err;
790}
791
792static __exit void raw_module_exit(void)
793{
794	can_proto_unregister(&raw_can_proto);
795}
796
797module_init(raw_module_init);
798module_exit(raw_module_exit);
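
For reference, a minimal userspace sketch of the paths implemented above: socket() reaches raw_init(), bind() reaches raw_bind(), write() goes through raw_sendmsg() and read() through raw_recvmsg(). The interface name "can0" and the frame contents are assumptions for the example, and error handling is reduced to early returns.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>

int main(void)
{
	struct sockaddr_can addr = { 0 };
	struct can_frame frame = { 0 };
	struct ifreq ifr;
	int s;

	/* protocol CAN_RAW selects the raw_can_proto registered above */
	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return 1;

	/* resolve the ifindex of the assumed interface "can0" */
	strcpy(ifr.ifr_name, "can0");
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return 1;

	addr.can_family  = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;

	/* handled by raw_bind() */
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* raw_sendmsg() only accepts writes of sizeof(struct can_frame) */
	frame.can_id  = 0x123;
	frame.can_dlc = 2;
	frame.data[0] = 0xde;
	frame.data[1] = 0xad;
	if (write(s, &frame, sizeof(frame)) != sizeof(frame))
		return 1;

	/* blocking read of the next frame queued by raw_rcv()/raw_recvmsg() */
	if (read(s, &frame, sizeof(frame)) == sizeof(frame))
		printf("received can_id 0x%03x, dlc %d\n",
		       frame.can_id, frame.can_dlc);

	close(s);
	return 0;
}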
raw.c (Linux v4.17)
  1/*
  2 * raw.c - Raw sockets for protocol family CAN
  3 *
  4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  5 * All rights reserved.
  6 *
  7 * Redistribution and use in source and binary forms, with or without
  8 * modification, are permitted provided that the following conditions
  9 * are met:
 10 * 1. Redistributions of source code must retain the above copyright
 11 *    notice, this list of conditions and the following disclaimer.
 12 * 2. Redistributions in binary form must reproduce the above copyright
 13 *    notice, this list of conditions and the following disclaimer in the
 14 *    documentation and/or other materials provided with the distribution.
 15 * 3. Neither the name of Volkswagen nor the names of its contributors
 16 *    may be used to endorse or promote products derived from this software
 17 *    without specific prior written permission.
 18 *
 19 * Alternatively, provided that this notice is retained in full, this
 20 * software may be distributed under the terms of the GNU General
 21 * Public License ("GPL") version 2, in which case the provisions of the
 22 * GPL apply INSTEAD OF those given above.
 23 *
 24 * The provided data structures and external interfaces from this code
 25 * are not restricted to be used by modules with a GPL compatible license.
 26 *
 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 38 * DAMAGE.
 39 *
 40 */
 41
 42#include <linux/module.h>
 43#include <linux/init.h>
 44#include <linux/uio.h>
 45#include <linux/net.h>
 46#include <linux/slab.h>
 47#include <linux/netdevice.h>
 48#include <linux/socket.h>
 49#include <linux/if_arp.h>
 50#include <linux/skbuff.h>
 51#include <linux/can.h>
 52#include <linux/can/core.h>
 53#include <linux/can/skb.h>
 54#include <linux/can/raw.h>
 55#include <net/sock.h>
 56#include <net/net_namespace.h>
 57
 58#define CAN_RAW_VERSION CAN_VERSION
 59
 60MODULE_DESCRIPTION("PF_CAN raw protocol");
 61MODULE_LICENSE("Dual BSD/GPL");
 62MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 63MODULE_ALIAS("can-proto-1");
 64
 65#define MASK_ALL 0
 66
 67/*
 68 * A raw socket has a list of can_filters attached to it, each receiving
 69 * the CAN frames matching that filter.  If the filter list is empty,
 70 * no CAN frames will be received by the socket.  The default after
 71 * opening the socket, is to have one filter which receives all frames.
 72 * The filter list is allocated dynamically with the exception of the
 73 * list containing only one item.  This common case is optimized by
 74 * storing the single filter in dfilter, to avoid using dynamic memory.
 75 */
 76
 77struct uniqframe {
 78	int skbcnt;
 79	const struct sk_buff *skb;
 80	unsigned int join_rx_count;
 81};
 82
 83struct raw_sock {
 84	struct sock sk;
 85	int bound;
 86	int ifindex;
 87	struct notifier_block notifier;
 88	int loopback;
 89	int recv_own_msgs;
 90	int fd_frames;
 91	int join_filters;
 92	int count;                 /* number of active filters */
 93	struct can_filter dfilter; /* default/single filter */
 94	struct can_filter *filter; /* pointer to filter(s) */
 95	can_err_mask_t err_mask;
 96	struct uniqframe __percpu *uniq;
 97};
 98
 99/*
100 * Return pointer to store the extra msg flags for raw_recvmsg().
101 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
102 * in skb->cb.
103 */
104static inline unsigned int *raw_flags(struct sk_buff *skb)
105{
106	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
107			       sizeof(unsigned int));
108
109	/* return pointer after struct sockaddr_can */
110	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
111}
112
113static inline struct raw_sock *raw_sk(const struct sock *sk)
114{
115	return (struct raw_sock *)sk;
116}
117
118static void raw_rcv(struct sk_buff *oskb, void *data)
119{
120	struct sock *sk = (struct sock *)data;
121	struct raw_sock *ro = raw_sk(sk);
122	struct sockaddr_can *addr;
123	struct sk_buff *skb;
124	unsigned int *pflags;
125
126	/* check the received tx sock reference */
127	if (!ro->recv_own_msgs && oskb->sk == sk)
128		return;
129
130	/* do not pass non-CAN2.0 frames to a legacy socket */
131	if (!ro->fd_frames && oskb->len != CAN_MTU)
132		return;
133
134	/* eliminate multiple filter matches for the same skb */
135	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
136	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
137		if (ro->join_filters) {
138			this_cpu_inc(ro->uniq->join_rx_count);
139			/* drop frame until all enabled filters matched */
140			if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
141				return;
142		} else {
143			return;
144		}
145	} else {
146		this_cpu_ptr(ro->uniq)->skb = oskb;
147		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
148		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
149		/* drop first frame to check all enabled filters? */
150		if (ro->join_filters && ro->count > 1)
151			return;
152	}
153
154	/* clone the given skb to be able to enqueue it into the rcv queue */
155	skb = skb_clone(oskb, GFP_ATOMIC);
156	if (!skb)
157		return;
158
159	/*
160	 *  Put the datagram to the queue so that raw_recvmsg() can
161	 *  get it from there.  We need to pass the interface index to
162	 *  raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
163	 *  containing the interface index.
164	 */
165
166	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
167	addr = (struct sockaddr_can *)skb->cb;
168	memset(addr, 0, sizeof(*addr));
169	addr->can_family  = AF_CAN;
170	addr->can_ifindex = skb->dev->ifindex;
171
172	/* add CAN specific message flags for raw_recvmsg() */
173	pflags = raw_flags(skb);
174	*pflags = 0;
175	if (oskb->sk)
176		*pflags |= MSG_DONTROUTE;
177	if (oskb->sk == sk)
178		*pflags |= MSG_CONFIRM;
179
180	if (sock_queue_rcv_skb(sk, skb) < 0)
181		kfree_skb(skb);
182}
183
184static int raw_enable_filters(struct net *net, struct net_device *dev,
185			      struct sock *sk, struct can_filter *filter,
186			      int count)
187{
188	int err = 0;
189	int i;
190
191	for (i = 0; i < count; i++) {
192		err = can_rx_register(net, dev, filter[i].can_id,
193				      filter[i].can_mask,
194				      raw_rcv, sk, "raw", sk);
195		if (err) {
196			/* clean up successfully registered filters */
197			while (--i >= 0)
198				can_rx_unregister(net, dev, filter[i].can_id,
199						  filter[i].can_mask,
200						  raw_rcv, sk);
201			break;
202		}
203	}
204
205	return err;
206}
207
208static int raw_enable_errfilter(struct net *net, struct net_device *dev,
209				struct sock *sk, can_err_mask_t err_mask)
210{
211	int err = 0;
212
213	if (err_mask)
214		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
215				      raw_rcv, sk, "raw", sk);
216
217	return err;
218}
219
220static void raw_disable_filters(struct net *net, struct net_device *dev,
221				struct sock *sk, struct can_filter *filter,
222				int count)
223{
224	int i;
225
226	for (i = 0; i < count; i++)
227		can_rx_unregister(net, dev, filter[i].can_id,
228				  filter[i].can_mask, raw_rcv, sk);
229}
230
231static inline void raw_disable_errfilter(struct net *net,
232					 struct net_device *dev,
233					 struct sock *sk,
234					 can_err_mask_t err_mask)
235
236{
237	if (err_mask)
238		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
239				  raw_rcv, sk);
240}
241
242static inline void raw_disable_allfilters(struct net *net,
243					  struct net_device *dev,
244					  struct sock *sk)
245{
246	struct raw_sock *ro = raw_sk(sk);
247
248	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
249	raw_disable_errfilter(net, dev, sk, ro->err_mask);
250}
251
252static int raw_enable_allfilters(struct net *net, struct net_device *dev,
253				 struct sock *sk)
254{
255	struct raw_sock *ro = raw_sk(sk);
256	int err;
257
258	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
259	if (!err) {
260		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
261		if (err)
262			raw_disable_filters(net, dev, sk, ro->filter,
263					    ro->count);
264	}
265
266	return err;
267}
268
269static int raw_notifier(struct notifier_block *nb,
270			unsigned long msg, void *ptr)
271{
272	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
273	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
274	struct sock *sk = &ro->sk;
275
276	if (!net_eq(dev_net(dev), sock_net(sk)))
277		return NOTIFY_DONE;
278
279	if (dev->type != ARPHRD_CAN)
280		return NOTIFY_DONE;
281
282	if (ro->ifindex != dev->ifindex)
283		return NOTIFY_DONE;
284
285	switch (msg) {
286
287	case NETDEV_UNREGISTER:
288		lock_sock(sk);
289		/* remove current filters & unregister */
290		if (ro->bound)
291			raw_disable_allfilters(dev_net(dev), dev, sk);
292
293		if (ro->count > 1)
294			kfree(ro->filter);
295
296		ro->ifindex = 0;
297		ro->bound   = 0;
298		ro->count   = 0;
299		release_sock(sk);
300
301		sk->sk_err = ENODEV;
302		if (!sock_flag(sk, SOCK_DEAD))
303			sk->sk_error_report(sk);
304		break;
305
306	case NETDEV_DOWN:
307		sk->sk_err = ENETDOWN;
308		if (!sock_flag(sk, SOCK_DEAD))
309			sk->sk_error_report(sk);
310		break;
311	}
312
313	return NOTIFY_DONE;
314}
315
316static int raw_init(struct sock *sk)
317{
318	struct raw_sock *ro = raw_sk(sk);
319
320	ro->bound            = 0;
321	ro->ifindex          = 0;
322
323	/* set default filter to single entry dfilter */
324	ro->dfilter.can_id   = 0;
325	ro->dfilter.can_mask = MASK_ALL;
326	ro->filter           = &ro->dfilter;
327	ro->count            = 1;
328
329	/* set default loopback behaviour */
330	ro->loopback         = 1;
331	ro->recv_own_msgs    = 0;
332	ro->fd_frames        = 0;
333	ro->join_filters     = 0;
334
335	/* alloc_percpu provides zero'ed memory */
336	ro->uniq = alloc_percpu(struct uniqframe);
337	if (unlikely(!ro->uniq))
338		return -ENOMEM;
339
340	/* set notifier */
341	ro->notifier.notifier_call = raw_notifier;
342
343	register_netdevice_notifier(&ro->notifier);
344
345	return 0;
346}
347
348static int raw_release(struct socket *sock)
349{
350	struct sock *sk = sock->sk;
351	struct raw_sock *ro;
352
353	if (!sk)
354		return 0;
355
356	ro = raw_sk(sk);
357
358	unregister_netdevice_notifier(&ro->notifier);
359
360	lock_sock(sk);
361
362	/* remove current filters & unregister */
363	if (ro->bound) {
364		if (ro->ifindex) {
365			struct net_device *dev;
366
367			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
368			if (dev) {
369				raw_disable_allfilters(dev_net(dev), dev, sk);
370				dev_put(dev);
371			}
372		} else
373			raw_disable_allfilters(sock_net(sk), NULL, sk);
374	}
375
376	if (ro->count > 1)
377		kfree(ro->filter);
378
379	ro->ifindex = 0;
380	ro->bound   = 0;
381	ro->count   = 0;
382	free_percpu(ro->uniq);
383
384	sock_orphan(sk);
385	sock->sk = NULL;
386
387	release_sock(sk);
388	sock_put(sk);
389
390	return 0;
391}
392
393static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
394{
395	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
396	struct sock *sk = sock->sk;
397	struct raw_sock *ro = raw_sk(sk);
398	int ifindex;
399	int err = 0;
400	int notify_enetdown = 0;
401
402	if (len < sizeof(*addr))
403		return -EINVAL;
404	if (addr->can_family != AF_CAN)
405		return -EINVAL;
406
407	lock_sock(sk);
408
409	if (ro->bound && addr->can_ifindex == ro->ifindex)
410		goto out;
411
412	if (addr->can_ifindex) {
413		struct net_device *dev;
414
415		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
416		if (!dev) {
417			err = -ENODEV;
418			goto out;
419		}
420		if (dev->type != ARPHRD_CAN) {
421			dev_put(dev);
422			err = -ENODEV;
423			goto out;
424		}
425		if (!(dev->flags & IFF_UP))
426			notify_enetdown = 1;
427
428		ifindex = dev->ifindex;
429
430		/* filters set by default/setsockopt */
431		err = raw_enable_allfilters(sock_net(sk), dev, sk);
432		dev_put(dev);
433	} else {
434		ifindex = 0;
435
436		/* filters set by default/setsockopt */
437		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
438	}
439
440	if (!err) {
441		if (ro->bound) {
442			/* unregister old filters */
443			if (ro->ifindex) {
444				struct net_device *dev;
445
446				dev = dev_get_by_index(sock_net(sk),
447						       ro->ifindex);
448				if (dev) {
449					raw_disable_allfilters(dev_net(dev),
450							       dev, sk);
451					dev_put(dev);
452				}
453			} else
454				raw_disable_allfilters(sock_net(sk), NULL, sk);
455		}
456		ro->ifindex = ifindex;
457		ro->bound = 1;
458	}
459
460 out:
461	release_sock(sk);
462
463	if (notify_enetdown) {
464		sk->sk_err = ENETDOWN;
465		if (!sock_flag(sk, SOCK_DEAD))
466			sk->sk_error_report(sk);
467	}
468
469	return err;
470}
471
472static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
473		       int peer)
474{
475	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
476	struct sock *sk = sock->sk;
477	struct raw_sock *ro = raw_sk(sk);
478
479	if (peer)
480		return -EOPNOTSUPP;
481
482	memset(addr, 0, sizeof(*addr));
483	addr->can_family  = AF_CAN;
484	addr->can_ifindex = ro->ifindex;
485
486	return sizeof(*addr);
487}
488
489static int raw_setsockopt(struct socket *sock, int level, int optname,
490			  char __user *optval, unsigned int optlen)
491{
492	struct sock *sk = sock->sk;
493	struct raw_sock *ro = raw_sk(sk);
494	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
495	struct can_filter sfilter;         /* single filter */
496	struct net_device *dev = NULL;
497	can_err_mask_t err_mask = 0;
498	int count = 0;
499	int err = 0;
500
501	if (level != SOL_CAN_RAW)
502		return -EINVAL;
503
504	switch (optname) {
505
506	case CAN_RAW_FILTER:
507		if (optlen % sizeof(struct can_filter) != 0)
508			return -EINVAL;
509
510		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
511			return -EINVAL;
512
513		count = optlen / sizeof(struct can_filter);
514
515		if (count > 1) {
516			/* filter does not fit into dfilter => alloc space */
517			filter = memdup_user(optval, optlen);
518			if (IS_ERR(filter))
519				return PTR_ERR(filter);
520		} else if (count == 1) {
521			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
522				return -EFAULT;
523		}
524
525		lock_sock(sk);
526
527		if (ro->bound && ro->ifindex)
528			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
529
530		if (ro->bound) {
531			/* (try to) register the new filters */
532			if (count == 1)
533				err = raw_enable_filters(sock_net(sk), dev, sk,
534							 &sfilter, 1);
535			else
536				err = raw_enable_filters(sock_net(sk), dev, sk,
537							 filter, count);
538			if (err) {
539				if (count > 1)
540					kfree(filter);
541				goto out_fil;
542			}
543
544			/* remove old filter registrations */
545			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
546					    ro->count);
547		}
548
549		/* remove old filter space */
550		if (ro->count > 1)
551			kfree(ro->filter);
552
553		/* link new filters to the socket */
554		if (count == 1) {
555			/* copy filter data for single filter */
556			ro->dfilter = sfilter;
557			filter = &ro->dfilter;
558		}
559		ro->filter = filter;
560		ro->count  = count;
561
562 out_fil:
563		if (dev)
564			dev_put(dev);
565
566		release_sock(sk);
567
568		break;
569
570	case CAN_RAW_ERR_FILTER:
571		if (optlen != sizeof(err_mask))
572			return -EINVAL;
573
574		if (copy_from_user(&err_mask, optval, optlen))
575			return -EFAULT;
576
577		err_mask &= CAN_ERR_MASK;
578
579		lock_sock(sk);
580
581		if (ro->bound && ro->ifindex)
582			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
583
584		/* remove current error mask */
585		if (ro->bound) {
586			/* (try to) register the new err_mask */
587			err = raw_enable_errfilter(sock_net(sk), dev, sk,
588						   err_mask);
589
590			if (err)
591				goto out_err;
592
593			/* remove old err_mask registration */
594			raw_disable_errfilter(sock_net(sk), dev, sk,
595					      ro->err_mask);
596		}
597
598		/* link new err_mask to the socket */
599		ro->err_mask = err_mask;
600
601 out_err:
602		if (dev)
603			dev_put(dev);
604
605		release_sock(sk);
606
607		break;
608
609	case CAN_RAW_LOOPBACK:
610		if (optlen != sizeof(ro->loopback))
611			return -EINVAL;
612
613		if (copy_from_user(&ro->loopback, optval, optlen))
614			return -EFAULT;
615
616		break;
617
618	case CAN_RAW_RECV_OWN_MSGS:
619		if (optlen != sizeof(ro->recv_own_msgs))
620			return -EINVAL;
621
622		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
623			return -EFAULT;
624
625		break;
626
627	case CAN_RAW_FD_FRAMES:
628		if (optlen != sizeof(ro->fd_frames))
629			return -EINVAL;
630
631		if (copy_from_user(&ro->fd_frames, optval, optlen))
632			return -EFAULT;
633
634		break;
635
636	case CAN_RAW_JOIN_FILTERS:
637		if (optlen != sizeof(ro->join_filters))
638			return -EINVAL;
639
640		if (copy_from_user(&ro->join_filters, optval, optlen))
641			return -EFAULT;
642
643		break;
644
645	default:
646		return -ENOPROTOOPT;
647	}
648	return err;
649}
650
651static int raw_getsockopt(struct socket *sock, int level, int optname,
652			  char __user *optval, int __user *optlen)
653{
654	struct sock *sk = sock->sk;
655	struct raw_sock *ro = raw_sk(sk);
656	int len;
657	void *val;
658	int err = 0;
659
660	if (level != SOL_CAN_RAW)
661		return -EINVAL;
662	if (get_user(len, optlen))
663		return -EFAULT;
664	if (len < 0)
665		return -EINVAL;
666
667	switch (optname) {
668
669	case CAN_RAW_FILTER:
670		lock_sock(sk);
671		if (ro->count > 0) {
672			int fsize = ro->count * sizeof(struct can_filter);
673			if (len > fsize)
674				len = fsize;
675			if (copy_to_user(optval, ro->filter, len))
676				err = -EFAULT;
677		} else
678			len = 0;
679		release_sock(sk);
680
681		if (!err)
682			err = put_user(len, optlen);
683		return err;
684
685	case CAN_RAW_ERR_FILTER:
686		if (len > sizeof(can_err_mask_t))
687			len = sizeof(can_err_mask_t);
688		val = &ro->err_mask;
689		break;
690
691	case CAN_RAW_LOOPBACK:
692		if (len > sizeof(int))
693			len = sizeof(int);
694		val = &ro->loopback;
695		break;
696
697	case CAN_RAW_RECV_OWN_MSGS:
698		if (len > sizeof(int))
699			len = sizeof(int);
700		val = &ro->recv_own_msgs;
701		break;
702
703	case CAN_RAW_FD_FRAMES:
704		if (len > sizeof(int))
705			len = sizeof(int);
706		val = &ro->fd_frames;
707		break;
708
709	case CAN_RAW_JOIN_FILTERS:
710		if (len > sizeof(int))
711			len = sizeof(int);
712		val = &ro->join_filters;
713		break;
714
715	default:
716		return -ENOPROTOOPT;
717	}
718
719	if (put_user(len, optlen))
720		return -EFAULT;
721	if (copy_to_user(optval, val, len))
722		return -EFAULT;
723	return 0;
724}
725
726static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
727{
728	struct sock *sk = sock->sk;
729	struct raw_sock *ro = raw_sk(sk);
730	struct sk_buff *skb;
731	struct net_device *dev;
732	int ifindex;
733	int err;
734
735	if (msg->msg_name) {
736		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
737
738		if (msg->msg_namelen < sizeof(*addr))
739			return -EINVAL;
740
741		if (addr->can_family != AF_CAN)
742			return -EINVAL;
743
744		ifindex = addr->can_ifindex;
745	} else
746		ifindex = ro->ifindex;
747
748	if (ro->fd_frames) {
749		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
750			return -EINVAL;
751	} else {
752		if (unlikely(size != CAN_MTU))
753			return -EINVAL;
754	}
755
756	dev = dev_get_by_index(sock_net(sk), ifindex);
757	if (!dev)
758		return -ENXIO;
759
760	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
761				  msg->msg_flags & MSG_DONTWAIT, &err);
762	if (!skb)
763		goto put_dev;
764
765	can_skb_reserve(skb);
766	can_skb_prv(skb)->ifindex = dev->ifindex;
767	can_skb_prv(skb)->skbcnt = 0;
768
769	err = memcpy_from_msg(skb_put(skb, size), msg, size);
770	if (err < 0)
771		goto free_skb;
772
773	sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);
774
775	skb->dev = dev;
776	skb->sk  = sk;
777	skb->priority = sk->sk_priority;
778
779	err = can_send(skb, ro->loopback);
780
781	dev_put(dev);
782
783	if (err)
784		goto send_failed;
785
786	return size;
787
788free_skb:
789	kfree_skb(skb);
790put_dev:
791	dev_put(dev);
792send_failed:
793	return err;
794}
795
796static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
797		       int flags)
798{
799	struct sock *sk = sock->sk;
800	struct sk_buff *skb;
801	int err = 0;
802	int noblock;
803
804	noblock =  flags & MSG_DONTWAIT;
805	flags   &= ~MSG_DONTWAIT;
806
807	skb = skb_recv_datagram(sk, flags, noblock, &err);
808	if (!skb)
809		return err;
810
811	if (size < skb->len)
812		msg->msg_flags |= MSG_TRUNC;
813	else
814		size = skb->len;
815
816	err = memcpy_to_msg(msg, skb->data, size);
817	if (err < 0) {
818		skb_free_datagram(sk, skb);
819		return err;
820	}
821
822	sock_recv_ts_and_drops(msg, sk, skb);
823
824	if (msg->msg_name) {
825		__sockaddr_check_size(sizeof(struct sockaddr_can));
826		msg->msg_namelen = sizeof(struct sockaddr_can);
827		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
828	}
829
830	/* assign the flags that have been recorded in raw_rcv() */
831	msg->msg_flags |= *(raw_flags(skb));
832
833	skb_free_datagram(sk, skb);
834
835	return size;
836}
837
838static const struct proto_ops raw_ops = {
839	.family        = PF_CAN,
840	.release       = raw_release,
841	.bind          = raw_bind,
842	.connect       = sock_no_connect,
843	.socketpair    = sock_no_socketpair,
844	.accept        = sock_no_accept,
845	.getname       = raw_getname,
846	.poll          = datagram_poll,
847	.ioctl         = can_ioctl,	/* use can_ioctl() from af_can.c */
848	.listen        = sock_no_listen,
849	.shutdown      = sock_no_shutdown,
850	.setsockopt    = raw_setsockopt,
851	.getsockopt    = raw_getsockopt,
852	.sendmsg       = raw_sendmsg,
853	.recvmsg       = raw_recvmsg,
854	.mmap          = sock_no_mmap,
855	.sendpage      = sock_no_sendpage,
856};
857
858static struct proto raw_proto __read_mostly = {
859	.name       = "CAN_RAW",
860	.owner      = THIS_MODULE,
861	.obj_size   = sizeof(struct raw_sock),
862	.init       = raw_init,
863};
864
865static const struct can_proto raw_can_proto = {
866	.type       = SOCK_RAW,
867	.protocol   = CAN_RAW,
868	.ops        = &raw_ops,
869	.prot       = &raw_proto,
870};
871
872static __init int raw_module_init(void)
873{
874	int err;
875
876	pr_info("can: raw protocol (rev " CAN_RAW_VERSION ")\n");
877
878	err = can_proto_register(&raw_can_proto);
879	if (err < 0)
880		printk(KERN_ERR "can: registration of raw protocol failed\n");
881
882	return err;
883}
884
885static __exit void raw_module_exit(void)
886{
887	can_proto_unregister(&raw_can_proto);
888}
889
890module_init(raw_module_init);
891module_exit(raw_module_exit);
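
Compared with v3.5.6, this version adds the CAN_RAW_FD_FRAMES and CAN_RAW_JOIN_FILTERS options handled in raw_setsockopt() above. A minimal sketch, assuming 's' is a bound CAN_RAW socket as in the earlier example: it enables both options and sends one CAN FD frame (once fd_frames is set, raw_sendmsg() also accepts CANFD_MTU sized writes).

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

/* 's' is assumed to be a bound CAN_RAW socket */
static int send_fd_frame(int s)
{
	int enable = 1;
	struct canfd_frame fdframe = { 0 };

	/* let raw_rcv()/raw_sendmsg() pass CAN FD sized frames */
	if (setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
		       &enable, sizeof(enable)) < 0)
		return -1;

	/* deliver a frame only after all configured filters matched it */
	if (setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS,
		       &enable, sizeof(enable)) < 0)
		return -1;

	fdframe.can_id = 0x123;
	fdframe.len    = 12;		/* CAN FD allows up to 64 data bytes */
	memset(fdframe.data, 0x55, fdframe.len);

	/* sizeof(struct canfd_frame) == CANFD_MTU */
	if (write(s, &fdframe, sizeof(fdframe)) != sizeof(fdframe))
		return -1;

	return 0;
}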