net/can/raw.c as of Linux v4.10.11:
  1/*
  2 * raw.c - Raw sockets for protocol family CAN
  3 *
  4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  5 * All rights reserved.
  6 *
  7 * Redistribution and use in source and binary forms, with or without
  8 * modification, are permitted provided that the following conditions
  9 * are met:
 10 * 1. Redistributions of source code must retain the above copyright
 11 *    notice, this list of conditions and the following disclaimer.
 12 * 2. Redistributions in binary form must reproduce the above copyright
 13 *    notice, this list of conditions and the following disclaimer in the
 14 *    documentation and/or other materials provided with the distribution.
 15 * 3. Neither the name of Volkswagen nor the names of its contributors
 16 *    may be used to endorse or promote products derived from this software
 17 *    without specific prior written permission.
 18 *
 19 * Alternatively, provided that this notice is retained in full, this
 20 * software may be distributed under the terms of the GNU General
 21 * Public License ("GPL") version 2, in which case the provisions of the
 22 * GPL apply INSTEAD OF those given above.
 23 *
 24 * The provided data structures and external interfaces from this code
 25 * are not restricted to be used by modules with a GPL compatible license.
 26 *
 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 38 * DAMAGE.
 39 *
 40 */
 41
 42#include <linux/module.h>
 43#include <linux/init.h>
 44#include <linux/uio.h>
 45#include <linux/net.h>
 46#include <linux/slab.h>
 47#include <linux/netdevice.h>
 48#include <linux/socket.h>
 49#include <linux/if_arp.h>
 50#include <linux/skbuff.h>
 51#include <linux/can.h>
 52#include <linux/can/core.h>
 53#include <linux/can/skb.h>
 54#include <linux/can/raw.h>
 55#include <net/sock.h>
 56#include <net/net_namespace.h>
 57
 58#define CAN_RAW_VERSION CAN_VERSION
 59
 60MODULE_DESCRIPTION("PF_CAN raw protocol");
 61MODULE_LICENSE("Dual BSD/GPL");
 62MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 63MODULE_ALIAS("can-proto-1");
 64
 65#define MASK_ALL 0
 66
 67/*
 68 * A raw socket has a list of can_filters attached to it, each receiving
 69 * the CAN frames matching that filter.  If the filter list is empty,
 70 * no CAN frames will be received by the socket.  The default after
  71 * opening the socket is to have one filter which receives all frames.
 72 * The filter list is allocated dynamically with the exception of the
 73 * list containing only one item.  This common case is optimized by
 74 * storing the single filter in dfilter, to avoid using dynamic memory.
 75 */
 76
 77struct uniqframe {
 78	int skbcnt;
 79	const struct sk_buff *skb;
 80	unsigned int join_rx_count;
 81};
 82
 83struct raw_sock {
 84	struct sock sk;
 85	int bound;
 86	int ifindex;
 87	struct notifier_block notifier;
 88	int loopback;
 89	int recv_own_msgs;
 90	int fd_frames;
 91	int join_filters;
 92	int count;                 /* number of active filters */
 93	struct can_filter dfilter; /* default/single filter */
 94	struct can_filter *filter; /* pointer to filter(s) */
 95	can_err_mask_t err_mask;
 96	struct uniqframe __percpu *uniq;
 97};
 98
 99/*
100 * Return pointer to store the extra msg flags for raw_recvmsg().
101 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
102 * in skb->cb.
103 */
104static inline unsigned int *raw_flags(struct sk_buff *skb)
105{
106	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
107			       sizeof(unsigned int));
108
109	/* return pointer after struct sockaddr_can */
110	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
111}
112
113static inline struct raw_sock *raw_sk(const struct sock *sk)
114{
115	return (struct raw_sock *)sk;
116}
117
118static void raw_rcv(struct sk_buff *oskb, void *data)
119{
120	struct sock *sk = (struct sock *)data;
121	struct raw_sock *ro = raw_sk(sk);
122	struct sockaddr_can *addr;
123	struct sk_buff *skb;
124	unsigned int *pflags;
125
126	/* check the received tx sock reference */
127	if (!ro->recv_own_msgs && oskb->sk == sk)
128		return;
129
130	/* do not pass non-CAN2.0 frames to a legacy socket */
131	if (!ro->fd_frames && oskb->len != CAN_MTU)
132		return;
133
134	/* eliminate multiple filter matches for the same skb */
135	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
136	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
137		if (ro->join_filters) {
138			this_cpu_inc(ro->uniq->join_rx_count);
139			/* drop frame until all enabled filters matched */
140			if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
141				return;
142		} else {
143			return;
144		}
145	} else {
146		this_cpu_ptr(ro->uniq)->skb = oskb;
147		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
148		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
149		/* drop first frame to check all enabled filters? */
150		if (ro->join_filters && ro->count > 1)
151			return;
152	}
153
154	/* clone the given skb to be able to enqueue it into the rcv queue */
155	skb = skb_clone(oskb, GFP_ATOMIC);
156	if (!skb)
157		return;
158
159	/*
160	 *  Put the datagram to the queue so that raw_recvmsg() can
161	 *  get it from there.  We need to pass the interface index to
162	 *  raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
163	 *  containing the interface index.
164	 */
165
166	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
167	addr = (struct sockaddr_can *)skb->cb;
168	memset(addr, 0, sizeof(*addr));
169	addr->can_family  = AF_CAN;
170	addr->can_ifindex = skb->dev->ifindex;
171
172	/* add CAN specific message flags for raw_recvmsg() */
173	pflags = raw_flags(skb);
174	*pflags = 0;
175	if (oskb->sk)
176		*pflags |= MSG_DONTROUTE;
177	if (oskb->sk == sk)
178		*pflags |= MSG_CONFIRM;
179
180	if (sock_queue_rcv_skb(sk, skb) < 0)
181		kfree_skb(skb);
182}
183
184static int raw_enable_filters(struct net_device *dev, struct sock *sk,
185			      struct can_filter *filter, int count)
186{
187	int err = 0;
188	int i;
189
190	for (i = 0; i < count; i++) {
191		err = can_rx_register(dev, filter[i].can_id,
192				      filter[i].can_mask,
193				      raw_rcv, sk, "raw", sk);
194		if (err) {
195			/* clean up successfully registered filters */
196			while (--i >= 0)
197				can_rx_unregister(dev, filter[i].can_id,
198						  filter[i].can_mask,
199						  raw_rcv, sk);
200			break;
201		}
202	}
203
204	return err;
205}
206
207static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
208				can_err_mask_t err_mask)
209{
210	int err = 0;
211
212	if (err_mask)
213		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
214				      raw_rcv, sk, "raw", sk);
215
216	return err;
217}
218
219static void raw_disable_filters(struct net_device *dev, struct sock *sk,
220			      struct can_filter *filter, int count)
221{
222	int i;
223
224	for (i = 0; i < count; i++)
225		can_rx_unregister(dev, filter[i].can_id, filter[i].can_mask,
226				  raw_rcv, sk);
227}
228
229static inline void raw_disable_errfilter(struct net_device *dev,
230					 struct sock *sk,
231					 can_err_mask_t err_mask)
232
233{
234	if (err_mask)
235		can_rx_unregister(dev, 0, err_mask | CAN_ERR_FLAG,
236				  raw_rcv, sk);
237}
238
239static inline void raw_disable_allfilters(struct net_device *dev,
240					  struct sock *sk)
241{
242	struct raw_sock *ro = raw_sk(sk);
243
244	raw_disable_filters(dev, sk, ro->filter, ro->count);
245	raw_disable_errfilter(dev, sk, ro->err_mask);
246}
247
248static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
249{
250	struct raw_sock *ro = raw_sk(sk);
251	int err;
252
253	err = raw_enable_filters(dev, sk, ro->filter, ro->count);
254	if (!err) {
255		err = raw_enable_errfilter(dev, sk, ro->err_mask);
256		if (err)
257			raw_disable_filters(dev, sk, ro->filter, ro->count);
258	}
259
260	return err;
261}
262
263static int raw_notifier(struct notifier_block *nb,
264			unsigned long msg, void *ptr)
265{
266	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
267	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
268	struct sock *sk = &ro->sk;
269
270	if (!net_eq(dev_net(dev), &init_net))
271		return NOTIFY_DONE;
272
273	if (dev->type != ARPHRD_CAN)
274		return NOTIFY_DONE;
275
276	if (ro->ifindex != dev->ifindex)
277		return NOTIFY_DONE;
278
279	switch (msg) {
280
281	case NETDEV_UNREGISTER:
282		lock_sock(sk);
283		/* remove current filters & unregister */
284		if (ro->bound)
285			raw_disable_allfilters(dev, sk);
286
287		if (ro->count > 1)
288			kfree(ro->filter);
289
290		ro->ifindex = 0;
291		ro->bound   = 0;
292		ro->count   = 0;
293		release_sock(sk);
294
295		sk->sk_err = ENODEV;
296		if (!sock_flag(sk, SOCK_DEAD))
297			sk->sk_error_report(sk);
298		break;
299
300	case NETDEV_DOWN:
301		sk->sk_err = ENETDOWN;
302		if (!sock_flag(sk, SOCK_DEAD))
303			sk->sk_error_report(sk);
304		break;
305	}
306
307	return NOTIFY_DONE;
308}
309
310static int raw_init(struct sock *sk)
311{
312	struct raw_sock *ro = raw_sk(sk);
313
314	ro->bound            = 0;
315	ro->ifindex          = 0;
316
317	/* set default filter to single entry dfilter */
318	ro->dfilter.can_id   = 0;
319	ro->dfilter.can_mask = MASK_ALL;
320	ro->filter           = &ro->dfilter;
321	ro->count            = 1;
322
323	/* set default loopback behaviour */
324	ro->loopback         = 1;
325	ro->recv_own_msgs    = 0;
326	ro->fd_frames        = 0;
327	ro->join_filters     = 0;
328
329	/* alloc_percpu provides zero'ed memory */
330	ro->uniq = alloc_percpu(struct uniqframe);
331	if (unlikely(!ro->uniq))
332		return -ENOMEM;
333
334	/* set notifier */
335	ro->notifier.notifier_call = raw_notifier;
336
337	register_netdevice_notifier(&ro->notifier);
338
339	return 0;
340}
341
342static int raw_release(struct socket *sock)
343{
344	struct sock *sk = sock->sk;
345	struct raw_sock *ro;
346
347	if (!sk)
348		return 0;
349
350	ro = raw_sk(sk);
351
352	unregister_netdevice_notifier(&ro->notifier);
353
354	lock_sock(sk);
355
356	/* remove current filters & unregister */
357	if (ro->bound) {
358		if (ro->ifindex) {
359			struct net_device *dev;
360
361			dev = dev_get_by_index(&init_net, ro->ifindex);
362			if (dev) {
363				raw_disable_allfilters(dev, sk);
364				dev_put(dev);
365			}
366		} else
367			raw_disable_allfilters(NULL, sk);
368	}
369
370	if (ro->count > 1)
371		kfree(ro->filter);
372
373	ro->ifindex = 0;
374	ro->bound   = 0;
375	ro->count   = 0;
376	free_percpu(ro->uniq);
377
378	sock_orphan(sk);
379	sock->sk = NULL;
380
381	release_sock(sk);
382	sock_put(sk);
383
384	return 0;
385}
386
387static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
388{
389	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
390	struct sock *sk = sock->sk;
391	struct raw_sock *ro = raw_sk(sk);
392	int ifindex;
393	int err = 0;
394	int notify_enetdown = 0;
395
396	if (len < sizeof(*addr))
397		return -EINVAL;
398
399	lock_sock(sk);
400
401	if (ro->bound && addr->can_ifindex == ro->ifindex)
402		goto out;
403
404	if (addr->can_ifindex) {
405		struct net_device *dev;
406
407		dev = dev_get_by_index(&init_net, addr->can_ifindex);
408		if (!dev) {
409			err = -ENODEV;
410			goto out;
411		}
412		if (dev->type != ARPHRD_CAN) {
413			dev_put(dev);
414			err = -ENODEV;
415			goto out;
416		}
417		if (!(dev->flags & IFF_UP))
418			notify_enetdown = 1;
419
420		ifindex = dev->ifindex;
421
422		/* filters set by default/setsockopt */
423		err = raw_enable_allfilters(dev, sk);
424		dev_put(dev);
425	} else {
426		ifindex = 0;
427
428		/* filters set by default/setsockopt */
429		err = raw_enable_allfilters(NULL, sk);
430	}
431
432	if (!err) {
433		if (ro->bound) {
434			/* unregister old filters */
435			if (ro->ifindex) {
436				struct net_device *dev;
437
438				dev = dev_get_by_index(&init_net, ro->ifindex);
439				if (dev) {
440					raw_disable_allfilters(dev, sk);
441					dev_put(dev);
442				}
443			} else
444				raw_disable_allfilters(NULL, sk);
445		}
446		ro->ifindex = ifindex;
447		ro->bound = 1;
448	}
449
450 out:
451	release_sock(sk);
452
453	if (notify_enetdown) {
454		sk->sk_err = ENETDOWN;
455		if (!sock_flag(sk, SOCK_DEAD))
456			sk->sk_error_report(sk);
457	}
458
459	return err;
460}
461
462static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
463		       int *len, int peer)
464{
465	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
466	struct sock *sk = sock->sk;
467	struct raw_sock *ro = raw_sk(sk);
468
469	if (peer)
470		return -EOPNOTSUPP;
471
472	memset(addr, 0, sizeof(*addr));
473	addr->can_family  = AF_CAN;
474	addr->can_ifindex = ro->ifindex;
475
476	*len = sizeof(*addr);
477
478	return 0;
479}
480
481static int raw_setsockopt(struct socket *sock, int level, int optname,
482			  char __user *optval, unsigned int optlen)
483{
484	struct sock *sk = sock->sk;
485	struct raw_sock *ro = raw_sk(sk);
486	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
487	struct can_filter sfilter;         /* single filter */
488	struct net_device *dev = NULL;
489	can_err_mask_t err_mask = 0;
490	int count = 0;
491	int err = 0;
492
493	if (level != SOL_CAN_RAW)
494		return -EINVAL;
495
496	switch (optname) {
497
498	case CAN_RAW_FILTER:
499		if (optlen % sizeof(struct can_filter) != 0)
500			return -EINVAL;
501
502		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
503			return -EINVAL;
504
505		count = optlen / sizeof(struct can_filter);
506
507		if (count > 1) {
508			/* filter does not fit into dfilter => alloc space */
509			filter = memdup_user(optval, optlen);
510			if (IS_ERR(filter))
511				return PTR_ERR(filter);
512		} else if (count == 1) {
513			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
514				return -EFAULT;
515		}
516
517		lock_sock(sk);
518
519		if (ro->bound && ro->ifindex)
520			dev = dev_get_by_index(&init_net, ro->ifindex);
521
522		if (ro->bound) {
523			/* (try to) register the new filters */
524			if (count == 1)
525				err = raw_enable_filters(dev, sk, &sfilter, 1);
526			else
527				err = raw_enable_filters(dev, sk, filter,
528							 count);
529			if (err) {
530				if (count > 1)
531					kfree(filter);
532				goto out_fil;
533			}
534
535			/* remove old filter registrations */
536			raw_disable_filters(dev, sk, ro->filter, ro->count);
537		}
538
539		/* remove old filter space */
540		if (ro->count > 1)
541			kfree(ro->filter);
542
543		/* link new filters to the socket */
544		if (count == 1) {
545			/* copy filter data for single filter */
546			ro->dfilter = sfilter;
547			filter = &ro->dfilter;
548		}
549		ro->filter = filter;
550		ro->count  = count;
551
552 out_fil:
553		if (dev)
554			dev_put(dev);
555
556		release_sock(sk);
557
558		break;
559
560	case CAN_RAW_ERR_FILTER:
561		if (optlen != sizeof(err_mask))
562			return -EINVAL;
563
564		if (copy_from_user(&err_mask, optval, optlen))
565			return -EFAULT;
566
567		err_mask &= CAN_ERR_MASK;
568
569		lock_sock(sk);
570
571		if (ro->bound && ro->ifindex)
572			dev = dev_get_by_index(&init_net, ro->ifindex);
573
574		/* remove current error mask */
575		if (ro->bound) {
576			/* (try to) register the new err_mask */
577			err = raw_enable_errfilter(dev, sk, err_mask);
578
579			if (err)
580				goto out_err;
581
582			/* remove old err_mask registration */
583			raw_disable_errfilter(dev, sk, ro->err_mask);
584		}
585
586		/* link new err_mask to the socket */
587		ro->err_mask = err_mask;
588
589 out_err:
590		if (dev)
591			dev_put(dev);
592
593		release_sock(sk);
594
595		break;
596
597	case CAN_RAW_LOOPBACK:
598		if (optlen != sizeof(ro->loopback))
599			return -EINVAL;
600
601		if (copy_from_user(&ro->loopback, optval, optlen))
602			return -EFAULT;
603
604		break;
605
606	case CAN_RAW_RECV_OWN_MSGS:
607		if (optlen != sizeof(ro->recv_own_msgs))
608			return -EINVAL;
609
610		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
611			return -EFAULT;
612
613		break;
614
615	case CAN_RAW_FD_FRAMES:
616		if (optlen != sizeof(ro->fd_frames))
617			return -EINVAL;
618
619		if (copy_from_user(&ro->fd_frames, optval, optlen))
620			return -EFAULT;
621
622		break;
623
624	case CAN_RAW_JOIN_FILTERS:
625		if (optlen != sizeof(ro->join_filters))
626			return -EINVAL;
627
628		if (copy_from_user(&ro->join_filters, optval, optlen))
629			return -EFAULT;
630
631		break;
632
633	default:
634		return -ENOPROTOOPT;
635	}
636	return err;
637}
638
639static int raw_getsockopt(struct socket *sock, int level, int optname,
640			  char __user *optval, int __user *optlen)
641{
642	struct sock *sk = sock->sk;
643	struct raw_sock *ro = raw_sk(sk);
644	int len;
645	void *val;
646	int err = 0;
647
648	if (level != SOL_CAN_RAW)
649		return -EINVAL;
650	if (get_user(len, optlen))
651		return -EFAULT;
652	if (len < 0)
653		return -EINVAL;
654
655	switch (optname) {
656
657	case CAN_RAW_FILTER:
658		lock_sock(sk);
659		if (ro->count > 0) {
660			int fsize = ro->count * sizeof(struct can_filter);
661			if (len > fsize)
662				len = fsize;
663			if (copy_to_user(optval, ro->filter, len))
664				err = -EFAULT;
665		} else
666			len = 0;
667		release_sock(sk);
668
669		if (!err)
670			err = put_user(len, optlen);
671		return err;
672
673	case CAN_RAW_ERR_FILTER:
674		if (len > sizeof(can_err_mask_t))
675			len = sizeof(can_err_mask_t);
676		val = &ro->err_mask;
677		break;
678
679	case CAN_RAW_LOOPBACK:
680		if (len > sizeof(int))
681			len = sizeof(int);
682		val = &ro->loopback;
683		break;
684
685	case CAN_RAW_RECV_OWN_MSGS:
686		if (len > sizeof(int))
687			len = sizeof(int);
688		val = &ro->recv_own_msgs;
689		break;
690
691	case CAN_RAW_FD_FRAMES:
692		if (len > sizeof(int))
693			len = sizeof(int);
694		val = &ro->fd_frames;
695		break;
696
697	case CAN_RAW_JOIN_FILTERS:
698		if (len > sizeof(int))
699			len = sizeof(int);
700		val = &ro->join_filters;
701		break;
702
703	default:
704		return -ENOPROTOOPT;
705	}
706
707	if (put_user(len, optlen))
708		return -EFAULT;
709	if (copy_to_user(optval, val, len))
710		return -EFAULT;
711	return 0;
712}
713
714static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
715{
716	struct sock *sk = sock->sk;
717	struct raw_sock *ro = raw_sk(sk);
718	struct sk_buff *skb;
719	struct net_device *dev;
720	int ifindex;
721	int err;
722
723	if (msg->msg_name) {
724		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
725
726		if (msg->msg_namelen < sizeof(*addr))
727			return -EINVAL;
728
729		if (addr->can_family != AF_CAN)
730			return -EINVAL;
731
732		ifindex = addr->can_ifindex;
733	} else
734		ifindex = ro->ifindex;
735
736	if (ro->fd_frames) {
737		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
738			return -EINVAL;
739	} else {
740		if (unlikely(size != CAN_MTU))
741			return -EINVAL;
742	}
743
744	dev = dev_get_by_index(&init_net, ifindex);
745	if (!dev)
746		return -ENXIO;
747
748	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
749				  msg->msg_flags & MSG_DONTWAIT, &err);
750	if (!skb)
751		goto put_dev;
752
753	can_skb_reserve(skb);
754	can_skb_prv(skb)->ifindex = dev->ifindex;
755	can_skb_prv(skb)->skbcnt = 0;
756
757	err = memcpy_from_msg(skb_put(skb, size), msg, size);
758	if (err < 0)
759		goto free_skb;
760
761	sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);
762
763	skb->dev = dev;
764	skb->sk  = sk;
765	skb->priority = sk->sk_priority;
766
767	err = can_send(skb, ro->loopback);
768
769	dev_put(dev);
770
771	if (err)
772		goto send_failed;
773
774	return size;
775
776free_skb:
777	kfree_skb(skb);
778put_dev:
779	dev_put(dev);
780send_failed:
781	return err;
782}
783
784static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
785		       int flags)
786{
787	struct sock *sk = sock->sk;
788	struct sk_buff *skb;
789	int err = 0;
790	int noblock;
791
792	noblock =  flags & MSG_DONTWAIT;
793	flags   &= ~MSG_DONTWAIT;
794
795	skb = skb_recv_datagram(sk, flags, noblock, &err);
796	if (!skb)
797		return err;
798
799	if (size < skb->len)
800		msg->msg_flags |= MSG_TRUNC;
801	else
802		size = skb->len;
803
804	err = memcpy_to_msg(msg, skb->data, size);
805	if (err < 0) {
806		skb_free_datagram(sk, skb);
807		return err;
808	}
809
810	sock_recv_ts_and_drops(msg, sk, skb);
811
812	if (msg->msg_name) {
813		__sockaddr_check_size(sizeof(struct sockaddr_can));
814		msg->msg_namelen = sizeof(struct sockaddr_can);
815		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
816	}
817
818	/* assign the flags that have been recorded in raw_rcv() */
819	msg->msg_flags |= *(raw_flags(skb));
820
821	skb_free_datagram(sk, skb);
822
823	return size;
824}
825
826static const struct proto_ops raw_ops = {
827	.family        = PF_CAN,
828	.release       = raw_release,
829	.bind          = raw_bind,
830	.connect       = sock_no_connect,
831	.socketpair    = sock_no_socketpair,
832	.accept        = sock_no_accept,
833	.getname       = raw_getname,
834	.poll          = datagram_poll,
835	.ioctl         = can_ioctl,	/* use can_ioctl() from af_can.c */
836	.listen        = sock_no_listen,
837	.shutdown      = sock_no_shutdown,
838	.setsockopt    = raw_setsockopt,
839	.getsockopt    = raw_getsockopt,
840	.sendmsg       = raw_sendmsg,
841	.recvmsg       = raw_recvmsg,
842	.mmap          = sock_no_mmap,
843	.sendpage      = sock_no_sendpage,
844};
845
846static struct proto raw_proto __read_mostly = {
847	.name       = "CAN_RAW",
848	.owner      = THIS_MODULE,
849	.obj_size   = sizeof(struct raw_sock),
850	.init       = raw_init,
851};
852
853static const struct can_proto raw_can_proto = {
854	.type       = SOCK_RAW,
855	.protocol   = CAN_RAW,
856	.ops        = &raw_ops,
857	.prot       = &raw_proto,
858};
859
860static __init int raw_module_init(void)
861{
862	int err;
863
864	pr_info("can: raw protocol (rev " CAN_RAW_VERSION ")\n");
865
866	err = can_proto_register(&raw_can_proto);
867	if (err < 0)
868		printk(KERN_ERR "can: registration of raw protocol failed\n");
869
870	return err;
871}
872
873static __exit void raw_module_exit(void)
874{
875	can_proto_unregister(&raw_can_proto);
876}
877
878module_init(raw_module_init);
879module_exit(raw_module_exit);
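
For context, a minimal userspace sketch (not part of raw.c; the interface name "can0" is an assumption) showing how the protocol above is typically exercised: socket(PF_CAN, SOCK_RAW, CAN_RAW) selects the protocol registered in raw_module_init(), bind() lands in raw_bind(), setsockopt(SOL_CAN_RAW, CAN_RAW_FILTER, ...) in raw_setsockopt(), and write()/read() of CAN_MTU-sized frames in raw_sendmsg()/raw_recvmsg().

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { 0 };
	struct can_filter rfilter[1];
	struct can_frame frame = { 0 };
	struct ifreq ifr;
	int s;

	/* SOCK_RAW/CAN_RAW matches the can_proto registered above */
	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return 1;

	/* resolve the interface index ("can0" is an assumption), then bind() */
	strcpy(ifr.ifr_name, "can0");
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return 1;
	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* replace the default "receive everything" filter set up in raw_init() */
	rfilter[0].can_id   = 0x123;
	rfilter[0].can_mask = CAN_SFF_MASK;
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));

	/* a write() of exactly CAN_MTU bytes is handled by raw_sendmsg() */
	frame.can_id  = 0x123;
	frame.can_dlc = 2;
	frame.data[0] = 0x11;
	frame.data[1] = 0x22;
	write(s, &frame, sizeof(frame));

	/* read() returns the next frame queued by raw_rcv() */
	if (read(s, &frame, sizeof(frame)) == sizeof(frame))
		printf("ID 0x%03X dlc %d\n", frame.can_id, frame.can_dlc);

	close(s);
	return 0;
}

The CAN_RAW_LOOPBACK and CAN_RAW_RECV_OWN_MSGS options handled in raw_setsockopt() are toggled the same way, each with a plain int value.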
net/can/raw.c as of Linux v5.4:
  1// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
  2/* raw.c - Raw sockets for protocol family CAN
  3 *
  4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  5 * All rights reserved.
  6 *
  7 * Redistribution and use in source and binary forms, with or without
  8 * modification, are permitted provided that the following conditions
  9 * are met:
 10 * 1. Redistributions of source code must retain the above copyright
 11 *    notice, this list of conditions and the following disclaimer.
 12 * 2. Redistributions in binary form must reproduce the above copyright
 13 *    notice, this list of conditions and the following disclaimer in the
 14 *    documentation and/or other materials provided with the distribution.
 15 * 3. Neither the name of Volkswagen nor the names of its contributors
 16 *    may be used to endorse or promote products derived from this software
 17 *    without specific prior written permission.
 18 *
 19 * Alternatively, provided that this notice is retained in full, this
 20 * software may be distributed under the terms of the GNU General
 21 * Public License ("GPL") version 2, in which case the provisions of the
 22 * GPL apply INSTEAD OF those given above.
 23 *
 24 * The provided data structures and external interfaces from this code
 25 * are not restricted to be used by modules with a GPL compatible license.
 26 *
 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 38 * DAMAGE.
 39 *
 40 */
 41
 42#include <linux/module.h>
 43#include <linux/init.h>
 44#include <linux/uio.h>
 45#include <linux/net.h>
 46#include <linux/slab.h>
 47#include <linux/netdevice.h>
 48#include <linux/socket.h>
 49#include <linux/if_arp.h>
 50#include <linux/skbuff.h>
 51#include <linux/can.h>
 52#include <linux/can/core.h>
 53#include <linux/can/skb.h>
 54#include <linux/can/raw.h>
 55#include <net/sock.h>
 56#include <net/net_namespace.h>
 57
 58#define CAN_RAW_VERSION CAN_VERSION
 59
 60MODULE_DESCRIPTION("PF_CAN raw protocol");
 61MODULE_LICENSE("Dual BSD/GPL");
 62MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 63MODULE_ALIAS("can-proto-1");
 64
 65#define MASK_ALL 0
 66
 67/* A raw socket has a list of can_filters attached to it, each receiving
 68 * the CAN frames matching that filter.  If the filter list is empty,
 69 * no CAN frames will be received by the socket.  The default after
  70 * opening the socket is to have one filter which receives all frames.
 71 * The filter list is allocated dynamically with the exception of the
 72 * list containing only one item.  This common case is optimized by
 73 * storing the single filter in dfilter, to avoid using dynamic memory.
 74 */
 75
 76struct uniqframe {
 77	int skbcnt;
 78	const struct sk_buff *skb;
 79	unsigned int join_rx_count;
 80};
 81
 82struct raw_sock {
 83	struct sock sk;
 84	int bound;
 85	int ifindex;
 86	struct notifier_block notifier;
 87	int loopback;
 88	int recv_own_msgs;
 89	int fd_frames;
 90	int join_filters;
 91	int count;                 /* number of active filters */
 92	struct can_filter dfilter; /* default/single filter */
 93	struct can_filter *filter; /* pointer to filter(s) */
 94	can_err_mask_t err_mask;
 95	struct uniqframe __percpu *uniq;
 96};
 97
 98/* Return pointer to store the extra msg flags for raw_recvmsg().
 99 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
100 * in skb->cb.
101 */
102static inline unsigned int *raw_flags(struct sk_buff *skb)
103{
104	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
105			       sizeof(unsigned int));
106
107	/* return pointer after struct sockaddr_can */
108	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
109}
110
111static inline struct raw_sock *raw_sk(const struct sock *sk)
112{
113	return (struct raw_sock *)sk;
114}
115
116static void raw_rcv(struct sk_buff *oskb, void *data)
117{
118	struct sock *sk = (struct sock *)data;
119	struct raw_sock *ro = raw_sk(sk);
120	struct sockaddr_can *addr;
121	struct sk_buff *skb;
122	unsigned int *pflags;
123
124	/* check the received tx sock reference */
125	if (!ro->recv_own_msgs && oskb->sk == sk)
126		return;
127
128	/* do not pass non-CAN2.0 frames to a legacy socket */
129	if (!ro->fd_frames && oskb->len != CAN_MTU)
130		return;
131
132	/* eliminate multiple filter matches for the same skb */
133	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
134	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
135		if (ro->join_filters) {
136			this_cpu_inc(ro->uniq->join_rx_count);
137			/* drop frame until all enabled filters matched */
138			if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
139				return;
140		} else {
141			return;
142		}
143	} else {
144		this_cpu_ptr(ro->uniq)->skb = oskb;
145		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
146		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
147		/* drop first frame to check all enabled filters? */
148		if (ro->join_filters && ro->count > 1)
149			return;
150	}
151
152	/* clone the given skb to be able to enqueue it into the rcv queue */
153	skb = skb_clone(oskb, GFP_ATOMIC);
154	if (!skb)
155		return;
156
157	/*  Put the datagram to the queue so that raw_recvmsg() can
158	 *  get it from there.  We need to pass the interface index to
159	 *  raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
160	 *  containing the interface index.
161	 */
162
163	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
164	addr = (struct sockaddr_can *)skb->cb;
165	memset(addr, 0, sizeof(*addr));
166	addr->can_family  = AF_CAN;
167	addr->can_ifindex = skb->dev->ifindex;
168
169	/* add CAN specific message flags for raw_recvmsg() */
170	pflags = raw_flags(skb);
171	*pflags = 0;
172	if (oskb->sk)
173		*pflags |= MSG_DONTROUTE;
174	if (oskb->sk == sk)
175		*pflags |= MSG_CONFIRM;
176
177	if (sock_queue_rcv_skb(sk, skb) < 0)
178		kfree_skb(skb);
179}
180
181static int raw_enable_filters(struct net *net, struct net_device *dev,
182			      struct sock *sk, struct can_filter *filter,
183			      int count)
184{
185	int err = 0;
186	int i;
187
188	for (i = 0; i < count; i++) {
189		err = can_rx_register(net, dev, filter[i].can_id,
190				      filter[i].can_mask,
191				      raw_rcv, sk, "raw", sk);
192		if (err) {
193			/* clean up successfully registered filters */
194			while (--i >= 0)
195				can_rx_unregister(net, dev, filter[i].can_id,
196						  filter[i].can_mask,
197						  raw_rcv, sk);
198			break;
199		}
200	}
201
202	return err;
203}
204
205static int raw_enable_errfilter(struct net *net, struct net_device *dev,
206				struct sock *sk, can_err_mask_t err_mask)
207{
208	int err = 0;
209
210	if (err_mask)
211		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
212				      raw_rcv, sk, "raw", sk);
213
214	return err;
215}
216
217static void raw_disable_filters(struct net *net, struct net_device *dev,
218				struct sock *sk, struct can_filter *filter,
219				int count)
220{
221	int i;
222
223	for (i = 0; i < count; i++)
224		can_rx_unregister(net, dev, filter[i].can_id,
225				  filter[i].can_mask, raw_rcv, sk);
226}
227
228static inline void raw_disable_errfilter(struct net *net,
229					 struct net_device *dev,
230					 struct sock *sk,
231					 can_err_mask_t err_mask)
232
233{
234	if (err_mask)
235		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
236				  raw_rcv, sk);
237}
238
239static inline void raw_disable_allfilters(struct net *net,
240					  struct net_device *dev,
241					  struct sock *sk)
242{
243	struct raw_sock *ro = raw_sk(sk);
244
245	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
246	raw_disable_errfilter(net, dev, sk, ro->err_mask);
247}
248
249static int raw_enable_allfilters(struct net *net, struct net_device *dev,
250				 struct sock *sk)
251{
252	struct raw_sock *ro = raw_sk(sk);
253	int err;
254
255	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
256	if (!err) {
257		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
258		if (err)
259			raw_disable_filters(net, dev, sk, ro->filter,
260					    ro->count);
261	}
262
263	return err;
264}
265
266static int raw_notifier(struct notifier_block *nb,
267			unsigned long msg, void *ptr)
268{
269	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
270	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
271	struct sock *sk = &ro->sk;
272
273	if (!net_eq(dev_net(dev), sock_net(sk)))
274		return NOTIFY_DONE;
275
276	if (dev->type != ARPHRD_CAN)
277		return NOTIFY_DONE;
278
279	if (ro->ifindex != dev->ifindex)
280		return NOTIFY_DONE;
281
282	switch (msg) {
283	case NETDEV_UNREGISTER:
284		lock_sock(sk);
285		/* remove current filters & unregister */
286		if (ro->bound)
287			raw_disable_allfilters(dev_net(dev), dev, sk);
288
289		if (ro->count > 1)
290			kfree(ro->filter);
291
292		ro->ifindex = 0;
293		ro->bound   = 0;
294		ro->count   = 0;
295		release_sock(sk);
296
297		sk->sk_err = ENODEV;
298		if (!sock_flag(sk, SOCK_DEAD))
299			sk->sk_error_report(sk);
300		break;
301
302	case NETDEV_DOWN:
303		sk->sk_err = ENETDOWN;
304		if (!sock_flag(sk, SOCK_DEAD))
305			sk->sk_error_report(sk);
306		break;
307	}
308
309	return NOTIFY_DONE;
310}
311
312static int raw_init(struct sock *sk)
313{
314	struct raw_sock *ro = raw_sk(sk);
315
316	ro->bound            = 0;
317	ro->ifindex          = 0;
318
319	/* set default filter to single entry dfilter */
320	ro->dfilter.can_id   = 0;
321	ro->dfilter.can_mask = MASK_ALL;
322	ro->filter           = &ro->dfilter;
323	ro->count            = 1;
324
325	/* set default loopback behaviour */
326	ro->loopback         = 1;
327	ro->recv_own_msgs    = 0;
328	ro->fd_frames        = 0;
329	ro->join_filters     = 0;
330
331	/* alloc_percpu provides zero'ed memory */
332	ro->uniq = alloc_percpu(struct uniqframe);
333	if (unlikely(!ro->uniq))
334		return -ENOMEM;
335
336	/* set notifier */
337	ro->notifier.notifier_call = raw_notifier;
338
339	register_netdevice_notifier(&ro->notifier);
340
341	return 0;
342}
343
344static int raw_release(struct socket *sock)
345{
346	struct sock *sk = sock->sk;
347	struct raw_sock *ro;
348
349	if (!sk)
350		return 0;
351
352	ro = raw_sk(sk);
353
354	unregister_netdevice_notifier(&ro->notifier);
355
356	lock_sock(sk);
357
358	/* remove current filters & unregister */
359	if (ro->bound) {
360		if (ro->ifindex) {
361			struct net_device *dev;
362
363			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
364			if (dev) {
365				raw_disable_allfilters(dev_net(dev), dev, sk);
366				dev_put(dev);
367			}
368		} else {
369			raw_disable_allfilters(sock_net(sk), NULL, sk);
370		}
371	}
372
373	if (ro->count > 1)
374		kfree(ro->filter);
375
376	ro->ifindex = 0;
377	ro->bound   = 0;
378	ro->count   = 0;
379	free_percpu(ro->uniq);
380
381	sock_orphan(sk);
382	sock->sk = NULL;
383
384	release_sock(sk);
385	sock_put(sk);
386
387	return 0;
388}
389
390static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
391{
392	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
393	struct sock *sk = sock->sk;
394	struct raw_sock *ro = raw_sk(sk);
395	int ifindex;
396	int err = 0;
397	int notify_enetdown = 0;
398
399	if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
400		return -EINVAL;
401	if (addr->can_family != AF_CAN)
402		return -EINVAL;
403
404	lock_sock(sk);
405
406	if (ro->bound && addr->can_ifindex == ro->ifindex)
407		goto out;
408
409	if (addr->can_ifindex) {
410		struct net_device *dev;
411
412		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
413		if (!dev) {
414			err = -ENODEV;
415			goto out;
416		}
417		if (dev->type != ARPHRD_CAN) {
418			dev_put(dev);
419			err = -ENODEV;
420			goto out;
421		}
422		if (!(dev->flags & IFF_UP))
423			notify_enetdown = 1;
424
425		ifindex = dev->ifindex;
426
427		/* filters set by default/setsockopt */
428		err = raw_enable_allfilters(sock_net(sk), dev, sk);
429		dev_put(dev);
430	} else {
431		ifindex = 0;
432
433		/* filters set by default/setsockopt */
434		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
435	}
436
437	if (!err) {
438		if (ro->bound) {
439			/* unregister old filters */
440			if (ro->ifindex) {
441				struct net_device *dev;
442
443				dev = dev_get_by_index(sock_net(sk),
444						       ro->ifindex);
445				if (dev) {
446					raw_disable_allfilters(dev_net(dev),
447							       dev, sk);
448					dev_put(dev);
449				}
450			} else {
451				raw_disable_allfilters(sock_net(sk), NULL, sk);
452			}
453		}
454		ro->ifindex = ifindex;
455		ro->bound = 1;
456	}
457
458 out:
459	release_sock(sk);
460
461	if (notify_enetdown) {
462		sk->sk_err = ENETDOWN;
463		if (!sock_flag(sk, SOCK_DEAD))
464			sk->sk_error_report(sk);
465	}
466
467	return err;
468}
469
470static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
471		       int peer)
472{
473	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
474	struct sock *sk = sock->sk;
475	struct raw_sock *ro = raw_sk(sk);
476
477	if (peer)
478		return -EOPNOTSUPP;
479
480	memset(addr, 0, sizeof(*addr));
481	addr->can_family  = AF_CAN;
482	addr->can_ifindex = ro->ifindex;
483
484	return sizeof(*addr);
485}
486
487static int raw_setsockopt(struct socket *sock, int level, int optname,
488			  char __user *optval, unsigned int optlen)
489{
490	struct sock *sk = sock->sk;
491	struct raw_sock *ro = raw_sk(sk);
492	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
493	struct can_filter sfilter;         /* single filter */
494	struct net_device *dev = NULL;
495	can_err_mask_t err_mask = 0;
496	int count = 0;
497	int err = 0;
498
499	if (level != SOL_CAN_RAW)
500		return -EINVAL;
501
502	switch (optname) {
503	case CAN_RAW_FILTER:
504		if (optlen % sizeof(struct can_filter) != 0)
505			return -EINVAL;
506
507		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
508			return -EINVAL;
509
510		count = optlen / sizeof(struct can_filter);
511
512		if (count > 1) {
513			/* filter does not fit into dfilter => alloc space */
514			filter = memdup_user(optval, optlen);
515			if (IS_ERR(filter))
516				return PTR_ERR(filter);
517		} else if (count == 1) {
518			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
519				return -EFAULT;
520		}
521
522		lock_sock(sk);
523
524		if (ro->bound && ro->ifindex)
525			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
526
527		if (ro->bound) {
528			/* (try to) register the new filters */
529			if (count == 1)
530				err = raw_enable_filters(sock_net(sk), dev, sk,
531							 &sfilter, 1);
532			else
533				err = raw_enable_filters(sock_net(sk), dev, sk,
534							 filter, count);
535			if (err) {
536				if (count > 1)
537					kfree(filter);
538				goto out_fil;
539			}
540
541			/* remove old filter registrations */
542			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
543					    ro->count);
544		}
545
546		/* remove old filter space */
547		if (ro->count > 1)
548			kfree(ro->filter);
549
550		/* link new filters to the socket */
551		if (count == 1) {
552			/* copy filter data for single filter */
553			ro->dfilter = sfilter;
554			filter = &ro->dfilter;
555		}
556		ro->filter = filter;
557		ro->count  = count;
558
559 out_fil:
560		if (dev)
561			dev_put(dev);
562
563		release_sock(sk);
564
565		break;
566
567	case CAN_RAW_ERR_FILTER:
568		if (optlen != sizeof(err_mask))
569			return -EINVAL;
570
571		if (copy_from_user(&err_mask, optval, optlen))
572			return -EFAULT;
573
574		err_mask &= CAN_ERR_MASK;
575
576		lock_sock(sk);
577
578		if (ro->bound && ro->ifindex)
579			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
580
581		/* remove current error mask */
582		if (ro->bound) {
583			/* (try to) register the new err_mask */
584			err = raw_enable_errfilter(sock_net(sk), dev, sk,
585						   err_mask);
586
587			if (err)
588				goto out_err;
589
590			/* remove old err_mask registration */
591			raw_disable_errfilter(sock_net(sk), dev, sk,
592					      ro->err_mask);
593		}
594
595		/* link new err_mask to the socket */
596		ro->err_mask = err_mask;
597
598 out_err:
599		if (dev)
600			dev_put(dev);
601
602		release_sock(sk);
603
604		break;
605
606	case CAN_RAW_LOOPBACK:
607		if (optlen != sizeof(ro->loopback))
608			return -EINVAL;
609
610		if (copy_from_user(&ro->loopback, optval, optlen))
611			return -EFAULT;
612
613		break;
614
615	case CAN_RAW_RECV_OWN_MSGS:
616		if (optlen != sizeof(ro->recv_own_msgs))
617			return -EINVAL;
618
619		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
620			return -EFAULT;
621
622		break;
623
624	case CAN_RAW_FD_FRAMES:
625		if (optlen != sizeof(ro->fd_frames))
626			return -EINVAL;
627
628		if (copy_from_user(&ro->fd_frames, optval, optlen))
629			return -EFAULT;
630
631		break;
632
633	case CAN_RAW_JOIN_FILTERS:
634		if (optlen != sizeof(ro->join_filters))
635			return -EINVAL;
636
637		if (copy_from_user(&ro->join_filters, optval, optlen))
638			return -EFAULT;
639
640		break;
641
642	default:
643		return -ENOPROTOOPT;
644	}
645	return err;
646}
647
648static int raw_getsockopt(struct socket *sock, int level, int optname,
649			  char __user *optval, int __user *optlen)
650{
651	struct sock *sk = sock->sk;
652	struct raw_sock *ro = raw_sk(sk);
653	int len;
654	void *val;
655	int err = 0;
656
657	if (level != SOL_CAN_RAW)
658		return -EINVAL;
659	if (get_user(len, optlen))
660		return -EFAULT;
661	if (len < 0)
662		return -EINVAL;
663
664	switch (optname) {
665	case CAN_RAW_FILTER:
666		lock_sock(sk);
667		if (ro->count > 0) {
668			int fsize = ro->count * sizeof(struct can_filter);
669
670			if (len > fsize)
671				len = fsize;
672			if (copy_to_user(optval, ro->filter, len))
673				err = -EFAULT;
674		} else {
675			len = 0;
676		}
677		release_sock(sk);
678
679		if (!err)
680			err = put_user(len, optlen);
681		return err;
682
683	case CAN_RAW_ERR_FILTER:
684		if (len > sizeof(can_err_mask_t))
685			len = sizeof(can_err_mask_t);
686		val = &ro->err_mask;
687		break;
688
689	case CAN_RAW_LOOPBACK:
690		if (len > sizeof(int))
691			len = sizeof(int);
692		val = &ro->loopback;
693		break;
694
695	case CAN_RAW_RECV_OWN_MSGS:
696		if (len > sizeof(int))
697			len = sizeof(int);
698		val = &ro->recv_own_msgs;
699		break;
700
701	case CAN_RAW_FD_FRAMES:
702		if (len > sizeof(int))
703			len = sizeof(int);
704		val = &ro->fd_frames;
705		break;
706
707	case CAN_RAW_JOIN_FILTERS:
708		if (len > sizeof(int))
709			len = sizeof(int);
710		val = &ro->join_filters;
711		break;
712
713	default:
714		return -ENOPROTOOPT;
715	}
716
717	if (put_user(len, optlen))
718		return -EFAULT;
719	if (copy_to_user(optval, val, len))
720		return -EFAULT;
721	return 0;
722}
723
724static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
725{
726	struct sock *sk = sock->sk;
727	struct raw_sock *ro = raw_sk(sk);
728	struct sk_buff *skb;
729	struct net_device *dev;
730	int ifindex;
731	int err;
732
733	if (msg->msg_name) {
734		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
735
736		if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
737			return -EINVAL;
738
739		if (addr->can_family != AF_CAN)
740			return -EINVAL;
741
742		ifindex = addr->can_ifindex;
743	} else {
744		ifindex = ro->ifindex;
745	}
746
747	dev = dev_get_by_index(sock_net(sk), ifindex);
748	if (!dev)
749		return -ENXIO;
750
751	err = -EINVAL;
752	if (ro->fd_frames && dev->mtu == CANFD_MTU) {
753		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
754			goto put_dev;
755	} else {
756		if (unlikely(size != CAN_MTU))
757			goto put_dev;
758	}
759
759
760	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
761				  msg->msg_flags & MSG_DONTWAIT, &err);
762	if (!skb)
763		goto put_dev;
764
765	can_skb_reserve(skb);
766	can_skb_prv(skb)->ifindex = dev->ifindex;
767	can_skb_prv(skb)->skbcnt = 0;
768
769	err = memcpy_from_msg(skb_put(skb, size), msg, size);
770	if (err < 0)
771		goto free_skb;
772
773	skb_setup_tx_timestamp(skb, sk->sk_tsflags);
774
775	skb->dev = dev;
776	skb->sk  = sk;
777	skb->priority = sk->sk_priority;
778
779	err = can_send(skb, ro->loopback);
780
781	dev_put(dev);
782
783	if (err)
784		goto send_failed;
785
786	return size;
787
788free_skb:
789	kfree_skb(skb);
790put_dev:
791	dev_put(dev);
792send_failed:
793	return err;
794}
795
796static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
797		       int flags)
798{
799	struct sock *sk = sock->sk;
800	struct sk_buff *skb;
801	int err = 0;
802	int noblock;
803
804	noblock =  flags & MSG_DONTWAIT;
805	flags   &= ~MSG_DONTWAIT;
806
807	skb = skb_recv_datagram(sk, flags, noblock, &err);
808	if (!skb)
809		return err;
810
811	if (size < skb->len)
812		msg->msg_flags |= MSG_TRUNC;
813	else
814		size = skb->len;
815
816	err = memcpy_to_msg(msg, skb->data, size);
817	if (err < 0) {
818		skb_free_datagram(sk, skb);
819		return err;
820	}
821
822	sock_recv_ts_and_drops(msg, sk, skb);
823
824	if (msg->msg_name) {
825		__sockaddr_check_size(sizeof(struct sockaddr_can));
826		msg->msg_namelen = sizeof(struct sockaddr_can);
827		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
828	}
829
830	/* assign the flags that have been recorded in raw_rcv() */
831	msg->msg_flags |= *(raw_flags(skb));
832
833	skb_free_datagram(sk, skb);
834
835	return size;
836}
837
838static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
839				unsigned long arg)
840{
841	/* no ioctls for socket layer -> hand it down to NIC layer */
842	return -ENOIOCTLCMD;
843}
844
845static const struct proto_ops raw_ops = {
846	.family        = PF_CAN,
847	.release       = raw_release,
848	.bind          = raw_bind,
849	.connect       = sock_no_connect,
850	.socketpair    = sock_no_socketpair,
851	.accept        = sock_no_accept,
852	.getname       = raw_getname,
853	.poll          = datagram_poll,
854	.ioctl         = raw_sock_no_ioctlcmd,
855	.gettstamp     = sock_gettstamp,
856	.listen        = sock_no_listen,
857	.shutdown      = sock_no_shutdown,
858	.setsockopt    = raw_setsockopt,
859	.getsockopt    = raw_getsockopt,
860	.sendmsg       = raw_sendmsg,
861	.recvmsg       = raw_recvmsg,
862	.mmap          = sock_no_mmap,
863	.sendpage      = sock_no_sendpage,
864};
865
866static struct proto raw_proto __read_mostly = {
867	.name       = "CAN_RAW",
868	.owner      = THIS_MODULE,
869	.obj_size   = sizeof(struct raw_sock),
870	.init       = raw_init,
871};
872
873static const struct can_proto raw_can_proto = {
874	.type       = SOCK_RAW,
875	.protocol   = CAN_RAW,
876	.ops        = &raw_ops,
877	.prot       = &raw_proto,
878};
879
880static __init int raw_module_init(void)
881{
882	int err;
883
884	pr_info("can: raw protocol (rev " CAN_RAW_VERSION ")\n");
885
886	err = can_proto_register(&raw_can_proto);
887	if (err < 0)
888		pr_err("can: registration of raw protocol failed\n");
889
890	return err;
891}
892
893static __exit void raw_module_exit(void)
894{
895	can_proto_unregister(&raw_can_proto);
896}
897
898module_init(raw_module_init);
899module_exit(raw_module_exit);
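
Between the two versions shown, raw_sendmsg() in v5.4 additionally requires the bound device to have dev->mtu == CANFD_MTU before accepting 72-byte writes. A minimal userspace sketch of the CAN_RAW_FD_FRAMES option (assuming an interface "can0" that is already configured for CAN FD):

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int send_fd_frame(void)
{
	struct sockaddr_can addr = { 0 };
	struct canfd_frame cfd = { 0 };
	struct ifreq ifr;
	int enable = 1;
	int s, ret;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return -1;

	/* allow CANFD_MTU (72 byte) reads/writes on this socket (ro->fd_frames) */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &enable, sizeof(enable));

	strcpy(ifr.ifr_name, "can0");	/* assumed CAN FD capable interface */
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return -1;
	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;

	cfd.can_id = 0x1ABCDEFF | CAN_EFF_FLAG;	/* 29-bit identifier */
	cfd.len    = 12;			/* CAN FD payloads go up to 64 bytes */
	memset(cfd.data, 0x55, cfd.len);

	/* with fd_frames set, raw_sendmsg() accepts CANFD_MTU sized writes */
	ret = (write(s, &cfd, sizeof(cfd)) == sizeof(cfd)) ? 0 : -1;
	close(s);
	return ret;
}

Classic CAN_MTU writes remain valid on the same socket, which is why raw_sendmsg() accepts either size once fd_frames is set.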