net/can/raw.c — listing as of Linux v5.14.15 (web-page navigation boilerplate removed)
  1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
  2/* raw.c - Raw sockets for protocol family CAN
  3 *
  4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  5 * All rights reserved.
  6 *
  7 * Redistribution and use in source and binary forms, with or without
  8 * modification, are permitted provided that the following conditions
  9 * are met:
 10 * 1. Redistributions of source code must retain the above copyright
 11 *    notice, this list of conditions and the following disclaimer.
 12 * 2. Redistributions in binary form must reproduce the above copyright
 13 *    notice, this list of conditions and the following disclaimer in the
 14 *    documentation and/or other materials provided with the distribution.
 15 * 3. Neither the name of Volkswagen nor the names of its contributors
 16 *    may be used to endorse or promote products derived from this software
 17 *    without specific prior written permission.
 18 *
 19 * Alternatively, provided that this notice is retained in full, this
 20 * software may be distributed under the terms of the GNU General
 21 * Public License ("GPL") version 2, in which case the provisions of the
 22 * GPL apply INSTEAD OF those given above.
 23 *
 24 * The provided data structures and external interfaces from this code
 25 * are not restricted to be used by modules with a GPL compatible license.
 26 *
 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 38 * DAMAGE.
 39 *
 40 */
 41
 42#include <linux/module.h>
 43#include <linux/init.h>
 44#include <linux/uio.h>
 45#include <linux/net.h>
 46#include <linux/slab.h>
 47#include <linux/netdevice.h>
 48#include <linux/socket.h>
 49#include <linux/if_arp.h>
 50#include <linux/skbuff.h>
 51#include <linux/can.h>
 52#include <linux/can/core.h>
 
 53#include <linux/can/skb.h>
 54#include <linux/can/raw.h>
 55#include <net/sock.h>
 56#include <net/net_namespace.h>
 57
 58MODULE_DESCRIPTION("PF_CAN raw protocol");
 59MODULE_LICENSE("Dual BSD/GPL");
 60MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 61MODULE_ALIAS("can-proto-1");
 62
 63#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
 64
 65#define MASK_ALL 0
 66
 67/* A raw socket has a list of can_filters attached to it, each receiving
 68 * the CAN frames matching that filter.  If the filter list is empty,
 69 * no CAN frames will be received by the socket.  The default after
 70 * opening the socket, is to have one filter which receives all frames.
 71 * The filter list is allocated dynamically with the exception of the
 72 * list containing only one item.  This common case is optimized by
 73 * storing the single filter in dfilter, to avoid using dynamic memory.
 74 */
 75
/* Per-CPU bookkeeping used by raw_rcv() to suppress duplicate delivery
 * when several filters of the same socket match one skb.
 */
struct uniqframe {
	int skbcnt;                  /* clone counter of the last seen skb */
	const struct sk_buff *skb;   /* last skb seen on this CPU (identity only, not dereferenced) */
	unsigned int join_rx_count;  /* number of filters matched so far for this skb */
};
 81
/* Protocol-private socket state; struct sock MUST stay the first member
 * so that raw_sk() can cast between the two representations.
 */
struct raw_sock {
	struct sock sk;
	int bound;                 /* non-zero once raw_bind() succeeded */
	int ifindex;               /* bound interface, 0 = all interfaces */
	struct list_head notifier; /* entry in raw_notifier_list */
	int loopback;              /* CAN_RAW_LOOPBACK sockopt value */
	int recv_own_msgs;         /* CAN_RAW_RECV_OWN_MSGS sockopt value */
	int fd_frames;             /* CAN_RAW_FD_FRAMES sockopt value */
	int join_filters;          /* CAN_RAW_JOIN_FILTERS sockopt value */
	int count;                 /* number of active filters */
	struct can_filter dfilter; /* default/single filter */
	struct can_filter *filter; /* pointer to filter(s) */
	can_err_mask_t err_mask;   /* CAN_RAW_ERR_FILTER sockopt value */
	struct uniqframe __percpu *uniq; /* per-CPU duplicate-match state */
};
 97
 98static LIST_HEAD(raw_notifier_list);
 99static DEFINE_SPINLOCK(raw_notifier_lock);
100static struct raw_sock *raw_busy_notifier;
101
102/* Return pointer to store the extra msg flags for raw_recvmsg().
103 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
104 * in skb->cb.
105 */
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
	/* compile-time proof that sockaddr_can plus one flags word fits
	 * into the 48-byte skb->cb scratch area
	 */
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* return pointer after struct sockaddr_can */
	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}
114
/* Downcast from generic sock to raw_sock; valid because struct sock is
 * the first member of struct raw_sock (see raw_proto.obj_size).
 */
static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}
119
/* Per-filter receive callback registered via can_rx_register().
 * Runs in softirq context; 'data' is the receiving struct sock.
 * Clones matching skbs onto the socket receive queue for raw_recvmsg().
 */
static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* do not pass non-CAN2.0 frames to a legacy socket */
	if (!ro->fd_frames && oskb->len != CAN_MTU)
		return;

	/* eliminate multiple filter matches for the same skb */
	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
		if (ro->join_filters) {
			this_cpu_inc(ro->uniq->join_rx_count);
			/* drop frame until all enabled filters matched */
			if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
				return;
		} else {
			return;
		}
	} else {
		/* first match for this skb on this CPU: record its identity */
		this_cpu_ptr(ro->uniq)->skb = oskb;
		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
		/* drop first frame to check all enabled filters? */
		if (ro->join_filters && ro->count > 1)
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Put the datagram to the queue so that raw_recvmsg() can get
	 * it from there. We need to pass the interface index to
	 * raw_recvmsg(). We pass a whole struct sockaddr_can in
	 * skb->cb containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}
184
/* Register 'count' content filters with the CAN core for this socket.
 * All-or-nothing: on any failure, previously registered entries are
 * rolled back and the error is returned.
 */
static int raw_enable_filters(struct net *net, struct net_device *dev,
			      struct sock *sk, struct can_filter *filter,
			      int count)
{
	int err = 0;
	int i;

	for (i = 0; i < count; i++) {
		err = can_rx_register(net, dev, filter[i].can_id,
				      filter[i].can_mask,
				      raw_rcv, sk, "raw", sk);
		if (err) {
			/* clean up successfully registered filters */
			while (--i >= 0)
				can_rx_unregister(net, dev, filter[i].can_id,
						  filter[i].can_mask,
						  raw_rcv, sk);
			break;
		}
	}

	return err;
}
208
209static int raw_enable_errfilter(struct net *net, struct net_device *dev,
210				struct sock *sk, can_err_mask_t err_mask)
211{
212	int err = 0;
213
214	if (err_mask)
215		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
216				      raw_rcv, sk, "raw", sk);
217
218	return err;
219}
220
/* Unregister 'count' content filters previously installed by
 * raw_enable_filters(); must be called with the same filter array.
 */
static void raw_disable_filters(struct net *net, struct net_device *dev,
				struct sock *sk, struct can_filter *filter,
				int count)
{
	int i;

	for (i = 0; i < count; i++)
		can_rx_unregister(net, dev, filter[i].can_id,
				  filter[i].can_mask, raw_rcv, sk);
}
231
/* Unregister the error-frame filter, mirroring raw_enable_errfilter(). */
static inline void raw_disable_errfilter(struct net *net,
					 struct net_device *dev,
					 struct sock *sk,
					 can_err_mask_t err_mask)

{
	if (err_mask)
		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
				  raw_rcv, sk);
}
242
/* Tear down every receive registration of this socket: the content
 * filter list first, then the error-frame filter.
 */
static inline void raw_disable_allfilters(struct net *net,
					  struct net_device *dev,
					  struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
	raw_disable_errfilter(net, dev, sk, ro->err_mask);
}
252
253static int raw_enable_allfilters(struct net *net, struct net_device *dev,
254				 struct sock *sk)
255{
256	struct raw_sock *ro = raw_sk(sk);
257	int err;
258
259	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
260	if (!err) {
261		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
262		if (err)
263			raw_disable_filters(net, dev, sk, ro->filter,
264					    ro->count);
265	}
266
267	return err;
268}
269
/* React to a netdevice event for one raw socket.  Called from
 * raw_notifier() for every socket on raw_notifier_list; ignores events
 * from other network namespaces or for interfaces this socket is not
 * bound to.
 */
static void raw_notify(struct raw_sock *ro, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &ro->sk;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	if (ro->ifindex != dev->ifindex)
		return;

	switch (msg) {
	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound)
			raw_disable_allfilters(dev_net(dev), dev, sk);

		/* count > 1 means the filter list was kmalloc'ed */
		if (ro->count > 1)
			kfree(ro->filter);

		ro->ifindex = 0;
		ro->bound = 0;
		ro->count = 0;
		release_sock(sk);

		/* wake up any blocked reader with ENODEV */
		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;
	}
}
308
/* Netdevice notifier callback: fan the event out to every raw socket.
 * The list lock is dropped around raw_notify() (which takes the sock
 * lock and may sleep); raw_busy_notifier marks the entry currently
 * being processed so raw_release() can wait for it before unlinking.
 */
static int raw_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&raw_notifier_lock);
	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
		/* drop the spinlock while notifying; raw_busy_notifier
		 * keeps our position visible to raw_release()
		 */
		spin_unlock(&raw_notifier_lock);
		raw_notify(raw_busy_notifier, msg, dev);
		spin_lock(&raw_notifier_lock);
	}
	raw_busy_notifier = NULL;
	spin_unlock(&raw_notifier_lock);
	return NOTIFY_DONE;
}
331
/* proto .init hook: set socket defaults (one accept-all filter,
 * loopback on), allocate per-CPU dedup state and join the notifier
 * list.  Returns 0 or -ENOMEM.
 */
static int raw_init(struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	ro->bound            = 0;
	ro->ifindex          = 0;

	/* set default filter to single entry dfilter */
	ro->dfilter.can_id   = 0;
	ro->dfilter.can_mask = MASK_ALL;
	ro->filter           = &ro->dfilter;
	ro->count            = 1;

	/* set default loopback behaviour */
	ro->loopback         = 1;
	ro->recv_own_msgs    = 0;
	ro->fd_frames        = 0;
	ro->join_filters     = 0;

	/* alloc_percpu provides zero'ed memory */
	ro->uniq = alloc_percpu(struct uniqframe);
	if (unlikely(!ro->uniq))
		return -ENOMEM;

	/* set notifier */
	spin_lock(&raw_notifier_lock);
	list_add_tail(&ro->notifier, &raw_notifier_list);
	spin_unlock(&raw_notifier_lock);

	return 0;
}
363
/* proto_ops .release hook: unlink from the notifier list (waiting out a
 * concurrently running raw_notifier() via the raw_busy_notifier marker),
 * drop all filter registrations, free dynamic state and orphan the sock.
 */
static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	/* spin until the notifier is no longer processing this socket,
	 * then remove ourselves from the list
	 */
	spin_lock(&raw_notifier_lock);
	while (raw_busy_notifier == ro) {
		spin_unlock(&raw_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&raw_notifier_lock);
	}
	list_del(&ro->notifier);
	spin_unlock(&raw_notifier_lock);

	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (dev) {
				raw_disable_allfilters(dev_net(dev), dev, sk);
				dev_put(dev);
			}
		} else {
			/* bound to "all interfaces" (ifindex 0) */
			raw_disable_allfilters(sock_net(sk), NULL, sk);
		}
	}

	/* count > 1 means the filter list was kmalloc'ed */
	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->count = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}
416
/* proto_ops .bind hook: bind the socket to one CAN interface (or all,
 * when can_ifindex == 0) and (re)install the filter registrations.
 * New filters are registered before the old ones are removed, so a
 * failure leaves the previous binding intact.
 */
static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < RAW_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;

	lock_sock(sk);

	/* re-binding to the same interface is a no-op */
	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			err = -ENODEV;
			goto out;
		}
		/* defer the ENETDOWN report until after release_sock() */
		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), dev, sk);
		dev_put(dev);
	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->ifindex) {
				struct net_device *dev;

				dev = dev_get_by_index(sock_net(sk),
						       ro->ifindex);
				if (dev) {
					raw_disable_allfilters(dev_net(dev),
							       dev, sk);
					dev_put(dev);
				}
			} else {
				raw_disable_allfilters(sock_net(sk), NULL, sk);
			}
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
	}

 out:
	release_sock(sk);

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
	}

	return err;
}
496
/* proto_ops .getname hook: report the bound interface index.
 * Peer names are not meaningful for CAN raw sockets (-EOPNOTSUPP).
 * Returns the address length on success.
 */
static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
		       int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);

	if (peer)
		return -EOPNOTSUPP;

	memset(addr, 0, RAW_MIN_NAMELEN);
	addr->can_family  = AF_CAN;
	addr->can_ifindex = ro->ifindex;

	return RAW_MIN_NAMELEN;
}
513
/* proto_ops .setsockopt hook for SOL_CAN_RAW options.
 * For CAN_RAW_FILTER and CAN_RAW_ERR_FILTER the new registrations are
 * installed before the old ones are removed (under rtnl + sock lock),
 * so a failure leaves the previous configuration untouched.
 * The remaining options are plain integer copies into raw_sock.
 */
static int raw_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
	struct can_filter sfilter;         /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_sockptr(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		rtnl_lock();
		lock_sock(sk);

		/* resolve the bound device, if any */
		if (ro->bound && ro->ifindex) {
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (!dev) {
				if (count > 1)
					kfree(filter);
				err = -ENODEV;
				goto out_fil;
			}
		}

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 &sfilter, 1);
			else
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 filter, count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
					    ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count  = count;

 out_fil:
		if (dev)
			dev_put(dev);

		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_sockptr(&err_mask, optval, optlen))
			return -EFAULT;

		err_mask &= CAN_ERR_MASK;

		rtnl_lock();
		lock_sock(sk);

		/* resolve the bound device, if any */
		if (ro->bound && ro->ifindex) {
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (!dev) {
				err = -ENODEV;
				goto out_err;
			}
		}

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(sock_net(sk), dev, sk,
						   err_mask);

			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(sock_net(sk), dev, sk,
					      ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

 out_err:
		if (dev)
			dev_put(dev);

		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_sockptr(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_FD_FRAMES:
		if (optlen != sizeof(ro->fd_frames))
			return -EINVAL;

		if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_JOIN_FILTERS:
		if (optlen != sizeof(ro->join_filters))
			return -EINVAL;

		if (copy_from_sockptr(&ro->join_filters, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}
690
/* proto_ops .getsockopt hook for SOL_CAN_RAW options.
 * CAN_RAW_FILTER copies the active filter list (returning -ERANGE with
 * the required size in *optlen when the user buffer is too small); the
 * other options return a single integer value.
 */
static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int len;
	void *val;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);

			/* user space buffer to small for filter list? */
			if (len < fsize) {
				/* return -ERANGE and needed space in optlen */
				err = -ERANGE;
				if (put_user(fsize, optlen))
					err = -EFAULT;
			} else {
				if (len > fsize)
					len = fsize;
				if (copy_to_user(optval, ro->filter, len))
					err = -EFAULT;
			}
		} else {
			len = 0;
		}
		release_sock(sk);

		if (!err)
			err = put_user(len, optlen);
		return err;

	case CAN_RAW_ERR_FILTER:
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		val = &ro->err_mask;
		break;

	case CAN_RAW_LOOPBACK:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->loopback;
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->recv_own_msgs;
		break;

	case CAN_RAW_FD_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->fd_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->join_filters;
		break;

	default:
		return -ENOPROTOOPT;
	}

	/* common tail for the fixed-size integer options */
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, val, len))
		return -EFAULT;
	return 0;
}
774
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* proto_ops .sendmsg hook: transmit exactly one CAN (or, with
 * CAN_RAW_FD_FRAMES on an FD-capable device, CAN FD) frame.
 * 'size' must match CAN_MTU or CANFD_MTU exactly; the destination
 * interface comes from msg_name or from the socket binding.
 * Returns 'size' on success or a negative errno.
 */
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err;

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < RAW_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else {
		/* fall back to the interface this socket is bound to */
		ifindex = ro->ifindex;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev)
		return -ENXIO;

	/* frame size must exactly match the (FD-aware) CAN MTU */
	err = -EINVAL;
	if (ro->fd_frames && dev->mtu == CANFD_MTU) {
		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
			goto put_dev;
	} else {
		if (unlikely(size != CAN_MTU))
			goto put_dev;
	}

	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto put_dev;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	err = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (err < 0)
		goto free_skb;

	skb_setup_tx_timestamp(skb, sk->sk_tsflags);

	skb->dev = dev;
	skb->sk = sk;
	skb->priority = sk->sk_priority;

	/* can_send() consumes the skb on success and on failure */
	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}
846
/* proto_ops .recvmsg hook: dequeue one frame queued by raw_rcv() and
 * copy it to user space, along with the originating interface (from
 * the sockaddr_can stashed in skb->cb) and the recorded CAN-specific
 * message flags.  Returns the delivered size or a negative errno.
 */
static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err = 0;
	int noblock;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size,
					  SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* truncate rather than fail when the user buffer is small */
	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(RAW_MIN_NAMELEN);
		msg->msg_namelen = RAW_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in raw_rcv() */
	msg->msg_flags |= *(raw_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}
892
/* proto_ops .ioctl stub: CAN raw has no socket-level ioctls. */
static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}
899
/* Socket-call dispatch table for CAN raw sockets; connection-oriented
 * operations are stubbed out with the sock_no_* helpers.
 */
static const struct proto_ops raw_ops = {
	.family        = PF_CAN,
	.release       = raw_release,
	.bind          = raw_bind,
	.connect       = sock_no_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = raw_getname,
	.poll          = datagram_poll,
	.ioctl         = raw_sock_no_ioctlcmd,
	.gettstamp     = sock_gettstamp,
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.setsockopt    = raw_setsockopt,
	.getsockopt    = raw_getsockopt,
	.sendmsg       = raw_sendmsg,
	.recvmsg       = raw_recvmsg,
	.mmap          = sock_no_mmap,
	.sendpage      = sock_no_sendpage,
};

/* obj_size makes sk_alloc() reserve room for the full raw_sock */
static struct proto raw_proto __read_mostly = {
	.name       = "CAN_RAW",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct raw_sock),
	.init       = raw_init,
};

/* registration record handed to the CAN core (SOCK_RAW / CAN_RAW) */
static const struct can_proto raw_can_proto = {
	.type       = SOCK_RAW,
	.protocol   = CAN_RAW,
	.ops        = &raw_ops,
	.prot       = &raw_proto,
};

/* netdevice event hook; see raw_notifier() */
static struct notifier_block canraw_notifier = {
	.notifier_call = raw_notifier
};
938
939static __init int raw_module_init(void)
940{
941	int err;
942
943	pr_info("can: raw protocol\n");
944
 
 
 
 
945	err = can_proto_register(&raw_can_proto);
946	if (err < 0)
947		pr_err("can: registration of raw protocol failed\n");
948	else
949		register_netdevice_notifier(&canraw_notifier);
 
 
950
 
 
951	return err;
952}
953
/* Module exit: unregister the protocol first so no new sockets appear,
 * then drop the netdevice notifier.
 */
static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
	unregister_netdevice_notifier(&canraw_notifier);
}
959
960module_init(raw_module_init);
961module_exit(raw_module_exit);
net/can/raw.c — listing as of Linux v6.2 (second copy of the same file; truncated)
   1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
   2/* raw.c - Raw sockets for protocol family CAN
   3 *
   4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
   5 * All rights reserved.
   6 *
   7 * Redistribution and use in source and binary forms, with or without
   8 * modification, are permitted provided that the following conditions
   9 * are met:
  10 * 1. Redistributions of source code must retain the above copyright
  11 *    notice, this list of conditions and the following disclaimer.
  12 * 2. Redistributions in binary form must reproduce the above copyright
  13 *    notice, this list of conditions and the following disclaimer in the
  14 *    documentation and/or other materials provided with the distribution.
  15 * 3. Neither the name of Volkswagen nor the names of its contributors
  16 *    may be used to endorse or promote products derived from this software
  17 *    without specific prior written permission.
  18 *
  19 * Alternatively, provided that this notice is retained in full, this
  20 * software may be distributed under the terms of the GNU General
  21 * Public License ("GPL") version 2, in which case the provisions of the
  22 * GPL apply INSTEAD OF those given above.
  23 *
  24 * The provided data structures and external interfaces from this code
  25 * are not restricted to be used by modules with a GPL compatible license.
  26 *
  27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  38 * DAMAGE.
  39 *
  40 */
  41
  42#include <linux/module.h>
  43#include <linux/init.h>
  44#include <linux/uio.h>
  45#include <linux/net.h>
  46#include <linux/slab.h>
  47#include <linux/netdevice.h>
  48#include <linux/socket.h>
  49#include <linux/if_arp.h>
  50#include <linux/skbuff.h>
  51#include <linux/can.h>
  52#include <linux/can/core.h>
  53#include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */
  54#include <linux/can/skb.h>
  55#include <linux/can/raw.h>
  56#include <net/sock.h>
  57#include <net/net_namespace.h>
  58
  59MODULE_DESCRIPTION("PF_CAN raw protocol");
  60MODULE_LICENSE("Dual BSD/GPL");
  61MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
  62MODULE_ALIAS("can-proto-1");
  63
  64#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
  65
  66#define MASK_ALL 0
  67
  68/* A raw socket has a list of can_filters attached to it, each receiving
  69 * the CAN frames matching that filter.  If the filter list is empty,
  70 * no CAN frames will be received by the socket.  The default after
  71 * opening the socket, is to have one filter which receives all frames.
  72 * The filter list is allocated dynamically with the exception of the
  73 * list containing only one item.  This common case is optimized by
  74 * storing the single filter in dfilter, to avoid using dynamic memory.
  75 */
  76
/* Per-CPU bookkeeping used by raw_rcv() to suppress duplicate delivery
 * when several filters of the same socket match one skb.
 */
struct uniqframe {
	int skbcnt;                  /* clone counter of the last seen skb */
	const struct sk_buff *skb;   /* last skb seen on this CPU (identity only) */
	unsigned int join_rx_count;  /* filters matched so far for this skb */
};
  82
/* Protocol-private socket state; struct sock MUST stay the first member
 * so raw_sk() can cast between the two.  v6.2 adds xl_frames for the
 * CAN XL support introduced alongside can_is_canxl_skb().
 */
struct raw_sock {
	struct sock sk;
	int bound;                 /* non-zero once raw_bind() succeeded */
	int ifindex;               /* bound interface, 0 = all interfaces */
	struct list_head notifier; /* entry in raw_notifier_list */
	int loopback;              /* CAN_RAW_LOOPBACK sockopt value */
	int recv_own_msgs;         /* CAN_RAW_RECV_OWN_MSGS sockopt value */
	int fd_frames;             /* CAN_RAW_FD_FRAMES sockopt value */
	int xl_frames;             /* CAN_RAW_XL_FRAMES sockopt value (new in v6.2) */
	int join_filters;          /* CAN_RAW_JOIN_FILTERS sockopt value */
	int count;                 /* number of active filters */
	struct can_filter dfilter; /* default/single filter */
	struct can_filter *filter; /* pointer to filter(s) */
	can_err_mask_t err_mask;   /* CAN_RAW_ERR_FILTER sockopt value */
	struct uniqframe __percpu *uniq; /* per-CPU duplicate-match state */
};
  99
 100static LIST_HEAD(raw_notifier_list);
 101static DEFINE_SPINLOCK(raw_notifier_lock);
 102static struct raw_sock *raw_busy_notifier;
 103
 104/* Return pointer to store the extra msg flags for raw_recvmsg().
 105 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 106 * in skb->cb.
 107 */
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
	/* compile-time proof that sockaddr_can plus one flags word fits
	 * into the skb->cb scratch area
	 */
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* return pointer after struct sockaddr_can */
	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}
 116
/* Downcast from generic sock to raw_sock; valid because struct sock is
 * the first member of struct raw_sock.
 */
static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}
 121
/* Per-filter receive callback (softirq context); 'data' is the
 * receiving struct sock.  Compared to v5.14 this version gates on
 * can_is_canfd_skb()/can_is_canxl_skb() instead of a raw length check
 * and uses a guard clause for the join_filters dedup branch.
 */
static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* make sure to not pass oversized frames to the socket */
	if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
	    (!ro->xl_frames && can_is_canxl_skb(oskb)))
		return;

	/* eliminate multiple filter matches for the same skb */
	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
		if (!ro->join_filters)
			return;

		this_cpu_inc(ro->uniq->join_rx_count);
		/* drop frame until all enabled filters matched */
		if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
			return;
	} else {
		/* first match for this skb on this CPU: record its identity */
		this_cpu_ptr(ro->uniq)->skb = oskb;
		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
		/* drop first frame to check all enabled filters? */
		if (ro->join_filters && ro->count > 1)
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Put the datagram to the queue so that raw_recvmsg() can get
	 * it from there. We need to pass the interface index to
	 * raw_recvmsg(). We pass a whole struct sockaddr_can in
	 * skb->cb containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}
 186
 187static int raw_enable_filters(struct net *net, struct net_device *dev,
 188			      struct sock *sk, struct can_filter *filter,
 189			      int count)
 190{
 191	int err = 0;
 192	int i;
 193
 194	for (i = 0; i < count; i++) {
 195		err = can_rx_register(net, dev, filter[i].can_id,
 196				      filter[i].can_mask,
 197				      raw_rcv, sk, "raw", sk);
 198		if (err) {
 199			/* clean up successfully registered filters */
 200			while (--i >= 0)
 201				can_rx_unregister(net, dev, filter[i].can_id,
 202						  filter[i].can_mask,
 203						  raw_rcv, sk);
 204			break;
 205		}
 206	}
 207
 208	return err;
 209}
 210
 211static int raw_enable_errfilter(struct net *net, struct net_device *dev,
 212				struct sock *sk, can_err_mask_t err_mask)
 213{
 214	int err = 0;
 215
 216	if (err_mask)
 217		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
 218				      raw_rcv, sk, "raw", sk);
 219
 220	return err;
 221}
 222
 223static void raw_disable_filters(struct net *net, struct net_device *dev,
 224				struct sock *sk, struct can_filter *filter,
 225				int count)
 226{
 227	int i;
 228
 229	for (i = 0; i < count; i++)
 230		can_rx_unregister(net, dev, filter[i].can_id,
 231				  filter[i].can_mask, raw_rcv, sk);
 232}
 233
 234static inline void raw_disable_errfilter(struct net *net,
 235					 struct net_device *dev,
 236					 struct sock *sk,
 237					 can_err_mask_t err_mask)
 238
 239{
 240	if (err_mask)
 241		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
 242				  raw_rcv, sk);
 243}
 244
 245static inline void raw_disable_allfilters(struct net *net,
 246					  struct net_device *dev,
 247					  struct sock *sk)
 248{
 249	struct raw_sock *ro = raw_sk(sk);
 250
 251	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
 252	raw_disable_errfilter(net, dev, sk, ro->err_mask);
 253}
 254
 255static int raw_enable_allfilters(struct net *net, struct net_device *dev,
 256				 struct sock *sk)
 257{
 258	struct raw_sock *ro = raw_sk(sk);
 259	int err;
 260
 261	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
 262	if (!err) {
 263		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
 264		if (err)
 265			raw_disable_filters(net, dev, sk, ro->filter,
 266					    ro->count);
 267	}
 268
 269	return err;
 270}
 271
/* raw_notify - apply a netdevice event to one CAN_RAW socket
 * @ro: the socket to check
 * @msg: notifier event (only NETDEV_UNREGISTER and NETDEV_DOWN are
 *       handled, see raw_notifier())
 * @dev: the CAN device the event refers to
 *
 * NETDEV_UNREGISTER unbinds the socket and reports ENODEV;
 * NETDEV_DOWN only reports ENETDOWN.
 */
static void raw_notify(struct raw_sock *ro, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &ro->sk;

	/* ignore devices from other network namespaces */
	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	/* only sockets bound to exactly this interface are affected */
	if (ro->ifindex != dev->ifindex)
		return;

	switch (msg) {
	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound)
			raw_disable_allfilters(dev_net(dev), dev, sk);

		/* ro->filter only points to kmalloc'ed memory for count > 1 */
		if (ro->count > 1)
			kfree(ro->filter);

		/* reset the socket to the unbound state */
		ro->ifindex = 0;
		ro->bound = 0;
		ro->count = 0;
		release_sock(sk);

		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;
	}
}
 310
/* raw_notifier - netdevice notifier callback for the CAN_RAW protocol
 *
 * Walks raw_notifier_list and calls raw_notify() for every CAN_RAW
 * socket. The spinlock is dropped around raw_notify() because it may
 * sleep (lock_sock()); raw_busy_notifier marks the entry currently
 * being processed so raw_release() can wait for it before unlinking
 * its socket from the list.
 */
static int raw_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&raw_notifier_lock);
	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
		/* drop the lock while raw_notify() may sleep */
		spin_unlock(&raw_notifier_lock);
		raw_notify(raw_busy_notifier, msg, dev);
		spin_lock(&raw_notifier_lock);
	}
	raw_busy_notifier = NULL;
	spin_unlock(&raw_notifier_lock);
	return NOTIFY_DONE;
}
 333
 334static int raw_init(struct sock *sk)
 335{
 336	struct raw_sock *ro = raw_sk(sk);
 337
 338	ro->bound            = 0;
 339	ro->ifindex          = 0;
 340
 341	/* set default filter to single entry dfilter */
 342	ro->dfilter.can_id   = 0;
 343	ro->dfilter.can_mask = MASK_ALL;
 344	ro->filter           = &ro->dfilter;
 345	ro->count            = 1;
 346
 347	/* set default loopback behaviour */
 348	ro->loopback         = 1;
 349	ro->recv_own_msgs    = 0;
 350	ro->fd_frames        = 0;
 351	ro->xl_frames        = 0;
 352	ro->join_filters     = 0;
 353
 354	/* alloc_percpu provides zero'ed memory */
 355	ro->uniq = alloc_percpu(struct uniqframe);
 356	if (unlikely(!ro->uniq))
 357		return -ENOMEM;
 358
 359	/* set notifier */
 360	spin_lock(&raw_notifier_lock);
 361	list_add_tail(&ro->notifier, &raw_notifier_list);
 362	spin_unlock(&raw_notifier_lock);
 363
 364	return 0;
 365}
 366
/* raw_release - close a CAN_RAW socket
 *
 * Waits until raw_notifier() is no longer working on this socket,
 * unlinks it from the notifier list, drops all registered filters and
 * releases the socket resources.
 */
static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	/* busy-wait until the notifier has finished with this socket,
	 * then remove it from the list under the lock
	 */
	spin_lock(&raw_notifier_lock);
	while (raw_busy_notifier == ro) {
		spin_unlock(&raw_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&raw_notifier_lock);
	}
	list_del(&ro->notifier);
	spin_unlock(&raw_notifier_lock);

	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (dev) {
				raw_disable_allfilters(dev_net(dev), dev, sk);
				dev_put(dev);
			}
		} else {
			/* socket was bound to all interfaces (ifindex 0) */
			raw_disable_allfilters(sock_net(sk), NULL, sk);
		}
	}

	/* ro->filter only points to kmalloc'ed memory for count > 1 */
	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->count = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}
 419
/* raw_bind - bind the socket to one CAN interface or to all of them
 *
 * A can_ifindex of zero binds the socket to every CAN interface of the
 * namespace. Rebinding to the currently bound interface is a no-op.
 * On a rebind the filters are registered on the new device first and
 * the old registrations are only removed when that succeeded, so a
 * failed bind leaves the previous binding intact.
 */
static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < RAW_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;

	lock_sock(sk);

	/* rebinding to the already bound interface is a no-op */
	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			err = -ENODEV;
			goto out;
		}
		/* ENETDOWN is reported after the socket lock is released */
		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), dev, sk);
		dev_put(dev);
	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->ifindex) {
				struct net_device *dev;

				dev = dev_get_by_index(sock_net(sk),
						       ro->ifindex);
				if (dev) {
					raw_disable_allfilters(dev_net(dev),
							       dev, sk);
					dev_put(dev);
				}
			} else {
				raw_disable_allfilters(sock_net(sk), NULL, sk);
			}
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
	}

 out:
	release_sock(sk);

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
	}

	return err;
}
 499
 500static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
 501		       int peer)
 502{
 503	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
 504	struct sock *sk = sock->sk;
 505	struct raw_sock *ro = raw_sk(sk);
 506
 507	if (peer)
 508		return -EOPNOTSUPP;
 509
 510	memset(addr, 0, RAW_MIN_NAMELEN);
 511	addr->can_family  = AF_CAN;
 512	addr->can_ifindex = ro->ifindex;
 513
 514	return RAW_MIN_NAMELEN;
 515}
 516
/* raw_setsockopt - handle the SOL_CAN_RAW socket options
 *
 * CAN_RAW_FILTER and CAN_RAW_ERR_FILTER (re)register receive filters
 * on the bound device (new registrations first, old ones removed only
 * on success); the remaining options copy a flag value into the
 * socket state.
 */
static int raw_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
	struct can_filter sfilter;         /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_sockptr(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		rtnl_lock();
		lock_sock(sk);

		if (ro->bound && ro->ifindex) {
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (!dev) {
				if (count > 1)
					kfree(filter);
				err = -ENODEV;
				goto out_fil;
			}
		}

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 &sfilter, 1);
			else
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 filter, count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
					    ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count  = count;

 out_fil:
		/* dev is NULL when the socket is not bound to one device;
		 * this relies on dev_put() accepting NULL
		 */
		dev_put(dev);
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_sockptr(&err_mask, optval, optlen))
			return -EFAULT;

		/* only the defined error classes can be subscribed */
		err_mask &= CAN_ERR_MASK;

		rtnl_lock();
		lock_sock(sk);

		if (ro->bound && ro->ifindex) {
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (!dev) {
				err = -ENODEV;
				goto out_err;
			}
		}

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(sock_net(sk), dev, sk,
						   err_mask);

			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(sock_net(sk), dev, sk,
					      ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

 out_err:
		/* dev is NULL when the socket is not bound to one device */
		dev_put(dev);
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_sockptr(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_FD_FRAMES:
		if (optlen != sizeof(ro->fd_frames))
			return -EINVAL;

		if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames && !ro->fd_frames) {
			/* refuse to disable CAN FD while CAN XL is active */
			ro->fd_frames = ro->xl_frames;
			return -EINVAL;
		}
		break;

	case CAN_RAW_XL_FRAMES:
		if (optlen != sizeof(ro->xl_frames))
			return -EINVAL;

		if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames)
			ro->fd_frames = ro->xl_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (optlen != sizeof(ro->join_filters))
			return -EINVAL;

		if (copy_from_sockptr(&ro->join_filters, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}
 706
/* raw_getsockopt - read back the SOL_CAN_RAW socket options */
static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int len;
	void *val;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		/* copy the filter list under the socket lock because
		 * raw_setsockopt() may replace it concurrently
		 */
		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);

			/* user space buffer to small for filter list? */
			if (len < fsize) {
				/* return -ERANGE and needed space in optlen */
				err = -ERANGE;
				if (put_user(fsize, optlen))
					err = -EFAULT;
			} else {
				if (len > fsize)
					len = fsize;
				if (copy_to_user(optval, ro->filter, len))
					err = -EFAULT;
			}
		} else {
			len = 0;
		}
		release_sock(sk);

		if (!err)
			err = put_user(len, optlen);
		return err;

	case CAN_RAW_ERR_FILTER:
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		val = &ro->err_mask;
		break;

	case CAN_RAW_LOOPBACK:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->loopback;
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->recv_own_msgs;
		break;

	case CAN_RAW_FD_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->fd_frames;
		break;

	case CAN_RAW_XL_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->xl_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->join_filters;
		break;

	default:
		return -ENOPROTOOPT;
	}

	/* common exit path for the fixed-size integer options above */
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, val, len))
		return -EFAULT;
	return 0;
}
 796
 797static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
 798{
 799	/* Classical CAN -> no checks for flags and device capabilities */
 800	if (can_is_can_skb(skb))
 801		return false;
 802
 803	/* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
 804	if (ro->fd_frames && can_is_canfd_skb(skb) &&
 805	    (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
 806		return false;
 807
 808	/* CAN XL -> needs to be enabled and a CAN XL device */
 809	if (ro->xl_frames && can_is_canxl_skb(skb) &&
 810	    can_is_canxl_dev_mtu(mtu))
 811		return false;
 812
 813	return true;
 814}
 815
/* raw_sendmsg - transmit one CAN / CAN FD / CAN XL frame
 *
 * The message payload must contain exactly one complete frame
 * structure. The destination interface is taken from msg_name when
 * given, otherwise from the interface the socket is bound to.
 * Returns the frame size on success or a negative error code.
 */
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sockcm_cookie sockc;
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err = -EINVAL;

	/* check for valid CAN frame sizes */
	if (size < CANXL_HDR_SIZE + CANXL_MIN_DLEN || size > CANXL_MTU)
		return -EINVAL;

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < RAW_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else {
		ifindex = ro->ifindex;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev)
		return -ENXIO;

	/* allocate extra room for the CAN metadata (struct can_skb_priv) */
	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto put_dev;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	/* fill the skb before testing for valid CAN frames */
	err = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (err < 0)
		goto free_skb;

	/* reject frame types not enabled on the socket / device */
	err = -EINVAL;
	if (raw_bad_txframe(ro, skb, dev->mtu))
		goto free_skb;

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto free_skb;
	}

	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb->tstamp = sockc.transmit_time;

	skb_setup_tx_timestamp(skb, sockc.tsflags);

	/* can_send() takes over the skb - no kfree_skb() on its error path */
	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}
 896
/* raw_recvmsg - receive one CAN frame from the socket queue
 *
 * The sender interface (stored in skb->cb by raw_rcv()) is returned
 * in msg_name and the CAN specific flags recorded there are merged
 * into msg_flags. Returns the copied length or a negative error.
 */
static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err = 0;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size,
					  SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	/* shorten the copy to the user buffer and flag the truncation */
	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		/* skb->cb holds the sockaddr_can filled in by raw_rcv() */
		__sockaddr_check_size(RAW_MIN_NAMELEN);
		msg->msg_namelen = RAW_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in raw_rcv() */
	msg->msg_flags |= *(raw_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}
 938
/* CAN_RAW implements no socket level ioctls of its own */
static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}
 945
/* socket layer operations for PF_CAN/CAN_RAW sockets; connection
 * oriented calls are rejected with the sock_no_* stubs
 */
static const struct proto_ops raw_ops = {
	.family        = PF_CAN,
	.release       = raw_release,
	.bind          = raw_bind,
	.connect       = sock_no_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = raw_getname,
	.poll          = datagram_poll,
	.ioctl         = raw_sock_no_ioctlcmd,
	.gettstamp     = sock_gettstamp,
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.setsockopt    = raw_setsockopt,
	.getsockopt    = raw_getsockopt,
	.sendmsg       = raw_sendmsg,
	.recvmsg       = raw_recvmsg,
	.mmap          = sock_no_mmap,
	.sendpage      = sock_no_sendpage,
};
 966
/* protocol description: per-socket size and init hook for CAN_RAW */
static struct proto raw_proto __read_mostly = {
	.name       = "CAN_RAW",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct raw_sock),
	.init       = raw_init,
};
 973
/* registration record handed to can_proto_register() */
static const struct can_proto raw_can_proto = {
	.type       = SOCK_RAW,
	.protocol   = CAN_RAW,
	.ops        = &raw_ops,
	.prot       = &raw_proto,
};
 980
/* notifier block to track CAN netdevice state changes */
static struct notifier_block canraw_notifier = {
	.notifier_call = raw_notifier
};
 984
 985static __init int raw_module_init(void)
 986{
 987	int err;
 988
 989	pr_info("can: raw protocol\n");
 990
 991	err = register_netdevice_notifier(&canraw_notifier);
 992	if (err)
 993		return err;
 994
 995	err = can_proto_register(&raw_can_proto);
 996	if (err < 0) {
 997		pr_err("can: registration of raw protocol failed\n");
 998		goto register_proto_failed;
 999	}
1000
1001	return 0;
1002
1003register_proto_failed:
1004	unregister_netdevice_notifier(&canraw_notifier);
1005	return err;
1006}
1007
/* tear down in reverse order of raw_module_init() */
static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
	unregister_netdevice_notifier(&canraw_notifier);
}
1013
/* hook the module entry/exit points into the module loader */
module_init(raw_module_init);
module_exit(raw_module_exit);