af_can.c, v6.13.7
  1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
  2/* af_can.c - Protocol family CAN core module
  3 *            (used by different CAN protocol modules)
  4 *
  5 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
  6 * All rights reserved.
  7 *
  8 * Redistribution and use in source and binary forms, with or without
  9 * modification, are permitted provided that the following conditions
 10 * are met:
 11 * 1. Redistributions of source code must retain the above copyright
 12 *    notice, this list of conditions and the following disclaimer.
 13 * 2. Redistributions in binary form must reproduce the above copyright
 14 *    notice, this list of conditions and the following disclaimer in the
 15 *    documentation and/or other materials provided with the distribution.
 16 * 3. Neither the name of Volkswagen nor the names of its contributors
 17 *    may be used to endorse or promote products derived from this software
 18 *    without specific prior written permission.
 19 *
 20 * Alternatively, provided that this notice is retained in full, this
 21 * software may be distributed under the terms of the GNU General
 22 * Public License ("GPL") version 2, in which case the provisions of the
 23 * GPL apply INSTEAD OF those given above.
 24 *
 25 * The provided data structures and external interfaces from this code
 26 * are not restricted to be used by modules with a GPL compatible license.
 27 *
 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 39 * DAMAGE.
 40 *
 41 */
 42
 43#include <linux/module.h>
 44#include <linux/stddef.h>
 45#include <linux/init.h>
 46#include <linux/kmod.h>
 47#include <linux/slab.h>
 48#include <linux/list.h>
 49#include <linux/spinlock.h>
 50#include <linux/rcupdate.h>
 51#include <linux/uaccess.h>
 52#include <linux/net.h>
 53#include <linux/netdevice.h>
 54#include <linux/socket.h>
 55#include <linux/if_ether.h>
 56#include <linux/if_arp.h>
 57#include <linux/skbuff.h>
 58#include <linux/can.h>
 59#include <linux/can/core.h>
 60#include <linux/can/skb.h>
 61#include <linux/can/can-ml.h>
 62#include <linux/ratelimit.h>
 63#include <net/net_namespace.h>
 64#include <net/sock.h>
 65
 66#include "af_can.h"
 67
 68MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
 69MODULE_LICENSE("Dual BSD/GPL");
 70MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
 71	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
 72
 73MODULE_ALIAS_NETPROTO(PF_CAN);
 74
 75static int stats_timer __read_mostly = 1;
 76module_param(stats_timer, int, 0444);
 77MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
 78
 79static struct kmem_cache *rcv_cache __read_mostly;
 80
 81/* table of registered CAN protocols */
 82static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
 83static DEFINE_MUTEX(proto_tab_lock);
 84
 85static atomic_t skbcounter = ATOMIC_INIT(0);
 86
 87/* af_can socket functions */
 88
 89void can_sock_destruct(struct sock *sk)
 90{
 91	skb_queue_purge(&sk->sk_receive_queue);
 92	skb_queue_purge(&sk->sk_error_queue);
 93}
 94EXPORT_SYMBOL(can_sock_destruct);
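can_sock_destruct() is exported so that protocol modules which install their own sk_destruct handler can still reuse the common queue purging. A minimal sketch of that pattern (the protocol-specific destructor and its private cleanup are hypothetical):

static void my_proto_sock_destruct(struct sock *sk)
{
	/* hypothetical protocol-private cleanup would go here ... */

	/* ... then reuse the common PF_CAN cleanup exported above */
	can_sock_destruct(sk);
}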
 95
 96static const struct can_proto *can_get_proto(int protocol)
 97{
 98	const struct can_proto *cp;
 99
100	rcu_read_lock();
101	cp = rcu_dereference(proto_tab[protocol]);
102	if (cp && !try_module_get(cp->prot->owner))
103		cp = NULL;
104	rcu_read_unlock();
105
106	return cp;
107}
108
109static inline void can_put_proto(const struct can_proto *cp)
110{
111	module_put(cp->prot->owner);
112}
113
114static int can_create(struct net *net, struct socket *sock, int protocol,
115		      int kern)
116{
117	struct sock *sk;
118	const struct can_proto *cp;
119	int err = 0;
120
121	sock->state = SS_UNCONNECTED;
122
123	if (protocol < 0 || protocol >= CAN_NPROTO)
124		return -EINVAL;
125
126	cp = can_get_proto(protocol);
127
128#ifdef CONFIG_MODULES
129	if (!cp) {
130		/* try to load protocol module if kernel is modular */
131
132		err = request_module("can-proto-%d", protocol);
133
134		/* In case of error we only print a message but don't
135		 * return the error code immediately.  Below we will
136		 * return -EPROTONOSUPPORT
137		 */
138		if (err)
139			pr_err_ratelimited("can: request_module (can-proto-%d) failed.\n",
140					   protocol);
141
142		cp = can_get_proto(protocol);
143	}
144#endif
145
146	/* check for available protocol and correct usage */
147
148	if (!cp)
149		return -EPROTONOSUPPORT;
150
151	if (cp->type != sock->type) {
152		err = -EPROTOTYPE;
153		goto errout;
154	}
155
156	sock->ops = cp->ops;
157
158	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
159	if (!sk) {
160		err = -ENOMEM;
161		goto errout;
162	}
163
164	sock_init_data(sock, sk);
165	sk->sk_destruct = can_sock_destruct;
166
167	if (sk->sk_prot->init)
168		err = sk->sk_prot->init(sk);
169
170	if (err) {
171		/* release sk on errors */
172		sock_orphan(sk);
173		sock_put(sk);
174		sock->sk = NULL;
175	}
176
177 errout:
178	can_put_proto(cp);
179	return err;
180}
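can_create() is only reached through the PF_CAN entry in the socket family table, so from user space the whole path above is exercised by an ordinary socket() call; a minimal sketch (CAN_RAW is just one example protocol number):

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int open_can_raw_socket(void)
{
	/* protocol CAN_RAW selects the matching entry in proto_tab[] */
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	/* -1 with errno EPROTONOSUPPORT: no can-proto-<n> module found */
	return s;
}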
181
182/* af_can tx path */
183
184/**
185 * can_send - transmit a CAN frame (optional with local loopback)
186 * @skb: pointer to socket buffer with CAN frame in data section
187 * @loop: loopback for listeners on local CAN sockets (recommended default!)
188 *
189 * Due to the loopback this routine must not be called from hardirq context.
190 *
191 * Return:
192 *  0 on success
193 *  -ENETDOWN when the selected interface is down
194 *  -ENOBUFS on full driver queue (see net_xmit_errno())
195 *  -ENOMEM when local loopback failed at calling skb_clone()
196 *  -EPERM when trying to send on a non-CAN interface
197 *  -EMSGSIZE CAN frame size is bigger than CAN interface MTU
198 *  -EINVAL when the skb->data does not contain a valid CAN frame
199 */
200int can_send(struct sk_buff *skb, int loop)
201{
202	struct sk_buff *newskb = NULL;
203	struct can_pkg_stats *pkg_stats = dev_net(skb->dev)->can.pkg_stats;
204	int err = -EINVAL;
205
206	if (can_is_canxl_skb(skb)) {
207		skb->protocol = htons(ETH_P_CANXL);
208	} else if (can_is_can_skb(skb)) {
209		skb->protocol = htons(ETH_P_CAN);
210	} else if (can_is_canfd_skb(skb)) {
211		struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
212
213		skb->protocol = htons(ETH_P_CANFD);
214
215		/* set CAN FD flag for CAN FD frames by default */
216		cfd->flags |= CANFD_FDF;
217	} else {
218		goto inval_skb;
219	}
220
221	/* Make sure the CAN frame can pass the selected CAN netdevice. */
222	if (unlikely(skb->len > skb->dev->mtu)) {
223		err = -EMSGSIZE;
224		goto inval_skb;
225	}
226
227	if (unlikely(skb->dev->type != ARPHRD_CAN)) {
228		err = -EPERM;
229		goto inval_skb;
230	}
231
232	if (unlikely(!(skb->dev->flags & IFF_UP))) {
233		err = -ENETDOWN;
234		goto inval_skb;
235	}
236
237	skb->ip_summed = CHECKSUM_UNNECESSARY;
238
239	skb_reset_mac_header(skb);
240	skb_reset_network_header(skb);
241	skb_reset_transport_header(skb);
242
243	if (loop) {
244		/* local loopback of sent CAN frames */
245
246		/* indication for the CAN driver: do loopback */
247		skb->pkt_type = PACKET_LOOPBACK;
248
249		/* The reference to the originating sock may be required
250		 * by the receiving socket to check whether the frame is
251		 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
252		 * Therefore we have to ensure that skb->sk remains the
253		 * reference to the originating sock by restoring skb->sk
254		 * after each skb_clone() or skb_orphan() usage.
255		 */
256
257		if (!(skb->dev->flags & IFF_ECHO)) {
258			/* If the interface is not capable to do loopback
259			 * itself, we do it here.
260			 */
261			newskb = skb_clone(skb, GFP_ATOMIC);
262			if (!newskb) {
263				kfree_skb(skb);
264				return -ENOMEM;
265			}
266
267			can_skb_set_owner(newskb, skb->sk);
268			newskb->ip_summed = CHECKSUM_UNNECESSARY;
269			newskb->pkt_type = PACKET_BROADCAST;
270		}
271	} else {
272		/* indication for the CAN driver: no loopback required */
273		skb->pkt_type = PACKET_HOST;
274	}
275
276	/* send to netdevice */
277	err = dev_queue_xmit(skb);
278	if (err > 0)
279		err = net_xmit_errno(err);
280
281	if (err) {
282		kfree_skb(newskb);
283		return err;
284	}
285
286	if (newskb)
287		netif_rx(newskb);
288
289	/* update statistics */
290	pkg_stats->tx_frames++;
291	pkg_stats->tx_frames_delta++;
292
293	return 0;
294
295inval_skb:
296	kfree_skb(skb);
297	return err;
298}
299EXPORT_SYMBOL(can_send);
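A hedged sketch of an in-kernel caller handing one classical CAN frame to can_send(); it assumes alloc_can_skb() from the CAN skb helpers is available and that a suitable net_device has already been looked up:

#include <linux/can/skb.h>	/* for alloc_can_skb(), assumed location */

static int send_one_frame(struct net_device *dev)
{
	struct can_frame *cf;
	struct sk_buff *skb;

	/* alloc_can_skb() sets skb->dev and the CAN skb private area */
	skb = alloc_can_skb(dev, &cf);
	if (!skb)
		return -ENOMEM;

	cf->can_id = 0x123;
	cf->len = 2;
	cf->data[0] = 0xde;
	cf->data[1] = 0xad;

	/* loop = 1: also deliver the frame to local listeners */
	return can_send(skb, 1);
}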
300
301/* af_can rx path */
302
303static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net,
304							struct net_device *dev)
305{
306	if (dev) {
307		struct can_ml_priv *can_ml = can_get_ml_priv(dev);
308		return &can_ml->dev_rcv_lists;
309	} else {
310		return net->can.rx_alldev_list;
311	}
312}
313
314/**
315 * effhash - hash function for 29 bit CAN identifier reduction
316 * @can_id: 29 bit CAN identifier
317 *
318 * Description:
319 *  To reduce the linear traversal in one linked list of _single_ EFF CAN
320 *  frame subscriptions the 29 bit identifier is mapped to 10 bits.
321 *  (see CAN_EFF_RCV_HASH_BITS definition)
322 *
323 * Return:
324 *  Hash value from 0x000 - 0x3FF ( enforced by CAN_EFF_RCV_HASH_BITS mask )
325 */
326static unsigned int effhash(canid_t can_id)
327{
328	unsigned int hash;
329
330	hash = can_id;
331	hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
332	hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);
333
334	return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
335}
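Per the kernel-doc above, CAN_EFF_RCV_HASH_BITS is 10, so the fold reduces to hash = (id ^ (id >> 10) ^ (id >> 20)) & 0x3FF; two worked examples (editor's illustration, not from the original source):

	effhash(0x00000400) == (0x00000400 ^ 0x00000001 ^ 0x00000000) & 0x3FF == 0x001
	effhash(0x1FFFFFFF) == (0x1FFFFFFF ^ 0x0007FFFF ^ 0x000001FF) & 0x3FF == 0x1FF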
336
337/**
338 * can_rcv_list_find - determine optimal filterlist inside device filter struct
339 * @can_id: pointer to CAN identifier of a given can_filter
340 * @mask: pointer to CAN mask of a given can_filter
341 * @dev_rcv_lists: pointer to the device filter struct
342 *
343 * Description:
344 *  Returns the optimal filterlist to reduce the filter handling in the
345 *  receive path. This function is called by service functions that need
346 *  to register or unregister a can_filter in the filter lists.
347 *
348 *  A filter matches in general, when
349 *
350 *          <received_can_id> & mask == can_id & mask
351 *
 353 *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 354 *  a bit that is relevant for the filter.
354 *
355 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
356 *  filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
357 *  frames there is a special filterlist and a special rx path filter handling.
358 *
359 * Return:
360 *  Pointer to optimal filterlist for the given can_id/mask pair.
361 *  Consistency checked mask.
362 *  Reduced can_id to have a preprocessed filter compare value.
363 */
364static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
365					    struct can_dev_rcv_lists *dev_rcv_lists)
366{
367	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
368
369	/* filter for error message frames in extra filterlist */
370	if (*mask & CAN_ERR_FLAG) {
371		/* clear CAN_ERR_FLAG in filter entry */
372		*mask &= CAN_ERR_MASK;
373		return &dev_rcv_lists->rx[RX_ERR];
374	}
375
376	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
377
378#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)
379
380	/* ensure valid values in can_mask for 'SFF only' frame filtering */
381	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
382		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);
383
384	/* reduce condition testing at receive time */
385	*can_id &= *mask;
386
387	/* inverse can_id/can_mask filter */
388	if (inv)
389		return &dev_rcv_lists->rx[RX_INV];
390
391	/* mask == 0 => no condition testing at receive time */
392	if (!(*mask))
393		return &dev_rcv_lists->rx[RX_ALL];
394
395	/* extra filterlists for the subscription of a single non-RTR can_id */
396	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
397	    !(*can_id & CAN_RTR_FLAG)) {
398		if (*can_id & CAN_EFF_FLAG) {
399			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
400				return &dev_rcv_lists->rx_eff[effhash(*can_id)];
401		} else {
402			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
403				return &dev_rcv_lists->rx_sff[*can_id];
404		}
405	}
406
407	/* default: filter via can_id/can_mask */
408	return &dev_rcv_lists->rx[RX_FIL];
409}
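To make the mapping concrete, a few hypothetical filter pairs and the list the code above would place them on (CAN_SFF_MASK is 0x7FF):

	can_id = 0x000,                  mask = 0x000                                      -> rx[RX_ALL]
	can_id = 0x123,                  mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG -> rx_sff[0x123]
	can_id = 0x123 | CAN_INV_FILTER, mask = CAN_SFF_MASK                               -> rx[RX_INV]
	can_id = 0x000,                  mask = CAN_ERR_FLAG | CAN_ERR_MASK                -> rx[RX_ERR]
	can_id = 0x123,                  mask = 0x7F0                                      -> rx[RX_FIL]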
410
411/**
412 * can_rx_register - subscribe CAN frames from a specific interface
413 * @net: the applicable net namespace
414 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
415 * @can_id: CAN identifier (see description)
416 * @mask: CAN mask (see description)
417 * @func: callback function on filter match
418 * @data: returned parameter for callback function
419 * @ident: string for calling module identification
420 * @sk: socket pointer (might be NULL)
421 *
422 * Description:
423 *  Invokes the callback function with the received sk_buff and the given
424 *  parameter 'data' on a matching receive filter. A filter matches, when
425 *
426 *          <received_can_id> & mask == can_id & mask
427 *
428 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
429 *  filter for error message frames (CAN_ERR_FLAG bit set in mask).
430 *
431 *  The provided pointer to the sk_buff is guaranteed to be valid as long as
432 *  the callback function is running. The callback function must *not* free
 433 *  the given sk_buff while processing its task. When the given sk_buff is
434 *  needed after the end of the callback function it must be cloned inside
435 *  the callback function with skb_clone().
436 *
437 * Return:
438 *  0 on success
439 *  -ENOMEM on missing cache mem to create subscription entry
440 *  -ENODEV unknown device
441 */
442int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
443		    canid_t mask, void (*func)(struct sk_buff *, void *),
444		    void *data, char *ident, struct sock *sk)
445{
446	struct receiver *rcv;
447	struct hlist_head *rcv_list;
448	struct can_dev_rcv_lists *dev_rcv_lists;
449	struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
450
451	/* insert new receiver  (dev,canid,mask) -> (func,data) */
452
453	if (dev && (dev->type != ARPHRD_CAN || !can_get_ml_priv(dev)))
454		return -ENODEV;
455
456	if (dev && !net_eq(net, dev_net(dev)))
457		return -ENODEV;
458
459	rcv = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
460	if (!rcv)
461		return -ENOMEM;
462
463	spin_lock_bh(&net->can.rcvlists_lock);
464
465	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
466	rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);
467
468	rcv->can_id = can_id;
469	rcv->mask = mask;
470	rcv->matches = 0;
471	rcv->func = func;
472	rcv->data = data;
473	rcv->ident = ident;
474	rcv->sk = sk;
475
476	hlist_add_head_rcu(&rcv->list, rcv_list);
477	dev_rcv_lists->entries++;
478
479	rcv_lists_stats->rcv_entries++;
480	rcv_lists_stats->rcv_entries_max = max(rcv_lists_stats->rcv_entries_max,
481					       rcv_lists_stats->rcv_entries);
482	spin_unlock_bh(&net->can.rcvlists_lock);
483
484	return 0;
485}
486EXPORT_SYMBOL(can_rx_register);
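A hedged sketch of how a protocol module would subscribe with can_rx_register(); the callback, the priv pointer, dev, net and sk are stand-ins for whatever the caller already holds:

static void my_rcv(struct sk_buff *skb, void *data)
{
	/* the skb is only valid while this callback runs (see kernel-doc
	 * above); clone it with skb_clone() if it is needed afterwards
	 */
}

static int my_subscribe(struct net *net, struct net_device *dev,
			void *priv, struct sock *sk)
{
	/* subscribe to SFF id 0x123 on one device in namespace 'net' */
	return can_rx_register(net, dev, 0x123, CAN_SFF_MASK, my_rcv, priv,
			       "my-proto", sk);
}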
487
488/* can_rx_delete_receiver - rcu callback for single receiver entry removal */
489static void can_rx_delete_receiver(struct rcu_head *rp)
490{
491	struct receiver *rcv = container_of(rp, struct receiver, rcu);
492	struct sock *sk = rcv->sk;
493
494	kmem_cache_free(rcv_cache, rcv);
495	if (sk)
496		sock_put(sk);
497}
498
499/**
500 * can_rx_unregister - unsubscribe CAN frames from a specific interface
501 * @net: the applicable net namespace
502 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
503 * @can_id: CAN identifier
504 * @mask: CAN mask
505 * @func: callback function on filter match
506 * @data: returned parameter for callback function
507 *
508 * Description:
509 *  Removes subscription entry depending on given (subscription) values.
510 */
511void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
512		       canid_t mask, void (*func)(struct sk_buff *, void *),
513		       void *data)
514{
515	struct receiver *rcv = NULL;
516	struct hlist_head *rcv_list;
517	struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
518	struct can_dev_rcv_lists *dev_rcv_lists;
519
520	if (dev && dev->type != ARPHRD_CAN)
521		return;
522
523	if (dev && !net_eq(net, dev_net(dev)))
524		return;
525
526	spin_lock_bh(&net->can.rcvlists_lock);
527
528	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
529	rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);
530
531	/* Search the receiver list for the item to delete.  This should
532	 * exist, since no receiver may be unregistered that hasn't
533	 * been registered before.
534	 */
535	hlist_for_each_entry_rcu(rcv, rcv_list, list) {
536		if (rcv->can_id == can_id && rcv->mask == mask &&
537		    rcv->func == func && rcv->data == data)
538			break;
539	}
540
541	/* Check for bugs in CAN protocol implementations using af_can.c:
542	 * 'rcv' will be NULL if no matching list item was found for removal.
543	 * As this case may potentially happen when closing a socket while
544	 * the notifier for removing the CAN netdev is running we just print
545	 * a warning here.
546	 */
547	if (!rcv) {
548		pr_warn("can: receive list entry not found for dev %s, id %03X, mask %03X\n",
549			DNAME(dev), can_id, mask);
550		goto out;
551	}
552
553	hlist_del_rcu(&rcv->list);
554	dev_rcv_lists->entries--;
555
556	if (rcv_lists_stats->rcv_entries > 0)
557		rcv_lists_stats->rcv_entries--;
558
559 out:
560	spin_unlock_bh(&net->can.rcvlists_lock);
561
562	/* schedule the receiver item for deletion */
563	if (rcv) {
564		if (rcv->sk)
565			sock_hold(rcv->sk);
566		call_rcu(&rcv->rcu, can_rx_delete_receiver);
567	}
568}
569EXPORT_SYMBOL(can_rx_unregister);
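The unregister call must repeat the exact (dev, can_id, mask, func, data) tuple used at registration time, otherwise the list search above will not find the entry; continuing the hypothetical example from can_rx_register():

static void my_unsubscribe(struct net *net, struct net_device *dev, void *priv)
{
	/* mirrors the earlier can_rx_register() arguments exactly */
	can_rx_unregister(net, dev, 0x123, CAN_SFF_MASK, my_rcv, priv);
}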
570
571static inline void deliver(struct sk_buff *skb, struct receiver *rcv)
572{
573	rcv->func(skb, rcv->data);
574	rcv->matches++;
575}
576
577static int can_rcv_filter(struct can_dev_rcv_lists *dev_rcv_lists, struct sk_buff *skb)
578{
579	struct receiver *rcv;
580	int matches = 0;
581	struct can_frame *cf = (struct can_frame *)skb->data;
582	canid_t can_id = cf->can_id;
583
584	if (dev_rcv_lists->entries == 0)
585		return 0;
586
587	if (can_id & CAN_ERR_FLAG) {
588		/* check for error message frame entries only */
589		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ERR], list) {
590			if (can_id & rcv->mask) {
591				deliver(skb, rcv);
592				matches++;
593			}
594		}
595		return matches;
596	}
597
598	/* check for unfiltered entries */
599	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ALL], list) {
600		deliver(skb, rcv);
601		matches++;
602	}
603
604	/* check for can_id/mask entries */
605	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_FIL], list) {
606		if ((can_id & rcv->mask) == rcv->can_id) {
607			deliver(skb, rcv);
608			matches++;
609		}
610	}
611
612	/* check for inverted can_id/mask entries */
613	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_INV], list) {
614		if ((can_id & rcv->mask) != rcv->can_id) {
615			deliver(skb, rcv);
616			matches++;
617		}
618	}
619
620	/* check filterlists for single non-RTR can_ids */
621	if (can_id & CAN_RTR_FLAG)
622		return matches;
623
624	if (can_id & CAN_EFF_FLAG) {
625		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_eff[effhash(can_id)], list) {
626			if (rcv->can_id == can_id) {
627				deliver(skb, rcv);
628				matches++;
629			}
630		}
631	} else {
632		can_id &= CAN_SFF_MASK;
633		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_sff[can_id], list) {
634			deliver(skb, rcv);
635			matches++;
636		}
637	}
638
639	return matches;
640}
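The rx[RX_INV] walk above is what implements inverted filters as seen from user space; a hedged user-space sketch asking an already opened raw socket for everything except SFF id 0x123:

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

/* 's' is an existing CAN_RAW socket */
static int set_inverted_filter(int s)
{
	struct can_filter rfilter = {
		/* CAN_INV_FILTER routes the entry to the rx[RX_INV] list */
		.can_id   = 0x123 | CAN_INV_FILTER,
		.can_mask = CAN_SFF_MASK,
	};

	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER,
			  &rfilter, sizeof(rfilter));
}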
641
642static void can_receive(struct sk_buff *skb, struct net_device *dev)
643{
644	struct can_dev_rcv_lists *dev_rcv_lists;
645	struct net *net = dev_net(dev);
646	struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
647	int matches;
648
649	/* update statistics */
650	pkg_stats->rx_frames++;
651	pkg_stats->rx_frames_delta++;
652
653	/* create non-zero unique skb identifier together with *skb */
654	while (!(can_skb_prv(skb)->skbcnt))
655		can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
656
657	rcu_read_lock();
658
659	/* deliver the packet to sockets listening on all devices */
660	matches = can_rcv_filter(net->can.rx_alldev_list, skb);
661
662	/* find receive list for this device */
663	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
664	matches += can_rcv_filter(dev_rcv_lists, skb);
665
666	rcu_read_unlock();
667
668	/* consume the skbuff allocated by the netdevice driver */
669	consume_skb(skb);
670
671	if (matches > 0) {
672		pkg_stats->matches++;
673		pkg_stats->matches_delta++;
674	}
675}
676
677static int can_rcv(struct sk_buff *skb, struct net_device *dev,
678		   struct packet_type *pt, struct net_device *orig_dev)
679{
680	if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_can_skb(skb))) {
681		pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
682			     dev->type, skb->len);
683
684		kfree_skb(skb);
685		return NET_RX_DROP;
686	}
687
688	can_receive(skb, dev);
689	return NET_RX_SUCCESS;
690}
691
692static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
693		     struct packet_type *pt, struct net_device *orig_dev)
694{
695	if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_canfd_skb(skb))) {
696		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
697			     dev->type, skb->len);
698
699		kfree_skb(skb);
700		return NET_RX_DROP;
701	}
702
703	can_receive(skb, dev);
704	return NET_RX_SUCCESS;
705}
706
707static int canxl_rcv(struct sk_buff *skb, struct net_device *dev,
708		     struct packet_type *pt, struct net_device *orig_dev)
709{
710	if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_canxl_skb(skb))) {
711		pr_warn_once("PF_CAN: dropped non conform CAN XL skbuff: dev type %d, len %d\n",
712			     dev->type, skb->len);
713
714		kfree_skb(skb);
715		return NET_RX_DROP;
716	}
717
718	can_receive(skb, dev);
719	return NET_RX_SUCCESS;
720}
721
722/* af_can protocol functions */
723
724/**
725 * can_proto_register - register CAN transport protocol
726 * @cp: pointer to CAN protocol structure
727 *
728 * Return:
729 *  0 on success
730 *  -EINVAL invalid (out of range) protocol number
731 *  -EBUSY  protocol already in use
732 *  -ENOBUF if proto_register() fails
733 */
734int can_proto_register(const struct can_proto *cp)
735{
736	int proto = cp->protocol;
737	int err = 0;
738
739	if (proto < 0 || proto >= CAN_NPROTO) {
740		pr_err("can: protocol number %d out of range\n", proto);
741		return -EINVAL;
742	}
743
744	err = proto_register(cp->prot, 0);
745	if (err < 0)
746		return err;
747
748	mutex_lock(&proto_tab_lock);
749
750	if (rcu_access_pointer(proto_tab[proto])) {
751		pr_err("can: protocol %d already registered\n", proto);
752		err = -EBUSY;
753	} else {
754		RCU_INIT_POINTER(proto_tab[proto], cp);
755	}
756
757	mutex_unlock(&proto_tab_lock);
758
759	if (err < 0)
760		proto_unregister(cp->prot);
761
762	return err;
763}
764EXPORT_SYMBOL(can_proto_register);
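A hedged sketch of the registration pattern a CAN transport protocol module would follow; my_proto_ops and my_proto are assumed to exist elsewhere in such a module, and the MODULE_ALIAS line is what lets request_module("can-proto-%d") in can_create() autoload it:

static const struct can_proto my_can_proto = {
	.type     = SOCK_RAW,		/* must match the socket() type */
	.protocol = CAN_RAW,		/* slot in proto_tab[] */
	.ops      = &my_proto_ops,	/* struct proto_ops, assumed */
	.prot     = &my_proto,		/* struct proto, assumed */
};

static int __init my_proto_module_init(void)
{
	return can_proto_register(&my_can_proto);
}

static void __exit my_proto_module_exit(void)
{
	can_proto_unregister(&my_can_proto);
}

module_init(my_proto_module_init);
module_exit(my_proto_module_exit);
MODULE_ALIAS("can-proto-1");		/* 1 == CAN_RAW, enables autoloading */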
765
766/**
767 * can_proto_unregister - unregister CAN transport protocol
768 * @cp: pointer to CAN protocol structure
769 */
770void can_proto_unregister(const struct can_proto *cp)
771{
772	int proto = cp->protocol;
773
774	mutex_lock(&proto_tab_lock);
775	BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
776	RCU_INIT_POINTER(proto_tab[proto], NULL);
777	mutex_unlock(&proto_tab_lock);
778
779	synchronize_rcu();
780
781	proto_unregister(cp->prot);
782}
783EXPORT_SYMBOL(can_proto_unregister);
784
785static int can_pernet_init(struct net *net)
786{
787	spin_lock_init(&net->can.rcvlists_lock);
788	net->can.rx_alldev_list =
789		kzalloc(sizeof(*net->can.rx_alldev_list), GFP_KERNEL);
790	if (!net->can.rx_alldev_list)
791		goto out;
792	net->can.pkg_stats = kzalloc(sizeof(*net->can.pkg_stats), GFP_KERNEL);
793	if (!net->can.pkg_stats)
794		goto out_free_rx_alldev_list;
795	net->can.rcv_lists_stats = kzalloc(sizeof(*net->can.rcv_lists_stats), GFP_KERNEL);
796	if (!net->can.rcv_lists_stats)
797		goto out_free_pkg_stats;
798
799	if (IS_ENABLED(CONFIG_PROC_FS)) {
800		/* the statistics are updated every second (timer triggered) */
801		if (stats_timer) {
802			timer_setup(&net->can.stattimer, can_stat_update,
803				    0);
804			mod_timer(&net->can.stattimer,
805				  round_jiffies(jiffies + HZ));
806		}
807		net->can.pkg_stats->jiffies_init = jiffies;
808		can_init_proc(net);
809	}
810
811	return 0;
812
813 out_free_pkg_stats:
814	kfree(net->can.pkg_stats);
815 out_free_rx_alldev_list:
816	kfree(net->can.rx_alldev_list);
817 out:
818	return -ENOMEM;
819}
820
821static void can_pernet_exit(struct net *net)
822{
823	if (IS_ENABLED(CONFIG_PROC_FS)) {
824		can_remove_proc(net);
825		if (stats_timer)
826			del_timer_sync(&net->can.stattimer);
827	}
828
829	kfree(net->can.rx_alldev_list);
830	kfree(net->can.pkg_stats);
831	kfree(net->can.rcv_lists_stats);
832}
833
834/* af_can module init/exit functions */
835
836static struct packet_type can_packet __read_mostly = {
837	.type = cpu_to_be16(ETH_P_CAN),
838	.func = can_rcv,
839};
840
841static struct packet_type canfd_packet __read_mostly = {
842	.type = cpu_to_be16(ETH_P_CANFD),
843	.func = canfd_rcv,
844};
845
846static struct packet_type canxl_packet __read_mostly = {
847	.type = cpu_to_be16(ETH_P_CANXL),
848	.func = canxl_rcv,
849};
850
851static const struct net_proto_family can_family_ops = {
852	.family = PF_CAN,
853	.create = can_create,
854	.owner  = THIS_MODULE,
855};
856
857static struct pernet_operations can_pernet_ops __read_mostly = {
858	.init = can_pernet_init,
859	.exit = can_pernet_exit,
860};
861
862static __init int can_init(void)
863{
864	int err;
865
866	/* check for correct padding to be able to use the structs similarly */
867	BUILD_BUG_ON(offsetof(struct can_frame, len) !=
868		     offsetof(struct canfd_frame, len) ||
869		     offsetof(struct can_frame, len) !=
870		     offsetof(struct canxl_frame, flags) ||
871		     offsetof(struct can_frame, data) !=
872		     offsetof(struct canfd_frame, data));
873
874	pr_info("can: controller area network core\n");
875
876	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
877				      0, 0, NULL);
878	if (!rcv_cache)
879		return -ENOMEM;
880
881	err = register_pernet_subsys(&can_pernet_ops);
882	if (err)
883		goto out_pernet;
884
885	/* protocol register */
886	err = sock_register(&can_family_ops);
887	if (err)
888		goto out_sock;
889
890	dev_add_pack(&can_packet);
891	dev_add_pack(&canfd_packet);
892	dev_add_pack(&canxl_packet);
893
894	return 0;
895
896out_sock:
897	unregister_pernet_subsys(&can_pernet_ops);
898out_pernet:
899	kmem_cache_destroy(rcv_cache);
900
901	return err;
902}
903
904static __exit void can_exit(void)
905{
906	/* protocol unregister */
907	dev_remove_pack(&canxl_packet);
908	dev_remove_pack(&canfd_packet);
909	dev_remove_pack(&can_packet);
910	sock_unregister(PF_CAN);
911
912	unregister_pernet_subsys(&can_pernet_ops);
913
914	rcu_barrier(); /* Wait for completion of call_rcu()'s */
915
916	kmem_cache_destroy(rcv_cache);
917}
918
919module_init(can_init);
920module_exit(can_exit);
af_can.c, v3.5.6
  1/*
  2 * af_can.c - Protocol family CAN core module
  3 *            (used by different CAN protocol modules)
  4 *
  5 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  6 * All rights reserved.
  7 *
  8 * Redistribution and use in source and binary forms, with or without
  9 * modification, are permitted provided that the following conditions
 10 * are met:
 11 * 1. Redistributions of source code must retain the above copyright
 12 *    notice, this list of conditions and the following disclaimer.
 13 * 2. Redistributions in binary form must reproduce the above copyright
 14 *    notice, this list of conditions and the following disclaimer in the
 15 *    documentation and/or other materials provided with the distribution.
 16 * 3. Neither the name of Volkswagen nor the names of its contributors
 17 *    may be used to endorse or promote products derived from this software
 18 *    without specific prior written permission.
 19 *
 20 * Alternatively, provided that this notice is retained in full, this
 21 * software may be distributed under the terms of the GNU General
 22 * Public License ("GPL") version 2, in which case the provisions of the
 23 * GPL apply INSTEAD OF those given above.
 24 *
 25 * The provided data structures and external interfaces from this code
 26 * are not restricted to be used by modules with a GPL compatible license.
 27 *
 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 39 * DAMAGE.
 40 *
 41 */
 42
 43#include <linux/module.h>
 44#include <linux/init.h>
 45#include <linux/kmod.h>
 46#include <linux/slab.h>
 47#include <linux/list.h>
 48#include <linux/spinlock.h>
 49#include <linux/rcupdate.h>
 50#include <linux/uaccess.h>
 51#include <linux/net.h>
 52#include <linux/netdevice.h>
 53#include <linux/socket.h>
 54#include <linux/if_ether.h>
 55#include <linux/if_arp.h>
 56#include <linux/skbuff.h>
 57#include <linux/can.h>
 58#include <linux/can/core.h>
 59#include <linux/ratelimit.h>
 60#include <net/net_namespace.h>
 61#include <net/sock.h>
 62
 63#include "af_can.h"
 64
 65static __initdata const char banner[] = KERN_INFO
 66	"can: controller area network core (" CAN_VERSION_STRING ")\n";
 67
 68MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
 69MODULE_LICENSE("Dual BSD/GPL");
 70MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
 71	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
 72
 73MODULE_ALIAS_NETPROTO(PF_CAN);
 74
 75static int stats_timer __read_mostly = 1;
 76module_param(stats_timer, int, S_IRUGO);
 77MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
 78
 79/* receive filters subscribed for 'all' CAN devices */
 80struct dev_rcv_lists can_rx_alldev_list;
 81static DEFINE_SPINLOCK(can_rcvlists_lock);
 82
 83static struct kmem_cache *rcv_cache __read_mostly;
 84
 85/* table of registered CAN protocols */
 86static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
 87static DEFINE_MUTEX(proto_tab_lock);
 88
 89struct timer_list can_stattimer;   /* timer for statistics update */
 90struct s_stats    can_stats;       /* packet statistics */
 91struct s_pstats   can_pstats;      /* receive list statistics */
 92
 93/*
 94 * af_can socket functions
 95 */
 96
 97int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 98{
 99	struct sock *sk = sock->sk;
100
101	switch (cmd) {
102
103	case SIOCGSTAMP:
104		return sock_get_timestamp(sk, (struct timeval __user *)arg);
105
106	default:
107		return -ENOIOCTLCMD;
108	}
109}
110EXPORT_SYMBOL(can_ioctl);
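Since can_ioctl() in this older version only forwards SIOCGSTAMP, a user-space program on such a kernel would read the receive timestamp of the last frame like this (sketch, error handling omitted):

#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/sockios.h>

/* timestamp of the most recently received CAN frame on socket 's' */
static int get_rx_timestamp(int s, struct timeval *tv)
{
	return ioctl(s, SIOCGSTAMP, tv);
}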
111
112static void can_sock_destruct(struct sock *sk)
113{
114	skb_queue_purge(&sk->sk_receive_queue);
115}
116
117static const struct can_proto *can_get_proto(int protocol)
118{
119	const struct can_proto *cp;
120
121	rcu_read_lock();
122	cp = rcu_dereference(proto_tab[protocol]);
123	if (cp && !try_module_get(cp->prot->owner))
124		cp = NULL;
125	rcu_read_unlock();
126
127	return cp;
128}
129
130static inline void can_put_proto(const struct can_proto *cp)
131{
132	module_put(cp->prot->owner);
133}
134
135static int can_create(struct net *net, struct socket *sock, int protocol,
136		      int kern)
137{
138	struct sock *sk;
139	const struct can_proto *cp;
140	int err = 0;
141
142	sock->state = SS_UNCONNECTED;
143
144	if (protocol < 0 || protocol >= CAN_NPROTO)
145		return -EINVAL;
146
147	if (!net_eq(net, &init_net))
148		return -EAFNOSUPPORT;
149
150	cp = can_get_proto(protocol);
151
152#ifdef CONFIG_MODULES
153	if (!cp) {
154		/* try to load protocol module if kernel is modular */
155
156		err = request_module("can-proto-%d", protocol);
157
158		/*
159		 * In case of error we only print a message but don't
160		 * return the error code immediately.  Below we will
161		 * return -EPROTONOSUPPORT
162		 */
163		if (err)
164			printk_ratelimited(KERN_ERR "can: request_module "
165			       "(can-proto-%d) failed.\n", protocol);
166
167		cp = can_get_proto(protocol);
168	}
169#endif
170
171	/* check for available protocol and correct usage */
172
173	if (!cp)
174		return -EPROTONOSUPPORT;
175
176	if (cp->type != sock->type) {
177		err = -EPROTOTYPE;
178		goto errout;
179	}
180
181	sock->ops = cp->ops;
182
183	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
184	if (!sk) {
185		err = -ENOMEM;
186		goto errout;
187	}
188
189	sock_init_data(sock, sk);
190	sk->sk_destruct = can_sock_destruct;
191
192	if (sk->sk_prot->init)
193		err = sk->sk_prot->init(sk);
194
195	if (err) {
196		/* release sk on errors */
197		sock_orphan(sk);
198		sock_put(sk);
199	}
200
201 errout:
202	can_put_proto(cp);
203	return err;
204}
205
206/*
207 * af_can tx path
208 */
209
210/**
211 * can_send - transmit a CAN frame (optional with local loopback)
212 * @skb: pointer to socket buffer with CAN frame in data section
213 * @loop: loopback for listeners on local CAN sockets (recommended default!)
214 *
215 * Due to the loopback this routine must not be called from hardirq context.
216 *
217 * Return:
218 *  0 on success
219 *  -ENETDOWN when the selected interface is down
220 *  -ENOBUFS on full driver queue (see net_xmit_errno())
221 *  -ENOMEM when local loopback failed at calling skb_clone()
222 *  -EPERM when trying to send on a non-CAN interface
223 *  -EINVAL when the skb->data does not contain a valid CAN frame
224 */
225int can_send(struct sk_buff *skb, int loop)
226{
227	struct sk_buff *newskb = NULL;
228	struct can_frame *cf = (struct can_frame *)skb->data;
229	int err;
230
231	if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
232		kfree_skb(skb);
233		return -EINVAL;
234	}
235
236	if (skb->dev->type != ARPHRD_CAN) {
237		kfree_skb(skb);
238		return -EPERM;
239	}
240
241	if (!(skb->dev->flags & IFF_UP)) {
242		kfree_skb(skb);
243		return -ENETDOWN;
244	}
245
246	skb->protocol = htons(ETH_P_CAN);
247	skb_reset_network_header(skb);
248	skb_reset_transport_header(skb);
249
250	if (loop) {
251		/* local loopback of sent CAN frames */
252
253		/* indication for the CAN driver: do loopback */
254		skb->pkt_type = PACKET_LOOPBACK;
255
256		/*
257		 * The reference to the originating sock may be required
258		 * by the receiving socket to check whether the frame is
259		 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
260		 * Therefore we have to ensure that skb->sk remains the
261		 * reference to the originating sock by restoring skb->sk
262		 * after each skb_clone() or skb_orphan() usage.
263		 */
264
265		if (!(skb->dev->flags & IFF_ECHO)) {
266			/*
267			 * If the interface is not capable to do loopback
268			 * itself, we do it here.
269			 */
270			newskb = skb_clone(skb, GFP_ATOMIC);
271			if (!newskb) {
272				kfree_skb(skb);
273				return -ENOMEM;
274			}
275
276			newskb->sk = skb->sk;
277			newskb->ip_summed = CHECKSUM_UNNECESSARY;
278			newskb->pkt_type = PACKET_BROADCAST;
279		}
280	} else {
281		/* indication for the CAN driver: no loopback required */
282		skb->pkt_type = PACKET_HOST;
283	}
284
285	/* send to netdevice */
286	err = dev_queue_xmit(skb);
287	if (err > 0)
288		err = net_xmit_errno(err);
289
290	if (err) {
291		kfree_skb(newskb);
292		return err;
293	}
294
295	if (newskb)
296		netif_rx_ni(newskb);
297
298	/* update statistics */
299	can_stats.tx_frames++;
300	can_stats.tx_frames_delta++;
301
302	return 0;
303}
304EXPORT_SYMBOL(can_send);
305
306/*
307 * af_can rx path
308 */
309
310static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
311{
312	if (!dev)
313		return &can_rx_alldev_list;
314	else
315		return (struct dev_rcv_lists *)dev->ml_priv;
316}
317
318/**
319 * find_rcv_list - determine optimal filterlist inside device filter struct
320 * @can_id: pointer to CAN identifier of a given can_filter
321 * @mask: pointer to CAN mask of a given can_filter
322 * @d: pointer to the device filter struct
323 *
324 * Description:
325 *  Returns the optimal filterlist to reduce the filter handling in the
326 *  receive path. This function is called by service functions that need
327 *  to register or unregister a can_filter in the filter lists.
328 *
329 *  A filter matches in general, when
330 *
331 *          <received_can_id> & mask == can_id & mask
332 *
 333 *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 334 *  a bit that is relevant for the filter.
335 *
336 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
337 *  filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
338 *  there is a special filterlist and a special rx path filter handling.
339 *
340 * Return:
341 *  Pointer to optimal filterlist for the given can_id/mask pair.
 342 *  Consistency checked mask.
343 *  Reduced can_id to have a preprocessed filter compare value.
344 */
345static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
346					struct dev_rcv_lists *d)
347{
348	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
349
350	/* filter for error frames in extra filterlist */
351	if (*mask & CAN_ERR_FLAG) {
352		/* clear CAN_ERR_FLAG in filter entry */
353		*mask &= CAN_ERR_MASK;
354		return &d->rx[RX_ERR];
355	}
356
357	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
358
359#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)
360
361	/* ensure valid values in can_mask for 'SFF only' frame filtering */
362	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
363		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);
364
365	/* reduce condition testing at receive time */
366	*can_id &= *mask;
367
368	/* inverse can_id/can_mask filter */
369	if (inv)
370		return &d->rx[RX_INV];
371
372	/* mask == 0 => no condition testing at receive time */
373	if (!(*mask))
374		return &d->rx[RX_ALL];
375
376	/* extra filterlists for the subscription of a single non-RTR can_id */
377	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
378	    !(*can_id & CAN_RTR_FLAG)) {
379
380		if (*can_id & CAN_EFF_FLAG) {
381			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
382				/* RFC: a future use-case for hash-tables? */
383				return &d->rx[RX_EFF];
384			}
385		} else {
386			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
387				return &d->rx_sff[*can_id];
388		}
389	}
390
391	/* default: filter via can_id/can_mask */
392	return &d->rx[RX_FIL];
393}
394
395/**
396 * can_rx_register - subscribe CAN frames from a specific interface
 397 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
398 * @can_id: CAN identifier (see description)
399 * @mask: CAN mask (see description)
400 * @func: callback function on filter match
401 * @data: returned parameter for callback function
 402 * @ident: string for calling module identification
403 *
404 * Description:
405 *  Invokes the callback function with the received sk_buff and the given
406 *  parameter 'data' on a matching receive filter. A filter matches, when
407 *
408 *          <received_can_id> & mask == can_id & mask
409 *
410 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
411 *  filter for error frames (CAN_ERR_FLAG bit set in mask).
412 *
413 *  The provided pointer to the sk_buff is guaranteed to be valid as long as
414 *  the callback function is running. The callback function must *not* free
 415 *  the given sk_buff while processing its task. When the given sk_buff is
416 *  needed after the end of the callback function it must be cloned inside
417 *  the callback function with skb_clone().
418 *
419 * Return:
420 *  0 on success
421 *  -ENOMEM on missing cache mem to create subscription entry
422 *  -ENODEV unknown device
423 */
424int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
425		    void (*func)(struct sk_buff *, void *), void *data,
426		    char *ident)
427{
428	struct receiver *r;
429	struct hlist_head *rl;
430	struct dev_rcv_lists *d;
431	int err = 0;
432
433	/* insert new receiver  (dev,canid,mask) -> (func,data) */
434
435	if (dev && dev->type != ARPHRD_CAN)
436		return -ENODEV;
437
438	r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
439	if (!r)
440		return -ENOMEM;
441
442	spin_lock(&can_rcvlists_lock);
443
444	d = find_dev_rcv_lists(dev);
445	if (d) {
446		rl = find_rcv_list(&can_id, &mask, d);
447
448		r->can_id  = can_id;
449		r->mask    = mask;
450		r->matches = 0;
451		r->func    = func;
452		r->data    = data;
453		r->ident   = ident;
454
455		hlist_add_head_rcu(&r->list, rl);
456		d->entries++;
457
458		can_pstats.rcv_entries++;
459		if (can_pstats.rcv_entries_max < can_pstats.rcv_entries)
460			can_pstats.rcv_entries_max = can_pstats.rcv_entries;
461	} else {
462		kmem_cache_free(rcv_cache, r);
463		err = -ENODEV;
464	}
465
466	spin_unlock(&can_rcvlists_lock);
467
468	return err;
469}
470EXPORT_SYMBOL(can_rx_register);
471
472/*
473 * can_rx_delete_receiver - rcu callback for single receiver entry removal
474 */
475static void can_rx_delete_receiver(struct rcu_head *rp)
476{
477	struct receiver *r = container_of(rp, struct receiver, rcu);
478
479	kmem_cache_free(rcv_cache, r);
480}
481
482/**
483 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 484 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
485 * @can_id: CAN identifier
486 * @mask: CAN mask
487 * @func: callback function on filter match
488 * @data: returned parameter for callback function
489 *
490 * Description:
491 *  Removes subscription entry depending on given (subscription) values.
492 */
493void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
494		       void (*func)(struct sk_buff *, void *), void *data)
495{
496	struct receiver *r = NULL;
497	struct hlist_head *rl;
498	struct hlist_node *next;
499	struct dev_rcv_lists *d;
500
501	if (dev && dev->type != ARPHRD_CAN)
502		return;
503
504	spin_lock(&can_rcvlists_lock);
505
506	d = find_dev_rcv_lists(dev);
507	if (!d) {
508		printk(KERN_ERR "BUG: receive list not found for "
509		       "dev %s, id %03X, mask %03X\n",
510		       DNAME(dev), can_id, mask);
511		goto out;
512	}
513
514	rl = find_rcv_list(&can_id, &mask, d);
515
516	/*
517	 * Search the receiver list for the item to delete.  This should
518	 * exist, since no receiver may be unregistered that hasn't
519	 * been registered before.
520	 */
521
522	hlist_for_each_entry_rcu(r, next, rl, list) {
523		if (r->can_id == can_id && r->mask == mask &&
524		    r->func == func && r->data == data)
525			break;
526	}
527
528	/*
529	 * Check for bugs in CAN protocol implementations:
530	 * If no matching list item was found, the list cursor variable next
531	 * will be NULL, while r will point to the last item of the list.
532	 */
533
534	if (!next) {
535		printk(KERN_ERR "BUG: receive list entry not found for "
536		       "dev %s, id %03X, mask %03X\n",
537		       DNAME(dev), can_id, mask);
538		r = NULL;
539		goto out;
540	}
541
542	hlist_del_rcu(&r->list);
543	d->entries--;
544
545	if (can_pstats.rcv_entries > 0)
546		can_pstats.rcv_entries--;
547
548	/* remove device structure requested by NETDEV_UNREGISTER */
549	if (d->remove_on_zero_entries && !d->entries) {
550		kfree(d);
551		dev->ml_priv = NULL;
552	}
553
554 out:
555	spin_unlock(&can_rcvlists_lock);
556
557	/* schedule the receiver item for deletion */
558	if (r)
559		call_rcu(&r->rcu, can_rx_delete_receiver);
560}
561EXPORT_SYMBOL(can_rx_unregister);
562
563static inline void deliver(struct sk_buff *skb, struct receiver *r)
564{
565	r->func(skb, r->data);
566	r->matches++;
567}
568
569static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
570{
571	struct receiver *r;
572	struct hlist_node *n;
573	int matches = 0;
574	struct can_frame *cf = (struct can_frame *)skb->data;
575	canid_t can_id = cf->can_id;
576
577	if (d->entries == 0)
578		return 0;
579
580	if (can_id & CAN_ERR_FLAG) {
581		/* check for error frame entries only */
582		hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
583			if (can_id & r->mask) {
584				deliver(skb, r);
585				matches++;
586			}
587		}
588		return matches;
589	}
590
591	/* check for unfiltered entries */
592	hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) {
593		deliver(skb, r);
594		matches++;
595	}
596
597	/* check for can_id/mask entries */
598	hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) {
599		if ((can_id & r->mask) == r->can_id) {
600			deliver(skb, r);
601			matches++;
602		}
603	}
604
605	/* check for inverted can_id/mask entries */
606	hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) {
607		if ((can_id & r->mask) != r->can_id) {
608			deliver(skb, r);
609			matches++;
610		}
611	}
612
613	/* check filterlists for single non-RTR can_ids */
614	if (can_id & CAN_RTR_FLAG)
615		return matches;
616
617	if (can_id & CAN_EFF_FLAG) {
618		hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
619			if (r->can_id == can_id) {
620				deliver(skb, r);
621				matches++;
622			}
623		}
624	} else {
625		can_id &= CAN_SFF_MASK;
626		hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) {
627			deliver(skb, r);
628			matches++;
629		}
630	}
631
632	return matches;
633}
634
635static int can_rcv(struct sk_buff *skb, struct net_device *dev,
636		   struct packet_type *pt, struct net_device *orig_dev)
637{
638	struct dev_rcv_lists *d;
639	struct can_frame *cf = (struct can_frame *)skb->data;
640	int matches;
641
642	if (!net_eq(dev_net(dev), &init_net))
643		goto drop;
644
645	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
646		      skb->len != sizeof(struct can_frame) ||
647		      cf->can_dlc > 8,
 648		      "PF_CAN: dropped non conform skbuff: "
649		      "dev type %d, len %d, can_dlc %d\n",
650		      dev->type, skb->len, cf->can_dlc))
651		goto drop;
652
653	/* update statistics */
654	can_stats.rx_frames++;
655	can_stats.rx_frames_delta++;
656
657	rcu_read_lock();
658
659	/* deliver the packet to sockets listening on all devices */
660	matches = can_rcv_filter(&can_rx_alldev_list, skb);
661
662	/* find receive list for this device */
663	d = find_dev_rcv_lists(dev);
664	if (d)
665		matches += can_rcv_filter(d, skb);
666
667	rcu_read_unlock();
668
669	/* consume the skbuff allocated by the netdevice driver */
670	consume_skb(skb);
671
672	if (matches > 0) {
673		can_stats.matches++;
674		can_stats.matches_delta++;
675	}
676
677	return NET_RX_SUCCESS;
678
679drop:
680	kfree_skb(skb);
681	return NET_RX_DROP;
682}
683
684/*
685 * af_can protocol functions
686 */
687
688/**
689 * can_proto_register - register CAN transport protocol
690 * @cp: pointer to CAN protocol structure
691 *
692 * Return:
693 *  0 on success
694 *  -EINVAL invalid (out of range) protocol number
695 *  -EBUSY  protocol already in use
696 *  -ENOBUF if proto_register() fails
697 */
698int can_proto_register(const struct can_proto *cp)
699{
700	int proto = cp->protocol;
701	int err = 0;
702
703	if (proto < 0 || proto >= CAN_NPROTO) {
704		printk(KERN_ERR "can: protocol number %d out of range\n",
705		       proto);
706		return -EINVAL;
707	}
708
709	err = proto_register(cp->prot, 0);
710	if (err < 0)
711		return err;
712
713	mutex_lock(&proto_tab_lock);
714
715	if (proto_tab[proto]) {
716		printk(KERN_ERR "can: protocol %d already registered\n",
717		       proto);
718		err = -EBUSY;
719	} else
720		RCU_INIT_POINTER(proto_tab[proto], cp);
721
722	mutex_unlock(&proto_tab_lock);
723
724	if (err < 0)
725		proto_unregister(cp->prot);
726
727	return err;
728}
729EXPORT_SYMBOL(can_proto_register);
730
731/**
732 * can_proto_unregister - unregister CAN transport protocol
733 * @cp: pointer to CAN protocol structure
734 */
735void can_proto_unregister(const struct can_proto *cp)
736{
737	int proto = cp->protocol;
738
739	mutex_lock(&proto_tab_lock);
740	BUG_ON(proto_tab[proto] != cp);
741	RCU_INIT_POINTER(proto_tab[proto], NULL);
742	mutex_unlock(&proto_tab_lock);
743
744	synchronize_rcu();
745
746	proto_unregister(cp->prot);
747}
748EXPORT_SYMBOL(can_proto_unregister);
749
750/*
751 * af_can notifier to create/remove CAN netdevice specific structs
752 */
753static int can_notifier(struct notifier_block *nb, unsigned long msg,
754			void *data)
755{
756	struct net_device *dev = (struct net_device *)data;
757	struct dev_rcv_lists *d;
758
759	if (!net_eq(dev_net(dev), &init_net))
760		return NOTIFY_DONE;
761
762	if (dev->type != ARPHRD_CAN)
763		return NOTIFY_DONE;
764
765	switch (msg) {
766
767	case NETDEV_REGISTER:
768
769		/* create new dev_rcv_lists for this device */
770		d = kzalloc(sizeof(*d), GFP_KERNEL);
771		if (!d) {
772			printk(KERN_ERR
773			       "can: allocation of receive list failed\n");
774			return NOTIFY_DONE;
775		}
776		BUG_ON(dev->ml_priv);
777		dev->ml_priv = d;
778
779		break;
780
781	case NETDEV_UNREGISTER:
782		spin_lock(&can_rcvlists_lock);
783
784		d = dev->ml_priv;
785		if (d) {
786			if (d->entries)
787				d->remove_on_zero_entries = 1;
788			else {
789				kfree(d);
790				dev->ml_priv = NULL;
791			}
792		} else
793			printk(KERN_ERR "can: notifier: receive list not "
794			       "found for dev %s\n", dev->name);
795
796		spin_unlock(&can_rcvlists_lock);
797
798		break;
799	}
800
801	return NOTIFY_DONE;
802}
803
804/*
805 * af_can module init/exit functions
806 */
807
808static struct packet_type can_packet __read_mostly = {
809	.type = cpu_to_be16(ETH_P_CAN),
810	.dev  = NULL,
811	.func = can_rcv,
812};
813
814static const struct net_proto_family can_family_ops = {
815	.family = PF_CAN,
816	.create = can_create,
817	.owner  = THIS_MODULE,
818};
819
820/* notifier block for netdevice event */
821static struct notifier_block can_netdev_notifier __read_mostly = {
822	.notifier_call = can_notifier,
823};
824
825static __init int can_init(void)
826{
827	printk(banner);
828
829	memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
830
831	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
832				      0, 0, NULL);
833	if (!rcv_cache)
834		return -ENOMEM;
835
836	if (stats_timer) {
837		/* the statistics are updated every second (timer triggered) */
838		setup_timer(&can_stattimer, can_stat_update, 0);
839		mod_timer(&can_stattimer, round_jiffies(jiffies + HZ));
840	} else
841		can_stattimer.function = NULL;
842
843	can_init_proc();
844
845	/* protocol register */
846	sock_register(&can_family_ops);
847	register_netdevice_notifier(&can_netdev_notifier);
848	dev_add_pack(&can_packet);
849
850	return 0;
851}
852
853static __exit void can_exit(void)
854{
855	struct net_device *dev;
856
857	if (stats_timer)
858		del_timer_sync(&can_stattimer);
859
860	can_remove_proc();
861
862	/* protocol unregister */
863	dev_remove_pack(&can_packet);
864	unregister_netdevice_notifier(&can_netdev_notifier);
865	sock_unregister(PF_CAN);
866
867	/* remove created dev_rcv_lists from still registered CAN devices */
868	rcu_read_lock();
869	for_each_netdev_rcu(&init_net, dev) {
870		if (dev->type == ARPHRD_CAN && dev->ml_priv){
871
872			struct dev_rcv_lists *d = dev->ml_priv;
873
874			BUG_ON(d->entries);
875			kfree(d);
876			dev->ml_priv = NULL;
877		}
878	}
879	rcu_read_unlock();
880
881	rcu_barrier(); /* Wait for completion of call_rcu()'s */
882
883	kmem_cache_destroy(rcv_cache);
884}
885
886module_init(can_init);
887module_exit(can_exit);