1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2/*
3 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
4 *
5 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Volkswagen nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * Alternatively, provided that this notice is retained in full, this
21 * software may be distributed under the terms of the GNU General
22 * Public License ("GPL") version 2, in which case the provisions of the
23 * GPL apply INSTEAD OF those given above.
24 *
25 * The provided data structures and external interfaces from this code
26 * are not restricted to be used by modules with a GPL compatible license.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39 * DAMAGE.
40 *
41 */
42
43#include <linux/module.h>
44#include <linux/init.h>
45#include <linux/interrupt.h>
46#include <linux/hrtimer.h>
47#include <linux/list.h>
48#include <linux/proc_fs.h>
49#include <linux/seq_file.h>
50#include <linux/uio.h>
51#include <linux/net.h>
52#include <linux/netdevice.h>
53#include <linux/socket.h>
54#include <linux/if_arp.h>
55#include <linux/skbuff.h>
56#include <linux/can.h>
57#include <linux/can/core.h>
58#include <linux/can/skb.h>
59#include <linux/can/bcm.h>
60#include <linux/slab.h>
61#include <net/sock.h>
62#include <net/net_namespace.h>
63
64/*
65 * To send multiple CAN frame contents within TX_SETUP or to filter
66 * CAN messages with a multiplex index within RX_SETUP, the number of
67 * different filters is limited to 256 due to the one byte index value.
68 */
69#define MAX_NFRAMES 256
70
71/* limit timers to 400 days for sending/timeouts */
72#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
73
74/* use of last_frames[index].flags */
75#define RX_LOCAL 0x10 /* frame was created on the local host */
76#define RX_OWN 0x20 /* frame was sent via the socket it was received on */
77#define RX_RECV 0x40 /* received data for this element */
78#define RX_THR 0x80 /* element has not been sent due to throttle feature */
79#define BCM_CAN_FLAGS_MASK 0x0F /* to clean private flags after usage */
80
81/* get best masking value for can_rx_register() for a given single can_id */
82#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
83 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
84 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
85
86MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
87MODULE_LICENSE("Dual BSD/GPL");
88MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
89MODULE_ALIAS("can-proto-2");
90
91#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
92
93/*
94 * easy access to the first 64 bits of the can(fd)_frame payload. cp->data is
95 * 64 bit aligned, so the offset has to be a multiple of 8, which is ensured
96 * by the only callers, bcm_rx_cmp_to_index() and bcm_rx_handler().
97 */
98static inline u64 get_u64(const struct canfd_frame *cp, int offset)
99{
100 return *(u64 *)(cp->data + offset);
101}
102
103struct bcm_op {
104 struct list_head list;
105 struct rcu_head rcu;
106 int ifindex;
107 canid_t can_id;
108 u32 flags;
109 unsigned long frames_abs, frames_filtered;
110 struct bcm_timeval ival1, ival2;
111 struct hrtimer timer, thrtimer;
112 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
113 int rx_ifindex;
114 int cfsiz;
115 u32 count;
116 u32 nframes;
117 u32 currframe;
118 /* void pointers to arrays of struct can[fd]_frame */
119 void *frames;
120 void *last_frames;
121 struct canfd_frame sframe;
122 struct canfd_frame last_sframe;
123 struct sock *sk;
124 struct net_device *rx_reg_dev;
125};
126
127struct bcm_sock {
128 struct sock sk;
129 int bound;
130 int ifindex;
131 struct list_head notifier;
132 struct list_head rx_ops;
133 struct list_head tx_ops;
134 unsigned long dropped_usr_msgs;
135 struct proc_dir_entry *bcm_proc_read;
136 char procname [32]; /* inode number in decimal with \0 */
137};
138
139static LIST_HEAD(bcm_notifier_list);
140static DEFINE_SPINLOCK(bcm_notifier_lock);
141static struct bcm_sock *bcm_busy_notifier;
142
143/* Return pointer to store the extra msg flags for bcm_recvmsg().
144 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
145 * in skb->cb.
146 */
147static inline unsigned int *bcm_flags(struct sk_buff *skb)
148{
149 /* return pointer after struct sockaddr_can */
150 return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
151}
152
153static inline struct bcm_sock *bcm_sk(const struct sock *sk)
154{
155 return (struct bcm_sock *)sk;
156}
157
158static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
159{
160 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
161}
162
163/* check limitations for timeval provided by user */
164static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
165{
166 if ((msg_head->ival1.tv_sec < 0) ||
167 (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
168 (msg_head->ival1.tv_usec < 0) ||
169 (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
170 (msg_head->ival2.tv_sec < 0) ||
171 (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
172 (msg_head->ival2.tv_usec < 0) ||
173 (msg_head->ival2.tv_usec >= USEC_PER_SEC))
174 return true;
175
176 return false;
177}
178
179#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
180#define OPSIZ sizeof(struct bcm_op)
181#define MHSIZ sizeof(struct bcm_msg_head)
182
183/*
184 * procfs functions
185 */
186#if IS_ENABLED(CONFIG_PROC_FS)
187static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
188{
189 struct net_device *dev;
190
191 if (!ifindex)
192 return "any";
193
194 rcu_read_lock();
195 dev = dev_get_by_index_rcu(net, ifindex);
196 if (dev)
197 strcpy(result, dev->name);
198 else
199 strcpy(result, "???");
200 rcu_read_unlock();
201
202 return result;
203}
204
205static int bcm_proc_show(struct seq_file *m, void *v)
206{
207 char ifname[IFNAMSIZ];
208 struct net *net = m->private;
209 struct sock *sk = (struct sock *)pde_data(m->file->f_inode);
210 struct bcm_sock *bo = bcm_sk(sk);
211 struct bcm_op *op;
212
213 seq_printf(m, ">>> socket %pK", sk->sk_socket);
214 seq_printf(m, " / sk %pK", sk);
215 seq_printf(m, " / bo %pK", bo);
216 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
217 seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
218 seq_printf(m, " <<<\n");
219
220 list_for_each_entry(op, &bo->rx_ops, list) {
221
222 unsigned long reduction;
223
224 /* print only active entries & prevent division by zero */
225 if (!op->frames_abs)
226 continue;
227
228 seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
229 bcm_proc_getifname(net, ifname, op->ifindex));
230
231 if (op->flags & CAN_FD_FRAME)
232 seq_printf(m, "(%u)", op->nframes);
233 else
234 seq_printf(m, "[%u]", op->nframes);
235
236 seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
237
238 if (op->kt_ival1)
239 seq_printf(m, "timeo=%lld ",
240 (long long)ktime_to_us(op->kt_ival1));
241
242 if (op->kt_ival2)
243 seq_printf(m, "thr=%lld ",
244 (long long)ktime_to_us(op->kt_ival2));
245
246 seq_printf(m, "# recv %ld (%ld) => reduction: ",
247 op->frames_filtered, op->frames_abs);
248
249 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
250
251 seq_printf(m, "%s%ld%%\n",
252 (reduction == 100) ? "near " : "", reduction);
253 }
254
255 list_for_each_entry(op, &bo->tx_ops, list) {
256
257 seq_printf(m, "tx_op: %03X %s ", op->can_id,
258 bcm_proc_getifname(net, ifname, op->ifindex));
259
260 if (op->flags & CAN_FD_FRAME)
261 seq_printf(m, "(%u) ", op->nframes);
262 else
263 seq_printf(m, "[%u] ", op->nframes);
264
265 if (op->kt_ival1)
266 seq_printf(m, "t1=%lld ",
267 (long long)ktime_to_us(op->kt_ival1));
268
269 if (op->kt_ival2)
270 seq_printf(m, "t2=%lld ",
271 (long long)ktime_to_us(op->kt_ival2));
272
273 seq_printf(m, "# sent %ld\n", op->frames_abs);
274 }
275 seq_putc(m, '\n');
276 return 0;
277}
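
/*
 * For orientation, the output generated above in /proc/net/can-bcm/<inode>
 * looks roughly like this (interface name, pointer values and counters are
 * made-up example values):
 *
 *   >>> socket 00000000deadbeef / sk 00000000cafebabe / bo 00000000feedface / dropped 0 / bound can0 <<<
 *   rx_op: 123 can0  [1]d timeo=5000000 # recv 7 (100) => reduction: 93%
 *   tx_op: 123 can0 [1] t2=100000 # sent 42
 */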
278#endif /* CONFIG_PROC_FS */
279
280/*
281 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
282 * of the given bcm tx op
283 */
284static void bcm_can_tx(struct bcm_op *op)
285{
286 struct sk_buff *skb;
287 struct net_device *dev;
288 struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
289 int err;
290
291 /* no target device? => exit */
292 if (!op->ifindex)
293 return;
294
295 dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
296 if (!dev) {
297 /* RFC: should this bcm_op remove itself here? */
298 return;
299 }
300
301 skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
302 if (!skb)
303 goto out;
304
305 can_skb_reserve(skb);
306 can_skb_prv(skb)->ifindex = dev->ifindex;
307 can_skb_prv(skb)->skbcnt = 0;
308
309 skb_put_data(skb, cf, op->cfsiz);
310
311 /* send with loopback */
312 skb->dev = dev;
313 can_skb_set_owner(skb, op->sk);
314 err = can_send(skb, 1);
315 if (!err)
316 op->frames_abs++;
317
318 op->currframe++;
319
320 /* reached last frame? */
321 if (op->currframe >= op->nframes)
322 op->currframe = 0;
323out:
324 dev_put(dev);
325}
326
327/*
328 * bcm_send_to_user - send a BCM message to the userspace
329 * (consisting of bcm_msg_head + x CAN frames)
330 */
331static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
332 struct canfd_frame *frames, int has_timestamp)
333{
334 struct sk_buff *skb;
335 struct canfd_frame *firstframe;
336 struct sockaddr_can *addr;
337 struct sock *sk = op->sk;
338 unsigned int datalen = head->nframes * op->cfsiz;
339 int err;
340 unsigned int *pflags;
341
342 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
343 if (!skb)
344 return;
345
346 skb_put_data(skb, head, sizeof(*head));
347
348 /* ensure space for sockaddr_can and msg flags */
349 sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
350 sizeof(unsigned int));
351
352 /* initialize msg flags */
353 pflags = bcm_flags(skb);
354 *pflags = 0;
355
356 if (head->nframes) {
357 /* CAN frames starting here */
358 firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
359
360 skb_put_data(skb, frames, datalen);
361
362 /*
363 * the BCM uses the flags-element of the canfd_frame
364 * structure for internal purposes. This is only
365 * relevant for updates that are generated by the
366 * BCM, where nframes is 1
367 */
368 if (head->nframes == 1) {
369 if (firstframe->flags & RX_LOCAL)
370 *pflags |= MSG_DONTROUTE;
371 if (firstframe->flags & RX_OWN)
372 *pflags |= MSG_CONFIRM;
373
374 firstframe->flags &= BCM_CAN_FLAGS_MASK;
375 }
376 }
377
378 if (has_timestamp) {
379 /* restore rx timestamp */
380 skb->tstamp = op->rx_stamp;
381 }
382
383 /*
384 * Put the datagram to the queue so that bcm_recvmsg() can
385 * get it from there. We need to pass the interface index to
386 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
387 * containing the interface index.
388 */
389
390 addr = (struct sockaddr_can *)skb->cb;
391 memset(addr, 0, sizeof(*addr));
392 addr->can_family = AF_CAN;
393 addr->can_ifindex = op->rx_ifindex;
394
395 err = sock_queue_rcv_skb(sk, skb);
396 if (err < 0) {
397 struct bcm_sock *bo = bcm_sk(sk);
398
399 kfree_skb(skb);
400 /* don't care about overflows in this statistic */
401 bo->dropped_usr_msgs++;
402 }
403}
404
405static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
406{
407 ktime_t ival;
408
409 if (op->kt_ival1 && op->count)
410 ival = op->kt_ival1;
411 else if (op->kt_ival2)
412 ival = op->kt_ival2;
413 else
414 return false;
415
416 hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
417 return true;
418}
419
420static void bcm_tx_start_timer(struct bcm_op *op)
421{
422 if (bcm_tx_set_expiry(op, &op->timer))
423 hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
424}
425
426/* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
427static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
428{
429 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
430 struct bcm_msg_head msg_head;
431
432 if (op->kt_ival1 && (op->count > 0)) {
433 op->count--;
434 if (!op->count && (op->flags & TX_COUNTEVT)) {
435
436 /* create notification to user */
437 memset(&msg_head, 0, sizeof(msg_head));
438 msg_head.opcode = TX_EXPIRED;
439 msg_head.flags = op->flags;
440 msg_head.count = op->count;
441 msg_head.ival1 = op->ival1;
442 msg_head.ival2 = op->ival2;
443 msg_head.can_id = op->can_id;
444 msg_head.nframes = 0;
445
446 bcm_send_to_user(op, &msg_head, NULL, 0);
447 }
448 bcm_can_tx(op);
449
450 } else if (op->kt_ival2) {
451 bcm_can_tx(op);
452 }
453
454 return bcm_tx_set_expiry(op, &op->timer) ?
455 HRTIMER_RESTART : HRTIMER_NORESTART;
456}
457
458/*
459 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
460 */
461static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
462{
463 struct bcm_msg_head head;
464
465 /* update statistics */
466 op->frames_filtered++;
467
468 /* prevent statistics overflow */
469 if (op->frames_filtered > ULONG_MAX/100)
470 op->frames_filtered = op->frames_abs = 0;
471
472 /* this element is not throttled anymore */
473 data->flags &= ~RX_THR;
474
475 memset(&head, 0, sizeof(head));
476 head.opcode = RX_CHANGED;
477 head.flags = op->flags;
478 head.count = op->count;
479 head.ival1 = op->ival1;
480 head.ival2 = op->ival2;
481 head.can_id = op->can_id;
482 head.nframes = 1;
483
484 bcm_send_to_user(op, &head, data, 1);
485}
486
487/*
488 * bcm_rx_update_and_send - process a detected relevant receive content change
489 * 1. update the last received data
490 * 2. send a notification to the user (if possible)
491 */
492static void bcm_rx_update_and_send(struct bcm_op *op,
493 struct canfd_frame *lastdata,
494 const struct canfd_frame *rxdata,
495 unsigned char traffic_flags)
496{
497 memcpy(lastdata, rxdata, op->cfsiz);
498
499 /* mark as used and throttled by default */
500 lastdata->flags |= (RX_RECV|RX_THR);
501
502 /* add own/local/remote traffic flags */
503 lastdata->flags |= traffic_flags;
504
505 /* throttling mode inactive ? */
506 if (!op->kt_ival2) {
507 /* send RX_CHANGED to the user immediately */
508 bcm_rx_changed(op, lastdata);
509 return;
510 }
511
512 /* with active throttling timer we are just done here */
513 if (hrtimer_active(&op->thrtimer))
514 return;
515
516 /* first reception with enabled throttling mode */
517 if (!op->kt_lastmsg)
518 goto rx_changed_settime;
519
520 /* got a second frame inside a potential throttle period? */
521 if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
522 ktime_to_us(op->kt_ival2)) {
523 /* do not send the saved data - only start throttle timer */
524 hrtimer_start(&op->thrtimer,
525 ktime_add(op->kt_lastmsg, op->kt_ival2),
526 HRTIMER_MODE_ABS_SOFT);
527 return;
528 }
529
530 /* the gap was big enough that throttling was not needed here */
531rx_changed_settime:
532 bcm_rx_changed(op, lastdata);
533 op->kt_lastmsg = ktime_get();
534}
535
536/*
537 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
538 * received data stored in op->last_frames[]
539 */
540static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
541 const struct canfd_frame *rxdata,
542 unsigned char traffic_flags)
543{
544 struct canfd_frame *cf = op->frames + op->cfsiz * index;
545 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
546 int i;
547
548 /*
549 * no one uses the MSBs of flags for comparison,
550 * so we use them here to detect the first reception
551 */
552
553 if (!(lcf->flags & RX_RECV)) {
554 /* received data for the first time => send update to user */
555 bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
556 return;
557 }
558
559 /* do a real check in CAN frame data section */
560 for (i = 0; i < rxdata->len; i += 8) {
561 if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
562 (get_u64(cf, i) & get_u64(lcf, i))) {
563 bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
564 return;
565 }
566 }
567
568 if (op->flags & RX_CHECK_DLC) {
569 /* do a real check in CAN frame length */
570 if (rxdata->len != lcf->len) {
571 bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
572 return;
573 }
574 }
575}
576
577/*
578 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
579 */
580static void bcm_rx_starttimer(struct bcm_op *op)
581{
582 if (op->flags & RX_NO_AUTOTIMER)
583 return;
584
585 if (op->kt_ival1)
586 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
587}
588
589/* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
590static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
591{
592 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
593 struct bcm_msg_head msg_head;
594
595 /* if the user wants to be informed when cyclic CAN messages come back */
596 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
597 /* clear received CAN frames to indicate 'nothing received' */
598 memset(op->last_frames, 0, op->nframes * op->cfsiz);
599 }
600
601 /* create notification to user */
602 memset(&msg_head, 0, sizeof(msg_head));
603 msg_head.opcode = RX_TIMEOUT;
604 msg_head.flags = op->flags;
605 msg_head.count = op->count;
606 msg_head.ival1 = op->ival1;
607 msg_head.ival2 = op->ival2;
608 msg_head.can_id = op->can_id;
609 msg_head.nframes = 0;
610
611 bcm_send_to_user(op, &msg_head, NULL, 0);
612
613 return HRTIMER_NORESTART;
614}
615
616/*
617 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
618 */
619static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
620{
621 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
622
623 if ((op->last_frames) && (lcf->flags & RX_THR)) {
624 bcm_rx_changed(op, lcf);
625 return 1;
626 }
627 return 0;
628}
629
630/*
631 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
632 */
633static int bcm_rx_thr_flush(struct bcm_op *op)
634{
635 int updated = 0;
636
637 if (op->nframes > 1) {
638 unsigned int i;
639
640 /* for MUX filter we start at index 1 */
641 for (i = 1; i < op->nframes; i++)
642 updated += bcm_rx_do_flush(op, i);
643
644 } else {
645 /* for RX_FILTER_ID and simple filter */
646 updated += bcm_rx_do_flush(op, 0);
647 }
648
649 return updated;
650}
651
652/*
653 * bcm_rx_thr_handler - the time for blocked content updates is over now:
654 * Check for throttled data and send it to the userspace
655 */
656static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
657{
658 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
659
660 if (bcm_rx_thr_flush(op)) {
661 hrtimer_forward_now(hrtimer, op->kt_ival2);
662 return HRTIMER_RESTART;
663 } else {
664 /* rearm throttle handling */
665 op->kt_lastmsg = 0;
666 return HRTIMER_NORESTART;
667 }
668}
669
670/*
671 * bcm_rx_handler - handle a CAN frame reception
672 */
673static void bcm_rx_handler(struct sk_buff *skb, void *data)
674{
675 struct bcm_op *op = (struct bcm_op *)data;
676 const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
677 unsigned int i;
678 unsigned char traffic_flags;
679
680 if (op->can_id != rxframe->can_id)
681 return;
682
683 /* make sure to handle the correct frame type (CAN / CAN FD) */
684 if (op->flags & CAN_FD_FRAME) {
685 if (!can_is_canfd_skb(skb))
686 return;
687 } else {
688 if (!can_is_can_skb(skb))
689 return;
690 }
691
692 /* disable timeout */
693 hrtimer_cancel(&op->timer);
694
695 /* save rx timestamp */
696 op->rx_stamp = skb->tstamp;
697 /* save originator for recvfrom() */
698 op->rx_ifindex = skb->dev->ifindex;
699 /* update statistics */
700 op->frames_abs++;
701
702 if (op->flags & RX_RTR_FRAME) {
703 /* send reply for RTR-request (placed in op->frames[0]) */
704 bcm_can_tx(op);
705 return;
706 }
707
708 /* compute flags to distinguish between own/local/remote CAN traffic */
709 traffic_flags = 0;
710 if (skb->sk) {
711 traffic_flags |= RX_LOCAL;
712 if (skb->sk == op->sk)
713 traffic_flags |= RX_OWN;
714 }
715
716 if (op->flags & RX_FILTER_ID) {
717 /* the easiest case */
718 bcm_rx_update_and_send(op, op->last_frames, rxframe,
719 traffic_flags);
720 goto rx_starttimer;
721 }
722
723 if (op->nframes == 1) {
724 /* simple compare with index 0 */
725 bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags);
726 goto rx_starttimer;
727 }
728
729 if (op->nframes > 1) {
730 /*
731 * multiplex compare
732 *
733 * find the first multiplex mask that fits.
734 * Remark: The MUX-mask is stored in index 0 - but only the
735 * first 64 bits of the frame data[] are relevant (CAN FD)
736 */
737
738 for (i = 1; i < op->nframes; i++) {
739 if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
740 (get_u64(op->frames, 0) &
741 get_u64(op->frames + op->cfsiz * i, 0))) {
742 bcm_rx_cmp_to_index(op, i, rxframe,
743 traffic_flags);
744 break;
745 }
746 }
747 }
748
749rx_starttimer:
750 bcm_rx_starttimer(op);
751}
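
/*
 * Userspace sketch of the multiplex filter handled above (nframes > 1).
 * The CAN ID 0x200 and the byte layout are example assumptions: data[0]
 * is the multiplexor, frame[0] carries the MUX mask, frame[1..2] carry
 * the MUX value plus the bits that are relevant for change detection.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame[3];
 *	} rx;
 *
 *	memset(&rx, 0, sizeof(rx));
 *	rx.msg_head.opcode = RX_SETUP;
 *	rx.msg_head.can_id = 0x200;
 *	rx.msg_head.nframes = 3;
 *
 *	rx.frame[0].data[0] = 0xFF;	// MUX mask: data[0] selects the index
 *	rx.frame[1].data[0] = 0x01;	// MUX value 0x01 ...
 *	rx.frame[1].data[1] = 0xFF;	// ... watch data[1] for changes
 *	rx.frame[2].data[0] = 0x02;	// MUX value 0x02 ...
 *	rx.frame[2].data[2] = 0xFF;	// ... watch data[2] for changes
 *
 *	write(s, &rx, sizeof(rx));	// s: connected CAN_BCM socket
 */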
752
753/*
754 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
755 */
756static struct bcm_op *bcm_find_op(struct list_head *ops,
757 struct bcm_msg_head *mh, int ifindex)
758{
759 struct bcm_op *op;
760
761 list_for_each_entry(op, ops, list) {
762 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
763 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
764 return op;
765 }
766
767 return NULL;
768}
769
770static void bcm_free_op_rcu(struct rcu_head *rcu_head)
771{
772 struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);
773
774 if ((op->frames) && (op->frames != &op->sframe))
775 kfree(op->frames);
776
777 if ((op->last_frames) && (op->last_frames != &op->last_sframe))
778 kfree(op->last_frames);
779
780 kfree(op);
781}
782
783static void bcm_remove_op(struct bcm_op *op)
784{
785 hrtimer_cancel(&op->timer);
786 hrtimer_cancel(&op->thrtimer);
787
788 call_rcu(&op->rcu, bcm_free_op_rcu);
789}
790
791static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
792{
793 if (op->rx_reg_dev == dev) {
794 can_rx_unregister(dev_net(dev), dev, op->can_id,
795 REGMASK(op->can_id), bcm_rx_handler, op);
796
797 /* mark as removed subscription */
798 /* mark the subscription as removed */
799 } else
800 printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
801 "mismatch %p %p\n", op->rx_reg_dev, dev);
802}
803
804/*
805 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
806 */
807static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
808 int ifindex)
809{
810 struct bcm_op *op, *n;
811
812 list_for_each_entry_safe(op, n, ops, list) {
813 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
814 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
815
816 /* disable automatic timer on frame reception */
817 op->flags |= RX_NO_AUTOTIMER;
818
819 /*
820 * Don't care if we're bound or not (due to netdev
821 * problems): can_rx_unregister() is always a safe
822 * thing to do here.
823 */
824 if (op->ifindex) {
825 /*
826 * Only remove subscriptions that had not
827 * been removed due to NETDEV_UNREGISTER
828 * in bcm_notifier()
829 */
830 if (op->rx_reg_dev) {
831 struct net_device *dev;
832
833 dev = dev_get_by_index(sock_net(op->sk),
834 op->ifindex);
835 if (dev) {
836 bcm_rx_unreg(dev, op);
837 dev_put(dev);
838 }
839 }
840 } else
841 can_rx_unregister(sock_net(op->sk), NULL,
842 op->can_id,
843 REGMASK(op->can_id),
844 bcm_rx_handler, op);
845
846 list_del(&op->list);
847 bcm_remove_op(op);
848 return 1; /* done */
849 }
850 }
851
852 return 0; /* not found */
853}
854
855/*
856 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
857 */
858static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
859 int ifindex)
860{
861 struct bcm_op *op, *n;
862
863 list_for_each_entry_safe(op, n, ops, list) {
864 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
865 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
866 list_del(&op->list);
867 bcm_remove_op(op);
868 return 1; /* done */
869 }
870 }
871
872 return 0; /* not found */
873}
874
875/*
876 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
877 */
878static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
879 int ifindex)
880{
881 struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);
882
883 if (!op)
884 return -EINVAL;
885
886 /* put current values into msg_head */
887 msg_head->flags = op->flags;
888 msg_head->count = op->count;
889 msg_head->ival1 = op->ival1;
890 msg_head->ival2 = op->ival2;
891 msg_head->nframes = op->nframes;
892
893 bcm_send_to_user(op, msg_head, op->frames, 0);
894
895 return MHSIZ;
896}
897
898/*
899 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
900 */
901static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
902 int ifindex, struct sock *sk)
903{
904 struct bcm_sock *bo = bcm_sk(sk);
905 struct bcm_op *op;
906 struct canfd_frame *cf;
907 unsigned int i;
908 int err;
909
910 /* we need a real device to send frames */
911 if (!ifindex)
912 return -ENODEV;
913
914 /* check nframes boundaries - we need at least one CAN frame */
915 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
916 return -EINVAL;
917
918 /* check timeval limitations */
919 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
920 return -EINVAL;
921
922 /* check the given can_id */
923 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
924 if (op) {
925 /* update existing BCM operation */
926
927 /*
928 * Do we need more space for the CAN frames than currently
929 * allocated? -> This is a _really_ unusual use-case and
930 * therefore (complexity / locking) it is not supported.
931 */
932 if (msg_head->nframes > op->nframes)
933 return -E2BIG;
934
935 /* update CAN frames content */
936 for (i = 0; i < msg_head->nframes; i++) {
937
938 cf = op->frames + op->cfsiz * i;
939 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
940
941 if (op->flags & CAN_FD_FRAME) {
942 if (cf->len > 64)
943 err = -EINVAL;
944 } else {
945 if (cf->len > 8)
946 err = -EINVAL;
947 }
948
949 if (err < 0)
950 return err;
951
952 if (msg_head->flags & TX_CP_CAN_ID) {
953 /* copy can_id into frame */
954 cf->can_id = msg_head->can_id;
955 }
956 }
957 op->flags = msg_head->flags;
958
959 } else {
960 /* insert new BCM operation for the given can_id */
961
962 op = kzalloc(OPSIZ, GFP_KERNEL);
963 if (!op)
964 return -ENOMEM;
965
966 op->can_id = msg_head->can_id;
967 op->cfsiz = CFSIZ(msg_head->flags);
968 op->flags = msg_head->flags;
969
970 /* create array for CAN frames and copy the data */
971 if (msg_head->nframes > 1) {
972 op->frames = kmalloc_array(msg_head->nframes,
973 op->cfsiz,
974 GFP_KERNEL);
975 if (!op->frames) {
976 kfree(op);
977 return -ENOMEM;
978 }
979 } else
980 op->frames = &op->sframe;
981
982 for (i = 0; i < msg_head->nframes; i++) {
983
984 cf = op->frames + op->cfsiz * i;
985 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
986 if (err < 0)
987 goto free_op;
988
989 if (op->flags & CAN_FD_FRAME) {
990 if (cf->len > 64)
991 err = -EINVAL;
992 } else {
993 if (cf->len > 8)
994 err = -EINVAL;
995 }
996
997 if (err < 0)
998 goto free_op;
999
1000 if (msg_head->flags & TX_CP_CAN_ID) {
1001 /* copy can_id into frame */
1002 cf->can_id = msg_head->can_id;
1003 }
1004 }
1005
1006 /* tx_ops never compare with previous received messages */
1007 op->last_frames = NULL;
1008
1009 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1010 op->sk = sk;
1011 op->ifindex = ifindex;
1012
1013 /* initialize uninitialized (kzalloc) structure */
1014 hrtimer_init(&op->timer, CLOCK_MONOTONIC,
1015 HRTIMER_MODE_REL_SOFT);
1016 op->timer.function = bcm_tx_timeout_handler;
1017
1018 /* currently unused in tx_ops */
1019 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
1020 HRTIMER_MODE_REL_SOFT);
1021
1022 /* add this bcm_op to the list of the tx_ops */
1023 list_add(&op->list, &bo->tx_ops);
1024
1025 } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head, ifindex))) */
1026
1027 if (op->nframes != msg_head->nframes) {
1028 op->nframes = msg_head->nframes;
1029 /* start multiple frame transmission with index 0 */
1030 op->currframe = 0;
1031 }
1032
1033 /* check flags */
1034
1035 if (op->flags & TX_RESET_MULTI_IDX) {
1036 /* start multiple frame transmission with index 0 */
1037 op->currframe = 0;
1038 }
1039
1040 if (op->flags & SETTIMER) {
1041 /* set timer values */
1042 op->count = msg_head->count;
1043 op->ival1 = msg_head->ival1;
1044 op->ival2 = msg_head->ival2;
1045 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1046 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1047
1048 /* disable an active timer due to zero values? */
1049 if (!op->kt_ival1 && !op->kt_ival2)
1050 hrtimer_cancel(&op->timer);
1051 }
1052
1053 if (op->flags & STARTTIMER) {
1054 hrtimer_cancel(&op->timer);
1055 /* spec: send CAN frame when starting timer */
1056 op->flags |= TX_ANNOUNCE;
1057 }
1058
1059 if (op->flags & TX_ANNOUNCE) {
1060 bcm_can_tx(op);
1061 if (op->count)
1062 op->count--;
1063 }
1064
1065 if (op->flags & STARTTIMER)
1066 bcm_tx_start_timer(op);
1067
1068 return msg_head->nframes * op->cfsiz + MHSIZ;
1069
1070free_op:
1071 if (op->frames != &op->sframe)
1072 kfree(op->frames);
1073 kfree(op);
1074 return err;
1075}
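
/*
 * Minimal userspace sketch of the TX_SETUP path handled above: cyclic
 * transmission of one Classical CAN frame every 100 ms.  The interface
 * name "can0", the CAN ID 0x123 and the data bytes are example
 * assumptions; error handling is omitted.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} tx;
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *
 *	addr.can_ifindex = if_nametoindex("can0");
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	memset(&tx, 0, sizeof(tx));
 *	tx.msg_head.opcode = TX_SETUP;
 *	tx.msg_head.can_id = 0x123;
 *	tx.msg_head.flags = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *	tx.msg_head.ival2.tv_usec = 100000;	// 100 ms cycle (count == 0)
 *	tx.msg_head.nframes = 1;
 *	tx.frame.can_dlc = 2;
 *	tx.frame.data[0] = 0x11;
 *	tx.frame.data[1] = 0x22;
 *
 *	write(s, &tx, sizeof(tx));	// processed by bcm_sendmsg() -> bcm_tx_setup()
 */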
1076
1077/*
1078 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
1079 */
1080static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1081 int ifindex, struct sock *sk)
1082{
1083 struct bcm_sock *bo = bcm_sk(sk);
1084 struct bcm_op *op;
1085 int do_rx_register;
1086 int err = 0;
1087
1088 if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
1089 /* be robust against wrong usage ... */
1090 msg_head->flags |= RX_FILTER_ID;
1091 /* ignore trailing garbage */
1092 msg_head->nframes = 0;
1093 }
1094
1095 /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
1096 if (msg_head->nframes > MAX_NFRAMES + 1)
1097 return -EINVAL;
1098
1099 if ((msg_head->flags & RX_RTR_FRAME) &&
1100 ((msg_head->nframes != 1) ||
1101 (!(msg_head->can_id & CAN_RTR_FLAG))))
1102 return -EINVAL;
1103
1104 /* check timeval limitations */
1105 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
1106 return -EINVAL;
1107
1108 /* check the given can_id */
1109 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
1110 if (op) {
1111 /* update existing BCM operation */
1112
1113 /*
1114 * Do we need more space for the CAN frames than currently
1115 * allocated? -> This is a _really_ unusual use-case and
1116 * therefore (complexity / locking) it is not supported.
1117 */
1118 if (msg_head->nframes > op->nframes)
1119 return -E2BIG;
1120
1121 if (msg_head->nframes) {
1122 /* update CAN frames content */
1123 err = memcpy_from_msg(op->frames, msg,
1124 msg_head->nframes * op->cfsiz);
1125 if (err < 0)
1126 return err;
1127
1128 /* clear last_frames to indicate 'nothing received' */
1129 memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
1130 }
1131
1132 op->nframes = msg_head->nframes;
1133 op->flags = msg_head->flags;
1134
1135 /* Only an update -> do not call can_rx_register() */
1136 do_rx_register = 0;
1137
1138 } else {
1139 /* insert new BCM operation for the given can_id */
1140 op = kzalloc(OPSIZ, GFP_KERNEL);
1141 if (!op)
1142 return -ENOMEM;
1143
1144 op->can_id = msg_head->can_id;
1145 op->nframes = msg_head->nframes;
1146 op->cfsiz = CFSIZ(msg_head->flags);
1147 op->flags = msg_head->flags;
1148
1149 if (msg_head->nframes > 1) {
1150 /* create array for CAN frames and copy the data */
1151 op->frames = kmalloc_array(msg_head->nframes,
1152 op->cfsiz,
1153 GFP_KERNEL);
1154 if (!op->frames) {
1155 kfree(op);
1156 return -ENOMEM;
1157 }
1158
1159 /* create and init array for received CAN frames */
1160 op->last_frames = kcalloc(msg_head->nframes,
1161 op->cfsiz,
1162 GFP_KERNEL);
1163 if (!op->last_frames) {
1164 kfree(op->frames);
1165 kfree(op);
1166 return -ENOMEM;
1167 }
1168
1169 } else {
1170 op->frames = &op->sframe;
1171 op->last_frames = &op->last_sframe;
1172 }
1173
1174 if (msg_head->nframes) {
1175 err = memcpy_from_msg(op->frames, msg,
1176 msg_head->nframes * op->cfsiz);
1177 if (err < 0) {
1178 if (op->frames != &op->sframe)
1179 kfree(op->frames);
1180 if (op->last_frames != &op->last_sframe)
1181 kfree(op->last_frames);
1182 kfree(op);
1183 return err;
1184 }
1185 }
1186
1187 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1188 op->sk = sk;
1189 op->ifindex = ifindex;
1190
1191 /* ifindex for timeout events w/o previous frame reception */
1192 op->rx_ifindex = ifindex;
1193
1194 /* initialize uninitialized (kzalloc) structure */
1195 hrtimer_init(&op->timer, CLOCK_MONOTONIC,
1196 HRTIMER_MODE_REL_SOFT);
1197 op->timer.function = bcm_rx_timeout_handler;
1198
1199 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
1200 HRTIMER_MODE_REL_SOFT);
1201 op->thrtimer.function = bcm_rx_thr_handler;
1202
1203 /* add this bcm_op to the list of the rx_ops */
1204 list_add(&op->list, &bo->rx_ops);
1205
1206 /* call can_rx_register() */
1207 do_rx_register = 1;
1208
1209 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head, ifindex))) */
1210
1211 /* check flags */
1212
1213 if (op->flags & RX_RTR_FRAME) {
1214 struct canfd_frame *frame0 = op->frames;
1215
1216 /* no timers in RTR-mode */
1217 hrtimer_cancel(&op->thrtimer);
1218 hrtimer_cancel(&op->timer);
1219
1220 /*
1221 * funny feature in RX(!)_SETUP only for RTR-mode:
1222 * copy can_id into frame BUT without RTR-flag to
1223 * prevent a full-load-loopback-test ... ;-]
1224 */
1225 if ((op->flags & TX_CP_CAN_ID) ||
1226 (frame0->can_id == op->can_id))
1227 frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
1228
1229 } else {
1230 if (op->flags & SETTIMER) {
1231
1232 /* set timer value */
1233 op->ival1 = msg_head->ival1;
1234 op->ival2 = msg_head->ival2;
1235 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1236 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1237
1238 /* disable an active timer due to zero value? */
1239 if (!op->kt_ival1)
1240 hrtimer_cancel(&op->timer);
1241
1242 /*
1243 * In any case cancel the throttle timer, flush
1244 * potentially blocked msgs and reset throttle handling
1245 */
1246 op->kt_lastmsg = 0;
1247 hrtimer_cancel(&op->thrtimer);
1248 bcm_rx_thr_flush(op);
1249 }
1250
1251 if ((op->flags & STARTTIMER) && op->kt_ival1)
1252 hrtimer_start(&op->timer, op->kt_ival1,
1253 HRTIMER_MODE_REL_SOFT);
1254 }
1255
1256 /* now we can register for can_ids, if we added a new bcm_op */
1257 if (do_rx_register) {
1258 if (ifindex) {
1259 struct net_device *dev;
1260
1261 dev = dev_get_by_index(sock_net(sk), ifindex);
1262 if (dev) {
1263 err = can_rx_register(sock_net(sk), dev,
1264 op->can_id,
1265 REGMASK(op->can_id),
1266 bcm_rx_handler, op,
1267 "bcm", sk);
1268
1269 op->rx_reg_dev = dev;
1270 dev_put(dev);
1271 }
1272
1273 } else
1274 err = can_rx_register(sock_net(sk), NULL, op->can_id,
1275 REGMASK(op->can_id),
1276 bcm_rx_handler, op, "bcm", sk);
1277 if (err) {
1278 /* this bcm rx op is broken -> remove it */
1279 list_del(&op->list);
1280 bcm_remove_op(op);
1281 return err;
1282 }
1283 }
1284
1285 return msg_head->nframes * op->cfsiz + MHSIZ;
1286}
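
/*
 * Minimal userspace sketch of a simple RX_SETUP as created above.  The
 * CAN ID 0x123, the relevant byte position and the 5 second timeout are
 * example assumptions; s is a connected CAN_BCM socket as in the
 * TX_SETUP sketch.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} rx;
 *
 *	memset(&rx, 0, sizeof(rx));
 *	rx.msg_head.opcode = RX_SETUP;
 *	rx.msg_head.can_id = 0x123;
 *	rx.msg_head.flags = SETTIMER;
 *	rx.msg_head.ival1.tv_sec = 5;	// RX_TIMEOUT when reception stops for 5 s
 *	rx.msg_head.nframes = 1;
 *	rx.frame.data[0] = 0xFF;	// only changes in data[0] are relevant
 *
 *	write(s, &rx, sizeof(rx));
 *
 *	// each read() returns a bcm_msg_head (opcode RX_CHANGED or RX_TIMEOUT),
 *	// followed by the changed CAN frame in the RX_CHANGED case
 *	read(s, &rx, sizeof(rx));
 */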
1287
1288/*
1289 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1290 */
1291static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
1292 int cfsiz)
1293{
1294 struct sk_buff *skb;
1295 struct net_device *dev;
1296 int err;
1297
1298 /* we need a real device to send frames */
1299 if (!ifindex)
1300 return -ENODEV;
1301
1302 skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
1303 if (!skb)
1304 return -ENOMEM;
1305
1306 can_skb_reserve(skb);
1307
1308 err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
1309 if (err < 0) {
1310 kfree_skb(skb);
1311 return err;
1312 }
1313
1314 dev = dev_get_by_index(sock_net(sk), ifindex);
1315 if (!dev) {
1316 kfree_skb(skb);
1317 return -ENODEV;
1318 }
1319
1320 can_skb_prv(skb)->ifindex = dev->ifindex;
1321 can_skb_prv(skb)->skbcnt = 0;
1322 skb->dev = dev;
1323 can_skb_set_owner(skb, sk);
1324 err = can_send(skb, 1); /* send with loopback */
1325 dev_put(dev);
1326
1327 if (err)
1328 return err;
1329
1330 return cfsiz + MHSIZ;
1331}
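
/*
 * Userspace sketch of the single-shot TX_SEND handled above: exactly one
 * bcm_msg_head plus one CAN frame per write().  The CAN ID 0x321 and the
 * data byte are example assumptions; s is a connected CAN_BCM socket.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} msg = { .msg_head = { .opcode = TX_SEND, .nframes = 1 } };
 *
 *	msg.frame.can_id = 0x321;
 *	msg.frame.can_dlc = 1;
 *	msg.frame.data[0] = 0x42;
 *	write(s, &msg, sizeof(msg));
 */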
1332
1333/*
1334 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1335 */
1336static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1337{
1338 struct sock *sk = sock->sk;
1339 struct bcm_sock *bo = bcm_sk(sk);
1340 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1341 struct bcm_msg_head msg_head;
1342 int cfsiz;
1343 int ret; /* read bytes or error codes as return value */
1344
1345 if (!bo->bound)
1346 return -ENOTCONN;
1347
1348 /* check for valid message length from userspace */
1349 if (size < MHSIZ)
1350 return -EINVAL;
1351
1352 /* read message head information */
1353 ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
1354 if (ret < 0)
1355 return ret;
1356
1357 cfsiz = CFSIZ(msg_head.flags);
1358 if ((size - MHSIZ) % cfsiz)
1359 return -EINVAL;
1360
1361 /* check for alternative ifindex for this bcm_op */
1362
1363 if (!ifindex && msg->msg_name) {
1364 /* no bound device as default => check msg_name */
1365 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
1366
1367 if (msg->msg_namelen < BCM_MIN_NAMELEN)
1368 return -EINVAL;
1369
1370 if (addr->can_family != AF_CAN)
1371 return -EINVAL;
1372
1373 /* ifindex from sendto() */
1374 ifindex = addr->can_ifindex;
1375
1376 if (ifindex) {
1377 struct net_device *dev;
1378
1379 dev = dev_get_by_index(sock_net(sk), ifindex);
1380 if (!dev)
1381 return -ENODEV;
1382
1383 if (dev->type != ARPHRD_CAN) {
1384 dev_put(dev);
1385 return -ENODEV;
1386 }
1387
1388 dev_put(dev);
1389 }
1390 }
1391
1392 lock_sock(sk);
1393
1394 switch (msg_head.opcode) {
1395
1396 case TX_SETUP:
1397 ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1398 break;
1399
1400 case RX_SETUP:
1401 ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1402 break;
1403
1404 case TX_DELETE:
1405 if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
1406 ret = MHSIZ;
1407 else
1408 ret = -EINVAL;
1409 break;
1410
1411 case RX_DELETE:
1412 if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
1413 ret = MHSIZ;
1414 else
1415 ret = -EINVAL;
1416 break;
1417
1418 case TX_READ:
1419 /* reuse msg_head for the reply to TX_READ */
1420 msg_head.opcode = TX_STATUS;
1421 ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1422 break;
1423
1424 case RX_READ:
1425 /* reuse msg_head for the reply to RX_READ */
1426 msg_head.opcode = RX_STATUS;
1427 ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1428 break;
1429
1430 case TX_SEND:
1431 /* we need exactly one CAN frame behind the msg head */
1432 if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
1433 ret = -EINVAL;
1434 else
1435 ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
1436 break;
1437
1438 default:
1439 ret = -EINVAL;
1440 break;
1441 }
1442
1443 release_sock(sk);
1444
1445 return ret;
1446}
1447
1448/*
1449 * notification handler for netdevice status changes
1450 */
1451static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
1452 struct net_device *dev)
1453{
1454 struct sock *sk = &bo->sk;
1455 struct bcm_op *op;
1456 int notify_enodev = 0;
1457
1458 if (!net_eq(dev_net(dev), sock_net(sk)))
1459 return;
1460
1461 switch (msg) {
1462
1463 case NETDEV_UNREGISTER:
1464 lock_sock(sk);
1465
1466 /* remove device specific receive entries */
1467 list_for_each_entry(op, &bo->rx_ops, list)
1468 if (op->rx_reg_dev == dev)
1469 bcm_rx_unreg(dev, op);
1470
1471 /* remove device reference, if this is our bound device */
1472 if (bo->bound && bo->ifindex == dev->ifindex) {
1473#if IS_ENABLED(CONFIG_PROC_FS)
1474 if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
1475 remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
1476 bo->bcm_proc_read = NULL;
1477 }
1478#endif
1479 bo->bound = 0;
1480 bo->ifindex = 0;
1481 notify_enodev = 1;
1482 }
1483
1484 release_sock(sk);
1485
1486 if (notify_enodev) {
1487 sk->sk_err = ENODEV;
1488 if (!sock_flag(sk, SOCK_DEAD))
1489 sk_error_report(sk);
1490 }
1491 break;
1492
1493 case NETDEV_DOWN:
1494 if (bo->bound && bo->ifindex == dev->ifindex) {
1495 sk->sk_err = ENETDOWN;
1496 if (!sock_flag(sk, SOCK_DEAD))
1497 sk_error_report(sk);
1498 }
1499 }
1500}
1501
1502static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1503 void *ptr)
1504{
1505 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1506
1507 if (dev->type != ARPHRD_CAN)
1508 return NOTIFY_DONE;
1509 if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
1510 return NOTIFY_DONE;
1511 if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
1512 return NOTIFY_DONE;
1513
1514 spin_lock(&bcm_notifier_lock);
1515 list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
1516 spin_unlock(&bcm_notifier_lock);
1517 bcm_notify(bcm_busy_notifier, msg, dev);
1518 spin_lock(&bcm_notifier_lock);
1519 }
1520 bcm_busy_notifier = NULL;
1521 spin_unlock(&bcm_notifier_lock);
1522 return NOTIFY_DONE;
1523}
1524
1525/*
1526 * initial settings for all BCM sockets to be set at socket creation time
1527 */
1528static int bcm_init(struct sock *sk)
1529{
1530 struct bcm_sock *bo = bcm_sk(sk);
1531
1532 bo->bound = 0;
1533 bo->ifindex = 0;
1534 bo->dropped_usr_msgs = 0;
1535 bo->bcm_proc_read = NULL;
1536
1537 INIT_LIST_HEAD(&bo->tx_ops);
1538 INIT_LIST_HEAD(&bo->rx_ops);
1539
1540 /* set notifier */
1541 spin_lock(&bcm_notifier_lock);
1542 list_add_tail(&bo->notifier, &bcm_notifier_list);
1543 spin_unlock(&bcm_notifier_lock);
1544
1545 return 0;
1546}
1547
1548/*
1549 * standard socket functions
1550 */
1551static int bcm_release(struct socket *sock)
1552{
1553 struct sock *sk = sock->sk;
1554 struct net *net;
1555 struct bcm_sock *bo;
1556 struct bcm_op *op, *next;
1557
1558 if (!sk)
1559 return 0;
1560
1561 net = sock_net(sk);
1562 bo = bcm_sk(sk);
1563
1564 /* remove bcm_ops, timer, rx_unregister(), etc. */
1565
1566 spin_lock(&bcm_notifier_lock);
1567 while (bcm_busy_notifier == bo) {
1568 spin_unlock(&bcm_notifier_lock);
1569 schedule_timeout_uninterruptible(1);
1570 spin_lock(&bcm_notifier_lock);
1571 }
1572 list_del(&bo->notifier);
1573 spin_unlock(&bcm_notifier_lock);
1574
1575 lock_sock(sk);
1576
1577#if IS_ENABLED(CONFIG_PROC_FS)
1578 /* remove procfs entry */
1579 if (net->can.bcmproc_dir && bo->bcm_proc_read)
1580 remove_proc_entry(bo->procname, net->can.bcmproc_dir);
1581#endif /* CONFIG_PROC_FS */
1582
1583 list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1584 bcm_remove_op(op);
1585
1586 list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1587 /*
1588 * Don't care if we're bound or not (due to netdev problems):
1589 * can_rx_unregister() is always a safe thing to do here.
1590 */
1591 if (op->ifindex) {
1592 /*
1593 * Only remove subscriptions that had not
1594 * been removed due to NETDEV_UNREGISTER
1595 * in bcm_notifier()
1596 */
1597 if (op->rx_reg_dev) {
1598 struct net_device *dev;
1599
1600 dev = dev_get_by_index(net, op->ifindex);
1601 if (dev) {
1602 bcm_rx_unreg(dev, op);
1603 dev_put(dev);
1604 }
1605 }
1606 } else
1607 can_rx_unregister(net, NULL, op->can_id,
1608 REGMASK(op->can_id),
1609 bcm_rx_handler, op);
1610
1611 }
1612
1613 synchronize_rcu();
1614
1615 list_for_each_entry_safe(op, next, &bo->rx_ops, list)
1616 bcm_remove_op(op);
1617
1618 /* remove device reference */
1619 if (bo->bound) {
1620 bo->bound = 0;
1621 bo->ifindex = 0;
1622 }
1623
1624 sock_orphan(sk);
1625 sock->sk = NULL;
1626
1627 release_sock(sk);
1628 sock_put(sk);
1629
1630 return 0;
1631}
1632
1633static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1634 int flags)
1635{
1636 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1637 struct sock *sk = sock->sk;
1638 struct bcm_sock *bo = bcm_sk(sk);
1639 struct net *net = sock_net(sk);
1640 int ret = 0;
1641
1642 if (len < BCM_MIN_NAMELEN)
1643 return -EINVAL;
1644
1645 lock_sock(sk);
1646
1647 if (bo->bound) {
1648 ret = -EISCONN;
1649 goto fail;
1650 }
1651
1652 /* bind a device to this socket */
1653 if (addr->can_ifindex) {
1654 struct net_device *dev;
1655
1656 dev = dev_get_by_index(net, addr->can_ifindex);
1657 if (!dev) {
1658 ret = -ENODEV;
1659 goto fail;
1660 }
1661 if (dev->type != ARPHRD_CAN) {
1662 dev_put(dev);
1663 ret = -ENODEV;
1664 goto fail;
1665 }
1666
1667 bo->ifindex = dev->ifindex;
1668 dev_put(dev);
1669
1670 } else {
1671 /* no interface reference for ifindex = 0 ('any' CAN device) */
1672 bo->ifindex = 0;
1673 }
1674
1675#if IS_ENABLED(CONFIG_PROC_FS)
1676 if (net->can.bcmproc_dir) {
1677 /* unique socket address as filename */
1678 sprintf(bo->procname, "%lu", sock_i_ino(sk));
1679 bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
1680 net->can.bcmproc_dir,
1681 bcm_proc_show, sk);
1682 if (!bo->bcm_proc_read) {
1683 ret = -ENOMEM;
1684 goto fail;
1685 }
1686 }
1687#endif /* CONFIG_PROC_FS */
1688
1689 bo->bound = 1;
1690
1691fail:
1692 release_sock(sk);
1693
1694 return ret;
1695}
1696
1697static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1698 int flags)
1699{
1700 struct sock *sk = sock->sk;
1701 struct sk_buff *skb;
1702 int error = 0;
1703 int err;
1704
1705 skb = skb_recv_datagram(sk, flags, &error);
1706 if (!skb)
1707 return error;
1708
1709 if (skb->len < size)
1710 size = skb->len;
1711
1712 err = memcpy_to_msg(msg, skb->data, size);
1713 if (err < 0) {
1714 skb_free_datagram(sk, skb);
1715 return err;
1716 }
1717
1718 sock_recv_cmsgs(msg, sk, skb);
1719
1720 if (msg->msg_name) {
1721 __sockaddr_check_size(BCM_MIN_NAMELEN);
1722 msg->msg_namelen = BCM_MIN_NAMELEN;
1723 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1724 }
1725
1726 /* assign the flags that have been recorded in bcm_send_to_user() */
1727 msg->msg_flags |= *(bcm_flags(skb));
1728
1729 skb_free_datagram(sk, skb);
1730
1731 return size;
1732}
1733
1734static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
1735 unsigned long arg)
1736{
1737 /* no ioctls for socket layer -> hand it down to NIC layer */
1738 return -ENOIOCTLCMD;
1739}
1740
1741static const struct proto_ops bcm_ops = {
1742 .family = PF_CAN,
1743 .release = bcm_release,
1744 .bind = sock_no_bind,
1745 .connect = bcm_connect,
1746 .socketpair = sock_no_socketpair,
1747 .accept = sock_no_accept,
1748 .getname = sock_no_getname,
1749 .poll = datagram_poll,
1750 .ioctl = bcm_sock_no_ioctlcmd,
1751 .gettstamp = sock_gettstamp,
1752 .listen = sock_no_listen,
1753 .shutdown = sock_no_shutdown,
1754 .sendmsg = bcm_sendmsg,
1755 .recvmsg = bcm_recvmsg,
1756 .mmap = sock_no_mmap,
1757};
1758
1759static struct proto bcm_proto __read_mostly = {
1760 .name = "CAN_BCM",
1761 .owner = THIS_MODULE,
1762 .obj_size = sizeof(struct bcm_sock),
1763 .init = bcm_init,
1764};
1765
1766static const struct can_proto bcm_can_proto = {
1767 .type = SOCK_DGRAM,
1768 .protocol = CAN_BCM,
1769 .ops = &bcm_ops,
1770 .prot = &bcm_proto,
1771};
1772
1773static int canbcm_pernet_init(struct net *net)
1774{
1775#if IS_ENABLED(CONFIG_PROC_FS)
1776 /* create /proc/net/can-bcm directory */
1777 net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
1778#endif /* CONFIG_PROC_FS */
1779
1780 return 0;
1781}
1782
1783static void canbcm_pernet_exit(struct net *net)
1784{
1785#if IS_ENABLED(CONFIG_PROC_FS)
1786 /* remove /proc/net/can-bcm directory */
1787 if (net->can.bcmproc_dir)
1788 remove_proc_entry("can-bcm", net->proc_net);
1789#endif /* CONFIG_PROC_FS */
1790}
1791
1792static struct pernet_operations canbcm_pernet_ops __read_mostly = {
1793 .init = canbcm_pernet_init,
1794 .exit = canbcm_pernet_exit,
1795};
1796
1797static struct notifier_block canbcm_notifier = {
1798 .notifier_call = bcm_notifier
1799};
1800
1801static int __init bcm_module_init(void)
1802{
1803 int err;
1804
1805 pr_info("can: broadcast manager protocol\n");
1806
1807 err = register_pernet_subsys(&canbcm_pernet_ops);
1808 if (err)
1809 return err;
1810
1811 err = register_netdevice_notifier(&canbcm_notifier);
1812 if (err)
1813 goto register_notifier_failed;
1814
1815 err = can_proto_register(&bcm_can_proto);
1816 if (err < 0) {
1817 printk(KERN_ERR "can: registration of bcm protocol failed\n");
1818 goto register_proto_failed;
1819 }
1820
1821 return 0;
1822
1823register_proto_failed:
1824 unregister_netdevice_notifier(&canbcm_notifier);
1825register_notifier_failed:
1826 unregister_pernet_subsys(&canbcm_pernet_ops);
1827 return err;
1828}
1829
1830static void __exit bcm_module_exit(void)
1831{
1832 can_proto_unregister(&bcm_can_proto);
1833 unregister_netdevice_notifier(&canbcm_notifier);
1834 unregister_pernet_subsys(&canbcm_pernet_ops);
1835}
1836
1837module_init(bcm_module_init);
1838module_exit(bcm_module_exit);
1/*
2 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
3 *
4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of Volkswagen nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * Alternatively, provided that this notice is retained in full, this
20 * software may be distributed under the terms of the GNU General
21 * Public License ("GPL") version 2, in which case the provisions of the
22 * GPL apply INSTEAD OF those given above.
23 *
24 * The provided data structures and external interfaces from this code
25 * are not restricted to be used by modules with a GPL compatible license.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 *
40 */
41
42#include <linux/module.h>
43#include <linux/init.h>
44#include <linux/interrupt.h>
45#include <linux/hrtimer.h>
46#include <linux/list.h>
47#include <linux/proc_fs.h>
48#include <linux/seq_file.h>
49#include <linux/uio.h>
50#include <linux/net.h>
51#include <linux/netdevice.h>
52#include <linux/socket.h>
53#include <linux/if_arp.h>
54#include <linux/skbuff.h>
55#include <linux/can.h>
56#include <linux/can/core.h>
57#include <linux/can/skb.h>
58#include <linux/can/bcm.h>
59#include <linux/slab.h>
60#include <net/sock.h>
61#include <net/net_namespace.h>
62
63/*
64 * To send multiple CAN frame content within TX_SETUP or to filter
65 * CAN messages with multiplex index within RX_SETUP, the number of
66 * different filters is limited to 256 due to the one byte index value.
67 */
68#define MAX_NFRAMES 256
69
70/* use of last_frames[index].can_dlc */
71#define RX_RECV 0x40 /* received data for this element */
72#define RX_THR 0x80 /* element not been sent due to throttle feature */
73#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
74
75/* get best masking value for can_rx_register() for a given single can_id */
76#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
77 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
78 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
79
80#define CAN_BCM_VERSION CAN_VERSION
81static __initconst const char banner[] = KERN_INFO
82 "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
83
84MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
85MODULE_LICENSE("Dual BSD/GPL");
86MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
87MODULE_ALIAS("can-proto-2");
88
89/* easy access to can_frame payload */
90static inline u64 GET_U64(const struct can_frame *cp)
91{
92 return *(u64 *)cp->data;
93}
94
95struct bcm_op {
96 struct list_head list;
97 int ifindex;
98 canid_t can_id;
99 u32 flags;
100 unsigned long frames_abs, frames_filtered;
101 struct timeval ival1, ival2;
102 struct hrtimer timer, thrtimer;
103 struct tasklet_struct tsklet, thrtsklet;
104 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
105 int rx_ifindex;
106 u32 count;
107 u32 nframes;
108 u32 currframe;
109 struct can_frame *frames;
110 struct can_frame *last_frames;
111 struct can_frame sframe;
112 struct can_frame last_sframe;
113 struct sock *sk;
114 struct net_device *rx_reg_dev;
115};
116
117static struct proc_dir_entry *proc_dir;
118
119struct bcm_sock {
120 struct sock sk;
121 int bound;
122 int ifindex;
123 struct notifier_block notifier;
124 struct list_head rx_ops;
125 struct list_head tx_ops;
126 unsigned long dropped_usr_msgs;
127 struct proc_dir_entry *bcm_proc_read;
128 char procname [32]; /* inode number in decimal with \0 */
129};
130
131static inline struct bcm_sock *bcm_sk(const struct sock *sk)
132{
133 return (struct bcm_sock *)sk;
134}
135
136#define CFSIZ sizeof(struct can_frame)
137#define OPSIZ sizeof(struct bcm_op)
138#define MHSIZ sizeof(struct bcm_msg_head)
139
140/*
141 * procfs functions
142 */
143static char *bcm_proc_getifname(char *result, int ifindex)
144{
145 struct net_device *dev;
146
147 if (!ifindex)
148 return "any";
149
150 rcu_read_lock();
151 dev = dev_get_by_index_rcu(&init_net, ifindex);
152 if (dev)
153 strcpy(result, dev->name);
154 else
155 strcpy(result, "???");
156 rcu_read_unlock();
157
158 return result;
159}
160
161static int bcm_proc_show(struct seq_file *m, void *v)
162{
163 char ifname[IFNAMSIZ];
164 struct sock *sk = (struct sock *)m->private;
165 struct bcm_sock *bo = bcm_sk(sk);
166 struct bcm_op *op;
167
168 seq_printf(m, ">>> socket %pK", sk->sk_socket);
169 seq_printf(m, " / sk %pK", sk);
170 seq_printf(m, " / bo %pK", bo);
171 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
172 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
173 seq_printf(m, " <<<\n");
174
175 list_for_each_entry(op, &bo->rx_ops, list) {
176
177 unsigned long reduction;
178
179 /* print only active entries & prevent division by zero */
180 if (!op->frames_abs)
181 continue;
182
183 seq_printf(m, "rx_op: %03X %-5s ",
184 op->can_id, bcm_proc_getifname(ifname, op->ifindex));
185 seq_printf(m, "[%u]%c ", op->nframes,
186 (op->flags & RX_CHECK_DLC)?'d':' ');
187 if (op->kt_ival1.tv64)
188 seq_printf(m, "timeo=%lld ",
189 (long long)
190 ktime_to_us(op->kt_ival1));
191
192 if (op->kt_ival2.tv64)
193 seq_printf(m, "thr=%lld ",
194 (long long)
195 ktime_to_us(op->kt_ival2));
196
197 seq_printf(m, "# recv %ld (%ld) => reduction: ",
198 op->frames_filtered, op->frames_abs);
199
200 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
201
202 seq_printf(m, "%s%ld%%\n",
203 (reduction == 100)?"near ":"", reduction);
204 }
205
206 list_for_each_entry(op, &bo->tx_ops, list) {
207
208 seq_printf(m, "tx_op: %03X %s [%u] ",
209 op->can_id,
210 bcm_proc_getifname(ifname, op->ifindex),
211 op->nframes);
212
213 if (op->kt_ival1.tv64)
214 seq_printf(m, "t1=%lld ",
215 (long long) ktime_to_us(op->kt_ival1));
216
217 if (op->kt_ival2.tv64)
218 seq_printf(m, "t2=%lld ",
219 (long long) ktime_to_us(op->kt_ival2));
220
221 seq_printf(m, "# sent %ld\n", op->frames_abs);
222 }
223 seq_putc(m, '\n');
224 return 0;
225}
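
/*
 * Illustrative procfs output, following the seq_printf() formats above
 * (pointer and counter values are made up):
 *
 *   >>> socket ffff88003c9b2a00 / sk ffff88003c0d8000 / bo ffff88003c0d8000 / dropped 0 / bound can0 <<<
 *   rx_op: 123 can0  [5]d timeo=5000000 thr=100000 # recv 1400 (350000) => reduction: near 100%
 *   tx_op: 42F can0 [1] t1=100000 # sent 7331
 */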
226
227static int bcm_proc_open(struct inode *inode, struct file *file)
228{
229 return single_open(file, bcm_proc_show, PDE_DATA(inode));
230}
231
232static const struct file_operations bcm_proc_fops = {
233 .owner = THIS_MODULE,
234 .open = bcm_proc_open,
235 .read = seq_read,
236 .llseek = seq_lseek,
237 .release = single_release,
238};
239
240/*
241 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
242 * of the given bcm tx op
243 */
244static void bcm_can_tx(struct bcm_op *op)
245{
246 struct sk_buff *skb;
247 struct net_device *dev;
248 struct can_frame *cf = &op->frames[op->currframe];
249
250 /* no target device? => exit */
251 if (!op->ifindex)
252 return;
253
254 dev = dev_get_by_index(&init_net, op->ifindex);
255 if (!dev) {
256 /* RFC: should this bcm_op remove itself here? */
257 return;
258 }
259
260 skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), gfp_any());
261 if (!skb)
262 goto out;
263
264 can_skb_reserve(skb);
265 can_skb_prv(skb)->ifindex = dev->ifindex;
266
267 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
268
269 /* send with loopback */
270 skb->dev = dev;
271 can_skb_set_owner(skb, op->sk);
272 can_send(skb, 1);
273
274 /* update statistics */
275 op->currframe++;
276 op->frames_abs++;
277
278 /* reached last frame? */
279 if (op->currframe >= op->nframes)
280 op->currframe = 0;
281 out:
282 dev_put(dev);
283}
284
285/*
286 * bcm_send_to_user - send a BCM message to the userspace
287 * (consisting of bcm_msg_head + x CAN frames)
288 */
289static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
290 struct can_frame *frames, int has_timestamp)
291{
292 struct sk_buff *skb;
293 struct can_frame *firstframe;
294 struct sockaddr_can *addr;
295 struct sock *sk = op->sk;
296 unsigned int datalen = head->nframes * CFSIZ;
297 int err;
298
299 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
300 if (!skb)
301 return;
302
303 memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
304
305 if (head->nframes) {
306 /* can_frames starting here */
307 firstframe = (struct can_frame *)skb_tail_pointer(skb);
308
309 memcpy(skb_put(skb, datalen), frames, datalen);
310
311 /*
312 * the BCM uses the can_dlc-element of the can_frame
313 * structure for internal purposes. This is only
314 * relevant for updates that are generated by the
315 * BCM, where nframes is 1
316 */
317 if (head->nframes == 1)
318 firstframe->can_dlc &= BCM_CAN_DLC_MASK;
319 }
320
321 if (has_timestamp) {
322 /* restore rx timestamp */
323 skb->tstamp = op->rx_stamp;
324 }
325
326 /*
327 * Put the datagram into the receive queue so that bcm_recvmsg()
328 * can pick it up from there. To pass the interface index to
329 * bcm_recvmsg() we store a complete struct sockaddr_can
330 * containing it in skb->cb.
331 */
332
333 BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
334 addr = (struct sockaddr_can *)skb->cb;
335 memset(addr, 0, sizeof(*addr));
336 addr->can_family = AF_CAN;
337 addr->can_ifindex = op->rx_ifindex;
338
339 err = sock_queue_rcv_skb(sk, skb);
340 if (err < 0) {
341 struct bcm_sock *bo = bcm_sk(sk);
342
343 kfree_skb(skb);
344 /* don't care about overflows in this statistic */
345 bo->dropped_usr_msgs++;
346 }
347}
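
/*
 * The queued datagram has this layout when it reaches bcm_recvmsg()
 * (a byte-layout sketch, not a new structure):
 *
 *   +----------------------+-------------------------------+
 *   | struct bcm_msg_head  | head->nframes * can_frame     |
 *   +----------------------+-------------------------------+
 *
 * The originating interface index travels in skb->cb as a struct
 * sockaddr_can and is copied into msg_name by bcm_recvmsg().
 */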
348
349static void bcm_tx_start_timer(struct bcm_op *op)
350{
351 if (op->kt_ival1.tv64 && op->count)
352 hrtimer_start(&op->timer,
353 ktime_add(ktime_get(), op->kt_ival1),
354 HRTIMER_MODE_ABS);
355 else if (op->kt_ival2.tv64)
356 hrtimer_start(&op->timer,
357 ktime_add(ktime_get(), op->kt_ival2),
358 HRTIMER_MODE_ABS);
359}
360
361static void bcm_tx_timeout_tsklet(unsigned long data)
362{
363 struct bcm_op *op = (struct bcm_op *)data;
364 struct bcm_msg_head msg_head;
365
366 if (op->kt_ival1.tv64 && (op->count > 0)) {
367
368 op->count--;
369 if (!op->count && (op->flags & TX_COUNTEVT)) {
370
371 /* create notification to user */
372 msg_head.opcode = TX_EXPIRED;
373 msg_head.flags = op->flags;
374 msg_head.count = op->count;
375 msg_head.ival1 = op->ival1;
376 msg_head.ival2 = op->ival2;
377 msg_head.can_id = op->can_id;
378 msg_head.nframes = 0;
379
380 bcm_send_to_user(op, &msg_head, NULL, 0);
381 }
382 bcm_can_tx(op);
383
384 } else if (op->kt_ival2.tv64)
385 bcm_can_tx(op);
386
387 bcm_tx_start_timer(op);
388}
389
390/*
391 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
392 */
393static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
394{
395 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
396
397 tasklet_schedule(&op->tsklet);
398
399 return HRTIMER_NORESTART;
400}
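
/*
 * Resulting transmission pattern for a TX_SETUP with count = 3,
 * ival1 = 10 ms and ival2 = 1 s (values are illustrative): the frame
 * is sent immediately (TX_ANNOUNCE), twice more at the ival1 rate and
 * afterwards cyclically at the ival2 rate:
 *
 *   t = 0 ms, 10 ms, 20 ms, 1020 ms, 2020 ms, ...
 */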
401
402/*
403 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
404 */
405static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
406{
407 struct bcm_msg_head head;
408
409 /* update statistics */
410 op->frames_filtered++;
411
412 /* prevent statistics overflow */
413 if (op->frames_filtered > ULONG_MAX/100)
414 op->frames_filtered = op->frames_abs = 0;
415
416 /* this element is not throttled anymore */
417 data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
418
419 head.opcode = RX_CHANGED;
420 head.flags = op->flags;
421 head.count = op->count;
422 head.ival1 = op->ival1;
423 head.ival2 = op->ival2;
424 head.can_id = op->can_id;
425 head.nframes = 1;
426
427 bcm_send_to_user(op, &head, data, 1);
428}
429
430/*
431 * bcm_rx_update_and_send - process a detected relevant receive content change
432 * 1. update the last received data
433 * 2. send a notification to the user (if possible)
434 */
435static void bcm_rx_update_and_send(struct bcm_op *op,
436 struct can_frame *lastdata,
437 const struct can_frame *rxdata)
438{
439 memcpy(lastdata, rxdata, CFSIZ);
440
441 /* mark as used and throttled by default */
442 lastdata->can_dlc |= (RX_RECV|RX_THR);
443
444 /* throttling mode inactive? */
445 if (!op->kt_ival2.tv64) {
446 /* send RX_CHANGED to the user immediately */
447 bcm_rx_changed(op, lastdata);
448 return;
449 }
450
451 /* with active throttling timer we are just done here */
452 if (hrtimer_active(&op->thrtimer))
453 return;
454
455 /* first reception with enabled throttling mode */
456 if (!op->kt_lastmsg.tv64)
457 goto rx_changed_settime;
458
459 /* got a second frame inside a potential throttle period? */
460 if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
461 ktime_to_us(op->kt_ival2)) {
462 /* do not send the saved data - only start throttle timer */
463 hrtimer_start(&op->thrtimer,
464 ktime_add(op->kt_lastmsg, op->kt_ival2),
465 HRTIMER_MODE_ABS);
466 return;
467 }
468
469 /* the gap was big enough that throttling was not needed here */
470rx_changed_settime:
471 bcm_rx_changed(op, lastdata);
472 op->kt_lastmsg = ktime_get();
473}
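
/*
 * Throttling behaviour sketch for kt_ival2 = 100 ms (times are
 * illustrative, each frame changes filtered content):
 *
 *   t =   0 ms  frame A received -> RX_CHANGED sent, kt_lastmsg set
 *   t =  30 ms  frame B received -> stored in last_frames, thrtimer
 *               armed for t = 100 ms
 *   t =  70 ms  frame C received -> overwrites the stored data,
 *               thrtimer is already active
 *   t = 100 ms  thrtimer expires -> bcm_rx_thr_flush() delivers one
 *               RX_CHANGED containing frame C
 */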
474
475/*
476 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
477 * received data stored in op->last_frames[]
478 */
479static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
480 const struct can_frame *rxdata)
481{
482 /*
483 * no one uses the MSBs of can_dlc for comparison,
484 * so we use them here to detect the first reception
485 */
486
487 if (!(op->last_frames[index].can_dlc & RX_RECV)) {
488 /* received data for the first time => send update to user */
489 bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
490 return;
491 }
492
493 /* do a real check in can_frame data section */
494
495 if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
496 (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
497 bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
498 return;
499 }
500
501 if (op->flags & RX_CHECK_DLC) {
502 /* do a real check in can_frame dlc */
503 if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
504 BCM_CAN_DLC_MASK)) {
505 bcm_rx_update_and_send(op, &op->last_frames[index],
506 rxdata);
507 return;
508 }
509 }
510}
511
512/*
513 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
514 */
515static void bcm_rx_starttimer(struct bcm_op *op)
516{
517 if (op->flags & RX_NO_AUTOTIMER)
518 return;
519
520 if (op->kt_ival1.tv64)
521 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
522}
523
524static void bcm_rx_timeout_tsklet(unsigned long data)
525{
526 struct bcm_op *op = (struct bcm_op *)data;
527 struct bcm_msg_head msg_head;
528
529 /* create notification to user */
530 msg_head.opcode = RX_TIMEOUT;
531 msg_head.flags = op->flags;
532 msg_head.count = op->count;
533 msg_head.ival1 = op->ival1;
534 msg_head.ival2 = op->ival2;
535 msg_head.can_id = op->can_id;
536 msg_head.nframes = 0;
537
538 bcm_send_to_user(op, &msg_head, NULL, 0);
539}
540
541/*
542 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
543 */
544static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
545{
546 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
547
548 /* schedule before NET_RX_SOFTIRQ */
549 tasklet_hi_schedule(&op->tsklet);
550
551 /* no restart of the timer is done here! */
552
553 /* if the user wants to be informed when cyclic CAN messages come back */
554 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
555 /* clear received can_frames to indicate 'nothing received' */
556 memset(op->last_frames, 0, op->nframes * CFSIZ);
557 }
558
559 return HRTIMER_NORESTART;
560}
561
562/*
563 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
564 */
565static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
566 unsigned int index)
567{
568 if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
569 if (update)
570 bcm_rx_changed(op, &op->last_frames[index]);
571 return 1;
572 }
573 return 0;
574}
575
576/*
577 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
578 *
579 * update == 0 : just check if throttled data is available (any irq context)
580 * update == 1 : check and send throttled data to userspace (soft_irq context)
581 */
582static int bcm_rx_thr_flush(struct bcm_op *op, int update)
583{
584 int updated = 0;
585
586 if (op->nframes > 1) {
587 unsigned int i;
588
589 /* for MUX filter we start at index 1 */
590 for (i = 1; i < op->nframes; i++)
591 updated += bcm_rx_do_flush(op, update, i);
592
593 } else {
594 /* for RX_FILTER_ID and simple filter */
595 updated += bcm_rx_do_flush(op, update, 0);
596 }
597
598 return updated;
599}
600
601static void bcm_rx_thr_tsklet(unsigned long data)
602{
603 struct bcm_op *op = (struct bcm_op *)data;
604
605 /* push the changed data to the userspace */
606 bcm_rx_thr_flush(op, 1);
607}
608
609/*
610 * bcm_rx_thr_handler - the time for blocked content updates is over now:
611 * Check for throttled data and send it to the userspace
612 */
613static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
614{
615 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
616
617 tasklet_schedule(&op->thrtsklet);
618
619 if (bcm_rx_thr_flush(op, 0)) {
620 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
621 return HRTIMER_RESTART;
622 } else {
623 /* rearm throttle handling */
624 op->kt_lastmsg = ktime_set(0, 0);
625 return HRTIMER_NORESTART;
626 }
627}
628
629/*
630 * bcm_rx_handler - handle a CAN frame reception
631 */
632static void bcm_rx_handler(struct sk_buff *skb, void *data)
633{
634 struct bcm_op *op = (struct bcm_op *)data;
635 const struct can_frame *rxframe = (struct can_frame *)skb->data;
636 unsigned int i;
637
638 /* disable timeout */
639 hrtimer_cancel(&op->timer);
640
641 if (op->can_id != rxframe->can_id)
642 return;
643
644 /* save rx timestamp */
645 op->rx_stamp = skb->tstamp;
646 /* save originator for recvfrom() */
647 op->rx_ifindex = skb->dev->ifindex;
648 /* update statistics */
649 op->frames_abs++;
650
651 if (op->flags & RX_RTR_FRAME) {
652 /* send reply for RTR-request (placed in op->frames[0]) */
653 bcm_can_tx(op);
654 return;
655 }
656
657 if (op->flags & RX_FILTER_ID) {
658 /* the easiest case */
659 bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
660 goto rx_starttimer;
661 }
662
663 if (op->nframes == 1) {
664 /* simple compare with index 0 */
665 bcm_rx_cmp_to_index(op, 0, rxframe);
666 goto rx_starttimer;
667 }
668
669 if (op->nframes > 1) {
670 /*
671 * multiplex compare
672 *
673 * find the first multiplex mask that fits.
674 * Remark: The MUX-mask is stored in index 0
675 */
676
677 for (i = 1; i < op->nframes; i++) {
678 if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
679 (GET_U64(&op->frames[0]) &
680 GET_U64(&op->frames[i]))) {
681 bcm_rx_cmp_to_index(op, i, rxframe);
682 break;
683 }
684 }
685 }
686
687rx_starttimer:
688 bcm_rx_starttimer(op);
689}
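
/*
 * Multiplex filter example (values are illustrative): a RX_SETUP with
 * nframes = 3 may carry
 *
 *   frames[0].data[0] = 0xff   MUX mask: compare byte 0 only
 *   frames[1].data[0] = 0x01   entry for MUX value 0x01
 *   frames[2].data[0] = 0x02   entry for MUX value 0x02
 *
 * A received frame with data[0] == 0x02 matches index 2 in the loop
 * above and is then content-compared against last_frames[2] by
 * bcm_rx_cmp_to_index(), where frames[2] also acts as the relevant
 * bits mask for that comparison.
 */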
690
691/*
692 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
693 */
694static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
695 int ifindex)
696{
697 struct bcm_op *op;
698
699 list_for_each_entry(op, ops, list) {
700 if ((op->can_id == can_id) && (op->ifindex == ifindex))
701 return op;
702 }
703
704 return NULL;
705}
706
707static void bcm_remove_op(struct bcm_op *op)
708{
709 hrtimer_cancel(&op->timer);
710 hrtimer_cancel(&op->thrtimer);
711
712 if (op->tsklet.func)
713 tasklet_kill(&op->tsklet);
714
715 if (op->thrtsklet.func)
716 tasklet_kill(&op->thrtsklet);
717
718 if ((op->frames) && (op->frames != &op->sframe))
719 kfree(op->frames);
720
721 if ((op->last_frames) && (op->last_frames != &op->last_sframe))
722 kfree(op->last_frames);
723
724 kfree(op);
725}
726
727static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
728{
729 if (op->rx_reg_dev == dev) {
730 can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
731 bcm_rx_handler, op);
732
733 /* mark subscription as removed */
734 op->rx_reg_dev = NULL;
735 } else
736 printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
737 "mismatch %p %p\n", op->rx_reg_dev, dev);
738}
739
740/*
741 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
742 */
743static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
744{
745 struct bcm_op *op, *n;
746
747 list_for_each_entry_safe(op, n, ops, list) {
748 if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
749
750 /*
751 * Don't care if we're bound or not (due to netdev
752 * problems); can_rx_unregister() is always a safe
753 * thing to do here.
754 */
755 if (op->ifindex) {
756 /*
757 * Only remove subscriptions that had not
758 * been removed due to NETDEV_UNREGISTER
759 * in bcm_notifier()
760 */
761 if (op->rx_reg_dev) {
762 struct net_device *dev;
763
764 dev = dev_get_by_index(&init_net,
765 op->ifindex);
766 if (dev) {
767 bcm_rx_unreg(dev, op);
768 dev_put(dev);
769 }
770 }
771 } else
772 can_rx_unregister(NULL, op->can_id,
773 REGMASK(op->can_id),
774 bcm_rx_handler, op);
775
776 list_del(&op->list);
777 bcm_remove_op(op);
778 return 1; /* done */
779 }
780 }
781
782 return 0; /* not found */
783}
784
785/*
786 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
787 */
788static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
789{
790 struct bcm_op *op, *n;
791
792 list_for_each_entry_safe(op, n, ops, list) {
793 if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
794 list_del(&op->list);
795 bcm_remove_op(op);
796 return 1; /* done */
797 }
798 }
799
800 return 0; /* not found */
801}
802
803/*
804 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
805 */
806static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
807 int ifindex)
808{
809 struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);
810
811 if (!op)
812 return -EINVAL;
813
814 /* put current values into msg_head */
815 msg_head->flags = op->flags;
816 msg_head->count = op->count;
817 msg_head->ival1 = op->ival1;
818 msg_head->ival2 = op->ival2;
819 msg_head->nframes = op->nframes;
820
821 bcm_send_to_user(op, msg_head, op->frames, 0);
822
823 return MHSIZ;
824}
825
826/*
827 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
828 */
829static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
830 int ifindex, struct sock *sk)
831{
832 struct bcm_sock *bo = bcm_sk(sk);
833 struct bcm_op *op;
834 unsigned int i;
835 int err;
836
837 /* we need a real device to send frames */
838 if (!ifindex)
839 return -ENODEV;
840
841 /* check nframes boundaries - we need at least one can_frame */
842 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
843 return -EINVAL;
844
845 /* check the given can_id */
846 op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);
847
848 if (op) {
849 /* update existing BCM operation */
850
851 /*
852 * Do we need more space for the can_frames than currently
853 * allocated? -> This is a _really_ unusual use-case and
854 * therefore (complexity / locking) it is not supported.
855 */
856 if (msg_head->nframes > op->nframes)
857 return -E2BIG;
858
859 /* update can_frames content */
860 for (i = 0; i < msg_head->nframes; i++) {
861 err = memcpy_fromiovec((u8 *)&op->frames[i],
862 msg->msg_iov, CFSIZ);
863
864 if (op->frames[i].can_dlc > 8)
865 err = -EINVAL;
866
867 if (err < 0)
868 return err;
869
870 if (msg_head->flags & TX_CP_CAN_ID) {
871 /* copy can_id into frame */
872 op->frames[i].can_id = msg_head->can_id;
873 }
874 }
875
876 } else {
877 /* insert new BCM operation for the given can_id */
878
879 op = kzalloc(OPSIZ, GFP_KERNEL);
880 if (!op)
881 return -ENOMEM;
882
883 op->can_id = msg_head->can_id;
884
885 /* create array for can_frames and copy the data */
886 if (msg_head->nframes > 1) {
887 op->frames = kmalloc(msg_head->nframes * CFSIZ,
888 GFP_KERNEL);
889 if (!op->frames) {
890 kfree(op);
891 return -ENOMEM;
892 }
893 } else
894 op->frames = &op->sframe;
895
896 for (i = 0; i < msg_head->nframes; i++) {
897 err = memcpy_fromiovec((u8 *)&op->frames[i],
898 msg->msg_iov, CFSIZ);
899
900 if (op->frames[i].can_dlc > 8)
901 err = -EINVAL;
902
903 if (err < 0) {
904 if (op->frames != &op->sframe)
905 kfree(op->frames);
906 kfree(op);
907 return err;
908 }
909
910 if (msg_head->flags & TX_CP_CAN_ID) {
911 /* copy can_id into frame */
912 op->frames[i].can_id = msg_head->can_id;
913 }
914 }
915
916 /* tx_ops never compare with previously received messages */
917 op->last_frames = NULL;
918
919 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
920 op->sk = sk;
921 op->ifindex = ifindex;
922
923 /* initialize uninitialized (kzalloc) structure */
924 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
925 op->timer.function = bcm_tx_timeout_handler;
926
927 /* initialize tasklet for tx countevent notification */
928 tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
929 (unsigned long) op);
930
931 /* currently unused in tx_ops */
932 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
933
934 /* add this bcm_op to the list of the tx_ops */
935 list_add(&op->list, &bo->tx_ops);
936
937 } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
938
939 if (op->nframes != msg_head->nframes) {
940 op->nframes = msg_head->nframes;
941 /* start multiple frame transmission with index 0 */
942 op->currframe = 0;
943 }
944
945 /* check flags */
946
947 op->flags = msg_head->flags;
948
949 if (op->flags & TX_RESET_MULTI_IDX) {
950 /* start multiple frame transmission with index 0 */
951 op->currframe = 0;
952 }
953
954 if (op->flags & SETTIMER) {
955 /* set timer values */
956 op->count = msg_head->count;
957 op->ival1 = msg_head->ival1;
958 op->ival2 = msg_head->ival2;
959 op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
960 op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
961
962 /* disable an active timer due to zero values? */
963 if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
964 hrtimer_cancel(&op->timer);
965 }
966
967 if (op->flags & STARTTIMER) {
968 hrtimer_cancel(&op->timer);
969 /* spec: send can_frame when starting timer */
970 op->flags |= TX_ANNOUNCE;
971 }
972
973 if (op->flags & TX_ANNOUNCE) {
974 bcm_can_tx(op);
975 if (op->count)
976 op->count--;
977 }
978
979 if (op->flags & STARTTIMER)
980 bcm_tx_start_timer(op);
981
982 return msg_head->nframes * CFSIZ + MHSIZ;
983}
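
/*
 * Minimal userspace sketch of a TX_SETUP as handled above, sending one
 * frame cyclically every 100 ms via ival2 (illustrative only; CAN ID,
 * data and timer values are assumptions, 's' is a connected CAN_BCM
 * socket, <linux/can.h> and <linux/can/bcm.h> provide the structures
 * and opcodes):
 *
 *   struct {
 *           struct bcm_msg_head msg_head;
 *           struct can_frame frame;
 *   } tx;
 *
 *   memset(&tx, 0, sizeof(tx));
 *   tx.msg_head.opcode        = TX_SETUP;
 *   tx.msg_head.can_id        = 0x123;
 *   tx.msg_head.flags         = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *   tx.msg_head.nframes       = 1;
 *   tx.msg_head.ival2.tv_usec = 100000;
 *   tx.frame.can_dlc          = 2;
 *   tx.frame.data[0]          = 0xde;
 *   tx.frame.data[1]          = 0xad;
 *
 *   write(s, &tx, sizeof(tx));
 */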
984
985/*
986 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
987 */
988static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
989 int ifindex, struct sock *sk)
990{
991 struct bcm_sock *bo = bcm_sk(sk);
992 struct bcm_op *op;
993 int do_rx_register;
994 int err = 0;
995
996 if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
997 /* be robust against wrong usage ... */
998 msg_head->flags |= RX_FILTER_ID;
999 /* ignore trailing garbage */
1000 msg_head->nframes = 0;
1001 }
1002
1003 /* the first element contains the mux-mask => up to MAX_NFRAMES + 1 elements */
1004 if (msg_head->nframes > MAX_NFRAMES + 1)
1005 return -EINVAL;
1006
1007 if ((msg_head->flags & RX_RTR_FRAME) &&
1008 ((msg_head->nframes != 1) ||
1009 (!(msg_head->can_id & CAN_RTR_FLAG))))
1010 return -EINVAL;
1011
1012 /* check the given can_id */
1013 op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
1014 if (op) {
1015 /* update existing BCM operation */
1016
1017 /*
1018 * Do we need more space for the can_frames than currently
1019 * allocated? -> This is a _really_ unusual use-case and
1020 * therefore (complexity / locking) it is not supported.
1021 */
1022 if (msg_head->nframes > op->nframes)
1023 return -E2BIG;
1024
1025 if (msg_head->nframes) {
1026 /* update can_frames content */
1027 err = memcpy_fromiovec((u8 *)op->frames,
1028 msg->msg_iov,
1029 msg_head->nframes * CFSIZ);
1030 if (err < 0)
1031 return err;
1032
1033 /* clear last_frames to indicate 'nothing received' */
1034 memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
1035 }
1036
1037 op->nframes = msg_head->nframes;
1038
1039 /* Only an update -> do not call can_rx_register() */
1040 do_rx_register = 0;
1041
1042 } else {
1043 /* insert new BCM operation for the given can_id */
1044 op = kzalloc(OPSIZ, GFP_KERNEL);
1045 if (!op)
1046 return -ENOMEM;
1047
1048 op->can_id = msg_head->can_id;
1049 op->nframes = msg_head->nframes;
1050
1051 if (msg_head->nframes > 1) {
1052 /* create array for can_frames and copy the data */
1053 op->frames = kmalloc(msg_head->nframes * CFSIZ,
1054 GFP_KERNEL);
1055 if (!op->frames) {
1056 kfree(op);
1057 return -ENOMEM;
1058 }
1059
1060 /* create and init array for received can_frames */
1061 op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
1062 GFP_KERNEL);
1063 if (!op->last_frames) {
1064 kfree(op->frames);
1065 kfree(op);
1066 return -ENOMEM;
1067 }
1068
1069 } else {
1070 op->frames = &op->sframe;
1071 op->last_frames = &op->last_sframe;
1072 }
1073
1074 if (msg_head->nframes) {
1075 err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
1076 msg_head->nframes * CFSIZ);
1077 if (err < 0) {
1078 if (op->frames != &op->sframe)
1079 kfree(op->frames);
1080 if (op->last_frames != &op->last_sframe)
1081 kfree(op->last_frames);
1082 kfree(op);
1083 return err;
1084 }
1085 }
1086
1087 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1088 op->sk = sk;
1089 op->ifindex = ifindex;
1090
1091 /* ifindex for timeout events w/o previous frame reception */
1092 op->rx_ifindex = ifindex;
1093
1094 /* initialize uninitialized (kzalloc) structure */
1095 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1096 op->timer.function = bcm_rx_timeout_handler;
1097
1098 /* initialize tasklet for rx timeout notification */
1099 tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
1100 (unsigned long) op);
1101
1102 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1103 op->thrtimer.function = bcm_rx_thr_handler;
1104
1105 /* initialize tasklet for rx throttle handling */
1106 tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
1107 (unsigned long) op);
1108
1109 /* add this bcm_op to the list of the rx_ops */
1110 list_add(&op->list, &bo->rx_ops);
1111
1112 /* call can_rx_register() */
1113 do_rx_register = 1;
1114
1115 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1116
1117 /* check flags */
1118 op->flags = msg_head->flags;
1119
1120 if (op->flags & RX_RTR_FRAME) {
1121
1122 /* no timers in RTR-mode */
1123 hrtimer_cancel(&op->thrtimer);
1124 hrtimer_cancel(&op->timer);
1125
1126 /*
1127 * funny feature in RX(!)_SETUP only for RTR-mode:
1128 * copy can_id into frame BUT without RTR-flag to
1129 * prevent a full-load-loopback-test ... ;-]
1130 */
1131 if ((op->flags & TX_CP_CAN_ID) ||
1132 (op->frames[0].can_id == op->can_id))
1133 op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
1134
1135 } else {
1136 if (op->flags & SETTIMER) {
1137
1138 /* set timer value */
1139 op->ival1 = msg_head->ival1;
1140 op->ival2 = msg_head->ival2;
1141 op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
1142 op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
1143
1144 /* disable an active timer due to zero value? */
1145 if (!op->kt_ival1.tv64)
1146 hrtimer_cancel(&op->timer);
1147
1148 /*
1149 * In any case cancel the throttle timer, flush
1150 * potentially blocked msgs and reset throttle handling
1151 */
1152 op->kt_lastmsg = ktime_set(0, 0);
1153 hrtimer_cancel(&op->thrtimer);
1154 bcm_rx_thr_flush(op, 1);
1155 }
1156
1157 if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
1158 hrtimer_start(&op->timer, op->kt_ival1,
1159 HRTIMER_MODE_REL);
1160 }
1161
1162 /* now we can register for can_ids, if we added a new bcm_op */
1163 if (do_rx_register) {
1164 if (ifindex) {
1165 struct net_device *dev;
1166
1167 dev = dev_get_by_index(&init_net, ifindex);
1168 if (dev) {
1169 err = can_rx_register(dev, op->can_id,
1170 REGMASK(op->can_id),
1171 bcm_rx_handler, op,
1172 "bcm");
1173
1174 op->rx_reg_dev = dev;
1175 dev_put(dev);
1176 }
1177
1178 } else
1179 err = can_rx_register(NULL, op->can_id,
1180 REGMASK(op->can_id),
1181 bcm_rx_handler, op, "bcm");
1182 if (err) {
1183 /* this bcm rx op is broken -> remove it */
1184 list_del(&op->list);
1185 bcm_remove_op(op);
1186 return err;
1187 }
1188 }
1189
1190 return msg_head->nframes * CFSIZ + MHSIZ;
1191}
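
/*
 * Minimal userspace sketch of a RX_SETUP with a content filter as set
 * up above (illustrative only; CAN ID, mask and timeout are assumptions,
 * 's' is a connected CAN_BCM socket). Changes in byte 0 of matching
 * frames generate RX_CHANGED messages, five seconds of silence
 * generate RX_TIMEOUT:
 *
 *   struct {
 *           struct bcm_msg_head msg_head;
 *           struct can_frame frame;
 *   } rx;
 *
 *   memset(&rx, 0, sizeof(rx));
 *   rx.msg_head.opcode       = RX_SETUP;
 *   rx.msg_head.can_id       = 0x123;
 *   rx.msg_head.flags        = SETTIMER | STARTTIMER;
 *   rx.msg_head.nframes      = 1;
 *   rx.msg_head.ival1.tv_sec = 5;
 *   rx.frame.data[0]         = 0xff;
 *
 *   write(s, &rx, sizeof(rx));
 */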
1192
1193/*
1194 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1195 */
1196static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1197{
1198 struct sk_buff *skb;
1199 struct net_device *dev;
1200 int err;
1201
1202 /* we need a real device to send frames */
1203 if (!ifindex)
1204 return -ENODEV;
1205
1206 skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), GFP_KERNEL);
1207 if (!skb)
1208 return -ENOMEM;
1209
1210 can_skb_reserve(skb);
1211
1212 err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
1213 if (err < 0) {
1214 kfree_skb(skb);
1215 return err;
1216 }
1217
1218 dev = dev_get_by_index(&init_net, ifindex);
1219 if (!dev) {
1220 kfree_skb(skb);
1221 return -ENODEV;
1222 }
1223
1224 can_skb_prv(skb)->ifindex = dev->ifindex;
1225 skb->dev = dev;
1226 can_skb_set_owner(skb, sk);
1227 err = can_send(skb, 1); /* send with loopback */
1228 dev_put(dev);
1229
1230 if (err)
1231 return err;
1232
1233 return CFSIZ + MHSIZ;
1234}
1235
1236/*
1237 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1238 */
1239static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
1240 struct msghdr *msg, size_t size)
1241{
1242 struct sock *sk = sock->sk;
1243 struct bcm_sock *bo = bcm_sk(sk);
1244 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1245 struct bcm_msg_head msg_head;
1246 int ret; /* read bytes or error codes as return value */
1247
1248 if (!bo->bound)
1249 return -ENOTCONN;
1250
1251 /* check for valid message length from userspace */
1252 if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
1253 return -EINVAL;
1254
1255 /* check for alternative ifindex for this bcm_op */
1256
1257 if (!ifindex && msg->msg_name) {
1258 /* no bound device as default => check msg_name */
1259 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
1260
1261 if (msg->msg_namelen < sizeof(*addr))
1262 return -EINVAL;
1263
1264 if (addr->can_family != AF_CAN)
1265 return -EINVAL;
1266
1267 /* ifindex from sendto() */
1268 ifindex = addr->can_ifindex;
1269
1270 if (ifindex) {
1271 struct net_device *dev;
1272
1273 dev = dev_get_by_index(&init_net, ifindex);
1274 if (!dev)
1275 return -ENODEV;
1276
1277 if (dev->type != ARPHRD_CAN) {
1278 dev_put(dev);
1279 return -ENODEV;
1280 }
1281
1282 dev_put(dev);
1283 }
1284 }
1285
1286 /* read message head information */
1287
1288 ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
1289 if (ret < 0)
1290 return ret;
1291
1292 lock_sock(sk);
1293
1294 switch (msg_head.opcode) {
1295
1296 case TX_SETUP:
1297 ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1298 break;
1299
1300 case RX_SETUP:
1301 ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1302 break;
1303
1304 case TX_DELETE:
1305 if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
1306 ret = MHSIZ;
1307 else
1308 ret = -EINVAL;
1309 break;
1310
1311 case RX_DELETE:
1312 if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
1313 ret = MHSIZ;
1314 else
1315 ret = -EINVAL;
1316 break;
1317
1318 case TX_READ:
1319 /* reuse msg_head for the reply to TX_READ */
1320 msg_head.opcode = TX_STATUS;
1321 ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1322 break;
1323
1324 case RX_READ:
1325 /* reuse msg_head for the reply to RX_READ */
1326 msg_head.opcode = RX_STATUS;
1327 ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1328 break;
1329
1330 case TX_SEND:
1331 /* we need exactly one can_frame behind the msg head */
1332 if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
1333 ret = -EINVAL;
1334 else
1335 ret = bcm_tx_send(msg, ifindex, sk);
1336 break;
1337
1338 default:
1339 ret = -EINVAL;
1340 break;
1341 }
1342
1343 release_sock(sk);
1344
1345 return ret;
1346}
1347
1348/*
1349 * notification handler for netdevice status changes
1350 */
1351static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1352 void *ptr)
1353{
1354 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1355 struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1356 struct sock *sk = &bo->sk;
1357 struct bcm_op *op;
1358 int notify_enodev = 0;
1359
1360 if (!net_eq(dev_net(dev), &init_net))
1361 return NOTIFY_DONE;
1362
1363 if (dev->type != ARPHRD_CAN)
1364 return NOTIFY_DONE;
1365
1366 switch (msg) {
1367
1368 case NETDEV_UNREGISTER:
1369 lock_sock(sk);
1370
1371 /* remove device specific receive entries */
1372 list_for_each_entry(op, &bo->rx_ops, list)
1373 if (op->rx_reg_dev == dev)
1374 bcm_rx_unreg(dev, op);
1375
1376 /* remove device reference, if this is our bound device */
1377 if (bo->bound && bo->ifindex == dev->ifindex) {
1378 bo->bound = 0;
1379 bo->ifindex = 0;
1380 notify_enodev = 1;
1381 }
1382
1383 release_sock(sk);
1384
1385 if (notify_enodev) {
1386 sk->sk_err = ENODEV;
1387 if (!sock_flag(sk, SOCK_DEAD))
1388 sk->sk_error_report(sk);
1389 }
1390 break;
1391
1392 case NETDEV_DOWN:
1393 if (bo->bound && bo->ifindex == dev->ifindex) {
1394 sk->sk_err = ENETDOWN;
1395 if (!sock_flag(sk, SOCK_DEAD))
1396 sk->sk_error_report(sk);
1397 }
1398 }
1399
1400 return NOTIFY_DONE;
1401}
1402
1403/*
1404 * initial settings for all BCM sockets to be set at socket creation time
1405 */
1406static int bcm_init(struct sock *sk)
1407{
1408 struct bcm_sock *bo = bcm_sk(sk);
1409
1410 bo->bound = 0;
1411 bo->ifindex = 0;
1412 bo->dropped_usr_msgs = 0;
1413 bo->bcm_proc_read = NULL;
1414
1415 INIT_LIST_HEAD(&bo->tx_ops);
1416 INIT_LIST_HEAD(&bo->rx_ops);
1417
1418 /* set notifier */
1419 bo->notifier.notifier_call = bcm_notifier;
1420
1421 register_netdevice_notifier(&bo->notifier);
1422
1423 return 0;
1424}
1425
1426/*
1427 * standard socket functions
1428 */
1429static int bcm_release(struct socket *sock)
1430{
1431 struct sock *sk = sock->sk;
1432 struct bcm_sock *bo;
1433 struct bcm_op *op, *next;
1434
1435 if (sk == NULL)
1436 return 0;
1437
1438 bo = bcm_sk(sk);
1439
1440 /* remove bcm_ops, timer, rx_unregister(), etc. */
1441
1442 unregister_netdevice_notifier(&bo->notifier);
1443
1444 lock_sock(sk);
1445
1446 list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1447 bcm_remove_op(op);
1448
1449 list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1450 /*
1451 * Don't care if we're bound or not (due to netdev problems);
1452 * can_rx_unregister() is always a safe thing to do here.
1453 */
1454 if (op->ifindex) {
1455 /*
1456 * Only remove subscriptions that had not
1457 * been removed due to NETDEV_UNREGISTER
1458 * in bcm_notifier()
1459 */
1460 if (op->rx_reg_dev) {
1461 struct net_device *dev;
1462
1463 dev = dev_get_by_index(&init_net, op->ifindex);
1464 if (dev) {
1465 bcm_rx_unreg(dev, op);
1466 dev_put(dev);
1467 }
1468 }
1469 } else
1470 can_rx_unregister(NULL, op->can_id,
1471 REGMASK(op->can_id),
1472 bcm_rx_handler, op);
1473
1474 bcm_remove_op(op);
1475 }
1476
1477 /* remove procfs entry */
1478 if (proc_dir && bo->bcm_proc_read)
1479 remove_proc_entry(bo->procname, proc_dir);
1480
1481 /* remove device reference */
1482 if (bo->bound) {
1483 bo->bound = 0;
1484 bo->ifindex = 0;
1485 }
1486
1487 sock_orphan(sk);
1488 sock->sk = NULL;
1489
1490 release_sock(sk);
1491 sock_put(sk);
1492
1493 return 0;
1494}
1495
1496static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1497 int flags)
1498{
1499 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1500 struct sock *sk = sock->sk;
1501 struct bcm_sock *bo = bcm_sk(sk);
1502
1503 if (len < sizeof(*addr))
1504 return -EINVAL;
1505
1506 if (bo->bound)
1507 return -EISCONN;
1508
1509 /* bind a device to this socket */
1510 if (addr->can_ifindex) {
1511 struct net_device *dev;
1512
1513 dev = dev_get_by_index(&init_net, addr->can_ifindex);
1514 if (!dev)
1515 return -ENODEV;
1516
1517 if (dev->type != ARPHRD_CAN) {
1518 dev_put(dev);
1519 return -ENODEV;
1520 }
1521
1522 bo->ifindex = dev->ifindex;
1523 dev_put(dev);
1524
1525 } else {
1526 /* no interface reference for ifindex = 0 ('any' CAN device) */
1527 bo->ifindex = 0;
1528 }
1529
1530 bo->bound = 1;
1531
1532 if (proc_dir) {
1533 /* unique socket inode number as filename */
1534 sprintf(bo->procname, "%lu", sock_i_ino(sk));
1535 bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
1536 proc_dir,
1537 &bcm_proc_fops, sk);
1538 }
1539
1540 return 0;
1541}
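
/*
 * Typical userspace counterpart of the connect() path above
 * (illustrative sketch; error handling omitted, the interface name
 * "can0" is an assumption, if_nametoindex() comes from <net/if.h>):
 *
 *   int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *
 *   struct sockaddr_can addr = {
 *           .can_family  = AF_CAN,
 *           .can_ifindex = if_nametoindex("can0"),
 *   };
 *
 *   connect(s, (struct sockaddr *)&addr, sizeof(addr));
 */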
1542
1543static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
1544 struct msghdr *msg, size_t size, int flags)
1545{
1546 struct sock *sk = sock->sk;
1547 struct sk_buff *skb;
1548 int error = 0;
1549 int noblock;
1550 int err;
1551
1552 noblock = flags & MSG_DONTWAIT;
1553 flags &= ~MSG_DONTWAIT;
1554 skb = skb_recv_datagram(sk, flags, noblock, &error);
1555 if (!skb)
1556 return error;
1557
1558 if (skb->len < size)
1559 size = skb->len;
1560
1561 err = memcpy_toiovec(msg->msg_iov, skb->data, size);
1562 if (err < 0) {
1563 skb_free_datagram(sk, skb);
1564 return err;
1565 }
1566
1567 sock_recv_ts_and_drops(msg, sk, skb);
1568
1569 if (msg->msg_name) {
1570 __sockaddr_check_size(sizeof(struct sockaddr_can));
1571 msg->msg_namelen = sizeof(struct sockaddr_can);
1572 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1573 }
1574
1575 skb_free_datagram(sk, skb);
1576
1577 return size;
1578}
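
/*
 * Userspace counterpart of the receive path above (illustrative
 * sketch; handle_update() is a placeholder). Every message starts
 * with a bcm_msg_head; RX_CHANGED carries one can_frame behind it,
 * RX_TIMEOUT and TX_EXPIRED carry none:
 *
 *   struct {
 *           struct bcm_msg_head msg_head;
 *           struct can_frame frame;
 *   } ev;
 *
 *   ssize_t n = read(s, &ev, sizeof(ev));
 *
 *   if (n >= (ssize_t)sizeof(ev.msg_head) &&
 *       ev.msg_head.opcode == RX_CHANGED)
 *           handle_update(ev.msg_head.can_id, &ev.frame);
 */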
1579
1580static const struct proto_ops bcm_ops = {
1581 .family = PF_CAN,
1582 .release = bcm_release,
1583 .bind = sock_no_bind,
1584 .connect = bcm_connect,
1585 .socketpair = sock_no_socketpair,
1586 .accept = sock_no_accept,
1587 .getname = sock_no_getname,
1588 .poll = datagram_poll,
1589 .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */
1590 .listen = sock_no_listen,
1591 .shutdown = sock_no_shutdown,
1592 .setsockopt = sock_no_setsockopt,
1593 .getsockopt = sock_no_getsockopt,
1594 .sendmsg = bcm_sendmsg,
1595 .recvmsg = bcm_recvmsg,
1596 .mmap = sock_no_mmap,
1597 .sendpage = sock_no_sendpage,
1598};
1599
1600static struct proto bcm_proto __read_mostly = {
1601 .name = "CAN_BCM",
1602 .owner = THIS_MODULE,
1603 .obj_size = sizeof(struct bcm_sock),
1604 .init = bcm_init,
1605};
1606
1607static const struct can_proto bcm_can_proto = {
1608 .type = SOCK_DGRAM,
1609 .protocol = CAN_BCM,
1610 .ops = &bcm_ops,
1611 .prot = &bcm_proto,
1612};
1613
1614static int __init bcm_module_init(void)
1615{
1616 int err;
1617
1618 printk(banner);
1619
1620 err = can_proto_register(&bcm_can_proto);
1621 if (err < 0) {
1622 printk(KERN_ERR "can: registration of bcm protocol failed\n");
1623 return err;
1624 }
1625
1626 /* create /proc/net/can-bcm directory */
1627 proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
1628 return 0;
1629}
1630
1631static void __exit bcm_module_exit(void)
1632{
1633 can_proto_unregister(&bcm_can_proto);
1634
1635 if (proc_dir)
1636 remove_proc_entry("can-bcm", init_net.proc_net);
1637}
1638
1639module_init(bcm_module_init);
1640module_exit(bcm_module_exit);