// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 Protonic Holland,
 *               David Jander
 * Copyright (C) 2014-2021, 2023 Pengutronix,
 *               Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

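/* A CAN controller driver that offloads its RX path queues skbs here from
 * its interrupt handler; a NAPI poll (can_rx_offload_napi_poll()) later
 * feeds them to the network stack in timestamp or FIFO order.
 *
 * Rough usage sketch (hypothetical driver; "priv", "mydrv_mailbox_read"
 * and "pending" are placeholders, not part of this file):
 *
 *      // probe/open: describe the mailboxes and register
 *      priv->offload.mailbox_read = mydrv_mailbox_read;
 *      priv->offload.mb_first = 0;
 *      priv->offload.mb_last = 31;
 *      err = can_rx_offload_add_timestamp(dev, &priv->offload);
 *      can_rx_offload_enable(&priv->offload);
 *
 *      // interrupt handler: read pending mailboxes, then kick NAPI
 *      can_rx_offload_irq_offload_timestamp(&priv->offload, pending);
 *      can_rx_offload_irq_finish(&priv->offload);
 *
 *      // teardown: can_rx_offload_disable() (see rx-offload.h), then
 *      can_rx_offload_del(&priv->offload);
 */
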
struct can_rx_offload_cb {
        u32 timestamp;
};

static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

        return (struct can_rx_offload_cb *)skb->cb;
}

static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
                  unsigned int a, unsigned int b)
{
        if (offload->inc)
                return a <= b;
        else
                return a >= b;
}

static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
        if (offload->inc)
                return (*val)++;
        else
                return (*val)--;
}

static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
        struct can_rx_offload *offload = container_of(napi,
                                                      struct can_rx_offload,
                                                      napi);
        struct net_device *dev = offload->dev;
        struct net_device_stats *stats = &dev->stats;
        struct sk_buff *skb;
        int work_done = 0;

        while ((work_done < quota) &&
               (skb = skb_dequeue(&offload->skb_queue))) {
                struct can_frame *cf = (struct can_frame *)skb->data;

                work_done++;
                if (!(cf->can_id & CAN_ERR_FLAG)) {
                        stats->rx_packets++;
                        if (!(cf->can_id & CAN_RTR_FLAG))
                                stats->rx_bytes += cf->len;
                }
                netif_receive_skb(skb);
        }

        if (work_done < quota) {
                napi_complete_done(napi, work_done);

                /* Check if there was another interrupt */
                if (!skb_queue_empty(&offload->skb_queue))
                        napi_schedule(&offload->napi);
        }

        return work_done;
}

static inline void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
                     int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
        struct sk_buff *pos, *insert = NULL;

        skb_queue_reverse_walk(head, pos) {
                const struct can_rx_offload_cb *cb_pos, *cb_new;

                cb_pos = can_rx_offload_get_cb(pos);
                cb_new = can_rx_offload_get_cb(new);

                netdev_dbg(new->dev,
                           "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
                           __func__,
                           cb_pos->timestamp, cb_new->timestamp,
                           cb_new->timestamp - cb_pos->timestamp,
                           skb_queue_len(head));

                if (compare(pos, new) < 0)
                        continue;
                insert = pos;
                break;
        }
        if (!insert)
                __skb_queue_head(head, new);
        else
                __skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
        const struct can_rx_offload_cb *cb_a, *cb_b;

        cb_a = can_rx_offload_get_cb(a);
        cb_b = can_rx_offload_get_cb(b);

        /* Subtract two u32 and return result as int, to keep
         * difference steady around the u32 overflow.
         */
        return cb_b->timestamp - cb_a->timestamp;
}

/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n of
 * the device and return the mailbox's content as a struct sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox contents are discarded by reading them into an
 * overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
        struct sk_buff *skb;
        struct can_rx_offload_cb *cb;
        bool drop = false;
        u32 timestamp;

        /* If queue is full drop frame */
        if (unlikely(skb_queue_len(&offload->skb_queue) >
                     offload->skb_queue_len_max))
                drop = true;

        skb = offload->mailbox_read(offload, n, &timestamp, drop);
        /* Mailbox was empty. */
        if (unlikely(!skb))
                return NULL;

        /* There was a problem reading the mailbox, propagate
         * error value.
         */
        if (IS_ERR(skb)) {
                offload->dev->stats.rx_dropped++;
                offload->dev->stats.rx_fifo_errors++;

                return skb;
        }

        /* Mailbox was read. */
        cb = can_rx_offload_get_cb(skb);
        cb->timestamp = timestamp;

        return skb;
}

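/**
 * can_rx_offload_irq_offload_timestamp() - Offload all pending mailboxes
 * @offload: pointer to rx_offload context
 * @pending: bitmask of pending mailboxes
 *
 * Reads every mailbox whose bit is set in @pending and adds the
 * resulting skbs to the IRQ queue, sorted by timestamp. Intended to be
 * called from the device's interrupt handler, followed by
 * can_rx_offload_irq_finish() or can_rx_offload_threaded_irq_finish().
 *
 * Return: Number of CAN frames read from the mailboxes.
 */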
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
                                         u64 pending)
{
        unsigned int i;
        int received = 0;

        for (i = offload->mb_first;
             can_rx_offload_le(offload, i, offload->mb_last);
             can_rx_offload_inc(offload, &i)) {
                struct sk_buff *skb;

                if (!(pending & BIT_ULL(i)))
                        continue;

                skb = can_rx_offload_offload_one(offload, i);
                if (IS_ERR_OR_NULL(skb))
                        continue;

                __skb_queue_add_sort(&offload->skb_irq_queue, skb,
                                     can_rx_offload_compare);
                received++;
        }

        return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

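/**
 * can_rx_offload_irq_offload_fifo() - Offload a RX FIFO
 * @offload: pointer to rx_offload context
 *
 * Drains the device's RX FIFO by repeatedly reading mailbox 0 until it
 * is empty and appends the skbs to the IRQ queue in FIFO order.
 * Intended to be called from the device's interrupt handler, followed
 * by can_rx_offload_irq_finish() or can_rx_offload_threaded_irq_finish().
 *
 * Return: Number of CAN frames read from the FIFO.
 */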
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
        struct sk_buff *skb;
        int received = 0;

        while (1) {
                skb = can_rx_offload_offload_one(offload, 0);
                if (IS_ERR(skb))
                        continue;
                if (!skb)
                        break;

                __skb_queue_tail(&offload->skb_irq_queue, skb);
                received++;
        }

        return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

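/**
 * can_rx_offload_queue_timestamp() - Queue a skb sorted by timestamp
 * @offload: pointer to rx_offload context
 * @skb: skb to queue
 * @timestamp: timestamp used for sorting
 *
 * Adds @skb to the IRQ queue, sorted by @timestamp. If the queue has
 * already reached its maximum length, the skb is freed instead.
 *
 * Return: 0 on success, -ENOBUFS if the queue is full.
 */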
int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
                                   struct sk_buff *skb, u32 timestamp)
{
        struct can_rx_offload_cb *cb;

        if (skb_queue_len(&offload->skb_queue) >
            offload->skb_queue_len_max) {
                dev_kfree_skb_any(skb);
                return -ENOBUFS;
        }

        cb = can_rx_offload_get_cb(skb);
        cb->timestamp = timestamp;

        __skb_queue_add_sort(&offload->skb_irq_queue, skb,
                             can_rx_offload_compare);

        return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);

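/**
 * can_rx_offload_get_echo_skb_queue_timestamp() - Queue TX echo skb, sorted
 * @offload: pointer to rx_offload context
 * @idx: echo skb index
 * @timestamp: timestamp used for sorting
 * @frame_len_ptr: optional pointer to store the frame length
 *
 * Fetches the echo skb stored at @idx and queues it sorted by
 * @timestamp, so that TX-complete frames are delivered to the stack in
 * timestamp order together with received frames. On queueing errors the
 * error counters are updated.
 *
 * Return: Length of the echoed frame's payload in bytes, 0 if no echo
 *         skb is stored at @idx.
 */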
unsigned int
can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
                                            unsigned int idx, u32 timestamp,
                                            unsigned int *frame_len_ptr)
{
        struct net_device *dev = offload->dev;
        struct net_device_stats *stats = &dev->stats;
        struct sk_buff *skb;
        unsigned int len;
        int err;

        skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
        if (!skb)
                return 0;

        err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
        if (err) {
                stats->rx_errors++;
                stats->tx_fifo_errors++;
        }

        return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_timestamp);

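/**
 * can_rx_offload_queue_tail() - Queue a skb at the tail of the IRQ queue
 * @offload: pointer to rx_offload context
 * @skb: skb to queue
 *
 * Appends @skb to the IRQ queue without sorting. If the queue has
 * already reached its maximum length, the skb is freed instead.
 *
 * Return: 0 on success, -ENOBUFS if the queue is full.
 */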
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
                              struct sk_buff *skb)
{
        if (skb_queue_len(&offload->skb_queue) >
            offload->skb_queue_len_max) {
                dev_kfree_skb_any(skb);
                return -ENOBUFS;
        }

        __skb_queue_tail(&offload->skb_irq_queue, skb);

        return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

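/**
 * can_rx_offload_get_echo_skb_queue_tail() - Queue TX echo skb, unsorted
 * @offload: pointer to rx_offload context
 * @idx: echo skb index
 * @frame_len_ptr: optional pointer to store the frame length
 *
 * Like can_rx_offload_get_echo_skb_queue_timestamp(), but appends the
 * echo skb at the tail of the IRQ queue instead of sorting it by
 * timestamp.
 *
 * Return: Length of the echoed frame's payload in bytes, 0 if no echo
 *         skb is stored at @idx.
 */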
unsigned int
can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
                                       unsigned int idx,
                                       unsigned int *frame_len_ptr)
{
        struct net_device *dev = offload->dev;
        struct net_device_stats *stats = &dev->stats;
        struct sk_buff *skb;
        unsigned int len;
        int err;

        skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
        if (!skb)
                return 0;

        err = can_rx_offload_queue_tail(offload, skb);
        if (err) {
                stats->rx_errors++;
                stats->tx_fifo_errors++;
        }

        return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_tail);

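/**
 * can_rx_offload_irq_finish() - Hand queued skbs over to NAPI
 * @offload: pointer to rx_offload context
 *
 * Splices the (lockless) IRQ queue onto the main skb_queue and
 * schedules NAPI. Call at the end of the hard IRQ handler, after the
 * can_rx_offload_irq_offload_*() or queueing helpers.
 */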
void can_rx_offload_irq_finish(struct can_rx_offload *offload)
{
        unsigned long flags;
        int queue_len;

        if (skb_queue_empty_lockless(&offload->skb_irq_queue))
                return;

        spin_lock_irqsave(&offload->skb_queue.lock, flags);
        skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
        spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

        queue_len = skb_queue_len(&offload->skb_queue);
        if (queue_len > offload->skb_queue_len_max / 8)
                netdev_dbg(offload->dev, "%s: queue_len=%d\n",
                           __func__, queue_len);

        napi_schedule(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);

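/**
 * can_rx_offload_threaded_irq_finish() - Hand queued skbs over to NAPI
 * @offload: pointer to rx_offload context
 *
 * Same as can_rx_offload_irq_finish(), but for use from a threaded IRQ
 * handler: NAPI is scheduled with bottom halves disabled so that the
 * poll runs in softirq context as expected.
 */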
void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
{
        unsigned long flags;
        int queue_len;

        if (skb_queue_empty_lockless(&offload->skb_irq_queue))
                return;

        spin_lock_irqsave(&offload->skb_queue.lock, flags);
        skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
        spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

        queue_len = skb_queue_len(&offload->skb_queue);
        if (queue_len > offload->skb_queue_len_max / 8)
                netdev_dbg(offload->dev, "%s: queue_len=%d\n",
                           __func__, queue_len);

        local_bh_disable();
        napi_schedule(&offload->napi);
        local_bh_enable();
}
EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);

static int can_rx_offload_init_queue(struct net_device *dev,
                                     struct can_rx_offload *offload,
                                     unsigned int weight)
{
        offload->dev = dev;

        /* Limit queue len to 4x the weight (rounded to next power of two) */
        offload->skb_queue_len_max = 2 << fls(weight);
        offload->skb_queue_len_max *= 4;
        skb_queue_head_init(&offload->skb_queue);
        __skb_queue_head_init(&offload->skb_irq_queue);

        netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
                              weight);

        dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
                __func__, offload->skb_queue_len_max);

        return 0;
}

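/**
 * can_rx_offload_add_timestamp() - Register rx_offload in timestamp mode
 * @dev: pointer to the net_device
 * @offload: pointer to rx_offload context
 *
 * For devices with a range of RX mailboxes and hardware timestamps. The
 * caller must have set up struct can_rx_offload::mailbox_read and the
 * mailbox range (mb_first/mb_last); the NAPI weight is derived from the
 * number of mailboxes.
 *
 * Return: 0 on success, -EINVAL on an invalid configuration.
 */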
int can_rx_offload_add_timestamp(struct net_device *dev,
                                 struct can_rx_offload *offload)
{
        unsigned int weight;

        if (offload->mb_first > BITS_PER_LONG_LONG ||
            offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
                return -EINVAL;

        if (offload->mb_first < offload->mb_last) {
                offload->inc = true;
                weight = offload->mb_last - offload->mb_first;
        } else {
                offload->inc = false;
                weight = offload->mb_first - offload->mb_last;
        }

        return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

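/**
 * can_rx_offload_add_fifo() - Register rx_offload in FIFO mode
 * @dev: pointer to the net_device
 * @offload: pointer to rx_offload context
 * @weight: NAPI poll weight
 *
 * For devices that expose their received frames through a single RX
 * FIFO. The caller must have set up struct can_rx_offload::mailbox_read.
 *
 * Return: 0 on success, -EINVAL if no mailbox_read callback is set.
 */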
int can_rx_offload_add_fifo(struct net_device *dev,
                            struct can_rx_offload *offload, unsigned int weight)
{
        if (!offload->mailbox_read)
                return -EINVAL;

        return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

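/**
 * can_rx_offload_add_manual() - Register rx_offload in manual mode
 * @dev: pointer to the net_device
 * @offload: pointer to rx_offload context
 * @weight: NAPI poll weight
 *
 * For drivers that read the hardware themselves and only use the
 * queueing helpers (e.g. can_rx_offload_queue_tail()); a mailbox_read
 * callback must not be set.
 *
 * Return: 0 on success, -EINVAL if a mailbox_read callback is set.
 */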
int can_rx_offload_add_manual(struct net_device *dev,
                              struct can_rx_offload *offload,
                              unsigned int weight)
{
        if (offload->mailbox_read)
                return -EINVAL;

        return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);

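/**
 * can_rx_offload_enable() - Enable NAPI processing
 * @offload: pointer to rx_offload context
 */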
void can_rx_offload_enable(struct can_rx_offload *offload)
{
        napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

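/**
 * can_rx_offload_del() - Unregister rx_offload and free all queued skbs
 * @offload: pointer to rx_offload context
 */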
void can_rx_offload_del(struct can_rx_offload *offload)
{
        netif_napi_del(&offload->napi);
        skb_queue_purge(&offload->skb_queue);
        __skb_queue_purge(&offload->skb_irq_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);