// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-scratch.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

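/*
 * One NAPI context per POW/SSO receive group. Which of the 16 groups
 * this driver services is selected by the pow_receive_groups bitmask,
 * which is configured elsewhere in the driver.
 */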
static atomic_t oct_rx_ready = ATOMIC_INIT(0);

static struct oct_rx_group {
	int irq;
	int group;
	struct napi_struct napi;
} oct_rx_group[16];

/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @irq: Interrupt number.
 * @napi_id: Cookie to identify the NAPI instance.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(irq);
	napi_schedule(napi_id);

	return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	int port;

	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		port = work->word0.pip.cn68xx.pknd;
	else
		port = work->word1.cn38xx.ipprt;

	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS. Note these packets still get
		 * counted as frame errors.
		 */
	} else if (work->word2.snoip.err_code == 5 ||
		   work->word2.snoip.err_code == 7) {
		/*
		 * We received a packet with either an alignment error
		 * or an FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non-spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(port);
		int index = cvmx_helper_get_interface_index_num(port);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
			cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			u8 *ptr =
				cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			while (i < work->word1.len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Port received 0xd5 preamble */
				work->packet_ptr.s.addr += i + 1;
				work->word1.len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/* Port received 0xd preamble */
				work->packet_ptr.s.addr += i;
				work->word1.len -= i + 4;
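				/*
				 * The preamble ended mid-byte, so every
				 * payload byte is offset by one nibble.
				 * Rebuild each byte from the high nibble
				 * of this byte and the low nibble of the
				 * next: for example, the byte pair
				 * 0xad 0xcb yields a realigned 0xba.
				 */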
				for (i = 0; i < work->word1.len; i++) {
					*ptr =
						((*ptr & 0xf0) >> 4) |
						((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
						   port);
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
				   port, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}

static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb)
{
	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;
	int len = work->word1.len;
	int segment_size;

	while (segments--) {
		union cvmx_buf_ptr next_ptr;

		next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

		/*
		 * Octeon Errata PKI-100: The segment size is wrong.
		 *
		 * Until it is fixed, calculate the segment size based on
		 * the packet pool buffer size.
		 * When it is fixed, the following line should be replaced
		 * with this one:
		 * int segment_size = segment_ptr.s.size;
		 */
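		/*
		 * segment_ptr.s.back counts 128-byte cache lines from
		 * the address back to the start of the FPA buffer, so
		 * (((addr >> 7) - back) << 7) recovers the buffer start
		 * and the size works out to the pool buffer size minus
		 * this segment's offset within its buffer.
		 */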
		segment_size =
			CVMX_FPA_PACKET_POOL_SIZE -
			(segment_ptr.s.addr -
			 (((segment_ptr.s.addr >> 7) -
			   segment_ptr.s.back) << 7));

		/* Don't copy more than what is left in the packet */
		if (segment_size > len)
			segment_size = len;

		/* Copy the data into the packet */
		skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
			     segment_size);
		len -= segment_size;
		segment_ptr = next_ptr;
	}
}

static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
{
	const int coreid = cvmx_get_core_num();
	u64 old_group_mask;
	u64 old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
			       BIT(rx_group->group));
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
			       (old_group_mask & ~0xFFFFull) |
			       BIT(rx_group->group));
	}

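	/*
	 * Prime the pipeline: issue the first work request
	 * asynchronously via IOBDMA so that its response can be read
	 * back at the top of the receive loop below while this core
	 * does other work.
	 */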
	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}

	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;
		int port;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (!work) {
			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
					       BIT(rx_group->group));
				cvmx_write_csr(CVMX_SSO_WQ_INT,
					       BIT(rx_group->group));
			} else {
				union cvmx_pow_wq_int wq_int;

				wq_int.u64 = 0;
				wq_int.s.iq_dis = BIT(rx_group->group);
				wq_int.s.wq_int = BIT(rx_group->group);
				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			}
			break;
		}
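		/*
		 * When the FPA packet pool is seeded with skbuffs, the
		 * refill code stashes the skb pointer in the word just
		 * before the packet data area; recover it here so the
		 * zero-copy path below can reuse the skb directly.
		 */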
		pskb = (struct sk_buff **)
			(cvm_oct_get_buffer_ptr(work->packet_ptr) -
			sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
							    CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
		rx_count++;

		skb_in_hw = work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}

		if (octeon_has_feature(OCTEON_FEATURE_PKND))
			port = work->word0.pip.cn68xx.pknd;
		else
			port = work->word1.cn38xx.ipprt;

		prefetch(cvm_oct_device[port]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr -
				cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->word1.len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->word1.len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				u8 *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				skb_put_data(skb, ptr, work->word1.len);
				/* No packet buffers to free */
			} else {
				copy_segments_to_skb(work, skb);
			}
			packet_not_copied = 0;
		}
		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[port])) {
			struct net_device *dev = cvm_oct_device[port];

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error ||
					     !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += skb->len;
				}
				netif_receive_skb(skb);
			} else {
				/*
				 * Drop any packet received for a device that
				 * isn't up.
				 */
				dev->stats.rx_dropped++;
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   port);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		} else {
			cvm_oct_free_work(work);
		}
	}
	/* Restore the original POW group mask */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
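	/*
	 * Replenish the skbuff pool; buffers handed to the stack on the
	 * zero-copy path above must be replaced with fresh ones.
	 */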
	cvm_oct_rx_refill_pool(0);

	return rx_count;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance.
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
						     napi);
	int rx_count;

	rx_count = cvm_oct_poll(rx_group, budget);

	if (rx_count < budget) {
		/* No more work */
		napi_complete_done(napi, rx_count);
		enable_irq(rx_group->irq);
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets on the device.
 *
 * @dev: Device to poll. Unused
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	int i;

	if (!atomic_read(&oct_rx_ready))
		return;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		cvm_oct_poll(&oct_rx_group[i], 16);
	}
}
#endif

void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (!dev_for_napi)
		panic("No net_devices were allocated.");

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		int ret;

		if (!(pow_receive_groups & BIT(i)))
			continue;

		netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&oct_rx_group[i].napi);

		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
		oct_rx_group[i].group = i;

		/* Register an IRQ handler to receive POW interrupts */
		ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
				  "Ethernet", &oct_rx_group[i].napi);
		if (ret)
			panic("Could not acquire Ethernet IRQ %d\n",
			      oct_rx_group[i].irq);

		disable_irq_nosync(oct_rx_group[i].irq);

		/* Enable POW interrupt when our port has at least one packet */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			union cvmx_sso_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
		} else {
			union cvmx_pow_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
		}

		/* Schedule NAPI now. This will indirectly enable the
		 * interrupt.
		 */
		napi_schedule(&oct_rx_group[i].napi);
	}
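	/* Mark RX as ready so cvm_oct_poll_controller() will service
	 * the groups.
	 */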
	atomic_inc(&oct_rx_ready);
}

void cvm_oct_rx_shutdown(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		/* Disable POW interrupt */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX))
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
		else
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);

		/* Free the interrupt handler, passing the same cookie
		 * that was given to request_irq() in
		 * cvm_oct_rx_initialize().
		 */
		free_irq(oct_rx_group[i].irq, &oct_rx_group[i].napi);

		netif_napi_del(&oct_rx_group[i].napi);
	}
}