1// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
2/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
3 * Parts of this driver are based on the following:
4 * - Kvaser linux pciefd driver (version 5.42)
5 * - PEAK linux canfd driver
6 */
7
8#include <linux/bitfield.h>
9#include <linux/can/dev.h>
10#include <linux/device.h>
11#include <linux/ethtool.h>
12#include <linux/iopoll.h>
13#include <linux/kernel.h>
14#include <linux/minmax.h>
15#include <linux/module.h>
16#include <linux/netdevice.h>
17#include <linux/pci.h>
18#include <linux/timer.h>
19
20MODULE_LICENSE("Dual BSD/GPL");
21MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
22MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
23
24#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"
25
26#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
27#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
28#define KVASER_PCIEFD_MAX_ERR_REP 256U
29#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
30#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
31#define KVASER_PCIEFD_DMA_COUNT 2U
32#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
33
34#define KVASER_PCIEFD_VENDOR 0x1a07
35
36/* Altera based devices */
37#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
38#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
39#define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f
40#define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010
41#define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011
42
43/* SmartFusion2 based devices */
44#define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012
45#define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013
46#define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014
47#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
48#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016
49
50/* Xilinx based devices */
51#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017
52#define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019
53
54/* Altera SerDes Enable 64-bit DMA address translation */
55#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)
56
57/* SmartFusion2 SerDes LSB address translation mask */
58#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)
59
60/* Xilinx SerDes LSB address translation mask */
61#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12)
62
63/* Kvaser KCAN CAN controller registers */
64#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
65#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
66#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
67#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
68#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
69#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
70#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414
71#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
72#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
73#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
74#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
75#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
76#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
77/* System identification and information registers */
78#define KVASER_PCIEFD_SYSID_VERSION_REG 0x8
79#define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc
80#define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10
81#define KVASER_PCIEFD_SYSID_BUILD_REG 0x14
82/* Shared receive buffer FIFO registers */
83#define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4
84/* Shared receive buffer registers */
85#define KVASER_PCIEFD_SRB_CMD_REG 0x0
86#define KVASER_PCIEFD_SRB_IEN_REG 0x04
87#define KVASER_PCIEFD_SRB_IRQ_REG 0x0c
88#define KVASER_PCIEFD_SRB_STAT_REG 0x10
89#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14
90#define KVASER_PCIEFD_SRB_CTRL_REG 0x18
91
92/* System build information fields */
93#define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24)
94#define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16)
95#define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0)
96#define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1)
97
98/* Reset DMA buffer 0, 1 and FIFO offset */
99#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
100#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
101#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)
102
103/* DMA underflow, buffer 0 and 1 */
104#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
105#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
106/* DMA overflow, buffer 0 and 1 */
107#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
108#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
109/* DMA packet done, buffer 0 and 1 */
110#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
111#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
112
113/* Got DMA support */
114#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
115/* DMA idle */
116#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
117
118/* SRB current packet level */
119#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0)
120
121/* DMA Enable */
122#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
123
124/* KCAN CTRL packet types */
125#define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29)
126#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4
127#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5
128
129/* Command sequence number */
130#define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16)
131/* Command bits */
132#define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0)
133/* Abort, flush and reset */
134#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
135/* Request status packet */
136#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
137
138/* Transmitter unaligned */
139#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
140/* Tx FIFO empty */
141#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
142/* Tx FIFO overflow */
143#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
144/* Tx buffer flush done */
145#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
146/* Abort done */
147#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
148/* Rx FIFO overflow */
149#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
150/* FDF bit when controller is in classic CAN mode */
151#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
152/* Bus parameter protection error */
153#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
154/* Tx FIFO unaligned end */
155#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
156/* Tx FIFO unaligned read */
157#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
158
159/* Tx FIFO size */
160#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16)
161/* Tx FIFO current packet level */
162#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0)
163
164/* Current status packet sequence number */
165#define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24)
166/* Controller got CAN FD capability */
167#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
168/* Controller got one-shot capability */
169#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
170/* Controller in reset mode */
171#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
172/* Reset mode request */
173#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
174/* Bus off */
175#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
176/* Idle state. Controller in reset mode and no abort or flush pending */
177#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
178/* Abort request */
179#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
180/* Controller is bus off */
181#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \
182 (KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \
183 KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM)
184
185/* Classic CAN mode */
186#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
187/* Active error flag enable. Clear to force error passive */
188#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
189/* Acknowledgment packet type */
190#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
191/* CAN FD non-ISO */
192#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
193/* Error packet enable */
194#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
195/* Listen only mode */
196#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
197/* Reset mode */
198#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
199
200/* BTRN and BTRD fields */
201#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26)
202#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17)
203#define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13)
204#define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0)
205
206/* PWM Control fields */
207#define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16)
208#define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0)
209
210/* KCAN packet type IDs */
211#define KVASER_PCIEFD_PACK_TYPE_DATA 0x0
212#define KVASER_PCIEFD_PACK_TYPE_ACK 0x1
213#define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2
214#define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3
215#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4
216#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5
217#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6
218#define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8
219#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9
220
221/* Common KCAN packet definitions, second word */
222#define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28)
223#define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25)
224#define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0)
225
226/* KCAN Transmit/Receive data packet, first word */
227#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
228#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
229#define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0)
230/* KCAN Transmit data packet, second word */
231#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
232#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
233/* KCAN Transmit/Receive data packet, second word */
234#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
235#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
236#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
237#define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8)
238
239/* KCAN Transmit acknowledge packet, first word */
240#define KVASER_PCIEFD_APACKET_NACK BIT(11)
241#define KVASER_PCIEFD_APACKET_ABL BIT(10)
242#define KVASER_PCIEFD_APACKET_CT BIT(9)
243#define KVASER_PCIEFD_APACKET_FLU BIT(8)
244
245/* KCAN Status packet, first word */
246#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
247#define KVASER_PCIEFD_SPACK_IRM BIT(21)
248#define KVASER_PCIEFD_SPACK_IDET BIT(20)
249#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
250#define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8)
251#define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0)
252/* KCAN Status packet, second word */
253#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
254#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
255#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
256
257/* KCAN Error detected packet, second word */
258#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
259
260/* Macros for calculating addresses of registers */
261#define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \
262 ((pcie)->reg_base + (pcie)->driver_data->address_offset->block)
263#define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \
264 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien))
265#define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \
266 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq))
267#define KVASER_PCIEFD_SERDES_ADDR(pcie) \
268 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes))
269#define KVASER_PCIEFD_SYSID_ADDR(pcie) \
270 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid))
271#define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \
272 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback))
273#define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \
274 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo))
275#define KVASER_PCIEFD_SRB_ADDR(pcie) \
276 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb))
277#define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \
278 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0))
279#define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \
280 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1))
281#define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \
282 (KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)))
283#define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \
284 (KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie)))
285
286struct kvaser_pciefd;
287static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
288 dma_addr_t addr, int index);
289static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
290 dma_addr_t addr, int index);
291static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
292 dma_addr_t addr, int index);
293
294struct kvaser_pciefd_address_offset {
295 u32 serdes;
296 u32 pci_ien;
297 u32 pci_irq;
298 u32 sysid;
299 u32 loopback;
300 u32 kcan_srb_fifo;
301 u32 kcan_srb;
302 u32 kcan_ch0;
303 u32 kcan_ch1;
304};
305
306struct kvaser_pciefd_dev_ops {
307 void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
308 dma_addr_t addr, int index);
309};
310
311struct kvaser_pciefd_irq_mask {
312 u32 kcan_rx0;
313 u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
314 u32 all;
315};
316
317struct kvaser_pciefd_driver_data {
318 const struct kvaser_pciefd_address_offset *address_offset;
319 const struct kvaser_pciefd_irq_mask *irq_mask;
320 const struct kvaser_pciefd_dev_ops *ops;
321};
322
323static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = {
324 .serdes = 0x1000,
325 .pci_ien = 0x50,
326 .pci_irq = 0x40,
327 .sysid = 0x1f020,
328 .loopback = 0x1f000,
329 .kcan_srb_fifo = 0x1f200,
330 .kcan_srb = 0x1f400,
331 .kcan_ch0 = 0x10000,
332 .kcan_ch1 = 0x11000,
333};
334
335static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = {
336 .serdes = 0x280c8,
337 .pci_ien = 0x102004,
338 .pci_irq = 0x102008,
339 .sysid = 0x100000,
340 .loopback = 0x103000,
341 .kcan_srb_fifo = 0x120000,
342 .kcan_srb = 0x121000,
343 .kcan_ch0 = 0x140000,
344 .kcan_ch1 = 0x142000,
345};
346
347static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = {
348 .serdes = 0x00208,
349 .pci_ien = 0x102004,
350 .pci_irq = 0x102008,
351 .sysid = 0x100000,
352 .loopback = 0x103000,
353 .kcan_srb_fifo = 0x120000,
354 .kcan_srb = 0x121000,
355 .kcan_ch0 = 0x140000,
356 .kcan_ch1 = 0x142000,
357};
358
359static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = {
360 .kcan_rx0 = BIT(4),
361 .kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) },
362 .all = GENMASK(4, 0),
363};
364
365static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
366 .kcan_rx0 = BIT(4),
367 .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
368 .all = GENMASK(19, 16) | BIT(4),
369};
370
371static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = {
372 .kcan_rx0 = BIT(4),
373 .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23) },
374 .all = GENMASK(23, 16) | BIT(4),
375};
376
377static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
378 .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
379};
380
381static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = {
382 .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2,
383};
384
385static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = {
386 .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx,
387};
388
389static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = {
390 .address_offset = &kvaser_pciefd_altera_address_offset,
391 .irq_mask = &kvaser_pciefd_altera_irq_mask,
392 .ops = &kvaser_pciefd_altera_dev_ops,
393};
394
395static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = {
396 .address_offset = &kvaser_pciefd_sf2_address_offset,
397 .irq_mask = &kvaser_pciefd_sf2_irq_mask,
398 .ops = &kvaser_pciefd_sf2_dev_ops,
399};
400
401static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = {
402 .address_offset = &kvaser_pciefd_xilinx_address_offset,
403 .irq_mask = &kvaser_pciefd_xilinx_irq_mask,
404 .ops = &kvaser_pciefd_xilinx_dev_ops,
405};
406
407struct kvaser_pciefd_can {
408 struct can_priv can;
409 struct kvaser_pciefd *kv_pcie;
410 void __iomem *reg_base;
411 struct can_berr_counter bec;
412 u8 cmd_seq;
413 int err_rep_cnt;
414 int echo_idx;
415 spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
416 spinlock_t echo_lock; /* Locks the message echo buffer */
417 struct timer_list bec_poll_timer;
418 struct completion start_comp, flush_comp;
419};
420
421struct kvaser_pciefd {
422 struct pci_dev *pci;
423 void __iomem *reg_base;
424 struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
425 const struct kvaser_pciefd_driver_data *driver_data;
426 void *dma_data[KVASER_PCIEFD_DMA_COUNT];
427 u8 nr_channels;
428 u32 bus_freq;
429 u32 freq;
430 u32 freq_to_ticks_div;
431};
432
433struct kvaser_pciefd_rx_packet {
434 u32 header[2];
435 u64 timestamp;
436};
437
438struct kvaser_pciefd_tx_packet {
439 u32 header[2];
440 u8 data[64];
441};
442
443static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
444 .name = KVASER_PCIEFD_DRV_NAME,
445 .tseg1_min = 1,
446 .tseg1_max = 512,
447 .tseg2_min = 1,
448 .tseg2_max = 32,
449 .sjw_max = 16,
450 .brp_min = 1,
451 .brp_max = 8192,
452 .brp_inc = 1,
453};
454
455static struct pci_device_id kvaser_pciefd_id_table[] = {
456 {
457 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID),
458 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
459 },
460 {
461 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID),
462 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
463 },
464 {
465 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID),
466 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
467 },
468 {
469 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID),
470 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
471 },
472 {
473 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID),
474 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
475 },
476 {
477 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID),
478 .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
479 },
480 {
481 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID),
482 .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
483 },
484 {
485 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID),
486 .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
487 },
488 {
489 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID),
490 .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
491 },
492 {
493 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID),
494 .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
495 },
496 {
497 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID),
498 .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
499 },
500 {
501 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID),
502 .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
503 },
504 {
505 0,
506 },
507};
508MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
509
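/* Write a command to the KCAN command register, tagged with the next
 * command sequence number.
 */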
510static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd)
511{
512 iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) |
513 FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq),
514 can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
515}
516
517static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
518{
519 kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ);
520}
521
522static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can)
523{
524 kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT);
525}
526
527static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
528{
529 u32 mode;
530 unsigned long irq;
531
532 spin_lock_irqsave(&can->lock, irq);
533 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
534 if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
535 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
536 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
537 }
538 spin_unlock_irqrestore(&can->lock, irq);
539}
540
541static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
542{
543 u32 mode;
544 unsigned long irq;
545
546 spin_lock_irqsave(&can->lock, irq);
547 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
548 mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
549 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
550 spin_unlock_irqrestore(&can->lock, irq);
551}
552
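/* Enable the per-channel KCAN interrupts handled by the driver */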
553static inline void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
554{
555 u32 msk;
556
557 msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
558 KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
559 KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
560 KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
561 KVASER_PCIEFD_KCAN_IRQ_TAR;
562
563 iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
564}
565
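/* Convert a device timestamp (in clock ticks) into a nanosecond hardware
 * timestamp on the skb.
 */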
566static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie,
567 struct sk_buff *skb, u64 timestamp)
568{
569 skb_hwtstamps(skb)->hwtstamp =
570 ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div));
571}
572
573static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
574{
575 u32 mode;
576 unsigned long irq;
577
578 spin_lock_irqsave(&can->lock, irq);
579 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
580 if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
581 mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
582 if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
583 mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
584 else
585 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
586 } else {
587 mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
588 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
589 }
590
591 if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
592 mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
593 else
594 mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
595 mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
596 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
597 /* Use ACK packet type */
598 mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
599 mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
600 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
601
602 spin_unlock_irqrestore(&can->lock, irq);
603}
604
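/* Start an abort/flush of the controller; callers wait on flush_comp for
 * the end-of-flush acknowledgment.
 */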
605static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
606{
607 u32 status;
608 unsigned long irq;
609
610 spin_lock_irqsave(&can->lock, irq);
611 iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
612 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
613 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
614 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
615 if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
616 /* If controller is already idle, run abort, flush and reset */
617 kvaser_pciefd_abort_flush_reset(can);
618 } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
619 u32 mode;
620
621 /* Put controller in reset mode */
622 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
623 mode |= KVASER_PCIEFD_KCAN_MODE_RM;
624 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
625 }
626 spin_unlock_irqrestore(&can->lock, irq);
627}
628
629static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
630{
631 u32 mode;
632 unsigned long irq;
633
634 del_timer(&can->bec_poll_timer);
635 if (!completion_done(&can->flush_comp))
636 kvaser_pciefd_start_controller_flush(can);
637
638 if (!wait_for_completion_timeout(&can->flush_comp,
639 KVASER_PCIEFD_WAIT_TIMEOUT)) {
640 netdev_err(can->can.dev, "Timeout during bus on flush\n");
641 return -ETIMEDOUT;
642 }
643
644 spin_lock_irqsave(&can->lock, irq);
645 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
646 iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
647 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
648 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
649 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
650 mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
651 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
652 spin_unlock_irqrestore(&can->lock, irq);
653
654 if (!wait_for_completion_timeout(&can->start_comp,
655 KVASER_PCIEFD_WAIT_TIMEOUT)) {
656 netdev_err(can->can.dev, "Timeout during bus on reset\n");
657 return -ETIMEDOUT;
658 }
659 /* Reset interrupt handling */
660 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
661 iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
662
663 kvaser_pciefd_set_tx_irq(can);
664 kvaser_pciefd_setup_controller(can);
665 can->can.state = CAN_STATE_ERROR_ACTIVE;
666 netif_wake_queue(can->can.dev);
667 can->bec.txerr = 0;
668 can->bec.rxerr = 0;
669 can->err_rep_cnt = 0;
670
671 return 0;
672}
673
674static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
675{
676 u8 top;
677 u32 pwm_ctrl;
678 unsigned long irq;
679
680 spin_lock_irqsave(&can->lock, irq);
681 pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
682 top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl);
683 /* Set duty cycle to zero */
684 pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
685 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
686 spin_unlock_irqrestore(&can->lock, irq);
687}
688
689static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
690{
691 int top, trigger;
692 u32 pwm_ctrl;
693 unsigned long irq;
694
695 kvaser_pciefd_pwm_stop(can);
696 spin_lock_irqsave(&can->lock, irq);
697 /* Set frequency to 500 kHz */
698 top = can->kv_pcie->bus_freq / (2 * 500000) - 1;
699
700 pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
701 pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
702 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
703
704 /* Set duty cycle to 95% */
705 trigger = (100 * top - 95 * (top + 1) + 50) / 100;
706 pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger);
707 pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
708 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
709 spin_unlock_irqrestore(&can->lock, irq);
710}
711
712static int kvaser_pciefd_open(struct net_device *netdev)
713{
714 int ret;
715 struct kvaser_pciefd_can *can = netdev_priv(netdev);
716
717 ret = open_candev(netdev);
718 if (ret)
719 return ret;
720
721 ret = kvaser_pciefd_bus_on(can);
722 if (ret) {
723 close_candev(netdev);
724 return ret;
725 }
726
727 return 0;
728}
729
730static int kvaser_pciefd_stop(struct net_device *netdev)
731{
732 struct kvaser_pciefd_can *can = netdev_priv(netdev);
733 int ret = 0;
734
735 /* Don't interrupt ongoing flush */
736 if (!completion_done(&can->flush_comp))
737 kvaser_pciefd_start_controller_flush(can);
738
739 if (!wait_for_completion_timeout(&can->flush_comp,
740 KVASER_PCIEFD_WAIT_TIMEOUT)) {
741 netdev_err(can->can.dev, "Timeout during stop\n");
742 ret = -ETIMEDOUT;
743 } else {
744 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
745 del_timer(&can->bec_poll_timer);
746 }
747 can->can.state = CAN_STATE_STOPPED;
748 close_candev(netdev);
749
750 return ret;
751}
752
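/* Build a KCAN transmit packet from the skb. Returns the number of 32-bit
 * data words to be written to the Tx FIFO.
 */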
753static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
754 struct kvaser_pciefd_can *can,
755 struct sk_buff *skb)
756{
757 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
758 int packet_size;
759 int seq = can->echo_idx;
760
761 memset(p, 0, sizeof(*p));
762 if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
763 p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
764
765 if (cf->can_id & CAN_RTR_FLAG)
766 p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;
767
768 if (cf->can_id & CAN_EFF_FLAG)
769 p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;
770
771 p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id);
772 p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;
773
774 if (can_is_canfd_skb(skb)) {
775 p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
776 can_fd_len2dlc(cf->len));
777 p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
778 if (cf->flags & CANFD_BRS)
779 p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
780 if (cf->flags & CANFD_ESI)
781 p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
782 } else {
783 p->header[1] |=
784 FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
785 can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
786 }
787
788 p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
789
790 packet_size = cf->len;
791 memcpy(p->data, cf->data, packet_size);
792
793 return DIV_ROUND_UP(packet_size, 4);
794}
795
796static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
797 struct net_device *netdev)
798{
799 struct kvaser_pciefd_can *can = netdev_priv(netdev);
800 unsigned long irq_flags;
801 struct kvaser_pciefd_tx_packet packet;
802 int nr_words;
803 u8 count;
804
805 if (can_dev_dropped_skb(netdev, skb))
806 return NETDEV_TX_OK;
807
808 nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
809
810 spin_lock_irqsave(&can->echo_lock, irq_flags);
811 /* Prepare and save echo skb in internal slot */
812 can_put_echo_skb(skb, netdev, can->echo_idx, 0);
813
814 /* Move echo index to the next slot */
815 can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
816
817 /* Write header to fifo */
818 iowrite32(packet.header[0],
819 can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
820 iowrite32(packet.header[1],
821 can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
822
823 if (nr_words) {
824 u32 data_last = ((u32 *)packet.data)[nr_words - 1];
825
826 /* Write data to fifo, except last word */
827 iowrite32_rep(can->reg_base +
828 KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
829 nr_words - 1);
830 /* Write last word to end of fifo */
831 __raw_writel(data_last, can->reg_base +
832 KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
833 } else {
834 /* Complete write to fifo */
835 __raw_writel(0, can->reg_base +
836 KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
837 }
838
839 count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
840 ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
841 /* No room for a new message, stop the queue until at least one
842 * successful transmit
843 */
844 if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
845 netif_stop_queue(netdev);
846 spin_unlock_irqrestore(&can->echo_lock, irq_flags);
847
848 return NETDEV_TX_OK;
849}
850
851static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
852{
853 u32 mode, test, btrn;
854 unsigned long irq_flags;
855 int ret;
856 struct can_bittiming *bt;
857
858 if (data)
859 bt = &can->can.data_bittiming;
860 else
861 bt = &can->can.bittiming;
862
863 btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) |
864 FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) |
865 FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) |
866 FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1);
867
868 spin_lock_irqsave(&can->lock, irq_flags);
869 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
870 /* Put the circuit in reset mode */
871 iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
872 can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
873
874 /* Can only set bittiming if in reset mode */
875 ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
876 test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10);
877 if (ret) {
878 spin_unlock_irqrestore(&can->lock, irq_flags);
879 return -EBUSY;
880 }
881
882 if (data)
883 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
884 else
885 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
886 /* Restore previous reset mode status */
887 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
888 spin_unlock_irqrestore(&can->lock, irq_flags);
889
890 return 0;
891}
892
893static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
894{
895 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
896}
897
898static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
899{
900 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
901}
902
903static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
904{
905 struct kvaser_pciefd_can *can = netdev_priv(ndev);
906 int ret = 0;
907
908 switch (mode) {
909 case CAN_MODE_START:
910 if (!can->can.restart_ms)
911 ret = kvaser_pciefd_bus_on(can);
912 break;
913 default:
914 return -EOPNOTSUPP;
915 }
916
917 return ret;
918}
919
920static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
921 struct can_berr_counter *bec)
922{
923 struct kvaser_pciefd_can *can = netdev_priv(ndev);
924
925 bec->rxerr = can->bec.rxerr;
926 bec->txerr = can->bec.txerr;
927
928 return 0;
929}
930
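/* Timer callback: re-enable error packet generation and request a status
 * packet to refresh the bus error counters.
 */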
931static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
932{
933 struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);
934
935 kvaser_pciefd_enable_err_gen(can);
936 kvaser_pciefd_request_status(can);
937 can->err_rep_cnt = 0;
938}
939
940static const struct net_device_ops kvaser_pciefd_netdev_ops = {
941 .ndo_open = kvaser_pciefd_open,
942 .ndo_stop = kvaser_pciefd_stop,
943 .ndo_eth_ioctl = can_eth_ioctl_hwts,
944 .ndo_start_xmit = kvaser_pciefd_start_xmit,
945 .ndo_change_mtu = can_change_mtu,
946};
947
948static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
949 .get_ts_info = can_ethtool_op_get_ts_info_hwts,
950};
951
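/* Allocate and initialize a candev for each channel found on the board */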
952static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
953{
954 int i;
955
956 for (i = 0; i < pcie->nr_channels; i++) {
957 struct net_device *netdev;
958 struct kvaser_pciefd_can *can;
959 u32 status, tx_nr_packets_max;
960
961 netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
962 KVASER_PCIEFD_CAN_TX_MAX_COUNT);
963 if (!netdev)
964 return -ENOMEM;
965
966 can = netdev_priv(netdev);
967 netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
968 netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
969 can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i);
970 can->kv_pcie = pcie;
971 can->cmd_seq = 0;
972 can->err_rep_cnt = 0;
973 can->bec.txerr = 0;
974 can->bec.rxerr = 0;
975
976 init_completion(&can->start_comp);
977 init_completion(&can->flush_comp);
978 timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0);
979
980 /* Disable Bus load reporting */
981 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
982
983 tx_nr_packets_max =
984 FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
985 ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
986
987 can->can.clock.freq = pcie->freq;
988 can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
989 can->echo_idx = 0;
990 spin_lock_init(&can->echo_lock);
991 spin_lock_init(&can->lock);
992
993 can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
994 can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
995 can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
996 can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
997 can->can.do_set_mode = kvaser_pciefd_set_mode;
998 can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
999 can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
1000 CAN_CTRLMODE_FD |
1001 CAN_CTRLMODE_FD_NON_ISO |
1002 CAN_CTRLMODE_CC_LEN8_DLC;
1003
1004 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
1005 if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
1006 dev_err(&pcie->pci->dev,
1007 "CAN FD not supported as expected %d\n", i);
1008
1009 free_candev(netdev);
1010 return -ENODEV;
1011 }
1012
1013 if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
1014 can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
1015
1016 netdev->flags |= IFF_ECHO;
1017 SET_NETDEV_DEV(netdev, &pcie->pci->dev);
1018
1019 iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1020 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
1021 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1022
1023 pcie->can[i] = can;
1024 kvaser_pciefd_pwm_start(can);
1025 }
1026
1027 return 0;
1028}
1029
1030static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
1031{
1032 int i;
1033
1034 for (i = 0; i < pcie->nr_channels; i++) {
1035 int ret = register_candev(pcie->can[i]->can.dev);
1036
1037 if (ret) {
1038 int j;
1039
1040 /* Unregister all successfully registered devices. */
1041 for (j = 0; j < i; j++)
1042 unregister_candev(pcie->can[j]->can.dev);
1043 return ret;
1044 }
1045 }
1046
1047 return 0;
1048}
1049
1050static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
1051 dma_addr_t addr, int index)
1052{
1053 void __iomem *serdes_base;
1054 u32 word1, word2;
1055
1056 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) {
1057 word1 = lower_32_bits(addr) | KVASER_PCIEFD_ALTERA_DMA_64BIT;
1058 word2 = upper_32_bits(addr);
1059 } else {
1060 word1 = addr;
1061 word2 = 0;
1062 }
1063 serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
1064 iowrite32(word1, serdes_base);
1065 iowrite32(word2, serdes_base + 0x4);
1066}
1067
1068static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
1069 dma_addr_t addr, int index)
1070{
1071 void __iomem *serdes_base;
1072 u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK;
1073 u32 msb = 0x0;
1074
1075 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
1076 msb = upper_32_bits(addr);
1077
1078 serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index;
1079 iowrite32(lsb, serdes_base);
1080 iowrite32(msb, serdes_base + 0x4);
1081}
1082
1083static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
1084 dma_addr_t addr, int index)
1085{
1086 void __iomem *serdes_base;
1087 u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
1088 u32 msb = 0x0;
1089
1090 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
1091 msb = upper_32_bits(addr);
1092
1093 serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
1094 iowrite32(msb, serdes_base);
1095 iowrite32(lsb, serdes_base + 0x4);
1096}
1097
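/* Allocate the two receive DMA buffers, map them through the SerDes
 * address translation registers, then reset and enable the shared
 * receive buffer DMA.
 */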
1098static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
1099{
1100 int i;
1101 u32 srb_status;
1102 u32 srb_packet_count;
1103 dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
1104
1105 /* Disable the DMA */
1106 iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
1107
1108 dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64));
1109
1110 for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
1111 pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
1112 KVASER_PCIEFD_DMA_SIZE,
1113 &dma_addr[i],
1114 GFP_KERNEL);
1115
1116 if (!pcie->dma_data[i] || !dma_addr[i]) {
1117 dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
1118 KVASER_PCIEFD_DMA_SIZE);
1119 return -ENOMEM;
1120 }
1121 pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i);
1122 }
1123
1124 /* Reset Rx FIFO, and both DMA buffers */
1125 iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
1126 KVASER_PCIEFD_SRB_CMD_RDB1,
1127 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
1128 /* Empty Rx FIFO */
1129 srb_packet_count =
1130 FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK,
1131 ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) +
1132 KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG));
1133 while (srb_packet_count) {
1134 /* Drop current packet in FIFO */
1135 ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
1136 srb_packet_count--;
1137 }
1138
1139 srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
1140 if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
1141 dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
1142 return -EIO;
1143 }
1144
1145 /* Enable the DMA */
1146 iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
1147 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
1148
1149 return 0;
1150}
1151
1152static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
1153{
1154 u32 version, srb_status, build;
1155
1156 version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG);
1157 pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
1158 FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));
1159
1160 build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
1161 dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
1162 FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version),
1163 FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version),
1164 FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build));
1165
1166 srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
1167 if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
1168 dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n");
1169 return -ENODEV;
1170 }
1171
1172 pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG);
1173 pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG);
1174 pcie->freq_to_ticks_div = pcie->freq / 1000000;
1175 if (pcie->freq_to_ticks_div == 0)
1176 pcie->freq_to_ticks_div = 1;
1177 /* Turn off all loopback functionality */
1178 iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie));
1179
1180 return 0;
1181}
1182
1183static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
1184 struct kvaser_pciefd_rx_packet *p,
1185 __le32 *data)
1186{
1187 struct sk_buff *skb;
1188 struct canfd_frame *cf;
1189 struct can_priv *priv;
1190 u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
1191 u8 dlc;
1192
1193 if (ch_id >= pcie->nr_channels)
1194 return -EIO;
1195
1196 priv = &pcie->can[ch_id]->can;
1197 dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]);
1198
1199 if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
1200 skb = alloc_canfd_skb(priv->dev, &cf);
1201 if (!skb) {
1202 priv->dev->stats.rx_dropped++;
1203 return -ENOMEM;
1204 }
1205
1206 cf->len = can_fd_dlc2len(dlc);
1207 if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
1208 cf->flags |= CANFD_BRS;
1209 if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
1210 cf->flags |= CANFD_ESI;
1211 } else {
1212 skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
1213 if (!skb) {
1214 priv->dev->stats.rx_dropped++;
1215 return -ENOMEM;
1216 }
1217 can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
1218 }
1219
1220 cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]);
1221 if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
1222 cf->can_id |= CAN_EFF_FLAG;
1223
1224 if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
1225 cf->can_id |= CAN_RTR_FLAG;
1226 } else {
1227 memcpy(cf->data, data, cf->len);
1228 priv->dev->stats.rx_bytes += cf->len;
1229 }
1230 priv->dev->stats.rx_packets++;
1231 kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
1232
1233 return netif_rx(skb);
1234}
1235
1236static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
1237 struct can_frame *cf,
1238 enum can_state new_state,
1239 enum can_state tx_state,
1240 enum can_state rx_state)
1241{
1242 can_change_state(can->can.dev, cf, tx_state, rx_state);
1243
1244 if (new_state == CAN_STATE_BUS_OFF) {
1245 struct net_device *ndev = can->can.dev;
1246 unsigned long irq_flags;
1247
1248 spin_lock_irqsave(&can->lock, irq_flags);
1249 netif_stop_queue(can->can.dev);
1250 spin_unlock_irqrestore(&can->lock, irq_flags);
1251 /* Prevent the CAN controller from automatically recovering from bus off */
1252 if (!can->can.restart_ms) {
1253 kvaser_pciefd_start_controller_flush(can);
1254 can_bus_off(ndev);
1255 }
1256 }
1257}
1258
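/* Derive the new CAN state, and the per-direction tx/rx states, from a
 * status packet and the current error counters.
 */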
1259static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
1260 struct can_berr_counter *bec,
1261 enum can_state *new_state,
1262 enum can_state *tx_state,
1263 enum can_state *rx_state)
1264{
1265 if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
1266 p->header[0] & KVASER_PCIEFD_SPACK_IRM)
1267 *new_state = CAN_STATE_BUS_OFF;
1268 else if (bec->txerr >= 255 || bec->rxerr >= 255)
1269 *new_state = CAN_STATE_BUS_OFF;
1270 else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
1271 *new_state = CAN_STATE_ERROR_PASSIVE;
1272 else if (bec->txerr >= 128 || bec->rxerr >= 128)
1273 *new_state = CAN_STATE_ERROR_PASSIVE;
1274 else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
1275 *new_state = CAN_STATE_ERROR_WARNING;
1276 else if (bec->txerr >= 96 || bec->rxerr >= 96)
1277 *new_state = CAN_STATE_ERROR_WARNING;
1278 else
1279 *new_state = CAN_STATE_ERROR_ACTIVE;
1280
1281 *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
1282 *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
1283}
1284
1285static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
1286 struct kvaser_pciefd_rx_packet *p)
1287{
1288 struct can_berr_counter bec;
1289 enum can_state old_state, new_state, tx_state, rx_state;
1290 struct net_device *ndev = can->can.dev;
1291 struct sk_buff *skb;
1292 struct can_frame *cf = NULL;
1293
1294 old_state = can->can.state;
1295
1296 bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
1297 bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
1298
1299 kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
1300 skb = alloc_can_err_skb(ndev, &cf);
1301 if (new_state != old_state) {
1302 kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
1303 if (old_state == CAN_STATE_BUS_OFF &&
1304 new_state == CAN_STATE_ERROR_ACTIVE &&
1305 can->can.restart_ms) {
1306 can->can.can_stats.restarts++;
1307 if (skb)
1308 cf->can_id |= CAN_ERR_RESTARTED;
1309 }
1310 }
1311
1312 can->err_rep_cnt++;
1313 can->can.can_stats.bus_error++;
1314 if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
1315 ndev->stats.tx_errors++;
1316 else
1317 ndev->stats.rx_errors++;
1318
1319 can->bec.txerr = bec.txerr;
1320 can->bec.rxerr = bec.rxerr;
1321
1322 if (!skb) {
1323 ndev->stats.rx_dropped++;
1324 return -ENOMEM;
1325 }
1326
1327 kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
1328 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
1329 cf->data[6] = bec.txerr;
1330 cf->data[7] = bec.rxerr;
1331
1332 netif_rx(skb);
1333
1334 return 0;
1335}
1336
1337static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
1338 struct kvaser_pciefd_rx_packet *p)
1339{
1340 struct kvaser_pciefd_can *can;
1341 u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
1342
1343 if (ch_id >= pcie->nr_channels)
1344 return -EIO;
1345
1346 can = pcie->can[ch_id];
1347 kvaser_pciefd_rx_error_frame(can, p);
1348 if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
1349 /* Do not report more errors until the bec_poll_timer expires */
1350 kvaser_pciefd_disable_err_gen(can);
1351 /* Start polling the error counters */
1352 mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1353
1354 return 0;
1355}
1356
1357static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
1358 struct kvaser_pciefd_rx_packet *p)
1359{
1360 struct can_berr_counter bec;
1361 enum can_state old_state, new_state, tx_state, rx_state;
1362
1363 old_state = can->can.state;
1364
1365 bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
1366 bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
1367
1368 kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
1369 if (new_state != old_state) {
1370 struct net_device *ndev = can->can.dev;
1371 struct sk_buff *skb;
1372 struct can_frame *cf;
1373
1374 skb = alloc_can_err_skb(ndev, &cf);
1375 if (!skb) {
1376 ndev->stats.rx_dropped++;
1377 return -ENOMEM;
1378 }
1379
1380 kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
1381 if (old_state == CAN_STATE_BUS_OFF &&
1382 new_state == CAN_STATE_ERROR_ACTIVE &&
1383 can->can.restart_ms) {
1384 can->can.can_stats.restarts++;
1385 cf->can_id |= CAN_ERR_RESTARTED;
1386 }
1387
1388 kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
1389
1390 cf->data[6] = bec.txerr;
1391 cf->data[7] = bec.rxerr;
1392
1393 netif_rx(skb);
1394 }
1395 can->bec.txerr = bec.txerr;
1396 can->bec.rxerr = bec.rxerr;
1397 /* Check if we need to poll the error counters */
1398 if (bec.txerr || bec.rxerr)
1399 mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1400
1401 return 0;
1402}
1403
1404static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
1405 struct kvaser_pciefd_rx_packet *p)
1406{
1407 struct kvaser_pciefd_can *can;
1408 u8 cmdseq;
1409 u32 status;
1410 u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
1411
1412 if (ch_id >= pcie->nr_channels)
1413 return -EIO;
1414
1415 can = pcie->can[ch_id];
1416
1417 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
1418 cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status);
1419
1420 /* Reset done, start abort and flush */
1421 if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
1422 p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
1423 p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
1424 cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
1425 status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
1426 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
1427 can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1428 kvaser_pciefd_abort_flush_reset(can);
1429 } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
1430 p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
1431 cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
1432 status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
1433 /* Reset detected, send end of flush if no packets are in the FIFO */
1434 u8 count;
1435
1436 count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
1437 ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
1438 if (!count)
1439 iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK,
1440 KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH),
1441 can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
1442 } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
1443 cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) {
1444 /* Response to status request received */
1445 kvaser_pciefd_handle_status_resp(can, p);
1446 if (can->can.state != CAN_STATE_BUS_OFF &&
1447 can->can.state != CAN_STATE_ERROR_ACTIVE) {
1448 mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1449 }
1450 } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
1451 !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) {
1452 /* Transition from reset mode to bus on detected */
1453 if (!completion_done(&can->start_comp))
1454 complete(&can->start_comp);
1455 }
1456
1457 return 0;
1458}
1459
1460static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
1461 struct kvaser_pciefd_rx_packet *p)
1462{
1463 struct sk_buff *skb;
1464 struct can_frame *cf;
1465
1466 skb = alloc_can_err_skb(can->can.dev, &cf);
1467 can->can.dev->stats.tx_errors++;
1468 if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
1469 if (skb)
1470 cf->can_id |= CAN_ERR_LOSTARB;
1471 can->can.can_stats.arbitration_lost++;
1472 } else if (skb) {
1473 cf->can_id |= CAN_ERR_ACK;
1474 }
1475
1476 if (skb) {
1477 cf->can_id |= CAN_ERR_BUSERROR;
1478 kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
1479 netif_rx(skb);
1480 } else {
1481 can->can.dev->stats.rx_dropped++;
1482 netdev_warn(can->can.dev, "No memory left for err_skb\n");
1483 }
1484}
1485
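/* Handle a transmit acknowledge packet: complete the echo skb, update Tx
 * statistics and wake the queue when the Tx FIFO has room again.
 */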
1486static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
1487 struct kvaser_pciefd_rx_packet *p)
1488{
1489 struct kvaser_pciefd_can *can;
1490 bool one_shot_fail = false;
1491 u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
1492
1493 if (ch_id >= pcie->nr_channels)
1494 return -EIO;
1495
1496 can = pcie->can[ch_id];
1497 /* Ignore control packet ACK */
1498 if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
1499 return 0;
1500
1501 if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
1502 kvaser_pciefd_handle_nack_packet(can, p);
1503 one_shot_fail = true;
1504 }
1505
1506 if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
1507 netdev_dbg(can->can.dev, "Packet was flushed\n");
1508 } else {
1509 int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
1510 int len;
1511 u8 count;
1512 struct sk_buff *skb;
1513
1514 skb = can->can.echo_skb[echo_idx];
1515 if (skb)
1516 kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
1517 len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
1518 count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
1519 ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
1520
1521 if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
1522 netif_wake_queue(can->can.dev);
1523
1524 if (!one_shot_fail) {
1525 can->can.dev->stats.tx_bytes += len;
1526 can->can.dev->stats.tx_packets++;
1527 }
1528 }
1529
1530 return 0;
1531}
1532
1533static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
1534 struct kvaser_pciefd_rx_packet *p)
1535{
1536 struct kvaser_pciefd_can *can;
1537 u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
1538
1539 if (ch_id >= pcie->nr_channels)
1540 return -EIO;
1541
1542 can = pcie->can[ch_id];
1543
1544 if (!completion_done(&can->flush_comp))
1545 complete(&can->flush_comp);
1546
1547 return 0;
1548}
1549
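/* Parse a single packet from the DMA buffer at *start_pos and dispatch it
 * by type. On success, *start_pos is advanced to the next packet header.
 */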
1550static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
1551 int dma_buf)
1552{
1553 __le32 *buffer = pcie->dma_data[dma_buf];
1554 __le64 timestamp;
1555 struct kvaser_pciefd_rx_packet packet;
1556 struct kvaser_pciefd_rx_packet *p = &packet;
1557 u8 type;
1558 int pos = *start_pos;
1559 int size;
1560 int ret = 0;
1561
1562 size = le32_to_cpu(buffer[pos++]);
1563 if (!size) {
1564 *start_pos = 0;
1565 return 0;
1566 }
1567
1568 p->header[0] = le32_to_cpu(buffer[pos++]);
1569 p->header[1] = le32_to_cpu(buffer[pos++]);
1570
1571 /* Read 64-bit timestamp */
1572 memcpy(&timestamp, &buffer[pos], sizeof(__le64));
1573 pos += 2;
1574 p->timestamp = le64_to_cpu(timestamp);
1575
1576 type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]);
1577 switch (type) {
1578 case KVASER_PCIEFD_PACK_TYPE_DATA:
1579 ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
1580 if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
1581 u8 data_len;
1582
1583 data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK,
1584 p->header[1]));
1585 pos += DIV_ROUND_UP(data_len, 4);
1586 }
1587 break;
1588
1589 case KVASER_PCIEFD_PACK_TYPE_ACK:
1590 ret = kvaser_pciefd_handle_ack_packet(pcie, p);
1591 break;
1592
1593 case KVASER_PCIEFD_PACK_TYPE_STATUS:
1594 ret = kvaser_pciefd_handle_status_packet(pcie, p);
1595 break;
1596
1597 case KVASER_PCIEFD_PACK_TYPE_ERROR:
1598 ret = kvaser_pciefd_handle_error_packet(pcie, p);
1599 break;
1600
1601 case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
1602 ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
1603 break;
1604
1605 case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
1606 case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
1607 case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
1608 case KVASER_PCIEFD_PACK_TYPE_TXRQ:
1609 dev_info(&pcie->pci->dev,
1610 "Received unexpected packet type 0x%08X\n", type);
1611 break;
1612
1613 default:
1614 dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
1615 ret = -EIO;
1616 break;
1617 }
1618
1619 if (ret)
1620 return ret;
1621
1622 /* Position does not point to the end of the packet,
1623 * corrupted packet size?
1624 */
1625 if (unlikely((*start_pos + size) != pos))
1626 return -EIO;
1627
1628 /* Point to the next packet header, if any */
1629 *start_pos = pos;
1630
1631 return ret;
1632}
1633
1634static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
1635{
1636 int pos = 0;
1637 int res = 0;
1638
1639 do {
1640 res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
1641 } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
1642
1643 return res;
1644}
1645
1646static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
1647{
1648 u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
1649
1650 if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
1651 kvaser_pciefd_read_buffer(pcie, 0);
1652
1653 if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
1654 kvaser_pciefd_read_buffer(pcie, 1);
1655
1656 if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
1657 irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
1658 irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
1659 irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
1660 dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
1661
1662 iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
1663 return irq;
1664}
1665
1666static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
1667{
1668 u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1669
1670 if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
1671 netdev_err(can->can.dev, "Tx FIFO overflow\n");
1672
1673 if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
1674 netdev_err(can->can.dev,
1675 "Fail to change bittiming, when not in reset mode\n");
1676
1677 if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
1678 netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");
1679
1680 if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
1681 netdev_err(can->can.dev, "Rx FIFO overflow\n");
1682
1683 iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1684}
1685
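/* Top-level PCI interrupt handler: dispatch shared receive buffer (SRB)
 * and per-channel Tx interrupts, then issue a reset command for any DMA
 * buffer that was just read.
 */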
1686static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
1687{
1688 struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
1689 const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
1690 u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
1691 u32 srb_irq = 0;
1692 u32 srb_release = 0;
1693 int i;
1694
1695 if (!(pci_irq & irq_mask->all))
1696 return IRQ_NONE;
1697
1698 if (pci_irq & irq_mask->kcan_rx0)
1699 srb_irq = kvaser_pciefd_receive_irq(pcie);
1700
1701 for (i = 0; i < pcie->nr_channels; i++) {
1702 if (pci_irq & irq_mask->kcan_tx[i])
1703 kvaser_pciefd_transmit_irq(pcie->can[i]);
1704 }
1705
1706 if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
1707 srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
1708
1709 if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
1710 srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
1711
1712 if (srb_release)
1713 iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
1714
1715 return IRQ_HANDLED;
1716}
1717
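/* Probe error path helper: mask channel interrupts, stop the PWM output
 * and free the not-yet-registered candevs.
 */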
1718static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
1719{
1720 int i;
1721
1722 for (i = 0; i < pcie->nr_channels; i++) {
1723 struct kvaser_pciefd_can *can = pcie->can[i];
1724
1725 if (can) {
1726 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1727 kvaser_pciefd_pwm_stop(can);
1728 free_candev(can->can.dev);
1729 }
1730 }
1731}
1732
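/* Probe: map BAR 0, read the board configuration, set up the receive DMA
 * buffers and the per-channel controllers, then request the (shared INTx
 * or MSI) interrupt and enable it before registering the CAN devices.
 */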
1733static int kvaser_pciefd_probe(struct pci_dev *pdev,
1734 const struct pci_device_id *id)
1735{
1736 int ret;
1737 struct kvaser_pciefd *pcie;
1738 const struct kvaser_pciefd_irq_mask *irq_mask;
1739 void __iomem *irq_en_base;
1740
1741 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
1742 if (!pcie)
1743 return -ENOMEM;
1744
1745 pci_set_drvdata(pdev, pcie);
1746 pcie->pci = pdev;
1747 pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
1748 irq_mask = pcie->driver_data->irq_mask;
1749
1750 ret = pci_enable_device(pdev);
1751 if (ret)
1752 return ret;
1753
1754 ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
1755 if (ret)
1756 goto err_disable_pci;
1757
1758 pcie->reg_base = pci_iomap(pdev, 0, 0);
1759 if (!pcie->reg_base) {
1760 ret = -ENOMEM;
1761 goto err_release_regions;
1762 }
1763
1764 ret = kvaser_pciefd_setup_board(pcie);
1765 if (ret)
1766 goto err_pci_iounmap;
1767
1768 ret = kvaser_pciefd_setup_dma(pcie);
1769 if (ret)
1770 goto err_pci_iounmap;
1771
1772 pci_set_master(pdev);
1773
1774 ret = kvaser_pciefd_setup_can_ctrls(pcie);
1775 if (ret)
1776 goto err_teardown_can_ctrls;
1777
1778 ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI);
1779 if (ret < 0) {
1780 dev_err(&pcie->pci->dev, "Failed to allocate IRQ vectors.\n");
1781 goto err_teardown_can_ctrls;
1782 }
1783
1784 ret = pci_irq_vector(pcie->pci, 0);
1785 if (ret < 0)
1786 goto err_pci_free_irq_vectors;
1787
1788 pcie->pci->irq = ret;
1789
1790 ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
1791 IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
1792 if (ret) {
1793 dev_err(&pcie->pci->dev, "Failed to request IRQ %d\n", pcie->pci->irq);
1794 goto err_pci_free_irq_vectors;
1795 }
1796 iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
1797 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
1798
1799 iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
1800 KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
1801 KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
1802 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
1803
1804 /* Enable PCI interrupts */
1805 irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
1806 iowrite32(irq_mask->all, irq_en_base);
1807 /* Ready the DMA buffers */
1808 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
1809 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
1810 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
1811 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
1812
1813 ret = kvaser_pciefd_reg_candev(pcie);
1814 if (ret)
1815 goto err_free_irq;
1816
1817 return 0;
1818
1819err_free_irq:
1820 /* Disable PCI interrupts */
1821 iowrite32(0, irq_en_base);
1822 free_irq(pcie->pci->irq, pcie);
1823
1824err_pci_free_irq_vectors:
1825 pci_free_irq_vectors(pcie->pci);
1826
1827err_teardown_can_ctrls:
1828 kvaser_pciefd_teardown_can_ctrls(pcie);
1829 iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
1830 pci_clear_master(pdev);
1831
1832err_pci_iounmap:
1833 pci_iounmap(pdev, pcie->reg_base);
1834
1835err_release_regions:
1836 pci_release_regions(pdev);
1837
1838err_disable_pci:
1839 pci_disable_device(pdev);
1840
1841 return ret;
1842}
1843
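/* Per-channel teardown for a registered device: mask interrupts,
 * unregister the candev, stop the error counter poll timer and the PWM,
 * then free the netdev.
 */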
1844static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
1845{
1846 int i;
1847
1848 for (i = 0; i < pcie->nr_channels; i++) {
1849 struct kvaser_pciefd_can *can = pcie->can[i];
1850
1851 if (can) {
1852 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1853 unregister_candev(can->can.dev);
1854 del_timer(&can->bec_poll_timer);
1855 kvaser_pciefd_pwm_stop(can);
1856 free_candev(can->can.dev);
1857 }
1858 }
1859}
1860
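/* Device removal: tear down the channels first, then disable board level
 * interrupts and release the IRQ line, vectors and PCI resources.
 */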
1861static void kvaser_pciefd_remove(struct pci_dev *pdev)
1862{
1863 struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
1864
1865 kvaser_pciefd_remove_all_ctrls(pcie);
1866
1867 /* Disable interrupts */
1868 iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
1869 iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
1870
1871 free_irq(pcie->pci->irq, pcie);
1872 pci_free_irq_vectors(pcie->pci);
1873 pci_iounmap(pdev, pcie->reg_base);
1874 pci_release_regions(pdev);
1875 pci_disable_device(pdev);
1876}
1877
1878static struct pci_driver kvaser_pciefd = {
1879 .name = KVASER_PCIEFD_DRV_NAME,
1880 .id_table = kvaser_pciefd_id_table,
1881 .probe = kvaser_pciefd_probe,
1882 .remove = kvaser_pciefd_remove,
1883};
1884
1885module_pci_driver(kvaser_pciefd);
1// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
2/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
3 * Parts of this driver are based on the following:
4 * - Kvaser linux pciefd driver (version 5.25)
5 * - PEAK linux canfd driver
6 * - Altera Avalon EPCS flash controller driver
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/device.h>
12#include <linux/pci.h>
13#include <linux/can/dev.h>
14#include <linux/timer.h>
15#include <linux/netdevice.h>
16#include <linux/crc32.h>
17#include <linux/iopoll.h>
18
19MODULE_LICENSE("Dual BSD/GPL");
20MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
21MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
22
23#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"
24
25#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
26#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
27#define KVASER_PCIEFD_MAX_ERR_REP 256
28#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
29#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
30#define KVASER_PCIEFD_DMA_COUNT 2
31
32#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
33#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)
34
35#define KVASER_PCIEFD_VENDOR 0x1a07
36#define KVASER_PCIEFD_4HS_ID 0x0d
37#define KVASER_PCIEFD_2HS_ID 0x0e
38#define KVASER_PCIEFD_HS_ID 0x0f
39#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
40#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11
41
42/* PCIe IRQ registers */
43#define KVASER_PCIEFD_IRQ_REG 0x40
44#define KVASER_PCIEFD_IEN_REG 0x50
45/* DMA map */
46#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
47/* Kvaser KCAN CAN controller registers */
48#define KVASER_PCIEFD_KCAN0_BASE 0x10000
49#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
50#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
51#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
52#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
53#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
54#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
55#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
56#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
57#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
58#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
59#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
60#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
61#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
62/* Loopback control register */
63#define KVASER_PCIEFD_LOOP_REG 0x1f000
64/* System identification and information registers */
65#define KVASER_PCIEFD_SYSID_BASE 0x1f020
66#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
67#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
68#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
69#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
70/* Shared receive buffer registers */
71#define KVASER_PCIEFD_SRB_BASE 0x1f200
72#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
73#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
74#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
75#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
76#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
77/* EPCS flash controller registers */
78#define KVASER_PCIEFD_SPI_BASE 0x1fc00
79#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
80#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
81#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
82#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
83#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)
84
85#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
86#define KVASER_PCIEFD_IRQ_SRB BIT(4)
87
88#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
89#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
90#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1
91
92/* Reset DMA buffer 0, 1 and FIFO offset */
93#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
94#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
95#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)
96
97/* DMA packet done, buffer 0 and 1 */
98#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
99#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
100/* DMA overflow, buffer 0 and 1 */
101#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
102#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
103/* DMA underflow, buffer 0 and 1 */
104#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
105#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
106
107/* DMA idle */
108#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
109/* DMA support */
110#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
111
112/* DMA Enable */
113#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
114
115/* EPCS flash controller definitions */
116#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
117#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
118#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
119#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
120#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
121#define KVASER_PCIEFD_CFG_SYS_VER 1
122#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
123#define KVASER_PCIEFD_SPI_TMT BIT(5)
124#define KVASER_PCIEFD_SPI_TRDY BIT(6)
125#define KVASER_PCIEFD_SPI_RRDY BIT(7)
126#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
127/* Commands for controlling the onboard flash */
128#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
129#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
130#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5
131
132/* Kvaser KCAN definitions */
133#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
134#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)
135
136#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
137/* Request status packet */
138#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
139/* Abort, flush and reset */
140#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
141
142/* Tx FIFO unaligned read */
143#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
144/* Tx FIFO unaligned end */
145#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
146/* Bus parameter protection error */
147#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
148/* FDF bit when controller is in classic mode */
149#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
150/* Rx FIFO overflow */
151#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
152/* Abort done */
153#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
154/* Tx buffer flush done */
155#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
156/* Tx FIFO overflow */
157#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
158/* Tx FIFO empty */
159#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
160/* Transmitter unaligned */
161#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
162
163#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16
164
165#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
166/* Abort request */
167#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
168/* Idle state. Controller in reset mode and no abort or flush pending */
169#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
170/* Bus off */
171#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
172/* Reset mode request */
173#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
174/* Controller in reset mode */
175#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
176/* Controller got one-shot capability */
177#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
178/* Controller got CAN FD capability */
179#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
180#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
181 KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
182 KVASER_PCIEFD_KCAN_STAT_IRM)
183
184/* Reset mode */
185#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
186/* Listen only mode */
187#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
188/* Error packet enable */
189#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
190/* CAN FD non-ISO */
191#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
192/* Acknowledgment packet type */
193#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
194/* Active error flag enable. Clear to force error passive */
195#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
196/* Classic CAN mode */
197#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
198
199#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
200#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
201#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26
202
203#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16
204
205/* Kvaser KCAN packet types */
206#define KVASER_PCIEFD_PACK_TYPE_DATA 0
207#define KVASER_PCIEFD_PACK_TYPE_ACK 1
208#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
209#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
210#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
211#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
212#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
213#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
214#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9
215
216/* Kvaser KCAN packet common definitions */
217#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
218#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
219#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28
220
221/* Kvaser KCAN TDATA and RDATA first word */
222#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
223#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
224/* Kvaser KCAN TDATA and RDATA second word */
225#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
226#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
227#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
228#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
229/* Kvaser KCAN TDATA second word */
230#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
231#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
232
233/* Kvaser KCAN APACKET */
234#define KVASER_PCIEFD_APACKET_FLU BIT(8)
235#define KVASER_PCIEFD_APACKET_CT BIT(9)
236#define KVASER_PCIEFD_APACKET_ABL BIT(10)
237#define KVASER_PCIEFD_APACKET_NACK BIT(11)
238
239/* Kvaser KCAN SPACK first word */
240#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
241#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
242#define KVASER_PCIEFD_SPACK_IDET BIT(20)
243#define KVASER_PCIEFD_SPACK_IRM BIT(21)
244#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
245/* Kvaser KCAN SPACK second word */
246#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
247#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
248#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
249
250struct kvaser_pciefd;
251
252struct kvaser_pciefd_can {
253 struct can_priv can;
254 struct kvaser_pciefd *kv_pcie;
255 void __iomem *reg_base;
256 struct can_berr_counter bec;
257 u8 cmd_seq;
258 int err_rep_cnt;
259 int echo_idx;
260 spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
261 spinlock_t echo_lock; /* Locks the message echo buffer */
262 struct timer_list bec_poll_timer;
263 struct completion start_comp, flush_comp;
264};
265
266struct kvaser_pciefd {
267 struct pci_dev *pci;
268 void __iomem *reg_base;
269 struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
270 void *dma_data[KVASER_PCIEFD_DMA_COUNT];
271 u8 nr_channels;
272 u32 bus_freq;
273 u32 freq;
274 u32 freq_to_ticks_div;
275};
276
277struct kvaser_pciefd_rx_packet {
278 u32 header[2];
279 u64 timestamp;
280};
281
282struct kvaser_pciefd_tx_packet {
283 u32 header[2];
284 u8 data[64];
285};
286
287static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
288 .name = KVASER_PCIEFD_DRV_NAME,
289 .tseg1_min = 1,
290 .tseg1_max = 255,
291 .tseg2_min = 1,
292 .tseg2_max = 32,
293 .sjw_max = 16,
294 .brp_min = 1,
295 .brp_max = 4096,
296 .brp_inc = 1,
297};
298
299struct kvaser_pciefd_cfg_param {
300 __le32 magic;
301 __le32 nr;
302 __le32 len;
303 u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
304};
305
306struct kvaser_pciefd_cfg_img {
307 __le32 version;
308 __le32 magic;
309 __le32 crc;
310 struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
311};
312
313static struct pci_device_id kvaser_pciefd_id_table[] = {
314 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
315 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
316 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
317 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
318 { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
319 { 0,},
320};
321MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
322
323/* Onboard flash memory functions */
324static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
325{
326 u32 res;
327 int ret;
328
329 ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
330 res, res & msk, 0, 10);
331
332 return ret;
333}
334
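/* Byte-by-byte transfer on the EPCS flash SPI controller: assert the chip
 * select, clock out the command/address bytes, then clock in rx_len reply
 * bytes by transmitting dummy zeros.
 */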
335static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
336 u32 tx_len, u8 *rx, u32 rx_len)
337{
338 int c;
339
340 iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
341 iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
342 ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
343
344 c = tx_len;
345 while (c--) {
346 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
347 return -EIO;
348
349 iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
350
351 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
352 return -EIO;
353
354 ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
355 }
356
357 c = rx_len;
358 while (c-- > 0) {
359 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
360 return -EIO;
361
362 iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
363
364 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
365 return -EIO;
366
367 *rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
368 }
369
370 if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
371 return -EIO;
372
373 iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
374
375 if (c != -1) {
376 dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
377 return -EIO;
378 }
379
380 return 0;
381}
382
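/* Read the configuration image from the onboard flash at the fixed offset
 * and verify its version, magic number and CRC over the parameter block.
 */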
383static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
384 struct kvaser_pciefd_cfg_img *img)
385{
386 int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
387 int res, crc;
388 u8 *crc_buff;
389
390 u8 cmd[] = {
391 KVASER_PCIEFD_FLASH_READ_CMD,
392 (u8)((offset >> 16) & 0xff),
393 (u8)((offset >> 8) & 0xff),
394 (u8)(offset & 0xff)
395 };
396
397 res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
398 KVASER_PCIEFD_CFG_IMG_SZ);
399 if (res)
400 return res;
401
402 crc_buff = (u8 *)img->params;
403
404 if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
405 dev_err(&pcie->pci->dev,
406 "Config flash corrupted, version number is wrong\n");
407 return -ENODEV;
408 }
409
410 if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
411 dev_err(&pcie->pci->dev,
412 "Config flash corrupted, magic number is wrong\n");
413 return -ENODEV;
414 }
415
416 crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
417 if (le32_to_cpu(img->crc) != crc) {
418 dev_err(&pcie->pci->dev,
419 "Stored CRC does not match flash image contents\n");
420 return -EIO;
421 }
422
423 return 0;
424}
425
426static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
427 struct kvaser_pciefd_cfg_img *img)
428{
429 struct kvaser_pciefd_cfg_param *param;
430
431 param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
432 memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
433}
434
435static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
436{
437 int res;
438 struct kvaser_pciefd_cfg_img *img;
439
440 /* Read electronic signature */
441 u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};
442
443 res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
444 if (res)
445 return -EIO;
446
447 img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
448 if (!img)
449 return -ENOMEM;
450
451 if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
452 dev_err(&pcie->pci->dev,
453 "Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
454 cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);
455
456 res = -ENODEV;
457 goto image_free;
458 }
459
460 cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
461 res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
462 if (res) {
463 goto image_free;
464 } else if (cmd[0] & 1) {
465 res = -EIO;
466 /* No write is ever done, the WIP should never be set */
467 dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
468 goto image_free;
469 }
470
471 res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
472 if (res) {
473 res = -EIO;
474 goto image_free;
475 }
476
477 kvaser_pciefd_cfg_read_params(pcie, img);
478
479image_free:
480 kfree(img);
481 return res;
482}
483
484static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
485{
486 u32 cmd;
487
488 cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
489 cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
490 iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
491}
492
493static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
494{
495 u32 mode;
496 unsigned long irq;
497
498 spin_lock_irqsave(&can->lock, irq);
499 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
500 if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
501 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
502 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
503 }
504 spin_unlock_irqrestore(&can->lock, irq);
505}
506
507static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
508{
509 u32 mode;
510 unsigned long irq;
511
512 spin_lock_irqsave(&can->lock, irq);
513 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
514 mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
515 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
516 spin_unlock_irqrestore(&can->lock, irq);
517}
518
519static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
520{
521 u32 msk;
522
523 msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
524 KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
525 KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
526 KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
527 KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
528
529 iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
530
531 return 0;
532}
533
534static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
535{
536 u32 mode;
537 unsigned long irq;
538
539 spin_lock_irqsave(&can->lock, irq);
540
541 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
542 if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
543 mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
544 if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
545 mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
546 else
547 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
548 } else {
549 mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
550 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
551 }
552
553 if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
554 mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
555
556 mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
557 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
558 /* Use ACK packet type */
559 mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
560 mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
561 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
562
563 spin_unlock_irqrestore(&can->lock, irq);
564}
565
566static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
567{
568 u32 status;
569 unsigned long irq;
570
571 spin_lock_irqsave(&can->lock, irq);
572 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
573 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
574 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
575
576 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
577 if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
578 u32 cmd;
579
580 /* If controller is already idle, run abort, flush and reset */
581 cmd = KVASER_PCIEFD_KCAN_CMD_AT;
582 cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
583 iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
584 } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
585 u32 mode;
586
587 /* Put controller in reset mode */
588 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
589 mode |= KVASER_PCIEFD_KCAN_MODE_RM;
590 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
591 }
592
593 spin_unlock_irqrestore(&can->lock, irq);
594}
595
596static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
597{
598 u32 mode;
599 unsigned long irq;
600
601 del_timer(&can->bec_poll_timer);
602
603 if (!completion_done(&can->flush_comp))
604 kvaser_pciefd_start_controller_flush(can);
605
606 if (!wait_for_completion_timeout(&can->flush_comp,
607 KVASER_PCIEFD_WAIT_TIMEOUT)) {
608 netdev_err(can->can.dev, "Timeout during bus on flush\n");
609 return -ETIMEDOUT;
610 }
611
612 spin_lock_irqsave(&can->lock, irq);
613 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
614 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
615
616 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
617 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
618
619 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
620 mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
621 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
622 spin_unlock_irqrestore(&can->lock, irq);
623
624 if (!wait_for_completion_timeout(&can->start_comp,
625 KVASER_PCIEFD_WAIT_TIMEOUT)) {
626 netdev_err(can->can.dev, "Timeout during bus on reset\n");
627 return -ETIMEDOUT;
628 }
629 /* Reset interrupt handling */
630 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
631 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
632
633 kvaser_pciefd_set_tx_irq(can);
634 kvaser_pciefd_setup_controller(can);
635
636 can->can.state = CAN_STATE_ERROR_ACTIVE;
637 netif_wake_queue(can->can.dev);
638 can->bec.txerr = 0;
639 can->bec.rxerr = 0;
640 can->err_rep_cnt = 0;
641
642 return 0;
643}
644
645static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
646{
647 u8 top;
648 u32 pwm_ctrl;
649 unsigned long irq;
650
651 spin_lock_irqsave(&can->lock, irq);
652 pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
653 top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;
654
655 /* Set duty cycle to zero */
656 pwm_ctrl |= top;
657 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
658 spin_unlock_irqrestore(&can->lock, irq);
659}
660
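/* Start the KCAN PWM output: 500 kHz derived from the bus clock with a
 * 95 % duty cycle (kvaser_pciefd_pwm_stop() parks it at 0 %).
 */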
661static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
662{
663 int top, trigger;
664 u32 pwm_ctrl;
665 unsigned long irq;
666
667 kvaser_pciefd_pwm_stop(can);
668 spin_lock_irqsave(&can->lock, irq);
669
670	/* Set frequency to 500 kHz */
671 top = can->kv_pcie->bus_freq / (2 * 500000) - 1;
672
673 pwm_ctrl = top & 0xff;
674 pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
675 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
676
677	/* Set duty cycle to 95 % */
678 trigger = (100 * top - 95 * (top + 1) + 50) / 100;
679 pwm_ctrl = trigger & 0xff;
680 pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
681 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
682 spin_unlock_irqrestore(&can->lock, irq);
683}
684
685static int kvaser_pciefd_open(struct net_device *netdev)
686{
687 int err;
688 struct kvaser_pciefd_can *can = netdev_priv(netdev);
689
690 err = open_candev(netdev);
691 if (err)
692 return err;
693
694 err = kvaser_pciefd_bus_on(can);
695 if (err)
696 return err;
697
698 return 0;
699}
700
701static int kvaser_pciefd_stop(struct net_device *netdev)
702{
703 struct kvaser_pciefd_can *can = netdev_priv(netdev);
704 int ret = 0;
705
706 /* Don't interrupt ongoing flush */
707 if (!completion_done(&can->flush_comp))
708 kvaser_pciefd_start_controller_flush(can);
709
710 if (!wait_for_completion_timeout(&can->flush_comp,
711 KVASER_PCIEFD_WAIT_TIMEOUT)) {
712 netdev_err(can->can.dev, "Timeout during stop\n");
713 ret = -ETIMEDOUT;
714 } else {
715 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
716 del_timer(&can->bec_poll_timer);
717 }
718 close_candev(netdev);
719
720 return ret;
721}
722
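/* Build the two KCAN TDATA header words from the skb (ID, flags, DLC and
 * echo sequence number), copy the payload and return the number of 32-bit
 * data words that follow the header.
 */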
723static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
724 struct kvaser_pciefd_can *can,
725 struct sk_buff *skb)
726{
727 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
728 int packet_size;
729 int seq = can->echo_idx;
730
731 memset(p, 0, sizeof(*p));
732
733 if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
734 p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
735
736 if (cf->can_id & CAN_RTR_FLAG)
737 p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;
738
739 if (cf->can_id & CAN_EFF_FLAG)
740 p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;
741
742 p->header[0] |= cf->can_id & CAN_EFF_MASK;
743 p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
744 p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;
745
746 if (can_is_canfd_skb(skb)) {
747 p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
748 if (cf->flags & CANFD_BRS)
749 p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
750 if (cf->flags & CANFD_ESI)
751 p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
752 }
753
754 p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;
755
756 packet_size = cf->len;
757 memcpy(p->data, cf->data, packet_size);
758
759 return DIV_ROUND_UP(packet_size, 4);
760}
761
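/* Queue a frame for transmission: reserve an echo slot, push the header
 * and data words into the per-channel Tx FIFO (the last word goes through
 * the FIFO_LAST register to terminate the packet) and stop the netdev
 * queue when the FIFO or the echo slots are full.
 */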
762static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
763 struct net_device *netdev)
764{
765 struct kvaser_pciefd_can *can = netdev_priv(netdev);
766 unsigned long irq_flags;
767 struct kvaser_pciefd_tx_packet packet;
768 int nwords;
769 u8 count;
770
771 if (can_dropped_invalid_skb(netdev, skb))
772 return NETDEV_TX_OK;
773
774 nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
775
776 spin_lock_irqsave(&can->echo_lock, irq_flags);
777
778 /* Prepare and save echo skb in internal slot */
779 can_put_echo_skb(skb, netdev, can->echo_idx);
780
781 /* Move echo index to the next slot */
782 can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
783
784 /* Write header to fifo */
785 iowrite32(packet.header[0],
786 can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
787 iowrite32(packet.header[1],
788 can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
789
790 if (nwords) {
791 u32 data_last = ((u32 *)packet.data)[nwords - 1];
792
793 /* Write data to fifo, except last word */
794 iowrite32_rep(can->reg_base +
795 KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
796 nwords - 1);
797 /* Write last word to end of fifo */
798 __raw_writel(data_last, can->reg_base +
799 KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
800 } else {
801 /* Complete write to fifo */
802 __raw_writel(0, can->reg_base +
803 KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
804 }
805
806 count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
807 /* No room for a new message, stop the queue until at least one
808 * successful transmit
809 */
810 if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
811 can->can.echo_skb[can->echo_idx])
812 netif_stop_queue(netdev);
813
814 spin_unlock_irqrestore(&can->echo_lock, irq_flags);
815
816 return NETDEV_TX_OK;
817}
818
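/* Write nominal or data phase bittiming. The BTRN/BTRD registers can only
 * be written while the controller is in reset mode, so reset mode is
 * requested first and the previous mode restored afterwards.
 */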
819static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
820{
821 u32 mode, test, btrn;
822 unsigned long irq_flags;
823 int ret;
824 struct can_bittiming *bt;
825
826 if (data)
827 bt = &can->can.data_bittiming;
828 else
829 bt = &can->can.bittiming;
830
831 btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
832 KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
833 (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
834 KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
835 ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
836 ((bt->brp - 1) & 0x1fff);
837
838 spin_lock_irqsave(&can->lock, irq_flags);
839 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
840
841 /* Put the circuit in reset mode */
842 iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
843 can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
844
845 /* Can only set bittiming if in reset mode */
846 ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
847 test, test & KVASER_PCIEFD_KCAN_MODE_RM,
848 0, 10);
849
850 if (ret) {
851 spin_unlock_irqrestore(&can->lock, irq_flags);
852 return -EBUSY;
853 }
854
855 if (data)
856 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
857 else
858 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
859
860 /* Restore previous reset mode status */
861 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
862
863 spin_unlock_irqrestore(&can->lock, irq_flags);
864 return 0;
865}
866
867static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
868{
869 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
870}
871
872static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
873{
874 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
875}
876
877static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
878{
879 struct kvaser_pciefd_can *can = netdev_priv(ndev);
880 int ret = 0;
881
882 switch (mode) {
883 case CAN_MODE_START:
884 if (!can->can.restart_ms)
885 ret = kvaser_pciefd_bus_on(can);
886 break;
887 default:
888 return -EOPNOTSUPP;
889 }
890
891 return ret;
892}
893
894static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
895 struct can_berr_counter *bec)
896{
897 struct kvaser_pciefd_can *can = netdev_priv(ndev);
898
899 bec->rxerr = can->bec.rxerr;
900 bec->txerr = can->bec.txerr;
901 return 0;
902}
903
904static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
905{
906 struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);
907
908 kvaser_pciefd_enable_err_gen(can);
909 kvaser_pciefd_request_status(can);
910 can->err_rep_cnt = 0;
911}
912
913static const struct net_device_ops kvaser_pciefd_netdev_ops = {
914 .ndo_open = kvaser_pciefd_open,
915 .ndo_stop = kvaser_pciefd_stop,
916 .ndo_start_xmit = kvaser_pciefd_start_xmit,
917 .ndo_change_mtu = can_change_mtu,
918};
919
920static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
921{
922 int i;
923
924 for (i = 0; i < pcie->nr_channels; i++) {
925 struct net_device *netdev;
926 struct kvaser_pciefd_can *can;
927 u32 status, tx_npackets;
928
929 netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
930 KVASER_PCIEFD_CAN_TX_MAX_COUNT);
931 if (!netdev)
932 return -ENOMEM;
933
934 can = netdev_priv(netdev);
935 netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
936 can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
937 i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
938
939 can->kv_pcie = pcie;
940 can->cmd_seq = 0;
941 can->err_rep_cnt = 0;
942 can->bec.txerr = 0;
943 can->bec.rxerr = 0;
944
945 init_completion(&can->start_comp);
946 init_completion(&can->flush_comp);
947 timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
948 0);
949
950 tx_npackets = ioread32(can->reg_base +
951 KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
952 if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
953 0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
954 dev_err(&pcie->pci->dev,
955 "Max Tx count is smaller than expected\n");
956
957 free_candev(netdev);
958 return -ENODEV;
959 }
960
961 can->can.clock.freq = pcie->freq;
962 can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
963 can->echo_idx = 0;
964 spin_lock_init(&can->echo_lock);
965 spin_lock_init(&can->lock);
966 can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
967 can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
968
969 can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
970 can->can.do_set_data_bittiming =
971 kvaser_pciefd_set_data_bittiming;
972
973 can->can.do_set_mode = kvaser_pciefd_set_mode;
974 can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
975
976 can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
977 CAN_CTRLMODE_FD |
978 CAN_CTRLMODE_FD_NON_ISO;
979
980 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
981 if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
982 dev_err(&pcie->pci->dev,
983				"CAN FD expected but not supported, channel %d\n", i);
984
985 free_candev(netdev);
986 return -ENODEV;
987 }
988
989 if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
990 can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
991
992 netdev->flags |= IFF_ECHO;
993
994 SET_NETDEV_DEV(netdev, &pcie->pci->dev);
995
996 iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
997 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
998 KVASER_PCIEFD_KCAN_IRQ_TFD,
999 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1000
1001 pcie->can[i] = can;
1002 kvaser_pciefd_pwm_start(can);
1003 }
1004
1005 return 0;
1006}
1007
1008static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
1009{
1010 int i;
1011
1012 for (i = 0; i < pcie->nr_channels; i++) {
1013 int err = register_candev(pcie->can[i]->can.dev);
1014
1015 if (err) {
1016 int j;
1017
1018 /* Unregister all successfully registered devices. */
1019 for (j = 0; j < i; j++)
1020 unregister_candev(pcie->can[j]->can.dev);
1021 return err;
1022 }
1023 }
1024
1025 return 0;
1026}
1027
1028static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
1029 dma_addr_t addr, int offset)
1030{
1031 u32 word1, word2;
1032
1033#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1034 word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
1035 word2 = addr >> 32;
1036#else
1037 word1 = addr;
1038 word2 = 0;
1039#endif
1040 iowrite32(word1, pcie->reg_base + offset);
1041 iowrite32(word2, pcie->reg_base + offset + 4);
1042}
1043
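/* Allocate the two coherent receive DMA buffers, program their bus
 * addresses into the DMA map, reset the Rx FIFO and both buffers, and
 * enable DMA once the engine reports idle.
 */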
1044static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
1045{
1046 int i;
1047 u32 srb_status;
1048 dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
1049
1050 /* Disable the DMA */
1051 iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
1052 for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
1053 unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;
1054
1055 pcie->dma_data[i] =
1056 dmam_alloc_coherent(&pcie->pci->dev,
1057 KVASER_PCIEFD_DMA_SIZE,
1058 &dma_addr[i],
1059 GFP_KERNEL);
1060
1061 if (!pcie->dma_data[i] || !dma_addr[i]) {
1062 dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
1063 KVASER_PCIEFD_DMA_SIZE);
1064 return -ENOMEM;
1065 }
1066
1067 kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
1068 }
1069
1070 /* Reset Rx FIFO, and both DMA buffers */
1071 iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
1072 KVASER_PCIEFD_SRB_CMD_RDB1,
1073 pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1074
1075 srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
1076 if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
1077 dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
1078 return -EIO;
1079 }
1080
1081 /* Enable the DMA */
1082 iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
1083 pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
1084
1085 return 0;
1086}
1087
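/* Read the channel count from the flash configuration, cross-check it
 * against the SYSID register, pick up the CAN and bus clock frequencies
 * and disable the loopback function.
 */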
1088static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
1089{
1090 u32 sysid, srb_status, build;
1091 u8 sysid_nr_chan;
1092 int ret;
1093
1094 ret = kvaser_pciefd_read_cfg(pcie);
1095 if (ret)
1096 return ret;
1097
1098 sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
1099 sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
1100 if (pcie->nr_channels != sysid_nr_chan) {
1101 dev_err(&pcie->pci->dev,
1102 "Number of channels does not match: %u vs %u\n",
1103 pcie->nr_channels,
1104 sysid_nr_chan);
1105 return -ENODEV;
1106 }
1107
1108 if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
1109 pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;
1110
1111 build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
1112 dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
1113 (sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
1114 sysid & 0xff,
1115 (build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);
1116
1117 srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
1118 if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
1119 dev_err(&pcie->pci->dev,
1120 "Hardware without DMA is not supported\n");
1121 return -ENODEV;
1122 }
1123
1124 pcie->bus_freq = ioread32(pcie->reg_base +
1125 KVASER_PCIEFD_SYSID_BUSFREQ_REG);
1126 pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
1127 pcie->freq_to_ticks_div = pcie->freq / 1000000;
1128 if (pcie->freq_to_ticks_div == 0)
1129 pcie->freq_to_ticks_div = 1;
1130
1131 /* Turn off all loopback functionality */
1132 iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
1133 return ret;
1134}
1135
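/* Convert a received data packet into a CAN or CAN FD skb, fill in the
 * hardware timestamp and hand it to the network stack.
 */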
1136static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
1137 struct kvaser_pciefd_rx_packet *p,
1138 __le32 *data)
1139{
1140 struct sk_buff *skb;
1141 struct canfd_frame *cf;
1142 struct can_priv *priv;
1143 struct net_device_stats *stats;
1144 struct skb_shared_hwtstamps *shhwtstamps;
1145 u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1146
1147 if (ch_id >= pcie->nr_channels)
1148 return -EIO;
1149
1150 priv = &pcie->can[ch_id]->can;
1151 stats = &priv->dev->stats;
1152
1153 if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
1154 skb = alloc_canfd_skb(priv->dev, &cf);
1155 if (!skb) {
1156 stats->rx_dropped++;
1157 return -ENOMEM;
1158 }
1159
1160 if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
1161 cf->flags |= CANFD_BRS;
1162
1163 if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
1164 cf->flags |= CANFD_ESI;
1165 } else {
1166 skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
1167 if (!skb) {
1168 stats->rx_dropped++;
1169 return -ENOMEM;
1170 }
1171 }
1172
1173 cf->can_id = p->header[0] & CAN_EFF_MASK;
1174 if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
1175 cf->can_id |= CAN_EFF_FLAG;
1176
1177 cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
1178
1179 if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
1180 cf->can_id |= CAN_RTR_FLAG;
1181 else
1182 memcpy(cf->data, data, cf->len);
1183
1184 shhwtstamps = skb_hwtstamps(skb);
1185
1186 shhwtstamps->hwtstamp =
1187 ns_to_ktime(div_u64(p->timestamp * 1000,
1188 pcie->freq_to_ticks_div));
1189
1190 stats->rx_bytes += cf->len;
1191 stats->rx_packets++;
1192
1193 return netif_rx(skb);
1194}
1195
1196static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
1197 struct can_frame *cf,
1198 enum can_state new_state,
1199 enum can_state tx_state,
1200 enum can_state rx_state)
1201{
1202 can_change_state(can->can.dev, cf, tx_state, rx_state);
1203
1204 if (new_state == CAN_STATE_BUS_OFF) {
1205 struct net_device *ndev = can->can.dev;
1206 unsigned long irq_flags;
1207
1208 spin_lock_irqsave(&can->lock, irq_flags);
1209 netif_stop_queue(can->can.dev);
1210 spin_unlock_irqrestore(&can->lock, irq_flags);
1211
1212	/* Prevent the CAN controller from automatically recovering from bus off */
1213 if (!can->can.restart_ms) {
1214 kvaser_pciefd_start_controller_flush(can);
1215 can_bus_off(ndev);
1216 }
1217 }
1218}
1219
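/* Derive the new CAN state from the status packet flags and the error
 * counters, and split it into per-direction tx/rx states.
 */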
1220static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
1221 struct can_berr_counter *bec,
1222 enum can_state *new_state,
1223 enum can_state *tx_state,
1224 enum can_state *rx_state)
1225{
1226 if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
1227 p->header[0] & KVASER_PCIEFD_SPACK_IRM)
1228 *new_state = CAN_STATE_BUS_OFF;
1229 else if (bec->txerr >= 255 || bec->rxerr >= 255)
1230 *new_state = CAN_STATE_BUS_OFF;
1231 else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
1232 *new_state = CAN_STATE_ERROR_PASSIVE;
1233 else if (bec->txerr >= 128 || bec->rxerr >= 128)
1234 *new_state = CAN_STATE_ERROR_PASSIVE;
1235 else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
1236 *new_state = CAN_STATE_ERROR_WARNING;
1237 else if (bec->txerr >= 96 || bec->rxerr >= 96)
1238 *new_state = CAN_STATE_ERROR_WARNING;
1239 else
1240 *new_state = CAN_STATE_ERROR_ACTIVE;
1241
1242 *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
1243 *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
1244}
1245
1246static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
1247 struct kvaser_pciefd_rx_packet *p)
1248{
1249 struct can_berr_counter bec;
1250 enum can_state old_state, new_state, tx_state, rx_state;
1251 struct net_device *ndev = can->can.dev;
1252 struct sk_buff *skb;
1253 struct can_frame *cf = NULL;
1254 struct skb_shared_hwtstamps *shhwtstamps;
1255 struct net_device_stats *stats = &ndev->stats;
1256
1257 old_state = can->can.state;
1258
1259 bec.txerr = p->header[0] & 0xff;
1260 bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
1261
1262 kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
1263 &rx_state);
1264
1265 skb = alloc_can_err_skb(ndev, &cf);
1266
1267 if (new_state != old_state) {
1268 kvaser_pciefd_change_state(can, cf, new_state, tx_state,
1269 rx_state);
1270
1271 if (old_state == CAN_STATE_BUS_OFF &&
1272 new_state == CAN_STATE_ERROR_ACTIVE &&
1273 can->can.restart_ms) {
1274 can->can.can_stats.restarts++;
1275 if (skb)
1276 cf->can_id |= CAN_ERR_RESTARTED;
1277 }
1278 }
1279
1280 can->err_rep_cnt++;
1281 can->can.can_stats.bus_error++;
1282 stats->rx_errors++;
1283
1284 can->bec.txerr = bec.txerr;
1285 can->bec.rxerr = bec.rxerr;
1286
1287 if (!skb) {
1288 stats->rx_dropped++;
1289 return -ENOMEM;
1290 }
1291
1292 shhwtstamps = skb_hwtstamps(skb);
1293 shhwtstamps->hwtstamp =
1294 ns_to_ktime(div_u64(p->timestamp * 1000,
1295 can->kv_pcie->freq_to_ticks_div));
1296 cf->can_id |= CAN_ERR_BUSERROR;
1297
1298 cf->data[6] = bec.txerr;
1299 cf->data[7] = bec.rxerr;
1300
1301 stats->rx_packets++;
1302 stats->rx_bytes += cf->can_dlc;
1303
1304 netif_rx(skb);
1305 return 0;
1306}
1307
1308static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
1309 struct kvaser_pciefd_rx_packet *p)
1310{
1311 struct kvaser_pciefd_can *can;
1312 u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1313
1314 if (ch_id >= pcie->nr_channels)
1315 return -EIO;
1316
1317 can = pcie->can[ch_id];
1318
1319 kvaser_pciefd_rx_error_frame(can, p);
1320 if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
1321 /* Do not report more errors, until bec_poll_timer expires */
1322 kvaser_pciefd_disable_err_gen(can);
1323 /* Start polling the error counters */
1324 mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1325 return 0;
1326}
1327
1328static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
1329 struct kvaser_pciefd_rx_packet *p)
1330{
1331 struct can_berr_counter bec;
1332 enum can_state old_state, new_state, tx_state, rx_state;
1333
1334 old_state = can->can.state;
1335
1336 bec.txerr = p->header[0] & 0xff;
1337 bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
1338
1339 kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
1340 &rx_state);
1341
1342 if (new_state != old_state) {
1343 struct net_device *ndev = can->can.dev;
1344 struct sk_buff *skb;
1345 struct can_frame *cf;
1346 struct skb_shared_hwtstamps *shhwtstamps;
1347
1348 skb = alloc_can_err_skb(ndev, &cf);
1349 if (!skb) {
1350 struct net_device_stats *stats = &ndev->stats;
1351
1352 stats->rx_dropped++;
1353 return -ENOMEM;
1354 }
1355
1356 kvaser_pciefd_change_state(can, cf, new_state, tx_state,
1357 rx_state);
1358
1359 if (old_state == CAN_STATE_BUS_OFF &&
1360 new_state == CAN_STATE_ERROR_ACTIVE &&
1361 can->can.restart_ms) {
1362 can->can.can_stats.restarts++;
1363 cf->can_id |= CAN_ERR_RESTARTED;
1364 }
1365
1366 shhwtstamps = skb_hwtstamps(skb);
1367 shhwtstamps->hwtstamp =
1368 ns_to_ktime(div_u64(p->timestamp * 1000,
1369 can->kv_pcie->freq_to_ticks_div));
1370
1371 cf->data[6] = bec.txerr;
1372 cf->data[7] = bec.rxerr;
1373
1374 netif_rx(skb);
1375 }
1376 can->bec.txerr = bec.txerr;
1377 can->bec.rxerr = bec.rxerr;
1378 /* Check if we need to poll the error counters */
1379 if (bec.txerr || bec.rxerr)
1380 mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1381
1382 return 0;
1383}
1384
1385static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
1386 struct kvaser_pciefd_rx_packet *p)
1387{
1388 struct kvaser_pciefd_can *can;
1389 u8 cmdseq;
1390 u32 status;
1391 u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1392
1393 if (ch_id >= pcie->nr_channels)
1394 return -EIO;
1395
1396 can = pcie->can[ch_id];
1397
1398 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
1399 cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;
1400
1401 /* Reset done, start abort and flush */
1402 if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
1403 p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
1404 p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
1405 cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
1406 status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
1407 u32 cmd;
1408
1409 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
1410 can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1411 cmd = KVASER_PCIEFD_KCAN_CMD_AT;
1412 cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
1413 iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
1414
1415 iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
1416 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1417 } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
1418 p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
1419 cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
1420 status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
1421		/* Reset detected, send end of flush if no packets are in the FIFO */
1422 u8 count = ioread32(can->reg_base +
1423 KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
1424
1425 if (!count)
1426 iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
1427 can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
1428 } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
1429 cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
1430 /* Response to status request received */
1431 kvaser_pciefd_handle_status_resp(can, p);
1432 if (can->can.state != CAN_STATE_BUS_OFF &&
1433 can->can.state != CAN_STATE_ERROR_ACTIVE) {
1434 mod_timer(&can->bec_poll_timer,
1435 KVASER_PCIEFD_BEC_POLL_FREQ);
1436 }
1437 } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
1438 !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
1439 /* Reset to bus on detected */
1440 if (!completion_done(&can->start_comp))
1441 complete(&can->start_comp);
1442 }
1443
1444 return 0;
1445}
1446
1447static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
1448 struct kvaser_pciefd_rx_packet *p)
1449{
1450 struct kvaser_pciefd_can *can;
1451 u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1452
1453 if (ch_id >= pcie->nr_channels)
1454 return -EIO;
1455
1456 can = pcie->can[ch_id];
1457
1458 /* If this is the last flushed packet, send end of flush */
1459 if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
1460 u8 count = ioread32(can->reg_base +
1461 KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
1462
1463 if (count == 0)
1464 iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
1465 can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
1466 } else {
1467 int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
1468 int dlc = can_get_echo_skb(can->can.dev, echo_idx);
1469 struct net_device_stats *stats = &can->can.dev->stats;
1470
1471 stats->tx_bytes += dlc;
1472 stats->tx_packets++;
1473
1474 if (netif_queue_stopped(can->can.dev))
1475 netif_wake_queue(can->can.dev);
1476 }
1477
1478 return 0;
1479}
1480
1481static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
1482 struct kvaser_pciefd_rx_packet *p)
1483{
1484 struct sk_buff *skb;
1485 struct net_device_stats *stats = &can->can.dev->stats;
1486 struct can_frame *cf;
1487
1488 skb = alloc_can_err_skb(can->can.dev, &cf);
1489
1490 stats->tx_errors++;
1491 if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
1492 if (skb)
1493 cf->can_id |= CAN_ERR_LOSTARB;
1494 can->can.can_stats.arbitration_lost++;
1495 } else if (skb) {
1496 cf->can_id |= CAN_ERR_ACK;
1497 }
1498
1499 if (skb) {
1500 cf->can_id |= CAN_ERR_BUSERROR;
1501 stats->rx_bytes += cf->can_dlc;
1502 stats->rx_packets++;
1503 netif_rx(skb);
1504 } else {
1505 stats->rx_dropped++;
1506 netdev_warn(can->can.dev, "No memory left for err_skb\n");
1507 }
1508}
1509
1510static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
1511 struct kvaser_pciefd_rx_packet *p)
1512{
1513 struct kvaser_pciefd_can *can;
1514 bool one_shot_fail = false;
1515 u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1516
1517 if (ch_id >= pcie->nr_channels)
1518 return -EIO;
1519
1520 can = pcie->can[ch_id];
1521 /* Ignore control packet ACK */
1522 if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
1523 return 0;
1524
1525 if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
1526 kvaser_pciefd_handle_nack_packet(can, p);
1527 one_shot_fail = true;
1528 }
1529
1530 if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
1531 netdev_dbg(can->can.dev, "Packet was flushed\n");
1532 } else {
1533 int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
1534 int dlc = can_get_echo_skb(can->can.dev, echo_idx);
1535 u8 count = ioread32(can->reg_base +
1536 KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
1537
1538 if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
1539 netif_queue_stopped(can->can.dev))
1540 netif_wake_queue(can->can.dev);
1541
1542 if (!one_shot_fail) {
1543 struct net_device_stats *stats = &can->can.dev->stats;
1544
1545 stats->tx_bytes += dlc;
1546 stats->tx_packets++;
1547 }
1548 }
1549
1550 return 0;
1551}
1552
1553static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
1554 struct kvaser_pciefd_rx_packet *p)
1555{
1556 struct kvaser_pciefd_can *can;
1557 u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1558
1559 if (ch_id >= pcie->nr_channels)
1560 return -EIO;
1561
1562 can = pcie->can[ch_id];
1563
1564 if (!completion_done(&can->flush_comp))
1565 complete(&can->flush_comp);
1566
1567 return 0;
1568}
1569
1570static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
1571 int dma_buf)
1572{
1573 __le32 *buffer = pcie->dma_data[dma_buf];
1574 __le64 timestamp;
1575 struct kvaser_pciefd_rx_packet packet;
1576 struct kvaser_pciefd_rx_packet *p = &packet;
1577 u8 type;
1578 int pos = *start_pos;
1579 int size;
1580 int ret = 0;
1581
1582 size = le32_to_cpu(buffer[pos++]);
1583 if (!size) {
1584 *start_pos = 0;
1585 return 0;
1586 }
1587
1588 p->header[0] = le32_to_cpu(buffer[pos++]);
1589 p->header[1] = le32_to_cpu(buffer[pos++]);
1590
1591 /* Read 64-bit timestamp */
1592	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
1593 pos += 2;
1594 p->timestamp = le64_to_cpu(timestamp);
1595
1596 type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
1597 switch (type) {
1598 case KVASER_PCIEFD_PACK_TYPE_DATA:
1599 ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
1600 if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
1601 u8 data_len;
1602
1603 data_len = can_dlc2len(p->header[1] >>
1604 KVASER_PCIEFD_RPACKET_DLC_SHIFT);
1605 pos += DIV_ROUND_UP(data_len, 4);
1606 }
1607 break;
1608
1609 case KVASER_PCIEFD_PACK_TYPE_ACK:
1610 ret = kvaser_pciefd_handle_ack_packet(pcie, p);
1611 break;
1612
1613 case KVASER_PCIEFD_PACK_TYPE_STATUS:
1614 ret = kvaser_pciefd_handle_status_packet(pcie, p);
1615 break;
1616
1617 case KVASER_PCIEFD_PACK_TYPE_ERROR:
1618 ret = kvaser_pciefd_handle_error_packet(pcie, p);
1619 break;
1620
1621 case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
1622 ret = kvaser_pciefd_handle_eack_packet(pcie, p);
1623 break;
1624
1625 case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
1626 ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
1627 break;
1628
1629 case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
1630 case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
1631 case KVASER_PCIEFD_PACK_TYPE_TXRQ:
1632 dev_info(&pcie->pci->dev,
1633 "Received unexpected packet type 0x%08X\n", type);
1634 break;
1635
1636 default:
1637 dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
1638 ret = -EIO;
1639 break;
1640 }
1641
1642 if (ret)
1643 return ret;
1644
1645	/* Position does not point to the end of the packet,
1646 * corrupted packet size?
1647 */
1648 if ((*start_pos + size) != pos)
1649 return -EIO;
1650
1651 /* Point to the next packet header, if any */
1652 *start_pos = pos;
1653
1654 return ret;
1655}
1656
static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}

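/* Shared receive buffer (SRB) interrupt handler: read out and re-arm the
 * DMA buffer that signalled "DMA packet done", log over-/underflow
 * conditions, and acknowledge the handled interrupt bits.
 */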
static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq;

	irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Reset DMA buffer 0 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Reset DMA buffer 1 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	return 0;
}

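/* Per-channel KCAN interrupt handler: log Tx/Rx FIFO and mode errors, and
 * on a Tx FIFO flush-done (TFD) interrupt with an empty FIFO issue the
 * EFLUSH command, which presumably triggers the EFLUSH ACK packet handled
 * above.
 */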
static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG) & 0xff;

		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	}

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Failed to change bittiming while not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	return 0;
}

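/* Top-level PCI interrupt handler: dispatch the shared receive buffer
 * interrupt and each channel's KCAN interrupt according to the board
 * interrupt register, then acknowledge the bits that were set.
 */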
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	u32 board_irq;
	int i;

	board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);

	if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
		return IRQ_NONE;

	if (board_irq & KVASER_PCIEFD_IRQ_SRB)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Check that mask matches channel (i) IRQ mask */
		if (board_irq & (1 << i))
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	return IRQ_HANDLED;
}

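/* Probe error path teardown: mask the KCAN interrupts, stop the PWM output
 * and free the candev for every controller allocated so far.
 */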
static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;
	struct kvaser_pciefd_can *can;

	for (i = 0; i < pcie->nr_channels; i++) {
		can = pcie->can[i];
		if (can) {
			iowrite32(0,
				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

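/* Probe: map BAR 0, set up the board and DMA, create the per-channel
 * controllers, enable SRB and board interrupts, arm both DMA buffers,
 * request the shared PCI interrupt and finally register the net devices.
 */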
static int kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;
	struct kvaser_pciefd *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err)
		goto err_disable_pci;

	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	err = kvaser_pciefd_setup_can_ctrls(pcie);
	if (err)
		goto err_teardown_can_ctrls;

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);

	/* Reset IRQ handling, expected to be off before */
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	err = kvaser_pciefd_reg_candev(pcie);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return err;
}

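/* Remove path counterpart of the error-path teardown: additionally
 * unregister each net device and delete its bus error counter poll timer
 * before freeing the candev.
 */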
static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
	struct kvaser_pciefd_can *can;
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		can = pcie->can[i];
		if (can) {
			iowrite32(0,
				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			unregister_candev(can->can.dev);
			del_timer(&can->bec_poll_timer);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

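/* Device removal: tear down all controllers, clear the SRB control register
 * and turn off board interrupt generation, then release the IRQ and PCI
 * resources in reverse probe order.
 */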
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Turn off IRQ generation */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);

	free_irq(pcie->pci->irq, pcie);

	pci_clear_master(pdev);
	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd);