1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Driver for NXP MCR20A 802.15.4 Wireless-PAN Networking controller
4 *
5 * Copyright (C) 2018 Xue Liu <liuxuenetmail@gmail.com>
6 */
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/gpio/consumer.h>
10#include <linux/spi/spi.h>
11#include <linux/workqueue.h>
12#include <linux/interrupt.h>
13#include <linux/irq.h>
14#include <linux/skbuff.h>
15#include <linux/of_gpio.h>
16#include <linux/regmap.h>
17#include <linux/ieee802154.h>
18#include <linux/debugfs.h>
19
20#include <net/mac802154.h>
21#include <net/cfg802154.h>
22
23#include <linux/device.h>
24
25#include "mcr20a.h"
26
27#define SPI_COMMAND_BUFFER 3
28
29#define REGISTER_READ BIT(7)
30#define REGISTER_WRITE (0 << 7)
31#define REGISTER_ACCESS (0 << 6)
32#define PACKET_BUFF_BURST_ACCESS BIT(6)
33#define PACKET_BUFF_BYTE_ACCESS BIT(5)
34
35#define MCR20A_WRITE_REG(x) (x)
36#define MCR20A_READ_REG(x) (REGISTER_READ | (x))
37#define MCR20A_BURST_READ_PACKET_BUF (0xC0)
38#define MCR20A_BURST_WRITE_PACKET_BUF (0x40)
39
40#define MCR20A_CMD_REG 0x80
41#define MCR20A_CMD_REG_MASK 0x3f
42#define MCR20A_CMD_WRITE 0x40
43#define MCR20A_CMD_FB 0x20
44
/* Number of Interrupt Request Status Registers */
46#define MCR20A_IRQSTS_NUM 2 /* only IRQ_STS1 and IRQ_STS2 */
47
48/* MCR20A CCA Type */
49enum {
50 MCR20A_CCA_ED, // energy detect - CCA bit not active,
51 // not to be used for T and CCCA sequences
52 MCR20A_CCA_MODE1, // energy detect - CCA bit ACTIVE
53 MCR20A_CCA_MODE2, // 802.15.4 compliant signal detect - CCA bit ACTIVE
54 MCR20A_CCA_MODE3
55};
56
57enum {
58 MCR20A_XCVSEQ_IDLE = 0x00,
59 MCR20A_XCVSEQ_RX = 0x01,
60 MCR20A_XCVSEQ_TX = 0x02,
61 MCR20A_XCVSEQ_CCA = 0x03,
62 MCR20A_XCVSEQ_TR = 0x04,
63 MCR20A_XCVSEQ_CCCA = 0x05,
64};
65
66/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
67#define MCR20A_MIN_CHANNEL (11)
68#define MCR20A_MAX_CHANNEL (26)
69#define MCR20A_CHANNEL_SPACING (5)
70
/* MCR20A CCA Threshold constants */
72#define MCR20A_MIN_CCA_THRESHOLD (0x6EU)
73#define MCR20A_MAX_CCA_THRESHOLD (0x00U)
74
75/* version 0C */
76#define MCR20A_OVERWRITE_VERSION (0x0C)
77
78/* MCR20A PLL configurations */
79static const u8 PLL_INT[16] = {
80 /* 2405 */ 0x0B, /* 2410 */ 0x0B, /* 2415 */ 0x0B,
81 /* 2420 */ 0x0B, /* 2425 */ 0x0B, /* 2430 */ 0x0B,
82 /* 2435 */ 0x0C, /* 2440 */ 0x0C, /* 2445 */ 0x0C,
83 /* 2450 */ 0x0C, /* 2455 */ 0x0C, /* 2460 */ 0x0C,
84 /* 2465 */ 0x0D, /* 2470 */ 0x0D, /* 2475 */ 0x0D,
85 /* 2480 */ 0x0D
86};
87
88static const u8 PLL_FRAC[16] = {
89 /* 2405 */ 0x28, /* 2410 */ 0x50, /* 2415 */ 0x78,
90 /* 2420 */ 0xA0, /* 2425 */ 0xC8, /* 2430 */ 0xF0,
91 /* 2435 */ 0x18, /* 2440 */ 0x40, /* 2445 */ 0x68,
92 /* 2450 */ 0x90, /* 2455 */ 0xB8, /* 2460 */ 0xE0,
93 /* 2465 */ 0x08, /* 2470 */ 0x30, /* 2475 */ 0x58,
94 /* 2480 */ 0x80
95};
96
97static const struct reg_sequence mar20a_iar_overwrites[] = {
98 { IAR_MISC_PAD_CTRL, 0x02 },
99 { IAR_VCO_CTRL1, 0xB3 },
100 { IAR_VCO_CTRL2, 0x07 },
101 { IAR_PA_TUNING, 0x71 },
102 { IAR_CHF_IBUF, 0x2F },
103 { IAR_CHF_QBUF, 0x2F },
104 { IAR_CHF_IRIN, 0x24 },
105 { IAR_CHF_QRIN, 0x24 },
106 { IAR_CHF_IL, 0x24 },
107 { IAR_CHF_QL, 0x24 },
108 { IAR_CHF_CC1, 0x32 },
109 { IAR_CHF_CCL, 0x1D },
110 { IAR_CHF_CC2, 0x2D },
111 { IAR_CHF_IROUT, 0x24 },
112 { IAR_CHF_QROUT, 0x24 },
113 { IAR_PA_CAL, 0x28 },
114 { IAR_AGC_THR1, 0x55 },
115 { IAR_AGC_THR2, 0x2D },
116 { IAR_ATT_RSSI1, 0x5F },
117 { IAR_ATT_RSSI2, 0x8F },
118 { IAR_RSSI_OFFSET, 0x61 },
119 { IAR_CHF_PMA_GAIN, 0x03 },
120 { IAR_CCA1_THRESH, 0x50 },
121 { IAR_CORR_NVAL, 0x13 },
122 { IAR_ACKDELAY, 0x3D },
123};
124
125#define MCR20A_VALID_CHANNELS (0x07FFF800)
126#define MCR20A_MAX_BUF (127)
127
128#define printdev(X) (&X->spi->dev)
129
130/* regmap information for Direct Access Register (DAR) access */
131#define MCR20A_DAR_WRITE 0x01
132#define MCR20A_DAR_READ 0x00
133#define MCR20A_DAR_NUMREGS 0x3F
134
135/* regmap information for Indirect Access Register (IAR) access */
136#define MCR20A_IAR_ACCESS 0x80
137#define MCR20A_IAR_NUMREGS 0xBEFF
138
139/* Read/Write SPI Commands for DAR and IAR registers. */
140#define MCR20A_READSHORT(reg) ((reg) << 1)
141#define MCR20A_WRITESHORT(reg) ((reg) << 1 | 1)
142#define MCR20A_READLONG(reg) (1 << 15 | (reg) << 5)
143#define MCR20A_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
144
/* Size of the transceiver's indirect (source address matching) queue */
146#define MCR20A_PHY_INDIRECT_QUEUE_SIZE (12)
147
148static bool
149mcr20a_dar_writeable(struct device *dev, unsigned int reg)
150{
151 switch (reg) {
152 case DAR_IRQ_STS1:
153 case DAR_IRQ_STS2:
154 case DAR_IRQ_STS3:
155 case DAR_PHY_CTRL1:
156 case DAR_PHY_CTRL2:
157 case DAR_PHY_CTRL3:
158 case DAR_PHY_CTRL4:
159 case DAR_SRC_CTRL:
160 case DAR_SRC_ADDRS_SUM_LSB:
161 case DAR_SRC_ADDRS_SUM_MSB:
162 case DAR_T3CMP_LSB:
163 case DAR_T3CMP_MSB:
164 case DAR_T3CMP_USB:
165 case DAR_T2PRIMECMP_LSB:
166 case DAR_T2PRIMECMP_MSB:
167 case DAR_T1CMP_LSB:
168 case DAR_T1CMP_MSB:
169 case DAR_T1CMP_USB:
170 case DAR_T2CMP_LSB:
171 case DAR_T2CMP_MSB:
172 case DAR_T2CMP_USB:
173 case DAR_T4CMP_LSB:
174 case DAR_T4CMP_MSB:
175 case DAR_T4CMP_USB:
176 case DAR_PLL_INT0:
177 case DAR_PLL_FRAC0_LSB:
178 case DAR_PLL_FRAC0_MSB:
179 case DAR_PA_PWR:
180 /* no DAR_ACM */
181 case DAR_OVERWRITE_VER:
182 case DAR_CLK_OUT_CTRL:
183 case DAR_PWR_MODES:
184 return true;
185 default:
186 return false;
187 }
188}
189
190static bool
191mcr20a_dar_readable(struct device *dev, unsigned int reg)
192{
193 bool rc;
194
195 /* all writeable are also readable */
196 rc = mcr20a_dar_writeable(dev, reg);
197 if (rc)
198 return rc;
199
200 /* readonly regs */
201 switch (reg) {
202 case DAR_RX_FRM_LEN:
203 case DAR_CCA1_ED_FNL:
204 case DAR_EVENT_TMR_LSB:
205 case DAR_EVENT_TMR_MSB:
206 case DAR_EVENT_TMR_USB:
207 case DAR_TIMESTAMP_LSB:
208 case DAR_TIMESTAMP_MSB:
209 case DAR_TIMESTAMP_USB:
210 case DAR_SEQ_STATE:
211 case DAR_LQI_VALUE:
212 case DAR_RSSI_CCA_CONT:
213 return true;
214 default:
215 return false;
216 }
217}
218
219static bool
220mcr20a_dar_volatile(struct device *dev, unsigned int reg)
221{
222 /* can be changed during runtime */
223 switch (reg) {
224 case DAR_IRQ_STS1:
225 case DAR_IRQ_STS2:
226 case DAR_IRQ_STS3:
		/* accessed via both spi_async and regmap, so treat as volatile */
228 return true;
229 default:
230 return false;
231 }
232}
233
234static bool
235mcr20a_dar_precious(struct device *dev, unsigned int reg)
236{
237 /* don't clear irq line on read */
238 switch (reg) {
239 case DAR_IRQ_STS1:
240 case DAR_IRQ_STS2:
241 case DAR_IRQ_STS3:
242 return true;
243 default:
244 return false;
245 }
246}
247
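/*
 * DAR registers are addressed with a single SPI command byte: bit 7
 * selects read (1) or write (0), bit 6 selects packet-buffer access,
 * and the remaining bits carry the register address. The flag masks
 * below build that command byte around the 8-bit register number.
 */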
248static const struct regmap_config mcr20a_dar_regmap = {
249 .name = "mcr20a_dar",
250 .reg_bits = 8,
251 .val_bits = 8,
252 .write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE,
253 .read_flag_mask = REGISTER_ACCESS | REGISTER_READ,
254 .cache_type = REGCACHE_RBTREE,
255 .writeable_reg = mcr20a_dar_writeable,
256 .readable_reg = mcr20a_dar_readable,
257 .volatile_reg = mcr20a_dar_volatile,
258 .precious_reg = mcr20a_dar_precious,
259 .fast_io = true,
260 .can_multi_write = true,
261};
262
263static bool
264mcr20a_iar_writeable(struct device *dev, unsigned int reg)
265{
266 switch (reg) {
267 case IAR_XTAL_TRIM:
268 case IAR_PMC_LP_TRIM:
269 case IAR_MACPANID0_LSB:
270 case IAR_MACPANID0_MSB:
271 case IAR_MACSHORTADDRS0_LSB:
272 case IAR_MACSHORTADDRS0_MSB:
273 case IAR_MACLONGADDRS0_0:
274 case IAR_MACLONGADDRS0_8:
275 case IAR_MACLONGADDRS0_16:
276 case IAR_MACLONGADDRS0_24:
277 case IAR_MACLONGADDRS0_32:
278 case IAR_MACLONGADDRS0_40:
279 case IAR_MACLONGADDRS0_48:
280 case IAR_MACLONGADDRS0_56:
281 case IAR_RX_FRAME_FILTER:
282 case IAR_PLL_INT1:
283 case IAR_PLL_FRAC1_LSB:
284 case IAR_PLL_FRAC1_MSB:
285 case IAR_MACPANID1_LSB:
286 case IAR_MACPANID1_MSB:
287 case IAR_MACSHORTADDRS1_LSB:
288 case IAR_MACSHORTADDRS1_MSB:
289 case IAR_MACLONGADDRS1_0:
290 case IAR_MACLONGADDRS1_8:
291 case IAR_MACLONGADDRS1_16:
292 case IAR_MACLONGADDRS1_24:
293 case IAR_MACLONGADDRS1_32:
294 case IAR_MACLONGADDRS1_40:
295 case IAR_MACLONGADDRS1_48:
296 case IAR_MACLONGADDRS1_56:
297 case IAR_DUAL_PAN_CTRL:
298 case IAR_DUAL_PAN_DWELL:
299 case IAR_CCA1_THRESH:
300 case IAR_CCA1_ED_OFFSET_COMP:
301 case IAR_LQI_OFFSET_COMP:
302 case IAR_CCA_CTRL:
303 case IAR_CCA2_CORR_PEAKS:
304 case IAR_CCA2_CORR_THRESH:
305 case IAR_TMR_PRESCALE:
306 case IAR_ANT_PAD_CTRL:
307 case IAR_MISC_PAD_CTRL:
308 case IAR_BSM_CTRL:
309 case IAR_RNG:
310 case IAR_RX_WTR_MARK:
311 case IAR_SOFT_RESET:
312 case IAR_TXDELAY:
313 case IAR_ACKDELAY:
314 case IAR_CORR_NVAL:
315 case IAR_ANT_AGC_CTRL:
316 case IAR_AGC_THR1:
317 case IAR_AGC_THR2:
318 case IAR_PA_CAL:
319 case IAR_ATT_RSSI1:
320 case IAR_ATT_RSSI2:
321 case IAR_RSSI_OFFSET:
322 case IAR_XTAL_CTRL:
323 case IAR_CHF_PMA_GAIN:
324 case IAR_CHF_IBUF:
325 case IAR_CHF_QBUF:
326 case IAR_CHF_IRIN:
327 case IAR_CHF_QRIN:
328 case IAR_CHF_IL:
329 case IAR_CHF_QL:
330 case IAR_CHF_CC1:
331 case IAR_CHF_CCL:
332 case IAR_CHF_CC2:
333 case IAR_CHF_IROUT:
334 case IAR_CHF_QROUT:
335 case IAR_PA_TUNING:
336 case IAR_VCO_CTRL1:
337 case IAR_VCO_CTRL2:
338 return true;
339 default:
340 return false;
341 }
342}
343
344static bool
345mcr20a_iar_readable(struct device *dev, unsigned int reg)
346{
347 bool rc;
348
349 /* all writeable are also readable */
350 rc = mcr20a_iar_writeable(dev, reg);
351 if (rc)
352 return rc;
353
354 /* readonly regs */
355 switch (reg) {
356 case IAR_PART_ID:
357 case IAR_DUAL_PAN_STS:
358 case IAR_RX_BYTE_COUNT:
359 case IAR_FILTERFAIL_CODE1:
360 case IAR_FILTERFAIL_CODE2:
361 case IAR_RSSI:
362 return true;
363 default:
364 return false;
365 }
366}
367
368static bool
369mcr20a_iar_volatile(struct device *dev, unsigned int reg)
370{
	/* can be changed during runtime */
372 switch (reg) {
373 case IAR_DUAL_PAN_STS:
374 case IAR_RX_BYTE_COUNT:
375 case IAR_FILTERFAIL_CODE1:
376 case IAR_FILTERFAIL_CODE2:
377 case IAR_RSSI:
378 return true;
379 default:
380 return false;
381 }
382}
383
384static const struct regmap_config mcr20a_iar_regmap = {
385 .name = "mcr20a_iar",
386 .reg_bits = 16,
387 .val_bits = 8,
388 .write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX,
389 .read_flag_mask = REGISTER_ACCESS | REGISTER_READ | IAR_INDEX,
390 .cache_type = REGCACHE_RBTREE,
391 .writeable_reg = mcr20a_iar_writeable,
392 .readable_reg = mcr20a_iar_readable,
393 .volatile_reg = mcr20a_iar_volatile,
394 .fast_io = true,
395};
396
397struct mcr20a_local {
398 struct spi_device *spi;
399
400 struct ieee802154_hw *hw;
401 struct regmap *regmap_dar;
402 struct regmap *regmap_iar;
403
404 u8 *buf;
405
406 bool is_tx;
407
408 /* for writing tx buffer */
409 struct spi_message tx_buf_msg;
410 u8 tx_header[1];
411 /* burst buffer write command */
412 struct spi_transfer tx_xfer_header;
413 u8 tx_len[1];
414 /* len of tx packet */
415 struct spi_transfer tx_xfer_len;
416 /* data of tx packet */
417 struct spi_transfer tx_xfer_buf;
418 struct sk_buff *tx_skb;
419
	/* for reading the RX FIFO frame length */
421 struct spi_message reg_msg;
422 u8 reg_cmd[1];
423 u8 reg_data[MCR20A_IRQSTS_NUM];
424 struct spi_transfer reg_xfer_cmd;
425 struct spi_transfer reg_xfer_data;
426
427 /* receive handling */
428 struct spi_message rx_buf_msg;
429 u8 rx_header[1];
430 struct spi_transfer rx_xfer_header;
431 u8 rx_lqi[1];
432 struct spi_transfer rx_xfer_lqi;
433 u8 rx_buf[MCR20A_MAX_BUF];
434 struct spi_transfer rx_xfer_buf;
435
436 /* isr handling for reading intstat */
437 struct spi_message irq_msg;
438 u8 irq_header[1];
439 u8 irq_data[MCR20A_IRQSTS_NUM];
440 struct spi_transfer irq_xfer_data;
441 struct spi_transfer irq_xfer_header;
442};
443
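/*
 * Completion callback for the packet-buffer write: the payload is now in
 * the TX FIFO, so kick the transmit sequence by writing XCVSEQ_TX into
 * DAR_PHY_CTRL1.
 */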
444static void
445mcr20a_write_tx_buf_complete(void *context)
446{
447 struct mcr20a_local *lp = context;
448 int ret;
449
450 dev_dbg(printdev(lp), "%s\n", __func__);
451
452 lp->reg_msg.complete = NULL;
453 lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
454 lp->reg_data[0] = MCR20A_XCVSEQ_TX;
455 lp->reg_xfer_data.len = 1;
456
457 ret = spi_async(lp->spi, &lp->reg_msg);
458 if (ret)
459 dev_err(printdev(lp), "failed to set SEQ TX\n");
460}
461
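/*
 * Asynchronous transmit entry point. The frame is only queued here and the
 * transceiver is forced back to idle; the resulting sequence interrupt (see
 * mcr20a_irq_clean_complete()) writes the payload into the packet buffer
 * and then starts the TX sequence.
 */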
462static int
463mcr20a_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
464{
465 struct mcr20a_local *lp = hw->priv;
466
467 dev_dbg(printdev(lp), "%s\n", __func__);
468
469 lp->tx_skb = skb;
470
471 print_hex_dump_debug("mcr20a tx: ", DUMP_PREFIX_OFFSET, 16, 1,
472 skb->data, skb->len, 0);
473
474 lp->is_tx = 1;
475
476 lp->reg_msg.complete = NULL;
477 lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
478 lp->reg_data[0] = MCR20A_XCVSEQ_IDLE;
479 lp->reg_xfer_data.len = 1;
480
481 return spi_async(lp->spi, &lp->reg_msg);
482}
483
484static int
485mcr20a_ed(struct ieee802154_hw *hw, u8 *level)
486{
487 WARN_ON(!level);
488 *level = 0xbe;
489 return 0;
490}
491
492static int
493mcr20a_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
494{
495 struct mcr20a_local *lp = hw->priv;
496 int ret;
497
498 dev_dbg(printdev(lp), "%s\n", __func__);
499
	/* frequency = ((PLL_INT + 64) + (PLL_FRAC / 65536)) * 32 MHz */
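	/* e.g. channel 11: (0x0B + 64 + 0x2800 / 65536) * 32 MHz = 2405 MHz */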
501 ret = regmap_write(lp->regmap_dar, DAR_PLL_INT0, PLL_INT[channel - 11]);
502 if (ret)
503 return ret;
504 ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_LSB, 0x00);
505 if (ret)
506 return ret;
507 ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_MSB,
508 PLL_FRAC[channel - 11]);
509 if (ret)
510 return ret;
511
512 return 0;
513}
514
515static int
516mcr20a_start(struct ieee802154_hw *hw)
517{
518 struct mcr20a_local *lp = hw->priv;
519 int ret;
520
521 dev_dbg(printdev(lp), "%s\n", __func__);
522
523 /* No slotted operation */
524 dev_dbg(printdev(lp), "no slotted operation\n");
525 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
526 DAR_PHY_CTRL1_SLOTTED, 0x0);
527 if (ret < 0)
528 return ret;
529
530 /* enable irq */
531 enable_irq(lp->spi->irq);
532
533 /* Unmask SEQ interrupt */
534 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
535 DAR_PHY_CTRL2_SEQMSK, 0x0);
536 if (ret < 0)
537 return ret;
538
539 /* Start the RX sequence */
540 dev_dbg(printdev(lp), "start the RX sequence\n");
541 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
542 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
543 if (ret < 0)
544 return ret;
545
546 return 0;
547}
548
549static void
550mcr20a_stop(struct ieee802154_hw *hw)
551{
552 struct mcr20a_local *lp = hw->priv;
553
554 dev_dbg(printdev(lp), "%s\n", __func__);
555
556 /* stop all running sequence */
557 regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
558 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);
559
560 /* disable irq */
561 disable_irq(lp->spi->irq);
562}
563
564static int
565mcr20a_set_hw_addr_filt(struct ieee802154_hw *hw,
566 struct ieee802154_hw_addr_filt *filt,
567 unsigned long changed)
568{
569 struct mcr20a_local *lp = hw->priv;
570
571 dev_dbg(printdev(lp), "%s\n", __func__);
572
573 if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
574 u16 addr = le16_to_cpu(filt->short_addr);
575
576 regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_LSB, addr);
577 regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_MSB, addr >> 8);
578 }
579
580 if (changed & IEEE802154_AFILT_PANID_CHANGED) {
581 u16 pan = le16_to_cpu(filt->pan_id);
582
583 regmap_write(lp->regmap_iar, IAR_MACPANID0_LSB, pan);
584 regmap_write(lp->regmap_iar, IAR_MACPANID0_MSB, pan >> 8);
585 }
586
587 if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
588 u8 addr[8], i;
589
590 memcpy(addr, &filt->ieee_addr, 8);
591 for (i = 0; i < 8; i++)
592 regmap_write(lp->regmap_iar,
593 IAR_MACLONGADDRS0_0 + i, addr[i]);
594 }
595
596 if (changed & IEEE802154_AFILT_PANC_CHANGED) {
597 if (filt->pan_coord) {
598 regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
599 DAR_PHY_CTRL4_PANCORDNTR0, 0x10);
600 } else {
601 regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
602 DAR_PHY_CTRL4_PANCORDNTR0, 0x00);
603 }
604 }
605
606 return 0;
607}
608
609/* -30 dBm to 10 dBm */
610#define MCR20A_MAX_TX_POWERS 0x14
611static const s32 mcr20a_powers[MCR20A_MAX_TX_POWERS + 1] = {
612 -3000, -2800, -2600, -2400, -2200, -2000, -1800, -1600, -1400,
613 -1200, -1000, -800, -600, -400, -200, 0, 200, 400, 600, 800, 1000
614};
615
616static int
617mcr20a_set_txpower(struct ieee802154_hw *hw, s32 mbm)
618{
619 struct mcr20a_local *lp = hw->priv;
620 u32 i;
621
622 dev_dbg(printdev(lp), "%s(%d)\n", __func__, mbm);
623
624 for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
625 if (lp->hw->phy->supported.tx_powers[i] == mbm)
626 return regmap_write(lp->regmap_dar, DAR_PA_PWR,
627 ((i + 8) & 0x1F));
628 }
629
630 return -EINVAL;
631}
632
633#define MCR20A_MAX_ED_LEVELS MCR20A_MIN_CCA_THRESHOLD
634static s32 mcr20a_ed_levels[MCR20A_MAX_ED_LEVELS + 1];
635
636static int
637mcr20a_set_cca_mode(struct ieee802154_hw *hw,
638 const struct wpan_phy_cca *cca)
639{
640 struct mcr20a_local *lp = hw->priv;
641 unsigned int cca_mode = 0xff;
642 bool cca_mode_and = false;
643 int ret;
644
645 dev_dbg(printdev(lp), "%s\n", __func__);
646
647 /* mapping 802.15.4 to driver spec */
648 switch (cca->mode) {
649 case NL802154_CCA_ENERGY:
650 cca_mode = MCR20A_CCA_MODE1;
651 break;
652 case NL802154_CCA_CARRIER:
653 cca_mode = MCR20A_CCA_MODE2;
654 break;
655 case NL802154_CCA_ENERGY_CARRIER:
656 switch (cca->opt) {
657 case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
658 cca_mode = MCR20A_CCA_MODE3;
659 cca_mode_and = true;
660 break;
661 case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
662 cca_mode = MCR20A_CCA_MODE3;
663 cca_mode_and = false;
664 break;
665 default:
666 return -EINVAL;
667 }
668 break;
669 default:
670 return -EINVAL;
671 }
672 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
673 DAR_PHY_CTRL4_CCATYPE_MASK,
674 cca_mode << DAR_PHY_CTRL4_CCATYPE_SHIFT);
675 if (ret < 0)
676 return ret;
677
678 if (cca_mode == MCR20A_CCA_MODE3) {
679 if (cca_mode_and) {
680 ret = regmap_update_bits(lp->regmap_iar, IAR_CCA_CTRL,
681 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
682 0x08);
683 } else {
684 ret = regmap_update_bits(lp->regmap_iar,
685 IAR_CCA_CTRL,
686 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
687 0x00);
688 }
689 if (ret < 0)
690 return ret;
691 }
692
693 return ret;
694}
695
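/*
 * The supported ED levels are generated in mcr20a_hw_setup() as -i * 100 mbm,
 * so the table index equals the absolute threshold in dBm and can be written
 * to IAR_CCA1_THRESH directly.
 */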
696static int
697mcr20a_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
698{
699 struct mcr20a_local *lp = hw->priv;
700 u32 i;
701
702 dev_dbg(printdev(lp), "%s\n", __func__);
703
704 for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
705 if (hw->phy->supported.cca_ed_levels[i] == mbm)
706 return regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, i);
707 }
708
709 return 0;
710}
711
712static int
713mcr20a_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
714{
715 struct mcr20a_local *lp = hw->priv;
716 int ret;
717 u8 rx_frame_filter_reg = 0x0;
718
719 dev_dbg(printdev(lp), "%s(%d)\n", __func__, on);
720
721 if (on) {
		/* All frame types accepted */
723 rx_frame_filter_reg &= ~(IAR_RX_FRAME_FLT_FRM_VER);
724 rx_frame_filter_reg |= (IAR_RX_FRAME_FLT_ACK_FT |
725 IAR_RX_FRAME_FLT_NS_FT);
726
727 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
728 DAR_PHY_CTRL4_PROMISCUOUS,
729 DAR_PHY_CTRL4_PROMISCUOUS);
730 if (ret < 0)
731 return ret;
732
733 ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
734 rx_frame_filter_reg);
735 if (ret < 0)
736 return ret;
737 } else {
738 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
739 DAR_PHY_CTRL4_PROMISCUOUS, 0x0);
740 if (ret < 0)
741 return ret;
742
743 ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
744 IAR_RX_FRAME_FLT_FRM_VER |
745 IAR_RX_FRAME_FLT_BEACON_FT |
746 IAR_RX_FRAME_FLT_DATA_FT |
747 IAR_RX_FRAME_FLT_CMD_FT);
748 if (ret < 0)
749 return ret;
750 }
751
752 return 0;
753}
754
755static const struct ieee802154_ops mcr20a_hw_ops = {
756 .owner = THIS_MODULE,
757 .xmit_async = mcr20a_xmit,
758 .ed = mcr20a_ed,
759 .set_channel = mcr20a_set_channel,
760 .start = mcr20a_start,
761 .stop = mcr20a_stop,
762 .set_hw_addr_filt = mcr20a_set_hw_addr_filt,
763 .set_txpower = mcr20a_set_txpower,
764 .set_cca_mode = mcr20a_set_cca_mode,
765 .set_cca_ed_level = mcr20a_set_cca_ed_level,
766 .set_promiscuous_mode = mcr20a_set_promiscuous_mode,
767};
768
769static int
770mcr20a_request_rx(struct mcr20a_local *lp)
771{
772 dev_dbg(printdev(lp), "%s\n", __func__);
773
774 /* Start the RX sequence */
775 regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
776 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
777
778 return 0;
779}
780
781static void
782mcr20a_handle_rx_read_buf_complete(void *context)
783{
784 struct mcr20a_local *lp = context;
785 u8 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
786 struct sk_buff *skb;
787
788 dev_dbg(printdev(lp), "%s\n", __func__);
789
790 dev_dbg(printdev(lp), "RX is done\n");
791
792 if (!ieee802154_is_valid_psdu_len(len)) {
793 dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
794 len = IEEE802154_MTU;
795 }
796
	len = len - 2; /* strip the 2-byte frame check sequence (FCS) */
798
799 skb = dev_alloc_skb(len);
800 if (!skb)
801 return;
802
803 __skb_put_data(skb, lp->rx_buf, len);
804 ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);
805
806 print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1,
807 lp->rx_buf, len, 0);
808 pr_debug("mcr20a rx: lqi: %02hhx\n", lp->rx_lqi[0]);
809
810 /* start RX sequence */
811 mcr20a_request_rx(lp);
812}
813
814static void
815mcr20a_handle_rx_read_len_complete(void *context)
816{
817 struct mcr20a_local *lp = context;
818 u8 len;
819 int ret;
820
821 dev_dbg(printdev(lp), "%s\n", __func__);
822
823 /* get the length of received frame */
824 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
825 dev_dbg(printdev(lp), "frame len : %d\n", len);
826
827 /* prepare to read the rx buf */
828 lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
829 lp->rx_header[0] = MCR20A_BURST_READ_PACKET_BUF;
830 lp->rx_xfer_buf.len = len;
831
832 ret = spi_async(lp->spi, &lp->rx_buf_msg);
833 if (ret)
		dev_err(printdev(lp), "failed to read rx buffer\n");
835}
836
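/*
 * RX path: read DAR_RX_FRM_LEN asynchronously; its completion reads the
 * packet buffer plus the trailing LQI byte, and that completion hands the
 * frame to mac802154 and restarts the RX sequence.
 */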
837static int
838mcr20a_handle_rx(struct mcr20a_local *lp)
839{
840 dev_dbg(printdev(lp), "%s\n", __func__);
841 lp->reg_msg.complete = mcr20a_handle_rx_read_len_complete;
842 lp->reg_cmd[0] = MCR20A_READ_REG(DAR_RX_FRM_LEN);
843 lp->reg_xfer_data.len = 1;
844
845 return spi_async(lp->spi, &lp->reg_msg);
846}
847
848static int
849mcr20a_handle_tx_complete(struct mcr20a_local *lp)
850{
851 dev_dbg(printdev(lp), "%s\n", __func__);
852
853 ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
854
855 return mcr20a_request_rx(lp);
856}
857
858static int
859mcr20a_handle_tx(struct mcr20a_local *lp)
860{
861 int ret;
862
863 dev_dbg(printdev(lp), "%s\n", __func__);
864
865 /* write tx buffer */
866 lp->tx_header[0] = MCR20A_BURST_WRITE_PACKET_BUF;
867 /* add 2 bytes of FCS */
868 lp->tx_len[0] = lp->tx_skb->len + 2;
869 lp->tx_xfer_buf.tx_buf = lp->tx_skb->data;
870 /* add 1 byte psduLength */
871 lp->tx_xfer_buf.len = lp->tx_skb->len + 1;
872
873 ret = spi_async(lp->spi, &lp->tx_buf_msg);
874 if (ret) {
875 dev_err(printdev(lp), "SPI write Failed for TX buf\n");
876 return ret;
877 }
878
879 return 0;
880}
881
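/*
 * Runs once the latched IRQ status bytes have been written back
 * (acknowledged): re-enable the interrupt line and dispatch on the
 * SEQ/TX/RX bits that fired.
 */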
882static void
883mcr20a_irq_clean_complete(void *context)
884{
885 struct mcr20a_local *lp = context;
886 u8 seq_state = lp->irq_data[DAR_IRQ_STS1] & DAR_PHY_CTRL1_XCVSEQ_MASK;
887
888 dev_dbg(printdev(lp), "%s\n", __func__);
889
890 enable_irq(lp->spi->irq);
891
892 dev_dbg(printdev(lp), "IRQ STA1 (%02x) STA2 (%02x)\n",
893 lp->irq_data[DAR_IRQ_STS1], lp->irq_data[DAR_IRQ_STS2]);
894
895 switch (seq_state) {
896 /* TX IRQ, RX IRQ and SEQ IRQ */
897 case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
898 if (lp->is_tx) {
899 lp->is_tx = 0;
900 dev_dbg(printdev(lp), "TX is done. No ACK\n");
901 mcr20a_handle_tx_complete(lp);
902 }
903 break;
904 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
905 /* rx is starting */
906 dev_dbg(printdev(lp), "RX is starting\n");
907 mcr20a_handle_rx(lp);
908 break;
909 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
910 if (lp->is_tx) {
911 /* tx is done */
912 lp->is_tx = 0;
913 dev_dbg(printdev(lp), "TX is done. Get ACK\n");
914 mcr20a_handle_tx_complete(lp);
915 } else {
916 /* rx is starting */
917 dev_dbg(printdev(lp), "RX is starting\n");
918 mcr20a_handle_rx(lp);
919 }
920 break;
921 case (DAR_IRQSTS1_SEQIRQ):
922 if (lp->is_tx) {
923 dev_dbg(printdev(lp), "TX is starting\n");
924 mcr20a_handle_tx(lp);
925 } else {
			dev_dbg(printdev(lp), "MCR20A is stopped\n");
927 }
928 break;
929 }
930}
931
932static void mcr20a_irq_status_complete(void *context)
933{
934 int ret;
935 struct mcr20a_local *lp = context;
936
937 dev_dbg(printdev(lp), "%s\n", __func__);
938 regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
939 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);
940
941 lp->reg_msg.complete = mcr20a_irq_clean_complete;
942 lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_IRQ_STS1);
943 memcpy(lp->reg_data, lp->irq_data, MCR20A_IRQSTS_NUM);
944 lp->reg_xfer_data.len = MCR20A_IRQSTS_NUM;
945
946 ret = spi_async(lp->spi, &lp->reg_msg);
947
948 if (ret)
949 dev_err(printdev(lp), "failed to clean irq status\n");
950}
951
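/*
 * Hard IRQ handler: mask the line and read IRQSTS1/2 asynchronously. The
 * completion chain (mcr20a_irq_status_complete -> mcr20a_irq_clean_complete)
 * idles the sequencer, acknowledges the status bits and re-enables the IRQ.
 */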
952static irqreturn_t mcr20a_irq_isr(int irq, void *data)
953{
954 struct mcr20a_local *lp = data;
955 int ret;
956
957 disable_irq_nosync(irq);
958
959 lp->irq_header[0] = MCR20A_READ_REG(DAR_IRQ_STS1);
960 /* read IRQSTSx */
961 ret = spi_async(lp->spi, &lp->irq_msg);
962 if (ret) {
963 enable_irq(irq);
964 return IRQ_NONE;
965 }
966
967 return IRQ_HANDLED;
968}
969
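/*
 * Advertise the radio's capabilities to mac802154: CCA modes and options,
 * the ED level table derived from the CCA threshold range, the 2.4 GHz
 * channel mask, the TX power table and the chip's reset defaults.
 */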
970static void mcr20a_hw_setup(struct mcr20a_local *lp)
971{
972 u8 i;
973 struct ieee802154_hw *hw = lp->hw;
974 struct wpan_phy *phy = lp->hw->phy;
975
976 dev_dbg(printdev(lp), "%s\n", __func__);
977
978 hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
979 IEEE802154_HW_AFILT |
980 IEEE802154_HW_PROMISCUOUS;
981
982 phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
983 WPAN_PHY_FLAG_CCA_MODE;
984
985 phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
986 BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
987 phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
988 BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
989
	/* initialize cca_ed_levels */
991 for (i = MCR20A_MAX_CCA_THRESHOLD; i < MCR20A_MIN_CCA_THRESHOLD + 1;
992 ++i) {
993 mcr20a_ed_levels[i] = -i * 100;
994 }
995
996 phy->supported.cca_ed_levels = mcr20a_ed_levels;
997 phy->supported.cca_ed_levels_size = ARRAY_SIZE(mcr20a_ed_levels);
998
999 phy->cca.mode = NL802154_CCA_ENERGY;
1000
1001 phy->supported.channels[0] = MCR20A_VALID_CHANNELS;
1002 phy->current_page = 0;
1003 /* MCR20A default reset value */
1004 phy->current_channel = 20;
1005 phy->supported.tx_powers = mcr20a_powers;
1006 phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers);
1007 phy->cca_ed_level = phy->supported.cca_ed_levels[75];
1008 phy->transmit_power = phy->supported.tx_powers[0x0F];
1009}
1010
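/*
 * The SPI messages below are assembled once at probe time and reused from
 * interrupt and completion context, where only spi_async() may be used.
 */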
1011static void
1012mcr20a_setup_tx_spi_messages(struct mcr20a_local *lp)
1013{
1014 spi_message_init(&lp->tx_buf_msg);
1015 lp->tx_buf_msg.context = lp;
1016 lp->tx_buf_msg.complete = mcr20a_write_tx_buf_complete;
1017
1018 lp->tx_xfer_header.len = 1;
1019 lp->tx_xfer_header.tx_buf = lp->tx_header;
1020
1021 lp->tx_xfer_len.len = 1;
1022 lp->tx_xfer_len.tx_buf = lp->tx_len;
1023
1024 spi_message_add_tail(&lp->tx_xfer_header, &lp->tx_buf_msg);
1025 spi_message_add_tail(&lp->tx_xfer_len, &lp->tx_buf_msg);
1026 spi_message_add_tail(&lp->tx_xfer_buf, &lp->tx_buf_msg);
1027}
1028
1029static void
1030mcr20a_setup_rx_spi_messages(struct mcr20a_local *lp)
1031{
1032 spi_message_init(&lp->reg_msg);
1033 lp->reg_msg.context = lp;
1034
1035 lp->reg_xfer_cmd.len = 1;
1036 lp->reg_xfer_cmd.tx_buf = lp->reg_cmd;
1037 lp->reg_xfer_cmd.rx_buf = lp->reg_cmd;
1038
1039 lp->reg_xfer_data.rx_buf = lp->reg_data;
1040 lp->reg_xfer_data.tx_buf = lp->reg_data;
1041
1042 spi_message_add_tail(&lp->reg_xfer_cmd, &lp->reg_msg);
1043 spi_message_add_tail(&lp->reg_xfer_data, &lp->reg_msg);
1044
1045 spi_message_init(&lp->rx_buf_msg);
1046 lp->rx_buf_msg.context = lp;
1047 lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
1048 lp->rx_xfer_header.len = 1;
1049 lp->rx_xfer_header.tx_buf = lp->rx_header;
1050 lp->rx_xfer_header.rx_buf = lp->rx_header;
1051
1052 lp->rx_xfer_buf.rx_buf = lp->rx_buf;
1053
1054 lp->rx_xfer_lqi.len = 1;
1055 lp->rx_xfer_lqi.rx_buf = lp->rx_lqi;
1056
1057 spi_message_add_tail(&lp->rx_xfer_header, &lp->rx_buf_msg);
1058 spi_message_add_tail(&lp->rx_xfer_buf, &lp->rx_buf_msg);
1059 spi_message_add_tail(&lp->rx_xfer_lqi, &lp->rx_buf_msg);
1060}
1061
1062static void
1063mcr20a_setup_irq_spi_messages(struct mcr20a_local *lp)
1064{
1065 spi_message_init(&lp->irq_msg);
1066 lp->irq_msg.context = lp;
1067 lp->irq_msg.complete = mcr20a_irq_status_complete;
1068 lp->irq_xfer_header.len = 1;
1069 lp->irq_xfer_header.tx_buf = lp->irq_header;
1070 lp->irq_xfer_header.rx_buf = lp->irq_header;
1071
1072 lp->irq_xfer_data.len = MCR20A_IRQSTS_NUM;
1073 lp->irq_xfer_data.rx_buf = lp->irq_data;
1074
1075 spi_message_add_tail(&lp->irq_xfer_header, &lp->irq_msg);
1076 spi_message_add_tail(&lp->irq_xfer_data, &lp->irq_msg);
1077}
1078
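/*
 * One-time radio bring-up: clear pending interrupt status, enable AUTOACK,
 * mask unused interrupts and timers, apply the version-0C DAR/IAR overwrite
 * tables, flush the hardware indirect queue, and set the default CCA
 * threshold, timer prescale and autodoze power mode.
 */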
1079static int
1080mcr20a_phy_init(struct mcr20a_local *lp)
1081{
1082 u8 index;
1083 unsigned int phy_reg = 0;
1084 int ret;
1085
1086 dev_dbg(printdev(lp), "%s\n", __func__);
1087
1088 /* Disable Tristate on COCO MISO for SPI reads */
1089 ret = regmap_write(lp->regmap_iar, IAR_MISC_PAD_CTRL, 0x02);
1090 if (ret)
1091 goto err_ret;
1092
1093 /* Clear all PP IRQ bits in IRQSTS1 to avoid unexpected interrupts
1094 * immediately after init
1095 */
1096 ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS1, 0xEF);
1097 if (ret)
1098 goto err_ret;
1099
1100 /* Clear all PP IRQ bits in IRQSTS2 */
1101 ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS2,
1102 DAR_IRQSTS2_ASM_IRQ | DAR_IRQSTS2_PB_ERR_IRQ |
1103 DAR_IRQSTS2_WAKE_IRQ);
1104 if (ret)
1105 goto err_ret;
1106
1107 /* Disable all timer interrupts */
1108 ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS3, 0xFF);
1109 if (ret)
1110 goto err_ret;
1111
1112 /* PHY_CTRL1 : default HW settings + AUTOACK enabled */
1113 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
1114 DAR_PHY_CTRL1_AUTOACK, DAR_PHY_CTRL1_AUTOACK);
1115
1116 /* PHY_CTRL2 : disable all interrupts */
1117 ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL2, 0xFF);
1118 if (ret)
1119 goto err_ret;
1120
1121 /* PHY_CTRL3 : disable all timers and remaining interrupts */
1122 ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL3,
1123 DAR_PHY_CTRL3_ASM_MSK | DAR_PHY_CTRL3_PB_ERR_MSK |
1124 DAR_PHY_CTRL3_WAKE_MSK);
1125 if (ret)
1126 goto err_ret;
1127
1128 /* SRC_CTRL : enable Acknowledge Frame Pending and
1129 * Source Address Matching Enable
1130 */
1131 ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL,
1132 DAR_SRC_CTRL_ACK_FRM_PND |
1133 (DAR_SRC_CTRL_INDEX << DAR_SRC_CTRL_INDEX_SHIFT));
1134 if (ret)
1135 goto err_ret;
1136
1137 /* RX_FRAME_FILTER */
1138 /* FRM_VER[1:0] = b11. Accept FrameVersion 0 and 1 packets */
1139 ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
1140 IAR_RX_FRAME_FLT_FRM_VER |
1141 IAR_RX_FRAME_FLT_BEACON_FT |
1142 IAR_RX_FRAME_FLT_DATA_FT |
1143 IAR_RX_FRAME_FLT_CMD_FT);
1144 if (ret)
1145 goto err_ret;
1146
1147 dev_info(printdev(lp), "MCR20A DAR overwrites version: 0x%02x\n",
1148 MCR20A_OVERWRITE_VERSION);
1149
1150 /* Overwrites direct registers */
1151 ret = regmap_write(lp->regmap_dar, DAR_OVERWRITE_VER,
1152 MCR20A_OVERWRITE_VERSION);
1153 if (ret)
1154 goto err_ret;
1155
1156 /* Overwrites indirect registers */
1157 ret = regmap_multi_reg_write(lp->regmap_iar, mar20a_iar_overwrites,
1158 ARRAY_SIZE(mar20a_iar_overwrites));
1159 if (ret)
1160 goto err_ret;
1161
1162 /* Clear HW indirect queue */
1163 dev_dbg(printdev(lp), "clear HW indirect queue\n");
1164 for (index = 0; index < MCR20A_PHY_INDIRECT_QUEUE_SIZE; index++) {
1165 phy_reg = (u8)(((index & DAR_SRC_CTRL_INDEX) <<
1166 DAR_SRC_CTRL_INDEX_SHIFT)
1167 | (DAR_SRC_CTRL_SRCADDR_EN)
1168 | (DAR_SRC_CTRL_INDEX_DISABLE));
1169 ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL, phy_reg);
1170 if (ret)
1171 goto err_ret;
1172 phy_reg = 0;
1173 }
1174
1175 /* Assign HW Indirect hash table to PAN0 */
1176 ret = regmap_read(lp->regmap_iar, IAR_DUAL_PAN_CTRL, &phy_reg);
1177 if (ret)
1178 goto err_ret;
1179
1180 /* Clear current lvl */
1181 phy_reg &= ~IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_MSK;
1182
1183 /* Set new lvl */
1184 phy_reg |= MCR20A_PHY_INDIRECT_QUEUE_SIZE <<
1185 IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_SHIFT;
1186 ret = regmap_write(lp->regmap_iar, IAR_DUAL_PAN_CTRL, phy_reg);
1187 if (ret)
1188 goto err_ret;
1189
1190 /* Set CCA threshold to -75 dBm */
1191 ret = regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, 0x4B);
1192 if (ret)
1193 goto err_ret;
1194
	/* Set prescaler to obtain 1 symbol (16us) timebase */
1196 ret = regmap_write(lp->regmap_iar, IAR_TMR_PRESCALE, 0x05);
1197 if (ret)
1198 goto err_ret;
1199
1200 /* Enable autodoze mode. */
1201 ret = regmap_update_bits(lp->regmap_dar, DAR_PWR_MODES,
1202 DAR_PWR_MODES_AUTODOZE,
1203 DAR_PWR_MODES_AUTODOZE);
1204 if (ret)
1205 goto err_ret;
1206
1207 /* Disable clk_out */
1208 ret = regmap_update_bits(lp->regmap_dar, DAR_CLK_OUT_CTRL,
1209 DAR_CLK_OUT_CTRL_EN, 0x0);
1210 if (ret)
1211 goto err_ret;
1212
1213 return 0;
1214
1215err_ret:
1216 return ret;
1217}
1218
1219static int
1220mcr20a_probe(struct spi_device *spi)
1221{
1222 struct ieee802154_hw *hw;
1223 struct mcr20a_local *lp;
1224 struct gpio_desc *rst_b;
1225 int irq_type;
1226 int ret = -ENOMEM;
1227
1228 dev_dbg(&spi->dev, "%s\n", __func__);
1229
1230 if (!spi->irq) {
1231 dev_err(&spi->dev, "no IRQ specified\n");
1232 return -EINVAL;
1233 }
1234
1235 rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH);
1236 if (IS_ERR(rst_b))
1237 return dev_err_probe(&spi->dev, PTR_ERR(rst_b),
1238 "Failed to get 'rst_b' gpio");
1239
1240 /* reset mcr20a */
1241 usleep_range(10, 20);
1242 gpiod_set_value_cansleep(rst_b, 1);
1243 usleep_range(10, 20);
1244 gpiod_set_value_cansleep(rst_b, 0);
1245 usleep_range(120, 240);
1246
1247 /* allocate ieee802154_hw and private data */
1248 hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
1249 if (!hw) {
1250 dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
1251 return ret;
1252 }
1253
1254 /* init mcr20a local data */
1255 lp = hw->priv;
1256 lp->hw = hw;
1257 lp->spi = spi;
1258
1259 /* init ieee802154_hw */
1260 hw->parent = &spi->dev;
1261 ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
1262
1263 /* init buf */
1264 lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL);
1265
1266 if (!lp->buf) {
1267 ret = -ENOMEM;
1268 goto free_dev;
1269 }
1270
1271 mcr20a_setup_tx_spi_messages(lp);
1272 mcr20a_setup_rx_spi_messages(lp);
1273 mcr20a_setup_irq_spi_messages(lp);
1274
1275 /* setup regmap */
1276 lp->regmap_dar = devm_regmap_init_spi(spi, &mcr20a_dar_regmap);
1277 if (IS_ERR(lp->regmap_dar)) {
1278 ret = PTR_ERR(lp->regmap_dar);
1279 dev_err(&spi->dev, "Failed to allocate dar map: %d\n",
1280 ret);
1281 goto free_dev;
1282 }
1283
1284 lp->regmap_iar = devm_regmap_init_spi(spi, &mcr20a_iar_regmap);
1285 if (IS_ERR(lp->regmap_iar)) {
1286 ret = PTR_ERR(lp->regmap_iar);
1287 dev_err(&spi->dev, "Failed to allocate iar map: %d\n", ret);
1288 goto free_dev;
1289 }
1290
1291 mcr20a_hw_setup(lp);
1292
1293 spi_set_drvdata(spi, lp);
1294
1295 ret = mcr20a_phy_init(lp);
1296 if (ret < 0) {
1297 dev_crit(&spi->dev, "mcr20a_phy_init failed\n");
1298 goto free_dev;
1299 }
1300
1301 irq_type = irq_get_trigger_type(spi->irq);
1302 if (!irq_type)
1303 irq_type = IRQF_TRIGGER_FALLING;
1304
1305 ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr,
1306 irq_type, dev_name(&spi->dev), lp);
1307 if (ret) {
1308 dev_err(&spi->dev, "could not request_irq for mcr20a\n");
1309 ret = -ENODEV;
1310 goto free_dev;
1311 }
1312
	/* keep the IRQ disabled until the hardware is started */
1314 disable_irq(spi->irq);
1315
1316 ret = ieee802154_register_hw(hw);
1317 if (ret) {
1318 dev_crit(&spi->dev, "ieee802154_register_hw failed\n");
1319 goto free_dev;
1320 }
1321
1322 return ret;
1323
1324free_dev:
1325 ieee802154_free_hw(lp->hw);
1326
1327 return ret;
1328}
1329
1330static void mcr20a_remove(struct spi_device *spi)
1331{
1332 struct mcr20a_local *lp = spi_get_drvdata(spi);
1333
1334 dev_dbg(&spi->dev, "%s\n", __func__);
1335
1336 ieee802154_unregister_hw(lp->hw);
1337 ieee802154_free_hw(lp->hw);
1338}
1339
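/*
 * Illustrative device-tree node for this driver; a sketch only, not copied
 * from the binding document (controller phandles, pin numbers and the SPI
 * clock are placeholders). The match table below binds on "nxp,mcr20a",
 * and probe() requires an interrupt plus a "rst_b" reset GPIO:
 *
 *	mcr20a@0 {
 *		compatible = "nxp,mcr20a";
 *		reg = <0>;
 *		spi-max-frequency = <9000000>;
 *		interrupt-parent = <&gpio1>;
 *		interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
 *		rst_b-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
 *	};
 */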
1340static const struct of_device_id mcr20a_of_match[] = {
1341 { .compatible = "nxp,mcr20a", },
1342 { },
1343};
1344MODULE_DEVICE_TABLE(of, mcr20a_of_match);
1345
1346static const struct spi_device_id mcr20a_device_id[] = {
1347 { .name = "mcr20a", },
1348 { },
1349};
1350MODULE_DEVICE_TABLE(spi, mcr20a_device_id);
1351
1352static struct spi_driver mcr20a_driver = {
1353 .id_table = mcr20a_device_id,
1354 .driver = {
1355 .of_match_table = mcr20a_of_match,
1356 .name = "mcr20a",
1357 },
1358 .probe = mcr20a_probe,
1359 .remove = mcr20a_remove,
1360};
1361
1362module_spi_driver(mcr20a_driver);
1363
1364MODULE_DESCRIPTION("MCR20A Transceiver Driver");
1365MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Xue Liu <liuxuenetmail@gmail.com>");
1/*
2 * Driver for NXP MCR20A 802.15.4 Wireless-PAN Networking controller
3 *
4 * Copyright (C) 2018 Xue Liu <liuxuenetmail@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/gpio.h>
19#include <linux/spi/spi.h>
20#include <linux/workqueue.h>
21#include <linux/interrupt.h>
22#include <linux/skbuff.h>
23#include <linux/of_gpio.h>
24#include <linux/regmap.h>
25#include <linux/ieee802154.h>
26#include <linux/debugfs.h>
27
28#include <net/mac802154.h>
29#include <net/cfg802154.h>
30
31#include <linux/device.h>
32
33#include "mcr20a.h"
34
35#define SPI_COMMAND_BUFFER 3
36
37#define REGISTER_READ BIT(7)
38#define REGISTER_WRITE (0 << 7)
39#define REGISTER_ACCESS (0 << 6)
40#define PACKET_BUFF_BURST_ACCESS BIT(6)
41#define PACKET_BUFF_BYTE_ACCESS BIT(5)
42
43#define MCR20A_WRITE_REG(x) (x)
44#define MCR20A_READ_REG(x) (REGISTER_READ | (x))
45#define MCR20A_BURST_READ_PACKET_BUF (0xC0)
46#define MCR20A_BURST_WRITE_PACKET_BUF (0x40)
47
48#define MCR20A_CMD_REG 0x80
49#define MCR20A_CMD_REG_MASK 0x3f
50#define MCR20A_CMD_WRITE 0x40
51#define MCR20A_CMD_FB 0x20
52
53/* Number of Interrupt Request Status Register */
54#define MCR20A_IRQSTS_NUM 2 /* only IRQ_STS1 and IRQ_STS2 */
55
56/* MCR20A CCA Type */
57enum {
58 MCR20A_CCA_ED, // energy detect - CCA bit not active,
59 // not to be used for T and CCCA sequences
60 MCR20A_CCA_MODE1, // energy detect - CCA bit ACTIVE
61 MCR20A_CCA_MODE2, // 802.15.4 compliant signal detect - CCA bit ACTIVE
62 MCR20A_CCA_MODE3
63};
64
65enum {
66 MCR20A_XCVSEQ_IDLE = 0x00,
67 MCR20A_XCVSEQ_RX = 0x01,
68 MCR20A_XCVSEQ_TX = 0x02,
69 MCR20A_XCVSEQ_CCA = 0x03,
70 MCR20A_XCVSEQ_TR = 0x04,
71 MCR20A_XCVSEQ_CCCA = 0x05,
72};
73
74/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
75#define MCR20A_MIN_CHANNEL (11)
76#define MCR20A_MAX_CHANNEL (26)
77#define MCR20A_CHANNEL_SPACING (5)
78
79/* MCR20A CCA Threshold constans */
80#define MCR20A_MIN_CCA_THRESHOLD (0x6EU)
81#define MCR20A_MAX_CCA_THRESHOLD (0x00U)
82
83/* version 0C */
84#define MCR20A_OVERWRITE_VERSION (0x0C)
85
86/* MCR20A PLL configurations */
87static const u8 PLL_INT[16] = {
88 /* 2405 */ 0x0B, /* 2410 */ 0x0B, /* 2415 */ 0x0B,
89 /* 2420 */ 0x0B, /* 2425 */ 0x0B, /* 2430 */ 0x0B,
90 /* 2435 */ 0x0C, /* 2440 */ 0x0C, /* 2445 */ 0x0C,
91 /* 2450 */ 0x0C, /* 2455 */ 0x0C, /* 2460 */ 0x0C,
92 /* 2465 */ 0x0D, /* 2470 */ 0x0D, /* 2475 */ 0x0D,
93 /* 2480 */ 0x0D
94};
95
96static const u8 PLL_FRAC[16] = {
97 /* 2405 */ 0x28, /* 2410 */ 0x50, /* 2415 */ 0x78,
98 /* 2420 */ 0xA0, /* 2425 */ 0xC8, /* 2430 */ 0xF0,
99 /* 2435 */ 0x18, /* 2440 */ 0x40, /* 2445 */ 0x68,
100 /* 2450 */ 0x90, /* 2455 */ 0xB8, /* 2460 */ 0xE0,
101 /* 2465 */ 0x08, /* 2470 */ 0x30, /* 2475 */ 0x58,
102 /* 2480 */ 0x80
103};
104
105static const struct reg_sequence mar20a_iar_overwrites[] = {
106 { IAR_MISC_PAD_CTRL, 0x02 },
107 { IAR_VCO_CTRL1, 0xB3 },
108 { IAR_VCO_CTRL2, 0x07 },
109 { IAR_PA_TUNING, 0x71 },
110 { IAR_CHF_IBUF, 0x2F },
111 { IAR_CHF_QBUF, 0x2F },
112 { IAR_CHF_IRIN, 0x24 },
113 { IAR_CHF_QRIN, 0x24 },
114 { IAR_CHF_IL, 0x24 },
115 { IAR_CHF_QL, 0x24 },
116 { IAR_CHF_CC1, 0x32 },
117 { IAR_CHF_CCL, 0x1D },
118 { IAR_CHF_CC2, 0x2D },
119 { IAR_CHF_IROUT, 0x24 },
120 { IAR_CHF_QROUT, 0x24 },
121 { IAR_PA_CAL, 0x28 },
122 { IAR_AGC_THR1, 0x55 },
123 { IAR_AGC_THR2, 0x2D },
124 { IAR_ATT_RSSI1, 0x5F },
125 { IAR_ATT_RSSI2, 0x8F },
126 { IAR_RSSI_OFFSET, 0x61 },
127 { IAR_CHF_PMA_GAIN, 0x03 },
128 { IAR_CCA1_THRESH, 0x50 },
129 { IAR_CORR_NVAL, 0x13 },
130 { IAR_ACKDELAY, 0x3D },
131};
132
133#define MCR20A_VALID_CHANNELS (0x07FFF800)
134
135struct mcr20a_platform_data {
136 int rst_gpio;
137};
138
139#define MCR20A_MAX_BUF (127)
140
141#define printdev(X) (&X->spi->dev)
142
143/* regmap information for Direct Access Register (DAR) access */
144#define MCR20A_DAR_WRITE 0x01
145#define MCR20A_DAR_READ 0x00
146#define MCR20A_DAR_NUMREGS 0x3F
147
148/* regmap information for Indirect Access Register (IAR) access */
149#define MCR20A_IAR_ACCESS 0x80
150#define MCR20A_IAR_NUMREGS 0xBEFF
151
152/* Read/Write SPI Commands for DAR and IAR registers. */
153#define MCR20A_READSHORT(reg) ((reg) << 1)
154#define MCR20A_WRITESHORT(reg) ((reg) << 1 | 1)
155#define MCR20A_READLONG(reg) (1 << 15 | (reg) << 5)
156#define MCR20A_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
157
158/* Type definitions for link configuration of instantiable layers */
159#define MCR20A_PHY_INDIRECT_QUEUE_SIZE (12)
160
161static bool
162mcr20a_dar_writeable(struct device *dev, unsigned int reg)
163{
164 switch (reg) {
165 case DAR_IRQ_STS1:
166 case DAR_IRQ_STS2:
167 case DAR_IRQ_STS3:
168 case DAR_PHY_CTRL1:
169 case DAR_PHY_CTRL2:
170 case DAR_PHY_CTRL3:
171 case DAR_PHY_CTRL4:
172 case DAR_SRC_CTRL:
173 case DAR_SRC_ADDRS_SUM_LSB:
174 case DAR_SRC_ADDRS_SUM_MSB:
175 case DAR_T3CMP_LSB:
176 case DAR_T3CMP_MSB:
177 case DAR_T3CMP_USB:
178 case DAR_T2PRIMECMP_LSB:
179 case DAR_T2PRIMECMP_MSB:
180 case DAR_T1CMP_LSB:
181 case DAR_T1CMP_MSB:
182 case DAR_T1CMP_USB:
183 case DAR_T2CMP_LSB:
184 case DAR_T2CMP_MSB:
185 case DAR_T2CMP_USB:
186 case DAR_T4CMP_LSB:
187 case DAR_T4CMP_MSB:
188 case DAR_T4CMP_USB:
189 case DAR_PLL_INT0:
190 case DAR_PLL_FRAC0_LSB:
191 case DAR_PLL_FRAC0_MSB:
192 case DAR_PA_PWR:
193 /* no DAR_ACM */
194 case DAR_OVERWRITE_VER:
195 case DAR_CLK_OUT_CTRL:
196 case DAR_PWR_MODES:
197 return true;
198 default:
199 return false;
200 }
201}
202
203static bool
204mcr20a_dar_readable(struct device *dev, unsigned int reg)
205{
206 bool rc;
207
208 /* all writeable are also readable */
209 rc = mcr20a_dar_writeable(dev, reg);
210 if (rc)
211 return rc;
212
213 /* readonly regs */
214 switch (reg) {
215 case DAR_RX_FRM_LEN:
216 case DAR_CCA1_ED_FNL:
217 case DAR_EVENT_TMR_LSB:
218 case DAR_EVENT_TMR_MSB:
219 case DAR_EVENT_TMR_USB:
220 case DAR_TIMESTAMP_LSB:
221 case DAR_TIMESTAMP_MSB:
222 case DAR_TIMESTAMP_USB:
223 case DAR_SEQ_STATE:
224 case DAR_LQI_VALUE:
225 case DAR_RSSI_CCA_CONT:
226 return true;
227 default:
228 return false;
229 }
230}
231
232static bool
233mcr20a_dar_volatile(struct device *dev, unsigned int reg)
234{
235 /* can be changed during runtime */
236 switch (reg) {
237 case DAR_IRQ_STS1:
238 case DAR_IRQ_STS2:
239 case DAR_IRQ_STS3:
240 /* use them in spi_async and regmap so it's volatile */
241 return true;
242 default:
243 return false;
244 }
245}
246
247static bool
248mcr20a_dar_precious(struct device *dev, unsigned int reg)
249{
250 /* don't clear irq line on read */
251 switch (reg) {
252 case DAR_IRQ_STS1:
253 case DAR_IRQ_STS2:
254 case DAR_IRQ_STS3:
255 return true;
256 default:
257 return false;
258 }
259}
260
261static const struct regmap_config mcr20a_dar_regmap = {
262 .name = "mcr20a_dar",
263 .reg_bits = 8,
264 .val_bits = 8,
265 .write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE,
266 .read_flag_mask = REGISTER_ACCESS | REGISTER_READ,
267 .cache_type = REGCACHE_RBTREE,
268 .writeable_reg = mcr20a_dar_writeable,
269 .readable_reg = mcr20a_dar_readable,
270 .volatile_reg = mcr20a_dar_volatile,
271 .precious_reg = mcr20a_dar_precious,
272 .fast_io = true,
273 .can_multi_write = true,
274};
275
276static bool
277mcr20a_iar_writeable(struct device *dev, unsigned int reg)
278{
279 switch (reg) {
280 case IAR_XTAL_TRIM:
281 case IAR_PMC_LP_TRIM:
282 case IAR_MACPANID0_LSB:
283 case IAR_MACPANID0_MSB:
284 case IAR_MACSHORTADDRS0_LSB:
285 case IAR_MACSHORTADDRS0_MSB:
286 case IAR_MACLONGADDRS0_0:
287 case IAR_MACLONGADDRS0_8:
288 case IAR_MACLONGADDRS0_16:
289 case IAR_MACLONGADDRS0_24:
290 case IAR_MACLONGADDRS0_32:
291 case IAR_MACLONGADDRS0_40:
292 case IAR_MACLONGADDRS0_48:
293 case IAR_MACLONGADDRS0_56:
294 case IAR_RX_FRAME_FILTER:
295 case IAR_PLL_INT1:
296 case IAR_PLL_FRAC1_LSB:
297 case IAR_PLL_FRAC1_MSB:
298 case IAR_MACPANID1_LSB:
299 case IAR_MACPANID1_MSB:
300 case IAR_MACSHORTADDRS1_LSB:
301 case IAR_MACSHORTADDRS1_MSB:
302 case IAR_MACLONGADDRS1_0:
303 case IAR_MACLONGADDRS1_8:
304 case IAR_MACLONGADDRS1_16:
305 case IAR_MACLONGADDRS1_24:
306 case IAR_MACLONGADDRS1_32:
307 case IAR_MACLONGADDRS1_40:
308 case IAR_MACLONGADDRS1_48:
309 case IAR_MACLONGADDRS1_56:
310 case IAR_DUAL_PAN_CTRL:
311 case IAR_DUAL_PAN_DWELL:
312 case IAR_CCA1_THRESH:
313 case IAR_CCA1_ED_OFFSET_COMP:
314 case IAR_LQI_OFFSET_COMP:
315 case IAR_CCA_CTRL:
316 case IAR_CCA2_CORR_PEAKS:
317 case IAR_CCA2_CORR_THRESH:
318 case IAR_TMR_PRESCALE:
319 case IAR_ANT_PAD_CTRL:
320 case IAR_MISC_PAD_CTRL:
321 case IAR_BSM_CTRL:
322 case IAR_RNG:
323 case IAR_RX_WTR_MARK:
324 case IAR_SOFT_RESET:
325 case IAR_TXDELAY:
326 case IAR_ACKDELAY:
327 case IAR_CORR_NVAL:
328 case IAR_ANT_AGC_CTRL:
329 case IAR_AGC_THR1:
330 case IAR_AGC_THR2:
331 case IAR_PA_CAL:
332 case IAR_ATT_RSSI1:
333 case IAR_ATT_RSSI2:
334 case IAR_RSSI_OFFSET:
335 case IAR_XTAL_CTRL:
336 case IAR_CHF_PMA_GAIN:
337 case IAR_CHF_IBUF:
338 case IAR_CHF_QBUF:
339 case IAR_CHF_IRIN:
340 case IAR_CHF_QRIN:
341 case IAR_CHF_IL:
342 case IAR_CHF_QL:
343 case IAR_CHF_CC1:
344 case IAR_CHF_CCL:
345 case IAR_CHF_CC2:
346 case IAR_CHF_IROUT:
347 case IAR_CHF_QROUT:
348 case IAR_PA_TUNING:
349 case IAR_VCO_CTRL1:
350 case IAR_VCO_CTRL2:
351 return true;
352 default:
353 return false;
354 }
355}
356
357static bool
358mcr20a_iar_readable(struct device *dev, unsigned int reg)
359{
360 bool rc;
361
362 /* all writeable are also readable */
363 rc = mcr20a_iar_writeable(dev, reg);
364 if (rc)
365 return rc;
366
367 /* readonly regs */
368 switch (reg) {
369 case IAR_PART_ID:
370 case IAR_DUAL_PAN_STS:
371 case IAR_RX_BYTE_COUNT:
372 case IAR_FILTERFAIL_CODE1:
373 case IAR_FILTERFAIL_CODE2:
374 case IAR_RSSI:
375 return true;
376 default:
377 return false;
378 }
379}
380
381static bool
382mcr20a_iar_volatile(struct device *dev, unsigned int reg)
383{
384/* can be changed during runtime */
385 switch (reg) {
386 case IAR_DUAL_PAN_STS:
387 case IAR_RX_BYTE_COUNT:
388 case IAR_FILTERFAIL_CODE1:
389 case IAR_FILTERFAIL_CODE2:
390 case IAR_RSSI:
391 return true;
392 default:
393 return false;
394 }
395}
396
397static const struct regmap_config mcr20a_iar_regmap = {
398 .name = "mcr20a_iar",
399 .reg_bits = 16,
400 .val_bits = 8,
401 .write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX,
402 .read_flag_mask = REGISTER_ACCESS | REGISTER_READ | IAR_INDEX,
403 .cache_type = REGCACHE_RBTREE,
404 .writeable_reg = mcr20a_iar_writeable,
405 .readable_reg = mcr20a_iar_readable,
406 .volatile_reg = mcr20a_iar_volatile,
407 .fast_io = true,
408};
409
410struct mcr20a_local {
411 struct spi_device *spi;
412
413 struct ieee802154_hw *hw;
414 struct mcr20a_platform_data *pdata;
415 struct regmap *regmap_dar;
416 struct regmap *regmap_iar;
417
418 u8 *buf;
419
420 bool is_tx;
421
422 /* for writing tx buffer */
423 struct spi_message tx_buf_msg;
424 u8 tx_header[1];
425 /* burst buffer write command */
426 struct spi_transfer tx_xfer_header;
427 u8 tx_len[1];
428 /* len of tx packet */
429 struct spi_transfer tx_xfer_len;
430 /* data of tx packet */
431 struct spi_transfer tx_xfer_buf;
432 struct sk_buff *tx_skb;
433
434 /* for read length rxfifo */
435 struct spi_message reg_msg;
436 u8 reg_cmd[1];
437 u8 reg_data[MCR20A_IRQSTS_NUM];
438 struct spi_transfer reg_xfer_cmd;
439 struct spi_transfer reg_xfer_data;
440
441 /* receive handling */
442 struct spi_message rx_buf_msg;
443 u8 rx_header[1];
444 struct spi_transfer rx_xfer_header;
445 u8 rx_lqi[1];
446 struct spi_transfer rx_xfer_lqi;
447 u8 rx_buf[MCR20A_MAX_BUF];
448 struct spi_transfer rx_xfer_buf;
449
450 /* isr handling for reading intstat */
451 struct spi_message irq_msg;
452 u8 irq_header[1];
453 u8 irq_data[MCR20A_IRQSTS_NUM];
454 struct spi_transfer irq_xfer_data;
455 struct spi_transfer irq_xfer_header;
456};
457
458static void
459mcr20a_write_tx_buf_complete(void *context)
460{
461 struct mcr20a_local *lp = context;
462 int ret;
463
464 dev_dbg(printdev(lp), "%s\n", __func__);
465
466 lp->reg_msg.complete = NULL;
467 lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
468 lp->reg_data[0] = MCR20A_XCVSEQ_TX;
469 lp->reg_xfer_data.len = 1;
470
471 ret = spi_async(lp->spi, &lp->reg_msg);
472 if (ret)
473 dev_err(printdev(lp), "failed to set SEQ TX\n");
474}
475
476static int
477mcr20a_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
478{
479 struct mcr20a_local *lp = hw->priv;
480
481 dev_dbg(printdev(lp), "%s\n", __func__);
482
483 lp->tx_skb = skb;
484
485 print_hex_dump_debug("mcr20a tx: ", DUMP_PREFIX_OFFSET, 16, 1,
486 skb->data, skb->len, 0);
487
488 lp->is_tx = 1;
489
490 lp->reg_msg.complete = NULL;
491 lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
492 lp->reg_data[0] = MCR20A_XCVSEQ_IDLE;
493 lp->reg_xfer_data.len = 1;
494
495 return spi_async(lp->spi, &lp->reg_msg);
496}
497
498static int
499mcr20a_ed(struct ieee802154_hw *hw, u8 *level)
500{
501 WARN_ON(!level);
502 *level = 0xbe;
503 return 0;
504}
505
506static int
507mcr20a_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
508{
509 struct mcr20a_local *lp = hw->priv;
510 int ret;
511
512 dev_dbg(printdev(lp), "%s\n", __func__);
513
514 /* freqency = ((PLL_INT+64) + (PLL_FRAC/65536)) * 32 MHz */
515 ret = regmap_write(lp->regmap_dar, DAR_PLL_INT0, PLL_INT[channel - 11]);
516 if (ret)
517 return ret;
518 ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_LSB, 0x00);
519 if (ret)
520 return ret;
521 ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_MSB,
522 PLL_FRAC[channel - 11]);
523 if (ret)
524 return ret;
525
526 return 0;
527}
528
529static int
530mcr20a_start(struct ieee802154_hw *hw)
531{
532 struct mcr20a_local *lp = hw->priv;
533 int ret;
534
535 dev_dbg(printdev(lp), "%s\n", __func__);
536
537 /* No slotted operation */
538 dev_dbg(printdev(lp), "no slotted operation\n");
539 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
540 DAR_PHY_CTRL1_SLOTTED, 0x0);
541
542 /* enable irq */
543 enable_irq(lp->spi->irq);
544
545 /* Unmask SEQ interrupt */
546 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
547 DAR_PHY_CTRL2_SEQMSK, 0x0);
548
549 /* Start the RX sequence */
550 dev_dbg(printdev(lp), "start the RX sequence\n");
551 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
552 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
553
554 return 0;
555}
556
557static void
558mcr20a_stop(struct ieee802154_hw *hw)
559{
560 struct mcr20a_local *lp = hw->priv;
561
562 dev_dbg(printdev(lp), "%s\n", __func__);
563
564 /* stop all running sequence */
565 regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
566 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);
567
568 /* disable irq */
569 disable_irq(lp->spi->irq);
570}
571
572static int
573mcr20a_set_hw_addr_filt(struct ieee802154_hw *hw,
574 struct ieee802154_hw_addr_filt *filt,
575 unsigned long changed)
576{
577 struct mcr20a_local *lp = hw->priv;
578
579 dev_dbg(printdev(lp), "%s\n", __func__);
580
581 if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
582 u16 addr = le16_to_cpu(filt->short_addr);
583
584 regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_LSB, addr);
585 regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_MSB, addr >> 8);
586 }
587
588 if (changed & IEEE802154_AFILT_PANID_CHANGED) {
589 u16 pan = le16_to_cpu(filt->pan_id);
590
591 regmap_write(lp->regmap_iar, IAR_MACPANID0_LSB, pan);
592 regmap_write(lp->regmap_iar, IAR_MACPANID0_MSB, pan >> 8);
593 }
594
595 if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
596 u8 addr[8], i;
597
598 memcpy(addr, &filt->ieee_addr, 8);
599 for (i = 0; i < 8; i++)
600 regmap_write(lp->regmap_iar,
601 IAR_MACLONGADDRS0_0 + i, addr[i]);
602 }
603
604 if (changed & IEEE802154_AFILT_PANC_CHANGED) {
605 if (filt->pan_coord) {
606 regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
607 DAR_PHY_CTRL4_PANCORDNTR0, 0x10);
608 } else {
609 regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
610 DAR_PHY_CTRL4_PANCORDNTR0, 0x00);
611 }
612 }
613
614 return 0;
615}
616
617/* -30 dBm to 10 dBm */
618#define MCR20A_MAX_TX_POWERS 0x14
619static const s32 mcr20a_powers[MCR20A_MAX_TX_POWERS + 1] = {
620 -3000, -2800, -2600, -2400, -2200, -2000, -1800, -1600, -1400,
621 -1200, -1000, -800, -600, -400, -200, 0, 200, 400, 600, 800, 1000
622};
623
624static int
625mcr20a_set_txpower(struct ieee802154_hw *hw, s32 mbm)
626{
627 struct mcr20a_local *lp = hw->priv;
628 u32 i;
629
630 dev_dbg(printdev(lp), "%s(%d)\n", __func__, mbm);
631
632 for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
633 if (lp->hw->phy->supported.tx_powers[i] == mbm)
634 return regmap_write(lp->regmap_dar, DAR_PA_PWR,
635 ((i + 8) & 0x1F));
636 }
637
638 return -EINVAL;
639}
640
641#define MCR20A_MAX_ED_LEVELS MCR20A_MIN_CCA_THRESHOLD
642static s32 mcr20a_ed_levels[MCR20A_MAX_ED_LEVELS + 1];
643
644static int
645mcr20a_set_cca_mode(struct ieee802154_hw *hw,
646 const struct wpan_phy_cca *cca)
647{
648 struct mcr20a_local *lp = hw->priv;
649 unsigned int cca_mode = 0xff;
650 bool cca_mode_and = false;
651 int ret;
652
653 dev_dbg(printdev(lp), "%s\n", __func__);
654
655 /* mapping 802.15.4 to driver spec */
656 switch (cca->mode) {
657 case NL802154_CCA_ENERGY:
658 cca_mode = MCR20A_CCA_MODE1;
659 break;
660 case NL802154_CCA_CARRIER:
661 cca_mode = MCR20A_CCA_MODE2;
662 break;
663 case NL802154_CCA_ENERGY_CARRIER:
664 switch (cca->opt) {
665 case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
666 cca_mode = MCR20A_CCA_MODE3;
667 cca_mode_and = true;
668 break;
669 case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
670 cca_mode = MCR20A_CCA_MODE3;
671 cca_mode_and = false;
672 break;
673 default:
674 return -EINVAL;
675 }
676 break;
677 default:
678 return -EINVAL;
679 }
680 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
681 DAR_PHY_CTRL4_CCATYPE_MASK,
682 cca_mode << DAR_PHY_CTRL4_CCATYPE_SHIFT);
683 if (ret < 0)
684 return ret;
685
686 if (cca_mode == MCR20A_CCA_MODE3) {
687 if (cca_mode_and) {
688 ret = regmap_update_bits(lp->regmap_iar, IAR_CCA_CTRL,
689 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
690 0x08);
691 } else {
692 ret = regmap_update_bits(lp->regmap_iar,
693 IAR_CCA_CTRL,
694 IAR_CCA_CTRL_CCA3_AND_NOT_OR,
695 0x00);
696 }
697 if (ret < 0)
698 return ret;
699 }
700
701 return ret;
702}
703
704static int
705mcr20a_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
706{
707 struct mcr20a_local *lp = hw->priv;
708 u32 i;
709
710 dev_dbg(printdev(lp), "%s\n", __func__);
711
712 for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
713 if (hw->phy->supported.cca_ed_levels[i] == mbm)
714 return regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, i);
715 }
716
717 return 0;
718}
719
static int
mcr20a_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
{
	struct mcr20a_local *lp = hw->priv;
	int ret;
	u8 rx_frame_filter_reg = 0x0;

	dev_dbg(printdev(lp), "%s(%d)\n", __func__, on);

	if (on) {
		/* All frame types accepted */
		rx_frame_filter_reg &= ~(IAR_RX_FRAME_FLT_FRM_VER);
		rx_frame_filter_reg |= (IAR_RX_FRAME_FLT_ACK_FT |
					IAR_RX_FRAME_FLT_NS_FT);

		ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					 DAR_PHY_CTRL4_PROMISCUOUS,
					 DAR_PHY_CTRL4_PROMISCUOUS);
		if (ret < 0)
			return ret;

		ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
				   rx_frame_filter_reg);
		if (ret < 0)
			return ret;
	} else {
		ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
					 DAR_PHY_CTRL4_PROMISCUOUS, 0x0);
		if (ret < 0)
			return ret;

		ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
				   IAR_RX_FRAME_FLT_FRM_VER |
				   IAR_RX_FRAME_FLT_BEACON_FT |
				   IAR_RX_FRAME_FLT_DATA_FT |
				   IAR_RX_FRAME_FLT_CMD_FT);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static const struct ieee802154_ops mcr20a_hw_ops = {
	.owner			= THIS_MODULE,
	.xmit_async		= mcr20a_xmit,
	.ed			= mcr20a_ed,
	.set_channel		= mcr20a_set_channel,
	.start			= mcr20a_start,
	.stop			= mcr20a_stop,
	.set_hw_addr_filt	= mcr20a_set_hw_addr_filt,
	.set_txpower		= mcr20a_set_txpower,
	.set_cca_mode		= mcr20a_set_cca_mode,
	.set_cca_ed_level	= mcr20a_set_cca_ed_level,
	.set_promiscuous_mode	= mcr20a_set_promiscuous_mode,
};

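/*
 * Receive path: mcr20a_request_rx() (re)arms the RX sequence; when the
 * sequence IRQ fires, mcr20a_handle_rx() asynchronously reads DAR_RX_FRM_LEN,
 * mcr20a_handle_rx_read_len_complete() then burst-reads the packet buffer,
 * and mcr20a_handle_rx_read_buf_complete() pushes the frame to mac802154 and
 * re-arms RX.  Everything runs from SPI completion callbacks, hence the
 * async regmap/SPI calls.
 */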
static int
mcr20a_request_rx(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);

	/* Start the RX sequence */
	regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);

	return 0;
}

static void
mcr20a_handle_rx_read_buf_complete(void *context)
{
	struct mcr20a_local *lp = context;
	u8 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
	struct sk_buff *skb;

	dev_dbg(printdev(lp), "%s\n", __func__);

	dev_dbg(printdev(lp), "RX is done\n");

	if (!ieee802154_is_valid_psdu_len(len)) {
		dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
		len = IEEE802154_MTU;
	}

	len = len - 2; /* strip the 2-byte frame check sequence */

	skb = dev_alloc_skb(len);
	if (!skb)
		return;

	memcpy(skb_put(skb, len), lp->rx_buf, len);
	ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);

	print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1,
			     lp->rx_buf, len, 0);
	pr_debug("mcr20a rx: lqi: %02hhx\n", lp->rx_lqi[0]);

	/* start RX sequence */
	mcr20a_request_rx(lp);
}

static void
mcr20a_handle_rx_read_len_complete(void *context)
{
	struct mcr20a_local *lp = context;
	u8 len;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* get the length of the received frame */
	len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
	dev_dbg(printdev(lp), "frame len : %d\n", len);

	/* prepare to read the rx buf */
	lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
	lp->rx_header[0] = MCR20A_BURST_READ_PACKET_BUF;
	lp->rx_xfer_buf.len = len;

	ret = spi_async(lp->spi, &lp->rx_buf_msg);
	if (ret)
		dev_err(printdev(lp), "failed to read rx buffer\n");
}

static int
mcr20a_handle_rx(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);
	lp->reg_msg.complete = mcr20a_handle_rx_read_len_complete;
	lp->reg_cmd[0] = MCR20A_READ_REG(DAR_RX_FRM_LEN);
	lp->reg_xfer_data.len = 1;

	return spi_async(lp->spi, &lp->reg_msg);
}

static int
mcr20a_handle_tx_complete(struct mcr20a_local *lp)
{
	dev_dbg(printdev(lp), "%s\n", __func__);

	ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);

	return mcr20a_request_rx(lp);
}

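/*
 * Transmit path: the TX SPI message built in mcr20a_setup_tx_spi_messages()
 * is three transfers: the burst-write command byte, one psduLength byte
 * (payload length plus the 2-byte FCS that the transceiver appends, as the
 * driver advertises IEEE802154_HW_TX_OMIT_CKSUM), and the payload itself.
 */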
static int
mcr20a_handle_tx(struct mcr20a_local *lp)
{
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* write tx buffer */
	lp->tx_header[0] = MCR20A_BURST_WRITE_PACKET_BUF;
	/* add 2 bytes of FCS */
	lp->tx_len[0] = lp->tx_skb->len + 2;
	lp->tx_xfer_buf.tx_buf = lp->tx_skb->data;
	/* add 1 byte psduLength */
	lp->tx_xfer_buf.len = lp->tx_skb->len + 1;

	ret = spi_async(lp->spi, &lp->tx_buf_msg);
	if (ret) {
		dev_err(printdev(lp), "SPI write failed for TX buf\n");
		return ret;
	}

	return 0;
}

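/*
 * IRQ completion handler.  seq_state is IRQSTS1 masked with the XCVSEQ mask
 * (low three bits); judging from the cases handled below these correspond to
 * the SEQ/TX/RX interrupt flags: 0x03 means a TX sequence finished without
 * an ACK frame, 0x07 means TX finished with an ACK (or a frame arrived while
 * not transmitting), 0x05 means a frame is being received, and 0x01 is a
 * bare sequence-complete event.
 */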
static void
mcr20a_irq_clean_complete(void *context)
{
	struct mcr20a_local *lp = context;
	u8 seq_state = lp->irq_data[DAR_IRQ_STS1] & DAR_PHY_CTRL1_XCVSEQ_MASK;

	dev_dbg(printdev(lp), "%s\n", __func__);

	enable_irq(lp->spi->irq);

	dev_dbg(printdev(lp), "IRQ STA1 (%02x) STA2 (%02x)\n",
		lp->irq_data[DAR_IRQ_STS1], lp->irq_data[DAR_IRQ_STS2]);

	switch (seq_state) {
	/* TX IRQ, RX IRQ and SEQ IRQ */
	case (0x03):
		if (lp->is_tx) {
			lp->is_tx = 0;
			dev_dbg(printdev(lp), "TX is done. No ACK\n");
			mcr20a_handle_tx_complete(lp);
		}
		break;
	case (0x05):
		/* rx is starting */
		dev_dbg(printdev(lp), "RX is starting\n");
		mcr20a_handle_rx(lp);
		break;
	case (0x07):
		if (lp->is_tx) {
			/* tx is done */
			lp->is_tx = 0;
			dev_dbg(printdev(lp), "TX is done. Got ACK\n");
			mcr20a_handle_tx_complete(lp);
		} else {
			/* rx is starting */
			dev_dbg(printdev(lp), "RX is starting\n");
			mcr20a_handle_rx(lp);
		}
		break;
	case (0x01):
		if (lp->is_tx) {
			dev_dbg(printdev(lp), "TX is starting\n");
			mcr20a_handle_tx(lp);
		} else {
			dev_dbg(printdev(lp), "MCR20A is stopped\n");
		}
		break;
	}
}

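/*
 * Acknowledge the interrupt: force the transceiver back to IDLE, then write
 * the latched status bytes back to IRQSTS1/IRQSTS2 (the status bits are
 * cleared by writing them back, the same scheme mcr20a_phy_init() relies on),
 * with mcr20a_irq_clean_complete() chained as the completion handler.
 */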
static void mcr20a_irq_status_complete(void *context)
{
	int ret;
	struct mcr20a_local *lp = context;

	dev_dbg(printdev(lp), "%s\n", __func__);
	regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);

	lp->reg_msg.complete = mcr20a_irq_clean_complete;
	lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_IRQ_STS1);
	memcpy(lp->reg_data, lp->irq_data, MCR20A_IRQSTS_NUM);
	lp->reg_xfer_data.len = MCR20A_IRQSTS_NUM;

	ret = spi_async(lp->spi, &lp->reg_msg);
	if (ret)
		dev_err(printdev(lp), "failed to clear irq status\n");
}

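/*
 * Hard IRQ handler: the interrupt line is disabled here and only re-enabled
 * from mcr20a_irq_clean_complete() once the status registers have been read
 * and acknowledged, so all register access stays out of hard-IRQ context.
 */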
static irqreturn_t mcr20a_irq_isr(int irq, void *data)
{
	struct mcr20a_local *lp = data;
	int ret;

	disable_irq_nosync(irq);

	lp->irq_header[0] = MCR20A_READ_REG(DAR_IRQ_STS1);
	/* read IRQSTSx */
	ret = spi_async(lp->spi, &lp->irq_msg);
	if (ret) {
		enable_irq(irq);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

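/*
 * Platform data currently comes from the devicetree only.  An illustrative
 * node (numeric values are placeholders and must match the board wiring):
 *
 *	mcr20a@0 {
 *		compatible = "nxp,mcr20a";
 *		reg = <0>;
 *		spi-max-frequency = <9000000>;
 *		interrupt-parent = <&gpio>;
 *		interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
 *		rst_b-gpio = <&gpio 27 GPIO_ACTIVE_HIGH>;
 *	};
 */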
static int mcr20a_get_platform_data(struct spi_device *spi,
				    struct mcr20a_platform_data *pdata)
{
	int ret = 0;

	if (!spi->dev.of_node)
		return -EINVAL;

	pdata->rst_gpio = of_get_named_gpio(spi->dev.of_node, "rst_b-gpio", 0);
	dev_dbg(&spi->dev, "rst_b-gpio: %d\n", pdata->rst_gpio);

	return ret;
}

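/*
 * PHY capabilities.  The CCA ED level table is filled with -i * 100 mbm for
 * i = 0..MCR20A_MIN_CCA_THRESHOLD (0 dBm down to -110 dBm in 1 dB steps), so
 * a table index is also the raw IAR_CCA1_THRESH value.  The default
 * cca_ed_level at index 75 (-75 dBm) matches the 0x4B threshold programmed
 * in mcr20a_phy_init(), and the default TX power index 0x0F is 0 dBm.
 */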
static void mcr20a_hw_setup(struct mcr20a_local *lp)
{
	u8 i;
	struct ieee802154_hw *hw = lp->hw;
	struct wpan_phy *phy = lp->hw->phy;

	dev_dbg(printdev(lp), "%s\n", __func__);

	phy->symbol_duration = 16;
	phy->lifs_period = 40;
	phy->sifs_period = 12;

	hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
		    IEEE802154_HW_AFILT |
		    IEEE802154_HW_PROMISCUOUS;

	phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
		     WPAN_PHY_FLAG_CCA_MODE;

	phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
		BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
	phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
		BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);

	/* initialize cca_ed_levels */
	for (i = MCR20A_MAX_CCA_THRESHOLD; i < MCR20A_MIN_CCA_THRESHOLD + 1;
	     ++i) {
		mcr20a_ed_levels[i] = -i * 100;
	}

	phy->supported.cca_ed_levels = mcr20a_ed_levels;
	phy->supported.cca_ed_levels_size = ARRAY_SIZE(mcr20a_ed_levels);

	phy->cca.mode = NL802154_CCA_ENERGY;

	phy->supported.channels[0] = MCR20A_VALID_CHANNELS;
	phy->current_page = 0;
	/* MCR20A default reset value */
	phy->current_channel = 20;
	phy->supported.tx_powers = mcr20a_powers;
	phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers);
	phy->cca_ed_level = phy->supported.cca_ed_levels[75];
	phy->transmit_power = phy->supported.tx_powers[0x0F];
}

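/*
 * The SPI messages below are built once at probe time and reused for every
 * transfer; only the per-transfer fields (lengths, buffers, completion
 * callbacks) are patched before each spi_async() call.  The TX message is
 * three transfers: command byte, psduLength byte, payload.
 */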
static void
mcr20a_setup_tx_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->tx_buf_msg);
	lp->tx_buf_msg.context = lp;
	lp->tx_buf_msg.complete = mcr20a_write_tx_buf_complete;

	lp->tx_xfer_header.len = 1;
	lp->tx_xfer_header.tx_buf = lp->tx_header;

	lp->tx_xfer_len.len = 1;
	lp->tx_xfer_len.tx_buf = lp->tx_len;

	spi_message_add_tail(&lp->tx_xfer_header, &lp->tx_buf_msg);
	spi_message_add_tail(&lp->tx_xfer_len, &lp->tx_buf_msg);
	spi_message_add_tail(&lp->tx_xfer_buf, &lp->tx_buf_msg);
}

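/*
 * Two RX-side messages: reg_msg is a generic one-byte-command plus data
 * transfer used for the frame-length read and IRQ status clean-up, while
 * rx_buf_msg burst-reads the packet buffer followed by a single LQI byte.
 * The command transfer points tx_buf and rx_buf at the same buffer, so
 * whatever the transceiver shifts out during the command byte is captured
 * as well.
 */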
static void
mcr20a_setup_rx_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->reg_msg);
	lp->reg_msg.context = lp;

	lp->reg_xfer_cmd.len = 1;
	lp->reg_xfer_cmd.tx_buf = lp->reg_cmd;
	lp->reg_xfer_cmd.rx_buf = lp->reg_cmd;

	lp->reg_xfer_data.rx_buf = lp->reg_data;
	lp->reg_xfer_data.tx_buf = lp->reg_data;

	spi_message_add_tail(&lp->reg_xfer_cmd, &lp->reg_msg);
	spi_message_add_tail(&lp->reg_xfer_data, &lp->reg_msg);

	spi_message_init(&lp->rx_buf_msg);
	lp->rx_buf_msg.context = lp;
	lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
	lp->rx_xfer_header.len = 1;
	lp->rx_xfer_header.tx_buf = lp->rx_header;
	lp->rx_xfer_header.rx_buf = lp->rx_header;

	lp->rx_xfer_buf.rx_buf = lp->rx_buf;

	lp->rx_xfer_lqi.len = 1;
	lp->rx_xfer_lqi.rx_buf = lp->rx_lqi;

	spi_message_add_tail(&lp->rx_xfer_header, &lp->rx_buf_msg);
	spi_message_add_tail(&lp->rx_xfer_buf, &lp->rx_buf_msg);
	spi_message_add_tail(&lp->rx_xfer_lqi, &lp->rx_buf_msg);
}

static void
mcr20a_setup_irq_spi_messages(struct mcr20a_local *lp)
{
	spi_message_init(&lp->irq_msg);
	lp->irq_msg.context = lp;
	lp->irq_msg.complete = mcr20a_irq_status_complete;
	lp->irq_xfer_header.len = 1;
	lp->irq_xfer_header.tx_buf = lp->irq_header;
	lp->irq_xfer_header.rx_buf = lp->irq_header;

	lp->irq_xfer_data.len = MCR20A_IRQSTS_NUM;
	lp->irq_xfer_data.rx_buf = lp->irq_data;

	spi_message_add_tail(&lp->irq_xfer_header, &lp->irq_msg);
	spi_message_add_tail(&lp->irq_xfer_data, &lp->irq_msg);
}

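/*
 * One-time radio bring-up after reset: clear every pending interrupt source,
 * enable AUTOACK, apply the register overwrite tables for overwrite version
 * 0x0C, flush the source-address-matching (indirect) queue, and set the
 * defaults used at runtime (CCA threshold -75 dBm, 16 us timer base,
 * autodoze enabled, CLK_OUT disabled).
 */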
static int
mcr20a_phy_init(struct mcr20a_local *lp)
{
	u8 index;
	unsigned int phy_reg = 0;
	int ret;

	dev_dbg(printdev(lp), "%s\n", __func__);

	/* Disable Tristate on COCO MISO for SPI reads */
	ret = regmap_write(lp->regmap_iar, IAR_MISC_PAD_CTRL, 0x02);
	if (ret)
		goto err_ret;

	/* Clear all PP IRQ bits in IRQSTS1 to avoid unexpected interrupts
	 * immediately after init
	 */
	ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS1, 0xEF);
	if (ret)
		goto err_ret;

	/* Clear all PP IRQ bits in IRQSTS2 */
	ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS2,
			   DAR_IRQSTS2_ASM_IRQ | DAR_IRQSTS2_PB_ERR_IRQ |
			   DAR_IRQSTS2_WAKE_IRQ);
	if (ret)
		goto err_ret;

	/* Disable all timer interrupts */
	ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS3, 0xFF);
	if (ret)
		goto err_ret;

	/* PHY_CTRL1 : default HW settings + AUTOACK enabled */
	ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
				 DAR_PHY_CTRL1_AUTOACK, DAR_PHY_CTRL1_AUTOACK);
	if (ret)
		goto err_ret;

	/* PHY_CTRL2 : disable all interrupts */
	ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL2, 0xFF);
	if (ret)
		goto err_ret;

	/* PHY_CTRL3 : disable all timers and remaining interrupts */
	ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL3,
			   DAR_PHY_CTRL3_ASM_MSK | DAR_PHY_CTRL3_PB_ERR_MSK |
			   DAR_PHY_CTRL3_WAKE_MSK);
	if (ret)
		goto err_ret;

	/* SRC_CTRL : enable Acknowledge Frame Pending and
	 * Source Address Matching Enable
	 */
	ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL,
			   DAR_SRC_CTRL_ACK_FRM_PND |
			   (DAR_SRC_CTRL_INDEX << DAR_SRC_CTRL_INDEX_SHIFT));
	if (ret)
		goto err_ret;

	/* RX_FRAME_FILTER */
	/* FRM_VER[1:0] = b11. Accept FrameVersion 0 and 1 packets */
	ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
			   IAR_RX_FRAME_FLT_FRM_VER |
			   IAR_RX_FRAME_FLT_BEACON_FT |
			   IAR_RX_FRAME_FLT_DATA_FT |
			   IAR_RX_FRAME_FLT_CMD_FT);
	if (ret)
		goto err_ret;

	dev_info(printdev(lp), "MCR20A DAR overwrites version: 0x%02x\n",
		 MCR20A_OVERWRITE_VERSION);

	/* Overwrite direct registers */
	ret = regmap_write(lp->regmap_dar, DAR_OVERWRITE_VER,
			   MCR20A_OVERWRITE_VERSION);
	if (ret)
		goto err_ret;

	/* Overwrite indirect registers */
	ret = regmap_multi_reg_write(lp->regmap_iar, mar20a_iar_overwrites,
				     ARRAY_SIZE(mar20a_iar_overwrites));
	if (ret)
		goto err_ret;

	/* Clear HW indirect queue */
	dev_dbg(printdev(lp), "clear HW indirect queue\n");
	for (index = 0; index < MCR20A_PHY_INDIRECT_QUEUE_SIZE; index++) {
		phy_reg = (u8)(((index & DAR_SRC_CTRL_INDEX) <<
				DAR_SRC_CTRL_INDEX_SHIFT)
			       | (DAR_SRC_CTRL_SRCADDR_EN)
			       | (DAR_SRC_CTRL_INDEX_DISABLE));
		ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL, phy_reg);
		if (ret)
			goto err_ret;
		phy_reg = 0;
	}

	/* Assign HW Indirect hash table to PAN0 */
	ret = regmap_read(lp->regmap_iar, IAR_DUAL_PAN_CTRL, &phy_reg);
	if (ret)
		goto err_ret;

	/* Clear current lvl */
	phy_reg &= ~IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_MSK;

	/* Set new lvl */
	phy_reg |= MCR20A_PHY_INDIRECT_QUEUE_SIZE <<
		   IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_SHIFT;
	ret = regmap_write(lp->regmap_iar, IAR_DUAL_PAN_CTRL, phy_reg);
	if (ret)
		goto err_ret;

	/* Set CCA threshold to -75 dBm */
	ret = regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, 0x4B);
	if (ret)
		goto err_ret;

	/* Set prescaler to obtain a 1 symbol (16 us) timebase */
	ret = regmap_write(lp->regmap_iar, IAR_TMR_PRESCALE, 0x05);
	if (ret)
		goto err_ret;

	/* Enable autodoze mode. */
	ret = regmap_update_bits(lp->regmap_dar, DAR_PWR_MODES,
				 DAR_PWR_MODES_AUTODOZE,
				 DAR_PWR_MODES_AUTODOZE);
	if (ret)
		goto err_ret;

	/* Disable clk_out */
	ret = regmap_update_bits(lp->regmap_dar, DAR_CLK_OUT_CTRL,
				 DAR_CLK_OUT_CTRL_EN, 0x0);
	if (ret)
		goto err_ret;

	return 0;

err_ret:
	return ret;
}

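/*
 * Probe: pulse the reset line (if a reset GPIO is described), allocate the
 * ieee802154 device, set up the reusable SPI messages and the DAR/IAR
 * regmaps, run mcr20a_phy_init(), and request the interrupt.  The IRQ is
 * left disabled here and is only enabled once the hardware is started via
 * the .start callback.
 */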
static int
mcr20a_probe(struct spi_device *spi)
{
	struct ieee802154_hw *hw;
	struct mcr20a_local *lp;
	struct mcr20a_platform_data *pdata;
	int irq_type;
	int ret = -ENOMEM;

	dev_dbg(&spi->dev, "%s\n", __func__);

	if (!spi->irq) {
		dev_err(&spi->dev, "no IRQ specified\n");
		return -EINVAL;
	}

	pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* set mcr20a platform data */
	ret = mcr20a_get_platform_data(spi, pdata);
	if (ret < 0) {
		dev_crit(&spi->dev, "mcr20a_get_platform_data failed\n");
		goto free_pdata;
	}

	/* init reset gpio */
	if (gpio_is_valid(pdata->rst_gpio)) {
		ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio,
					    GPIOF_OUT_INIT_HIGH, "reset");
		if (ret)
			goto free_pdata;
	}

	/* reset mcr20a */
	if (gpio_is_valid(pdata->rst_gpio)) {
		usleep_range(10, 20);
		gpio_set_value_cansleep(pdata->rst_gpio, 0);
		usleep_range(10, 20);
		gpio_set_value_cansleep(pdata->rst_gpio, 1);
		usleep_range(120, 240);
	}

	/* allocate ieee802154_hw and private data */
	hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
	if (!hw) {
		dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
		ret = -ENOMEM;
		goto free_pdata;
	}

	/* init mcr20a local data */
	lp = hw->priv;
	lp->hw = hw;
	lp->spi = spi;
	lp->spi->dev.platform_data = pdata;
	lp->pdata = pdata;

	/* init ieee802154_hw */
	hw->parent = &spi->dev;
	ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);

	/* init buf */
	lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL);
	if (!lp->buf) {
		ret = -ENOMEM;
		goto free_dev;
	}

	mcr20a_setup_tx_spi_messages(lp);
	mcr20a_setup_rx_spi_messages(lp);
	mcr20a_setup_irq_spi_messages(lp);

	/* setup regmap */
	lp->regmap_dar = devm_regmap_init_spi(spi, &mcr20a_dar_regmap);
	if (IS_ERR(lp->regmap_dar)) {
		ret = PTR_ERR(lp->regmap_dar);
		dev_err(&spi->dev, "Failed to allocate dar map: %d\n", ret);
		goto free_dev;
	}

	lp->regmap_iar = devm_regmap_init_spi(spi, &mcr20a_iar_regmap);
	if (IS_ERR(lp->regmap_iar)) {
		ret = PTR_ERR(lp->regmap_iar);
		dev_err(&spi->dev, "Failed to allocate iar map: %d\n", ret);
		goto free_dev;
	}

	mcr20a_hw_setup(lp);

	spi_set_drvdata(spi, lp);

	ret = mcr20a_phy_init(lp);
	if (ret < 0) {
		dev_crit(&spi->dev, "mcr20a_phy_init failed\n");
		goto free_dev;
	}

	irq_type = irq_get_trigger_type(spi->irq);
	if (!irq_type)
		irq_type = IRQF_TRIGGER_FALLING;

	ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr,
			       irq_type, dev_name(&spi->dev), lp);
	if (ret) {
		dev_err(&spi->dev, "could not request_irq for mcr20a\n");
		ret = -ENODEV;
		goto free_dev;
	}

	/* keep the IRQ disabled until the hardware is started */
	disable_irq(spi->irq);

	ret = ieee802154_register_hw(hw);
	if (ret) {
		dev_crit(&spi->dev, "ieee802154_register_hw failed\n");
		goto free_dev;
	}

	return ret;

free_dev:
	ieee802154_free_hw(lp->hw);
free_pdata:
	kfree(pdata);

	return ret;
}

static int mcr20a_remove(struct spi_device *spi)
{
	struct mcr20a_local *lp = spi_get_drvdata(spi);

	dev_dbg(&spi->dev, "%s\n", __func__);

	ieee802154_unregister_hw(lp->hw);
	ieee802154_free_hw(lp->hw);

	return 0;
}

static const struct of_device_id mcr20a_of_match[] = {
	{ .compatible = "nxp,mcr20a", },
	{ },
};
MODULE_DEVICE_TABLE(of, mcr20a_of_match);

static const struct spi_device_id mcr20a_device_id[] = {
	{ .name = "mcr20a", },
	{ },
};
MODULE_DEVICE_TABLE(spi, mcr20a_device_id);

static struct spi_driver mcr20a_driver = {
	.id_table = mcr20a_device_id,
	.driver = {
		.of_match_table = of_match_ptr(mcr20a_of_match),
		.name = "mcr20a",
	},
	.probe = mcr20a_probe,
	.remove = mcr20a_remove,
};

module_spi_driver(mcr20a_driver);

MODULE_DESCRIPTION("MCR20A Transceiver Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Xue Liu <liuxuenetmail@gmail.com>");