1// SPDX-License-Identifier: GPL-2.0
2/* NXP C45 PHY driver
3 * Copyright 2021-2023 NXP
4 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5 */
6
7#include <linux/delay.h>
8#include <linux/ethtool.h>
9#include <linux/ethtool_netlink.h>
10#include <linux/kernel.h>
11#include <linux/mii.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/phy.h>
15#include <linux/processor.h>
16#include <linux/property.h>
17#include <linux/ptp_classify.h>
18#include <linux/net_tstamp.h>
19
20#include "nxp-c45-tja11xx.h"
21
22#define PHY_ID_TJA_1103 0x001BB010
23#define PHY_ID_TJA_1120 0x001BB031
24
25#define VEND1_DEVICE_CONTROL 0x0040
26#define DEVICE_CONTROL_RESET BIT(15)
27#define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
28#define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13)
29
30#define VEND1_DEVICE_CONFIG 0x0048
31
32#define TJA1120_VEND1_EXT_TS_MODE 0x1012
33
34#define TJA1120_GLOBAL_INFRA_IRQ_ACK 0x2C08
35#define TJA1120_GLOBAL_INFRA_IRQ_EN 0x2C0A
36#define TJA1120_GLOBAL_INFRA_IRQ_STATUS 0x2C0C
37#define TJA1120_DEV_BOOT_DONE BIT(1)
38
39#define TJA1120_VEND1_PTP_TRIG_DATA_S 0x1070
40
41#define TJA1120_EGRESS_TS_DATA_S 0x9060
42#define TJA1120_EGRESS_TS_END 0x9067
43#define TJA1120_TS_VALID BIT(0)
44#define TJA1120_MORE_TS BIT(15)
45
46#define VEND1_PHY_IRQ_ACK 0x80A0
47#define VEND1_PHY_IRQ_EN 0x80A1
48#define VEND1_PHY_IRQ_STATUS 0x80A2
49#define PHY_IRQ_LINK_EVENT BIT(1)
50
51#define VEND1_ALWAYS_ACCESSIBLE 0x801F
52#define FUSA_PASS BIT(4)
53
54#define VEND1_PHY_CONTROL 0x8100
55#define PHY_CONFIG_EN BIT(14)
56#define PHY_START_OP BIT(0)
57
58#define VEND1_PHY_CONFIG 0x8108
59#define PHY_CONFIG_AUTO BIT(0)
60
61#define TJA1120_EPHY_RESETS 0x810A
62#define EPHY_PCS_RESET BIT(3)
63
64#define VEND1_SIGNAL_QUALITY 0x8320
65#define SQI_VALID BIT(14)
66#define SQI_MASK GENMASK(2, 0)
67#define MAX_SQI SQI_MASK
68
69#define CABLE_TEST_ENABLE BIT(15)
70#define CABLE_TEST_START BIT(14)
71#define CABLE_TEST_OK 0x00
72#define CABLE_TEST_SHORTED 0x01
73#define CABLE_TEST_OPEN 0x02
74#define CABLE_TEST_UNKNOWN 0x07
75
76#define VEND1_PORT_CONTROL 0x8040
77#define PORT_CONTROL_EN BIT(14)
78
79#define VEND1_PORT_ABILITIES 0x8046
80#define MACSEC_ABILITY BIT(5)
81#define PTP_ABILITY BIT(3)
82
83#define VEND1_PORT_FUNC_IRQ_EN 0x807A
84#define MACSEC_IRQS BIT(5)
85#define PTP_IRQS BIT(3)
86
87#define VEND1_PTP_IRQ_ACK 0x9008
88#define EGR_TS_IRQ BIT(1)
89
90#define VEND1_PORT_INFRA_CONTROL 0xAC00
91#define PORT_INFRA_CONTROL_EN BIT(14)
92
93#define VEND1_RXID 0xAFCC
94#define VEND1_TXID 0xAFCD
95#define ID_ENABLE BIT(15)
96
97#define VEND1_ABILITIES 0xAFC4
98#define RGMII_ID_ABILITY BIT(15)
99#define RGMII_ABILITY BIT(14)
100#define RMII_ABILITY BIT(10)
101#define REVMII_ABILITY BIT(9)
102#define MII_ABILITY BIT(8)
103#define SGMII_ABILITY BIT(0)
104
105#define VEND1_MII_BASIC_CONFIG 0xAFC6
106#define MII_BASIC_CONFIG_REV BIT(4)
107#define MII_BASIC_CONFIG_SGMII 0x9
108#define MII_BASIC_CONFIG_RGMII 0x7
109#define MII_BASIC_CONFIG_RMII 0x5
110#define MII_BASIC_CONFIG_MII 0x4
111
112#define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351
113#define EXTENDED_CNT_EN BIT(15)
114#define VEND1_MONITOR_STATUS 0xAC80
115#define MONITOR_RESET BIT(15)
116#define VEND1_MONITOR_CONFIG 0xAC86
117#define LOST_FRAMES_CNT_EN BIT(9)
118#define ALL_FRAMES_CNT_EN BIT(8)
119
120#define VEND1_SYMBOL_ERROR_COUNTER 0x8350
121#define VEND1_LINK_DROP_COUNTER 0x8352
122#define VEND1_LINK_LOSSES_AND_FAILURES 0x8353
123#define VEND1_RX_PREAMBLE_COUNT 0xAFCE
124#define VEND1_TX_PREAMBLE_COUNT 0xAFCF
125#define VEND1_RX_IPG_LENGTH 0xAFD0
126#define VEND1_TX_IPG_LENGTH 0xAFD1
127#define COUNTER_EN BIT(15)
128
129#define VEND1_PTP_CONFIG 0x1102
130#define EXT_TRG_EDGE BIT(1)
131
132#define TJA1120_SYNC_TRIG_FILTER 0x1010
133#define PTP_TRIG_RISE_TS BIT(3)
134#define PTP_TRIG_FALLING_TS BIT(2)
135
136#define CLK_RATE_ADJ_LD BIT(15)
137#define CLK_RATE_ADJ_DIR BIT(14)
138
139#define VEND1_RX_TS_INSRT_CTRL 0x114D
140#define TJA1103_RX_TS_INSRT_MODE2 0x02
141
142#define TJA1120_RX_TS_INSRT_CTRL 0x9012
143#define TJA1120_RX_TS_INSRT_EN BIT(15)
144#define TJA1120_TS_INSRT_MODE BIT(4)
145
146#define VEND1_EGR_RING_DATA_0 0x114E
147#define VEND1_EGR_RING_CTRL 0x1154
148
149#define RING_DATA_0_TS_VALID BIT(15)
150
151#define RING_DONE BIT(0)
152
153#define TS_SEC_MASK GENMASK(1, 0)
154
155#define PTP_ENABLE BIT(3)
156#define PHY_TEST_ENABLE BIT(0)
157
158#define VEND1_PORT_PTP_CONTROL 0x9000
159#define PORT_PTP_CONTROL_BYPASS BIT(11)
160
161#define PTP_CLK_PERIOD_100BT1 15ULL
162#define PTP_CLK_PERIOD_1000BT1 8ULL
163
164#define EVENT_MSG_FILT_ALL 0x0F
165#define EVENT_MSG_FILT_NONE 0x00
166
167#define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40
168#define GPIO_FUNC_EN BIT(15)
169#define GPIO_FUNC_PTP BIT(6)
170#define GPIO_SIGNAL_PTP_TRIGGER 0x01
171#define GPIO_SIGNAL_PPS_OUT 0x12
172#define GPIO_DISABLE 0
173#define GPIO_PPS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
174 GPIO_SIGNAL_PPS_OUT)
175#define GPIO_EXTTS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
176 GPIO_SIGNAL_PTP_TRIGGER)
177
178#define RGMII_PERIOD_PS 8000U
179#define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
180#define MIN_ID_PS 1644U
181#define MAX_ID_PS 2260U
182#define DEFAULT_ID_PS 2000U
183
184#define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
185 (ppb) * (ptp_clk_period), NSEC_PER_SEC)
186
187#define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
188
189#define TJA11XX_REVERSE_MODE BIT(0)
190
struct nxp_c45_phy;

/* Per-skb private data (stored in skb->cb via NXP_C45_SKB_CB) used to match
 * queued skbs against hardware timestamps.
 */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* parsed PTP header inside this skb */
	unsigned int type;		/* packet type as passed to rx/txtstamp */
};
197
/* Compound-literal initializer for a struct nxp_c45_reg_field: register
 * address, MMD device address, bit offset (LSB) and width in bits.
 */
#define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size) \
	((struct nxp_c45_reg_field) { \
		.reg = _reg, \
		.devad = _devad, \
		.offset = _offset, \
		.size = _size, \
	})
205
/* Describes a bit field inside an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* bit position of the field's LSB */
	u8 size;	/* field width in bits; 1 means a single flag bit */
};
212
/* A hardware timestamp as read back from the PHY, together with the PTP
 * header fields used to match it to an skb.
 */
struct nxp_c45_hwts {
	u32 nsec;		/* nanoseconds; 30 significant bits */
	u32 sec;		/* seconds; only the low 5 bits are provided */
	u8 domain_number;	/* PTP domainNumber of the packet */
	u16 sequence_id;	/* PTP sequenceId of the packet */
	u8 msg_type;		/* PTP message type */
};
220
/* Register addresses and bit fields that differ between the supported
 * chips (TJA1103 vs TJA1120); one instance per chip family.
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;	/* strobe: snapshot LTC for reading */
	struct nxp_c45_reg_field ltc_write;	/* strobe: commit staged LTC value */
	struct nxp_c45_reg_field ltc_lock_ctrl;
	/* LTC write/read staging registers, 16 bits each (low half first). */
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;
	struct nxp_c45_reg_field sec_4_2;
	struct nxp_c45_reg_field nsec_15_0;
	struct nxp_c45_reg_field nsec_29_16;

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
266
/* One ethtool statistic: its name and the register field that holds it. */
struct nxp_c45_phy_stats {
	const char *name;
	const struct nxp_c45_reg_field counter;
};
271
/* Per-chip configuration and operation hooks, selected via the matched
 * phy_driver's driver_data (see nxp_c45_get_data()).
 */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;	/* chip register layout */
	const struct nxp_c45_phy_stats *stats;	/* chip-specific stats */
	int n_stats;				/* number of entries in @stats */
	u8 ptp_clk_period;	/* PTP clock period in ns (PTP_CLK_PERIOD_*) */
	bool ext_ts_both_edges;	/* HW can timestamp both trigger edges */
	bool ack_ptp_irq;
	void (*counters_enable)(struct phy_device *phydev);
	/* Fetch one egress timestamp; returns true when @hwts was filled. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	/* Fetch the external-trigger timestamp; returns true when valid. */
	bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
	void (*nmi_handler)(struct phy_device *phydev,
			    irqreturn_t *irq_status);
};
288
289static const
290struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
291{
292 return phydev->drv->driver_data;
293}
294
295static const
296struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
297{
298 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
299
300 return phy_data->regmap;
301}
302
303static int nxp_c45_read_reg_field(struct phy_device *phydev,
304 const struct nxp_c45_reg_field *reg_field)
305{
306 u16 mask;
307 int ret;
308
309 if (reg_field->size == 0) {
310 phydev_err(phydev, "Trying to read a reg field of size 0.\n");
311 return -EINVAL;
312 }
313
314 ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
315 if (ret < 0)
316 return ret;
317
318 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
319 GENMASK(reg_field->offset + reg_field->size - 1,
320 reg_field->offset);
321 ret &= mask;
322 ret >>= reg_field->offset;
323
324 return ret;
325}
326
327static int nxp_c45_write_reg_field(struct phy_device *phydev,
328 const struct nxp_c45_reg_field *reg_field,
329 u16 val)
330{
331 u16 mask;
332 u16 set;
333
334 if (reg_field->size == 0) {
335 phydev_err(phydev, "Trying to write a reg field of size 0.\n");
336 return -EINVAL;
337 }
338
339 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
340 GENMASK(reg_field->offset + reg_field->size - 1,
341 reg_field->offset);
342 set = val << reg_field->offset;
343
344 return phy_modify_mmd_changed(phydev, reg_field->devad,
345 reg_field->reg, mask, set);
346}
347
348static int nxp_c45_set_reg_field(struct phy_device *phydev,
349 const struct nxp_c45_reg_field *reg_field)
350{
351 if (reg_field->size != 1) {
352 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
353 return -EINVAL;
354 }
355
356 return nxp_c45_write_reg_field(phydev, reg_field, 1);
357}
358
359static int nxp_c45_clear_reg_field(struct phy_device *phydev,
360 const struct nxp_c45_reg_field *reg_field)
361{
362 if (reg_field->size != 1) {
363 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
364 return -EINVAL;
365 }
366
367 return nxp_c45_write_reg_field(phydev, reg_field, 0);
368}
369
370static bool nxp_c45_poll_txts(struct phy_device *phydev)
371{
372 return phydev->irq <= 0;
373}
374
375static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
376 struct timespec64 *ts,
377 struct ptp_system_timestamp *sts)
378{
379 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
380 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
381
382 nxp_c45_set_reg_field(priv->phydev, ®map->ltc_read);
383 ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
384 regmap->vend1_ltc_rd_nsec_0);
385 ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
386 regmap->vend1_ltc_rd_nsec_1) << 16;
387 ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
388 regmap->vend1_ltc_rd_sec_0);
389 ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
390 regmap->vend1_ltc_rd_sec_1) << 16;
391
392 return 0;
393}
394
/* PHC .gettimex64 callback: read the LTC under the PTP lock. Always
 * returns 0; the low-level reader does not use @sts.
 */
static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
407
408static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
409 const struct timespec64 *ts)
410{
411 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
412 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
413
414 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
415 ts->tv_nsec);
416 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
417 ts->tv_nsec >> 16);
418 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
419 ts->tv_sec);
420 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
421 ts->tv_sec >> 16);
422 nxp_c45_set_reg_field(priv->phydev, ®map->ltc_write);
423
424 return 0;
425}
426
/* PHC .settime64 callback: set the LTC under the PTP lock. */
static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_settime64(ptp, ts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
438
/* PHC .adjfine callback: program the sub-nanosecond increment applied to
 * the LTC each clock cycle to realize a frequency offset of |ppb|.
 *
 * The 32-bit increment is split over two 16-bit registers. The high half
 * additionally carries CLK_RATE_ADJ_LD (load strobe) and CLK_RATE_ADJ_DIR
 * (set to speed the clock up); the low half is written first, so the full
 * value takes effect on the SUBNS_1 write — do not reorder the writes.
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	/* Convert ppb into the hardware's sub-ns-per-cycle representation. */
	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
469
470static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
471{
472 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
473 struct timespec64 now, then;
474
475 mutex_lock(&priv->ptp_lock);
476 then = ns_to_timespec64(delta);
477 _nxp_c45_ptp_gettimex64(ptp, &now, NULL);
478 now = timespec64_add(now, then);
479 _nxp_c45_ptp_settime64(ptp, &now);
480 mutex_unlock(&priv->ptp_lock);
481
482 return 0;
483}
484
/* Expand a partial HW timestamp into a full one. @ts holds the current LTC
 * time; its two low seconds bits (TS_SEC_MASK) are replaced by the HW's.
 * If the current low bits are smaller than the HW's, the counter wrapped
 * after the event was captured, so borrow one wrap period (TS_SEC_MASK + 1
 * seconds) before splicing in the HW bits.
 */
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
494
495static bool nxp_c45_match_ts(struct ptp_header *header,
496 struct nxp_c45_hwts *hwts,
497 unsigned int type)
498{
499 return ntohs(header->sequence_id) == hwts->sequence_id &&
500 ptp_get_msgtype(header, type) == hwts->msg_type &&
501 header->domain_number == hwts->domain_number;
502}
503
/* Read the latched external-trigger timestamp: nsec then sec, each split
 * across two 16-bit data registers. The trailing RING_DONE write releases
 * the entry (presumably allowing the next event to be captured — matches
 * the egress-ring handshake). Always reports success; validity is checked
 * by the chip-specific wrappers.
 */
static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       regmap->vend1_ext_trg_data_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_3) << 16;
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_ext_trg_ctrl, RING_DONE);

	return true;
}
522
523static bool tja1120_extts_is_valid(struct phy_device *phydev)
524{
525 bool valid;
526 int reg;
527
528 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
529 TJA1120_VEND1_PTP_TRIG_DATA_S);
530 valid = !!(reg & TJA1120_TS_VALID);
531
532 return valid;
533}
534
/* TJA1120 variant of get_extts: reads the timestamp only when the buffer
 * holds a valid one. If the buffer is empty but MORE_TS signals pending
 * entries, a RING_DONE write pops the FIFO into the buffer (engineering-
 * sample workaround) and validity is rechecked. Returns true when @extts
 * was filled.
 */
static bool tja1120_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   regmap->vend1_ext_trg_ctrl);
	more_ts = !!(reg & TJA1120_MORE_TS);

	valid = tja1120_extts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_extts_out;

		/* Bug workaround for TJA1120 engineering samples: move the new
		 * timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      regmap->vend1_ext_trg_ctrl, RING_DONE);
		valid = tja1120_extts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_extts_out;
	}

	nxp_c45_get_extts(priv, extts);
tja1120_get_extts_out:
	return valid;
}
567
568static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
569 struct nxp_c45_hwts *hwts)
570{
571 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
572 struct phy_device *phydev = priv->phydev;
573
574 hwts->domain_number =
575 nxp_c45_read_reg_field(phydev, ®map->domain_number);
576 hwts->msg_type =
577 nxp_c45_read_reg_field(phydev, ®map->msg_type);
578 hwts->sequence_id =
579 nxp_c45_read_reg_field(phydev, ®map->sequence_id);
580 hwts->nsec =
581 nxp_c45_read_reg_field(phydev, ®map->nsec_15_0);
582 hwts->nsec |=
583 nxp_c45_read_reg_field(phydev, ®map->nsec_29_16) << 16;
584 hwts->sec = nxp_c45_read_reg_field(phydev, ®map->sec_1_0);
585 hwts->sec |= nxp_c45_read_reg_field(phydev, ®map->sec_4_2) << 2;
586}
587
/* TJA1103: pop one egress timestamp from the ring. RING_DONE advances the
 * ring; the entry is only read when RING_DATA_0_TS_VALID is set. Returns
 * true when @hwts was filled. Serialized with other PTP register access
 * via priv->ptp_lock.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	nxp_c45_read_egress_ts(priv, hwts);
nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
607
608static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
609{
610 bool valid;
611 u16 reg;
612
613 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
614 valid = !!(reg & TJA1120_TS_VALID);
615
616 return valid;
617}
618
/* TJA1120: fetch one egress timestamp. If the buffer is empty but MORE_TS
 * indicates pending FIFO entries, write TS_VALID to pop the FIFO into the
 * buffer (engineering-sample workaround) and recheck. After a successful
 * read the TS_VALID bit is acknowledged so the next entry can surface.
 * Returns true when @hwts was filled. Serialized via priv->ptp_lock.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
651
/* Match a HW egress timestamp against the queued TX skbs. On a match the
 * skb is unlinked (under the queue lock), the timestamp is expanded
 * against the current LTC time and delivered via
 * skb_complete_tx_timestamp(); otherwise a warning is logged.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	/* Walk and unlink under the queue lock; complete outside it. */
	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
686
/* PHC aux worker. Three phases:
 *  1) when no IRQ is available, poll egress timestamps for queued TX skbs;
 *  2) attach reconstructed timestamps to queued RX skbs and deliver them;
 *  3) when external timestamping is armed, poll for a new trigger event.
 * Returns 1 to be rescheduled, -1 to stop until re-armed.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool ts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		ts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!ts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		/* The raw RX timestamp (2 sec bits + 30 nsec bits) sits in
		 * the PTP header's reserved2 word — presumably inserted by
		 * the PHY (see the *_RX_TS_INSRT_CTRL registers); expand it
		 * against the current LTC time and clear the field.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		/* Only report a trigger event whose timestamp differs from
		 * the last one delivered.
		 */
		ts_valid = data->get_extts(priv, &ts);
		if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
738
739static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
740 int pin, u16 pin_cfg)
741{
742 struct phy_device *phydev = priv->phydev;
743
744 phy_write_mmd(phydev, MDIO_MMD_VEND1,
745 VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
746}
747
748static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
749 struct ptp_perout_request *perout, int on)
750{
751 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
752 struct phy_device *phydev = priv->phydev;
753 int pin;
754
755 if (perout->flags & ~PTP_PEROUT_PHASE)
756 return -EOPNOTSUPP;
757
758 pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
759 if (pin < 0)
760 return pin;
761
762 if (!on) {
763 nxp_c45_clear_reg_field(priv->phydev,
764 ®map->pps_enable);
765 nxp_c45_clear_reg_field(priv->phydev,
766 ®map->pps_polarity);
767
768 nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
769
770 return 0;
771 }
772
773 /* The PPS signal is fixed to 1 second and is always generated when the
774 * seconds counter is incremented. The start time is not configurable.
775 * If the clock is adjusted, the PPS signal is automatically readjusted.
776 */
777 if (perout->period.sec != 1 || perout->period.nsec != 0) {
778 phydev_warn(phydev, "The period can be set only to 1 second.");
779 return -EINVAL;
780 }
781
782 if (!(perout->flags & PTP_PEROUT_PHASE)) {
783 if (perout->start.sec != 0 || perout->start.nsec != 0) {
784 phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
785 return -EINVAL;
786 }
787 } else {
788 if (perout->phase.nsec != 0 &&
789 perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
790 phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
791 return -EINVAL;
792 }
793
794 if (perout->phase.nsec == 0)
795 nxp_c45_clear_reg_field(priv->phydev,
796 ®map->pps_polarity);
797 else
798 nxp_c45_set_reg_field(priv->phydev,
799 ®map->pps_polarity);
800 }
801
802 nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);
803
804 nxp_c45_set_reg_field(priv->phydev, ®map->pps_enable);
805
806 return 0;
807}
808
/* TJA1103-style single-edge selection: EXT_TRG_EDGE cleared samples the
 * rising edge, set samples the falling edge. Note the order matters if
 * both flags were set (falling would win); callers reject both-edges
 * requests for hardware without ext_ts_both_edges support.
 */
static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
					  struct ptp_extts_request *extts)
{
	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
}
820
821static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
822 struct ptp_extts_request *extts)
823{
824 /* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
825 * this case external ts will be enabled on rising edge.
826 */
827 if (extts->flags & PTP_RISING_EDGE ||
828 extts->flags == PTP_ENABLE_FEATURE)
829 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
830 TJA1120_SYNC_TRIG_FILTER,
831 PTP_TRIG_RISE_TS);
832 else
833 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
834 TJA1120_SYNC_TRIG_FILTER,
835 PTP_TRIG_RISE_TS);
836
837 if (extts->flags & PTP_FALLING_EDGE)
838 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
839 TJA1120_SYNC_TRIG_FILTER,
840 PTP_TRIG_FALLING_TS);
841 else
842 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
843 TJA1120_SYNC_TRIG_FILTER,
844 PTP_TRIG_FALLING_TS);
845}
846
/* Enable/disable external timestamping on the requested pin. Validates the
 * flag set (both-edges only when the chip supports it), programs the edge
 * selection, configures the GPIO and kicks the aux worker, which polls for
 * trigger events while priv->extts is set.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			      PTP_RISING_EDGE |
			      PTP_FALLING_EDGE |
			      PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE) &&
	    !data->ext_ts_both_edges)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	if (data->ext_ts_both_edges)
		nxp_c45_set_rising_and_falling(priv->phydev, extts);
	else
		nxp_c45_set_rising_or_falling(priv->phydev, extts);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Start the worker that polls for the trigger timestamp. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
888
889static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
890 struct ptp_clock_request *req, int on)
891{
892 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
893
894 switch (req->type) {
895 case PTP_CLK_REQ_EXTTS:
896 return nxp_c45_extts_enable(priv, &req->extts, on);
897 case PTP_CLK_REQ_PEROUT:
898 return nxp_c45_perout_enable(priv, &req->perout, on);
899 default:
900 return -EOPNOTSUPP;
901 }
902}
903
/* The 12 programmable GPIO pins exposed as PTP pins; all start unassigned
 * and may be set to PEROUT or EXTTS (see nxp_c45_ptp_verify_pin()).
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
918
919static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
920 enum ptp_pin_function func, unsigned int chan)
921{
922 if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
923 return -EINVAL;
924
925 switch (func) {
926 case PTP_PF_NONE:
927 case PTP_PF_PEROUT:
928 case PTP_PF_EXTTS:
929 break;
930 default:
931 return -EOPNOTSUPP;
932 }
933
934 return 0;
935}
936
/* Fill in the PHC capabilities and register the PTP clock.
 * Returns 0 on success or a negative errno; ptp_clock_register() may also
 * return NULL (e.g. when PTP clock support is not available), which is
 * mapped to -ENOMEM here.
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
967
/* mii_ts .txtstamp callback: when TX timestamping is enabled, remember the
 * parsed PTP header, mark the skb in-progress and queue it until the HW
 * egress timestamp arrives (via IRQ or the polled aux worker); otherwise
 * the skb is dropped (no SW fallback).
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		/* No IRQ: the worker must poll for the egress timestamp. */
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
989
990static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
991 struct sk_buff *skb, int type)
992{
993 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
994 mii_ts);
995 struct ptp_header *header = ptp_parse_header(skb, type);
996
997 if (!header)
998 return false;
999
1000 if (!priv->hwts_rx)
1001 return false;
1002
1003 NXP_C45_SKB_CB(skb)->header = header;
1004 skb_queue_tail(&priv->rx_queue, skb);
1005 ptp_schedule_worker(priv->ptp_clock, 0);
1006
1007 return true;
1008}
1009
/* mii_ts .hwtstamp callback: apply a timestamping configuration.
 * Only L2 PTPv2 RX filters are accepted (coalesced to L2_EVENT). The PTP
 * block and the event-message filter are switched on when either direction
 * is enabled; with a usable IRQ line, the egress-timestamp interrupt is
 * also toggled to follow the TX setting.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	const struct nxp_c45_phy_data *data;

	if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	data = nxp_c45_get_data(phydev);
	priv->hwts_tx = cfg->tx_type;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		priv->hwts_rx = 1;
		/* Report back the (wider) filter actually applied. */
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_ALL);
		data->ptp_enable(phydev, true);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_NONE);
		data->ptp_enable(phydev, false);
	}

	/* Polling mode: egress timestamps are fetched by the aux worker,
	 * no IRQ to configure.
	 */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
	else
		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);

nxp_c45_no_ptp_irq:
	return 0;
}
1062
1063static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
1064 struct kernel_ethtool_ts_info *ts_info)
1065{
1066 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1067 mii_ts);
1068
1069 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1070 SOF_TIMESTAMPING_RX_HARDWARE |
1071 SOF_TIMESTAMPING_RAW_HARDWARE;
1072 ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1073 ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1074 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1075 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1076 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1077 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
1078
1079 return 0;
1080}
1081
/* Counters shared by all supported PHYs: each entry pairs an ethtool
 * statistic name with the vendor register field holding the counter.
 */
static const struct nxp_c45_phy_stats common_hw_stats[] = {
	{ "phy_link_status_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
	{ "phy_link_availability_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
	{ "phy_link_loss_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
	{ "phy_link_failure_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
	{ "phy_symbol_error_cnt",
		NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
};
1094
/* TJA1103-specific counters, reported after common_hw_stats. */
static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
	{ "rx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
	{ "tx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
	{ "rx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
	{ "tx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
};
1105
/* TJA1120-specific counters, reported after common_hw_stats. The "_xtd"
 * entries are the extended (upper) parts of the corresponding counters.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
		NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
		NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
		NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
		NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
		NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1126
1127static int nxp_c45_get_sset_count(struct phy_device *phydev)
1128{
1129 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1130
1131 return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1132}
1133
1134static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
1135{
1136 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1137 size_t count = nxp_c45_get_sset_count(phydev);
1138 size_t idx;
1139 size_t i;
1140
1141 for (i = 0; i < count; i++) {
1142 if (i < ARRAY_SIZE(common_hw_stats)) {
1143 ethtool_puts(&data, common_hw_stats[i].name);
1144 continue;
1145 }
1146 idx = i - ARRAY_SIZE(common_hw_stats);
1147 ethtool_puts(&data, phy_data->stats[idx].name);
1148 }
1149}
1150
1151static void nxp_c45_get_stats(struct phy_device *phydev,
1152 struct ethtool_stats *stats, u64 *data)
1153{
1154 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1155 size_t count = nxp_c45_get_sset_count(phydev);
1156 const struct nxp_c45_reg_field *reg_field;
1157 size_t idx;
1158 size_t i;
1159 int ret;
1160
1161 for (i = 0; i < count; i++) {
1162 if (i < ARRAY_SIZE(common_hw_stats)) {
1163 reg_field = &common_hw_stats[i].counter;
1164 } else {
1165 idx = i - ARRAY_SIZE(common_hw_stats);
1166 reg_field = &phy_data->stats[idx].counter;
1167 }
1168
1169 ret = nxp_c45_read_reg_field(phydev, reg_field);
1170 if (ret < 0)
1171 data[i] = U64_MAX;
1172 else
1173 data[i] = ret;
1174 }
1175}
1176
/* Enable configuration access: unlock global/all config in the device
 * control register, wait briefly for it to take effect, then enable the
 * port, PHY and port-infra configuration blocks.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	/* Short settle delay before the follow-up control writes;
	 * presumably required by the device -- see datasheet.
	 */
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
1193
/* Set the START_OP bit so the PHY leaves configuration and starts
 * normal operation.
 */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1199
1200static int nxp_c45_config_intr(struct phy_device *phydev)
1201{
1202 int ret;
1203
1204 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1205 ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1206 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1207 if (ret)
1208 return ret;
1209
1210 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1211 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1212 }
1213
1214 ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1215 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1216 if (ret)
1217 return ret;
1218
1219 return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1220 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1221}
1222
/* TJA1103 .config_intr: acknowledge any pending FUSA event before the
 * common IRQ configuration.
 */
static int tja1103_config_intr(struct phy_device *phydev)
{
	int ret;

	/* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
			    FUSA_PASS);
	if (ret)
		return ret;

	return nxp_c45_config_intr(phydev);
}
1235
/* TJA1120 .config_intr: additionally (un)mask the "device boot done"
 * global-infra interrupt, then apply the common IRQ configuration.
 */
static int tja1120_config_intr(struct phy_device *phydev)
{
	int ret;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				       TJA1120_GLOBAL_INFRA_IRQ_EN,
				       TJA1120_DEV_BOOT_DONE);
	else
		ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					 TJA1120_GLOBAL_INFRA_IRQ_EN,
					 TJA1120_DEV_BOOT_DONE);
	if (ret)
		return ret;

	return nxp_c45_config_intr(phydev);
}
1253
/* Shared IRQ handler: services link events, egress-timestamp IRQs,
 * chip-specific NMI sources and MACsec interrupts, in that order.
 * Returns IRQ_HANDLED if any source was serviced, IRQ_NONE otherwise.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		/* Ack the link event and let phylib re-read link state. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp.
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		/* Drain every pending egress timestamp from the FIFO. */
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	data->nmi_handler(phydev, &ret);
	nxp_c45_handle_macsec_interrupt(phydev, &ret);

	return ret;
}
1290
/* Trigger a device soft reset and poll until the self-clearing RESET
 * bit reads back as zero (20ms poll interval, 240ms timeout).
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	/* Give the device time to start the reset before polling. */
	usleep_range(2000, 2050);

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1307
1308static int nxp_c45_cable_test_start(struct phy_device *phydev)
1309{
1310 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1311
1312 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1313 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1314 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1315 CABLE_TEST_ENABLE | CABLE_TEST_START);
1316}
1317
1318static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
1319 bool *finished)
1320{
1321 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1322 int ret;
1323 u8 cable_test_result;
1324
1325 ret = nxp_c45_read_reg_field(phydev, ®map->cable_test_valid);
1326 if (!ret) {
1327 *finished = false;
1328 return 0;
1329 }
1330
1331 *finished = true;
1332 cable_test_result = nxp_c45_read_reg_field(phydev,
1333 ®map->cable_test_result);
1334
1335 switch (cable_test_result) {
1336 case CABLE_TEST_OK:
1337 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1338 ETHTOOL_A_CABLE_RESULT_CODE_OK);
1339 break;
1340 case CABLE_TEST_SHORTED:
1341 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1342 ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
1343 break;
1344 case CABLE_TEST_OPEN:
1345 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1346 ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
1347 break;
1348 default:
1349 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1350 ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
1351 }
1352
1353 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1354 CABLE_TEST_ENABLE);
1355 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1356 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1357
1358 return nxp_c45_start_op(phydev);
1359}
1360
1361static int nxp_c45_get_sqi(struct phy_device *phydev)
1362{
1363 int reg;
1364
1365 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1366 if (!(reg & SQI_VALID))
1367 return -EINVAL;
1368
1369 reg &= SQI_MASK;
1370
1371 return reg;
1372}
1373
static void tja1120_link_change_notify(struct phy_device *phydev)
{
	/* Bug workaround for TJA1120 engineering samples: fix egress
	 * timestamps lost after link recovery. Pulse the EPHY PCS reset
	 * whenever the link goes down.
	 */
	if (phydev->state == PHY_NOLINK) {
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
	}
}
1386
/* ethtool .get_sqi_max: the SQI scale tops out at MAX_SQI. */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1391
/* Validate an RGMII internal delay (picoseconds) against the supported
 * [MIN_ID_PS, MAX_ID_PS] range. Returns 0 if in range, -EINVAL otherwise.
 */
static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
{
	if (delay < MIN_ID_PS) {
		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
		return -EINVAL;
	}

	if (delay > MAX_ID_PS) {
		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
		return -EINVAL;
	}

	return 0;
}
1406
/* Enable the link-drop counter common to all models, then the
 * chip-specific counters via the per-model hook.
 */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
1416
/* Common PTP bring-up: program the clock period, unlock the LTC
 * (local time counter), then run the chip-specific PTP init hook.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1428
1429static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1430{
1431 /* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1432 * To avoid floating point operations we'll multiply by 10
1433 * and get 1 decimal point precision.
1434 */
1435 phase_offset_raw *= 10;
1436 phase_offset_raw -= 738;
1437 return div_u64(phase_offset_raw, 9);
1438}
1439
/* Turn off both the TX and RX RGMII internal delay blocks. */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
1445
1446static void nxp_c45_set_delays(struct phy_device *phydev)
1447{
1448 struct nxp_c45_phy *priv = phydev->priv;
1449 u64 tx_delay = priv->tx_delay;
1450 u64 rx_delay = priv->rx_delay;
1451 u64 degree;
1452
1453 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1454 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1455 degree = div_u64(tx_delay, PS_PER_DEGREE);
1456 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1457 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1458 } else {
1459 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1460 ID_ENABLE);
1461 }
1462
1463 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1464 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1465 degree = div_u64(rx_delay, PS_PER_DEGREE);
1466 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1467 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1468 } else {
1469 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1470 ID_ENABLE);
1471 }
1472}
1473
/* Read the RGMII internal delays from device properties into priv,
 * falling back to DEFAULT_ID_PS when a property is absent, and validate
 * them. Returns 0 on success or -EINVAL for an out-of-range delay.
 */
static int nxp_c45_get_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	int ret;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "tx-internal-delay-ps",
					       &priv->tx_delay);
		if (ret)
			priv->tx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
		if (ret) {
			phydev_err(phydev,
				   "tx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "rx-internal-delay-ps",
					       &priv->rx_delay);
		if (ret)
			priv->rx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
		if (ret) {
			phydev_err(phydev,
				   "rx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	return 0;
}
1513
/* Configure the MII basic-config register for the requested interface
 * mode, after checking the mode against the abilities the PHY reports.
 * For rgmii-id/txid/rxid the internal delays are also read and applied.
 * Returns -EINVAL for unsupported or unknown modes.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u16 basic_config;
	int ret;

	/* ret holds the abilities bitmap for the checks below. */
	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}

		basic_config = MII_BASIC_CONFIG_RMII;

		/* This is not PHY_INTERFACE_MODE_REVRMII */
		if (priv->flags & TJA11XX_REVERSE_MODE)
			basic_config |= MII_BASIC_CONFIG_REV;

		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      basic_config);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1595
/* .config_init: enable config access, apply chip workarounds, set the
 * interface mode, enable counters, init PTP and MACsec, then start
 * normal PHY operation.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* 100/1000BASE-T1 has no autonegotiation. */
	phydev->autoneg = AUTONEG_DISABLE;

	nxp_c45_counters_enable(phydev);
	nxp_c45_ptp_init(phydev);
	ret = nxp_c45_macsec_config_init(phydev);
	if (ret)
		return ret;

	return nxp_c45_start_op(phydev);
}
1629
/* .get_features: mark TP and MII link modes as supported, then read the
 * remaining abilities from the C45 PMA registers.
 */
static int nxp_c45_get_features(struct phy_device *phydev)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);

	return genphy_c45_pma_read_abilities(phydev);
}
1637
1638static int nxp_c45_parse_dt(struct phy_device *phydev)
1639{
1640 struct device_node *node = phydev->mdio.dev.of_node;
1641 struct nxp_c45_phy *priv = phydev->priv;
1642
1643 if (!IS_ENABLED(CONFIG_OF_MDIO))
1644 return 0;
1645
1646 if (of_property_read_bool(node, "nxp,rmii-refclk-out"))
1647 priv->flags |= TJA11XX_REVERSE_MODE;
1648
1649 return 0;
1650}
1651
/* .probe: allocate the driver state, parse DT, and conditionally set up
 * PTP timestamping and MACsec depending on the abilities the hardware
 * reports and on the kernel configuration.
 */
static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	bool macsec_ability;
	int phy_abilities;
	bool ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	nxp_c45_parse_dt(phydev);

	mutex_init(&priv->ptp_lock);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	ptp_ability = !!(phy_abilities & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_dbg(phydev, "the phy does not support PTP");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		ret = nxp_c45_init_ptp_clock(priv);

		/* Timestamp selected by default to keep legacy API */
		phydev->default_timestamp = true;
	} else {
		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
	}

no_ptp_support:
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	if (!macsec_ability) {
		phydev_info(phydev, "the phy does not support MACsec\n");
		goto no_macsec_support;
	}

	if (IS_ENABLED(CONFIG_MACSEC)) {
		ret = nxp_c45_macsec_probe(phydev);
		phydev_dbg(phydev, "MACsec support enabled.");
	} else {
		phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
	}

no_macsec_support:

	return ret;
}
1716
/* .remove: unregister the PTP clock (if one was created), drop any
 * queued timestamping skbs and tear down MACsec state.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
	nxp_c45_macsec_remove(phydev);
}
1728
/* TJA1103 hook: enable the preamble and IPG-length counters exposed in
 * tja1103_hw_stats.
 */
static void tja1103_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);
}
1740
/* TJA1103 hook: select the RX timestamp insertion mode and enable the
 * PTP function block.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1748
/* TJA1103 hook: (de)activate PTP by clearing/setting the port PTP
 * bypass bit — bypass set means PTP is off.
 */
static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (enable)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
	else
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PORT_PTP_CONTROL,
				 PORT_PTP_CONTROL_BYPASS);
}
1760
/* TJA1103 NMI hook, called from the shared IRQ handler: ack a pending
 * FUSA event (write-1-to-clear) and mark the IRQ handled.
 */
static void tja1103_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_ALWAYS_ACCESSIBLE);
	if (ret & FUSA_PASS) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      VEND1_ALWAYS_ACCESSIBLE,
			      FUSA_PASS);
		*irq_status = IRQ_HANDLED;
	}
}
1775
/* TJA1103 register map: PTP clock/LTC, egress-timestamp IRQ and FIFO
 * fields, external trigger registers and cable-test registers.
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period = 0x1104,
	.vend1_event_msg_filt = 0x1148,
	.pps_enable =
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity =
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl =
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read =
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write =
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0 = 0x1106,
	.vend1_ltc_wr_nsec_1 = 0x1107,
	.vend1_ltc_wr_sec_0 = 0x1108,
	.vend1_ltc_wr_sec_1 = 0x1109,
	.vend1_ltc_rd_nsec_0 = 0x110A,
	.vend1_ltc_rd_nsec_1 = 0x110B,
	.vend1_ltc_rd_sec_0 = 0x110C,
	.vend1_ltc_rd_sec_1 = 0x110D,
	.vend1_rate_adj_subns_0 = 0x110F,
	.vend1_rate_adj_subns_1 = 0x1110,
	.irq_egr_ts_en =
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status =
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number =
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type =
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id =
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0 =
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2 =
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0 =
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16 =
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0 = 0x1121,
	.vend1_ext_trg_data_1 = 0x1122,
	.vend1_ext_trg_data_2 = 0x1123,
	.vend1_ext_trg_data_3 = 0x1124,
	.vend1_ext_trg_ctrl = 0x1126,
	.cable_test = 0x8330,
	.cable_test_valid =
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result =
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1828
/* TJA1103 model description: register map, extra stats and the
 * chip-specific hooks used by the common driver paths.
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	.ack_ptp_irq = false,	/* egress-TS IRQ bit is self-clearing */
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.get_extts = nxp_c45_get_extts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
	.nmi_handler = tja1103_nmi_handler,
};
1843
/* TJA1120 hook: enable the extended symbol-error counter and the
 * frame/lost-frame monitors, resetting the monitor block first.
 */
static void tja1120_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
			 EXTENDED_CNT_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
			 MONITOR_RESET);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
}
1853
/* TJA1120 hook: configure RX timestamp insertion, external-TS mode and
 * enable PTP in the device config register.
 */
static void tja1120_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
		      TJA1120_TS_INSRT_MODE);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
			 PTP_ENABLE);
}
1863
/* TJA1120 hook: gate PTP via the port function-enable bit (no bypass
 * bit like the TJA1103).
 */
static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (enable)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PORT_FUNC_ENABLES,
				 PTP_ENABLE);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_FUNC_ENABLES,
				   PTP_ENABLE);
}
1875
/* TJA1120 NMI hook, called from the shared IRQ handler: ack a pending
 * "device boot done" global-infra event and mark the IRQ handled.
 */
static void tja1120_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   TJA1120_GLOBAL_INFRA_IRQ_STATUS);
	if (ret & TJA1120_DEV_BOOT_DONE) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_GLOBAL_INFRA_IRQ_ACK,
			      TJA1120_DEV_BOOT_DONE);
		*irq_status = IRQ_HANDLED;
	}
}
1890
/* TJA1120 register map: same logical fields as tja1103_regmap but at
 * the TJA1120 register addresses and bit positions.
 */
static const struct nxp_c45_regmap tja1120_regmap = {
	.vend1_ptp_clk_period = 0x1020,
	.vend1_event_msg_filt = 0x9010,
	.pps_enable =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	.ltc_read =
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write =
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	.vend1_ltc_wr_nsec_0 = 0x1040,
	.vend1_ltc_wr_nsec_1 = 0x1041,
	.vend1_ltc_wr_sec_0 = 0x1042,
	.vend1_ltc_wr_sec_1 = 0x1043,
	.vend1_ltc_rd_nsec_0 = 0x1048,
	.vend1_ltc_rd_nsec_1 = 0x1049,
	.vend1_ltc_rd_sec_0 = 0x104A,
	.vend1_ltc_rd_sec_1 = 0x104B,
	.vend1_rate_adj_subns_0 = 0x1030,
	.vend1_rate_adj_subns_1 = 0x1031,
	.irq_egr_ts_en =
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status =
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	.domain_number =
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type =
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id =
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0 =
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2 =
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0 =
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16 =
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0 = 0x1071,
	.vend1_ext_trg_data_1 = 0x1072,
	.vend1_ext_trg_data_2 = 0x1073,
	.vend1_ext_trg_data_3 = 0x1074,
	.vend1_ext_trg_ctrl = 0x1075,
	.cable_test = 0x8360,
	.cable_test_valid =
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result =
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
1943
/* TJA1120 model description: register map, extra stats and the
 * chip-specific hooks used by the common driver paths.
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	.ext_ts_both_edges = true,
	.ack_ptp_irq = true,	/* egress-TS IRQ must be acked explicitly */
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.get_extts = tja1120_get_extts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
	.nmi_handler = tja1120_nmi_handler,
};
1958
/* phylib driver entries for the TJA1103 and TJA1120; they share the
 * common callbacks and differ in driver_data, config_intr and (for the
 * TJA1120) the link_change_notify workaround.
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
		.name			= "NXP C45 TJA1120",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
};
2010
module_phy_driver(nxp_c45_driver);

/* MDIO ID table for module autoloading. */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");