1// SPDX-License-Identifier: GPL-2.0
2/* NXP C45 PHY driver
3 * Copyright 2021-2023 NXP
4 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5 */
6
7#include <linux/delay.h>
8#include <linux/ethtool.h>
9#include <linux/ethtool_netlink.h>
10#include <linux/kernel.h>
11#include <linux/mii.h>
12#include <linux/module.h>
13#include <linux/phy.h>
14#include <linux/processor.h>
15#include <linux/property.h>
16#include <linux/ptp_classify.h>
17#include <linux/net_tstamp.h>
18
19#include "nxp-c45-tja11xx.h"
20
21#define PHY_ID_TJA_1103 0x001BB010
22#define PHY_ID_TJA_1120 0x001BB031
23
24#define VEND1_DEVICE_CONTROL 0x0040
25#define DEVICE_CONTROL_RESET BIT(15)
26#define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
27#define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13)
28
29#define VEND1_DEVICE_CONFIG 0x0048
30
31#define TJA1120_VEND1_EXT_TS_MODE 0x1012
32
33#define TJA1120_GLOBAL_INFRA_IRQ_ACK 0x2C08
34#define TJA1120_GLOBAL_INFRA_IRQ_EN 0x2C0A
35#define TJA1120_GLOBAL_INFRA_IRQ_STATUS 0x2C0C
36#define TJA1120_DEV_BOOT_DONE BIT(1)
37
38#define TJA1120_VEND1_PTP_TRIG_DATA_S 0x1070
39
40#define TJA1120_EGRESS_TS_DATA_S 0x9060
41#define TJA1120_EGRESS_TS_END 0x9067
42#define TJA1120_TS_VALID BIT(0)
43#define TJA1120_MORE_TS BIT(15)
44
45#define VEND1_PHY_IRQ_ACK 0x80A0
46#define VEND1_PHY_IRQ_EN 0x80A1
47#define VEND1_PHY_IRQ_STATUS 0x80A2
48#define PHY_IRQ_LINK_EVENT BIT(1)
49
50#define VEND1_ALWAYS_ACCESSIBLE 0x801F
51#define FUSA_PASS BIT(4)
52
53#define VEND1_PHY_CONTROL 0x8100
54#define PHY_CONFIG_EN BIT(14)
55#define PHY_START_OP BIT(0)
56
57#define VEND1_PHY_CONFIG 0x8108
58#define PHY_CONFIG_AUTO BIT(0)
59
60#define TJA1120_EPHY_RESETS 0x810A
61#define EPHY_PCS_RESET BIT(3)
62
63#define VEND1_SIGNAL_QUALITY 0x8320
64#define SQI_VALID BIT(14)
65#define SQI_MASK GENMASK(2, 0)
66#define MAX_SQI SQI_MASK
67
68#define CABLE_TEST_ENABLE BIT(15)
69#define CABLE_TEST_START BIT(14)
70#define CABLE_TEST_OK 0x00
71#define CABLE_TEST_SHORTED 0x01
72#define CABLE_TEST_OPEN 0x02
73#define CABLE_TEST_UNKNOWN 0x07
74
75#define VEND1_PORT_CONTROL 0x8040
76#define PORT_CONTROL_EN BIT(14)
77
78#define VEND1_PORT_ABILITIES 0x8046
79#define MACSEC_ABILITY BIT(5)
80#define PTP_ABILITY BIT(3)
81
82#define VEND1_PORT_FUNC_IRQ_EN 0x807A
83#define MACSEC_IRQS BIT(5)
84#define PTP_IRQS BIT(3)
85
86#define VEND1_PTP_IRQ_ACK 0x9008
87#define EGR_TS_IRQ BIT(1)
88
89#define VEND1_PORT_INFRA_CONTROL 0xAC00
90#define PORT_INFRA_CONTROL_EN BIT(14)
91
92#define VEND1_RXID 0xAFCC
93#define VEND1_TXID 0xAFCD
94#define ID_ENABLE BIT(15)
95
96#define VEND1_ABILITIES 0xAFC4
97#define RGMII_ID_ABILITY BIT(15)
98#define RGMII_ABILITY BIT(14)
99#define RMII_ABILITY BIT(10)
100#define REVMII_ABILITY BIT(9)
101#define MII_ABILITY BIT(8)
102#define SGMII_ABILITY BIT(0)
103
104#define VEND1_MII_BASIC_CONFIG 0xAFC6
105#define MII_BASIC_CONFIG_REV BIT(4)
106#define MII_BASIC_CONFIG_SGMII 0x9
107#define MII_BASIC_CONFIG_RGMII 0x7
108#define MII_BASIC_CONFIG_RMII 0x5
109#define MII_BASIC_CONFIG_MII 0x4
110
111#define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351
112#define EXTENDED_CNT_EN BIT(15)
113#define VEND1_MONITOR_STATUS 0xAC80
114#define MONITOR_RESET BIT(15)
115#define VEND1_MONITOR_CONFIG 0xAC86
116#define LOST_FRAMES_CNT_EN BIT(9)
117#define ALL_FRAMES_CNT_EN BIT(8)
118
119#define VEND1_SYMBOL_ERROR_COUNTER 0x8350
120#define VEND1_LINK_DROP_COUNTER 0x8352
121#define VEND1_LINK_LOSSES_AND_FAILURES 0x8353
122#define VEND1_RX_PREAMBLE_COUNT 0xAFCE
123#define VEND1_TX_PREAMBLE_COUNT 0xAFCF
124#define VEND1_RX_IPG_LENGTH 0xAFD0
125#define VEND1_TX_IPG_LENGTH 0xAFD1
126#define COUNTER_EN BIT(15)
127
128#define VEND1_PTP_CONFIG 0x1102
129#define EXT_TRG_EDGE BIT(1)
130
131#define TJA1120_SYNC_TRIG_FILTER 0x1010
132#define PTP_TRIG_RISE_TS BIT(3)
133#define PTP_TRIG_FALLING_TS BIT(2)
134
135#define CLK_RATE_ADJ_LD BIT(15)
136#define CLK_RATE_ADJ_DIR BIT(14)
137
138#define VEND1_RX_TS_INSRT_CTRL 0x114D
139#define TJA1103_RX_TS_INSRT_MODE2 0x02
140
141#define TJA1120_RX_TS_INSRT_CTRL 0x9012
142#define TJA1120_RX_TS_INSRT_EN BIT(15)
143#define TJA1120_TS_INSRT_MODE BIT(4)
144
145#define VEND1_EGR_RING_DATA_0 0x114E
146#define VEND1_EGR_RING_CTRL 0x1154
147
148#define RING_DATA_0_TS_VALID BIT(15)
149
150#define RING_DONE BIT(0)
151
152#define TS_SEC_MASK GENMASK(1, 0)
153
154#define PTP_ENABLE BIT(3)
155#define PHY_TEST_ENABLE BIT(0)
156
157#define VEND1_PORT_PTP_CONTROL 0x9000
158#define PORT_PTP_CONTROL_BYPASS BIT(11)
159
160#define PTP_CLK_PERIOD_100BT1 15ULL
161#define PTP_CLK_PERIOD_1000BT1 8ULL
162
163#define EVENT_MSG_FILT_ALL 0x0F
164#define EVENT_MSG_FILT_NONE 0x00
165
166#define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40
167#define GPIO_FUNC_EN BIT(15)
168#define GPIO_FUNC_PTP BIT(6)
169#define GPIO_SIGNAL_PTP_TRIGGER 0x01
170#define GPIO_SIGNAL_PPS_OUT 0x12
171#define GPIO_DISABLE 0
172#define GPIO_PPS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
173 GPIO_SIGNAL_PPS_OUT)
174#define GPIO_EXTTS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
175 GPIO_SIGNAL_PTP_TRIGGER)
176
177#define RGMII_PERIOD_PS 8000U
178#define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
179#define MIN_ID_PS 1644U
180#define MAX_ID_PS 2260U
181#define DEFAULT_ID_PS 2000U
182
183#define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
184 (ppb) * (ptp_clk_period), NSEC_PER_SEC)
185
186#define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
187
188struct nxp_c45_phy;
189
/* Per-skb state kept in skb->cb while the skb waits for its HW timestamp. */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* parsed PTP header inside the skb */
	unsigned int type;		/* PTP packet classification type */
};
194
/* Build a struct nxp_c45_reg_field compound literal from a register address,
 * MMD device address, bit offset and field width (in bits).
 */
#define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size) \
	((struct nxp_c45_reg_field) {			\
		.reg = _reg,				\
		.devad = _devad,			\
		.offset = _offset,			\
		.size = _size,				\
	})
202
/* Describes one bit field inside an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* LSB position of the field */
	u8 size;	/* field width in bits */
};
209
/* One hardware timestamp plus the PTP header fields used to match it
 * against a queued skb.
 */
struct nxp_c45_hwts {
	u32 nsec;		/* nanoseconds part of the timestamp */
	u32 sec;		/* seconds part (only the low bits are valid) */
	u8 domain_number;	/* PTP domain of the timestamped frame */
	u16 sequence_id;	/* PTP sequence id of the frame */
	u8 msg_type;		/* PTP message type of the frame */
};
217
/* Chip-specific register layout. Register addresses and bit fields differ
 * between TJA1103 and TJA1120, so each device provides its own instance.
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;
	struct nxp_c45_reg_field ltc_write;
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;
	struct nxp_c45_reg_field sec_4_2;
	struct nxp_c45_reg_field nsec_15_0;
	struct nxp_c45_reg_field nsec_29_16;

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
263
/* Name and register field of one ethtool hardware statistic. */
struct nxp_c45_phy_stats {
	const char *name;
	const struct nxp_c45_reg_field counter;
};
268
/* Per-chip data and operations; one static instance per supported device,
 * attached via the driver's driver_data pointer.
 */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;	/* chip register layout */
	const struct nxp_c45_phy_stats *stats;	/* chip-specific statistics */
	int n_stats;				/* number of entries in stats */
	u8 ptp_clk_period;			/* PTP clock period, used by adjfine */
	bool ext_ts_both_edges;			/* HW can timestamp both edges */
	bool ack_ptp_irq;			/* PTP IRQ needs explicit ack */
	void (*counters_enable)(struct phy_device *phydev);
	/* Fetch one egress timestamp; returns false when none is valid. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	/* Fetch one external-trigger timestamp; returns false if invalid. */
	bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
	void (*nmi_handler)(struct phy_device *phydev,
			    irqreturn_t *irq_status);
};
285
286static const
287struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
288{
289 return phydev->drv->driver_data;
290}
291
292static const
293struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
294{
295 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
296
297 return phy_data->regmap;
298}
299
300static int nxp_c45_read_reg_field(struct phy_device *phydev,
301 const struct nxp_c45_reg_field *reg_field)
302{
303 u16 mask;
304 int ret;
305
306 if (reg_field->size == 0) {
307 phydev_err(phydev, "Trying to read a reg field of size 0.\n");
308 return -EINVAL;
309 }
310
311 ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
312 if (ret < 0)
313 return ret;
314
315 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
316 GENMASK(reg_field->offset + reg_field->size - 1,
317 reg_field->offset);
318 ret &= mask;
319 ret >>= reg_field->offset;
320
321 return ret;
322}
323
324static int nxp_c45_write_reg_field(struct phy_device *phydev,
325 const struct nxp_c45_reg_field *reg_field,
326 u16 val)
327{
328 u16 mask;
329 u16 set;
330
331 if (reg_field->size == 0) {
332 phydev_err(phydev, "Trying to write a reg field of size 0.\n");
333 return -EINVAL;
334 }
335
336 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
337 GENMASK(reg_field->offset + reg_field->size - 1,
338 reg_field->offset);
339 set = val << reg_field->offset;
340
341 return phy_modify_mmd_changed(phydev, reg_field->devad,
342 reg_field->reg, mask, set);
343}
344
345static int nxp_c45_set_reg_field(struct phy_device *phydev,
346 const struct nxp_c45_reg_field *reg_field)
347{
348 if (reg_field->size != 1) {
349 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
350 return -EINVAL;
351 }
352
353 return nxp_c45_write_reg_field(phydev, reg_field, 1);
354}
355
356static int nxp_c45_clear_reg_field(struct phy_device *phydev,
357 const struct nxp_c45_reg_field *reg_field)
358{
359 if (reg_field->size != 1) {
360 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
361 return -EINVAL;
362 }
363
364 return nxp_c45_write_reg_field(phydev, reg_field, 0);
365}
366
367static bool nxp_c45_poll_txts(struct phy_device *phydev)
368{
369 return phydev->irq <= 0;
370}
371
372static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
373 struct timespec64 *ts,
374 struct ptp_system_timestamp *sts)
375{
376 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
377 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
378
379 nxp_c45_set_reg_field(priv->phydev, ®map->ltc_read);
380 ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
381 regmap->vend1_ltc_rd_nsec_0);
382 ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
383 regmap->vend1_ltc_rd_nsec_1) << 16;
384 ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
385 regmap->vend1_ltc_rd_sec_0);
386 ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
387 regmap->vend1_ltc_rd_sec_1) << 16;
388
389 return 0;
390}
391
/* ptp_clock_info .gettimex64 callback: read the LTC under the PTP lock. */
static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
404
405static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
406 const struct timespec64 *ts)
407{
408 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
409 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
410
411 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
412 ts->tv_nsec);
413 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
414 ts->tv_nsec >> 16);
415 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
416 ts->tv_sec);
417 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
418 ts->tv_sec >> 16);
419 nxp_c45_set_reg_field(priv->phydev, ®map->ltc_write);
420
421 return 0;
422}
423
/* ptp_clock_info .settime64 callback: program the LTC under the PTP lock. */
static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_settime64(ptp, ts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
435
/* ptp_clock_info .adjfine callback: adjust the LTC rate.
 *
 * The sub-nanosecond increment is computed from the requested ppb and the
 * chip's PTP clock period, then written as two 16-bit halves; the high half
 * also carries the load strobe (CLK_RATE_ADJ_LD) and the direction bit
 * (CLK_RATE_ADJ_DIR, set for a positive adjustment). The write order
 * matters: the low half must be in place before the load strobe commits
 * the value.
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
466
467static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
468{
469 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
470 struct timespec64 now, then;
471
472 mutex_lock(&priv->ptp_lock);
473 then = ns_to_timespec64(delta);
474 _nxp_c45_ptp_gettimex64(ptp, &now, NULL);
475 now = timespec64_add(now, then);
476 _nxp_c45_ptp_settime64(ptp, &now);
477 mutex_unlock(&priv->ptp_lock);
478
479 return 0;
480}
481
/* Rebuild a full timestamp from a partial hardware timestamp.
 *
 * The hardware only reports the 2 LSBs of the seconds counter (TS_SEC_MASK).
 * @ts holds the current LTC time on entry; its low seconds bits are replaced
 * by the hardware's. If the LTC's low bits are already smaller than the
 * hardware's, the seconds counter rolled over between the event and the LTC
 * read, so step back one full wrap period first.
 */
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
491
492static bool nxp_c45_match_ts(struct ptp_header *header,
493 struct nxp_c45_hwts *hwts,
494 unsigned int type)
495{
496 return ntohs(header->sequence_id) == hwts->sequence_id &&
497 ptp_get_msgtype(header, type) == hwts->msg_type &&
498 header->domain_number == hwts->domain_number;
499}
500
501static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
502 struct timespec64 *extts)
503{
504 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
505
506 extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
507 regmap->vend1_ext_trg_data_0);
508 extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
509 regmap->vend1_ext_trg_data_1) << 16;
510 extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
511 regmap->vend1_ext_trg_data_2);
512 extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
513 regmap->vend1_ext_trg_data_3) << 16;
514 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
515 regmap->vend1_ext_trg_ctrl, RING_DONE);
516
517 return true;
518}
519
520static bool tja1120_extts_is_valid(struct phy_device *phydev)
521{
522 bool valid;
523 int reg;
524
525 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
526 TJA1120_VEND1_PTP_TRIG_DATA_S);
527 valid = !!(reg & TJA1120_TS_VALID);
528
529 return valid;
530}
531
/* TJA1120 external-trigger timestamp getter.
 *
 * When the buffer holds no valid timestamp but the FIFO reports more
 * pending entries (MORE_TS), pop one entry into the buffer and retry once;
 * this works around an engineering-sample erratum. Returns false when no
 * valid timestamp could be obtained.
 */
static bool tja1120_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   regmap->vend1_ext_trg_ctrl);
	more_ts = !!(reg & TJA1120_MORE_TS);

	valid = tja1120_extts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_extts_out;

		/* Bug workaround for TJA1120 engineering samples: move the new
		 * timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      regmap->vend1_ext_trg_ctrl, RING_DONE);
		valid = tja1120_extts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_extts_out;
	}

	nxp_c45_get_extts(priv, extts);
tja1120_get_extts_out:
	return valid;
}
564
565static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
566 struct nxp_c45_hwts *hwts)
567{
568 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
569 struct phy_device *phydev = priv->phydev;
570
571 hwts->domain_number =
572 nxp_c45_read_reg_field(phydev, ®map->domain_number);
573 hwts->msg_type =
574 nxp_c45_read_reg_field(phydev, ®map->msg_type);
575 hwts->sequence_id =
576 nxp_c45_read_reg_field(phydev, ®map->sequence_id);
577 hwts->nsec =
578 nxp_c45_read_reg_field(phydev, ®map->nsec_15_0);
579 hwts->nsec |=
580 nxp_c45_read_reg_field(phydev, ®map->nsec_29_16) << 16;
581 hwts->sec = nxp_c45_read_reg_field(phydev, ®map->sec_1_0);
582 hwts->sec |= nxp_c45_read_reg_field(phydev, ®map->sec_4_2) << 2;
583}
584
/* TJA1103 egress timestamp getter: advance the egress ring, then check the
 * valid bit and read the timestamp fields. Returns false when the ring has
 * no valid entry. Takes priv->ptp_lock itself.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	/* Move the next ring entry into the data registers. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	nxp_c45_read_egress_ts(priv, hwts);
nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
604
605static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
606{
607 bool valid;
608 u16 reg;
609
610 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
611 valid = !!(reg & TJA1120_TS_VALID);
612
613 return valid;
614}
615
/* TJA1120 egress timestamp getter. Takes priv->ptp_lock itself.
 *
 * When the buffer holds no valid timestamp but the FIFO reports more
 * pending entries (MORE_TS), pop one entry into the buffer and retry once
 * (engineering-sample erratum workaround). After a successful read the
 * valid bit is cleared so the next entry can be detected.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
648
/* Match one egress hardware timestamp against the queued TX skbs and
 * complete the matching skb with the reconstructed timestamp. Warns when
 * the timestamp matches no queued skb.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	/* Walk the TX queue under its lock and unlink the matching skb. */
	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* Rebuild the full time from the LTC plus the partial
		 * hardware timestamp, then deliver it to the socket.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
683
/* PTP auxiliary worker: drains pending TX timestamps (when polling, i.e. no
 * IRQ line), attaches RX timestamps to queued skbs, and polls for external
 * trigger events. Returns 1 to reschedule immediately, -1 to stop.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool ts_valid;
	u32 ts_raw;

	/* Match egress timestamps against queued TX skbs until the HW has
	 * no more valid entries or the queue is empty.
	 */
	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		ts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!ts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	/* The RX timestamp was stashed by the PHY in the PTP header's
	 * reserved2 field: 2 seconds bits in the top, 30 nsec bits below.
	 */
	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	/* Poll for a new external trigger timestamp and report it only when
	 * it differs from the last one delivered.
	 */
	if (priv->extts) {
		ts_valid = data->get_extts(priv, &ts);
		if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
735
736static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
737 int pin, u16 pin_cfg)
738{
739 struct phy_device *phydev = priv->phydev;
740
741 phy_write_mmd(phydev, MDIO_MMD_VEND1,
742 VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
743}
744
745static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
746 struct ptp_perout_request *perout, int on)
747{
748 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
749 struct phy_device *phydev = priv->phydev;
750 int pin;
751
752 if (perout->flags & ~PTP_PEROUT_PHASE)
753 return -EOPNOTSUPP;
754
755 pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
756 if (pin < 0)
757 return pin;
758
759 if (!on) {
760 nxp_c45_clear_reg_field(priv->phydev,
761 ®map->pps_enable);
762 nxp_c45_clear_reg_field(priv->phydev,
763 ®map->pps_polarity);
764
765 nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
766
767 return 0;
768 }
769
770 /* The PPS signal is fixed to 1 second and is always generated when the
771 * seconds counter is incremented. The start time is not configurable.
772 * If the clock is adjusted, the PPS signal is automatically readjusted.
773 */
774 if (perout->period.sec != 1 || perout->period.nsec != 0) {
775 phydev_warn(phydev, "The period can be set only to 1 second.");
776 return -EINVAL;
777 }
778
779 if (!(perout->flags & PTP_PEROUT_PHASE)) {
780 if (perout->start.sec != 0 || perout->start.nsec != 0) {
781 phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
782 return -EINVAL;
783 }
784 } else {
785 if (perout->phase.nsec != 0 &&
786 perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
787 phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
788 return -EINVAL;
789 }
790
791 if (perout->phase.nsec == 0)
792 nxp_c45_clear_reg_field(priv->phydev,
793 ®map->pps_polarity);
794 else
795 nxp_c45_set_reg_field(priv->phydev,
796 ®map->pps_polarity);
797 }
798
799 nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);
800
801 nxp_c45_set_reg_field(priv->phydev, ®map->pps_enable);
802
803 return 0;
804}
805
/* Select the external-trigger sampling edge on chips that timestamp only
 * one edge: EXT_TRG_EDGE cleared selects rising, set selects falling.
 * When both flags are requested, falling wins (the set comes last).
 */
static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
					  struct ptp_extts_request *extts)
{
	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
}
817
/* Configure edge selection on chips that can timestamp both edges
 * independently (TJA1120): one filter bit per edge.
 */
static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
					   struct ptp_extts_request *extts)
{
	/* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
	 * this case external ts will be enabled on rising edge.
	 */
	if (extts->flags & PTP_RISING_EDGE ||
	    extts->flags == PTP_ENABLE_FEATURE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_SYNC_TRIG_FILTER,
				 PTP_TRIG_RISE_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_SYNC_TRIG_FILTER,
				   PTP_TRIG_RISE_TS);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_SYNC_TRIG_FILTER,
				 PTP_TRIG_FALLING_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_SYNC_TRIG_FILTER,
				   PTP_TRIG_FALLING_TS);
}
843
/* Enable or disable external timestamping on a GPIO pin.
 *
 * Validates the request flags, configures the sampling edge(s) according to
 * the chip's capability, programs the pin, and kicks the aux worker so it
 * starts polling for trigger events.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			      PTP_RISING_EDGE |
			      PTP_FALLING_EDGE |
			      PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE) &&
	    !data->ext_ts_both_edges)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	if (data->ext_ts_both_edges)
		nxp_c45_set_rising_and_falling(priv->phydev, extts);
	else
		nxp_c45_set_rising_or_falling(priv->phydev, extts);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Events are polled from the aux worker, not delivered by IRQ. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
885
886static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
887 struct ptp_clock_request *req, int on)
888{
889 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
890
891 switch (req->type) {
892 case PTP_CLK_REQ_EXTTS:
893 return nxp_c45_extts_enable(priv, &req->extts, on);
894 case PTP_CLK_REQ_PEROUT:
895 return nxp_c45_perout_enable(priv, &req->perout, on);
896 default:
897 return -EOPNOTSUPP;
898 }
899}
900
/* Twelve multipurpose GPIO pins, all unassigned by default; user space
 * assigns PTP_PF_PEROUT/PTP_PF_EXTTS per pin through the PTP pin interface
 * (validated by nxp_c45_ptp_verify_pin()).
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
915
916static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
917 enum ptp_pin_function func, unsigned int chan)
918{
919 if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
920 return -EINVAL;
921
922 switch (func) {
923 case PTP_PF_NONE:
924 case PTP_PF_PEROUT:
925 case PTP_PF_EXTTS:
926 break;
927 default:
928 return -EOPNOTSUPP;
929 }
930
931 return 0;
932}
933
/* Fill in the PTP clock capabilities and register the clock.
 * Returns 0 on success, -ENOMEM when PTP support is compiled out
 * (ptp_clock_register() returned NULL), or a negative errno.
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
964
/* mii_timestamper .txtstamp callback.
 *
 * When TX timestamping is on, queue the skb until its hardware timestamp
 * arrives (delivered either by IRQ or by the polling aux worker);
 * otherwise drop the clone.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		/* No IRQ line: the aux worker must poll for the timestamp. */
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
986
/* mii_timestamper .rxtstamp callback.
 *
 * When RX timestamping is on and the frame has a PTP header, defer the skb
 * to the aux worker, which reconstructs the timestamp the PHY stashed in
 * the header's reserved2 field. Returns true when the skb was taken.
 */
static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct ptp_header *header = ptp_parse_header(skb, type);

	if (!header)
		return false;

	if (!priv->hwts_rx)
		return false;

	NXP_C45_SKB_CB(skb)->header = header;
	skb_queue_tail(&priv->rx_queue, skb);
	ptp_schedule_worker(priv->ptp_clock, 0);

	return true;
}
1006
/* mii_timestamper .hwtstamp callback: apply a SIOCSHWTSTAMP configuration.
 *
 * Supports TX off/on and RX filtering of L2 PTPv2 events (all PTPv2-over-L2
 * filters are widened to HWTSTAMP_FILTER_PTP_V2_L2_EVENT). Enables or
 * disables the PHY's PTP engine accordingly, and — when an IRQ line is
 * available — the egress-timestamp interrupt.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	const struct nxp_c45_phy_data *data;

	if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	data = nxp_c45_get_data(phydev);
	priv->hwts_tx = cfg->tx_type;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		priv->hwts_rx = 1;
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_ALL);
		data->ptp_enable(phydev, true);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_NONE);
		data->ptp_enable(phydev, false);
	}

	/* Without an IRQ line, egress timestamps are polled instead. */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
	else
		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);

nxp_c45_no_ptp_irq:
	return 0;
}
1059
/* mii_timestamper .ts_info callback: report the timestamping capabilities
 * and the PHC index for ethtool --show-time-stamping.
 */
static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
			   struct ethtool_ts_info *ts_info)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				   SOF_TIMESTAMPING_RX_HARDWARE |
				   SOF_TIMESTAMPING_RAW_HARDWARE;
	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);

	return 0;
}
1078
/* Hardware statistics common to all supported chips (link drop/loss and
 * symbol error counters in MDIO_MMD_VEND1).
 */
static const struct nxp_c45_phy_stats common_hw_stats[] = {
	{ "phy_link_status_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
	{ "phy_link_availability_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
	{ "phy_link_loss_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
	{ "phy_link_failure_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
	{ "phy_symbol_error_cnt",
		NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
};
1091
/* TJA1103-specific ethtool -S counters, appended after common_hw_stats. */
static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
	{ "rx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
	{ "tx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
	{ "rx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
	{ "tx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
};
1102
/* TJA1120-specific ethtool -S counters, appended after common_hw_stats.
 * The "_xtd" entries hold the upper bits extending the 16-bit counters.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
		NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
		NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
		NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
		NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
		NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1123
1124static int nxp_c45_get_sset_count(struct phy_device *phydev)
1125{
1126 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1127
1128 return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1129}
1130
1131static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
1132{
1133 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1134 size_t count = nxp_c45_get_sset_count(phydev);
1135 size_t idx;
1136 size_t i;
1137
1138 for (i = 0; i < count; i++) {
1139 if (i < ARRAY_SIZE(common_hw_stats)) {
1140 strscpy(data + i * ETH_GSTRING_LEN,
1141 common_hw_stats[i].name, ETH_GSTRING_LEN);
1142 continue;
1143 }
1144 idx = i - ARRAY_SIZE(common_hw_stats);
1145 strscpy(data + i * ETH_GSTRING_LEN,
1146 phy_data->stats[idx].name, ETH_GSTRING_LEN);
1147 }
1148}
1149
1150static void nxp_c45_get_stats(struct phy_device *phydev,
1151 struct ethtool_stats *stats, u64 *data)
1152{
1153 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1154 size_t count = nxp_c45_get_sset_count(phydev);
1155 const struct nxp_c45_reg_field *reg_field;
1156 size_t idx;
1157 size_t i;
1158 int ret;
1159
1160 for (i = 0; i < count; i++) {
1161 if (i < ARRAY_SIZE(common_hw_stats)) {
1162 reg_field = &common_hw_stats[i].counter;
1163 } else {
1164 idx = i - ARRAY_SIZE(common_hw_stats);
1165 reg_field = &phy_data->stats[idx].counter;
1166 }
1167
1168 ret = nxp_c45_read_reg_field(phydev, reg_field);
1169 if (ret < 0)
1170 data[i] = U64_MAX;
1171 else
1172 data[i] = ret;
1173 }
1174}
1175
/* Open the device, port and PHY configuration gates so subsequent
 * register writes take effect. The short delay after the global enable
 * gives the device time to apply it before the port-level enables are
 * written; the write order below is therefore significant.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
1192
/* Kick the PHY out of configuration mode and start normal operation. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1198
1199static int nxp_c45_config_intr(struct phy_device *phydev)
1200{
1201 int ret;
1202
1203 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1204 ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1205 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1206 if (ret)
1207 return ret;
1208
1209 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1210 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1211 }
1212
1213 ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1214 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1215 if (ret)
1216 return ret;
1217
1218 return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1219 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1220}
1221
/* TJA1103 .config_intr callback: clear any latched FUSA status before
 * delegating to the common IRQ configuration.
 */
static int tja1103_config_intr(struct phy_device *phydev)
{
	/* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
			    FUSA_PASS);
	if (ret)
		return ret;

	return nxp_c45_config_intr(phydev);
}
1234
1235static int tja1120_config_intr(struct phy_device *phydev)
1236{
1237 int ret;
1238
1239 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1240 ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1241 TJA1120_GLOBAL_INFRA_IRQ_EN,
1242 TJA1120_DEV_BOOT_DONE);
1243 else
1244 ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1245 TJA1120_GLOBAL_INFRA_IRQ_EN,
1246 TJA1120_DEV_BOOT_DONE);
1247 if (ret)
1248 return ret;
1249
1250 return nxp_c45_config_intr(phydev);
1251}
1252
/* Threaded IRQ handler. Checks and acknowledges, in order: the link
 * event IRQ, the egress timestamp IRQ (draining the timestamp FIFO),
 * the device-specific NMI sources, and the MACsec IRQs.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		/* Ack the event, then let phylib re-read the link state. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp.
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		/* Drain all pending egress timestamps and match each one
		 * against the queued TX skbs.
		 */
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	data->nmi_handler(phydev, &ret);
	nxp_c45_handle_macsec_interrupt(phydev, &ret);

	return ret;
}
1289
/* .soft_reset callback: request a device reset and poll (20 ms period,
 * 240 ms timeout) until the hardware clears the self-clearing reset bit.
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1304
1305static int nxp_c45_cable_test_start(struct phy_device *phydev)
1306{
1307 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1308
1309 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1310 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1311 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1312 CABLE_TEST_ENABLE | CABLE_TEST_START);
1313}
1314
1315static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
1316 bool *finished)
1317{
1318 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1319 int ret;
1320 u8 cable_test_result;
1321
1322 ret = nxp_c45_read_reg_field(phydev, ®map->cable_test_valid);
1323 if (!ret) {
1324 *finished = false;
1325 return 0;
1326 }
1327
1328 *finished = true;
1329 cable_test_result = nxp_c45_read_reg_field(phydev,
1330 ®map->cable_test_result);
1331
1332 switch (cable_test_result) {
1333 case CABLE_TEST_OK:
1334 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1335 ETHTOOL_A_CABLE_RESULT_CODE_OK);
1336 break;
1337 case CABLE_TEST_SHORTED:
1338 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1339 ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
1340 break;
1341 case CABLE_TEST_OPEN:
1342 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1343 ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
1344 break;
1345 default:
1346 ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1347 ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
1348 }
1349
1350 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1351 CABLE_TEST_ENABLE);
1352 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1353 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1354
1355 return nxp_c45_start_op(phydev);
1356}
1357
1358static int nxp_c45_get_sqi(struct phy_device *phydev)
1359{
1360 int reg;
1361
1362 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1363 if (!(reg & SQI_VALID))
1364 return -EINVAL;
1365
1366 reg &= SQI_MASK;
1367
1368 return reg;
1369}
1370
/* .link_change_notify callback for the TJA1120: pulse the embedded PHY
 * PCS reset whenever the link drops.
 */
static void tja1120_link_change_notify(struct phy_device *phydev)
{
	/* Bug workaround for TJA1120 engineering samples: fix egress
	 * timestamps lost after link recovery.
	 */
	if (phydev->state == PHY_NOLINK) {
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
	}
}
1383
/* .get_sqi_max callback: highest SQI value the hardware can report. */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1388
1389static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1390{
1391 if (delay < MIN_ID_PS) {
1392 phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1393 return -EINVAL;
1394 }
1395
1396 if (delay > MAX_ID_PS) {
1397 phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1398 return -EINVAL;
1399 }
1400
1401 return 0;
1402}
1403
/* Turn on the statistics counters: the common link-drop counter here,
 * then the device-specific counters via the per-device hook.
 */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
1413
/* Common PTP bring-up: program the clock period, unlock the local time
 * counter for software writes, then run the device-specific PTP init.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1425
1426static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1427{
1428 /* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1429 * To avoid floating point operations we'll multiply by 10
1430 * and get 1 decimal point precision.
1431 */
1432 phase_offset_raw *= 10;
1433 phase_offset_raw -= 738;
1434 return div_u64(phase_offset_raw, 9);
1435}
1436
/* Disable both the TX and RX RGMII internal delay lines. */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
1442
1443static void nxp_c45_set_delays(struct phy_device *phydev)
1444{
1445 struct nxp_c45_phy *priv = phydev->priv;
1446 u64 tx_delay = priv->tx_delay;
1447 u64 rx_delay = priv->rx_delay;
1448 u64 degree;
1449
1450 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1451 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1452 degree = div_u64(tx_delay, PS_PER_DEGREE);
1453 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1454 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1455 } else {
1456 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1457 ID_ENABLE);
1458 }
1459
1460 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1461 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1462 degree = div_u64(rx_delay, PS_PER_DEGREE);
1463 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1464 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1465 } else {
1466 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1467 ID_ENABLE);
1468 }
1469}
1470
/* Read the "tx-internal-delay-ps" / "rx-internal-delay-ps" device
 * properties (when the interface mode needs them), falling back to
 * DEFAULT_ID_PS, and validate each value against the supported range.
 * Returns 0 on success or a negative errno for an out-of-range value.
 */
static int nxp_c45_get_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	int ret;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "tx-internal-delay-ps",
					       &priv->tx_delay);
		if (ret)
			priv->tx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
		if (ret) {
			phydev_err(phydev,
				   "tx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "rx-internal-delay-ps",
					       &priv->rx_delay);
		if (ret)
			priv->rx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
		if (ret) {
			phydev_err(phydev,
				   "rx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	return 0;
}
1510
/* Configure the MII basic configuration register to match
 * phydev->interface, after checking the requested mode against the
 * abilities the hardware advertises. RGMII modes additionally set up or
 * disable the internal delay lines. Returns -EINVAL for an unsupported
 * mode.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	int ret;

	/* ret holds the abilities bitmap until it is reused below. */
	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RMII);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1583
/* .config_init callback: open the configuration gates, apply the
 * SJA1110 rev B workaround, select the MII mode, enable counters, bring
 * up PTP and MACsec, then start normal operation.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* No autonegotiation on these devices. */
	phydev->autoneg = AUTONEG_DISABLE;

	nxp_c45_counters_enable(phydev);
	nxp_c45_ptp_init(phydev);
	ret = nxp_c45_macsec_config_init(phydev);
	if (ret)
		return ret;

	return nxp_c45_start_op(phydev);
}
1617
/* .get_features callback: advertise twisted-pair and MII support, then
 * fill in the rest from the C45 PMA abilities registers.
 */
static int nxp_c45_get_features(struct phy_device *phydev)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);

	return genphy_c45_pma_read_abilities(phydev);
}
1625
/* .probe callback: allocate the private state, then conditionally set
 * up PTP timestamping and MACsec depending on what the hardware
 * advertises in VEND1_PORT_ABILITIES and on which kernel options are
 * enabled. Missing abilities are not errors — the corresponding feature
 * is simply skipped.
 */
static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	bool macsec_ability;
	int phy_abilities;
	bool ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Queues for skbs awaiting TX/RX timestamps. */
	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	mutex_init(&priv->ptp_lock);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	ptp_ability = !!(phy_abilities & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_dbg(phydev, "the phy does not support PTP");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		/* Hook the PHY timestamper ops and register the PTP clock. */
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		ret = nxp_c45_init_ptp_clock(priv);
	} else {
		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
	}

no_ptp_support:
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	if (!macsec_ability) {
		phydev_info(phydev, "the phy does not support MACsec\n");
		goto no_macsec_support;
	}

	if (IS_ENABLED(CONFIG_MACSEC)) {
		ret = nxp_c45_macsec_probe(phydev);
		phydev_dbg(phydev, "MACsec support enabled.");
	} else {
		phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
	}

no_macsec_support:

	return ret;
}
1685
/* .remove callback: unregister the PTP clock (if one was created in
 * probe), drop any skbs still waiting for timestamps, and tear down
 * MACsec state. priv itself is devm-allocated and freed by the core.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
	nxp_c45_macsec_remove(phydev);
}
1697
/* Enable the TJA1103-specific statistics counters (preamble and IPG
 * length counters) reported by tja1103_hw_stats.
 */
static void tja1103_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);
}
1709
/* TJA1103 PTP init hook: select RX timestamp insertion mode 2 and
 * enable the port PTP function.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1717
/* TJA1103 PTP enable hook: on this device the PTP block is enabled by
 * deasserting the port bypass bit and disabled by asserting it.
 */
static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (enable)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
	else
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PORT_PTP_CONTROL,
				 PORT_PTP_CONTROL_BYPASS);
}
1729
/* TJA1103 NMI hook, called from the shared IRQ handler: if the FUSA
 * status is latched, ack it (write-1-to-clear) and mark the IRQ as
 * handled.
 */
static void tja1103_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_ALWAYS_ACCESSIBLE);
	if (ret & FUSA_PASS) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      VEND1_ALWAYS_ACCESSIBLE,
			      FUSA_PASS);
		*irq_status = IRQ_HANDLED;
	}
}
1744
/* TJA1103 register layout: PTP/LTC, egress timestamp ring, external
 * trigger and cable test registers/fields specific to this device.
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period = 0x1104,
	.vend1_event_msg_filt = 0x1148,
	.pps_enable =
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity =
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl =
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read =
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write =
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0 = 0x1106,
	.vend1_ltc_wr_nsec_1 = 0x1107,
	.vend1_ltc_wr_sec_0 = 0x1108,
	.vend1_ltc_wr_sec_1 = 0x1109,
	.vend1_ltc_rd_nsec_0 = 0x110A,
	.vend1_ltc_rd_nsec_1 = 0x110B,
	.vend1_ltc_rd_sec_0 = 0x110C,
	.vend1_ltc_rd_sec_1 = 0x110D,
	.vend1_rate_adj_subns_0 = 0x110F,
	.vend1_rate_adj_subns_1 = 0x1110,
	.irq_egr_ts_en =
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status =
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number =
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type =
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id =
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0 =
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2 =
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0 =
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16 =
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0 = 0x1121,
	.vend1_ext_trg_data_1 = 0x1122,
	.vend1_ext_trg_data_2 = 0x1123,
	.vend1_ext_trg_data_3 = 0x1124,
	.vend1_ext_trg_ctrl = 0x1126,
	.cable_test = 0x8330,
	.cable_test_valid =
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result =
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1797
/* TJA1103 device description: regmap, stats table and per-device hooks
 * consumed by the common driver code via nxp_c45_get_data().
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	.ack_ptp_irq = false,
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.get_extts = nxp_c45_get_extts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
	.nmi_handler = tja1103_nmi_handler,
};
1812
/* Enable the TJA1120-specific statistics counters: the extended symbol
 * error counter and, after a monitor reset, the frame/lost-frame
 * counters reported by tja1120_hw_stats.
 */
static void tja1120_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
			 EXTENDED_CNT_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
			 MONITOR_RESET);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
}
1822
/* TJA1120 PTP init hook: configure RX and external timestamp insertion
 * modes, then enable the device PTP function.
 */
static void tja1120_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
		      TJA1120_TS_INSRT_MODE);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
			 PTP_ENABLE);
}
1832
/* TJA1120 PTP enable hook: unlike the TJA1103 (which uses a bypass
 * bit), this device toggles PTP via a direct enable bit in the port
 * function enables register.
 */
static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (enable)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PORT_FUNC_ENABLES,
				 PTP_ENABLE);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_FUNC_ENABLES,
				   PTP_ENABLE);
}
1844
/* TJA1120 NMI hook, called from the shared IRQ handler: ack the
 * device-boot-done interrupt if it is pending and mark the IRQ as
 * handled.
 */
static void tja1120_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   TJA1120_GLOBAL_INFRA_IRQ_STATUS);
	if (ret & TJA1120_DEV_BOOT_DONE) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_GLOBAL_INFRA_IRQ_ACK,
			      TJA1120_DEV_BOOT_DONE);
		*irq_status = IRQ_HANDLED;
	}
}
1859
/* TJA1120 register layout: PTP/LTC, egress timestamp ring, external
 * trigger and cable test registers/fields specific to this device.
 */
static const struct nxp_c45_regmap tja1120_regmap = {
	.vend1_ptp_clk_period = 0x1020,
	.vend1_event_msg_filt = 0x9010,
	.pps_enable =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	.ltc_read =
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write =
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	.vend1_ltc_wr_nsec_0 = 0x1040,
	.vend1_ltc_wr_nsec_1 = 0x1041,
	.vend1_ltc_wr_sec_0 = 0x1042,
	.vend1_ltc_wr_sec_1 = 0x1043,
	.vend1_ltc_rd_nsec_0 = 0x1048,
	.vend1_ltc_rd_nsec_1 = 0x1049,
	.vend1_ltc_rd_sec_0 = 0x104A,
	.vend1_ltc_rd_sec_1 = 0x104B,
	.vend1_rate_adj_subns_0 = 0x1030,
	.vend1_rate_adj_subns_1 = 0x1031,
	.irq_egr_ts_en =
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status =
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	.domain_number =
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type =
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id =
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0 =
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2 =
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0 =
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16 =
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0 = 0x1071,
	.vend1_ext_trg_data_1 = 0x1072,
	.vend1_ext_trg_data_2 = 0x1073,
	.vend1_ext_trg_data_3 = 0x1074,
	.vend1_ext_trg_ctrl = 0x1075,
	.cable_test = 0x8360,
	.cable_test_valid =
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result =
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
1912
/* TJA1120 device description: regmap, stats table and per-device hooks
 * consumed by the common driver code via nxp_c45_get_data().
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	.ext_ts_both_edges = true,
	.ack_ptp_irq = true,
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.get_extts = tja1120_get_extts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
	.nmi_handler = tja1120_nmi_handler,
};
1927
/* phylib driver entries: one per supported model, sharing the common
 * callbacks and differing only in driver_data, .config_intr and the
 * TJA1120-only .link_change_notify workaround.
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
		.name			= "NXP C45 TJA1120",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
};
1979
1980module_phy_driver(nxp_c45_driver);
1981
/* MDIO device ID table for module autoloading. */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};
1987
1988MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);
1989
1990MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
1991MODULE_DESCRIPTION("NXP C45 PHY driver");
1992MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/* NXP C45 PHY driver
3 * Copyright (C) 2021 NXP
4 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5 */
6
7#include <linux/delay.h>
8#include <linux/ethtool.h>
9#include <linux/ethtool_netlink.h>
10#include <linux/kernel.h>
11#include <linux/mii.h>
12#include <linux/module.h>
13#include <linux/phy.h>
14#include <linux/processor.h>
15#include <linux/property.h>
16#include <linux/ptp_classify.h>
17#include <linux/ptp_clock_kernel.h>
18#include <linux/net_tstamp.h>
19
20#define PHY_ID_TJA_1103 0x001BB010
21
22#define PMAPMD_B100T1_PMAPMD_CTL 0x0834
23#define B100T1_PMAPMD_CONFIG_EN BIT(15)
24#define B100T1_PMAPMD_MASTER BIT(14)
25#define MASTER_MODE (B100T1_PMAPMD_CONFIG_EN | \
26 B100T1_PMAPMD_MASTER)
27#define SLAVE_MODE (B100T1_PMAPMD_CONFIG_EN)
28
29#define VEND1_DEVICE_CONTROL 0x0040
30#define DEVICE_CONTROL_RESET BIT(15)
31#define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
32#define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13)
33
34#define VEND1_PHY_IRQ_ACK 0x80A0
35#define VEND1_PHY_IRQ_EN 0x80A1
36#define VEND1_PHY_IRQ_STATUS 0x80A2
37#define PHY_IRQ_LINK_EVENT BIT(1)
38
39#define VEND1_PHY_CONTROL 0x8100
40#define PHY_CONFIG_EN BIT(14)
41#define PHY_START_OP BIT(0)
42
43#define VEND1_PHY_CONFIG 0x8108
44#define PHY_CONFIG_AUTO BIT(0)
45
46#define VEND1_SIGNAL_QUALITY 0x8320
47#define SQI_VALID BIT(14)
48#define SQI_MASK GENMASK(2, 0)
49#define MAX_SQI SQI_MASK
50
51#define VEND1_CABLE_TEST 0x8330
52#define CABLE_TEST_ENABLE BIT(15)
53#define CABLE_TEST_START BIT(14)
54#define CABLE_TEST_VALID BIT(13)
55#define CABLE_TEST_OK 0x00
56#define CABLE_TEST_SHORTED 0x01
57#define CABLE_TEST_OPEN 0x02
58#define CABLE_TEST_UNKNOWN 0x07
59
60#define VEND1_PORT_CONTROL 0x8040
61#define PORT_CONTROL_EN BIT(14)
62
63#define VEND1_PORT_ABILITIES 0x8046
64#define PTP_ABILITY BIT(3)
65
66#define VEND1_PORT_INFRA_CONTROL 0xAC00
67#define PORT_INFRA_CONTROL_EN BIT(14)
68
69#define VEND1_RXID 0xAFCC
70#define VEND1_TXID 0xAFCD
71#define ID_ENABLE BIT(15)
72
73#define VEND1_ABILITIES 0xAFC4
74#define RGMII_ID_ABILITY BIT(15)
75#define RGMII_ABILITY BIT(14)
76#define RMII_ABILITY BIT(10)
77#define REVMII_ABILITY BIT(9)
78#define MII_ABILITY BIT(8)
79#define SGMII_ABILITY BIT(0)
80
81#define VEND1_MII_BASIC_CONFIG 0xAFC6
82#define MII_BASIC_CONFIG_REV BIT(8)
83#define MII_BASIC_CONFIG_SGMII 0x9
84#define MII_BASIC_CONFIG_RGMII 0x7
85#define MII_BASIC_CONFIG_RMII 0x5
86#define MII_BASIC_CONFIG_MII 0x4
87
88#define VEND1_SYMBOL_ERROR_COUNTER 0x8350
89#define VEND1_LINK_DROP_COUNTER 0x8352
90#define VEND1_LINK_LOSSES_AND_FAILURES 0x8353
91#define VEND1_R_GOOD_FRAME_CNT 0xA950
92#define VEND1_R_BAD_FRAME_CNT 0xA952
93#define VEND1_R_RXER_FRAME_CNT 0xA954
94#define VEND1_RX_PREAMBLE_COUNT 0xAFCE
95#define VEND1_TX_PREAMBLE_COUNT 0xAFCF
96#define VEND1_RX_IPG_LENGTH 0xAFD0
97#define VEND1_TX_IPG_LENGTH 0xAFD1
98#define COUNTER_EN BIT(15)
99
100#define VEND1_PTP_CONFIG 0x1102
101#define EXT_TRG_EDGE BIT(1)
102#define PPS_OUT_POL BIT(2)
103#define PPS_OUT_EN BIT(3)
104
105#define VEND1_LTC_LOAD_CTRL 0x1105
106#define READ_LTC BIT(2)
107#define LOAD_LTC BIT(0)
108
109#define VEND1_LTC_WR_NSEC_0 0x1106
110#define VEND1_LTC_WR_NSEC_1 0x1107
111#define VEND1_LTC_WR_SEC_0 0x1108
112#define VEND1_LTC_WR_SEC_1 0x1109
113
114#define VEND1_LTC_RD_NSEC_0 0x110A
115#define VEND1_LTC_RD_NSEC_1 0x110B
116#define VEND1_LTC_RD_SEC_0 0x110C
117#define VEND1_LTC_RD_SEC_1 0x110D
118
119#define VEND1_RATE_ADJ_SUBNS_0 0x110F
120#define VEND1_RATE_ADJ_SUBNS_1 0x1110
121#define CLK_RATE_ADJ_LD BIT(15)
122#define CLK_RATE_ADJ_DIR BIT(14)
123
124#define VEND1_HW_LTC_LOCK_CTRL 0x1115
125#define HW_LTC_LOCK_EN BIT(0)
126
127#define VEND1_PTP_IRQ_EN 0x1131
128#define VEND1_PTP_IRQ_STATUS 0x1132
129#define PTP_IRQ_EGR_TS BIT(0)
130
131#define VEND1_RX_TS_INSRT_CTRL 0x114D
132#define RX_TS_INSRT_MODE2 0x02
133
134#define VEND1_EGR_RING_DATA_0 0x114E
135#define VEND1_EGR_RING_DATA_1_SEQ_ID 0x114F
136#define VEND1_EGR_RING_DATA_2_NSEC_15_0 0x1150
137#define VEND1_EGR_RING_DATA_3 0x1151
138#define VEND1_EGR_RING_CTRL 0x1154
139
140#define VEND1_EXT_TRG_TS_DATA_0 0x1121
141#define VEND1_EXT_TRG_TS_DATA_1 0x1122
142#define VEND1_EXT_TRG_TS_DATA_2 0x1123
143#define VEND1_EXT_TRG_TS_DATA_3 0x1124
144#define VEND1_EXT_TRG_TS_DATA_4 0x1125
145#define VEND1_EXT_TRG_TS_CTRL 0x1126
146
#define RING_DATA_0_DOMAIN_NUMBER	GENMASK(7, 0)
#define RING_DATA_0_MSG_TYPE		GENMASK(11, 8)
/* Seconds bits 4:2 of the egress timestamp occupy register bits 14:12 and
 * are shifted right by 10 when read (see nxp_c45_get_hwtxts()), landing at
 * bits 4:2 of hwts->sec. The field is 3 bits wide, so the mask must be
 * GENMASK(14, 12); a wider mask would fold the message-type and
 * domain-number bits into the seconds value.
 */
#define RING_DATA_0_SEC_4_2		GENMASK(14, 12)
#define RING_DATA_0_TS_VALID		BIT(15)

#define RING_DATA_3_NSEC_29_16		GENMASK(13, 0)
#define RING_DATA_3_SEC_1_0		GENMASK(15, 14)
#define RING_DATA_5_SEC_16_5		GENMASK(15, 4)
#define RING_DONE			BIT(0)
156
157#define TS_SEC_MASK GENMASK(1, 0)
158
159#define VEND1_PORT_FUNC_ENABLES 0x8048
160#define PTP_ENABLE BIT(3)
161
162#define VEND1_PORT_PTP_CONTROL 0x9000
163#define PORT_PTP_CONTROL_BYPASS BIT(11)
164
165#define VEND1_PTP_CLK_PERIOD 0x1104
166#define PTP_CLK_PERIOD_100BT1 15ULL
167
168#define VEND1_EVENT_MSG_FILT 0x1148
169#define EVENT_MSG_FILT_ALL 0x0F
170#define EVENT_MSG_FILT_NONE 0x00
171
172#define VEND1_TX_PIPE_DLY_NS 0x1149
173#define VEND1_TX_PIPEDLY_SUBNS 0x114A
174#define VEND1_RX_PIPE_DLY_NS 0x114B
175#define VEND1_RX_PIPEDLY_SUBNS 0x114C
176
177#define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40
178#define GPIO_FUNC_EN BIT(15)
179#define GPIO_FUNC_PTP BIT(6)
180#define GPIO_SIGNAL_PTP_TRIGGER 0x01
181#define GPIO_SIGNAL_PPS_OUT 0x12
182#define GPIO_DISABLE 0
183#define GPIO_PPS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
184 GPIO_SIGNAL_PPS_OUT)
185#define GPIO_EXTTS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
186 GPIO_SIGNAL_PTP_TRIGGER)
187
188#define RGMII_PERIOD_PS 8000U
189#define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
190#define MIN_ID_PS 1644U
191#define MAX_ID_PS 2260U
192#define DEFAULT_ID_PS 2000U
193
194#define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK(31, 0) * (ppb) * \
195 PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
196
197#define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
198
/* Per-skb driver state stored in skb->cb.
 * @header: parsed PTP header, used for TX timestamp matching and RX
 *	    timestamp reconstruction.
 * @type: PTP classification value passed to ptp_parse_header().
 */
struct nxp_c45_skb_cb {
	struct ptp_header *header;
	unsigned int type;
};
203
/* Partial hardware timestamp read from the egress timestamp registers.
 * Hardware delivers only the low seconds bits (see TS_SEC_MASK); the full
 * time is reconstructed against the LTC in nxp_c45_reconstruct_ts().
 */
struct nxp_c45_hwts {
	u32 nsec;		/* nanoseconds part of the timestamp */
	u32 sec;		/* low bits of the seconds counter only */
	u8 domain_number;	/* PTP domainNumber, for skb matching */
	u16 sequence_id;	/* PTP sequenceId, for skb matching */
	u8 msg_type;		/* PTP messageType, for skb matching */
};
211
/* Driver private data.
 * @phydev: back-pointer to the PHY device.
 * @mii_ts: MII timestamping operations registered with phylib.
 * @ptp_clock: registered PTP hardware clock.
 * @caps: PTP clock capabilities and callbacks.
 * @tx_queue: skbs waiting for an egress hardware timestamp.
 * @rx_queue: received skbs waiting for timestamp reconstruction.
 * @ptp_lock: serializes access to the PTP registers.
 * @hwts_tx: TX timestamping mode (HWTSTAMP_TX_*).
 * @hwts_rx: non-zero when RX timestamping is enabled.
 * @tx_delay: TX internal delay in picoseconds.
 * @rx_delay: RX internal delay in picoseconds.
 * @extts_ts: last external trigger timestamp reported.
 * @extts_index: pin index of the active external trigger.
 * @extts: true while external timestamping is enabled.
 */
struct nxp_c45_phy {
	struct phy_device *phydev;
	struct mii_timestamper mii_ts;
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info caps;
	struct sk_buff_head tx_queue;
	struct sk_buff_head rx_queue;
	/* used to access the PTP registers atomic */
	struct mutex ptp_lock;
	int hwts_tx;
	int hwts_rx;
	u32 tx_delay;
	u32 rx_delay;
	struct timespec64 extts_ts;
	int extts_index;
	bool extts;
};
229
/* Descriptor of one hardware statistic exposed through ethtool -S.
 * @name: counter name shown to user space.
 * @mmd: MMD the counter register lives in.
 * @reg: register address.
 * @off: bit offset of the counter field within the register.
 * @mask: mask applied to the register value before shifting by @off.
 */
struct nxp_c45_phy_stats {
	const char *name;
	u8 mmd;
	u16 reg;
	u8 off;
	u16 mask;
};
237
238static bool nxp_c45_poll_txts(struct phy_device *phydev)
239{
240 return phydev->irq <= 0;
241}
242
243static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
244 struct timespec64 *ts,
245 struct ptp_system_timestamp *sts)
246{
247 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
248
249 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
250 READ_LTC);
251 ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
252 VEND1_LTC_RD_NSEC_0);
253 ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
254 VEND1_LTC_RD_NSEC_1) << 16;
255 ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
256 VEND1_LTC_RD_SEC_0);
257 ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
258 VEND1_LTC_RD_SEC_1) << 16;
259
260 return 0;
261}
262
263static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
264 struct timespec64 *ts,
265 struct ptp_system_timestamp *sts)
266{
267 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
268
269 mutex_lock(&priv->ptp_lock);
270 _nxp_c45_ptp_gettimex64(ptp, ts, sts);
271 mutex_unlock(&priv->ptp_lock);
272
273 return 0;
274}
275
276static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
277 const struct timespec64 *ts)
278{
279 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
280
281 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_0,
282 ts->tv_nsec);
283 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_1,
284 ts->tv_nsec >> 16);
285 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_0,
286 ts->tv_sec);
287 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_1,
288 ts->tv_sec >> 16);
289 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
290 LOAD_LTC);
291
292 return 0;
293}
294
295static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
296 const struct timespec64 *ts)
297{
298 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
299
300 mutex_lock(&priv->ptp_lock);
301 _nxp_c45_ptp_settime64(ptp, ts);
302 mutex_unlock(&priv->ptp_lock);
303
304 return 0;
305}
306
307static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
308{
309 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
310 s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
311 u64 subns_inc_val;
312 bool inc;
313
314 mutex_lock(&priv->ptp_lock);
315 inc = ppb >= 0;
316 ppb = abs(ppb);
317
318 subns_inc_val = PPM_TO_SUBNS_INC(ppb);
319
320 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_0,
321 subns_inc_val);
322 subns_inc_val >>= 16;
323 subns_inc_val |= CLK_RATE_ADJ_LD;
324 if (inc)
325 subns_inc_val |= CLK_RATE_ADJ_DIR;
326
327 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_1,
328 subns_inc_val);
329 mutex_unlock(&priv->ptp_lock);
330
331 return 0;
332}
333
334static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
335{
336 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
337 struct timespec64 now, then;
338
339 mutex_lock(&priv->ptp_lock);
340 then = ns_to_timespec64(delta);
341 _nxp_c45_ptp_gettimex64(ptp, &now, NULL);
342 now = timespec64_add(now, then);
343 _nxp_c45_ptp_settime64(ptp, &now);
344 mutex_unlock(&priv->ptp_lock);
345
346 return 0;
347}
348
349static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
350 struct nxp_c45_hwts *hwts)
351{
352 ts->tv_nsec = hwts->nsec;
353 if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
354 ts->tv_sec -= TS_SEC_MASK + 1;
355 ts->tv_sec &= ~TS_SEC_MASK;
356 ts->tv_sec |= hwts->sec & TS_SEC_MASK;
357}
358
359static bool nxp_c45_match_ts(struct ptp_header *header,
360 struct nxp_c45_hwts *hwts,
361 unsigned int type)
362{
363 return ntohs(header->sequence_id) == hwts->sequence_id &&
364 ptp_get_msgtype(header, type) == hwts->msg_type &&
365 header->domain_number == hwts->domain_number;
366}
367
368static void nxp_c45_get_extts(struct nxp_c45_phy *priv,
369 struct timespec64 *extts)
370{
371 extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
372 VEND1_EXT_TRG_TS_DATA_0);
373 extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
374 VEND1_EXT_TRG_TS_DATA_1) << 16;
375 extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
376 VEND1_EXT_TRG_TS_DATA_2);
377 extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
378 VEND1_EXT_TRG_TS_DATA_3) << 16;
379 phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EXT_TRG_TS_CTRL,
380 RING_DONE);
381}
382
/* Pop one entry from the egress timestamp FIFO into @hwts.
 * Returns true when a valid entry was read, false when the FIFO is empty.
 * Takes ptp_lock to serialize access to the ring registers.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	/* Advance the ring so the next entry shows up in the DATA regs. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	/* DATA_0 packs the domain number, message type and low seconds
	 * bits; the remaining DATA regs carry sequence id and nanoseconds.
	 */
	hwts->domain_number = reg;
	hwts->msg_type = (reg & RING_DATA_0_MSG_TYPE) >> 8;
	hwts->sec = (reg & RING_DATA_0_SEC_4_2) >> 10;
	hwts->sequence_id = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
					 VEND1_EGR_RING_DATA_1_SEQ_ID);
	hwts->nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  VEND1_EGR_RING_DATA_2_NSEC_15_0);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_3);
	hwts->nsec |= (reg & RING_DATA_3_NSEC_29_16) << 16;
	hwts->sec |= (reg & RING_DATA_3_SEC_1_0) >> 14;

nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
412
/* Deliver one egress hardware timestamp: find the queued TX skb whose PTP
 * header matches @txts, reconstruct the full time against the current LTC
 * value and complete the skb's TX timestamp.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	/* Walk under the queue lock so concurrent enqueues stay safe. */
	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* The hardware timestamp is partial; rebuild it from the
		 * current LTC reading before reporting it.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
447
/* PTP aux worker. Drains the egress timestamp FIFO when polling mode is in
 * use (no IRQ line), timestamps and delivers queued RX skbs, and polls the
 * external trigger capture when extts is enabled.
 * Returns a positive delay (jiffies) to be rescheduled, negative to stop.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool txts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		txts_valid = nxp_c45_get_hwtxts(priv, &hwts);
		if (unlikely(!txts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		/* Hardware stored 2 seconds bits + 30 ns bits in the PTP
		 * header's reserved2 field; rebuild the full time from the
		 * current LTC and clear the field before delivery.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		/* Report an event only when the captured timestamp changed
		 * since the previous poll.
		 */
		nxp_c45_get_extts(priv, &ts);
		if (timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
498
499static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
500 int pin, u16 pin_cfg)
501{
502 struct phy_device *phydev = priv->phydev;
503
504 phy_write_mmd(phydev, MDIO_MMD_VEND1,
505 VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
506}
507
/* Enable or disable the PPS output on the pin assigned to @perout->index.
 * The pulse is generated by hardware on every seconds rollover of the LTC,
 * so only a 1 s period with a zero start time is supported; a phase of
 * half a second is implemented by inverting the output polarity.
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	struct phy_device *phydev = priv->phydev;
	int pin;

	if (perout->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
				   PPS_OUT_EN);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
				   PPS_OUT_POL);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		/* Only a 0 or half-second phase can be produced, by
		 * selecting the output polarity.
		 */
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		if (perout->phase.nsec == 0)
			phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					   VEND1_PTP_CONFIG, PPS_OUT_POL);
		else
			phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					 VEND1_PTP_CONFIG, PPS_OUT_POL);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, PPS_OUT_EN);

	return 0;
}
567
/* Enable or disable external trigger timestamping on the pin assigned to
 * @extts->index. Only one edge (rising or falling) can be sampled; the
 * captured timestamps are polled by the PTP aux worker, which is kicked
 * here when enabling.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			     PTP_RISING_EDGE |
			     PTP_FALLING_EDGE |
			     PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE))
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	/* EXT_TRG_EDGE clear = rising edge, set = falling edge. */
	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Start the worker that polls for captured timestamps. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
610
611static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
612 struct ptp_clock_request *req, int on)
613{
614 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
615
616 switch (req->type) {
617 case PTP_CLK_REQ_EXTTS:
618 return nxp_c45_extts_enable(priv, &req->extts, on);
619 case PTP_CLK_REQ_PEROUT:
620 return nxp_c45_perout_enable(priv, &req->perout, on);
621 default:
622 return -EOPNOTSUPP;
623 }
624}
625
/* The 12 GPIO pins, initially unassigned; user space selects PEROUT or
 * EXTTS per pin through the PTP pin API (see nxp_c45_ptp_verify_pin()).
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
640
641static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
642 enum ptp_pin_function func, unsigned int chan)
643{
644 if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
645 return -EINVAL;
646
647 switch (func) {
648 case PTP_PF_NONE:
649 case PTP_PF_PEROUT:
650 case PTP_PF_EXTTS:
651 break;
652 default:
653 return -EOPNOTSUPP;
654 }
655
656 return 0;
657}
658
659static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
660{
661 priv->caps = (struct ptp_clock_info) {
662 .owner = THIS_MODULE,
663 .name = "NXP C45 PHC",
664 .max_adj = 16666666,
665 .adjfine = nxp_c45_ptp_adjfine,
666 .adjtime = nxp_c45_ptp_adjtime,
667 .gettimex64 = nxp_c45_ptp_gettimex64,
668 .settime64 = nxp_c45_ptp_settime64,
669 .enable = nxp_c45_ptp_enable,
670 .verify = nxp_c45_ptp_verify_pin,
671 .do_aux_work = nxp_c45_do_aux_work,
672 .pin_config = nxp_c45_ptp_pins,
673 .n_pins = ARRAY_SIZE(nxp_c45_ptp_pins),
674 .n_ext_ts = 1,
675 .n_per_out = 1,
676 };
677
678 priv->ptp_clock = ptp_clock_register(&priv->caps,
679 &priv->phydev->mdio.dev);
680
681 if (IS_ERR(priv->ptp_clock))
682 return PTR_ERR(priv->ptp_clock);
683
684 if (!priv->ptp_clock)
685 return -ENOMEM;
686
687 return 0;
688}
689
690static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
691 struct sk_buff *skb, int type)
692{
693 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
694 mii_ts);
695
696 switch (priv->hwts_tx) {
697 case HWTSTAMP_TX_ON:
698 NXP_C45_SKB_CB(skb)->type = type;
699 NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
700 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
701 skb_queue_tail(&priv->tx_queue, skb);
702 if (nxp_c45_poll_txts(priv->phydev))
703 ptp_schedule_worker(priv->ptp_clock, 0);
704 break;
705 case HWTSTAMP_TX_OFF:
706 default:
707 kfree_skb(skb);
708 break;
709 }
710}
711
712static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
713 struct sk_buff *skb, int type)
714{
715 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
716 mii_ts);
717 struct ptp_header *header = ptp_parse_header(skb, type);
718
719 if (!header)
720 return false;
721
722 if (!priv->hwts_rx)
723 return false;
724
725 NXP_C45_SKB_CB(skb)->header = header;
726 skb_queue_tail(&priv->rx_queue, skb);
727 ptp_schedule_worker(priv->ptp_clock, 0);
728
729 return true;
730}
731
/* SIOCSHWTSTAMP handler: configure TX/RX hardware timestamping.
 * Only L2 PTPv2 filtering is supported on RX; any PTPv2-L2 filter is
 * upgraded to HWTSTAMP_FILTER_PTP_V2_L2_EVENT and reported back to user
 * space in the returned config.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct ifreq *ifreq)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	priv->hwts_tx = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		priv->hwts_rx = 1;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	/* Take the PTP block out of bypass (or put it back) depending on
	 * whether any timestamping direction is enabled.
	 */
	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_ALL);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_NONE);
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_PTP_CONTROL,
				 PORT_PTP_CONTROL_BYPASS);
	}

	/* The egress timestamp IRQ is only used when not polling. */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);

nxp_c45_no_ptp_irq:
	return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
788
789static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
790 struct ethtool_ts_info *ts_info)
791{
792 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
793 mii_ts);
794
795 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
796 SOF_TIMESTAMPING_RX_HARDWARE |
797 SOF_TIMESTAMPING_RAW_HARDWARE;
798 ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
799 ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
800 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
801 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
802 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
803 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
804
805 return 0;
806}
807
/* Hardware counters exported via ethtool -S: name, MMD, register, bit
 * offset and field mask (see struct nxp_c45_phy_stats).
 */
static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = {
	{ "phy_symbol_error_cnt", MDIO_MMD_VEND1,
		VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) },
	{ "phy_link_status_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 8, GENMASK(13, 8) },
	{ "phy_link_availability_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 0, GENMASK(5, 0) },
	{ "phy_link_loss_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 10, GENMASK(15, 10) },
	{ "phy_link_failure_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 0, GENMASK(9, 0) },
	{ "r_good_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_GOOD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_bad_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_BAD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_rxer_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_RXER_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "rx_preamble_count", MDIO_MMD_VEND1,
		VEND1_RX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "tx_preamble_count", MDIO_MMD_VEND1,
		VEND1_TX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "rx_ipg_length", MDIO_MMD_VEND1,
		VEND1_RX_IPG_LENGTH, 0, GENMASK(8, 0) },
	{ "tx_ipg_length", MDIO_MMD_VEND1,
		VEND1_TX_IPG_LENGTH, 0, GENMASK(8, 0) },
};
834
835static int nxp_c45_get_sset_count(struct phy_device *phydev)
836{
837 return ARRAY_SIZE(nxp_c45_hw_stats);
838}
839
840static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
841{
842 size_t i;
843
844 for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
845 strncpy(data + i * ETH_GSTRING_LEN,
846 nxp_c45_hw_stats[i].name, ETH_GSTRING_LEN);
847 }
848}
849
850static void nxp_c45_get_stats(struct phy_device *phydev,
851 struct ethtool_stats *stats, u64 *data)
852{
853 size_t i;
854 int ret;
855
856 for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
857 ret = phy_read_mmd(phydev, nxp_c45_hw_stats[i].mmd,
858 nxp_c45_hw_stats[i].reg);
859 if (ret < 0) {
860 data[i] = U64_MAX;
861 } else {
862 data[i] = ret & nxp_c45_hw_stats[i].mask;
863 data[i] >>= nxp_c45_hw_stats[i].off;
864 }
865 }
866}
867
/* Unlock configuration: enable global and per-port config access, then
 * enable the port, PHY and port-infrastructure blocks.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	/* Give the device time to apply the configuration enables. */
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
884
/* Leave configuration state and start normal PHY operation. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
890
891static int nxp_c45_config_intr(struct phy_device *phydev)
892{
893 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
894 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
895 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
896 else
897 return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
898 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
899}
900
/* Threaded IRQ handler: acknowledge link events and, when the egress
 * timestamp interrupt is pending, drain the TS FIFO and deliver the
 * timestamps.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	/* There is no need for ACK.
	 * The irq signal will be asserted until the EGR TS FIFO will be
	 * emptied.
	 */
	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_IRQ_STATUS);
	if (irq & PTP_IRQ_EGR_TS) {
		while (nxp_c45_get_hwtxts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	return ret;
}
930
/* Trigger a device reset and wait for the self-clearing reset bit, polling
 * every 20 ms for up to 240 ms.
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
945
/* Kick off a cable test; the result is polled later via
 * nxp_c45_cable_test_get_status().
 */
static int nxp_c45_cable_test_start(struct phy_device *phydev)
{
	return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
			     CABLE_TEST_ENABLE | CABLE_TEST_START);
}
951
/* Poll the cable test result and report it through ethtool netlink.
 * Sets *finished = false while the test is still running. Once a result is
 * available, the test is disabled and normal operation is restarted.
 */
static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	int ret;
	u8 cable_test_result;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST);
	if (!(ret & CABLE_TEST_VALID)) {
		*finished = false;
		return 0;
	}

	*finished = true;
	/* Result code in the low 3 bits (CABLE_TEST_*). */
	cable_test_result = ret & GENMASK(2, 0);

	switch (cable_test_result) {
	case CABLE_TEST_OK:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OK);
		break;
	case CABLE_TEST_SHORTED:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
		break;
	case CABLE_TEST_OPEN:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
		break;
	default:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
	}

	/* Disable the test and resume normal operation. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
			   CABLE_TEST_ENABLE);

	return nxp_c45_start_op(phydev);
}
990
991static int nxp_c45_setup_master_slave(struct phy_device *phydev)
992{
993 switch (phydev->master_slave_set) {
994 case MASTER_SLAVE_CFG_MASTER_FORCE:
995 case MASTER_SLAVE_CFG_MASTER_PREFERRED:
996 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
997 MASTER_MODE);
998 break;
999 case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
1000 case MASTER_SLAVE_CFG_SLAVE_FORCE:
1001 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
1002 SLAVE_MODE);
1003 break;
1004 case MASTER_SLAVE_CFG_UNKNOWN:
1005 case MASTER_SLAVE_CFG_UNSUPPORTED:
1006 return 0;
1007 default:
1008 phydev_warn(phydev, "Unsupported Master/Slave mode\n");
1009 return -EOPNOTSUPP;
1010 }
1011
1012 return 0;
1013}
1014
1015static int nxp_c45_read_master_slave(struct phy_device *phydev)
1016{
1017 int reg;
1018
1019 phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
1020 phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
1021
1022 reg = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL);
1023 if (reg < 0)
1024 return reg;
1025
1026 if (reg & B100T1_PMAPMD_MASTER) {
1027 phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
1028 phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
1029 } else {
1030 phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
1031 phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
1032 }
1033
1034 return 0;
1035}
1036
/* 100BASE-T1 has no autonegotiation; only the master/slave role is
 * configurable.
 */
static int nxp_c45_config_aneg(struct phy_device *phydev)
{
	return nxp_c45_setup_master_slave(phydev);
}
1041
/* Refresh link status via the generic C45 helper, then the master/slave
 * role.
 */
static int nxp_c45_read_status(struct phy_device *phydev)
{
	int ret;

	ret = genphy_c45_read_status(phydev);
	if (ret)
		return ret;

	return nxp_c45_read_master_slave(phydev);
}
1056
1057static int nxp_c45_get_sqi(struct phy_device *phydev)
1058{
1059 int reg;
1060
1061 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1062 if (!(reg & SQI_VALID))
1063 return -EINVAL;
1064
1065 reg &= SQI_MASK;
1066
1067 return reg;
1068}
1069
/* Highest SQI value the hardware can report (3-bit field). */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1074
1075static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1076{
1077 if (delay < MIN_ID_PS) {
1078 phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1079 return -EINVAL;
1080 }
1081
1082 if (delay > MAX_ID_PS) {
1083 phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1084 return -EINVAL;
1085 }
1086
1087 return 0;
1088}
1089
1090static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1091{
1092 /* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1093 * To avoid floating point operations we'll multiply by 10
1094 * and get 1 decimal point precision.
1095 */
1096 phase_offset_raw *= 10;
1097 phase_offset_raw -= 738;
1098 return div_u64(phase_offset_raw, 9);
1099}
1100
1101static void nxp_c45_disable_delays(struct phy_device *phydev)
1102{
1103 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
1104 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
1105}
1106
1107static void nxp_c45_set_delays(struct phy_device *phydev)
1108{
1109 struct nxp_c45_phy *priv = phydev->priv;
1110 u64 tx_delay = priv->tx_delay;
1111 u64 rx_delay = priv->rx_delay;
1112 u64 degree;
1113
1114 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1115 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1116 degree = div_u64(tx_delay, PS_PER_DEGREE);
1117 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1118 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1119 } else {
1120 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1121 ID_ENABLE);
1122 }
1123
1124 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1125 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1126 degree = div_u64(rx_delay, PS_PER_DEGREE);
1127 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1128 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1129 } else {
1130 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1131 ID_ENABLE);
1132 }
1133}
1134
/* Fetch the RGMII internal delays from the firmware node
 * ("tx-internal-delay-ps" / "rx-internal-delay-ps"), defaulting to
 * DEFAULT_ID_PS when a property is absent, and validate them against the
 * supported range.
 */
static int nxp_c45_get_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	int ret;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "tx-internal-delay-ps",
					       &priv->tx_delay);
		if (ret)
			priv->tx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
		if (ret) {
			phydev_err(phydev,
				   "tx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "rx-internal-delay-ps",
					       &priv->rx_delay);
		if (ret)
			priv->rx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
		if (ret) {
			phydev_err(phydev,
				   "rx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	return 0;
}
1174
/* Configure the MAC-side interface mode after checking it against the
 * abilities advertised by the PHY. For the RGMII-with-delay modes the
 * internal delay lines are fetched and programmed as well.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RMII);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		/* No MAC-side configuration needed. */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1247
/* .config_init hook: unlock configuration access, enable the statistics
 * counters, program the host interface mode and set up the PTP datapath.
 * The register writes below are kept in this exact order.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	/* Enable configuration register access before touching anything. */
	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	/* PHY_CONFIG_AUTO (BIT(0) of VEND1_PHY_CONFIG) — presumably lets
	 * the PHY apply its configuration automatically; see datasheet.
	 */
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	/* Turn on the hardware counters exposed through the ethtool
	 * statistics callbacks.
	 */
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);

	/* Program the MII/RGMII/RMII/SGMII host interface. */
	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* The driver advertises PHY_BASIC_T1_FEATURES only; the link is
	 * forced rather than autonegotiated.
	 */
	phydev->autoneg = AUTONEG_DISABLE;

	/* PTP datapath: 100BASE-T1 clock period, free-running LTC (hardware
	 * lock disabled), RX timestamp insertion mode 2, PTP port function
	 * enabled.
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK_PERIOD,
		      PTP_CLK_PERIOD_100BT1);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_HW_LTC_LOCK_CTRL,
			   HW_LTC_LOCK_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);

	return nxp_c45_start_op(phydev);
}
1295
1296static int nxp_c45_probe(struct phy_device *phydev)
1297{
1298 struct nxp_c45_phy *priv;
1299 int ptp_ability;
1300 int ret = 0;
1301
1302 priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
1303 if (!priv)
1304 return -ENOMEM;
1305
1306 skb_queue_head_init(&priv->tx_queue);
1307 skb_queue_head_init(&priv->rx_queue);
1308
1309 priv->phydev = phydev;
1310
1311 phydev->priv = priv;
1312
1313 mutex_init(&priv->ptp_lock);
1314
1315 ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1316 VEND1_PORT_ABILITIES);
1317 ptp_ability = !!(ptp_ability & PTP_ABILITY);
1318 if (!ptp_ability) {
1319 phydev_dbg(phydev, "the phy does not support PTP");
1320 goto no_ptp_support;
1321 }
1322
1323 if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
1324 IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
1325 priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
1326 priv->mii_ts.txtstamp = nxp_c45_txtstamp;
1327 priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
1328 priv->mii_ts.ts_info = nxp_c45_ts_info;
1329 phydev->mii_ts = &priv->mii_ts;
1330 ret = nxp_c45_init_ptp_clock(priv);
1331 } else {
1332 phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
1333 }
1334
1335no_ptp_support:
1336
1337 return ret;
1338}
1339
/* phylib driver operations.
 * NOTE(review): HEAD defines PHY_ID_TJA_1120 (and TJA1120 registers) but
 * no TJA1120 entry appears here — confirm whether TJA1120 support lives
 * elsewhere or is intentionally absent from this version.
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.features		= PHY_BASIC_T1_FEATURES,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= nxp_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= nxp_c45_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= nxp_c45_read_status,
		/* Suspend/resume use the generic C45 PMA power-down. */
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
	},
};
1364
/* Register the driver array with phylib and generate module init/exit. */
module_phy_driver(nxp_c45_driver);

/* MDIO device ID table used for module autoloading.
 * NOTE(review): only TJA1103 is listed although HEAD defines
 * PHY_ID_TJA_1120 — confirm this matches the driver entries above.
 */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");