1// SPDX-License-Identifier: GPL-2.0
2
3/* Texas Instruments ICSSG SR1.0 Ethernet Driver
4 *
5 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6 * Copyright (c) Siemens AG, 2024
7 *
8 */
9
10#include <linux/etherdevice.h>
11#include <linux/genalloc.h>
12#include <linux/kernel.h>
13#include <linux/mfd/syscon.h>
14#include <linux/module.h>
15#include <linux/of.h>
16#include <linux/of_mdio.h>
17#include <linux/of_net.h>
18#include <linux/platform_device.h>
19#include <linux/property.h>
20#include <linux/phy.h>
21#include <linux/remoteproc/pruss.h>
22#include <linux/pruss_driver.h>
23
24#include "icssg_prueth.h"
25#include "icssg_mii_rt.h"
26#include "../k3-cppi-desc-pool.h"
27
28#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG SR1.0 Ethernet driver"
29
/* SR1: Set buffer sizes for the pools. There are 8 internal queues
 * implemented in firmware, but only 4 TX channels/threads in the egress
 * direction to the firmware. A high priority queue is needed for
 * management messages since they shouldn't be blocked even in high
 * traffic situations. So use Q0-Q2 as data queues and Q3 as the
 * management queue in the max case. However, for ease of configuration,
 * use the max data queue + 1 for management messages if we are not
 * using the max case.
 *
 * Allocate 4 MTU buffers per data queue. Firmware requires
 * pool sizes to be set for internal queues. Set the upper 5 queue
 * pool sizes to the minimum size of 128 bytes since there are only 3 TX
 * data channels and the management queue requires only a minimum buffer,
 * i.e. the lower queues are used by the driver and the highest priority
 * queue among them is used for management messages.
 */
46
47static int emac_egress_buf_pool_size[] = {
48 PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_SIZE_SR1,
49 PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
50 PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
51 PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1
52};
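
/* Illustrative mapping of the eight entries above (a sketch derived from
 * the sizing comment, not firmware documentation): entries 0-2 are the
 * Q0-Q2 data queue pools at PRUETH_EMAC_BUF_POOL_SIZE_SR1, and entries
 * 3-7 are the remaining queues, including the management queue, at the
 * 128-byte PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1. icssg_config_sr1() below
 * copies these values into config.tx_buf_sz[] starting at index
 * PRUETH_EMAC_BUF_POOL_START_SR1.
 */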
53
54static void icssg_config_sr1(struct prueth *prueth, struct prueth_emac *emac,
55 int slice)
56{
57 struct icssg_sr1_config config;
58 void __iomem *va;
59 int i, index;
60
61 memset(&config, 0, sizeof(config));
62 config.addr_lo = cpu_to_le32(lower_32_bits(prueth->msmcram.pa));
63 config.addr_hi = cpu_to_le32(upper_32_bits(prueth->msmcram.pa));
64 config.rx_flow_id = cpu_to_le32(emac->rx_flow_id_base); /* flow id for host port */
65 config.rx_mgr_flow_id = cpu_to_le32(emac->rx_mgm_flow_id_base); /* for mgm ch */
66 config.rand_seed = cpu_to_le32(get_random_u32());
67
68 for (i = PRUETH_EMAC_BUF_POOL_START_SR1; i < PRUETH_NUM_BUF_POOLS_SR1; i++) {
69 index = i - PRUETH_EMAC_BUF_POOL_START_SR1;
70 config.tx_buf_sz[i] = cpu_to_le32(emac_egress_buf_pool_size[index]);
71 }
72
73 va = prueth->shram.va + slice * ICSSG_CONFIG_OFFSET_SLICE1;
74 memcpy_toio(va, &config, sizeof(config));
75
76 emac->speed = SPEED_1000;
77 emac->duplex = DUPLEX_FULL;
78}
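
/* A note on the shared RAM layout assumed above: slice 0's struct
 * icssg_sr1_config is written at the start of SHRD RAM2 and slice 1's at
 * ICSSG_CONFIG_OFFSET_SLICE1, i.e. va = shram.va + slice *
 * ICSSG_CONFIG_OFFSET_SLICE1. emac_ndo_open() relies on the same layout
 * when it clears ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS bytes of
 * shared RAM before the first port is brought up.
 */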
79
80static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
81{
82 struct cppi5_host_desc_t *first_desc;
83 u32 pkt_len = sizeof(emac->cmd_data);
84 __le32 *data = emac->cmd_data;
85 dma_addr_t desc_dma, buf_dma;
86 struct prueth_tx_chn *tx_chn;
87 void **swdata;
88 int ret = 0;
89 u32 *epib;
90
91 netdev_dbg(emac->ndev, "Sending cmd %x\n", cmd);
92
93 /* only one command at a time allowed to firmware */
94 mutex_lock(&emac->cmd_lock);
95 data[0] = cpu_to_le32(cmd);
96
97 /* highest priority channel for management messages */
98 tx_chn = &emac->tx_chns[emac->tx_ch_num - 1];
99
100 /* Map the linear buffer */
101 buf_dma = dma_map_single(tx_chn->dma_dev, data, pkt_len, DMA_TO_DEVICE);
102 if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
103 netdev_err(emac->ndev, "cmd %x: failed to map cmd buffer\n", cmd);
104 ret = -EINVAL;
105 goto err_unlock;
106 }
107
108 first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
109 if (!first_desc) {
110 netdev_err(emac->ndev, "cmd %x: failed to allocate descriptor\n", cmd);
111 dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
112 ret = -ENOMEM;
113 goto err_unlock;
114 }
115
116 cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
117 PRUETH_NAV_PS_DATA_SIZE);
118 cppi5_hdesc_set_pkttype(first_desc, PRUETH_PKT_TYPE_CMD);
119 epib = first_desc->epib;
120 epib[0] = 0;
121 epib[1] = 0;
122
123 cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
124 swdata = cppi5_hdesc_get_swdata(first_desc);
125 *swdata = data;
126
127 cppi5_hdesc_set_pktlen(first_desc, pkt_len);
128 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
129
130 /* send command */
131 reinit_completion(&emac->cmd_complete);
132 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
133 if (ret) {
134 netdev_err(emac->ndev, "cmd %x: push failed: %d\n", cmd, ret);
135 goto free_desc;
136 }
137 ret = wait_for_completion_timeout(&emac->cmd_complete, msecs_to_jiffies(100));
138 if (!ret)
139 netdev_err(emac->ndev, "cmd %x: completion timeout\n", cmd);
140
141 mutex_unlock(&emac->cmd_lock);
142
143 return ret;
144free_desc:
145 prueth_xmit_free(tx_chn, first_desc);
146err_unlock:
147 mutex_unlock(&emac->cmd_lock);
148
149 return ret;
150}
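
/* Usage sketch for emac_send_command_sr1() (illustrative only): the
 * command word is written into emac->cmd_data and pushed on the highest
 * priority TX channel; cmd_complete is signalled from
 * prueth_rx_mgm_rsp_thread() once the firmware response arrives on the
 * management RX flow, e.g.
 *
 *	emac_send_command_sr1(emac, ICSSG_SHUTDOWN_CMD_SR1);
 *
 * as done in emac_ndo_stop() below.
 */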
151
152static void icssg_config_set_speed_sr1(struct prueth_emac *emac)
153{
154 u32 cmd = ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1, val;
155 struct prueth *prueth = emac->prueth;
156 int slice = prueth_emac_slice(emac);
157
158 val = icssg_rgmii_get_speed(prueth->miig_rt, slice);
159 /* firmware expects speed settings in bit 2-1 */
160 val <<= 1;
161 cmd |= val;
162
163 val = icssg_rgmii_get_fullduplex(prueth->miig_rt, slice);
164 /* firmware expects full duplex settings in bit 3 */
165 val <<= 3;
166 cmd |= val;
167
168 emac_send_command_sr1(emac, cmd);
169}
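
/* Resulting command word layout (as described by the comments above; the
 * exact speed encoding is firmware defined): bits 2-1 carry the value
 * returned by icssg_rgmii_get_speed() and bit 3 the full-duplex flag,
 * OR-ed into ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1.
 */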
170
/* Called back by the PHY layer if there is a change in the link state
 * of the HW port
 */
172static void emac_adjust_link_sr1(struct net_device *ndev)
173{
174 struct prueth_emac *emac = netdev_priv(ndev);
175 struct phy_device *phydev = ndev->phydev;
176 struct prueth *prueth = emac->prueth;
177 bool new_state = false;
178 unsigned long flags;
179
180 if (phydev->link) {
181 /* check the mode of operation - full/half duplex */
182 if (phydev->duplex != emac->duplex) {
183 new_state = true;
184 emac->duplex = phydev->duplex;
185 }
186 if (phydev->speed != emac->speed) {
187 new_state = true;
188 emac->speed = phydev->speed;
189 }
190 if (!emac->link) {
191 new_state = true;
192 emac->link = 1;
193 }
194 } else if (emac->link) {
195 new_state = true;
196 emac->link = 0;
197
198 /* f/w should support 100 & 1000 */
199 emac->speed = SPEED_1000;
200
201 /* half duplex may not be supported by f/w */
202 emac->duplex = DUPLEX_FULL;
203 }
204
205 if (new_state) {
206 phy_print_status(phydev);
207
208 /* update RGMII and MII configuration based on PHY negotiated
209 * values
210 */
211 if (emac->link) {
212 /* Set the RGMII cfg for gig en and full duplex */
213 icssg_update_rgmii_cfg(prueth->miig_rt, emac);
214
215 /* update the Tx IPG based on 100M/1G speed */
216 spin_lock_irqsave(&emac->lock, flags);
217 icssg_config_ipg(emac);
218 spin_unlock_irqrestore(&emac->lock, flags);
219 icssg_config_set_speed_sr1(emac);
220 }
221 }
222
223 if (emac->link) {
224 /* reactivate the transmit queue */
225 netif_tx_wake_all_queues(ndev);
226 } else {
227 netif_tx_stop_all_queues(ndev);
228 prueth_cleanup_tx_ts(emac);
229 }
230}
231
232static int emac_phy_connect(struct prueth_emac *emac)
233{
234 struct prueth *prueth = emac->prueth;
235 struct net_device *ndev = emac->ndev;
236 /* connect PHY */
237 ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
238 &emac_adjust_link_sr1, 0,
239 emac->phy_if);
240 if (!ndev->phydev) {
241 dev_err(prueth->dev, "couldn't connect to phy %s\n",
242 emac->phy_node->full_name);
243 return -ENODEV;
244 }
245
246 if (!emac->half_duplex) {
247 dev_dbg(prueth->dev, "half duplex mode is not supported\n");
248 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
249 }
250
251 /* Remove 100Mbits half-duplex due to RGMII misreporting connection
252 * as full duplex */
253 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
254
255 /* remove unsupported modes */
256 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
257 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
258 phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
259
260 if (emac->phy_if == PHY_INTERFACE_MODE_MII)
261 phy_set_max_speed(ndev->phydev, SPEED_100);
262
263 return 0;
264}
265
266/* get one packet from requested flow_id
267 *
268 * Returns skb pointer if packet found else NULL
269 * Caller must free the returned skb.
270 */
271static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
272 u32 flow_id)
273{
274 struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
275 struct net_device *ndev = emac->ndev;
276 struct cppi5_host_desc_t *desc_rx;
277 struct sk_buff *skb, *new_skb;
278 dma_addr_t desc_dma, buf_dma;
279 u32 buf_dma_len, pkt_len;
280 void **swdata;
281 int ret;
282
283 ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
284 if (ret) {
285 if (ret != -ENODATA)
286 netdev_err(ndev, "rx mgm pop: failed: %d\n", ret);
287 return NULL;
288 }
289
290 if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown */
291 return NULL;
292
293 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
294
295 /* Fix FW bug about incorrect PSDATA size */
296 if (cppi5_hdesc_get_psdata_size(desc_rx) != PRUETH_NAV_PS_DATA_SIZE) {
297 cppi5_hdesc_update_psdata_size(desc_rx,
298 PRUETH_NAV_PS_DATA_SIZE);
299 }
300
301 swdata = cppi5_hdesc_get_swdata(desc_rx);
302 skb = *swdata;
303 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
304 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
305
306 dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
307 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
308
309 new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
310 /* if allocation fails we drop the packet but push the
311 * descriptor back to the ring with old skb to prevent a stall
312 */
313 if (!new_skb) {
314 netdev_err(ndev,
315 "skb alloc failed, dropped mgm pkt from flow %d\n",
316 flow_id);
317 new_skb = skb;
318 skb = NULL; /* return NULL */
319 } else {
320 /* return the filled skb */
321 skb_put(skb, pkt_len);
322 }
323
324 /* queue another DMA */
325 ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn);
326 if (WARN_ON(ret < 0))
327 dev_kfree_skb_any(new_skb);
328
329 return skb;
330}
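
/* Both management IRQ threads below follow the same pattern around
 * prueth_process_rx_mgm(): pop one descriptor from the given flow, swap
 * in a freshly allocated skb so the ring never runs dry, and hand the
 * filled skb (or NULL on teardown or allocation failure) back to the
 * caller, which must free it.
 */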
331
332static void prueth_tx_ts_sr1(struct prueth_emac *emac,
333 struct emac_tx_ts_response_sr1 *tsr)
334{
335 struct skb_shared_hwtstamps ssh;
336 u32 hi_ts, lo_ts, cookie;
337 struct sk_buff *skb;
338 u64 ns;
339
340 hi_ts = le32_to_cpu(tsr->hi_ts);
341 lo_ts = le32_to_cpu(tsr->lo_ts);
342
343 ns = (u64)hi_ts << 32 | lo_ts;
344
345 cookie = le32_to_cpu(tsr->cookie);
346 if (cookie >= PRUETH_MAX_TX_TS_REQUESTS) {
347 netdev_dbg(emac->ndev, "Invalid TX TS cookie 0x%x\n",
348 cookie);
349 return;
350 }
351
352 skb = emac->tx_ts_skb[cookie];
353 emac->tx_ts_skb[cookie] = NULL; /* free slot */
354
355 memset(&ssh, 0, sizeof(ssh));
356 ssh.hwtstamp = ns_to_ktime(ns);
357
358 skb_tstamp_tx(skb, &ssh);
359 dev_consume_skb_any(skb);
360}
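
/* The TX timestamp arrives as two little-endian 32-bit halves combined
 * into a 64-bit nanosecond value; the cookie echoes the slot index under
 * which the skb was stored in emac->tx_ts_skb[] (presumably by the
 * transmit path), which is why values of PRUETH_MAX_TX_TS_REQUESTS or
 * more are rejected above.
 */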
361
362static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
363{
364 struct prueth_emac *emac = dev_id;
365 struct sk_buff *skb;
366
367 skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
368 if (!skb)
369 return IRQ_NONE;
370
371 prueth_tx_ts_sr1(emac, (void *)skb->data);
372 dev_kfree_skb_any(skb);
373
374 return IRQ_HANDLED;
375}
376
377static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
378{
379 struct prueth_emac *emac = dev_id;
380 struct sk_buff *skb;
381 u32 rsp;
382
383 skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
384 if (!skb)
385 return IRQ_NONE;
386
387 /* Process command response */
388 rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000;
389 if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
390 netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
391 complete(&emac->cmd_complete);
392 } else if (rsp == ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1) {
393 netdev_dbg(emac->ndev, "f/w Speed/Duplex cmd rsp %x\n", rsp);
394 complete(&emac->cmd_complete);
395 }
396
397 dev_kfree_skb_any(skb);
398
399 return IRQ_HANDLED;
400}
401
402static struct icssg_firmwares icssg_sr1_emac_firmwares[] = {
403 {
404 .pru = "ti-pruss/am65x-pru0-prueth-fw.elf",
405 .rtu = "ti-pruss/am65x-rtu0-prueth-fw.elf",
406 },
407 {
408 .pru = "ti-pruss/am65x-pru1-prueth-fw.elf",
409 .rtu = "ti-pruss/am65x-rtu1-prueth-fw.elf",
410 }
411};
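
/* The table is indexed by ICSSG slice: prueth_emac_start() below picks
 * firmwares[slice] and loads the .pru image on the PRU core and the .rtu
 * image on the RTU core of that slice before booting them.
 */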
412
413static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
414{
415 struct icssg_firmwares *firmwares;
416 struct device *dev = prueth->dev;
417 int slice, ret;
418
419 firmwares = icssg_sr1_emac_firmwares;
420
421 slice = prueth_emac_slice(emac);
422 if (slice < 0) {
423 netdev_err(emac->ndev, "invalid port\n");
424 return -EINVAL;
425 }
426
427 icssg_config_sr1(prueth, emac, slice);
428
429 ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
430 ret = rproc_boot(prueth->pru[slice]);
431 if (ret) {
432 dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
433 return -EINVAL;
434 }
435
436 ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
437 ret = rproc_boot(prueth->rtu[slice]);
438 if (ret) {
439 dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
440 goto halt_pru;
441 }
442
443 return 0;
444
445halt_pru:
446 rproc_shutdown(prueth->pru[slice]);
447
448 return ret;
449}
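
/* Error handling sketch: the boot order is PRU first, then RTU, so a
 * failed RTU boot only needs to shut the PRU back down (the halt_pru
 * label), while prueth_emac_stop() tears both cores down in the reverse
 * order (RTU, then PRU).
 */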
450
451static void prueth_emac_stop(struct prueth_emac *emac)
452{
453 struct prueth *prueth = emac->prueth;
454 int slice;
455
456 switch (emac->port_id) {
457 case PRUETH_PORT_MII0:
458 slice = ICSS_SLICE0;
459 break;
460 case PRUETH_PORT_MII1:
461 slice = ICSS_SLICE1;
462 break;
463 default:
464 netdev_err(emac->ndev, "invalid port\n");
465 return;
466 }
467
468 if (!emac->is_sr1)
469 rproc_shutdown(prueth->txpru[slice]);
470 rproc_shutdown(prueth->rtu[slice]);
471 rproc_shutdown(prueth->pru[slice]);
472}
473
474/**
475 * emac_ndo_open - EMAC device open
476 * @ndev: network adapter device
477 *
478 * Called when system wants to start the interface.
479 *
480 * Return: 0 for a successful open, or appropriate error code
481 */
482static int emac_ndo_open(struct net_device *ndev)
483{
484 struct prueth_emac *emac = netdev_priv(ndev);
485 int num_data_chn = emac->tx_ch_num - 1;
486 struct prueth *prueth = emac->prueth;
487 int slice = prueth_emac_slice(emac);
488 struct device *dev = prueth->dev;
489 int max_rx_flows, rx_flow;
490 int ret, i;
491
492 /* clear SMEM and MSMC settings for all slices */
493 if (!prueth->emacs_initialized) {
494 memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
495 memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
496 }
497
498 /* set h/w MAC as user might have re-configured */
499 ether_addr_copy(emac->mac_addr, ndev->dev_addr);
500
501 icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
502
503 icssg_class_default(prueth->miig_rt, slice, 0, true);
504
505 /* Notify the stack of the actual queue counts. */
506 ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
507 if (ret) {
508 dev_err(dev, "cannot set real number of tx queues\n");
509 return ret;
510 }
511
512 init_completion(&emac->cmd_complete);
513 ret = prueth_init_tx_chns(emac);
514 if (ret) {
515 dev_err(dev, "failed to init tx channel: %d\n", ret);
516 return ret;
517 }
518
519 max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
520 ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
521 max_rx_flows, PRUETH_MAX_RX_DESC);
522 if (ret) {
523 dev_err(dev, "failed to init rx channel: %d\n", ret);
524 goto cleanup_tx;
525 }
526
527 ret = prueth_init_rx_chns(emac, &emac->rx_mgm_chn, "rxmgm",
528 PRUETH_MAX_RX_MGM_FLOWS_SR1,
529 PRUETH_MAX_RX_MGM_DESC_SR1);
530 if (ret) {
531 dev_err(dev, "failed to init rx mgmt channel: %d\n",
532 ret);
533 goto cleanup_rx;
534 }
535
536 ret = prueth_ndev_add_tx_napi(emac);
537 if (ret)
538 goto cleanup_rx_mgm;
539
540 /* we use only the highest priority flow for now i.e. @irq[3] */
541 rx_flow = PRUETH_RX_FLOW_DATA_SR1;
542 ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
543 IRQF_TRIGGER_HIGH, dev_name(dev), emac);
544 if (ret) {
545 dev_err(dev, "unable to request RX IRQ\n");
546 goto cleanup_napi;
547 }
548
549 ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
550 NULL, prueth_rx_mgm_rsp_thread,
551 IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
552 dev_name(dev), emac);
553 if (ret) {
554 dev_err(dev, "unable to request RX Management RSP IRQ\n");
555 goto free_rx_irq;
556 }
557
558 ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
559 NULL, prueth_rx_mgm_ts_thread_sr1,
560 IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
561 dev_name(dev), emac);
562 if (ret) {
563 dev_err(dev, "unable to request RX Management TS IRQ\n");
564 goto free_rx_mgm_rsp_irq;
565 }
566
567 /* reset and start PRU firmware */
568 ret = prueth_emac_start(prueth, emac);
569 if (ret)
570 goto free_rx_mgmt_ts_irq;
571
572 icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
573
574 /* Prepare RX */
575 ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
576 if (ret)
577 goto stop;
578
579 ret = prueth_prepare_rx_chan(emac, &emac->rx_mgm_chn, 64);
580 if (ret)
581 goto reset_rx_chn;
582
583 ret = k3_udma_glue_enable_rx_chn(emac->rx_mgm_chn.rx_chn);
584 if (ret)
585 goto reset_rx_chn;
586
587 ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
588 if (ret)
589 goto reset_rx_mgm_chn;
590
591 for (i = 0; i < emac->tx_ch_num; i++) {
592 ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
593 if (ret)
594 goto reset_tx_chan;
595 }
596
597 /* Enable NAPI in Tx and Rx direction */
598 for (i = 0; i < emac->tx_ch_num; i++)
599 napi_enable(&emac->tx_chns[i].napi_tx);
600 napi_enable(&emac->napi_rx);
601
602 /* start PHY */
603 phy_start(ndev->phydev);
604
605 prueth->emacs_initialized++;
606
607 queue_work(system_long_wq, &emac->stats_work.work);
608
609 return 0;
610
611reset_tx_chan:
	/* Since the interface is not yet up, there wouldn't be
	 * any SKB for completion. So pass false for free_skb.
	 */
615 prueth_reset_tx_chan(emac, i, false);
616reset_rx_mgm_chn:
617 prueth_reset_rx_chan(&emac->rx_mgm_chn,
618 PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
619reset_rx_chn:
620 prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
621stop:
622 prueth_emac_stop(emac);
623free_rx_mgmt_ts_irq:
624 free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
625 emac);
626free_rx_mgm_rsp_irq:
627 free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
628 emac);
629free_rx_irq:
630 free_irq(emac->rx_chns.irq[rx_flow], emac);
631cleanup_napi:
632 prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
633cleanup_rx_mgm:
634 prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn,
635 PRUETH_MAX_RX_MGM_FLOWS_SR1);
636cleanup_rx:
637 prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
638cleanup_tx:
639 prueth_cleanup_tx_chns(emac);
640
641 return ret;
642}
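
/* The unwind labels above run in the reverse order of setup: reset any
 * enabled DMA channels, stop the firmware, free the three IRQs, remove
 * TX NAPI, then clean up the management, data RX and TX channels, so a
 * failure at any step releases exactly what was set up before it.
 */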
643
644/**
645 * emac_ndo_stop - EMAC device stop
646 * @ndev: network adapter device
647 *
648 * Called when system wants to stop or down the interface.
649 *
650 * Return: Always 0 (Success)
651 */
652static int emac_ndo_stop(struct net_device *ndev)
653{
654 struct prueth_emac *emac = netdev_priv(ndev);
655 int rx_flow = PRUETH_RX_FLOW_DATA_SR1;
656 struct prueth *prueth = emac->prueth;
657 int max_rx_flows;
658 int ret, i;
659
660 /* inform the upper layers. */
661 netif_tx_stop_all_queues(ndev);
662
663 /* block packets from wire */
664 if (ndev->phydev)
665 phy_stop(ndev->phydev);
666
667 icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
668
669 emac_send_command_sr1(emac, ICSSG_SHUTDOWN_CMD_SR1);
670
671 atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
672 /* ensure new tdown_cnt value is visible */
673 smp_mb__after_atomic();
674 /* tear down and disable UDMA channels */
675 reinit_completion(&emac->tdown_complete);
676 for (i = 0; i < emac->tx_ch_num; i++)
677 k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
678
679 ret = wait_for_completion_timeout(&emac->tdown_complete,
680 msecs_to_jiffies(1000));
681 if (!ret)
682 netdev_err(ndev, "tx teardown timeout\n");
683
684 prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
685 for (i = 0; i < emac->tx_ch_num; i++)
686 napi_disable(&emac->tx_chns[i].napi_tx);
687
688 max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
689 k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
690
691 prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
692 /* Teardown RX MGM channel */
693 k3_udma_glue_tdown_rx_chn(emac->rx_mgm_chn.rx_chn, true);
694 prueth_reset_rx_chan(&emac->rx_mgm_chn,
695 PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
696
697 napi_disable(&emac->napi_rx);
698
699 /* Destroying the queued work in ndo_stop() */
700 cancel_delayed_work_sync(&emac->stats_work);
701
702 /* stop PRUs */
703 prueth_emac_stop(emac);
704
705 free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1], emac);
706 free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1], emac);
707 free_irq(emac->rx_chns.irq[rx_flow], emac);
708 prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
709 prueth_cleanup_tx_chns(emac);
710
711 prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn, PRUETH_MAX_RX_MGM_FLOWS_SR1);
712 prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
713
714 prueth->emacs_initialized--;
715
716 return 0;
717}
718
719static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev)
720{
721 struct prueth_emac *emac = netdev_priv(ndev);
722 bool allmulti = ndev->flags & IFF_ALLMULTI;
723 bool promisc = ndev->flags & IFF_PROMISC;
724 struct prueth *prueth = emac->prueth;
725 int slice = prueth_emac_slice(emac);
726
727 if (promisc) {
728 icssg_class_promiscuous_sr1(prueth->miig_rt, slice);
729 return;
730 }
731
732 if (allmulti) {
733 icssg_class_default(prueth->miig_rt, slice, 1, true);
734 return;
735 }
736
737 icssg_class_default(prueth->miig_rt, slice, 0, true);
738 if (!netdev_mc_empty(ndev)) {
739 /* program multicast address list into Classifier */
740 icssg_class_add_mcast_sr1(prueth->miig_rt, slice, ndev);
741 }
742}
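
/* Classifier selection sketch: promiscuous mode wins over allmulti,
 * which wins over the default unicast + multicast-list configuration:
 *
 *	IFF_PROMISC	-> icssg_class_promiscuous_sr1()
 *	IFF_ALLMULTI	-> icssg_class_default(..., 1, true)
 *	otherwise	-> icssg_class_default(..., 0, true), plus
 *			   icssg_class_add_mcast_sr1() when the multicast
 *			   list is non-empty
 */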
743
744static const struct net_device_ops emac_netdev_ops = {
745 .ndo_open = emac_ndo_open,
746 .ndo_stop = emac_ndo_stop,
747 .ndo_start_xmit = icssg_ndo_start_xmit,
748 .ndo_set_mac_address = eth_mac_addr,
749 .ndo_validate_addr = eth_validate_addr,
750 .ndo_tx_timeout = icssg_ndo_tx_timeout,
751 .ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1,
752 .ndo_eth_ioctl = icssg_ndo_ioctl,
753 .ndo_get_stats64 = icssg_ndo_get_stats64,
754 .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
755};
756
757static int prueth_netdev_init(struct prueth *prueth,
758 struct device_node *eth_node)
759{
760 struct prueth_emac *emac;
761 struct net_device *ndev;
762 enum prueth_port port;
763 enum prueth_mac mac;
764 /* Only enable one TX channel due to timeouts when
765 * using multiple channels */
766 int num_tx_chn = 1;
767 int ret;
768
769 port = prueth_node_port(eth_node);
770 if (port == PRUETH_PORT_INVALID)
771 return -EINVAL;
772
773 mac = prueth_node_mac(eth_node);
774 if (mac == PRUETH_MAC_INVALID)
775 return -EINVAL;
776
777 ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
778 if (!ndev)
779 return -ENOMEM;
780
781 emac = netdev_priv(ndev);
782 emac->is_sr1 = 1;
783 emac->prueth = prueth;
784 emac->ndev = ndev;
785 emac->port_id = port;
786 emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
787 if (!emac->cmd_wq) {
788 ret = -ENOMEM;
789 goto free_ndev;
790 }
791
792 INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
793
794 ret = pruss_request_mem_region(prueth->pruss,
795 port == PRUETH_PORT_MII0 ?
796 PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
797 &emac->dram);
798 if (ret) {
799 dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
800 ret = -ENOMEM;
801 goto free_wq;
802 }
803
804 /* SR1.0 uses a dedicated high priority channel
805 * to send commands to the firmware
806 */
807 emac->tx_ch_num = 2;
808
809 SET_NETDEV_DEV(ndev, prueth->dev);
810 spin_lock_init(&emac->lock);
811 mutex_init(&emac->cmd_lock);
812
813 emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
814 if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
815 dev_err(prueth->dev, "couldn't find phy-handle\n");
816 ret = -ENODEV;
817 goto free;
818 } else if (of_phy_is_fixed_link(eth_node)) {
819 ret = of_phy_register_fixed_link(eth_node);
820 if (ret) {
821 ret = dev_err_probe(prueth->dev, ret,
822 "failed to register fixed-link phy\n");
823 goto free;
824 }
825
826 emac->phy_node = eth_node;
827 }
828
829 ret = of_get_phy_mode(eth_node, &emac->phy_if);
830 if (ret) {
831 dev_err(prueth->dev, "could not get phy-mode property\n");
832 goto free;
833 }
834
835 if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
836 !phy_interface_mode_is_rgmii(emac->phy_if)) {
837 dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
838 ret = -EINVAL;
839 goto free;
840 }
841
842 /* AM65 SR2.0 has TX Internal delay always enabled by hardware
843 * and it is not possible to disable TX Internal delay. The below
844 * switch case block describes how we handle different phy modes
845 * based on hardware restriction.
846 */
847 switch (emac->phy_if) {
848 case PHY_INTERFACE_MODE_RGMII_ID:
849 emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
850 break;
851 case PHY_INTERFACE_MODE_RGMII_TXID:
852 emac->phy_if = PHY_INTERFACE_MODE_RGMII;
853 break;
854 case PHY_INTERFACE_MODE_RGMII:
855 case PHY_INTERFACE_MODE_RGMII_RXID:
856 dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
857 ret = -EINVAL;
858 goto free;
859 default:
860 break;
861 }
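
	/* Net effect of the switch above, given that the MAC always adds
	 * the TX delay:
	 *
	 *	DT phy-mode		mode passed to the PHY
	 *	rgmii-id		rgmii-rxid
	 *	rgmii-txid		rgmii
	 *	rgmii / rgmii-rxid	rejected (TX delay cannot be
	 *				disabled)
	 *
	 * Any other allowed mode (MII) is left unchanged.
	 */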
862
863 /* get mac address from DT and set private and netdev addr */
864 ret = of_get_ethdev_address(eth_node, ndev);
865 if (!is_valid_ether_addr(ndev->dev_addr)) {
866 eth_hw_addr_random(ndev);
867 dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
868 port, ndev->dev_addr);
869 }
870 ether_addr_copy(emac->mac_addr, ndev->dev_addr);
871
872 ndev->dev.of_node = eth_node;
873 ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
874 ndev->max_mtu = PRUETH_MAX_MTU;
875 ndev->netdev_ops = &emac_netdev_ops;
876 ndev->ethtool_ops = &icssg_ethtool_ops;
877 ndev->hw_features = NETIF_F_SG;
878 ndev->features = ndev->hw_features;
879
880 netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
881 prueth->emac[mac] = emac;
882
883 return 0;
884
885free:
886 pruss_release_mem_region(prueth->pruss, &emac->dram);
887free_wq:
888 destroy_workqueue(emac->cmd_wq);
889free_ndev:
890 emac->ndev = NULL;
891 prueth->emac[mac] = NULL;
892 free_netdev(ndev);
893
894 return ret;
895}
896
897static int prueth_probe(struct platform_device *pdev)
898{
899 struct device_node *eth_node, *eth_ports_node;
900 struct device_node *eth0_node = NULL;
901 struct device_node *eth1_node = NULL;
902 struct device *dev = &pdev->dev;
903 struct device_node *np;
904 struct prueth *prueth;
905 struct pruss *pruss;
906 u32 msmc_ram_size;
907 int i, ret;
908
909 np = dev->of_node;
910
911 prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
912 if (!prueth)
913 return -ENOMEM;
914
915 dev_set_drvdata(dev, prueth);
916 prueth->pdev = pdev;
917 prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
918
919 prueth->dev = dev;
920 eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
921 if (!eth_ports_node)
922 return -ENOENT;
923
924 for_each_child_of_node(eth_ports_node, eth_node) {
925 u32 reg;
926
927 if (strcmp(eth_node->name, "port"))
928 continue;
		ret = of_property_read_u32(eth_node, "reg", &reg);
930 if (ret < 0) {
931 dev_err(dev, "%pOF error reading port_id %d\n",
932 eth_node, ret);
933 }
934
935 of_node_get(eth_node);
936
937 if (reg == 0) {
938 eth0_node = eth_node;
939 if (!of_device_is_available(eth0_node)) {
940 of_node_put(eth0_node);
941 eth0_node = NULL;
942 }
943 } else if (reg == 1) {
944 eth1_node = eth_node;
945 if (!of_device_is_available(eth1_node)) {
946 of_node_put(eth1_node);
947 eth1_node = NULL;
948 }
949 } else {
950 dev_err(dev, "port reg should be 0 or 1\n");
951 }
952 }
953
954 of_node_put(eth_ports_node);
955
956 /* At least one node must be present and available else we fail */
957 if (!eth0_node && !eth1_node) {
958 dev_err(dev, "neither port0 nor port1 node available\n");
959 return -ENODEV;
960 }
961
962 if (eth0_node == eth1_node) {
963 dev_err(dev, "port0 and port1 can't have same reg\n");
964 of_node_put(eth0_node);
965 return -ENODEV;
966 }
967
968 prueth->eth_node[PRUETH_MAC0] = eth0_node;
969 prueth->eth_node[PRUETH_MAC1] = eth1_node;
970
971 prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
972 if (IS_ERR(prueth->miig_rt)) {
973 dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
974 return -ENODEV;
975 }
976
977 prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
978 if (IS_ERR(prueth->mii_rt)) {
979 dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
980 return -ENODEV;
981 }
982
983 if (eth0_node) {
984 ret = prueth_get_cores(prueth, ICSS_SLICE0, true);
985 if (ret)
986 goto put_cores;
987 }
988
989 if (eth1_node) {
990 ret = prueth_get_cores(prueth, ICSS_SLICE1, true);
991 if (ret)
992 goto put_cores;
993 }
994
995 pruss = pruss_get(eth0_node ?
996 prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
997 if (IS_ERR(pruss)) {
998 ret = PTR_ERR(pruss);
999 dev_err(dev, "unable to get pruss handle\n");
1000 goto put_cores;
1001 }
1002
1003 prueth->pruss = pruss;
1004
1005 ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
1006 &prueth->shram);
1007 if (ret) {
1008 dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
1009 goto put_pruss;
1010 }
1011
1012 prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
1013 if (!prueth->sram_pool) {
1014 dev_err(dev, "unable to get SRAM pool\n");
1015 ret = -ENODEV;
1016
1017 goto put_mem;
1018 }
1019
1020 msmc_ram_size = MSMC_RAM_SIZE_SR1;
1021
1022 prueth->msmcram.va = (void __iomem *)gen_pool_alloc(prueth->sram_pool,
1023 msmc_ram_size);
1024
1025 if (!prueth->msmcram.va) {
1026 ret = -ENOMEM;
1027 dev_err(dev, "unable to allocate MSMC resource\n");
1028 goto put_mem;
1029 }
1030 prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
1031 (unsigned long)prueth->msmcram.va);
1032 prueth->msmcram.size = msmc_ram_size;
1033 memset_io(prueth->msmcram.va, 0, msmc_ram_size);
1034 dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
1035 prueth->msmcram.va, prueth->msmcram.size);
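
	/* The physical address recorded here is what icssg_config_sr1()
	 * later hands to the firmware via config.addr_lo/addr_hi, so the
	 * allocation must outlive every port; it is only returned to the
	 * SRAM pool in prueth_remove() or on the probe error path below.
	 */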
1036
1037 prueth->iep0 = icss_iep_get_idx(np, 0);
1038 if (IS_ERR(prueth->iep0)) {
1039 ret = dev_err_probe(dev, PTR_ERR(prueth->iep0),
1040 "iep0 get failed\n");
1041 goto free_pool;
1042 }
1043
1044 prueth->iep1 = icss_iep_get_idx(np, 1);
1045 if (IS_ERR(prueth->iep1)) {
1046 ret = dev_err_probe(dev, PTR_ERR(prueth->iep1),
1047 "iep1 get failed\n");
1048 goto put_iep0;
1049 }
1050
1051 ret = icss_iep_init(prueth->iep0, NULL, NULL, 0);
1052 if (ret) {
1053 dev_err_probe(dev, ret, "failed to init iep0\n");
1054 goto put_iep;
1055 }
1056
1057 ret = icss_iep_init(prueth->iep1, NULL, NULL, 0);
1058 if (ret) {
1059 dev_err_probe(dev, ret, "failed to init iep1\n");
1060 goto exit_iep0;
1061 }
1062
1063 if (eth0_node) {
1064 ret = prueth_netdev_init(prueth, eth0_node);
1065 if (ret) {
1066 dev_err_probe(dev, ret, "netdev init %s failed\n",
1067 eth0_node->name);
1068 goto exit_iep;
1069 }
1070
1071 prueth->emac[PRUETH_MAC0]->half_duplex =
1072 of_property_read_bool(eth0_node, "ti,half-duplex-capable");
1073
1074 prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
1075 }
1076
1077 if (eth1_node) {
1078 ret = prueth_netdev_init(prueth, eth1_node);
1079 if (ret) {
1080 dev_err_probe(dev, ret, "netdev init %s failed\n",
1081 eth1_node->name);
1082 goto netdev_exit;
1083 }
1084
1085 prueth->emac[PRUETH_MAC1]->half_duplex =
1086 of_property_read_bool(eth1_node, "ti,half-duplex-capable");
1087
1088 prueth->emac[PRUETH_MAC1]->iep = prueth->iep1;
1089 }
1090
1091 /* register the network devices */
1092 if (eth0_node) {
1093 ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
1094 if (ret) {
1095 dev_err(dev, "can't register netdev for port MII0\n");
1096 goto netdev_exit;
1097 }
1098
1099 prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
1100 emac_phy_connect(prueth->emac[PRUETH_MAC0]);
1101 phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
1102 }
1103
1104 if (eth1_node) {
1105 ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
1106 if (ret) {
1107 dev_err(dev, "can't register netdev for port MII1\n");
1108 goto netdev_unregister;
1109 }
1110
1111 prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
1112 emac_phy_connect(prueth->emac[PRUETH_MAC1]);
1113 phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
1114 }
1115
1116 dev_info(dev, "TI PRU SR1.0 ethernet driver initialized: %s EMAC mode\n",
1117 (!eth0_node || !eth1_node) ? "single" : "dual");
1118
1119 if (eth1_node)
1120 of_node_put(eth1_node);
1121 if (eth0_node)
1122 of_node_put(eth0_node);
1123
1124 return 0;
1125
1126netdev_unregister:
1127 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1128 if (!prueth->registered_netdevs[i])
1129 continue;
1130
1131 if (prueth->emac[i]->ndev->phydev) {
1132 phy_disconnect(prueth->emac[i]->ndev->phydev);
1133 prueth->emac[i]->ndev->phydev = NULL;
1134 }
1135 unregister_netdev(prueth->registered_netdevs[i]);
1136 }
1137
1138netdev_exit:
1139 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1140 eth_node = prueth->eth_node[i];
1141 if (!eth_node)
1142 continue;
1143
1144 prueth_netdev_exit(prueth, eth_node);
1145 }
1146
1147exit_iep:
1148 icss_iep_exit(prueth->iep1);
1149exit_iep0:
1150 icss_iep_exit(prueth->iep0);
1151
1152put_iep:
1153 icss_iep_put(prueth->iep1);
1154
1155put_iep0:
1156 icss_iep_put(prueth->iep0);
1157 prueth->iep0 = NULL;
1158 prueth->iep1 = NULL;
1159
1160free_pool:
1161 gen_pool_free(prueth->sram_pool,
1162 (unsigned long)prueth->msmcram.va, msmc_ram_size);
1163
1164put_mem:
1165 pruss_release_mem_region(prueth->pruss, &prueth->shram);
1166
1167put_pruss:
1168 pruss_put(prueth->pruss);
1169
1170put_cores:
1171 if (eth1_node) {
1172 prueth_put_cores(prueth, ICSS_SLICE1);
1173 of_node_put(eth1_node);
1174 }
1175
1176 if (eth0_node) {
1177 prueth_put_cores(prueth, ICSS_SLICE0);
1178 of_node_put(eth0_node);
1179 }
1180
1181 return ret;
1182}
1183
1184static void prueth_remove(struct platform_device *pdev)
1185{
1186 struct prueth *prueth = platform_get_drvdata(pdev);
1187 struct device_node *eth_node;
1188 int i;
1189
1190 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1191 if (!prueth->registered_netdevs[i])
1192 continue;
1193 phy_stop(prueth->emac[i]->ndev->phydev);
1194 phy_disconnect(prueth->emac[i]->ndev->phydev);
1195 prueth->emac[i]->ndev->phydev = NULL;
1196 unregister_netdev(prueth->registered_netdevs[i]);
1197 }
1198
1199 for (i = 0; i < PRUETH_NUM_MACS; i++) {
1200 eth_node = prueth->eth_node[i];
1201 if (!eth_node)
1202 continue;
1203
1204 prueth_netdev_exit(prueth, eth_node);
1205 }
1206
1207 icss_iep_exit(prueth->iep1);
1208 icss_iep_exit(prueth->iep0);
1209
1210 icss_iep_put(prueth->iep1);
1211 icss_iep_put(prueth->iep0);
1212
1213 gen_pool_free(prueth->sram_pool,
1214 (unsigned long)prueth->msmcram.va,
1215 MSMC_RAM_SIZE_SR1);
1216
1217 pruss_release_mem_region(prueth->pruss, &prueth->shram);
1218
1219 pruss_put(prueth->pruss);
1220
1221 if (prueth->eth_node[PRUETH_MAC1])
1222 prueth_put_cores(prueth, ICSS_SLICE1);
1223
1224 if (prueth->eth_node[PRUETH_MAC0])
1225 prueth_put_cores(prueth, ICSS_SLICE0);
1226}
1227
1228static const struct prueth_pdata am654_sr1_icssg_pdata = {
1229 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
1230};
1231
1232static const struct of_device_id prueth_dt_match[] = {
1233 { .compatible = "ti,am654-sr1-icssg-prueth", .data = &am654_sr1_icssg_pdata },
1234 { /* sentinel */ }
1235};
1236MODULE_DEVICE_TABLE(of, prueth_dt_match);
1237
1238static struct platform_driver prueth_driver = {
1239 .probe = prueth_probe,
1240 .remove = prueth_remove,
1241 .driver = {
1242 .name = "icssg-prueth-sr1",
1243 .of_match_table = prueth_dt_match,
1244 .pm = &prueth_dev_pm_ops,
1245 },
1246};
1247module_platform_driver(prueth_driver);
1248
1249MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
1250MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
1251MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>");
1252MODULE_DESCRIPTION(PRUETH_MODULE_DESCRIPTION);
1253MODULE_LICENSE("GPL");