/*******************************************************************************
  This contains the functions to handle the platform driver.

  Copyright (C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>

#include "stmmac.h"
#include "stmmac_platform.h"

#ifdef CONFIG_OF

/**
 * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
 * @mcast_bins: Multicast filtering bins
 * Description:
 * this function validates the number of Multicast filtering bins specified
 * by the configuration through the device tree. The Synopsys GMAC supports
 * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC
 * number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds
 * to 7 bits, and 256 refers to 8 bits of the CRC. Any other setting is
 * invalid and will cause the filtering algorithm to use Multicast
 * promiscuous mode.
 */
static int dwmac1000_validate_mcast_bins(int mcast_bins)
{
	int x = mcast_bins;

	switch (x) {
	case HASH_TABLE_SIZE:
	case 128:
	case 256:
		break;
	default:
		x = 0;
		pr_info("Hash table entries set to unexpected value %d\n",
			mcast_bins);
		break;
	}
	return x;
}

/**
 * dwmac1000_validate_ucast_entries - validate the Unicast address entries
 * @ucast_entries: number of Unicast address entries
 * Description:
 * This function validates the number of Unicast address entries supported
 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
 * supports 1, 32, 64, or 128 Unicast filter entries for its Unicast filter
 * logic. This function validates that a valid, supported configuration is
 * selected, and defaults to 1 Unicast address if an unsupported
 * configuration is selected.
 */
static int dwmac1000_validate_ucast_entries(int ucast_entries)
{
	int x = ucast_entries;

	switch (x) {
	case 1:
	case 32:
	case 64:
	case 128:
		break;
	default:
		x = 1;
		pr_info("Unicast table entries set to unexpected value %d\n",
			ucast_entries);
		break;
	}
	return x;
}

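/*
 * Illustrative example (not part of the driver): the two validators above
 * sanity-check values that a platform's device tree may optionally provide
 * through "snps,multicast-filter-bins" and "snps,perfect-filter-entries",
 * e.g. in a hypothetical node:
 *
 *	gmac0: ethernet@e0800000 {
 *		compatible = "st,spear600-gmac";
 *		snps,multicast-filter-bins = <256>;
 *		snps,perfect-filter-entries = <128>;
 *	};
 *
 * Values outside the supported sets fall back to the defaults chosen above
 * (Multicast promiscuous mode / a single Unicast entry).
 */
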
/**
 * stmmac_axi_setup - parse DT parameters for programming the AXI register
 * @pdev: platform device
 * Description:
 * if required, the AXI internal register can be tuned from the device tree
 * by using platform parameters.
 */
static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
	struct device_node *np;
	struct stmmac_axi *axi;

	np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
	if (!np)
		return NULL;

	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
	if (!axi) {
		of_node_put(np);
		return ERR_PTR(-ENOMEM);
	}

	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
	axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");

	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
		axi->axi_wr_osr_lmt = 1;
	if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
		axi->axi_rd_osr_lmt = 1;
	of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
	of_node_put(np);

	return axi;
}

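/*
 * Illustrative example (not part of the driver): a hypothetical
 * "snps,axi-config" phandle, as consumed by stmmac_axi_setup() above,
 * might look like this in the device tree:
 *
 *	stmmac_axi_conf: stmmac-axi-config {
 *		snps,wr_osr_lmt = <0xf>;
 *		snps,rd_osr_lmt = <0xf>;
 *		snps,blen = <256 128 64 32 0 0 0>;
 *	};
 *
 *	&gmac {
 *		snps,axi-config = <&stmmac_axi_conf>;
 *	};
 *
 * Missing snps,wr_osr_lmt/snps,rd_osr_lmt properties default to 1 and the
 * boolean snps,* flags simply stay false.
 */
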
/**
 * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
 * @pdev: platform device
 * @plat: driver data platform structure
 */
static int stmmac_mtl_setup(struct platform_device *pdev,
			    struct plat_stmmacenet_data *plat)
{
	struct device_node *q_node;
	struct device_node *rx_node;
	struct device_node *tx_node;
	u8 queue = 0;
	int ret = 0;

	/* For backwards-compatibility with device trees that don't have any
	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
	 * to one RX queue and one TX queue each.
	 */
	plat->rx_queues_to_use = 1;
	plat->tx_queues_to_use = 1;

	/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
	 * to always set this, otherwise Queue will be classified as AVB
	 * (because MTL_QUEUE_AVB = 0).
	 */
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;

	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
	if (!rx_node)
		return ret;

	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
	if (!tx_node) {
		of_node_put(rx_node);
		return ret;
	}

	/* Processing RX queues common config */
	if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
				 &plat->rx_queues_to_use))
		plat->rx_queues_to_use = 1;

	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
	else
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	/* Processing individual RX queue config */
	for_each_child_of_node(rx_node, q_node) {
		if (queue >= plat->rx_queues_to_use)
			break;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
		else
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;

		if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
					 &plat->rx_queues_cfg[queue].chan))
			plat->rx_queues_cfg[queue].chan = queue;
		/* TODO: Dynamic mapping to be included in the future */

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->rx_queues_cfg[queue].prio)) {
			plat->rx_queues_cfg[queue].prio = 0;
			plat->rx_queues_cfg[queue].use_prio = false;
		} else {
			plat->rx_queues_cfg[queue].use_prio = true;
		}

		/* RX queue specific packet type routing */
		if (of_property_read_bool(q_node, "snps,route-avcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
		else if (of_property_read_bool(q_node, "snps,route-ptp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
		else if (of_property_read_bool(q_node, "snps,route-up"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
		else
			plat->rx_queues_cfg[queue].pkt_route = 0x0;

		queue++;
	}
	if (queue != plat->rx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all RX queues were configured\n");
		goto out;
	}

	/* Processing TX queues common config */
	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
				 &plat->tx_queues_to_use))
		plat->tx_queues_to_use = 1;

	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
	else
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

	queue = 0;

	/* Processing individual TX queue config */
	for_each_child_of_node(tx_node, q_node) {
		if (queue >= plat->tx_queues_to_use)
			break;

		if (of_property_read_u32(q_node, "snps,weight",
					 &plat->tx_queues_cfg[queue].weight))
			plat->tx_queues_cfg[queue].weight = 0x10 + queue;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		} else if (of_property_read_bool(q_node,
						 "snps,avb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;

			/* Credit Based Shaper parameters used by AVB */
			if (of_property_read_u32(q_node, "snps,send_slope",
						 &plat->tx_queues_cfg[queue].send_slope))
				plat->tx_queues_cfg[queue].send_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,idle_slope",
						 &plat->tx_queues_cfg[queue].idle_slope))
				plat->tx_queues_cfg[queue].idle_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,high_credit",
						 &plat->tx_queues_cfg[queue].high_credit))
				plat->tx_queues_cfg[queue].high_credit = 0x0;
			if (of_property_read_u32(q_node, "snps,low_credit",
						 &plat->tx_queues_cfg[queue].low_credit))
				plat->tx_queues_cfg[queue].low_credit = 0x0;
		} else {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		}

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->tx_queues_cfg[queue].prio)) {
			plat->tx_queues_cfg[queue].prio = 0;
			plat->tx_queues_cfg[queue].use_prio = false;
		} else {
			plat->tx_queues_cfg[queue].use_prio = true;
		}

		queue++;
	}
	if (queue != plat->tx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all TX queues were configured\n");
		goto out;
	}

out:
	of_node_put(rx_node);
	of_node_put(tx_node);
	of_node_put(q_node);

	return ret;
}

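/*
 * Illustrative example (not part of the driver): hypothetical
 * "snps,mtl-rx-config" / "snps,mtl-tx-config" nodes as consumed by
 * stmmac_mtl_setup() above, configuring two queues each:
 *
 *	mtl_rx_setup: rx-queues-config {
 *		snps,rx-queues-to-use = <2>;
 *		snps,rx-sched-sp;
 *		queue0 {
 *			snps,dcb-algorithm;
 *			snps,map-to-dma-channel = <0x0>;
 *		};
 *		queue1 {
 *			snps,dcb-algorithm;
 *			snps,map-to-dma-channel = <0x1>;
 *		};
 *	};
 *
 *	mtl_tx_setup: tx-queues-config {
 *		snps,tx-queues-to-use = <2>;
 *		snps,tx-sched-wrr;
 *		queue0 {
 *			snps,weight = <0x10>;
 *			snps,dcb-algorithm;
 *		};
 *		queue1 {
 *			snps,avb-algorithm;
 *			snps,send_slope = <0x1000>;
 *			snps,idle_slope = <0x1000>;
 *			snps,high_credit = <0x3e800>;
 *			snps,low_credit = <0xffc18000>;
 *		};
 *	};
 *
 * The MAC node then points at them with
 *	snps,mtl-rx-config = <&mtl_rx_setup>;
 *	snps,mtl-tx-config = <&mtl_tx_setup>;
 */
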
/**
 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
 * @plat: driver data platform structure
 * @np: device tree node
 * @dev: device pointer
 * Description:
 * The mdio bus will be allocated in case a phy transceiver is on board;
 * it will be NULL if the fixed-link is configured.
 * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
 * in any case (for DSA, mdio must be registered even if fixed-link).
 * The table below summarizes the supported configurations:
 * -------------------------------
 * snps,phy-addr	|	Y
 * -------------------------------
 * phy-handle		|	Y
 * -------------------------------
 * fixed-link		|	N
 * -------------------------------
 * snps,dwmac-mdio	|
 * even if		|	Y
 * fixed-link		|
 * -------------------------------
 *
 * It returns 0 in case of success otherwise -ENODEV.
 */
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
			 struct device_node *np, struct device *dev)
{
	bool mdio = true;
	static const struct of_device_id need_mdio_ids[] = {
		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
		{},
	};

	/* If phy-handle property is passed from DT, use it as the PHY */
	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (plat->phy_node)
		dev_dbg(dev, "Found phy-handle subnode\n");

	/* If phy-handle is not specified, check if we have a fixed-phy */
	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
		if (of_phy_register_fixed_link(np) < 0)
			return -ENODEV;

		dev_dbg(dev, "Found fixed-link subnode\n");
		plat->phy_node = of_node_get(np);
		mdio = false;
	}

	if (of_match_node(need_mdio_ids, np)) {
		plat->mdio_node = of_get_child_by_name(np, "mdio");
	} else {
		/* If snps,dwmac-mdio is passed from DT, always register
		 * the MDIO
		 */
		for_each_child_of_node(np, plat->mdio_node) {
			if (of_device_is_compatible(plat->mdio_node,
						    "snps,dwmac-mdio"))
				break;
		}
	}

	if (plat->mdio_node) {
		dev_dbg(dev, "Found MDIO subnode\n");
		mdio = true;
	}

	if (mdio)
		plat->mdio_bus_data =
			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
				     GFP_KERNEL);
	return 0;
}

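/*
 * Illustrative example (not part of the driver): one of the configurations
 * accepted by stmmac_dt_phy() above, a fixed-link together with a
 * "snps,dwmac-mdio" sub-node so that the MDIO bus is still registered
 * (useful e.g. for DSA switches hanging off the MAC):
 *
 *	&gmac {
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *
 *		mdio {
 *			compatible = "snps,dwmac-mdio";
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *		};
 *	};
 *
 * Alternatively, a plain "phy-handle = <&phy0>;" selects a PHY found on the
 * MDIO bus.
 */
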
/**
 * stmmac_probe_config_dt - parse device-tree driver parameters
 * @pdev: platform_device structure
 * @mac: MAC address to use
 * Description:
 * this function is to read the driver parameters from device-tree and
 * set some private fields that will be used by the main driver at runtime.
 */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	struct device_node *np = pdev->dev.of_node;
	struct plat_stmmacenet_data *plat;
	struct stmmac_dma_cfg *dma_cfg;
	int rc;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return ERR_PTR(-ENOMEM);

	*mac = of_get_mac_address(np);
	plat->interface = of_get_phy_mode(np);

	/* Get max speed of operation from device tree */
	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
		plat->max_speed = -1;

	plat->bus_id = of_alias_get_id(np, "ethernet");
	if (plat->bus_id < 0)
		plat->bus_id = 0;

	/* Default to phy auto-detection */
	plat->phy_addr = -1;

	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
	 * and warn of its use. Remove this when phy node support is added.
	 */
	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");

	/* To configure the PHY by using all device-tree supported properties */
	rc = stmmac_dt_phy(plat, np, &pdev->dev);
	if (rc)
		return ERR_PTR(rc);

	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);

	of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);

	plat->force_sf_dma_mode =
		of_property_read_bool(np, "snps,force_sf_dma_mode");

	plat->en_tx_lpi_clockgating =
		of_property_read_bool(np, "snps,en-tx-lpi-clockgating");

	/* Set the maxmtu to a default of JUMBO_LEN in case the
	 * parameter is not present in the device tree.
	 */
	plat->maxmtu = JUMBO_LEN;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/*
	 * Currently only the properties needed on SPEAr600
	 * are provided. All other properties should be added
	 * once needed on other platforms.
	 */
	if (of_device_is_compatible(np, "st,spear600-gmac") ||
	    of_device_is_compatible(np, "snps,dwmac-3.50a") ||
	    of_device_is_compatible(np, "snps,dwmac-3.70a") ||
	    of_device_is_compatible(np, "snps,dwmac")) {
		/* Note that the max-frame-size parameter as defined in the
		 * ePAPR v1.1 spec is actually used as the IEEE definition of
		 * MAC Client data, or MTU. The ePAPR specification is
		 * confusing as the definition is max-frame-size, but usage
		 * examples are clearly MTUs.
		 */
		of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
		of_property_read_u32(np, "snps,multicast-filter-bins",
				     &plat->multicast_filter_bins);
		of_property_read_u32(np, "snps,perfect-filter-entries",
				     &plat->unicast_filter_entries);
		plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
					       plat->unicast_filter_entries);
		plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
					      plat->multicast_filter_bins);
		plat->has_gmac = 1;
		plat->pmt = 1;
	}

	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
	    of_device_is_compatible(np, "snps,dwmac-4.10a")) {
		plat->has_gmac4 = 1;
		plat->has_gmac = 0;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
	    of_device_is_compatible(np, "snps,dwmac-3.710")) {
		plat->enh_desc = 1;
		plat->bugged_jumbo = 1;
		plat->force_sf_dma_mode = 1;
	}

	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
			       GFP_KERNEL);
	if (!dma_cfg) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(-ENOMEM);
	}
	plat->dma_cfg = dma_cfg;

	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	if (!dma_cfg->pbl)
		dma_cfg->pbl = DEFAULT_DMA_PBL;
	of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
	of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
	dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");

	dma_cfg->aal = of_property_read_bool(np, "snps,aal");
	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");

	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
	if (plat->force_thresh_dma_mode) {
		plat->force_sf_dma_mode = 0;
		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
	}

	of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);

	plat->axi = stmmac_axi_setup(pdev);

	rc = stmmac_mtl_setup(pdev, plat);
	if (rc) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(rc);
	}

	/* clock setup */
	plat->stmmac_clk = devm_clk_get(&pdev->dev,
					STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Cannot get CSR clock\n");
		plat->stmmac_clk = NULL;
	}
	clk_prepare_enable(plat->stmmac_clk);

	plat->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(plat->pclk)) {
		if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
			goto error_pclk_get;

		plat->pclk = NULL;
	}
	clk_prepare_enable(plat->pclk);

	/* Fall-back to main clock in case no PTP ref is passed */
	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
	if (IS_ERR(plat->clk_ptp_ref)) {
		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
		plat->clk_ptp_ref = NULL;
		dev_warn(&pdev->dev, "PTP uses main clock\n");
	} else {
		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
	}

	plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
						  STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_rst)) {
		if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
			goto error_hw_init;

		dev_info(&pdev->dev, "no reset control found\n");
		plat->stmmac_rst = NULL;
	}

	return plat;

error_hw_init:
	clk_disable_unprepare(plat->pclk);
error_pclk_get:
	clk_disable_unprepare(plat->stmmac_clk);

	return ERR_PTR(-EPROBE_DEFER);
}

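/*
 * Illustrative example (not part of the driver): the clock, reset and
 * interrupt names looked up above and in stmmac_get_platform_resources()
 * below typically appear in the device tree as (clock/reset provider
 * phandles are hypothetical):
 *
 *	&gmac {
 *		clocks = <&clk_csr>, <&clk_pclk>, <&clk_ptp>;
 *		clock-names = "stmmaceth", "pclk", "ptp_ref";
 *		resets = <&rst 12>;
 *		reset-names = "stmmaceth";
 *		interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
 *	};
 *
 * The "macirq" interrupt is required; the remaining resources are optional
 * and the code falls back gracefully (e.g. PTP falls back to the main
 * clock, wake-up falls back to the mac irq).
 */
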
/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	struct device_node *np = pdev->dev.of_node;

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(plat->phy_node);
	of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	return ERR_PTR(-EINVAL);
}

void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);

int stmmac_get_platform_resources(struct platform_device *pdev,
				  struct stmmac_resources *stmmac_res)
{
	struct resource *res;

	memset(stmmac_res, 0, sizeof(*stmmac_res));

	/* Get IRQ information early to have an ability to ask for deferred
	 * probe if needed before we go too far with resource allocation.
	 */
	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
	if (stmmac_res->irq < 0) {
		if (stmmac_res->irq != -EPROBE_DEFER) {
			dev_err(&pdev->dev,
				"MAC IRQ configuration information not found\n");
		}
		return stmmac_res->irq;
	}

	/* On some platforms e.g. SPEAr the wake up irq differs from the mac
	 * irq. The external wake up irq can be passed through the platform
	 * code named as "eth_wake_irq".
	 *
	 * In case the wake up interrupt is not passed from the platform
	 * the driver will continue to use the mac irq (ndev->irq)
	 */
	stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (stmmac_res->wol_irq < 0) {
		if (stmmac_res->wol_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		stmmac_res->wol_irq = stmmac_res->irq;
	}

	stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
	if (stmmac_res->lpi_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);

	return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);

/**
 * stmmac_pltfr_remove - platform driver removal
 * @pdev: platform device pointer
 * Description: this function calls the main driver to free the net
 * resources, then calls the platform's exit hook and releases the
 * remaining resources (e.g. mem).
 */
int stmmac_pltfr_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct plat_stmmacenet_data *plat = priv->plat;
	int ret = stmmac_dvr_remove(&pdev->dev);

	if (plat->exit)
		plat->exit(pdev, plat->bsp_priv);

	stmmac_remove_config_dt(pdev, plat);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);

#ifdef CONFIG_PM_SLEEP
/**
 * stmmac_pltfr_suspend - suspend callback
 * @dev: device pointer
 * Description: this function is invoked when suspending the driver; it
 * directly calls the main suspend function and then, if required on some
 * platforms, it can call an exit helper.
 */
static int stmmac_pltfr_suspend(struct device *dev)
{
	int ret;
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	ret = stmmac_suspend(dev);
	if (priv->plat->exit)
		priv->plat->exit(pdev, priv->plat->bsp_priv);

	return ret;
}

/**
 * stmmac_pltfr_resume - resume callback
 * @dev: device pointer
 * Description: this function is invoked when resuming the driver; before
 * calling the main resume function, on some platforms, it can call its own
 * init helper if required.
 */
static int stmmac_pltfr_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	if (priv->plat->init)
		priv->plat->init(pdev, priv->plat->bsp_priv);

	return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
		  stmmac_pltfr_resume);
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");

// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This contains the functions to handle the platform driver.

  Copyright (C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>

#include "stmmac.h"
#include "stmmac_platform.h"

#ifdef CONFIG_OF

/**
 * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
 * @dev: struct device of the platform device
 * @mcast_bins: Multicast filtering bins
 * Description:
 * this function validates the number of Multicast filtering bins specified
 * by the configuration through the device tree. The Synopsys GMAC supports
 * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC
 * number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds
 * to 7 bits, and 256 refers to 8 bits of the CRC. Any other setting is
 * invalid and will cause the filtering algorithm to use Multicast
 * promiscuous mode.
 */
static int dwmac1000_validate_mcast_bins(struct device *dev, int mcast_bins)
{
	int x = mcast_bins;

	switch (x) {
	case HASH_TABLE_SIZE:
	case 128:
	case 256:
		break;
	default:
		x = 0;
		dev_info(dev, "Hash table entries set to unexpected value %d\n",
			 mcast_bins);
		break;
	}
	return x;
}

/**
 * dwmac1000_validate_ucast_entries - validate the Unicast address entries
 * @dev: struct device of the platform device
 * @ucast_entries: number of Unicast address entries
 * Description:
 * This function validates the number of Unicast address entries supported
 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
 * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
 * logic. This function validates that a valid, supported configuration is
 * selected, and defaults to 1 Unicast address if an unsupported
 * configuration is selected.
 */
static int dwmac1000_validate_ucast_entries(struct device *dev,
					    int ucast_entries)
{
	int x = ucast_entries;

	switch (x) {
	case 1 ... 32:
	case 64:
	case 128:
		break;
	default:
		x = 1;
		dev_info(dev, "Unicast table entries set to unexpected value %d\n",
			 ucast_entries);
		break;
	}
	return x;
}

/**
 * stmmac_axi_setup - parse DT parameters for programming the AXI register
 * @pdev: platform device
 * Description:
 * if required, the AXI internal register can be tuned from the device tree
 * by using platform parameters.
 */
static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
	struct device_node *np;
	struct stmmac_axi *axi;

	np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
	if (!np)
		return NULL;

	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
	if (!axi) {
		of_node_put(np);
		return ERR_PTR(-ENOMEM);
	}

	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
	axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");

	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
		axi->axi_wr_osr_lmt = 1;
	if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
		axi->axi_rd_osr_lmt = 1;
	of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
	of_node_put(np);

	return axi;
}

/**
 * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
 * @pdev: platform device
 * @plat: driver data platform structure
 */
static int stmmac_mtl_setup(struct platform_device *pdev,
			    struct plat_stmmacenet_data *plat)
{
	struct device_node *q_node;
	struct device_node *rx_node;
	struct device_node *tx_node;
	u8 queue = 0;
	int ret = 0;

	/* For backwards-compatibility with device trees that don't have any
	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
	 * to one RX queue and one TX queue each.
	 */
	plat->rx_queues_to_use = 1;
	plat->tx_queues_to_use = 1;

	/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
	 * to always set this, otherwise Queue will be classified as AVB
	 * (because MTL_QUEUE_AVB = 0).
	 */
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;

	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
	if (!rx_node)
		return ret;

	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
	if (!tx_node) {
		of_node_put(rx_node);
		return ret;
	}

	/* Processing RX queues common config */
	if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
				 &plat->rx_queues_to_use))
		plat->rx_queues_to_use = 1;

	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
	else
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	/* Processing individual RX queue config */
	for_each_child_of_node(rx_node, q_node) {
		if (queue >= plat->rx_queues_to_use)
			break;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
		else
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;

		if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
					 &plat->rx_queues_cfg[queue].chan))
			plat->rx_queues_cfg[queue].chan = queue;
		/* TODO: Dynamic mapping to be included in the future */

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->rx_queues_cfg[queue].prio)) {
			plat->rx_queues_cfg[queue].prio = 0;
			plat->rx_queues_cfg[queue].use_prio = false;
		} else {
			plat->rx_queues_cfg[queue].use_prio = true;
		}

		/* RX queue specific packet type routing */
		if (of_property_read_bool(q_node, "snps,route-avcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
		else if (of_property_read_bool(q_node, "snps,route-ptp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
		else if (of_property_read_bool(q_node, "snps,route-up"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
		else
			plat->rx_queues_cfg[queue].pkt_route = 0x0;

		queue++;
	}
	if (queue != plat->rx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all RX queues were configured\n");
		goto out;
	}

	/* Processing TX queues common config */
	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
				 &plat->tx_queues_to_use))
		plat->tx_queues_to_use = 1;

	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
	else
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

	queue = 0;

	/* Processing individual TX queue config */
	for_each_child_of_node(tx_node, q_node) {
		if (queue >= plat->tx_queues_to_use)
			break;

		if (of_property_read_u32(q_node, "snps,weight",
					 &plat->tx_queues_cfg[queue].weight))
			plat->tx_queues_cfg[queue].weight = 0x10 + queue;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		} else if (of_property_read_bool(q_node,
						 "snps,avb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;

			/* Credit Based Shaper parameters used by AVB */
			if (of_property_read_u32(q_node, "snps,send_slope",
						 &plat->tx_queues_cfg[queue].send_slope))
				plat->tx_queues_cfg[queue].send_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,idle_slope",
						 &plat->tx_queues_cfg[queue].idle_slope))
				plat->tx_queues_cfg[queue].idle_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,high_credit",
						 &plat->tx_queues_cfg[queue].high_credit))
				plat->tx_queues_cfg[queue].high_credit = 0x0;
			if (of_property_read_u32(q_node, "snps,low_credit",
						 &plat->tx_queues_cfg[queue].low_credit))
				plat->tx_queues_cfg[queue].low_credit = 0x0;
		} else {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		}

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->tx_queues_cfg[queue].prio)) {
			plat->tx_queues_cfg[queue].prio = 0;
			plat->tx_queues_cfg[queue].use_prio = false;
		} else {
			plat->tx_queues_cfg[queue].use_prio = true;
		}

		queue++;
	}
	if (queue != plat->tx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all TX queues were configured\n");
		goto out;
	}

out:
	of_node_put(rx_node);
	of_node_put(tx_node);
	of_node_put(q_node);

	return ret;
}

/**
 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
 * @plat: driver data platform structure
 * @np: device tree node
 * @dev: device pointer
 * Description:
 * The mdio bus will be allocated in case a phy transceiver is on board;
 * it will be NULL if the fixed-link is configured.
 * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
 * in any case (for DSA, mdio must be registered even if fixed-link).
 * The table below summarizes the supported configurations:
 * -------------------------------
 * snps,phy-addr	|	Y
 * -------------------------------
 * phy-handle		|	Y
 * -------------------------------
 * fixed-link		|	N
 * -------------------------------
 * snps,dwmac-mdio	|
 * even if		|	Y
 * fixed-link		|
 * -------------------------------
 *
 * It returns 0 in case of success otherwise -ENODEV.
 */
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
			 struct device_node *np, struct device *dev)
{
	bool mdio = !of_phy_is_fixed_link(np);
	static const struct of_device_id need_mdio_ids[] = {
		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
		{},
	};

	if (of_match_node(need_mdio_ids, np)) {
		plat->mdio_node = of_get_child_by_name(np, "mdio");
	} else {
		/* If snps,dwmac-mdio is passed from DT, always register
		 * the MDIO
		 */
		for_each_child_of_node(np, plat->mdio_node) {
			if (of_device_is_compatible(plat->mdio_node,
						    "snps,dwmac-mdio"))
				break;
		}
	}

	if (plat->mdio_node) {
		dev_dbg(dev, "Found MDIO subnode\n");
		mdio = true;
	}

	if (mdio) {
		plat->mdio_bus_data =
			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
				     GFP_KERNEL);
		if (!plat->mdio_bus_data)
			return -ENOMEM;

		plat->mdio_bus_data->needs_reset = true;
	}

	return 0;
}

/**
 * stmmac_of_get_mac_mode - retrieves the interface of the MAC
 * @np: device-tree node
 * Description:
 * Similar to of_get_phy_mode(), this function will retrieve (from
 * the device-tree) the interface mode on the MAC side. This assumes
 * that there is a mode converter in-between the MAC & PHY
 * (e.g. GMII-to-RGMII).
 */
static int stmmac_of_get_mac_mode(struct device_node *np)
{
	const char *pm;
	int err, i;

	err = of_property_read_string(np, "mac-mode", &pm);
	if (err < 0)
		return err;

	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
		if (!strcasecmp(pm, phy_modes(i)))
			return i;
	}

	return -ENODEV;
}

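/*
 * Illustrative example (not part of the driver): "mac-mode" only matters
 * when a converter sits between the MAC and the PHY. A hypothetical board
 * with a GMII-to-RGMII converter could describe it as:
 *
 *	&gmac {
 *		phy-mode = "rgmii-id";	// what the PHY sees
 *		mac-mode = "gmii";	// what the MAC itself drives
 *	};
 *
 * When "mac-mode" is absent, stmmac_probe_config_dt() below falls back to
 * the value of phy-mode/phy-connection-type.
 */
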
/**
 * stmmac_probe_config_dt - parse device-tree driver parameters
 * @pdev: platform_device structure
 * @mac: MAC address to use
 * Description:
 * this function is to read the driver parameters from device-tree and
 * set some private fields that will be used by the main driver at runtime.
 */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	struct device_node *np = pdev->dev.of_node;
	struct plat_stmmacenet_data *plat;
	struct stmmac_dma_cfg *dma_cfg;
	int rc;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return ERR_PTR(-ENOMEM);

	*mac = of_get_mac_address(np);
	if (IS_ERR(*mac)) {
		if (PTR_ERR(*mac) == -EPROBE_DEFER)
			return ERR_CAST(*mac);

		*mac = NULL;
	}

	plat->phy_interface = device_get_phy_mode(&pdev->dev);
	if (plat->phy_interface < 0)
		return ERR_PTR(plat->phy_interface);

	plat->interface = stmmac_of_get_mac_mode(np);
	if (plat->interface < 0)
		plat->interface = plat->phy_interface;

	/* Some wrapper drivers still rely on phy_node. Let's save it while
	 * they are not converted to phylink.
	 */
	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* PHYLINK automatically parses the phy-handle property */
	plat->phylink_node = np;

	/* Get max speed of operation from device tree */
	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
		plat->max_speed = -1;

	plat->bus_id = of_alias_get_id(np, "ethernet");
	if (plat->bus_id < 0)
		plat->bus_id = 0;

	/* Default to phy auto-detection */
	plat->phy_addr = -1;

	/* Default to get clk_csr from stmmac_clk_csr_set(),
	 * or get clk_csr from device tree.
	 */
	plat->clk_csr = -1;
	of_property_read_u32(np, "clk_csr", &plat->clk_csr);

	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
	 * and warn of its use. Remove this when phy node support is added.
	 */
	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");

	/* To configure the PHY by using all device-tree supported properties */
	rc = stmmac_dt_phy(plat, np, &pdev->dev);
	if (rc)
		return ERR_PTR(rc);

	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);

	of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);

	plat->force_sf_dma_mode =
		of_property_read_bool(np, "snps,force_sf_dma_mode");

	plat->en_tx_lpi_clockgating =
		of_property_read_bool(np, "snps,en-tx-lpi-clockgating");

	/* Set the maxmtu to a default of JUMBO_LEN in case the
	 * parameter is not present in the device tree.
	 */
	plat->maxmtu = JUMBO_LEN;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/*
	 * Currently only the properties needed on SPEAr600
	 * are provided. All other properties should be added
	 * once needed on other platforms.
	 */
	if (of_device_is_compatible(np, "st,spear600-gmac") ||
	    of_device_is_compatible(np, "snps,dwmac-3.50a") ||
	    of_device_is_compatible(np, "snps,dwmac-3.70a") ||
	    of_device_is_compatible(np, "snps,dwmac")) {
		/* Note that the max-frame-size parameter as defined in the
		 * ePAPR v1.1 spec is actually used as the IEEE definition of
		 * MAC Client data, or MTU. The ePAPR specification is
		 * confusing as the definition is max-frame-size, but usage
		 * examples are clearly MTUs.
		 */
		of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
		of_property_read_u32(np, "snps,multicast-filter-bins",
				     &plat->multicast_filter_bins);
		of_property_read_u32(np, "snps,perfect-filter-entries",
				     &plat->unicast_filter_entries);
		plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
				&pdev->dev, plat->unicast_filter_entries);
		plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
				&pdev->dev, plat->multicast_filter_bins);
		plat->has_gmac = 1;
		plat->pmt = 1;
	}

	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
	    of_device_is_compatible(np, "snps,dwmac-4.10a") ||
	    of_device_is_compatible(np, "snps,dwmac-4.20a") ||
	    of_device_is_compatible(np, "snps,dwmac-5.10a")) {
		plat->has_gmac4 = 1;
		plat->has_gmac = 0;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
	    of_device_is_compatible(np, "snps,dwmac-3.710")) {
		plat->enh_desc = 1;
		plat->bugged_jumbo = 1;
		plat->force_sf_dma_mode = 1;
	}

	if (of_device_is_compatible(np, "snps,dwxgmac")) {
		plat->has_xgmac = 1;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
			       GFP_KERNEL);
	if (!dma_cfg) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(-ENOMEM);
	}
	plat->dma_cfg = dma_cfg;

	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	if (!dma_cfg->pbl)
		dma_cfg->pbl = DEFAULT_DMA_PBL;
	of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
	of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
	dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");

	dma_cfg->aal = of_property_read_bool(np, "snps,aal");
	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");

	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
	if (plat->force_thresh_dma_mode) {
		plat->force_sf_dma_mode = 0;
		dev_warn(&pdev->dev,
			 "force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
	}

	of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);

	plat->axi = stmmac_axi_setup(pdev);

	rc = stmmac_mtl_setup(pdev, plat);
	if (rc) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(rc);
	}

	/* clock setup */
	if (!of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
		plat->stmmac_clk = devm_clk_get(&pdev->dev,
						STMMAC_RESOURCE_NAME);
		if (IS_ERR(plat->stmmac_clk)) {
			dev_warn(&pdev->dev, "Cannot get CSR clock\n");
			plat->stmmac_clk = NULL;
		}
		clk_prepare_enable(plat->stmmac_clk);
	}

	plat->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(plat->pclk)) {
		if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
			goto error_pclk_get;

		plat->pclk = NULL;
	}
	clk_prepare_enable(plat->pclk);

	/* Fall-back to main clock in case no PTP ref is passed */
	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
	if (IS_ERR(plat->clk_ptp_ref)) {
		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
		plat->clk_ptp_ref = NULL;
		dev_info(&pdev->dev, "PTP uses main clock\n");
	} else {
		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
	}

	plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
						  STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_rst)) {
		if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
			goto error_hw_init;

		dev_info(&pdev->dev, "no reset control found\n");
		plat->stmmac_rst = NULL;
	}

	return plat;

error_hw_init:
	clk_disable_unprepare(plat->pclk);
error_pclk_get:
	clk_disable_unprepare(plat->stmmac_clk);

	return ERR_PTR(-EPROBE_DEFER);
}

/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	of_node_put(plat->phy_node);
	of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	return ERR_PTR(-EINVAL);
}

void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);

int stmmac_get_platform_resources(struct platform_device *pdev,
				  struct stmmac_resources *stmmac_res)
{
	memset(stmmac_res, 0, sizeof(*stmmac_res));

	/* Get IRQ information early to have an ability to ask for deferred
	 * probe if needed before we go too far with resource allocation.
	 */
	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
	if (stmmac_res->irq < 0)
		return stmmac_res->irq;

	/* On some platforms e.g. SPEAr the wake up irq differs from the mac
	 * irq. The external wake up irq can be passed through the platform
	 * code named as "eth_wake_irq".
	 *
	 * In case the wake up interrupt is not passed from the platform
	 * the driver will continue to use the mac irq (ndev->irq)
	 */
	stmmac_res->wol_irq =
		platform_get_irq_byname_optional(pdev, "eth_wake_irq");
	if (stmmac_res->wol_irq < 0) {
		if (stmmac_res->wol_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n");
		stmmac_res->wol_irq = stmmac_res->irq;
	}

	stmmac_res->lpi_irq =
		platform_get_irq_byname_optional(pdev, "eth_lpi");
	if (stmmac_res->lpi_irq < 0) {
		if (stmmac_res->lpi_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
	}

	stmmac_res->addr = devm_platform_ioremap_resource(pdev, 0);

	return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);

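/*
 * Illustrative sketch (not part of this file): a minimal "glue" platform
 * driver probe, in the style of dwmac-generic, typically combines the
 * helpers above roughly as follows. Error handling is trimmed and the
 * function name is hypothetical.
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *		if (ret)
 *			return ret;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		// SoC-specific fixups to plat_dat would go here
 *
 *		ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *		if (ret)
 *			stmmac_remove_config_dt(pdev, plat_dat);
 *
 *		return ret;
 *	}
 */
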
/**
 * stmmac_pltfr_remove - platform driver removal
 * @pdev: platform device pointer
 * Description: this function calls the main driver to free the net
 * resources, then calls the platform's exit hook and releases the
 * remaining resources (e.g. mem).
 */
int stmmac_pltfr_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct plat_stmmacenet_data *plat = priv->plat;
	int ret = stmmac_dvr_remove(&pdev->dev);

	if (plat->exit)
		plat->exit(pdev, plat->bsp_priv);

	stmmac_remove_config_dt(pdev, plat);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);

#ifdef CONFIG_PM_SLEEP
/**
 * stmmac_pltfr_suspend - suspend callback
 * @dev: device pointer
 * Description: this function is invoked when suspending the driver; it
 * directly calls the main suspend function and then, if required on some
 * platforms, it can call an exit helper.
 */
static int stmmac_pltfr_suspend(struct device *dev)
{
	int ret;
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	ret = stmmac_suspend(dev);
	if (priv->plat->exit)
		priv->plat->exit(pdev, priv->plat->bsp_priv);

	return ret;
}

/**
 * stmmac_pltfr_resume - resume callback
 * @dev: device pointer
 * Description: this function is invoked when resuming the driver; before
 * calling the main resume function, on some platforms, it can call its own
 * init helper if required.
 */
static int stmmac_pltfr_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	if (priv->plat->init)
		priv->plat->init(pdev, priv->plat->bsp_priv);

	return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
		  stmmac_pltfr_resume);
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");