1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/platform_device.h>
12#include <linux/export.h>
13#include <bcm63xx_dev_enet.h>
14#include <bcm63xx_io.h>
15#include <bcm63xx_regs.h>
16
17static const unsigned long bcm6348_regs_enetdmac[] = {
18 [ENETDMAC_CHANCFG] = ENETDMAC_CHANCFG_REG,
19 [ENETDMAC_IR] = ENETDMAC_IR_REG,
20 [ENETDMAC_IRMASK] = ENETDMAC_IRMASK_REG,
21 [ENETDMAC_MAXBURST] = ENETDMAC_MAXBURST_REG,
22};
23
24static const unsigned long bcm6345_regs_enetdmac[] = {
25 [ENETDMAC_CHANCFG] = ENETDMA_6345_CHANCFG_REG,
26 [ENETDMAC_IR] = ENETDMA_6345_IR_REG,
27 [ENETDMAC_IRMASK] = ENETDMA_6345_IRMASK_REG,
28 [ENETDMAC_MAXBURST] = ENETDMA_6345_MAXBURST_REG,
29 [ENETDMAC_BUFALLOC] = ENETDMA_6345_BUFALLOC_REG,
30 [ENETDMAC_RSTART] = ENETDMA_6345_RSTART_REG,
31 [ENETDMAC_FC] = ENETDMA_6345_FC_REG,
32 [ENETDMAC_LEN] = ENETDMA_6345_LEN_REG,
33};
34
35const unsigned long *bcm63xx_regs_enetdmac;
36EXPORT_SYMBOL(bcm63xx_regs_enetdmac);
37
38static __init void bcm63xx_enetdmac_regs_init(void)
39{
40 if (BCMCPU_IS_6345())
41 bcm63xx_regs_enetdmac = bcm6345_regs_enetdmac;
42 else
43 bcm63xx_regs_enetdmac = bcm6348_regs_enetdmac;
44}
45
46static struct resource shared_res[] = {
47 {
48 .start = -1, /* filled at runtime */
49 .end = -1, /* filled at runtime */
50 .flags = IORESOURCE_MEM,
51 },
52 {
53 .start = -1, /* filled at runtime */
54 .end = -1, /* filled at runtime */
55 .flags = IORESOURCE_MEM,
56 },
57 {
58 .start = -1, /* filled at runtime */
59 .end = -1, /* filled at runtime */
60 .flags = IORESOURCE_MEM,
61 },
62};
63
64static struct platform_device bcm63xx_enet_shared_device = {
65 .name = "bcm63xx_enet_shared",
66 .id = 0,
67 .num_resources = ARRAY_SIZE(shared_res),
68 .resource = shared_res,
69};
70
71static int shared_device_registered;
72
73static u64 enet_dmamask = DMA_BIT_MASK(32);
74
75static struct resource enet0_res[] = {
76 {
77 .start = -1, /* filled at runtime */
78 .end = -1, /* filled at runtime */
79 .flags = IORESOURCE_MEM,
80 },
81 {
82 .start = -1, /* filled at runtime */
83 .flags = IORESOURCE_IRQ,
84 },
85 {
86 .start = -1, /* filled at runtime */
87 .flags = IORESOURCE_IRQ,
88 },
89 {
90 .start = -1, /* filled at runtime */
91 .flags = IORESOURCE_IRQ,
92 },
93};
94
95static struct bcm63xx_enet_platform_data enet0_pd;
96
97static struct platform_device bcm63xx_enet0_device = {
98 .name = "bcm63xx_enet",
99 .id = 0,
100 .num_resources = ARRAY_SIZE(enet0_res),
101 .resource = enet0_res,
102 .dev = {
103 .platform_data = &enet0_pd,
104 .dma_mask = &enet_dmamask,
105 .coherent_dma_mask = DMA_BIT_MASK(32),
106 },
107};
108
109static struct resource enet1_res[] = {
110 {
111 .start = -1, /* filled at runtime */
112 .end = -1, /* filled at runtime */
113 .flags = IORESOURCE_MEM,
114 },
115 {
116 .start = -1, /* filled at runtime */
117 .flags = IORESOURCE_IRQ,
118 },
119 {
120 .start = -1, /* filled at runtime */
121 .flags = IORESOURCE_IRQ,
122 },
123 {
124 .start = -1, /* filled at runtime */
125 .flags = IORESOURCE_IRQ,
126 },
127};
128
129static struct bcm63xx_enet_platform_data enet1_pd;
130
131static struct platform_device bcm63xx_enet1_device = {
132 .name = "bcm63xx_enet",
133 .id = 1,
134 .num_resources = ARRAY_SIZE(enet1_res),
135 .resource = enet1_res,
136 .dev = {
137 .platform_data = &enet1_pd,
138 .dma_mask = &enet_dmamask,
139 .coherent_dma_mask = DMA_BIT_MASK(32),
140 },
141};
142
143static struct resource enetsw_res[] = {
144 {
145 /* start & end filled at runtime */
146 .flags = IORESOURCE_MEM,
147 },
148 {
149 /* start filled at runtime */
150 .flags = IORESOURCE_IRQ,
151 },
152 {
153 /* start filled at runtime */
154 .flags = IORESOURCE_IRQ,
155 },
156};
157
158static struct bcm63xx_enetsw_platform_data enetsw_pd;
159
160static struct platform_device bcm63xx_enetsw_device = {
161 .name = "bcm63xx_enetsw",
162 .num_resources = ARRAY_SIZE(enetsw_res),
163 .resource = enetsw_res,
164 .dev = {
165 .platform_data = &enetsw_pd,
166 .dma_mask = &enet_dmamask,
167 .coherent_dma_mask = DMA_BIT_MASK(32),
168 },
169};
170
171static int __init register_shared(void)
172{
173 int ret, chan_count;
174
175 if (shared_device_registered)
176 return 0;
177
178 bcm63xx_enetdmac_regs_init();
179
180 shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
181 shared_res[0].end = shared_res[0].start;
182 if (BCMCPU_IS_6345())
183 shared_res[0].end += (RSET_6345_ENETDMA_SIZE) - 1;
184 else
185 shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;
186
187 if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
188 chan_count = 32;
189 else if (BCMCPU_IS_6345())
190 chan_count = 8;
191 else
192 chan_count = 16;
193
194 shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC);
195 shared_res[1].end = shared_res[1].start;
196 shared_res[1].end += RSET_ENETDMAC_SIZE(chan_count) - 1;
197
198 shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS);
199 shared_res[2].end = shared_res[2].start;
200 shared_res[2].end += RSET_ENETDMAS_SIZE(chan_count) - 1;
201
202 ret = platform_device_register(&bcm63xx_enet_shared_device);
203 if (ret)
204 return ret;
205 shared_device_registered = 1;
206
207 return 0;
208}
209
210int __init bcm63xx_enet_register(int unit,
211 const struct bcm63xx_enet_platform_data *pd)
212{
213 struct platform_device *pdev;
214 struct bcm63xx_enet_platform_data *dpd;
215 int ret;
216
217 if (unit > 1)
218 return -ENODEV;
219
220 if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
221 return -ENODEV;
222
223 ret = register_shared();
224 if (ret)
225 return ret;
226
227 if (unit == 0) {
228 enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
229 enet0_res[0].end = enet0_res[0].start;
230 enet0_res[0].end += RSET_ENET_SIZE - 1;
231 enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
232 enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
233 enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
234 pdev = &bcm63xx_enet0_device;
235 } else {
236 enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
237 enet1_res[0].end = enet1_res[0].start;
238 enet1_res[0].end += RSET_ENET_SIZE - 1;
239 enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
240 enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
241 enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
242 pdev = &bcm63xx_enet1_device;
243 }
244
245 /* copy given platform data */
246 dpd = pdev->dev.platform_data;
247 memcpy(dpd, pd, sizeof(*pd));
248
249 /* adjust them in case internal phy is used */
250 if (dpd->use_internal_phy) {
251
252 /* internal phy only exists for enet0 */
253 if (unit == 1)
254 return -ENODEV;
255
256 dpd->phy_id = 1;
257 dpd->has_phy_interrupt = 1;
258 dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
259 }
260
261 dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
262 dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
263 if (BCMCPU_IS_6345()) {
264 dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK;
265 dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK;
266 dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK;
267 dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK;
268 dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK;
269 dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH;
270 dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT;
271 } else {
272 dpd->dma_has_sram = true;
273 dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
274 }
275
276 if (unit == 0) {
277 dpd->rx_chan = 0;
278 dpd->tx_chan = 1;
279 } else {
280 dpd->rx_chan = 2;
281 dpd->tx_chan = 3;
282 }
283
284 ret = platform_device_register(pdev);
285 if (ret)
286 return ret;
287 return 0;
288}
289
290int __init
291bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd)
292{
293 int ret;
294
295 if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362() && !BCMCPU_IS_6368())
296 return -ENODEV;
297
298 ret = register_shared();
299 if (ret)
300 return ret;
301
302 enetsw_res[0].start = bcm63xx_regset_address(RSET_ENETSW);
303 enetsw_res[0].end = enetsw_res[0].start;
304 enetsw_res[0].end += RSET_ENETSW_SIZE - 1;
305 enetsw_res[1].start = bcm63xx_get_irq_number(IRQ_ENETSW_RXDMA0);
306 enetsw_res[2].start = bcm63xx_get_irq_number(IRQ_ENETSW_TXDMA0);
307 if (!enetsw_res[2].start)
308 enetsw_res[2].start = -1;
309
310 memcpy(bcm63xx_enetsw_device.dev.platform_data, pd, sizeof(*pd));
311
312 if (BCMCPU_IS_6328())
313 enetsw_pd.num_ports = ENETSW_PORTS_6328;
314 else if (BCMCPU_IS_6362() || BCMCPU_IS_6368())
315 enetsw_pd.num_ports = ENETSW_PORTS_6368;
316
317 enetsw_pd.dma_has_sram = true;
318 enetsw_pd.dma_chan_width = ENETDMA_CHAN_WIDTH;
319 enetsw_pd.dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
320 enetsw_pd.dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
321
322 ret = platform_device_register(&bcm63xx_enetsw_device);
323 if (ret)
324 return ret;
325
326 return 0;
327}
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/platform_device.h>
12#include <linux/export.h>
13#include <bcm63xx_dev_enet.h>
14#include <bcm63xx_io.h>
15#include <bcm63xx_regs.h>
16
17#ifdef BCMCPU_RUNTIME_DETECT
18static const unsigned long bcm6348_regs_enetdmac[] = {
19 [ENETDMAC_CHANCFG] = ENETDMAC_CHANCFG_REG,
20 [ENETDMAC_IR] = ENETDMAC_IR_REG,
21 [ENETDMAC_IRMASK] = ENETDMAC_IRMASK_REG,
22 [ENETDMAC_MAXBURST] = ENETDMAC_MAXBURST_REG,
23};
24
25static const unsigned long bcm6345_regs_enetdmac[] = {
26 [ENETDMAC_CHANCFG] = ENETDMA_6345_CHANCFG_REG,
27 [ENETDMAC_IR] = ENETDMA_6345_IR_REG,
28 [ENETDMAC_IRMASK] = ENETDMA_6345_IRMASK_REG,
29 [ENETDMAC_MAXBURST] = ENETDMA_6345_MAXBURST_REG,
30 [ENETDMAC_BUFALLOC] = ENETDMA_6345_BUFALLOC_REG,
31 [ENETDMAC_RSTART] = ENETDMA_6345_RSTART_REG,
32 [ENETDMAC_FC] = ENETDMA_6345_FC_REG,
33 [ENETDMAC_LEN] = ENETDMA_6345_LEN_REG,
34};
35
36const unsigned long *bcm63xx_regs_enetdmac;
37EXPORT_SYMBOL(bcm63xx_regs_enetdmac);
38
39static __init void bcm63xx_enetdmac_regs_init(void)
40{
41 if (BCMCPU_IS_6345())
42 bcm63xx_regs_enetdmac = bcm6345_regs_enetdmac;
43 else
44 bcm63xx_regs_enetdmac = bcm6348_regs_enetdmac;
45}
46#else
47static __init void bcm63xx_enetdmac_regs_init(void) { }
48#endif
49
50static struct resource shared_res[] = {
51 {
52 .start = -1, /* filled at runtime */
53 .end = -1, /* filled at runtime */
54 .flags = IORESOURCE_MEM,
55 },
56 {
57 .start = -1, /* filled at runtime */
58 .end = -1, /* filled at runtime */
59 .flags = IORESOURCE_MEM,
60 },
61 {
62 .start = -1, /* filled at runtime */
63 .end = -1, /* filled at runtime */
64 .flags = IORESOURCE_MEM,
65 },
66};
67
68static struct platform_device bcm63xx_enet_shared_device = {
69 .name = "bcm63xx_enet_shared",
70 .id = 0,
71 .num_resources = ARRAY_SIZE(shared_res),
72 .resource = shared_res,
73};
74
75static int shared_device_registered;
76
77static struct resource enet0_res[] = {
78 {
79 .start = -1, /* filled at runtime */
80 .end = -1, /* filled at runtime */
81 .flags = IORESOURCE_MEM,
82 },
83 {
84 .start = -1, /* filled at runtime */
85 .flags = IORESOURCE_IRQ,
86 },
87 {
88 .start = -1, /* filled at runtime */
89 .flags = IORESOURCE_IRQ,
90 },
91 {
92 .start = -1, /* filled at runtime */
93 .flags = IORESOURCE_IRQ,
94 },
95};
96
97static struct bcm63xx_enet_platform_data enet0_pd;
98
99static struct platform_device bcm63xx_enet0_device = {
100 .name = "bcm63xx_enet",
101 .id = 0,
102 .num_resources = ARRAY_SIZE(enet0_res),
103 .resource = enet0_res,
104 .dev = {
105 .platform_data = &enet0_pd,
106 },
107};
108
109static struct resource enet1_res[] = {
110 {
111 .start = -1, /* filled at runtime */
112 .end = -1, /* filled at runtime */
113 .flags = IORESOURCE_MEM,
114 },
115 {
116 .start = -1, /* filled at runtime */
117 .flags = IORESOURCE_IRQ,
118 },
119 {
120 .start = -1, /* filled at runtime */
121 .flags = IORESOURCE_IRQ,
122 },
123 {
124 .start = -1, /* filled at runtime */
125 .flags = IORESOURCE_IRQ,
126 },
127};
128
129static struct bcm63xx_enet_platform_data enet1_pd;
130
131static struct platform_device bcm63xx_enet1_device = {
132 .name = "bcm63xx_enet",
133 .id = 1,
134 .num_resources = ARRAY_SIZE(enet1_res),
135 .resource = enet1_res,
136 .dev = {
137 .platform_data = &enet1_pd,
138 },
139};
140
141static struct resource enetsw_res[] = {
142 {
143 /* start & end filled at runtime */
144 .flags = IORESOURCE_MEM,
145 },
146 {
147 /* start filled at runtime */
148 .flags = IORESOURCE_IRQ,
149 },
150 {
151 /* start filled at runtime */
152 .flags = IORESOURCE_IRQ,
153 },
154};
155
156static struct bcm63xx_enetsw_platform_data enetsw_pd;
157
158static struct platform_device bcm63xx_enetsw_device = {
159 .name = "bcm63xx_enetsw",
160 .num_resources = ARRAY_SIZE(enetsw_res),
161 .resource = enetsw_res,
162 .dev = {
163 .platform_data = &enetsw_pd,
164 },
165};
166
167static int __init register_shared(void)
168{
169 int ret, chan_count;
170
171 if (shared_device_registered)
172 return 0;
173
174 bcm63xx_enetdmac_regs_init();
175
176 shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
177 shared_res[0].end = shared_res[0].start;
178 if (BCMCPU_IS_6345())
179 shared_res[0].end += (RSET_6345_ENETDMA_SIZE) - 1;
180 else
181 shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;
182
183 if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
184 chan_count = 32;
185 else if (BCMCPU_IS_6345())
186 chan_count = 8;
187 else
188 chan_count = 16;
189
190 shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC);
191 shared_res[1].end = shared_res[1].start;
192 shared_res[1].end += RSET_ENETDMAC_SIZE(chan_count) - 1;
193
194 shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS);
195 shared_res[2].end = shared_res[2].start;
196 shared_res[2].end += RSET_ENETDMAS_SIZE(chan_count) - 1;
197
198 ret = platform_device_register(&bcm63xx_enet_shared_device);
199 if (ret)
200 return ret;
201 shared_device_registered = 1;
202
203 return 0;
204}
205
206int __init bcm63xx_enet_register(int unit,
207 const struct bcm63xx_enet_platform_data *pd)
208{
209 struct platform_device *pdev;
210 struct bcm63xx_enet_platform_data *dpd;
211 int ret;
212
213 if (unit > 1)
214 return -ENODEV;
215
216 if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
217 return -ENODEV;
218
219 ret = register_shared();
220 if (ret)
221 return ret;
222
223 if (unit == 0) {
224 enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
225 enet0_res[0].end = enet0_res[0].start;
226 enet0_res[0].end += RSET_ENET_SIZE - 1;
227 enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
228 enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
229 enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
230 pdev = &bcm63xx_enet0_device;
231 } else {
232 enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
233 enet1_res[0].end = enet1_res[0].start;
234 enet1_res[0].end += RSET_ENET_SIZE - 1;
235 enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
236 enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
237 enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
238 pdev = &bcm63xx_enet1_device;
239 }
240
241 /* copy given platform data */
242 dpd = pdev->dev.platform_data;
243 memcpy(dpd, pd, sizeof(*pd));
244
245 /* adjust them in case internal phy is used */
246 if (dpd->use_internal_phy) {
247
248 /* internal phy only exists for enet0 */
249 if (unit == 1)
250 return -ENODEV;
251
252 dpd->phy_id = 1;
253 dpd->has_phy_interrupt = 1;
254 dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
255 }
256
257 dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
258 dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
259 if (BCMCPU_IS_6345()) {
260 dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK;
261 dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK;
262 dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK;
263 dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK;
264 dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK;
265 dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH;
266 dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT;
267 } else {
268 dpd->dma_has_sram = true;
269 dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
270 }
271
272 ret = platform_device_register(pdev);
273 if (ret)
274 return ret;
275 return 0;
276}
277
278int __init
279bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd)
280{
281 int ret;
282
283 if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362() && !BCMCPU_IS_6368())
284 return -ENODEV;
285
286 ret = register_shared();
287 if (ret)
288 return ret;
289
290 enetsw_res[0].start = bcm63xx_regset_address(RSET_ENETSW);
291 enetsw_res[0].end = enetsw_res[0].start;
292 enetsw_res[0].end += RSET_ENETSW_SIZE - 1;
293 enetsw_res[1].start = bcm63xx_get_irq_number(IRQ_ENETSW_RXDMA0);
294 enetsw_res[2].start = bcm63xx_get_irq_number(IRQ_ENETSW_TXDMA0);
295 if (!enetsw_res[2].start)
296 enetsw_res[2].start = -1;
297
298 memcpy(bcm63xx_enetsw_device.dev.platform_data, pd, sizeof(*pd));
299
300 if (BCMCPU_IS_6328())
301 enetsw_pd.num_ports = ENETSW_PORTS_6328;
302 else if (BCMCPU_IS_6362() || BCMCPU_IS_6368())
303 enetsw_pd.num_ports = ENETSW_PORTS_6368;
304
305 enetsw_pd.dma_has_sram = true;
306 enetsw_pd.dma_chan_width = ENETDMA_CHAN_WIDTH;
307 enetsw_pd.dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
308 enetsw_pd.dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
309
310 ret = platform_device_register(&bcm63xx_enetsw_device);
311 if (ret)
312 return ret;
313
314 return 0;
315}