/*
 * Driver for Marvell NETA network controller Buffer Manager.
 *
 * Copyright (C) 2015 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"

#define MVNETA_BM_DRIVER_NAME "mvneta_bm"
#define MVNETA_BM_DRIVER_VERSION "1.0"

static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data)
{
	writel(data, priv->reg_base + offset);
}

static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset)
{
	return readl(priv->reg_base + offset);
}

static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val |= MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
}

static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val &= ~MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
}

static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val |= mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val &= ~mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
				      u8 target_id, u8 attr)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));
	val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);
	val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);
	val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id);
	val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr);

	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
}

int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
{
	struct mvneta_bm_pool *bm_pool =
		(struct mvneta_bm_pool *)hwbm_pool->priv;
	struct mvneta_bm *priv = bm_pool->priv;
	dma_addr_t phys_addr;

	/* In order to update the buf_cookie field of the RX descriptor
	 * properly, the BM hardware expects the buffer's virtual address
	 * to be placed in the first four bytes of the mapped buffer.
	 */
	*(u32 *)buf = (u32)buf;
	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
		return -ENOMEM;

	mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
	return 0;
}
EXPORT_SYMBOL_GPL(mvneta_bm_construct);

/* Create pool */
static int mvneta_bm_pool_create(struct mvneta_bm *priv,
				 struct mvneta_bm_pool *bm_pool)
{
	struct platform_device *pdev = priv->pdev;
	u8 target_id, attr;
	int size_bytes, err;

	size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id,
					   &attr);
	if (err < 0) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		return err;
	}

	/* Set pool address */
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id),
			bm_pool->phys_addr);

	mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr);
	mvneta_bm_pool_enable(priv, bm_pool->id);

	return 0;
}

/* Notify the driver that a BM pool is being used as a specific type and
 * return the pool pointer on success
 */
struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
					  enum mvneta_bm_type type, u8 port_id,
					  int pkt_size)
{
	struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
	int num, err;

	if (new_pool->type == MVNETA_BM_LONG &&
	    new_pool->port_map != 1 << port_id) {
		dev_err(&priv->pdev->dev,
			"long pool cannot be shared by the ports\n");
		return NULL;
	}

	if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) {
		dev_err(&priv->pdev->dev,
			"mixing pools' types between the ports is forbidden\n");
		return NULL;
	}

	if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
		new_pool->pkt_size = pkt_size;

	/* Allocate buffers in case BM pool hasn't been used yet */
	if (new_pool->type == MVNETA_BM_FREE) {
		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;

		new_pool->priv = priv;
		new_pool->type = type;
		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
		hwbm_pool->frag_size =
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		hwbm_pool->construct = mvneta_bm_construct;
		hwbm_pool->priv = new_pool;
		mutex_init(&hwbm_pool->buf_lock);

		/* Create new pool */
		err = mvneta_bm_pool_create(priv, new_pool);
		if (err) {
			dev_err(&priv->pdev->dev, "failed to create pool %d\n",
				new_pool->id);
			return NULL;
		}

		/* Allocate buffers for this pool */
		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
		if (num != hwbm_pool->size) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, hwbm_pool->size);
			return NULL;
		}
	}

	return new_pool;
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_use);
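
/* Purely illustrative sketch (not part of this driver) of how a NETA port
 * driver is expected to claim a pool; "port" and "pkt_size" are hypothetical
 * names for the caller's port structure and maximum packet size, and pool
 * id 0 is just an example:
 *
 *	struct mvneta_bm_pool *long_pool;
 *
 *	long_pool = mvneta_bm_pool_use(priv, 0, MVNETA_BM_LONG, port->id,
 *				       pkt_size);
 *	if (!long_pool)
 *		return -ENOMEM;
 *
 * The first call for a given pool id creates the pool and fills it with
 * buffers; subsequent calls reuse it and only validate that the requested
 * type is compatible with the existing one.
 */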

/* Free all buffers from the pool */
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
			 u8 port_map)
{
	int i;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
		dma_addr_t buf_phys_addr;
		u32 *vaddr;

		/* Get buffer physical address (indirect access) */
		buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool);

		/* Work around a problem seen when destroying the pool:
		 * sometimes a read access to the BPPI returns 0.
		 */
		if (buf_phys_addr == 0)
			continue;

		vaddr = phys_to_virt(buf_phys_addr);
		if (!vaddr)
			break;

		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
	}

	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->hwbm_pool.buf_num -= i;
}
EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);

/* Cleanup pool */
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
			    struct mvneta_bm_pool *bm_pool, u8 port_map)
{
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	bm_pool->type = MVNETA_BM_FREE;

	mvneta_bm_bufs_free(priv, bm_pool, port_map);
	if (hwbm_pool->buf_num)
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);

	if (bm_pool->virt_addr) {
		dma_free_coherent(&priv->pdev->dev,
				  sizeof(u32) * hwbm_pool->size,
				  bm_pool->virt_addr, bm_pool->phys_addr);
		bm_pool->virt_addr = NULL;
	}

	mvneta_bm_pool_disable(priv, bm_pool->id);
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy);

static void mvneta_bm_pools_init(struct mvneta_bm *priv)
{
	struct device_node *dn = priv->pdev->dev.of_node;
	struct mvneta_bm_pool *bm_pool;
	char prop[15];
	u32 size;
	int i;

	/* Activate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK);

	/* Create all pools with maximum size */
	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		bm_pool->type = MVNETA_BM_FREE;

		/* Reset read pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0);

		/* Reset write pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0);

		/* Configure pool size according to DT or use default value */
		sprintf(prop, "pool%d,capacity", i);
		if (of_property_read_u32(dn, prop, &size)) {
			size = MVNETA_BM_POOL_CAP_DEF;
		} else if (size > MVNETA_BM_POOL_CAP_MAX) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MAX);
			size = MVNETA_BM_POOL_CAP_MAX;
		} else if (size < MVNETA_BM_POOL_CAP_MIN) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MIN);
			size = MVNETA_BM_POOL_CAP_MIN;
		} else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, round to %d\n",
				 i, size,
				 ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN));
			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
		}
		bm_pool->hwbm_pool.size = size;

		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
				bm_pool->hwbm_pool.size);

		/* Obtain custom pkt_size from DT */
		sprintf(prop, "pool%d,pkt-size", i);
		if (of_property_read_u32(dn, prop, &bm_pool->pkt_size))
			bm_pool->pkt_size = 0;
	}
}
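
/* For reference, a hedged device-tree sketch of the per-pool properties
 * parsed above; the node name, unit address and numeric values are
 * illustrative only, while the compatible string and property names come
 * from this driver:
 *
 *	bm: bm@c8000 {
 *		compatible = "marvell,armada-380-neta-bm";
 *		...
 *		pool2,capacity = <4096>;
 *		pool2,pkt-size = <512>;
 *	};
 *
 * A missing "pool<N>,capacity" falls back to MVNETA_BM_POOL_CAP_DEF, and a
 * missing "pool<N>,pkt-size" leaves pkt_size at 0, so the packet size is
 * chosen by the first port that uses the pool.
 */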

static void mvneta_bm_default_set(struct mvneta_bm *priv)
{
	u32 val;

	/* Mask all BM interrupts */
	mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);

	/* Set BM configuration register */
	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);

	/* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
	val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK;
	val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static int mvneta_bm_init(struct mvneta_bm *priv)
{
	mvneta_bm_default_set(priv);

	/* Allocate and initialize BM pool structures */
	priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM,
				      sizeof(struct mvneta_bm_pool),
				      GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	mvneta_bm_pools_init(priv);

	return 0;
}

static int mvneta_bm_get_sram(struct device_node *dn,
			      struct mvneta_bm *priv)
{
	priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0);
	if (!priv->bppi_pool)
		return -ENOMEM;

	priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool,
						  MVNETA_BM_BPPI_SIZE,
						  &priv->bppi_phys_addr);
	if (!priv->bppi_virt_addr)
		return -ENOMEM;

	return 0;
}

static void mvneta_bm_put_sram(struct mvneta_bm *priv)
{
	gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr,
		      MVNETA_BM_BPPI_SIZE);
}

struct mvneta_bm *mvneta_bm_get(struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);

	return pdev ? platform_get_drvdata(pdev) : NULL;
}
EXPORT_SYMBOL_GPL(mvneta_bm_get);

void mvneta_bm_put(struct mvneta_bm *priv)
{
	platform_device_put(priv->pdev);
}
EXPORT_SYMBOL_GPL(mvneta_bm_put);
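
/* Minimal, hypothetical consumer-side sketch ("bm_node" is an assumed OF
 * node referencing the BM, e.g. resolved from a phandle in a port node):
 *
 *	struct mvneta_bm *bm_priv = mvneta_bm_get(bm_node);
 *
 *	if (!bm_priv)
 *		... fall back to software buffer management ...
 *	...
 *	mvneta_bm_put(bm_priv);
 *
 * mvneta_bm_get() resolves the platform device behind the node and returns
 * its driver data; the device reference it takes is dropped again by
 * mvneta_bm_put().
 */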

static int mvneta_bm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct mvneta_bm *priv;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);
	err = clk_prepare_enable(priv->clk);
	if (err < 0)
		return err;

	err = mvneta_bm_get_sram(dn, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to allocate internal memory\n");
		goto err_clk;
	}

	priv->pdev = pdev;

	/* Initialize buffer manager internals */
	err = mvneta_bm_init(priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_sram;
	}

	dn->data = priv;
	platform_set_drvdata(pdev, priv);

	dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n");

	return 0;

err_sram:
	mvneta_bm_put_sram(priv);
err_clk:
	clk_disable_unprepare(priv->clk);
	return err;
}

static void mvneta_bm_remove(struct platform_device *pdev)
{
	struct mvneta_bm *priv = platform_get_drvdata(pdev);
	u8 all_ports_map = 0xff;
	int i = 0;

	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i];

		mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map);
	}

	mvneta_bm_put_sram(priv);

	/* Deactivate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);

	clk_disable_unprepare(priv->clk);
}

static const struct of_device_id mvneta_bm_match[] = {
	{ .compatible = "marvell,armada-380-neta-bm" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_bm_match);

static struct platform_driver mvneta_bm_driver = {
	.probe = mvneta_bm_probe,
	.remove_new = mvneta_bm_remove,
	.driver = {
		.name = MVNETA_BM_DRIVER_NAME,
		.of_match_table = mvneta_bm_match,
	},
};

module_platform_driver(mvneta_bm_driver);

MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");