// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"

static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct npa_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct npa_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct npa_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
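	/* Poll the completion code written back by hardware; each iteration
	 * waits ~1us, so the 1000-iteration budget bounds the wait at roughly
	 * 1ms before giving up with -EBUSY.
	 */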
	while (result->compcode == NPA_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NPA_AQ_COMP_GOOD) {
		/* TODO: Replace this with some error code */
		if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
		    result->compcode == NPA_AQ_COMP_LOCKERR ||
		    result->compcode == NPA_AQ_COMP_CTX_POISON) {
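			/* These completion codes can leave NDC cache lines
			 * locked but not valid (see the erratum note above
			 * rvu_ndc_fix_locked_cacheline()); try to recover
			 * the lines before reporting the failure.
			 */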
			if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
				dev_err(rvu->dev,
					"%s: Not able to unlock cachelines\n", __func__);
		}

		return -EBUSY;
	}

	return 0;
}

int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
			struct npa_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, npalf, rc = 0;
	struct npa_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
		return NPA_AF_ERR_AQ_ENQUEUE;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
		return NPA_AF_ERR_AQ_ENQUEUE;
	}

	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	memset(&inst, 0, sizeof(struct npa_aq_inst_s));
	inst.cindex = req->aura_id;
	inst.lf = npalf;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;
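
	/* For a WRITE op, hardware updates only the context fields whose bits
	 * are set in the mask; an INIT takes the full context image as-is.
	 */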
	switch (req->op) {
	case NPA_AQ_INSTOP_WRITE:
		/* Copy context and write mask */
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			memcpy(mask, &req->aura_mask,
			       sizeof(struct npa_aura_s));
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else {
			memcpy(mask, &req->pool_mask,
			       sizeof(struct npa_pool_s));
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_INIT:
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
				rc = NPA_AF_ERR_AQ_FULL;
				break;
			}
			/* Set pool's context address */
			req->aura.pool_addr = pfvf->pool_ctx->iova +
			(req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else { /* POOL's context */
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_NOP:
	case NPA_AQ_INSTOP_READ:
	case NPA_AQ_INSTOP_LOCK:
	case NPA_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NPA_AF_ERR_AQ_FULL;
		break;
	}

	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = npa_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set aura bitmap if aura hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_AURA) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
			__set_bit(req->aura_id, pfvf->aura_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
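			/* Masked read-modify-write: take the new 'ena' where
			 * the mask bit is set, otherwise keep the state
			 * already tracked in the bitmap.
			 */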
			ena = (req->aura.ena & req->aura_mask.ena) |
				(test_bit(req->aura_id, pfvf->aura_bmap) &
				~req->aura_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->aura_bmap);
			else
				__clear_bit(req->aura_id, pfvf->aura_bmap);
		}
	}

	/* Set pool bitmap if pool hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
			__set_bit(req->aura_id, pfvf->pool_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
			ena = (req->pool.ena & req->pool_mask.ena) |
				(test_bit(req->aura_id, pfvf->pool_bmap) &
				~req->pool_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->pool_bmap);
			else
				__clear_bit(req->aura_id, pfvf->pool_bmap);
		}
	}
	spin_unlock(&aq->lock);

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NPA_AQ_INSTOP_READ) {
			if (req->ctype == NPA_AQ_CTYPE_AURA)
				memcpy(&rsp->aura, ctx,
				       sizeof(struct npa_aura_s));
			else
				memcpy(&rsp->pool, ctx,
				       sizeof(struct npa_pool_s));
		}
	}

	return 0;
}

static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct npa_aq_enq_req aq_req;
	unsigned long *bmap;
	int id, cnt = 0;
	int err = 0, rc;

	if (!pfvf->pool_ctx || !pfvf->aura_ctx)
		return NPA_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		aq_req.pool.ena = 0;
		aq_req.pool_mask.ena = 1;
		cnt = pfvf->pool_ctx->qsize;
		bmap = pfvf->pool_bmap;
	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
		aq_req.aura.ena = 0;
		aq_req.aura_mask.ena = 1;
		aq_req.aura.bp_ena = 0;
		aq_req.aura_mask.bp_ena = 1;
		cnt = pfvf->aura_ctx->qsize;
		bmap = pfvf->aura_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NPA_AQ_INSTOP_WRITE;

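	/* Issue a masked WRITE per context that is currently enabled: only the
	 * 'ena' (and, for auras, 'bp_ena') fields are selected by the mask, so
	 * the rest of the HW context is left untouched.
	 */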
	for (id = 0; id < cnt; id++) {
		if (!test_bit(id, bmap))
			continue;
		aq_req.aura_id = id;
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				(req->ctype == NPA_AQ_CTYPE_AURA) ?
				"Aura" : "Pool", id);
		}
	}

	return err;
}

#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
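/* With dynamic NDC caching disabled, contexts are not cached on demand, so
 * each newly initialized Aura/Pool context is explicitly locked into the NDC
 * right after its INIT instruction completes.
 */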
static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
{
	struct npa_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NPA_AQ_INSTOP_INIT)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
	lock_ctx_req.aura_id = req->aura_id;
	err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
			req->hdr.pcifunc,
			(req->ctype == NPA_AQ_CTYPE_AURA) ?
			"Aura" : "Pool", req->aura_id);
	return err;
}

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_npa_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = npa_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
#endif

int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return npa_lf_hwctx_disable(rvu, req);
}

static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->aura_bmap);
	pfvf->aura_bmap = NULL;

	qmem_free(rvu->dev, pfvf->aura_ctx);
	pfvf->aura_ctx = NULL;

	kfree(pfvf->pool_bmap);
	pfvf->pool_bmap = NULL;

	qmem_free(rvu->dev, pfvf->pool_ctx);
	pfvf->pool_ctx = NULL;

	qmem_free(rvu->dev, pfvf->npa_qints_ctx);
	pfvf->npa_qints_ctx = NULL;
}

int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
				  struct npa_lf_alloc_req *req,
				  struct npa_lf_alloc_rsp *rsp)
{
	int npalf, qints, hwctx_size, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (req->aura_sz > NPA_AURA_SZ_MAX ||
	    req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
		return NPA_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);
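	/* NPA_AF_CONST1 reports log2 of the HW context sizes in 4-bit fields:
	 * aura at bits [3:0], pool at [7:4] and queue interrupt at [11:8].
	 */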

	/* Alloc memory for aura HW contexts */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
			 NPA_AURA_COUNT(req->aura_sz), hwctx_size);
	if (err)
		goto free_mem;

	pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->aura_bmap)
		goto free_mem;

	/* Alloc memory for pool HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->pool_bmap)
		goto free_mem;

	/* Get no of queue interrupts supported */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	qints = (cfg >> 28) & 0xFFF;

	/* Alloc memory for Qints HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
	/* Clear way partition mask and set aura offset to '0' */
	cfg &= ~(BIT_ULL(34) - 1);
	/* Set aura size & enable caching of contexts */
	cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;

	rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);

	/* Configure aura HW context's base */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
		    (u64)pfvf->aura_ctx->iova);

	/* Enable caching of qints hw context */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
		    BIT_ULL(36) | req->way_mask << 20);
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
		    (u64)pfvf->npa_qints_ctx->iova);

	goto exit;

free_mem:
	npa_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* set stack page info */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
	rsp->stack_pg_bytes = cfg & 0xFF;
	rsp->qints = (cfg >> 28) & 0xFFF;
	if (!is_rvu_otx2(rvu)) {
		cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
		rsp->cache_lines = (cfg >> 1) & 0x3F;
	}
	return rc;
}

int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int npalf, err;
	int blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	npa_ctx_free(rvu, pfvf);

	return 0;
}

static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#else
	cfg &= ~BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
	cfg &= ~0x03DULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	/* Disable caching of stack pages */
	cfg |= 0x10ULL;
#endif
	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);

	/* For CN10K NPA BATCH DMA set 35 cache lines */
	if (!is_rvu_otx2(rvu)) {
		cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
		cfg &= ~0x7EULL;
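		/* Cache-line count lives in bits [6:1]; setting bits 6, 2 and
		 * 1 encodes the value 35 (0b100011) in that field.
		 */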
		cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1);
		rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg);
	}
	/* Result structure can be followed by Aura/Pool context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
			   ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

int rvu_npa_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return 0;

	/* Initialize admin queue */
	return npa_aq_init(rvu, &hw->block[blkaddr]);
}

void rvu_npa_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	rvu_aq_free(rvu, block->aq);
}

void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;

	/* Disable all pools */
	ctx_req.hdr.pcifunc = pcifunc;
	ctx_req.ctype = NPA_AQ_CTYPE_POOL;
	npa_lf_hwctx_disable(rvu, &ctx_req);

	/* Disable all auras */
	ctx_req.ctype = NPA_AQ_CTYPE_AURA;
	npa_lf_hwctx_disable(rvu, &ctx_req);

	npa_ctx_free(rvu, pfvf);
}

/* Due to a hardware erratum, in some corner cases, AQ context lock
 * operations can result in an NDC way getting into an illegal state
 * of not valid but locked.
 *
 * This API solves the problem by clearing the lock bit of the NDC block.
 * The operation needs to be done for each line of all the NDC banks.
 */
int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
{
	int bank, max_bank, line, max_line, err;
	u64 reg, ndc_af_const;

	/* Set the ENABLE bit(63) to '0' */
	reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
	rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));

	/* Poll until the BUSY bits(47:32) are set to '0' */
	err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
	if (err) {
		dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
		return err;
	}

	ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
	max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
	for (bank = 0; bank < max_bank; bank++) {
		for (line = 0; line < max_line; line++) {
			/* Check if 'cache line valid bit(63)' is not set
			 * but 'cache line lock bit(60)' is set and on
			 * success, reset the lock bit(60).
			 */
			reg = rvu_read64(rvu, blkaddr,
					 NDC_AF_BANKX_LINEX_METADATA(bank, line));
			if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
				rvu_write64(rvu, blkaddr,
					    NDC_AF_BANKX_LINEX_METADATA(bank, line),
					    reg & ~BIT_ULL(60));
			}
		}
	}

	return 0;
}