// SPDX-License-Identifier: GPL-2.0-only
/*
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 */

/*
 * Miscellaneous functionality used in the other GenWQE driver parts.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <linux/scatterlist.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/pgtable.h>

#include "genwqe_driver.h"
#include "card_base.h"
#include "card_ddcb.h"

/**
 * __genwqe_writeq() - Write 64-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: byte offset within BAR
 * @val: 64-bit value
 *
 * Return: 0 if success; < 0 if error
 */
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
	return 0;
}

/**
 * __genwqe_readq() - Read 64-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: offset within BAR
 *
 * Return: value from register
 */
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffffffffffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x000000000000ffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x00000000ffff0000ull;

	if (cd->mmio == NULL)
		return 0xffffffffffffffffull;

	return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
}

/**
 * __genwqe_writel() - Write 32-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: byte offset within BAR
 * @val: 32-bit value
 *
 * Return: 0 if success; < 0 if error
 */
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
	return 0;
}

/**
 * __genwqe_readl() - Read 32-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: offset within BAR
 *
 * Return: Value from register
 */
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffff;

	if (cd->mmio == NULL)
		return 0xffffffff;

	return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
}

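/*
 * Example (illustrative only, not driver code): the four accessors
 * above are the only sanctioned way to touch BAR registers. A typical
 * liveness check reads the Global FIR and treats all-ones as an
 * unreachable card, since that is also what the error-injection hooks
 * return:
 *
 *	u64 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
 *
 *	if (gfir == 0xffffffffffffffffull)
 *		return -EIO;
 */
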
/**
 * genwqe_read_app_id() - Extract app_id
 * @cd: genwqe device descriptor
 * @app_name: carrier used to pass-back name
 * @len: length of data for name
 *
 * app_unitcfg needs to be filled with valid data first.
 */
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
{
	int i, j;
	u32 app_id = (u32)cd->app_unitcfg;

	memset(app_name, 0, len);
	for (i = 0, j = 0; j < min(len, 4); j++) {
		char ch = (char)((app_id >> (24 - j*8)) & 0xff);

		if (ch == ' ')
			continue;
		app_name[i++] = isprint(ch) ? ch : 'X';
	}
	return i;
}

#define CRC32_POLYNOMIAL	0x20044009
static u32 crc32_tab[256];	/* crc32 lookup table */

/**
 * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
 *
 * Existing kernel functions seem to use a different polynomial,
 * therefore we could not use them here.
 *
 * Genwqe's Polynomial = 0x20044009
 */
void genwqe_init_crc32(void)
{
	int i, j;
	u32 crc;

	for (i = 0; i < 256; i++) {
		crc = i << 24;
		for (j = 0; j < 8; j++) {
			if (crc & 0x80000000)
				crc = (crc << 1) ^ CRC32_POLYNOMIAL;
			else
				crc = (crc << 1);
		}
		crc32_tab[i] = crc;
	}
}

/**
 * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
 * @buff: pointer to data buffer
 * @len: length of data for calculation
 * @init: initial crc (0xffffffff at start)
 *
 * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
 *
 * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
 * result in a crc32 of 0xf33cb7d3.
 *
 * The existing kernel crc functions did not cover this polynomial yet.
 *
 * Return: crc32 checksum.
 */
u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
{
	int i;
	u32 crc;

	crc = init;
	while (len--) {
		i = ((crc >> 24) ^ *buff++) & 0xFF;
		crc = (crc << 8) ^ crc32_tab[i];
	}
	return crc;
}

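/*
 * Example (illustrative only): checksumming the test vector from the
 * kernel-doc above. genwqe_init_crc32() must have run once beforehand
 * to fill crc32_tab[]:
 *
 *	u8 buf[4] = { 0x01, 0x02, 0x03, 0x04 };
 *	u32 crc = genwqe_crc32(buf, sizeof(buf), 0xffffffff);
 *
 * crc now holds 0xf33cb7d3.
 */
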
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
				dma_addr_t *dma_handle)
{
	if (get_order(size) > MAX_PAGE_ORDER)
		return NULL;

	return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
				  GFP_KERNEL);
}

void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
	if (vaddr == NULL)
		return;

	dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle);
}

static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
			       int num_pages)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
		dma_unmap_page(&pci_dev->dev, dma_list[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		dma_list[i] = 0x0;
	}
}

static int genwqe_map_pages(struct genwqe_dev *cd,
			    struct page **page_list, int num_pages,
			    dma_addr_t *dma_list)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	/* establish DMA mapping for requested pages */
	for (i = 0; i < num_pages; i++) {
		dma_addr_t daddr;

		dma_list[i] = 0x0;
		daddr = dma_map_page(&pci_dev->dev, page_list[i],
				     0,	/* map_offs */
				     PAGE_SIZE,
				     DMA_BIDIRECTIONAL); /* FIXME rd/rw */

		if (dma_mapping_error(&pci_dev->dev, daddr)) {
			dev_err(&pci_dev->dev,
				"[%s] err: no dma addr daddr=%016llx!\n",
				__func__, (long long)daddr);
			goto err;
		}

		dma_list[i] = daddr;
	}
	return 0;

 err:
	genwqe_unmap_pages(cd, dma_list, num_pages);
	return -EIO;
}

static int genwqe_sgl_size(int num_pages)
{
	int len, num_tlb = num_pages / 7;

	len = sizeof(struct sg_entry) * (num_pages + num_tlb + 1);
	return roundup(len, PAGE_SIZE);
}

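/*
 * Worked example for the sizing above (illustrative; assumes the
 * 16-byte struct sg_entry used throughout this file): for 9 pages,
 * one extra chaining entry is needed per 7 data entries plus one
 * terminator, so num_tlb = 9 / 7 = 1 and
 * len = 16 * (9 + 1 + 1) = 176 bytes, which rounds up to one full
 * page on a 4 KiB PAGE_SIZE system.
 */
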
/*
 * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
 *
 * Allocates memory for sgl and overlapping pages. Pages which might
 * overlap other user-space memory blocks are being cached for DMAs,
 * such that we do not run into synchronization issues. Data is copied
 * from user-space into the cached pages.
 */
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
			  void __user *user_addr, size_t user_size, int write)
{
	int ret = -ENOMEM;
	struct pci_dev *pci_dev = cd->pci_dev;

	sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
	sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size);
	sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
	sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;

	dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n",
		__func__, user_addr, user_size, sgl->nr_pages,
		sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);

	sgl->user_addr = user_addr;
	sgl->user_size = user_size;
	sgl->write = write;
	sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);

	if (get_order(sgl->sgl_size) > MAX_PAGE_ORDER) {
		dev_err(&pci_dev->dev,
			"[%s] err: too much memory requested!\n", __func__);
		return ret;
	}

	sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
					     &sgl->sgl_dma_addr);
	if (sgl->sgl == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: no memory available!\n", __func__);
		return ret;
	}

	/* Only use buffering on incomplete pages */
	if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
		sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->fpage_dma_addr);
		if (sgl->fpage == NULL)
			goto err_out;

		/* Sync with user memory */
		if (copy_from_user(sgl->fpage + sgl->fpage_offs,
				   user_addr, sgl->fpage_size)) {
			ret = -EFAULT;
			goto err_out;
		}
	}
	if (sgl->lpage_size != 0) {
		sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->lpage_dma_addr);
		if (sgl->lpage == NULL)
			goto err_out1;

		/* Sync with user memory */
		if (copy_from_user(sgl->lpage, user_addr + user_size -
				   sgl->lpage_size, sgl->lpage_size)) {
			ret = -EFAULT;
			goto err_out2;
		}
	}
	return 0;

 err_out2:
	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
				 sgl->lpage_dma_addr);
	sgl->lpage = NULL;
	sgl->lpage_dma_addr = 0;
 err_out1:
	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
				 sgl->fpage_dma_addr);
	sgl->fpage = NULL;
	sgl->fpage_dma_addr = 0;
 err_out:
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);
	sgl->sgl = NULL;
	sgl->sgl_dma_addr = 0;
	sgl->sgl_size = 0;

	return ret;
}

int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
		     dma_addr_t *dma_list)
{
	int i = 0, j = 0, p;
	unsigned long dma_offs, map_offs;
	dma_addr_t prev_daddr = 0;
	struct sg_entry *s, *last_s = NULL;
	size_t size = sgl->user_size;

	dma_offs = 128;			/* next block if needed/dma_offset */
	map_offs = sgl->fpage_offs;	/* offset in first page */

	s = &sgl->sgl[0];	/* first set of 8 entries */
	p = 0;			/* page */
	while (p < sgl->nr_pages) {
		dma_addr_t daddr;
		unsigned int size_to_map;

		/* always write the chaining entry, cleanup is done later */
		j = 0;
		s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
		s[j].len = cpu_to_be32(128);
		s[j].flags = cpu_to_be32(SG_CHAINED);
		j++;

		while (j < 8) {
			/* DMA mapping for requested page, offs, size */
			size_to_map = min(size, PAGE_SIZE - map_offs);

			if ((p == 0) && (sgl->fpage != NULL)) {
				daddr = sgl->fpage_dma_addr + map_offs;

			} else if ((p == sgl->nr_pages - 1) &&
				   (sgl->lpage != NULL)) {
				daddr = sgl->lpage_dma_addr;
			} else {
				daddr = dma_list[p] + map_offs;
			}

			size -= size_to_map;
			map_offs = 0;

			if (prev_daddr == daddr) {
				u32 prev_len = be32_to_cpu(last_s->len);

				/* pr_info("daddr combining: "
					"%016llx/%08x -> %016llx\n",
					prev_daddr, prev_len, daddr); */

				last_s->len = cpu_to_be32(prev_len +
							  size_to_map);

				p++; /* process next page */
				if (p == sgl->nr_pages)
					goto fixup;  /* nothing to do */

				prev_daddr = daddr + size_to_map;
				continue;
			}

			/* start new entry */
			s[j].target_addr = cpu_to_be64(daddr);
			s[j].len = cpu_to_be32(size_to_map);
			s[j].flags = cpu_to_be32(SG_DATA);
			prev_daddr = daddr + size_to_map;
			last_s = &s[j];
			j++;

			p++; /* process next page */
			if (p == sgl->nr_pages)
				goto fixup;  /* nothing to do */
		}
		dma_offs += 128;
		s += 8;		/* continue 8 elements further */
	}
 fixup:
	if (j == 1) {		/* combining happened on last entry! */
		s -= 8;		/* full shift needed on previous sgl block */
		j = 7;		/* shift all elements */
	}

	for (i = 0; i < j; i++)	/* move elements 1 up */
		s[i] = s[i + 1];

	s[i].target_addr = cpu_to_be64(0);
	s[i].len = cpu_to_be32(0);
	s[i].flags = cpu_to_be32(SG_END_LIST);
	return 0;
}

/**
 * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
 * @cd: genwqe device descriptor
 * @sgl: scatter gather list describing user-space memory
 *
 * After the DMA transfer has been completed we free the memory for
 * the sgl and the cached pages. Data is being transferred from cached
 * pages into user-space buffers.
 */
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
	int rc = 0;
	size_t offset;
	unsigned long res;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (sgl->fpage) {
		if (sgl->write) {
			res = copy_to_user(sgl->user_addr,
				sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
			if (res) {
				dev_err(&pci_dev->dev,
					"[%s] err: copying fpage! (res=%lu)\n",
					__func__, res);
				rc = -EFAULT;
			}
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
					 sgl->fpage_dma_addr);
		sgl->fpage = NULL;
		sgl->fpage_dma_addr = 0;
	}
	if (sgl->lpage) {
		if (sgl->write) {
			offset = sgl->user_size - sgl->lpage_size;
			res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
					   sgl->lpage_size);
			if (res) {
				dev_err(&pci_dev->dev,
					"[%s] err: copying lpage! (res=%lu)\n",
					__func__, res);
				rc = -EFAULT;
			}
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
					 sgl->lpage_dma_addr);
		sgl->lpage = NULL;
		sgl->lpage_dma_addr = 0;
	}
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);

	sgl->sgl = NULL;
	sgl->sgl_dma_addr = 0x0;
	sgl->sgl_size = 0;
	return rc;
}

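/*
 * Example (illustrative sketch of the call sequence, error handling
 * omitted): the three sgl helpers above are used together when a DDCB
 * references user memory; the dma_list typically comes from a prior
 * genwqe_user_vmap() of the same buffer:
 *
 *	struct genwqe_sgl sgl;
 *
 *	genwqe_alloc_sync_sgl(cd, &sgl, user_addr, user_size, write);
 *	genwqe_setup_sgl(cd, &sgl, m->dma_list);
 *	... submit the DDCB and wait for completion ...
 *	genwqe_free_sync_sgl(cd, &sgl);
 */
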
/**
 * genwqe_user_vmap() - Map user-space memory to virtual kernel memory
 * @cd: pointer to genwqe device
 * @m: mapping params
 * @uaddr: user virtual address
 * @size: size of memory to be mapped
 *
 * We need to think about how we could speed this up. Of course it is
 * not a good idea to do this over and over again, like we are
 * currently doing it. Nevertheless, I am curious where on the path
 * the performance is spent. Most probably within the memory
 * allocation functions, but maybe also in the DMA mapping code.
 *
 * Restrictions: The maximum size of the possible mapping currently depends
 *               on the amount of memory we can get using kzalloc() for the
 *               page_list and pci_alloc_consistent for the sg_list.
 *               The sg_list is currently itself not scattered, which could
 *               be fixed with some effort. The page_list must be split into
 *               PAGE_SIZE chunks too. All that will make the complicated
 *               code more complicated.
 *
 * Return: 0 if success
 */
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
		     unsigned long size)
{
	int rc = -EINVAL;
	unsigned long data, offs;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((uaddr == NULL) || (size == 0)) {
		m->size = 0;	/* mark unused and not added */
		return -EINVAL;
	}
	m->u_vaddr = uaddr;
	m->size = size;

	/* determine space needed for page_list. */
	data = (unsigned long)uaddr;
	offs = offset_in_page(data);
	if (size > ULONG_MAX - PAGE_SIZE - offs) {
		m->size = 0;	/* mark unused and not added */
		return -EINVAL;
	}
	m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);

	m->page_list = kcalloc(m->nr_pages,
			       sizeof(struct page *) + sizeof(dma_addr_t),
			       GFP_KERNEL);
	if (!m->page_list) {
		dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
		m->nr_pages = 0;
		m->u_vaddr = NULL;
		m->size = 0;	/* mark unused and not added */
		return -ENOMEM;
	}
	m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);

	/* pin user pages in memory */
	rc = pin_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
				 m->nr_pages,
				 m->write ? FOLL_WRITE : 0, /* readable/writable */
				 m->page_list);	/* ptrs to pages */
	if (rc < 0)
		goto fail_pin_user_pages;

	/* assumption: pin_user_pages can be killed by signals. */
	if (rc < m->nr_pages) {
		unpin_user_pages_dirty_lock(m->page_list, rc, m->write);
		rc = -EFAULT;
		goto fail_pin_user_pages;
	}

	rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
	if (rc != 0)
		goto fail_free_user_pages;

	return 0;

 fail_free_user_pages:
	unpin_user_pages_dirty_lock(m->page_list, m->nr_pages, m->write);

 fail_pin_user_pages:
	kfree(m->page_list);
	m->page_list = NULL;
	m->dma_list = NULL;
	m->nr_pages = 0;
	m->u_vaddr = NULL;
	m->size = 0;		/* mark unused and not added */
	return rc;
}

/**
 * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel
 *                        memory
 * @cd: pointer to genwqe device
 * @m: mapping params
 */
int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!dma_mapping_used(m)) {
		dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n",
			__func__, m);
		return -EINVAL;
	}

	if (m->dma_list)
		genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);

	if (m->page_list) {
		unpin_user_pages_dirty_lock(m->page_list, m->nr_pages,
					    m->write);
		kfree(m->page_list);
		m->page_list = NULL;
		m->dma_list = NULL;
		m->nr_pages = 0;
	}

	m->u_vaddr = NULL;
	m->size = 0;		/* mark as unused and not added */
	return 0;
}

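/*
 * Example (illustrative only): pinning a user buffer for DMA and
 * releasing it again. Assumes struct dma_mapping m has been zeroed
 * and m.write set up front:
 *
 *	struct dma_mapping m;
 *
 *	memset(&m, 0, sizeof(m));
 *	m.write = 1;
 *	if (genwqe_user_vmap(cd, &m, uaddr, size) == 0) {
 *		... DMA via m.dma_list[0 .. m.nr_pages - 1] ...
 *		genwqe_user_vunmap(cd, &m);
 *	}
 */
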
/**
 * genwqe_card_type() - Get chip type SLU Configuration Register
 * @cd: pointer to the genwqe device descriptor
 * Return: 0: Altera Stratix-IV 230
 *         1: Altera Stratix-IV 530
 *         2: Altera Stratix-V A4
 *         3: Altera Stratix-V A7
 */
u8 genwqe_card_type(struct genwqe_dev *cd)
{
	u64 card_type = cd->slu_unitcfg;

	return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20);
}

/**
 * genwqe_card_reset() - Reset the card
 * @cd: pointer to the genwqe device descriptor
 */
int genwqe_card_reset(struct genwqe_dev *cd)
{
	u64 softrst;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	/* new SL */
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
	msleep(1000);
	__genwqe_readq(cd, IO_HSU_FIR_CLR);
	__genwqe_readq(cd, IO_APP_FIR_CLR);
	__genwqe_readq(cd, IO_SLU_FIR_CLR);

	/*
	 * Read-modify-write to preserve the stealth bits
	 *
	 * For SL >= 039, the Stealth WE bit allows removing
	 * the read-modify-write.
	 * r-m-w may require a mask 0x3C to avoid hitting hard
	 * reset again for error reset (should be 0, chicken).
	 */
	softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);

	/* give ERRORRESET some time to finish */
	msleep(50);

	if (genwqe_need_err_masking(cd)) {
		dev_info(&pci_dev->dev,
			 "[%s] masking errors for old bitstreams\n", __func__);
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
	}
	return 0;
}

int genwqe_read_softreset(struct genwqe_dev *cd)
{
	u64 bitstream;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
	cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
	return 0;
}

/**
 * genwqe_set_interrupt_capability() - Configure MSI capability structure
 * @cd: pointer to the device
 * @count: number of vectors to allocate
 * Return: 0 if no error
 */
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
{
	int rc;

	rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI);
	if (rc < 0)
		return rc;
	return 0;
}

/**
 * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability()
 * @cd: pointer to the device
 */
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
{
	pci_free_irq_vectors(cd->pci_dev);
}

/**
 * set_reg_idx() - Fill array with data. Ignore illegal offsets.
 * @cd: card device
 * @r: debug register array
 * @i: index to desired entry
 * @m: maximum possible entries
 * @addr: addr which is read
 * @idx: index in debug array
 * @val: read value
 */
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
		       unsigned int *i, unsigned int m, u32 addr, u32 idx,
		       u64 val)
{
	if (WARN_ON_ONCE(*i >= m))
		return -EFAULT;

	r[*i].addr = addr;
	r[*i].idx = idx;
	r[*i].val = val;
	++*i;
	return 0;
}

static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
		   unsigned int *i, unsigned int m, u32 addr, u64 val)
{
	return set_reg_idx(cd, r, i, m, addr, 0, val);
}

int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
			  unsigned int max_regs, int all)
{
	unsigned int i, j, idx = 0;
	u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
	u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;

	/* Global FIR */
	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);

	/* UnitCfg for SLU */
	sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */
	set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);

	/* UnitCfg for APP */
	appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */
	set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);

	/* Check all chip Units */
	for (i = 0; i < GENWQE_MAX_UNITS; i++) {

		/* Unit FIR */
		ufir_addr = (i << 24) | 0x008;
		ufir = __genwqe_readq(cd, ufir_addr);
		set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);

		/* Unit FEC */
		ufec_addr = (i << 24) | 0x018;
		ufec = __genwqe_readq(cd, ufec_addr);
		set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);

		for (j = 0; j < 64; j++) {
			/* wherever there is a primary 1, read the 2ndary */
			if (!all && (!(ufir & (1ull << j))))
				continue;

			sfir_addr = (i << 24) | (0x100 + 8 * j);
			sfir = __genwqe_readq(cd, sfir_addr);
			set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);

			sfec_addr = (i << 24) | (0x300 + 8 * j);
			sfec = __genwqe_readq(cd, sfec_addr);
			set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
		}
	}

	/* fill with invalid data until end */
	for (i = idx; i < max_regs; i++) {
		regs[i].addr = 0xffffffff;
		regs[i].val = 0xffffffffffffffffull;
	}
	return idx;
}

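/*
 * Example (illustrative only): dumping the FIR/FEC state into a
 * caller-provided array. With all == 0 only secondary FIRs whose
 * primary bit is set are read; the unused tail of the array is
 * filled with 0xffffffff markers:
 *
 *	struct genwqe_reg *regs;
 *
 *	regs = kcalloc(max_regs, sizeof(*regs), GFP_KERNEL);
 *	if (regs)
 *		idx = genwqe_read_ffdc_regs(cd, regs, max_regs, 0);
 */
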
/**
 * genwqe_ffdc_buff_size() - Calculates the number of dump registers
 * @cd: genwqe device descriptor
 * @uid: unit ID
 */
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
{
	int entries = 0, ring, traps, traces, trace_entries;
	u32 eevptr_addr, l_addr, d_len, d_type;
	u64 eevptr, val, addr;

	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
	eevptr = __genwqe_readq(cd, eevptr_addr);

	if ((eevptr != 0x0) && (eevptr != -1ull)) {
		l_addr = GENWQE_UID_OFFS(uid) | eevptr;

		while (1) {
			val = __genwqe_readq(cd, l_addr);

			if ((val == 0x0) || (val == -1ull))
				break;

			/* 38:24 */
			d_len = (val & 0x0000007fff000000ull) >> 24;

			/* 39 */
			d_type = (val & 0x0000008000000000ull) >> 36;

			if (d_type) {	/* repeat */
				entries += d_len;
			} else {	/* size in bytes! */
				entries += d_len >> 3;
			}

			l_addr += 8;
		}
	}

	for (ring = 0; ring < 8; ring++) {
		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
		val = __genwqe_readq(cd, addr);

		if ((val == 0x0ull) || (val == -1ull))
			continue;

		traps = (val >> 24) & 0xff;
		traces = (val >> 16) & 0xff;
		trace_entries = val & 0xffff;

		entries += traps + (traces * trace_entries);
	}
	return entries;
}

/**
 * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
 * @cd: genwqe device descriptor
 * @uid: unit ID
 * @regs: register information
 * @max_regs: number of register entries
 */
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
			  struct genwqe_reg *regs, unsigned int max_regs)
{
	int i, traps, traces, trace, trace_entries, trace_entry, ring;
	unsigned int idx = 0;
	u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
	u64 eevptr, e, val, addr;

	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
	eevptr = __genwqe_readq(cd, eevptr_addr);

	if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
		l_addr = GENWQE_UID_OFFS(uid) | eevptr;
		while (1) {
			e = __genwqe_readq(cd, l_addr);
			if ((e == 0x0) || (e == 0xffffffffffffffffull))
				break;

			d_addr = (e & 0x0000000000ffffffull);	    /* 23:0 */
			d_len  = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
			d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
			d_addr |= GENWQE_UID_OFFS(uid);

			if (d_type) {
				for (i = 0; i < (int)d_len; i++) {
					val = __genwqe_readq(cd, d_addr);
					set_reg_idx(cd, regs, &idx, max_regs,
						    d_addr, i, val);
				}
			} else {
				d_len >>= 3; /* Size in bytes! */
				for (i = 0; i < (int)d_len; i++, d_addr += 8) {
					val = __genwqe_readq(cd, d_addr);
					set_reg_idx(cd, regs, &idx, max_regs,
						    d_addr, 0, val);
				}
			}
			l_addr += 8;
		}
	}

	/*
	 * To save time, there are only 6 traces populated on Uid=2,
	 * Ring=1, each with iters=512.
	 */
	for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,
					      2...7 are ASI rings */
		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
		val = __genwqe_readq(cd, addr);

		if ((val == 0x0ull) || (val == -1ull))
			continue;

		traps = (val >> 24) & 0xff;	/* Number of Traps */
		traces = (val >> 16) & 0xff;	/* Number of Traces */
		trace_entries = val & 0xffff;	/* Entries per trace */

		/* Note: This is a combined loop that dumps both the traps */
		/* (for the trace == 0 case) as well as the traces 1 to */
		/* 'traces'. */
		for (trace = 0; trace <= traces; trace++) {
			u32 diag_sel =
				GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);

			addr = (GENWQE_UID_OFFS(uid) |
				IO_EXTENDED_DIAG_SELECTOR);
			__genwqe_writeq(cd, addr, diag_sel);

			for (trace_entry = 0;
			     trace_entry < (trace ? trace_entries : traps);
			     trace_entry++) {
				addr = (GENWQE_UID_OFFS(uid) |
					IO_EXTENDED_DIAG_READ_MBX);
				val = __genwqe_readq(cd, addr);
				set_reg_idx(cd, regs, &idx, max_regs, addr,
					    (diag_sel<<16) | trace_entry, val);
			}
		}
	}
	return 0;
}

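/*
 * Example (illustrative only): the two functions above form a
 * size-then-read pair. Ask how many entries a unit dump needs,
 * allocate, then fill:
 *
 *	int entries = genwqe_ffdc_buff_size(cd, uid);
 *	struct genwqe_reg *buf = kcalloc(entries, sizeof(*buf),
 *					 GFP_KERNEL);
 *
 *	if (buf)
 *		genwqe_ffdc_buff_read(cd, uid, buf, entries);
 */
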
/**
 * genwqe_write_vreg() - Write register in virtual window
 * @cd: genwqe device descriptor
 * @reg: register (byte) offset within BAR
 * @val: value to write
 * @func: PCI virtual function
 *
 * Note, these registers are only accessible to the PF through the
 * VF-window. They are not intended to be accessed by the VF.
 */
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
{
	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
	__genwqe_writeq(cd, reg, val);
	return 0;
}

/**
 * genwqe_read_vreg() - Read register in virtual window
 * @cd: genwqe device descriptor
 * @reg: register (byte) offset within BAR
 * @func: PCI virtual function
 *
 * Note, these registers are only accessible to the PF through the
 * VF-window. They are not intended to be accessed by the VF.
 */
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
{
	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
	return __genwqe_readq(cd, reg);
}

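/*
 * Example (illustrative only; REG stands in for any per-VF register
 * offset, it is not a real #define): from the PF, write a VF's copy
 * of a register and read it back. Both helpers select the function in
 * the virtual window internally:
 *
 *	genwqe_write_vreg(cd, REG, val, vf_num);
 *	val = genwqe_read_vreg(cd, REG, vf_num);
 */
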
/**
 * genwqe_base_clock_frequency() - Determine base clock frequency of the card
 * @cd: genwqe device descriptor
 *
 * Note: From a design perspective it turned out to be a bad idea to
 * use codes here to specify the frequency/speed values. An old
 * driver cannot understand new codes and is therefore always a
 * problem. Better is to measure out the value or put the
 * speed/frequency directly into a register which is always a valid
 * value for old as well as for new software.
 *
 * Return: Card clock in MHz
 */
int genwqe_base_clock_frequency(struct genwqe_dev *cd)
{
	u16 speed;			/* MHz  MHz  MHz  MHz */
	static const int speed_grade[] = { 250, 200, 166, 175 };

	speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
	if (speed >= ARRAY_SIZE(speed_grade))
		return 0;	/* illegal value */

	return speed_grade[speed];
}

/**
 * genwqe_stop_traps() - Stop traps
 * @cd: genwqe device descriptor
 *
 * Before reading out the analysis data, we need to stop the traps.
 */
void genwqe_stop_traps(struct genwqe_dev *cd)
{
	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
}

/**
 * genwqe_start_traps() - Start traps
 * @cd: genwqe device descriptor
 *
 * After having read the data, we can/must enable the traps again.
 */
void genwqe_start_traps(struct genwqe_dev *cd)
{
	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);

	if (genwqe_need_err_masking(cd))
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
}