// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/lockdep.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

/* AEN structures that must be preserved over KVM module re-insertion */
union zpci_sic_iib *zpci_aipb;
EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);

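/*
 * get_zdev_by_fid() - find a zpci_dev by its function ID
 *
 * Walks zpci_list under zpci_list_lock and, on a match, takes a
 * reference on the zpci_dev via zpci_zdev_get(). The caller owns that
 * reference and must drop it with zpci_zdev_put() when done.
 * Returns NULL if no device with the given FID is known.
 */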
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

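/*
 * Tear down functions that are in the Standby state locally but that
 * the platform already reports as Reserved, i.e. functions taken over
 * by another system. Candidates are first collected on a private list
 * under zpci_list_lock so that zpci_device_reserved() can run without
 * holding the lock.
 */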
void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota, u8 *status)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	/* Work around off by one in ISM virt device */
	if (zdev->pft == PCI_FUNC_TYPE_ISM && limit > base)
		fib.pal = limit + (1 << 12);
	else
		fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, status);
	if (cc)
		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, *status);
	return cc;
}
EXPORT_SYMBOL_GPL(zpci_register_ioat);

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.gd = zdev->gisa;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_iommu_ctrs *ctrs;
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	ctrs = zpci_get_iommu_ctrs(zdev);
	if (ctrs) {
		atomic64_set(&ctrs->mapped_pages, 0);
		atomic64_set(&ctrs->unmapped_pages, 0);
		atomic64_set(&ctrs->global_rpcits, 0);
		atomic64_set(&ctrs->sync_map_rpcits, 0);
		atomic64_set(&ctrs->sync_rpcits, 0);
	}

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	fib.gd = zdev->gisa;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

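/*
 * Config space accessors. On s390, PCI config space is reached through
 * the PCI load/store instructions (wrapped by __zpci_load/__zpci_store)
 * rather than a memory-mapped region; the raw value is in little-endian
 * byte order, so the accessors convert it and shift according to the
 * access length.
 */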
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count * 8);
}

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	/*
	 * When PCI MIO instructions are unavailable the "physical" address
	 * encodes a hint for accessing the PCI memory space it represents.
	 * Just pass it unchanged such that ioread/iowrite can decode it.
	 */
	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *)phys_addr;

	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

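/*
 * pci_ops for the zPCI root bus: route common-code config space
 * accesses to the zpci_dev backing the given bus/devfn pair as
 * resolved by zdev_from_bus().
 */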
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

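/*
 * Allocate a slot in the global iomap table. Each used BAR of each
 * function gets its own entry; the index is encoded into the address
 * cookies handed out by pci_iomap_range_fh() (see ZPCI_ADDR()).
 */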
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
{
	int bar, idx;

	spin_lock(&zpci_iomap_lock);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!zdev->bars[bar].size)
			continue;
		idx = zdev->bars[bar].map_idx;
		if (!zpci_iomap_start[idx].count)
			continue;
		WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
	}
	spin_unlock(&zpci_iomap_lock);
}

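/*
 * Cache a changed function handle, e.g. after the function was enabled
 * or disabled. With MIO the handle is not needed for memory space
 * access, so only the non-MIO iomap entries of an enabled function
 * with resources have to be refreshed.
 */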
void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
{
	if (!fh || zdev->fh == fh)
		return;

	zdev->fh = fh;
	if (zpci_use_mio(zdev))
		return;
	if (zdev->has_resources && zdev_enabled(zdev))
		zpci_do_update_iomap_fh(zdev, fh);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

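/*
 * Set up the iomem resources describing the function's BARs. With MIO
 * the resource addresses are the write-through MIO addresses supplied
 * by the platform, otherwise they are ZPCI_ADDR() cookies derived from
 * the iomap entry allocated for each BAR.
 */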
int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
	}
	zdev->has_resources = 1;

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	struct resource *res;
	int i;

	pci_lock_rescan_remove();
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = zdev->bars[i].res;
		if (!res)
			continue;

		release_resource(res);
		pci_bus_remove_resource(zdev->zbus->bus, res);
		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		zdev->bars[i].res = NULL;
		kfree(res);
	}
	zdev->has_resources = 0;
	pci_unlock_rescan_remove();
}

int pcibios_device_add(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

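/*
 * Domain handling: when the platform guarantees unique UIDs a caller-
 * provided domain number is simply registered; otherwise domains are
 * auto-allocated from a bitmap. __zpci_alloc_domain() relies on the
 * caller having ensured that a free domain below ZPCI_NR_DEVICES
 * exists (see the comment in that function).
 */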
static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

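/*
 * Enabling registers the function for use with up to
 * ZPCI_NR_DMA_SPACES DMA address spaces; on success the platform
 * returns a new function handle which is cached via zpci_update_fh().
 */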
int zpci_enable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc = 0;

	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
		rc = -EIO;
	else
		zpci_update_fh(zdev, fh);
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int cc, rc = 0;

	cc = clp_disable_fh(zdev, &fh);
	if (!cc) {
		zpci_update_fh(zdev, fh);
	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
			zdev->fid);
		/* Function is already disabled - update handle */
		rc = clp_refresh_fh(zdev->fid, &fh);
		if (!rc) {
			zpci_update_fh(zdev, fh);
			rc = -EINVAL;
		}
	} else {
		rc = -EIO;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

719
720/**
721 * zpci_hot_reset_device - perform a reset of the given zPCI function
722 * @zdev: the slot which should be reset
723 *
724 * Performs a low level reset of the zPCI function. The reset is low level in
725 * the sense that the zPCI function can be reset without detaching it from the
726 * common PCI subsystem. The reset may be performed while under control of
727 * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
728 * table is reinstated at the end of the reset.
729 *
730 * After the reset the functions internal state is reset to an initial state
731 * equivalent to its state during boot when first probing a driver.
732 * Consequently after reset the PCI function requires re-initialization via the
733 * common PCI code including re-enabling IRQs via pci_alloc_irq_vectors()
734 * and enabling the function via e.g. pci_enable_device_flags(). The caller
735 * must guard against concurrent reset attempts.
736 *
737 * In most cases this function should not be called directly but through
738 * pci_reset_function() or pci_reset_bus() which handle the save/restore and
739 * locking - asserted by lockdep.
740 *
741 * Return: 0 on success and an error value otherwise
742 */
743int zpci_hot_reset_device(struct zpci_dev *zdev)
744{
745 u8 status;
746 int rc;
747
748 lockdep_assert_held(&zdev->state_lock);
749 zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
750 if (zdev_enabled(zdev)) {
751 /* Disables device access, DMAs and IRQs (reset state) */
752 rc = zpci_disable_device(zdev);
753 /*
754 * Due to a z/VM vs LPAR inconsistency in the error state the
755 * FH may indicate an enabled device but disable says the
756 * device is already disabled don't treat it as an error here.
757 */
758 if (rc == -EINVAL)
759 rc = 0;
760 if (rc)
761 return rc;
762 }
763
764 rc = zpci_enable_device(zdev);
765 if (rc)
766 return rc;
767
768 if (zdev->dma_table)
769 rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
770 virt_to_phys(zdev->dma_table), &status);
771 if (rc) {
772 zpci_disable_device(zdev);
773 return rc;
774 }
775
776 return 0;
777}
778
/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation, either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	kref_init(&zdev->kref);
	mutex_init(&zdev->state_lock);
	mutex_init(&zdev->fmb_lock);
	mutex_init(&zdev->kzdev_lock);

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_destroy_iommu;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	return zdev;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}

836
837bool zpci_is_device_configured(struct zpci_dev *zdev)
838{
839 enum zpci_state state = zdev->state;
840
841 return state != ZPCI_FN_STATE_RESERVED &&
842 state != ZPCI_FN_STATE_STANDBY;
843}
844
845/**
846 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
847 * @zdev: The zpci_dev to be configured
848 * @fh: The general function handle supplied by the platform
849 *
850 * Given a device in the configuration state Configured, enables, scans and
851 * adds it to the common code PCI subsystem if possible. If any failure occurs,
852 * the zpci_dev is left disabled.
853 *
854 * Return: 0 on success, or an error code otherwise
855 */
856int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
857{
858 zpci_update_fh(zdev, fh);
859 return zpci_bus_scan_device(zdev);
860}
861
862/**
863 * zpci_deconfigure_device() - Deconfigure a zpci_dev
864 * @zdev: The zpci_dev to configure
865 *
866 * Deconfigure a zPCI function that is currently configured and possibly known
867 * to the common code PCI subsystem.
868 * If any failure occurs the device is left as is.
869 *
870 * Return: 0 on success, or an error code otherwise
871 */
872int zpci_deconfigure_device(struct zpci_dev *zdev)
873{
874 int rc;
875
876 lockdep_assert_held(&zdev->state_lock);
877 if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
878 return 0;
879
880 if (zdev->zbus->bus)
881 zpci_bus_remove_device(zdev, false);
882
883 if (zdev_enabled(zdev)) {
884 rc = zpci_disable_device(zdev);
885 if (rc)
886 return rc;
887 }
888
889 rc = sclp_pci_deconfigure(zdev->fid);
890 zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
891 if (rc)
892 return rc;
893 zdev->state = ZPCI_FN_STATE_STANDBY;
894
895 return 0;
896}
897
898/**
899 * zpci_device_reserved() - Mark device as reserved
900 * @zdev: the zpci_dev that was reserved
901 *
902 * Handle the case that a given zPCI function was reserved by another system.
903 * After a call to this function the zpci_dev can not be found via
904 * get_zdev_by_fid() anymore but may still be accessible via existing
905 * references though it will not be functional anymore.
906 */
907void zpci_device_reserved(struct zpci_dev *zdev)
908{
909 /*
910 * Remove device from zpci_list as it is going away. This also
911 * makes sure we ignore subsequent zPCI events for this device.
912 */
913 spin_lock(&zpci_list_lock);
914 list_del(&zdev->entry);
915 spin_unlock(&zpci_list_lock);
916 zdev->state = ZPCI_FN_STATE_RESERVED;
917 zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
918 zpci_zdev_put(zdev);
919}
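/*
 * kref release callback, invoked once the last reference to the
 * zpci_dev is dropped via zpci_zdev_put(). The switch below performs
 * state-dependent teardown; the fallthroughs make each state execute
 * its own cleanup plus that of all later states. Freeing is
 * RCU-deferred since lookups may still hold RCU-protected references.
 */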
920
921void zpci_release_device(struct kref *kref)
922{
923 struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
924 int ret;
925
926 if (zdev->has_hp_slot)
927 zpci_exit_slot(zdev);
928
929 if (zdev->zbus->bus)
930 zpci_bus_remove_device(zdev, false);
931
932 if (zdev_enabled(zdev))
933 zpci_disable_device(zdev);
934
935 switch (zdev->state) {
936 case ZPCI_FN_STATE_CONFIGURED:
937 ret = sclp_pci_deconfigure(zdev->fid);
938 zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
939 fallthrough;
940 case ZPCI_FN_STATE_STANDBY:
941 if (zdev->has_hp_slot)
942 zpci_exit_slot(zdev);
943 spin_lock(&zpci_list_lock);
944 list_del(&zdev->entry);
945 spin_unlock(&zpci_list_lock);
946 zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
947 fallthrough;
948 case ZPCI_FN_STATE_RESERVED:
949 if (zdev->has_resources)
950 zpci_cleanup_bus_resources(zdev);
951 zpci_bus_device_unregister(zdev);
952 zpci_destroy_iommu(zdev);
953 fallthrough;
954 default:
955 break;
956 }
957 zpci_dbg(3, "rem fid:%x\n", zdev->fid);
958 kfree_rcu(zdev, rcu);
959}
960
961int zpci_report_error(struct pci_dev *pdev,
962 struct zpci_report_error_header *report)
963{
964 struct zpci_dev *zdev = to_zpci(pdev);
965
966 return sclp_pci_report(report, zdev->fh, zdev->fid);
967}
968EXPORT_SYMBOL(zpci_report_error);
969
970/**
971 * zpci_clear_error_state() - Clears the zPCI error state of the device
972 * @zdev: The zdev for which the zPCI error state should be reset
973 *
974 * Clear the zPCI error state of the device. If clearing the zPCI error state
975 * fails the device is left in the error state. In this case it may make sense
976 * to call zpci_io_perm_failure() on the associated pdev if it exists.
977 *
978 * Returns: 0 on success, -EIO otherwise
979 */
980int zpci_clear_error_state(struct zpci_dev *zdev)
981{
982 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
983 struct zpci_fib fib = {0};
984 u8 status;
985 int cc;
986
987 cc = zpci_mod_fc(req, &fib, &status);
988 if (cc) {
989 zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
990 return -EIO;
991 }
992
993 return 0;
994}
995
996/**
997 * zpci_reset_load_store_blocked() - Re-enables L/S from error state
998 * @zdev: The zdev for which to unblock load/store access
999 *
1000 * Re-enables load/store access for a PCI function in the error state while
1001 * keeping DMA blocked. In this state drivers can poke MMIO space to determine
1002 * if error recovery is possible while catching any rogue DMA access from the
1003 * device.
1004 *
1005 * Returns: 0 on success, -EIO otherwise
1006 */
1007int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
1008{
1009 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
1010 struct zpci_fib fib = {0};
1011 u8 status;
1012 int cc;
1013
1014 cc = zpci_mod_fc(req, &fib, &status);
1015 if (cc) {
1016 zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
1017 return -EIO;
1018 }
1019
1020 return 0;
1021}
1022
1023static int zpci_mem_init(void)
1024{
1025 BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
1026 __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
1027
1028 zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
1029 __alignof__(struct zpci_fmb), 0, NULL);
1030 if (!zdev_fmb_cache)
1031 goto error_fmb;
1032
1033 zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
1034 sizeof(*zpci_iomap_start), GFP_KERNEL);
1035 if (!zpci_iomap_start)
1036 goto error_iomap;
1037
1038 zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
1039 sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
1040 if (!zpci_iomap_bitmap)
1041 goto error_iomap_bitmap;
1042
1043 if (static_branch_likely(&have_mio))
1044 clp_setup_writeback_mio();
1045
1046 return 0;
1047error_iomap_bitmap:
1048 kfree(zpci_iomap_start);
1049error_iomap:
1050 kmem_cache_destroy(zdev_fmb_cache);
1051error_fmb:
1052 return -ENOMEM;
1053}
1054
1055static void zpci_mem_exit(void)
1056{
1057 kfree(zpci_iomap_bitmap);
1058 kfree(zpci_iomap_start);
1059 kmem_cache_destroy(zdev_fmb_cache);
1060}
1061
1062static unsigned int s390_pci_probe __initdata = 1;
1063unsigned int s390_pci_force_floating __initdata;
1064static unsigned int s390_pci_initialized;
1065
1066char * __init pcibios_setup(char *str)
1067{
1068 if (!strcmp(str, "off")) {
1069 s390_pci_probe = 0;
1070 return NULL;
1071 }
1072 if (!strcmp(str, "nomio")) {
1073 S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
1074 return NULL;
1075 }
1076 if (!strcmp(str, "force_floating")) {
1077 s390_pci_force_floating = 1;
1078 return NULL;
1079 }
1080 if (!strcmp(str, "norid")) {
1081 s390_pci_no_rid = 1;
1082 return NULL;
1083 }
1084 return str;
1085}
1086
1087bool zpci_is_enabled(void)
1088{
1089 return s390_pci_initialized;
1090}
1091
1092static int __init pci_base_init(void)
1093{
1094 int rc;
1095
1096 if (!s390_pci_probe)
1097 return 0;
1098
1099 if (!test_facility(69) || !test_facility(71)) {
1100 pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
1101 return 0;
1102 }
1103
1104 if (MACHINE_HAS_PCI_MIO) {
1105 static_branch_enable(&have_mio);
1106 system_ctl_set_bit(2, CR2_MIO_ADDRESSING_BIT);
1107 }
1108
1109 rc = zpci_debug_init();
1110 if (rc)
1111 goto out;
1112
1113 rc = zpci_mem_init();
1114 if (rc)
1115 goto out_mem;
1116
1117 rc = zpci_irq_init();
1118 if (rc)
1119 goto out_irq;
1120
1121 rc = clp_scan_pci_devices();
1122 if (rc)
1123 goto out_find;
1124 zpci_bus_scan_busses();
1125
1126 s390_pci_initialized = 1;
1127 return 0;
1128
1129out_find:
1130 zpci_irq_exit();
1131out_irq:
1132 zpci_mem_exit();
1133out_mem:
1134 zpci_debug_exit();
1135out:
1136 return rc;
1137}
1138subsys_initcall_sync(pci_base_init);