   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright IBM Corp. 2012
   4 *
   5 * Author(s):
   6 *   Jan Glauber <jang@linux.vnet.ibm.com>
   7 *
   8 * The System z PCI code is a rewrite from a prototype by
   9 * the following people (Kudoz!):
  10 *   Alexander Schmidt
  11 *   Christoph Raisch
  12 *   Hannes Hering
  13 *   Hoang-Nam Nguyen
  14 *   Jan-Bernd Themann
  15 *   Stefan Roscher
  16 *   Thomas Klein
  17 */
  18
  19#define KMSG_COMPONENT "zpci"
  20#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  21
  22#include <linux/kernel.h>
  23#include <linux/slab.h>
  24#include <linux/err.h>
  25#include <linux/export.h>
  26#include <linux/delay.h>
  27#include <linux/seq_file.h>
  28#include <linux/jump_label.h>
  29#include <linux/pci.h>
  30#include <linux/printk.h>
  31
  32#include <asm/isc.h>
  33#include <asm/airq.h>
  34#include <asm/facility.h>
  35#include <asm/pci_insn.h>
  36#include <asm/pci_clp.h>
  37#include <asm/pci_dma.h>
  38
  39#include "pci_bus.h"
  40#include "pci_iov.h"
  41
  42/* list of all detected zpci devices */
  43static LIST_HEAD(zpci_list);
  44static DEFINE_SPINLOCK(zpci_list_lock);
  45
  46static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
  47static DEFINE_SPINLOCK(zpci_domain_lock);
  48
  49#define ZPCI_IOMAP_ENTRIES						\
  50	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
  51	    ZPCI_IOMAP_MAX_ENTRIES)
  52
  53unsigned int s390_pci_no_rid;
  54
  55static DEFINE_SPINLOCK(zpci_iomap_lock);
  56static unsigned long *zpci_iomap_bitmap;
  57struct zpci_iomap_entry *zpci_iomap_start;
  58EXPORT_SYMBOL_GPL(zpci_iomap_start);
  59
  60DEFINE_STATIC_KEY_FALSE(have_mio);
  61
  62static struct kmem_cache *zdev_fmb_cache;
  63
  64struct zpci_dev *get_zdev_by_fid(u32 fid)
  65{
  66	struct zpci_dev *tmp, *zdev = NULL;
  67
  68	spin_lock(&zpci_list_lock);
  69	list_for_each_entry(tmp, &zpci_list, entry) {
  70		if (tmp->fid == fid) {
  71			zdev = tmp;
  72			break;
  73		}
  74	}
  75	spin_unlock(&zpci_list_lock);
  76	return zdev;
  77}
  78
  79void zpci_remove_reserved_devices(void)
  80{
  81	struct zpci_dev *tmp, *zdev;
  82	enum zpci_state state;
  83	LIST_HEAD(remove);
  84
  85	spin_lock(&zpci_list_lock);
  86	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
  87		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
  88		    !clp_get_state(zdev->fid, &state) &&
  89		    state == ZPCI_FN_STATE_RESERVED)
  90			list_move_tail(&zdev->entry, &remove);
  91	}
  92	spin_unlock(&zpci_list_lock);
  93
  94	list_for_each_entry_safe(zdev, tmp, &remove, entry)
  95		zpci_device_reserved(zdev);
  96}
  97
  98int pci_domain_nr(struct pci_bus *bus)
  99{
 100	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
 101}
 102EXPORT_SYMBOL_GPL(pci_domain_nr);
 103
 104int pci_proc_domain(struct pci_bus *bus)
 105{
 106	return pci_domain_nr(bus);
 107}
 108EXPORT_SYMBOL_GPL(pci_proc_domain);
 109
 110/* Modify PCI: Register I/O address translation parameters */
 111int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
 112		       u64 base, u64 limit, u64 iota)
 113{
 114	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
 115	struct zpci_fib fib = {0};
 116	u8 status;
 117
 118	WARN_ON_ONCE(iota & 0x3fff);
 119	fib.pba = base;
 120	fib.pal = limit;
 121	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
 122	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
 123}
 124
 125/* Modify PCI: Unregister I/O address translation parameters */
 126int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
 127{
 128	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
 129	struct zpci_fib fib = {0};
 130	u8 cc, status;
 131
 132	cc = zpci_mod_fc(req, &fib, &status);
 133	if (cc == 3) /* Function already gone. */
 134		cc = 0;
 135	return cc ? -EIO : 0;
 136}
 137
 138/* Modify PCI: Set PCI function measurement parameters */
 139int zpci_fmb_enable_device(struct zpci_dev *zdev)
 140{
 141	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
 142	struct zpci_fib fib = {0};
 143	u8 cc, status;
 144
 145	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
 146		return -EINVAL;
 147
 148	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
 149	if (!zdev->fmb)
 150		return -ENOMEM;
 151	WARN_ON((u64) zdev->fmb & 0xf);
 152
 153	/* reset software counters */
 154	atomic64_set(&zdev->allocated_pages, 0);
 155	atomic64_set(&zdev->mapped_pages, 0);
 156	atomic64_set(&zdev->unmapped_pages, 0);
 157
 158	fib.fmb_addr = virt_to_phys(zdev->fmb);
 159	cc = zpci_mod_fc(req, &fib, &status);
 160	if (cc) {
 161		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
 162		zdev->fmb = NULL;
 163	}
 164	return cc ? -EIO : 0;
 165}
 166
 167/* Modify PCI: Disable PCI function measurement */
 168int zpci_fmb_disable_device(struct zpci_dev *zdev)
 169{
 170	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
 171	struct zpci_fib fib = {0};
 172	u8 cc, status;
 173
 174	if (!zdev->fmb)
 175		return -EINVAL;
 176
 177	/* Function measurement is disabled if fmb address is zero */
 178	cc = zpci_mod_fc(req, &fib, &status);
 179	if (cc == 3) /* Function already gone. */
 180		cc = 0;
 181
 182	if (!cc) {
 183		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
 184		zdev->fmb = NULL;
 185	}
 186	return cc ? -EIO : 0;
 187}
 188
 189static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
 190{
 191	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
 192	u64 data;
 193	int rc;
 194
 195	rc = __zpci_load(&data, req, offset);
 196	if (!rc) {
 197		data = le64_to_cpu((__force __le64) data);
 198		data >>= (8 - len) * 8;
 199		*val = (u32) data;
 200	} else
 201		*val = 0xffffffff;
 202	return rc;
 203}
 204
 205static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
 206{
 207	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
 208	u64 data = val;
 209	int rc;
 210
 211	data <<= (8 - len) * 8;
 212	data = (__force u64) cpu_to_le64(data);
 213	rc = __zpci_store(data, req, offset);
 214	return rc;
 215}
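/*
 * Worked example for the two config-space accessors above: the 64-bit
 * doubleword exchanged with the PCI load/store instruction is treated as
 * little-endian and left-aligned. A 2-byte read (len == 2) therefore
 * byte-swaps the doubleword and shifts it right by (8 - 2) * 8 = 48 bits
 * to recover the value, while a 2-byte write shifts the value left by
 * 48 bits and byte-swaps it before issuing the store.
 */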
 216
 217resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 218				       resource_size_t size,
 219				       resource_size_t align)
 220{
 221	return 0;
 222}
 223
 224/* combine single writes by using store-block insn */
 225void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
 226{
  227	zpci_memcpy_toio(to, from, count);
 228}
 229
 230static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
 231{
 232	unsigned long offset, vaddr;
 233	struct vm_struct *area;
 234	phys_addr_t last_addr;
 235
 236	last_addr = addr + size - 1;
 237	if (!size || last_addr < addr)
 238		return NULL;
 239
 240	if (!static_branch_unlikely(&have_mio))
 241		return (void __iomem *) addr;
 242
 243	offset = addr & ~PAGE_MASK;
 244	addr &= PAGE_MASK;
 245	size = PAGE_ALIGN(size + offset);
 246	area = get_vm_area(size, VM_IOREMAP);
 247	if (!area)
 248		return NULL;
 249
 250	vaddr = (unsigned long) area->addr;
 251	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
 252		free_vm_area(area);
 253		return NULL;
 254	}
 255	return (void __iomem *) ((unsigned long) area->addr + offset);
 256}
 257
 258void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
 259{
 260	return __ioremap(addr, size, __pgprot(prot));
 261}
 262EXPORT_SYMBOL(ioremap_prot);
 263
 264void __iomem *ioremap(phys_addr_t addr, size_t size)
 265{
 266	return __ioremap(addr, size, PAGE_KERNEL);
 267}
 268EXPORT_SYMBOL(ioremap);
 269
 270void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
 271{
 272	return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
 273}
 274EXPORT_SYMBOL(ioremap_wc);
 275
 276void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
 277{
 278	return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
 279}
 280EXPORT_SYMBOL(ioremap_wt);
 281
 282void iounmap(volatile void __iomem *addr)
 283{
 284	if (static_branch_likely(&have_mio))
 285		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
 286}
 287EXPORT_SYMBOL(iounmap);
 288
 289/* Create a virtual mapping cookie for a PCI BAR */
 290static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
 291					unsigned long offset, unsigned long max)
 292{
 293	struct zpci_dev *zdev =	to_zpci(pdev);
 294	int idx;
 295
 296	idx = zdev->bars[bar].map_idx;
 297	spin_lock(&zpci_iomap_lock);
 298	/* Detect overrun */
 299	WARN_ON(!++zpci_iomap_start[idx].count);
 300	zpci_iomap_start[idx].fh = zdev->fh;
 301	zpci_iomap_start[idx].bar = bar;
 302	spin_unlock(&zpci_iomap_lock);
 303
 304	return (void __iomem *) ZPCI_ADDR(idx) + offset;
 305}
 306
 307static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
 308					 unsigned long offset,
 309					 unsigned long max)
 310{
 311	unsigned long barsize = pci_resource_len(pdev, bar);
 312	struct zpci_dev *zdev = to_zpci(pdev);
 313	void __iomem *iova;
 314
 315	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
 316	return iova ? iova + offset : iova;
 317}
 318
 319void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
 320			      unsigned long offset, unsigned long max)
 321{
 322	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
 323		return NULL;
 324
 325	if (static_branch_likely(&have_mio))
 326		return pci_iomap_range_mio(pdev, bar, offset, max);
 327	else
 328		return pci_iomap_range_fh(pdev, bar, offset, max);
 329}
 330EXPORT_SYMBOL(pci_iomap_range);
 331
 332void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 333{
 334	return pci_iomap_range(dev, bar, 0, maxlen);
 335}
 336EXPORT_SYMBOL(pci_iomap);
 337
 338static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
 339					    unsigned long offset, unsigned long max)
 340{
 341	unsigned long barsize = pci_resource_len(pdev, bar);
 342	struct zpci_dev *zdev = to_zpci(pdev);
 343	void __iomem *iova;
 344
 345	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
 346	return iova ? iova + offset : iova;
 347}
 348
 349void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
 350				 unsigned long offset, unsigned long max)
 351{
 352	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
 353		return NULL;
 354
 355	if (static_branch_likely(&have_mio))
 356		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
 357	else
 358		return pci_iomap_range_fh(pdev, bar, offset, max);
 359}
 360EXPORT_SYMBOL(pci_iomap_wc_range);
 361
 362void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
 363{
 364	return pci_iomap_wc_range(dev, bar, 0, maxlen);
 365}
 366EXPORT_SYMBOL(pci_iomap_wc);
 367
 368static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
 369{
 370	unsigned int idx = ZPCI_IDX(addr);
 371
 372	spin_lock(&zpci_iomap_lock);
 373	/* Detect underrun */
 374	WARN_ON(!zpci_iomap_start[idx].count);
 375	if (!--zpci_iomap_start[idx].count) {
 376		zpci_iomap_start[idx].fh = 0;
 377		zpci_iomap_start[idx].bar = 0;
 378	}
 379	spin_unlock(&zpci_iomap_lock);
 380}
 381
 382static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
 383{
 384	iounmap(addr);
 385}
 386
 387void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 388{
 389	if (static_branch_likely(&have_mio))
 390		pci_iounmap_mio(pdev, addr);
 391	else
 392		pci_iounmap_fh(pdev, addr);
 393}
 394EXPORT_SYMBOL(pci_iounmap);
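/*
 * Driver-side usage of the iomap helpers above follows the generic PCI
 * pattern; a minimal sketch (BAR number, register offset and error
 * handling are placeholders, not taken from this file):
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);
 *	u32 val;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	val = ioread32(regs + 0x10);
 *	pci_iounmap(pdev, regs);
 *
 * On s390 the returned cookie is either an index into the zpci_iomap
 * table (the _fh variants) or a real MIO mapping, which is why accesses
 * must go through the ioread/iowrite helpers and be released with
 * pci_iounmap().
 */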
 395
 396static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
 397		    int size, u32 *val)
 398{
 399	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
 400
 401	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
 402}
 403
 404static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
 405		     int size, u32 val)
 406{
 407	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
 408
 409	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
 410}
 411
 412static struct pci_ops pci_root_ops = {
 413	.read = pci_read,
 414	.write = pci_write,
 415};
 416
 417static void zpci_map_resources(struct pci_dev *pdev)
 418{
 419	struct zpci_dev *zdev = to_zpci(pdev);
 420	resource_size_t len;
 421	int i;
 422
 423	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 424		len = pci_resource_len(pdev, i);
 425		if (!len)
 426			continue;
 427
 428		if (zpci_use_mio(zdev))
 429			pdev->resource[i].start =
 430				(resource_size_t __force) zdev->bars[i].mio_wt;
 431		else
 432			pdev->resource[i].start = (resource_size_t __force)
 433				pci_iomap_range_fh(pdev, i, 0, 0);
 434		pdev->resource[i].end = pdev->resource[i].start + len - 1;
 435	}
 436
 437	zpci_iov_map_resources(pdev);
 438}
 439
 440static void zpci_unmap_resources(struct pci_dev *pdev)
 441{
 442	struct zpci_dev *zdev = to_zpci(pdev);
 443	resource_size_t len;
 444	int i;
 445
 446	if (zpci_use_mio(zdev))
 447		return;
 448
 449	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 450		len = pci_resource_len(pdev, i);
 451		if (!len)
 452			continue;
 453		pci_iounmap_fh(pdev, (void __iomem __force *)
 454			       pdev->resource[i].start);
 455	}
 456}
 457
 458static int zpci_alloc_iomap(struct zpci_dev *zdev)
 459{
 460	unsigned long entry;
 461
 462	spin_lock(&zpci_iomap_lock);
 463	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
 464	if (entry == ZPCI_IOMAP_ENTRIES) {
 465		spin_unlock(&zpci_iomap_lock);
 466		return -ENOSPC;
 467	}
 468	set_bit(entry, zpci_iomap_bitmap);
 469	spin_unlock(&zpci_iomap_lock);
 470	return entry;
 471}
 472
 473static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
 474{
 475	spin_lock(&zpci_iomap_lock);
 476	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
 477	clear_bit(entry, zpci_iomap_bitmap);
 478	spin_unlock(&zpci_iomap_lock);
 479}
 480
 481static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
 482				    unsigned long size, unsigned long flags)
 483{
 484	struct resource *r;
 485
 486	r = kzalloc(sizeof(*r), GFP_KERNEL);
 487	if (!r)
 488		return NULL;
 489
 490	r->start = start;
 491	r->end = r->start + size - 1;
 492	r->flags = flags;
 493	r->name = zdev->res_name;
 494
 495	if (request_resource(&iomem_resource, r)) {
 496		kfree(r);
 497		return NULL;
 498	}
 499	return r;
 500}
 501
 502int zpci_setup_bus_resources(struct zpci_dev *zdev,
 503			     struct list_head *resources)
 504{
 505	unsigned long addr, size, flags;
 506	struct resource *res;
 507	int i, entry;
 508
 509	snprintf(zdev->res_name, sizeof(zdev->res_name),
 510		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);
 511
 512	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 513		if (!zdev->bars[i].size)
 514			continue;
 515		entry = zpci_alloc_iomap(zdev);
 516		if (entry < 0)
 517			return entry;
 518		zdev->bars[i].map_idx = entry;
 519
 520		/* only MMIO is supported */
 521		flags = IORESOURCE_MEM;
 522		if (zdev->bars[i].val & 8)
 523			flags |= IORESOURCE_PREFETCH;
 524		if (zdev->bars[i].val & 4)
 525			flags |= IORESOURCE_MEM_64;
 526
 527		if (zpci_use_mio(zdev))
 528			addr = (unsigned long) zdev->bars[i].mio_wt;
 529		else
 530			addr = ZPCI_ADDR(entry);
 531		size = 1UL << zdev->bars[i].size;
 532
 533		res = __alloc_res(zdev, addr, size, flags);
 534		if (!res) {
 535			zpci_free_iomap(zdev, entry);
 536			return -ENOMEM;
 537		}
 538		zdev->bars[i].res = res;
 539		pci_add_resource(resources, res);
 540	}
 541	zdev->has_resources = 1;
 542
 543	return 0;
 544}
 545
 546static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
 547{
 548	int i;
 549
 550	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 551		if (!zdev->bars[i].size || !zdev->bars[i].res)
 552			continue;
 553
 554		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
 555		release_resource(zdev->bars[i].res);
 556		kfree(zdev->bars[i].res);
 557	}
 558	zdev->has_resources = 0;
 559}
 560
 561int pcibios_add_device(struct pci_dev *pdev)
 562{
 563	struct zpci_dev *zdev = to_zpci(pdev);
 564	struct resource *res;
 565	int i;
 566
 567	/* The pdev has a reference to the zdev via its bus */
 568	zpci_zdev_get(zdev);
 569	if (pdev->is_physfn)
 570		pdev->no_vf_scan = 1;
 571
 572	pdev->dev.groups = zpci_attr_groups;
 573	pdev->dev.dma_ops = &s390_pci_dma_ops;
 574	zpci_map_resources(pdev);
 575
 576	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 577		res = &pdev->resource[i];
 578		if (res->parent || !res->flags)
 579			continue;
 580		pci_claim_resource(pdev, i);
 581	}
 582
 583	return 0;
 584}
 585
 586void pcibios_release_device(struct pci_dev *pdev)
 587{
 588	struct zpci_dev *zdev = to_zpci(pdev);
 589
 590	zpci_unmap_resources(pdev);
 591	zpci_zdev_put(zdev);
 592}
 593
 594int pcibios_enable_device(struct pci_dev *pdev, int mask)
 595{
 596	struct zpci_dev *zdev = to_zpci(pdev);
 597
 598	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
 599	zpci_fmb_enable_device(zdev);
 600
 601	return pci_enable_resources(pdev, mask);
 602}
 603
 604void pcibios_disable_device(struct pci_dev *pdev)
 605{
 606	struct zpci_dev *zdev = to_zpci(pdev);
 607
 608	zpci_fmb_disable_device(zdev);
 609	zpci_debug_exit_device(zdev);
 610}
 611
 612static int __zpci_register_domain(int domain)
 613{
 614	spin_lock(&zpci_domain_lock);
 615	if (test_bit(domain, zpci_domain)) {
 616		spin_unlock(&zpci_domain_lock);
 617		pr_err("Domain %04x is already assigned\n", domain);
 618		return -EEXIST;
 619	}
 620	set_bit(domain, zpci_domain);
 621	spin_unlock(&zpci_domain_lock);
 622	return domain;
 623}
 624
 625static int __zpci_alloc_domain(void)
 626{
 627	int domain;
 628
 629	spin_lock(&zpci_domain_lock);
 630	/*
 631	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
 632	 * There is either a free domain or we have reached the maximum in
 633	 * which case we would have bailed earlier.
 634	 */
 635	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
 636	set_bit(domain, zpci_domain);
 637	spin_unlock(&zpci_domain_lock);
 638	return domain;
 639}
 640
 641int zpci_alloc_domain(int domain)
 642{
 643	if (zpci_unique_uid) {
 644		if (domain)
 645			return __zpci_register_domain(domain);
 646		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
 647		update_uid_checking(false);
 648	}
 649	return __zpci_alloc_domain();
 650}
 651
 652void zpci_free_domain(int domain)
 653{
 654	spin_lock(&zpci_domain_lock);
 655	clear_bit(domain, zpci_domain);
 656	spin_unlock(&zpci_domain_lock);
 657}
 658
 659
 660int zpci_enable_device(struct zpci_dev *zdev)
 661{
 662	int rc;
 663
 664	if (clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES)) {
 665		rc = -EIO;
 666		goto out;
 667	}
 668
 669	rc = zpci_dma_init_device(zdev);
 670	if (rc)
 671		goto out_dma;
 672
 673	return 0;
 674
 675out_dma:
 676	clp_disable_fh(zdev);
 677out:
 678	return rc;
 679}
 680
 681int zpci_disable_device(struct zpci_dev *zdev)
 682{
 683	zpci_dma_exit_device(zdev);
 684	/*
 685	 * The zPCI function may already be disabled by the platform, this is
 686	 * detected in clp_disable_fh() which becomes a no-op.
 687	 */
 688	return clp_disable_fh(zdev) ? -EIO : 0;
 689}
 690
 691/**
 692 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 693 * @fid: Function ID of the device to be created
 694 * @fh: Current Function Handle of the device to be created
 695 * @state: Initial state after creation either Standby or Configured
 696 *
 697 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 698 * as well as zpci_list.
 699 *
 700 * Returns: the zdev on success or an error pointer otherwise
 701 */
 702struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
 703{
 704	struct zpci_dev *zdev;
 705	int rc;
 706
 707	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
 708	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
 709	if (!zdev)
 710		return ERR_PTR(-ENOMEM);
 711
 712	/* FID and Function Handle are the static/dynamic identifiers */
 713	zdev->fid = fid;
 714	zdev->fh = fh;
 715
 716	/* Query function properties and update zdev */
 717	rc = clp_query_pci_fn(zdev);
 718	if (rc)
 719		goto error;
  720	zdev->state = state;
 721
 722	kref_init(&zdev->kref);
 723	mutex_init(&zdev->lock);
 724
 725	rc = zpci_init_iommu(zdev);
 726	if (rc)
 727		goto error;
 728
 729	rc = zpci_bus_device_register(zdev, &pci_root_ops);
 730	if (rc)
 731		goto error_destroy_iommu;
 732
 733	spin_lock(&zpci_list_lock);
 734	list_add_tail(&zdev->entry, &zpci_list);
 735	spin_unlock(&zpci_list_lock);
 736
 737	return zdev;
 738
 739error_destroy_iommu:
 740	zpci_destroy_iommu(zdev);
 741error:
 742	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
 743	kfree(zdev);
 744	return ERR_PTR(rc);
 745}
 746
 747bool zpci_is_device_configured(struct zpci_dev *zdev)
 748{
 749	enum zpci_state state = zdev->state;
 750
 751	return state != ZPCI_FN_STATE_RESERVED &&
 752		state != ZPCI_FN_STATE_STANDBY;
 753}
 754
 755/**
 756 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 757 * @zdev: The zpci_dev to be configured
 758 * @fh: The general function handle supplied by the platform
 759 *
 760 * Given a device in the configuration state Configured, enables, scans and
 761 * adds it to the common code PCI subsystem if possible. If the PCI device is
  762 * parked because we cannot yet create a PCI bus (function 0 has not been
  763 * seen), it is ignored but will be scanned once function 0 appears.
 764 * If any failure occurs, the zpci_dev is left disabled.
 765 *
 766 * Return: 0 on success, or an error code otherwise
 767 */
 768int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
 769{
 770	int rc;
 771
 772	zdev->fh = fh;
 773	/* the PCI function will be scanned once function 0 appears */
 774	if (!zdev->zbus->bus)
 775		return 0;
 776
  777	/* For function 0 on a multi-function bus, scan the whole bus since we
  778	 * might have to pick up existing functions that were waiting for it
  779	 * before the PCI bus could be created
  780	 */
 781	if (zdev->devfn == 0 && zdev->zbus->multifunction)
 782		rc = zpci_bus_scan_bus(zdev->zbus);
 783	else
 784		rc = zpci_bus_scan_device(zdev);
 785
 786	return rc;
 787}
 788
 789/**
 790 * zpci_deconfigure_device() - Deconfigure a zpci_dev
  791 * @zdev: The zpci_dev to deconfigure
 792 *
 793 * Deconfigure a zPCI function that is currently configured and possibly known
 794 * to the common code PCI subsystem.
 795 * If any failure occurs the device is left as is.
 796 *
 797 * Return: 0 on success, or an error code otherwise
 798 */
 799int zpci_deconfigure_device(struct zpci_dev *zdev)
 800{
 801	int rc;
 802
 803	if (zdev->zbus->bus)
 804		zpci_bus_remove_device(zdev, false);
 805
 806	if (zdev_enabled(zdev)) {
 807		rc = zpci_disable_device(zdev);
 808		if (rc)
 809			return rc;
 810	}
 811
 812	rc = sclp_pci_deconfigure(zdev->fid);
 813	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
 814	if (rc)
 815		return rc;
 816	zdev->state = ZPCI_FN_STATE_STANDBY;
 817
 818	return 0;
 819}
 820
 821/**
  822 * zpci_device_reserved() - Mark device as reserved
 823 * @zdev: the zpci_dev that was reserved
 824 *
 825 * Handle the case that a given zPCI function was reserved by another system.
  826 * After a call to this function the zpci_dev cannot be found via
  827 * get_zdev_by_fid() anymore but may still be accessible via existing
  828 * references, though it will not be functional anymore.
 829 */
 830void zpci_device_reserved(struct zpci_dev *zdev)
 831{
 832	if (zdev->has_hp_slot)
 833		zpci_exit_slot(zdev);
 834	/*
 835	 * Remove device from zpci_list as it is going away. This also
 836	 * makes sure we ignore subsequent zPCI events for this device.
 837	 */
 838	spin_lock(&zpci_list_lock);
 839	list_del(&zdev->entry);
 840	spin_unlock(&zpci_list_lock);
 841	zdev->state = ZPCI_FN_STATE_RESERVED;
 842	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
 843	zpci_zdev_put(zdev);
 844}
 845
 846void zpci_release_device(struct kref *kref)
 847{
 848	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
 849	int ret;
 850
 851	if (zdev->zbus->bus)
 852		zpci_bus_remove_device(zdev, false);
 853
 854	if (zdev_enabled(zdev))
 855		zpci_disable_device(zdev);
 856
 857	switch (zdev->state) {
 858	case ZPCI_FN_STATE_CONFIGURED:
 859		ret = sclp_pci_deconfigure(zdev->fid);
 860		zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
 861		fallthrough;
 862	case ZPCI_FN_STATE_STANDBY:
 863		if (zdev->has_hp_slot)
 864			zpci_exit_slot(zdev);
 865		spin_lock(&zpci_list_lock);
 866		list_del(&zdev->entry);
 867		spin_unlock(&zpci_list_lock);
 868		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
 869		fallthrough;
 870	case ZPCI_FN_STATE_RESERVED:
 871		if (zdev->has_resources)
 872			zpci_cleanup_bus_resources(zdev);
 873		zpci_bus_device_unregister(zdev);
 874		zpci_destroy_iommu(zdev);
 875		fallthrough;
 876	default:
 877		break;
 878	}
 879	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
 880	kfree(zdev);
 881}
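/*
 * Reference counting as used within this file: zpci_create_device()
 * initializes the kref, pcibios_add_device() takes an extra reference for
 * each struct pci_dev and pcibios_release_device() drops it again, and
 * zpci_device_reserved() drops a reference once the function has been
 * reserved by the platform. When the count reaches zero,
 * zpci_release_device() above tears down whatever state the device is
 * still in, falling through the switch from Configured to Standby to
 * Reserved.
 */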
 882
 883int zpci_report_error(struct pci_dev *pdev,
 884		      struct zpci_report_error_header *report)
 885{
 886	struct zpci_dev *zdev = to_zpci(pdev);
 887
 888	return sclp_pci_report(report, zdev->fh, zdev->fid);
 889}
 890EXPORT_SYMBOL(zpci_report_error);
 891
 892static int zpci_mem_init(void)
 893{
 894	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
 895		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
 896
 897	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
 898					   __alignof__(struct zpci_fmb), 0, NULL);
 899	if (!zdev_fmb_cache)
 900		goto error_fmb;
 901
 902	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
 903				   sizeof(*zpci_iomap_start), GFP_KERNEL);
 904	if (!zpci_iomap_start)
 905		goto error_iomap;
 906
 907	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
 908				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
 909	if (!zpci_iomap_bitmap)
 910		goto error_iomap_bitmap;
 911
 912	if (static_branch_likely(&have_mio))
 913		clp_setup_writeback_mio();
 914
 915	return 0;
 916error_iomap_bitmap:
 917	kfree(zpci_iomap_start);
 918error_iomap:
 919	kmem_cache_destroy(zdev_fmb_cache);
 920error_fmb:
 921	return -ENOMEM;
 922}
 923
 924static void zpci_mem_exit(void)
 925{
 926	kfree(zpci_iomap_bitmap);
 927	kfree(zpci_iomap_start);
 928	kmem_cache_destroy(zdev_fmb_cache);
 929}
 930
 931static unsigned int s390_pci_probe __initdata = 1;
 932unsigned int s390_pci_force_floating __initdata;
 933static unsigned int s390_pci_initialized;
 934
 935char * __init pcibios_setup(char *str)
 936{
 937	if (!strcmp(str, "off")) {
 938		s390_pci_probe = 0;
 939		return NULL;
 940	}
 941	if (!strcmp(str, "nomio")) {
 942		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
 943		return NULL;
 944	}
 945	if (!strcmp(str, "force_floating")) {
 946		s390_pci_force_floating = 1;
 947		return NULL;
 948	}
 949	if (!strcmp(str, "norid")) {
 950		s390_pci_no_rid = 1;
 951		return NULL;
 952	}
 953	return str;
 954}
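/*
 * The strings handled above arrive as suboptions of the "pci=" kernel
 * command-line parameter: "pci=off" disables PCI probing altogether,
 * "pci=nomio" falls back to the legacy (non-MIO) PCI instructions,
 * "pci=force_floating" forces floating rather than directed interrupts,
 * and "pci=norid" ignores the RID information supplied by the platform.
 */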
 955
 956bool zpci_is_enabled(void)
 957{
 958	return s390_pci_initialized;
 959}
 960
 961static int __init pci_base_init(void)
 962{
 963	int rc;
 964
 965	if (!s390_pci_probe)
 966		return 0;
 967
 968	if (!test_facility(69) || !test_facility(71)) {
 969		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
 970		return 0;
 971	}
 972
 973	if (MACHINE_HAS_PCI_MIO) {
 974		static_branch_enable(&have_mio);
 975		ctl_set_bit(2, 5);
 976	}
 977
 978	rc = zpci_debug_init();
 979	if (rc)
 980		goto out;
 981
 982	rc = zpci_mem_init();
 983	if (rc)
 984		goto out_mem;
 985
 986	rc = zpci_irq_init();
 987	if (rc)
 988		goto out_irq;
 989
 990	rc = zpci_dma_init();
 991	if (rc)
 992		goto out_dma;
 993
 994	rc = clp_scan_pci_devices();
 995	if (rc)
 996		goto out_find;
 997	zpci_bus_scan_busses();
 998
 999	s390_pci_initialized = 1;
1000	return 0;
1001
1002out_find:
1003	zpci_dma_exit();
1004out_dma:
1005	zpci_irq_exit();
1006out_irq:
1007	zpci_mem_exit();
1008out_mem:
1009	zpci_debug_exit();
1010out:
1011	return rc;
1012}
1013subsys_initcall_sync(pci_base_init);