Linux Audio

Check our new training course

Loading...
v4.17
 
  1/*
  2 * Port for PPC64 David Engebretsen, IBM Corp.
  3 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
  4 * 
  5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
  6 *   Rework, based on alpha PCI code.
  7 *
  8 *      This program is free software; you can redistribute it and/or
  9 *      modify it under the terms of the GNU General Public License
 10 *      as published by the Free Software Foundation; either version
 11 *      2 of the License, or (at your option) any later version.
 12 */
 13
 14#undef DEBUG
 15
 16#include <linux/kernel.h>
 17#include <linux/pci.h>
 18#include <linux/string.h>
 19#include <linux/init.h>
 20#include <linux/export.h>
 21#include <linux/mm.h>
 22#include <linux/list.h>
 23#include <linux/syscalls.h>
 24#include <linux/irq.h>
 25#include <linux/vmalloc.h>
 
 26
 27#include <asm/processor.h>
 28#include <asm/io.h>
 29#include <asm/prom.h>
 30#include <asm/pci-bridge.h>
 31#include <asm/byteorder.h>
 32#include <asm/machdep.h>
 33#include <asm/ppc-pci.h>
 34
 35/* pci_io_base -- the base address from which io bars are offsets.
 36 * This is the lowest I/O base address (so bar values are always positive),
 37 * and it *must* be the start of ISA space if an ISA bus exists because
 38 * ISA drivers use hard coded offsets.  If no ISA bus exists nothing
 39 * is mapped on the first 64K of IO space
 40 */
 41unsigned long pci_io_base;
 42EXPORT_SYMBOL(pci_io_base);
 43
/*
 * Boot-time PCI initialisation for ppc64.
 *
 * Scans every PHB recorded on hose_list, adds the discovered devices,
 * then lets common code survey and assign resources.  Registered as a
 * subsys initcall below.  Always returns 0.
 */
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;

	printk(KERN_INFO "PCI: Probing PCI hardware\n");

	/* For now, override phys_mem_access_prot. If we need it
	 * later, we may move that initialization to each ppc_md
	 */
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

	/* On ppc64, we always enable PCI domains and we keep domain 0
	 * backward compatible in /proc for video cards
	 */
	pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);

	/* Scan all of the recorded PCI controllers.  _safe variant is
	 * used because scanning may modify hose_list.
	 */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pcibios_scan_phb(hose);
		pci_bus_add_devices(hose->bus);
	}

	/* Call common code to handle resource allocation */
	pcibios_resource_survey();

	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall(pcibios_init);
 75
/*
 * Tear down the kernel mapping of a bus' legacy I/O space on removal.
 *
 * @bus: the bus being removed; a P2P bridge bus (bus->self set) or a
 *       PHB root bus (bus->self NULL).
 *
 * For a bridge, only the hash table is flushed over the bridged I/O
 * window; for a PHB, the whole vmalloc-space I/O mapping is unmapped.
 * Always returns 0.
 */
int pcibios_unmap_io_space(struct pci_bus *bus)
{
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this is not a PHB, we only flush the hash table over
	 * the area mapped by this bridge. We don't play with the PTE
	 * mappings since we might have to deal with sub-page alignments
	 * so flushing the hash table is the only sane way to make sure
	 * that no hash entries are covering that removed bridge area
	 * while still allowing other busses overlapping those pages
	 *
	 * Note: If we ever support P2P hotplug on Book3E, we'll have
	 * to do an appropriate TLB flush here too
	 */
	if (bus->self) {
#ifdef CONFIG_PPC_BOOK3S_64
		/* resource[0] is the bridge's I/O window */
		struct resource *res = bus->resource[0];
#endif

		pr_debug("IO unmapping for PCI-PCI bridge %s\n",
			 pci_name(bus->self));

#ifdef CONFIG_PPC_BOOK3S_64
		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
					 res->end + _IO_BASE + 1);
#endif
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);

	/* Check if we have IOs allocated */
	if (hose->io_base_alloc == NULL)
		return 0;

	pr_debug("IO unmapping for PHB %pOF\n", hose->dn);
	pr_debug("  alloc=0x%p\n", hose->io_base_alloc);

	/* This is a PHB, we fully unmap the IO area */
	vunmap(hose->io_base_alloc);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);
123
/*
 * Map a PHB's physical I/O space into the kernel's PHB I/O virtual
 * range [PHB_IO_BASE, PHB_IO_END) and shift the hose's io_resource
 * into that virtual space.
 *
 * Returns 0 on success (including the no-I/O case) or -ENOMEM if the
 * virtual area or the mapping cannot be established.
 */
static int pcibios_map_phb_io_space(struct pci_controller *hose)
{
	struct vm_struct *area;
	unsigned long phys_page;
	unsigned long size_page;
	unsigned long io_virt_offset;

	/* Page-align the physical window; the sub-page remainder is
	 * re-added to io_base_virt below.
	 */
	phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
	size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);

	/* Make sure IO area address is clear */
	hose->io_base_alloc = NULL;

	/* If there's no IO to map on that bus, get away too */
	if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
		return 0;

	/* Let's allocate some IO space for that guy. We don't pass
	 * VM_IOREMAP because we don't care about alignment tricks that
	 * the core does in that case. Maybe we should due to stupid card
	 * with incomplete address decoding but I'd rather not deal with
	 * those outside of the reserved 64K legacy region.
	 */
	area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
	if (area == NULL)
		return -ENOMEM;
	hose->io_base_alloc = area->addr;
	hose->io_base_virt = (void __iomem *)(area->addr +
					      hose->io_base_phys - phys_page);

	pr_debug("IO mapping for PHB %pOF\n", hose->dn);
	pr_debug("  phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
		 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
	pr_debug("  size=0x%016llx (alloc=0x%016lx)\n",
		 hose->pci_io_size, size_page);

	/* Establish the mapping (non-cached, as required for I/O space) */
	if (__ioremap_at(phys_page, area->addr, size_page,
			 pgprot_val(pgprot_noncached(__pgprot(0)))) == NULL)
		return -ENOMEM;

	/* Fixup hose IO resource */
	io_virt_offset = pcibios_io_space_offset(hose);
	hose->io_resource.start += io_virt_offset;
	hose->io_resource.end += io_virt_offset;

	pr_debug("  hose->io_resource=%pR\n", &hose->io_resource);

	return 0;
}
174
175int pcibios_map_io_space(struct pci_bus *bus)
176{
177	WARN_ON(bus == NULL);
178
179	/* If this not a PHB, nothing to do, page tables still exist and
180	 * thus HPTEs will be faulted in when needed
181	 */
182	if (bus->self) {
183		pr_debug("IO mapping for PCI-PCI bridge %s\n",
184			 pci_name(bus->self));
185		pr_debug("  virt=0x%016llx...0x%016llx\n",
186			 bus->resource[0]->start + _IO_BASE,
187			 bus->resource[0]->end + _IO_BASE);
188		return 0;
189	}
190
191	return pcibios_map_phb_io_space(pci_bus_to_host(bus));
192}
193EXPORT_SYMBOL_GPL(pcibios_map_io_space);
194
/* Arch hook: map the PHB's I/O space at controller setup time.
 * The return value of pcibios_map_phb_io_space() is deliberately
 * ignored here; a failed mapping simply leaves io_base_alloc NULL.
 */
void pcibios_setup_phb_io_space(struct pci_controller *hose)
{
	pcibios_map_phb_io_space(hose);
}
199
200#define IOBASE_BRIDGE_NUMBER	0
201#define IOBASE_MEMORY		1
202#define IOBASE_IO		2
203#define IOBASE_ISA_IO		3
204#define IOBASE_ISA_MEM		4
205
/*
 * Legacy syscall: return a base address for the given bus/devfn.
 *
 * @which:    one of the IOBASE_* selectors above
 * @in_bus:   bus number to look up (first matching root bus wins)
 * @in_devfn: unused here, kept for the syscall ABI
 *
 * Returns the requested base, or -ENODEV when no matching bus/OF node
 * exists, -EINVAL for IOBASE_ISA_MEM, -EOPNOTSUPP for unknown @which.
 */
long sys_pciconfig_iobase(long which, unsigned long in_bus,
			  unsigned long in_devfn)
{
	struct pci_controller* hose;
	struct pci_bus *tmp_bus, *bus = NULL;
	struct device_node *hose_node;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lockup on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
	if (in_bus == 0 && of_machine_is_compatible("MacRISC4")) {
		struct device_node *agp;

		agp = of_find_compatible_node(NULL, NULL, "u3-agp");
		if (agp)
			in_bus = 0xf0;
		/* of_node_put(NULL) is a no-op, safe either way */
		of_node_put(agp);
	}

	/* That syscall isn't quite compatible with PCI domains, but it's
	 * used on pre-domains setup. We return the first match
	 */

	list_for_each_entry(tmp_bus, &pci_root_buses, node) {
		if (in_bus >= tmp_bus->number &&
		    in_bus <= tmp_bus->busn_res.end) {
			bus = tmp_bus;
			break;
		}
	}
	if (bus == NULL || bus->dev.of_node == NULL)
		return -ENODEV;

	hose_node = bus->dev.of_node;
	hose = PCI_DN(hose_node)->phb;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->mem_offset[0];
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return -EINVAL;
	}

	return -EOPNOTSUPP;
}
259
260#ifdef CONFIG_NUMA
261int pcibus_to_node(struct pci_bus *bus)
262{
263	struct pci_controller *phb = pci_bus_to_host(bus);
264	return phb->node;
265}
266EXPORT_SYMBOL(pcibus_to_node);
 
 
 
 
 
 
 
 
 
 
 
267#endif
v6.8
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Port for PPC64 David Engebretsen, IBM Corp.
  4 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
  5 * 
  6 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
  7 *   Rework, based on alpha PCI code.
 
 
 
 
 
  8 */
  9
 10#undef DEBUG
 11
 12#include <linux/kernel.h>
 13#include <linux/pci.h>
 14#include <linux/string.h>
 15#include <linux/init.h>
 16#include <linux/export.h>
 17#include <linux/mm.h>
 18#include <linux/list.h>
 19#include <linux/syscalls.h>
 20#include <linux/irq.h>
 21#include <linux/vmalloc.h>
 22#include <linux/of.h>
 23
 24#include <asm/processor.h>
 25#include <asm/io.h>
 
 26#include <asm/pci-bridge.h>
 27#include <asm/byteorder.h>
 28#include <asm/machdep.h>
 29#include <asm/ppc-pci.h>
 30
 31/* pci_io_base -- the base address from which io bars are offsets.
 32 * This is the lowest I/O base address (so bar values are always positive),
 33 * and it *must* be the start of ISA space if an ISA bus exists because
 34 * ISA drivers use hard coded offsets.  If no ISA bus exists nothing
 35 * is mapped on the first 64K of IO space
 36 */
 37unsigned long pci_io_base;
 38EXPORT_SYMBOL(pci_io_base);
 39
/*
 * Boot-time PCI initialisation for ppc64.
 *
 * Two passes over hose_list: first scan every PHB, then — after common
 * code has surveyed and assigned resources — add the devices, so that
 * device add runs with final resources.  Finishes with the machine
 * dependent fixup hook.  Always returns 0.
 */
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;

	printk(KERN_INFO "PCI: Probing PCI hardware\n");

	/* For now, override phys_mem_access_prot. If we need it
	 * later, we may move that initialization to each ppc_md
	 */
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

	/* On ppc64, we always enable PCI domains and we keep domain 0
	 * backward compatible in /proc for video cards
	 */
	pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);

	/* Scan all of the recorded PCI controllers.  */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		pcibios_scan_phb(hose);

	/* Call common code to handle resource allocation */
	pcibios_resource_survey();

	/* Add devices. */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		pci_bus_add_devices(hose->bus);

	/* Call machine dependent fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall_sync(pcibios_init);
 77
/*
 * Tear down the kernel mapping of a bus' legacy I/O space on removal.
 *
 * @bus: the bus being removed; a P2P bridge bus (bus->self set) or a
 *       PHB root bus (bus->self NULL).
 *
 * For a bridge, only the hash table is flushed over the bridged I/O
 * window; for a PHB, the I/O mapping is iounmap'd (iounmap(NULL) is a
 * no-op, so an unmapped PHB is harmless).  Always returns 0.
 */
int pcibios_unmap_io_space(struct pci_bus *bus)
{
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this is not a PHB, we only flush the hash table over
	 * the area mapped by this bridge. We don't play with the PTE
	 * mappings since we might have to deal with sub-page alignments
	 * so flushing the hash table is the only sane way to make sure
	 * that no hash entries are covering that removed bridge area
	 * while still allowing other busses overlapping those pages
	 *
	 * Note: If we ever support P2P hotplug on Book3E, we'll have
	 * to do an appropriate TLB flush here too
	 */
	if (bus->self) {
#ifdef CONFIG_PPC_BOOK3S_64
		/* resource[0] is the bridge's I/O window */
		struct resource *res = bus->resource[0];
#endif

		pr_debug("IO unmapping for PCI-PCI bridge %s\n",
			 pci_name(bus->self));

#ifdef CONFIG_PPC_BOOK3S_64
		__flush_hash_table_range(res->start + _IO_BASE,
					 res->end + _IO_BASE + 1);
#endif
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);

	pr_debug("IO unmapping for PHB %pOF\n", hose->dn);
	pr_debug("  alloc=0x%p\n", hose->io_base_alloc);

	iounmap(hose->io_base_alloc);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);
119
/**
 * ioremap_phb - map a physical I/O window into the PHB I/O virtual range
 * @paddr: page-aligned physical base of the window
 * @size:  page-aligned size of the window
 *
 * Reserves a virtual area inside [PHB_IO_BASE, PHB_IO_END) and maps
 * @paddr there non-cached.  Returns the virtual address, or NULL when
 * either the area reservation or the page-range mapping fails.
 * Alignment violations only WARN; callers pass aligned values.
 */
void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;

	WARN_ON_ONCE(paddr & ~PAGE_MASK);
	WARN_ON_ONCE(size & ~PAGE_MASK);

	/*
	 * Let's allocate some IO space for that guy. We don't pass VM_IOREMAP
	 * because we don't care about alignment tricks that the core does in
	 * that case.  Maybe we should due to stupid card with incomplete
	 * address decoding but I'd rather not deal with those outside of the
	 * reserved 64K legacy region.
	 */
	area = __get_vm_area_caller(size, VM_IOREMAP, PHB_IO_BASE, PHB_IO_END,
				    __builtin_return_address(0));
	if (!area)
		return NULL;

	addr = (unsigned long)area->addr;
	if (ioremap_page_range(addr, addr + size, paddr,
			pgprot_noncached(PAGE_KERNEL))) {
		/* undo any partially-established mapping before bailing */
		vunmap_range(addr, addr + size);
		return NULL;
	}

	return (void __iomem *)addr;
}
EXPORT_SYMBOL_GPL(ioremap_phb);
150
151static int pcibios_map_phb_io_space(struct pci_controller *hose)
152{
153	unsigned long phys_page;
154	unsigned long size_page;
155	unsigned long io_virt_offset;
156
157	phys_page = ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
158	size_page = ALIGN(hose->pci_io_size, PAGE_SIZE);
159
160	/* Make sure IO area address is clear */
161	hose->io_base_alloc = NULL;
162
163	/* If there's no IO to map on that bus, get away too */
164	if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
165		return 0;
166
167	/* Let's allocate some IO space for that guy. We don't pass
168	 * VM_IOREMAP because we don't care about alignment tricks that
169	 * the core does in that case. Maybe we should due to stupid card
170	 * with incomplete address decoding but I'd rather not deal with
171	 * those outside of the reserved 64K legacy region.
172	 */
173	hose->io_base_alloc = ioremap_phb(phys_page, size_page);
174	if (!hose->io_base_alloc)
175		return -ENOMEM;
176	hose->io_base_virt = hose->io_base_alloc +
177				hose->io_base_phys - phys_page;
 
178
179	pr_debug("IO mapping for PHB %pOF\n", hose->dn);
180	pr_debug("  phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
181		 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
182	pr_debug("  size=0x%016llx (alloc=0x%016lx)\n",
183		 hose->pci_io_size, size_page);
184
 
 
 
 
 
185	/* Fixup hose IO resource */
186	io_virt_offset = pcibios_io_space_offset(hose);
187	hose->io_resource.start += io_virt_offset;
188	hose->io_resource.end += io_virt_offset;
189
190	pr_debug("  hose->io_resource=%pR\n", &hose->io_resource);
191
192	return 0;
193}
194
195int pcibios_map_io_space(struct pci_bus *bus)
196{
197	WARN_ON(bus == NULL);
198
199	/* If this not a PHB, nothing to do, page tables still exist and
200	 * thus HPTEs will be faulted in when needed
201	 */
202	if (bus->self) {
203		pr_debug("IO mapping for PCI-PCI bridge %s\n",
204			 pci_name(bus->self));
205		pr_debug("  virt=0x%016llx...0x%016llx\n",
206			 bus->resource[0]->start + _IO_BASE,
207			 bus->resource[0]->end + _IO_BASE);
208		return 0;
209	}
210
211	return pcibios_map_phb_io_space(pci_bus_to_host(bus));
212}
213EXPORT_SYMBOL_GPL(pcibios_map_io_space);
214
/* Arch hook: map the PHB's I/O space at controller setup time.
 * The return value of pcibios_map_phb_io_space() is deliberately
 * ignored here; a failed mapping simply leaves io_base_alloc NULL.
 */
void pcibios_setup_phb_io_space(struct pci_controller *hose)
{
	pcibios_map_phb_io_space(hose);
}
219
220#define IOBASE_BRIDGE_NUMBER	0
221#define IOBASE_MEMORY		1
222#define IOBASE_IO		2
223#define IOBASE_ISA_IO		3
224#define IOBASE_ISA_MEM		4
225
/*
 * Legacy syscall: return a base address for the given bus/devfn.
 *
 * @which:    one of the IOBASE_* selectors above
 * @in_bus:   bus number to look up (first matching root bus wins)
 * @in_devfn: unused here, kept for the syscall ABI
 *
 * Returns the requested base, or -ENODEV when no matching bus/OF node
 * exists, -EINVAL for IOBASE_ISA_MEM, -EOPNOTSUPP for unknown @which.
 */
SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
			  unsigned long, in_devfn)
{
	struct pci_controller* hose;
	struct pci_bus *tmp_bus, *bus = NULL;
	struct device_node *hose_node;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lockup on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
	if (in_bus == 0 && of_machine_is_compatible("MacRISC4")) {
		struct device_node *agp;

		agp = of_find_compatible_node(NULL, NULL, "u3-agp");
		if (agp)
			in_bus = 0xf0;
		/* of_node_put(NULL) is a no-op, safe either way */
		of_node_put(agp);
	}

	/* That syscall isn't quite compatible with PCI domains, but it's
	 * used on pre-domains setup. We return the first match
	 */

	list_for_each_entry(tmp_bus, &pci_root_buses, node) {
		if (in_bus >= tmp_bus->number &&
		    in_bus <= tmp_bus->busn_res.end) {
			bus = tmp_bus;
			break;
		}
	}
	if (bus == NULL || bus->dev.of_node == NULL)
		return -ENODEV;

	hose_node = bus->dev.of_node;
	hose = PCI_DN(hose_node)->phb;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->mem_offset[0];
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return -EINVAL;
	}

	return -EOPNOTSUPP;
}
279
280#ifdef CONFIG_NUMA
281int pcibus_to_node(struct pci_bus *bus)
282{
283	struct pci_controller *phb = pci_bus_to_host(bus);
284	return phb->node;
285}
286EXPORT_SYMBOL(pcibus_to_node);
287#endif
288
289#ifdef CONFIG_PPC_PMAC
290int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn)
291{
292	if (!PCI_DN(np))
293		return -ENODEV;
294	*bus = PCI_DN(np)->busno;
295	*devfn = PCI_DN(np)->devfn;
296	return 0;
297}
298#endif