v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))

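An illustrative aside, not part of the kernel file: a hypothetical helper showing what the classic encoding produces for concrete operands (PCI_DEVFN() comes from <linux/pci.h>, already included above).

/* Hypothetical example, for illustration only. */
static u64 example_sal_addr(void)
{
	/* seg 0, bus 2, device 3 function 0, register 4:
	 * (2 << 16) | (0x18 << 8) | 4 == 0x21804
	 */
	return PCI_SAL_ADDRESS(0, 2, PCI_DEVFN(3, 0), 4);
}
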
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
	      int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}
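
A hedged usage sketch, again not from the file: probing the 32-bit vendor/device ID of segment 0, bus 0, device 0, function 0 through raw_pci_read(); the helper name is invented.

static u32 example_read_vendor_id(void)
{
	u32 id = 0;

	/* Dword at config offset 0 holds vendor and device ID. */
	if (raw_pci_read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 4, &id))
		return ~0;	/* -EINVAL: bad argument or SAL failure */
	return id;
}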

int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
	       int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}
	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
							int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
							int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
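
For orientation (editorial sketch, not in the source): an ordinary driver-side config accessor ends up here via the bus's pci_ops. The helper name below is invented.

static u32 example_read_revision(struct pci_dev *dev)
{
	u32 val = 0;

	/* Dispatches through dev->bus->ops (pci_root_ops above), i.e.
	 * pci_read() -> raw_pci_read() -> ia64_sal_pci_config_read(). */
	if (pci_read_config_dword(dev, PCI_CLASS_REVISION, &val))
		return 0;
	return val;
}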

struct pci_root_info {
	struct acpi_pci_root_info common;
	struct pci_controller controller;
	struct list_head io_resources;
};

static unsigned int new_space(u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		pr_err("PCI: Too many IO port spaces (MAX_IO_SPACES=%lu)\n",
		       MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}

static int add_io_space(struct device *dev, struct pci_root_info *info,
			struct resource_entry *entry)
{
	struct resource_entry *iospace;
	struct resource *resource, *res = entry->res;
	char *name;
	unsigned long base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	len = strlen(info->common.name) + 32;
	iospace = resource_list_create_entry(NULL, len);
	if (!iospace) {
		dev_err(dev, "PCI: No memory for %s I/O port space\n",
			info->common.name);
		return -ENOMEM;
	}

	if (res->flags & IORESOURCE_IO_SPARSE)
		sparse = 1;
	space_nr = new_space(entry->offset, sparse);
	if (space_nr == ~0)
		goto free_resource;

	name = (char *)(iospace + 1);
	min = res->start - entry->offset;
	max = res->end - entry->offset;
	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->common.name,
		 base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;

	resource = iospace->res;
	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	if (insert_resource(&iomem_resource, resource)) {
		dev_err(dev,
			"can't allocate host bridge io space resource %pR\n",
			resource);
		goto free_resource;
	}

	entry->offset = base_port;
	res->start = min + base_port;
	res->end = max + base_port;
	resource_list_add_tail(iospace, &info->io_resources);

	return 0;

free_resource:
	resource_list_free_entry(iospace);
	return -ENOSPC;
}

/*
 * An IO port or MMIO resource assigned to a PCI host bridge may be
 * consumed by the host bridge itself or available to its child
 * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
 * to tell whether the resource is consumed by the host bridge itself,
 * but firmware hasn't used that bit consistently, so we can't rely on it.
 *
 * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
 * to be available to child bus/devices except one special case:
 *     IO port [0xCF8-0xCFF] is consumed by the host bridge itself
 *     to access PCI configuration space.
 *
 * So explicitly filter out PCI CFG IO ports [0xCF8-0xCFF].
 */
static bool resource_is_pcicfg_ioport(struct resource *res)
{
	return (res->flags & IORESOURCE_IO) &&
		res->start == 0xCF8 && res->end == 0xCFF;
}

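A small illustration (not in the source) of the filter's effect on two hypothetical resources:

static bool example_filter(void)
{
	struct resource cfg = {
		.start = 0xCF8, .end = 0xCFF, .flags = IORESOURCE_IO,
	};
	struct resource win = {
		.start = 0x1000, .end = 0x1fff, .flags = IORESOURCE_IO,
	};

	/* The config-access window is filtered out; a normal window is kept. */
	return resource_is_pcicfg_ioport(&cfg) && !resource_is_pcicfg_ioport(&win);
}
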
static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
{
	struct device *dev = &ci->bridge->dev;
	struct pci_root_info *info;
	struct resource *res;
	struct resource_entry *entry, *tmp;
	int status;

	status = acpi_pci_probe_root_resources(ci);
	if (status > 0) {
		info = container_of(ci, struct pci_root_info, common);
		resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
			res = entry->res;
			if (res->flags & IORESOURCE_MEM) {
				/*
				 * HP's firmware has a hack to work around a
				 * Windows bug. Ignore these tiny memory ranges.
				 */
				if (resource_size(res) <= 16) {
					resource_list_del(entry);
					insert_resource(&iomem_resource,
							entry->res);
					resource_list_add_tail(entry,
							&info->io_resources);
				}
			} else if (res->flags & IORESOURCE_IO) {
				if (resource_is_pcicfg_ioport(entry->res))
					resource_list_destroy_entry(entry);
				else if (add_io_space(dev, info, entry))
					resource_list_destroy_entry(entry);
			}
		}
	}

	return status;
}

static void pci_acpi_root_release_info(struct acpi_pci_root_info *ci)
{
	struct pci_root_info *info;
	struct resource_entry *entry, *tmp;

	info = container_of(ci, struct pci_root_info, common);
	resource_list_for_each_entry_safe(entry, tmp, &info->io_resources) {
		release_resource(entry->res);
		resource_list_destroy_entry(entry);
	}
	kfree(info);
}

static struct acpi_pci_root_ops pci_acpi_root_ops = {
	.pci_ops = &pci_root_ops,
	.release_info = pci_acpi_root_release_info,
	.prepare_resources = pci_acpi_root_prepare_resources,
};

struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	struct pci_root_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		dev_err(&device->dev,
			"pci_bus %04x:%02x: ignored (out of memory)\n",
			root->segment, (int)root->secondary.start);
		return NULL;
	}

	info->controller.segment = root->segment;
	info->controller.companion = device;
	info->controller.node = acpi_get_node(device->handle);
	INIT_LIST_HEAD(&info->io_resources);
	return acpi_pci_root_create(root, &pci_acpi_root_ops,
				    &info->common, &info->controller);
}

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	/*
	 * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
	 * here, pci_create_root_bus() has been called by someone else and
	 * sysdata is likely to be different from what we expect.  Let it go in
	 * that case.
	 */
	if (!bridge->dev.parent) {
		struct pci_controller *controller = bridge->bus->sysdata;
		ACPI_COMPANION_SET(&bridge->dev, controller->companion);
	}
	return 0;
}

void pcibios_fixup_device_resources(struct pci_dev *dev)
{
	int idx;

	if (!dev->bus)
		return;

	for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
		struct resource *r = &dev->resource[idx];

		if (!r->flags || r->parent || !r->start)
			continue;

		pci_claim_resource(dev, idx);
	}
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
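
Editorial note, not from the source: pci_claim_resource() inserts an already firmware-assigned BAR into the resource tree under its enclosing window; the loop above skips BARs that are flagless, already claimed (r->parent), or unset (!r->start).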

static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	int idx;

	if (!dev->bus)
		return;

	for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
		struct resource *r = &dev->resource[idx];

		if (!r->flags || r->parent || !r->start)
			continue;

		pci_claim_bridge_resource(dev, idx);
	}
}

/*
 *  Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
}

void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

int
pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int ret;

	ret = pci_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!pci_dev_msi_enabled(dev))
		return acpi_pci_irq_enable(dev);
	return 0;
}

void
pcibios_disable_device(struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!pci_dev_msi_enabled(dev))
		acpi_pci_irq_disable(dev);
}

/**
 * pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}

/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 * @mmap_state: memory or I/O space
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
			   enum pci_mmap_state mmap_state)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/* We only support mmap'ing of legacy memory space */
	if (mmap_state != pci_mmap_mem)
		return -ENOSYS;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.rst
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		outb(val, port);
		break;
	case 2:
		outw(val, port);
		break;
	case 4:
		outl(val, port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

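Another hedged sketch, not from the file: a one-byte read through the legacy path. Port 0x3CC (the VGA miscellaneous output register) is only an example, and the helper name is invented.

static int example_legacy_read(struct pci_bus *bus)
{
	u32 val = 0;
	int ret = pci_legacy_read(bus, 0x3cc, &val, 1);

	/* pci_legacy_read() returns the size on success. */
	return ret == 1 ? (int)val : ret;
}
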
/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
	unsigned long levels, unique_caches;
	long status;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		return;
	}

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified)= */ 2, &cci);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
		       __func__, status);
		return;
	}
	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}

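A worked example of the arithmetic above, using invented numbers:

/* pcci_line_size == 7 means a 128-byte outermost cache line, so
 * (1 << 7) / 4 == 32: the value is stored in 32-bit units, the
 * granularity of the PCI cache line size register.
 */
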
static int __init pcibios_init(void)
{
	set_pci_dfl_cacheline_size();
	return 0;
}

subsys_initcall(pcibios_init);
v3.1
 
/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))

int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
	      int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}

int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
	       int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}
	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
							int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
							int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

/* Called by ACPI when it finds a new root bus.  */

static struct pci_controller * __devinit
alloc_pci_controller(int seg)
{
	struct pci_controller *controller;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->segment = seg;
	controller->node = -1;
	return controller;
}

struct pci_root_info {
	struct acpi_device *bridge;
	struct pci_controller *controller;
	char *name;
};

static unsigned int
new_space(u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		printk(KERN_ERR "PCI: Too many IO port spaces (MAX_IO_SPACES=%lu)\n",
		       MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}

static u64 __devinit
add_io_space(struct pci_root_info *info, struct acpi_resource_address64 *addr)
{
	struct resource *resource;
	char *name;
	unsigned long base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
			info->name);
		goto out;
	}

	len = strlen(info->name) + 32;
	name = kzalloc(len, GFP_KERNEL);
	if (!name) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
			info->name);
		goto free_resource;
	}

	min = addr->minimum;
	max = min + addr->address_length - 1;
	if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	space_nr = new_space(addr->translation_offset, sparse);
	if (space_nr == ~0)
		goto free_name;

	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
		base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;

	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	insert_resource(&iomem_resource, resource);

	return base_port;

free_name:
	kfree(name);
free_resource:
	kfree(resource);
out:
	return ~0;
}

static acpi_status __devinit resource_to_window(struct acpi_resource *resource,
	struct acpi_resource_address64 *addr)
{
	acpi_status status;

	/*
	 * We're only interested in _CRS descriptors that are
	 *	- address space descriptors for memory or I/O space
	 *	- non-zero size
	 *	- producers, i.e., the address space is routed downstream,
	 *	  not consumed by the bridge itself
	 */
	status = acpi_resource_to_address64(resource, addr);
	if (ACPI_SUCCESS(status) &&
	    (addr->resource_type == ACPI_MEMORY_RANGE ||
	     addr->resource_type == ACPI_IO_RANGE) &&
	    addr->address_length &&
	    addr->producer_consumer == ACPI_PRODUCER)
		return AE_OK;

	return AE_ERROR;
}

static acpi_status __devinit
count_window(struct acpi_resource *resource, void *data)
{
	unsigned int *windows = (unsigned int *) data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_window(resource, &addr);
	if (ACPI_SUCCESS(status))
		(*windows)++;

	return AE_OK;
}

static acpi_status __devinit add_window(struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = data;
	struct pci_window *window;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	/* Return AE_OK for non-window resources to keep scanning for more */
	status = resource_to_window(res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
		offset = addr.translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
		offset = add_io_space(info, &addr);
		if (offset == ~0)
			return AE_OK;
	} else
		return AE_OK;

	window = &info->controller->window[info->controller->windows++];
	window->resource.name = info->name;
	window->resource.flags = flags;
	window->resource.start = addr.minimum + offset;
	window->resource.end = window->resource.start + addr.address_length - 1;
	window->resource.child = NULL;
	window->offset = offset;

	if (insert_resource(root, &window->resource)) {
		dev_err(&info->bridge->dev,
			"can't allocate host bridge window %pR\n",
			&window->resource);
	} else {
		if (offset)
			dev_info(&info->bridge->dev, "host bridge window %pR "
				 "(PCI address [%#llx-%#llx])\n",
				 &window->resource,
				 window->resource.start - offset,
				 window->resource.end - offset);
		else
			dev_info(&info->bridge->dev,
				 "host bridge window %pR\n",
				 &window->resource);
	}

	return AE_OK;
}

static void __devinit
pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
{
	int i;

	pci_bus_remove_resources(bus);
	for (i = 0; i < ctrl->windows; i++) {
		struct resource *res = &ctrl->window[i].resource;

		/*
		 * HP's firmware has a hack to work around a Windows bug.
		 * Ignore these tiny memory ranges.
		 */
		if ((res->flags & IORESOURCE_MEM) &&
		    (res->end - res->start < 16))
			continue;
		pci_bus_add_resource(bus, res, 0);
	}
}

struct pci_bus * __devinit
pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int bus = root->secondary.start;
	struct pci_controller *controller;
	unsigned int windows = 0;
	struct pci_bus *pbus;
	char *name;
	int pxm;

	controller = alloc_pci_controller(domain);
	if (!controller)
		goto out1;

	controller->acpi_handle = device->handle;

	pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
	if (pxm >= 0)
		controller->node = pxm_to_node(pxm);
#endif

	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
			&windows);
	if (windows) {
		struct pci_root_info info;

		controller->window =
			kmalloc_node(sizeof(*controller->window) * windows,
				     GFP_KERNEL, controller->node);
		if (!controller->window)
			goto out2;

		name = kmalloc(16, GFP_KERNEL);
		if (!name)
			goto out3;

		sprintf(name, "PCI Bus %04x:%02x", domain, bus);
		info.bridge = device;
		info.controller = controller;
		info.name = name;
		acpi_walk_resources(device->handle, METHOD_NAME__CRS,
			add_window, &info);
	}
	/*
	 * See arch/x86/pci/acpi.c.
	 * The desired pci bus might already be scanned in a quirk. We
	 * should handle the case here, but it appears that IA64 has no
	 * such quirk. So we just ignore the case for now.
	 */
	pbus = pci_scan_bus_parented(NULL, bus, &pci_root_ops, controller);

	return pbus;

out3:
	kfree(controller->window);
out2:
	kfree(controller);
out1:
	return NULL;
}

void pcibios_resource_to_bus(struct pci_dev *dev,
		struct pci_bus_region *region, struct resource *res)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev);
	unsigned long offset = 0;
	int i;

	for (i = 0; i < controller->windows; i++) {
		struct pci_window *window = &controller->window[i];
		if (!(window->resource.flags & res->flags))
			continue;
		if (window->resource.start > res->start)
			continue;
		if (window->resource.end < res->end)
			continue;
		offset = window->offset;
		break;
	}

	region->start = res->start - offset;
	region->end = res->end - offset;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);
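
A worked example with invented numbers: suppose a window covers CPU addresses 0x80000000-0x8fffffff with window->offset == 0x80000000. A resource claimed at CPU addresses 0x80001000-0x80001fff then translates to bus (PCI) addresses:

/* region->start = 0x80001000 - 0x80000000 = 0x1000
 * region->end   = 0x80001fff - 0x80000000 = 0x1fff
 */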

void pcibios_bus_to_resource(struct pci_dev *dev,
		struct resource *res, struct pci_bus_region *region)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev);
	unsigned long offset = 0;
	int i;

	for (i = 0; i < controller->windows; i++) {
		struct pci_window *window = &controller->window[i];
		if (!(window->resource.flags & res->flags))
			continue;
		if (window->resource.start - window->offset > region->start)
			continue;
		if (window->resource.end - window->offset < region->end)
			continue;
		offset = window->offset;
		break;
	}

	res->start = region->start + offset;
	res->end = region->end + offset;
}
EXPORT_SYMBOL(pcibios_bus_to_resource);

static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
{
	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
	struct resource *devr = &dev->resource[idx], *busr;

	if (!dev->bus)
		return 0;

	pci_bus_for_each_resource(dev->bus, busr, i) {
		if (!busr || ((busr->flags ^ devr->flags) & type_mask))
			continue;
		if ((devr->start) && (devr->start >= busr->start) &&
				(devr->end <= busr->end))
			return 1;
	}
	return 0;
}

static void __devinit
pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
{
	struct pci_bus_region region;
	int i;

	for (i = start; i < limit; i++) {
		if (!dev->resource[i].flags)
			continue;
		region.start = dev->resource[i].start;
		region.end = dev->resource[i].end;
		pcibios_bus_to_resource(dev, &dev->resource[i], &region);
		if (is_valid_resource(dev, i))
			pci_claim_resource(dev, i);
	}
}

void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);

static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
}

/*
 *  Called after each bus is probed, but before its children are examined.
 */
void __devinit
pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);
	} else {
		pcibios_setup_root_windows(b, b->sysdata);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
	platform_pci_fixup_bus(b);
}

void __devinit
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);

	/* ??? FIXME -- record old value for shutdown.  */
}

int
pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int ret;

	ret = pci_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!dev->msi_enabled)
		return acpi_pci_irq_enable(dev);
	return 0;
}

void
pcibios_disable_device(struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!dev->msi_enabled)
		acpi_pci_irq_disable(dev);
}

resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
		       resource_size_t size, resource_size_t align)
{
	return res->start;
}

/*
 * PCI BIOS setup, always defaults to SAL interface
 */
char * __init
pcibios_setup(char *str)
{
	return str;
}

int
pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
		    enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;

	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped.  But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	/*
	 * If the user requested WC, the kernel uses UC or WC for this region,
	 * and the chipset supports WC, we can use WC. Otherwise, we have to
	 * use the same attribute the kernel uses.
	 */
	if (write_combine &&
	    ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
	     (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
	    efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}

/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 * @mmap_state: memory or I/O space
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
			   enum pci_mmap_state mmap_state)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/* We only support mmap'ing of legacy memory space */
	if (mmap_state != pci_mmap_mem)
		return -ENOSYS;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		outb(val, port);
		break;
	case 2:
		outw(val, port);
		break;
	case 4:
		outl(val, port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
	unsigned long levels, unique_caches;
	long status;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		return;
	}

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified)= */ 2, &cci);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
		       __func__, status);
		return;
	}
	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}

u64 ia64_dma_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
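
A worked example of the computation above, using invented numbers:

/* With 16KB pages (PAGE_SHIFT == 14) and 2GB of RAM, max_pfn == 1 << 17,
 * so high_totalram == 0 and low_totalram == 0x7fffc000, which rounds up
 * to a mask of 0x7fffffff -- just wide enough to cover installed memory.
 */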

u64 dma_get_required_mask(struct device *dev)
{
	return platform_dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init pcibios_init(void)
{
	set_pci_dfl_cacheline_size();
	return 0;
}

subsys_initcall(pcibios_init);