/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/byteorder.h>
#include <asm/hv_driver.h>
#include <hv/drv_pcie_rc_intf.h>


/*
 * Initialization flow and process
 * -------------------------------
 *
 * This file contains the routines to search for PCI buses,
 * enumerate the buses, and configure any attached devices.
 *
 * There are two entry points here:
 * 1) tile_pci_init
 *    This sets up the pci_controller structs, and opens the
 *    FDs to the hypervisor.  This is called from setup_arch() early
 *    in the boot process.
 * 2) pcibios_init
 *    This probes the PCI bus(es) for any attached hardware.  It's
 *    called by subsys_initcall.  All of the real work is done by the
 *    generic Linux PCI layer.
 *
 */

/*
 * This flag tells whether the platform is a TILEmpower board, which
 * needs special configuration for the PLX switch chip.
 */
int __write_once tile_plx_gen1;

static struct pci_controller controllers[TILE_NUM_PCIE];
static int num_controllers;
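/*
 * pci_scan_flags[i] is set by pcibios_init() once controller i has been
 * fully scanned, so a later re-run (for PCIe hot-plug) can skip it.
 */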
static int pci_scan_flags[TILE_NUM_PCIE];

static struct pci_ops tile_cfg_ops;


/*
 * We don't need to worry about the alignment of resources.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
			    resource_size_t size, resource_size_t align)
{
	return res->start;
}
EXPORT_SYMBOL(pcibios_align_resource);

/*
 * Open a FD to the hypervisor PCI device.
 *
 * controller_id is the controller number; config_type is 0 or 1 for
 * config0 or config1 operations.
 */
static int __devinit tile_pcie_open(int controller_id, int config_type)
{
	char filename[32];
	int fd;

	sprintf(filename, "pcie/%d/config%d", controller_id, config_type);

	fd = hv_dev_open((HV_VirtAddr)filename, 0);

	return fd;
}


/*
 * Get the IRQ numbers from the HV and set up the handlers for them.
 */
static int __devinit tile_init_irqs(int controller_id,
				 struct pci_controller *controller)
{
	char filename[32];
	int fd;
	int ret;
	int x;
	struct pcie_rc_config rc_config;

	sprintf(filename, "pcie/%d/ctl", controller_id);
	fd = hv_dev_open((HV_VirtAddr)filename, 0);
	if (fd < 0) {
		pr_err("PCI: hv_dev_open(%s) failed\n", filename);
		return -1;
	}
	ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
			   sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
	hv_dev_close(fd);
	if (ret != sizeof(rc_config)) {
		pr_err("PCI: wanted %zd bytes, got %d\n",
		       sizeof(rc_config), ret);
		return -1;
	}
	/* Record irq_base so that we can map INTx to IRQ # later. */
	controller->irq_base = rc_config.intr;

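	/* Activate the four INTA-INTD interrupt lines handed out by the HV. */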
	for (x = 0; x < 4; x++)
		tile_irq_activate(rc_config.intr + x,
				  TILE_IRQ_HW_CLEAR);

	if (rc_config.plx_gen1)
		controller->plx_gen1 = 1;

	return 0;
}

/*
 * First initialization entry point, called from setup_arch().
 *
 * Find valid controllers and fill in pci_controller structs for each
 * of them.
 *
 * Returns the number of controllers discovered.
 */
int __devinit tile_pci_init(void)
{
	int i;

	pr_info("PCI: Searching for controllers...\n");

	/* Re-init the number of PCIe controllers to support hot-plug. */
	num_controllers = 0;

	/* Do any configuration we need before using the PCIe */

	for (i = 0; i < TILE_NUM_PCIE; i++) {
		/*
		 * Only do the real config work if this controller has not
		 * already been scanned by pcibios_init(), to support PCIe
		 * hot-plug.
		 */
		if (pci_scan_flags[i] == 0) {
			int hv_cfg_fd0 = -1;
			int hv_cfg_fd1 = -1;
			int hv_mem_fd = -1;
			char name[32];
			struct pci_controller *controller;

			/*
			 * Open the fd to the HV.  If it fails then this
			 * device doesn't exist.
			 */
			hv_cfg_fd0 = tile_pcie_open(i, 0);
			if (hv_cfg_fd0 < 0)
				continue;
			hv_cfg_fd1 = tile_pcie_open(i, 1);
			if (hv_cfg_fd1 < 0) {
				pr_err("PCI: Couldn't open config fd to HV "
				    "for controller %d\n", i);
				goto err_cont;
			}

			sprintf(name, "pcie/%d/mem", i);
			hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
			if (hv_mem_fd < 0) {
				pr_err("PCI: Could not open mem fd to HV!\n");
				goto err_cont;
			}

			pr_info("PCI: Found PCI controller #%d\n", i);

			controller = &controllers[i];

			controller->index = i;
			controller->hv_cfg_fd[0] = hv_cfg_fd0;
			controller->hv_cfg_fd[1] = hv_cfg_fd1;
			controller->hv_mem_fd = hv_mem_fd;
			controller->first_busno = 0;
			controller->last_busno = 0xff;
			controller->ops = &tile_cfg_ops;

			num_controllers++;
			continue;

err_cont:
			if (hv_cfg_fd0 >= 0)
				hv_dev_close(hv_cfg_fd0);
			if (hv_cfg_fd1 >= 0)
				hv_dev_close(hv_cfg_fd1);
			if (hv_mem_fd >= 0)
				hv_dev_close(hv_mem_fd);
			continue;
		}
	}

	/*
	 * Before using the PCIe, see if we need to do any platform-specific
	 * configuration, such as the PLX switch Gen 1 issue on TILEmpower.
	 */
	for (i = 0; i < num_controllers; i++) {
		struct pci_controller *controller = &controllers[i];

		if (controller->plx_gen1)
			tile_plx_gen1 = 1;
	}

	return num_controllers;
}

/*
 * (pin - 1) converts from the PCI standard's [1:4] convention to
 * a normal [0:3] range.
 */
static int tile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct pci_controller *controller =
		(struct pci_controller *)dev->sysdata;
	return (pin - 1) + controller->irq_base;
}


static void __devinit fixup_read_and_payload_sizes(void)
{
	struct pci_dev *dev = NULL;
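	/* PCIe encodes these sizes as (128 << N) bytes: 0x1 = 256, 0x2 = 512. */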
	int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
	int max_read_size = 0x2; /* Limit to 512 byte reads. */
	u16 new_values;

	/* Scan for the smallest maximum payload size. */
	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		int pcie_caps_offset;
		u32 devcap;
		int max_payload;

		pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
		if (pcie_caps_offset == 0)
			continue;

		pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP,
				      &devcap);
		max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
		if (max_payload < smallest_max_payload)
			smallest_max_payload = max_payload;
	}

	/* Now, set the max_payload_size for all devices to that value. */
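	/*
	 * In PCI_EXP_DEVCTL the max read request size lives in bits 14:12
	 * and the max payload size in bits 7:5, hence the shifts below.
	 */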
	new_values = (max_read_size << 12) | (smallest_max_payload << 5);
	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		int pcie_caps_offset;
		u16 devctl;

		pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
		if (pcie_caps_offset == 0)
			continue;

		pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
				     &devctl);
		devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ);
		devctl |= new_values;
		pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
				      devctl);
	}
}


/*
 * Second PCI initialization entry point, called by subsys_initcall.
 *
 * The controllers have been set up by the time we get here, by a call to
 * tile_pci_init.
 */
int __devinit pcibios_init(void)
{
	int i;

	pr_info("PCI: Probing PCI hardware\n");

	/*
	 * Delay a bit in case devices aren't ready.  Some devices are
	 * known to require at least 20ms here, but we use a more
	 * conservative value.
	 */
	mdelay(250);

	/* Scan all of the recorded PCI controllers.  */
	for (i = 0; i < TILE_NUM_PCIE; i++) {
		/*
		 * Do the real pcibios init ops only if the controller was
		 * initialized successfully by tile_pci_init() and has not
		 * yet been initialized by pcibios_init(), to support PCIe
		 * hot-plug.
		 */
		if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
			struct pci_controller *controller = &controllers[i];
			struct pci_bus *bus;

			if (tile_init_irqs(i, controller)) {
				pr_err("PCI: Could not initialize IRQs\n");
				continue;
			}

			pr_info("PCI: initializing controller #%d\n", i);

			/*
			 * This comes from the generic Linux PCI driver.
			 *
			 * It reads the PCI tree for this bus into the Linux
			 * data structures.
			 *
			 * This is inlined in linux/pci.h and calls into
			 * pci_scan_bus_parented() in probe.c.
			 */
			bus = pci_scan_bus(0, controller->ops, controller);
			controller->root_bus = bus;
			controller->last_busno = bus->subordinate;
		}
	}

	/* Do machine dependent PCI interrupt routing */
	pci_fixup_irqs(pci_common_swizzle, tile_map_irq);

	/*
	 * This comes from the generic Linux PCI driver.
	 *
	 * It allocates all of the resources (I/O memory, etc)
	 * associated with the devices read in above.
	 */
	pci_assign_unassigned_resources();

	/* Configure the max_read_size and max_payload_size values. */
	fixup_read_and_payload_sizes();

	/* Record the I/O resources in the PCI controller structure. */
	for (i = 0; i < TILE_NUM_PCIE; i++) {
		/*
		 * Do the real pcibios init ops only if the controller was
		 * initialized successfully by tile_pci_init() and has not
		 * yet been initialized by pcibios_init(), to support PCIe
		 * hot-plug.
		 */
		if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
			struct pci_bus *root_bus = controllers[i].root_bus;
			struct pci_bus *next_bus;
			struct pci_dev *dev;

			list_for_each_entry(dev, &root_bus->devices, bus_list) {
				/*
				 * Find the PCI host controller, i.e. the
				 * first bridge.
				 */
				if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
					(PCI_SLOT(dev->devfn) == 0)) {
					next_bus = dev->subordinate;
					controllers[i].mem_resources[0] =
						*next_bus->resource[0];
					controllers[i].mem_resources[1] =
						 *next_bus->resource[1];
					controllers[i].mem_resources[2] =
						 *next_bus->resource[2];

					/* Setup flags. */
					pci_scan_flags[i] = 1;

					break;
				}
			}
		}
	}

	return 0;
}
subsys_initcall(pcibios_init);

/*
 * No bus fixups needed.
 */
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	/* Nothing needs to be done. */
}

/*
 * This can be called from the generic PCI layer, but doesn't need to
 * do anything.
 */
char __devinit *pcibios_setup(char *str)
{
	/* Nothing needs to be done. */
	return str;
}

/*
 * This is called from the generic Linux layer.
 */
void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}

/*
 * Enable memory and/or I/O decoding, as appropriate, for the
 * device described by the 'dev' struct.
 *
 * This is called from the generic PCI layer, and can be called
 * for bridges or endpoints.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	u8 header_type;
	int i;
	struct resource *r;

	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
		/*
		 * For bridges, we enable both memory and I/O decoding
		 * in all cases.
		 */
		cmd |= PCI_COMMAND_IO;
		cmd |= PCI_COMMAND_MEMORY;
	} else {
		/*
		 * For endpoints, we enable memory and/or I/O decoding
		 * only if they have a resource of that type.
		 */
		for (i = 0; i < 6; i++) {
			r = &dev->resource[i];
			if (r->flags & IORESOURCE_UNSET) {
				pr_err("PCI: Device %s not available "
				       "because of resource collisions\n",
				       pci_name(dev));
				return -EINVAL;
			}
			if (r->flags & IORESOURCE_IO)
				cmd |= PCI_COMMAND_IO;
			if (r->flags & IORESOURCE_MEM)
				cmd |= PCI_COMMAND_MEMORY;
		}
	}

	/*
	 * We only write the command if it changed.
	 */
	if (cmd != old_cmd)
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	return 0;
}

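/*
 * On TILE, PCI memory space is reached through the hypervisor mem fd
 * (see the _tile_read/_tile_write helpers below), so "mapping" a BAR
 * just hands back its bus address as an __iomem cookie.
 */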
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len)
		return NULL;
	if (max && len > max)
		len = max;

	if (!(flags & IORESOURCE_MEM)) {
		pr_info("PCI: Trying to map invalid resource %#lx\n", flags);
		start = 0;
	}

	return (void __iomem *)start;
}
EXPORT_SYMBOL(pci_iomap);


/****************************************************************
 *
 * Tile PCI config space read/write routines
 *
 ****************************************************************/

/*
 * These are the normal read and write ops.  They are reached through
 * the macro expansions of pci_bus_read_config_byte() etc.
 *
 * devfn is the combined PCI slot & function.
 *
 * offset is in bytes, from the start of config space for the
 * specified bus & slot.
 */

static int __devinit tile_cfg_read(struct pci_bus *bus,
				   unsigned int devfn,
				   int offset,
				   int size,
				   u32 *val)
{
	struct pci_controller *controller = bus->sysdata;
	int busnum = bus->number & 0xff;
	int slot = (devfn >> 3) & 0x1f;
	int function = devfn & 0x7;
	u32 addr;
	int config_mode = 1;

	/*
	 * There is no bridge between the Tile and bus 0, so we
	 * use config0 to talk to bus 0.
	 *
	 * If we're talking to a bus other than zero then we
	 * must have found a bridge.
	 */
	if (busnum == 0) {
		/*
		 * We fake an empty slot for (busnum == 0) && (slot > 0),
		 * since there is only one slot on bus 0.
		 */
		if (slot) {
			*val = 0xFFFFFFFF;
			return 0;
		}
		config_mode = 0;
	}

	addr = busnum << 20;		/* Bus in 27:20 */
	addr |= slot << 15;		/* Slot (device) in 19:15 */
	addr |= function << 12;		/* Function is in 14:12 */
	addr |= (offset & 0xFFF);	/* byte address in 0:11 */

	return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
			    (HV_VirtAddr)(val), size, addr);
}


/*
 * See tile_cfg_read() for relevant comments.
 * Note that "val" is the value to write, not a pointer to that value.
 */
static int __devinit tile_cfg_write(struct pci_bus *bus,
				    unsigned int devfn,
				    int offset,
				    int size,
				    u32 val)
{
	struct pci_controller *controller = bus->sysdata;
	int busnum = bus->number & 0xff;
	int slot = (devfn >> 3) & 0x1f;
	int function = devfn & 0x7;
	u32 addr;
	int config_mode = 1;
	HV_VirtAddr valp = (HV_VirtAddr)&val;

	/*
	 * For bus 0 slot 0 we use config 0 accesses.
	 */
	if (busnum == 0) {
		/*
		 * We fake an empty slot for (busnum == 0) && (slot > 0),
		 * since there is only one slot on bus 0.
		 */
		if (slot)
			return 0;
		config_mode = 0;
	}

	addr = busnum << 20;		/* Bus in 27:20 */
	addr |= slot << 15;		/* Slot (device) in 19:15 */
	addr |= function << 12;		/* Function is in 14:12 */
	addr |= (offset & 0xFFF);	/* byte address in 0:11 */

#ifdef __BIG_ENDIAN
	/* Point to the correct part of the 32-bit "val". */
	valp += 4 - size;
#endif

	return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
			     valp, size, addr);
}


static struct pci_ops tile_cfg_ops = {
	.read =         tile_cfg_read,
	.write =        tile_cfg_write,
};


/*
 * In the following, each PCI controller's mem_resources[1]
 * represents its (non-prefetchable) PCI memory resource.
 * mem_resources[0] and mem_resources[2] refer to its PCI I/O and
 * prefetchable PCI memory resources, respectively.
 * For more details, see pci_setup_bridge() in setup-bus.c.
 * By comparing the target PCI memory address against the
 * end address of controller 0, we can determine the controller
 * that should accept the PCI memory access.
 */
#define TILE_READ(size, type)						\
type _tile_read##size(unsigned long addr)				\
{									\
	type val;							\
	int idx = 0;							\
	if (addr > controllers[0].mem_resources[1].end &&		\
	    addr > controllers[0].mem_resources[2].end)			\
		idx = 1;                                                \
	if (hv_dev_pread(controllers[idx].hv_mem_fd, 0,			\
			 (HV_VirtAddr)(&val), sizeof(type), addr))	\
		pr_err("PCI: read %zd bytes at 0x%lX failed\n",		\
		       sizeof(type), addr);				\
	return val;							\
}									\
EXPORT_SYMBOL(_tile_read##size)

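/* Instantiate the byte/word/long/quad read accessors for PCI memory space. */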
TILE_READ(b, u8);
TILE_READ(w, u16);
TILE_READ(l, u32);
TILE_READ(q, u64);

#define TILE_WRITE(size, type)						\
void _tile_write##size(type val, unsigned long addr)			\
{									\
	int idx = 0;							\
	if (addr > controllers[0].mem_resources[1].end &&		\
	    addr > controllers[0].mem_resources[2].end)			\
		idx = 1;                                                \
	if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0,		\
			  (HV_VirtAddr)(&val), sizeof(type), addr))	\
		pr_err("PCI: write %zd bytes at 0x%lX failed\n",	\
		       sizeof(type), addr);				\
}									\
EXPORT_SYMBOL(_tile_write##size)

TILE_WRITE(b, u8);
TILE_WRITE(w, u16);
TILE_WRITE(l, u32);
TILE_WRITE(q, u64);