v3.5.6
  1/*
  2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
  3 *
  4 *   This program is free software; you can redistribute it and/or
  5 *   modify it under the terms of the GNU General Public License
  6 *   as published by the Free Software Foundation, version 2.
  7 *
  8 *   This program is distributed in the hope that it will be useful, but
  9 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 10 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 11 *   NON INFRINGEMENT.  See the GNU General Public License for
 12 *   more details.
 13 */
 14
 15#include <linux/kernel.h>
 16#include <linux/pci.h>
 17#include <linux/delay.h>
 18#include <linux/string.h>
 19#include <linux/init.h>
 20#include <linux/capability.h>
 21#include <linux/sched.h>
 22#include <linux/errno.h>
 23#include <linux/bootmem.h>
 24#include <linux/irq.h>
 25#include <linux/io.h>
 26#include <linux/uaccess.h>
 27#include <linux/export.h>
 28
 29#include <asm/processor.h>
 30#include <asm/sections.h>
 31#include <asm/byteorder.h>
 32#include <asm/hv_driver.h>
 33#include <hv/drv_pcie_rc_intf.h>
 34
 35
 36/*
 37 * Initialization flow and process
 38 * -------------------------------
 39 *
 40 * This file contains the routines to search for PCI buses,
 41 * enumerate the buses, and configure any attached devices.
 42 *
 43 * There are two entry points here:
 44 * 1) tile_pci_init
 45 *    This sets up the pci_controller structs, and opens the
 46 *    FDs to the hypervisor.  This is called from setup_arch() early
 47 *    in the boot process.
 48 * 2) pcibios_init
 49 *    This probes the PCI bus(es) for any attached hardware.  It's
 50 *    called by subsys_initcall.  All of the real work is done by the
 51 *    generic Linux PCI layer.
 52 *
 53 */
 54
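To make the two entry points easier to follow, here is a rough editorial sketch of the call order; the setup_arch() hook itself lives in the Tile architecture setup code, not in this file:

/*
 * Illustrative call order (editorial sketch, not part of the file):
 *
 *   setup_arch()
 *     tile_pci_init()      -- open hypervisor fds, fill in controllers[]
 *   ...
 *   do_initcalls()
 *     pcibios_init()       -- scan the buses, assign resources
 *                             (registered via subsys_initcall() below)
 */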
 55/*
 56 * This flag indicates whether the platform is a TILEmpower board, which
 57 * needs special configuration for the PLX switch chip.
 58 */
 59int __write_once tile_plx_gen1;
 60
 61static struct pci_controller controllers[TILE_NUM_PCIE];
 62static int num_controllers;
 63static int pci_scan_flags[TILE_NUM_PCIE];
 64
 65static struct pci_ops tile_cfg_ops;
 66
 67
 68/*
 69 * We don't need to worry about the alignment of resources.
 70 */
 71resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 72			    resource_size_t size, resource_size_t align)
 73{
 74	return res->start;
 75}
 76EXPORT_SYMBOL(pcibios_align_resource);
 77
 78/*
 79 * Open a FD to the hypervisor PCI device.
 80 *
 81 * controller_id is the controller number; config_type is 0 or 1 for
 82 * config0 or config1 operations.
 83 */
 84static int __devinit tile_pcie_open(int controller_id, int config_type)
 85{
 86	char filename[32];
 87	int fd;
 88
 89	sprintf(filename, "pcie/%d/config%d", controller_id, config_type);
 90
 91	fd = hv_dev_open((HV_VirtAddr)filename, 0);
 92
 93	return fd;
 94}
 95
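For illustration (editorial), the device path built here is just "pcie/<controller>/config<type>":

/*
 * Example: tile_pcie_open(0, 1) opens the hypervisor device
 * "pcie/0/config1", and tile_pcie_open(2, 0) opens "pcie/2/config0".
 * A negative return value simply propagates hv_dev_open()'s error,
 * which tile_pci_init() treats as "controller not present".
 */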
 96
 97/*
 98 * Get the IRQ numbers from the HV and set up the handlers for them.
 99 */
100static int __devinit tile_init_irqs(int controller_id,
101				 struct pci_controller *controller)
102{
103	char filename[32];
104	int fd;
105	int ret;
106	int x;
107	struct pcie_rc_config rc_config;
108
109	sprintf(filename, "pcie/%d/ctl", controller_id);
110	fd = hv_dev_open((HV_VirtAddr)filename, 0);
111	if (fd < 0) {
112		pr_err("PCI: hv_dev_open(%s) failed\n", filename);
113		return -1;
114	}
115	ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
116			   sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
117	hv_dev_close(fd);
118	if (ret != sizeof(rc_config)) {
119		pr_err("PCI: wanted %zd bytes, got %d\n",
120		       sizeof(rc_config), ret);
121		return -1;
122	}
123	/* Record irq_base so that we can map INTx to IRQ # later. */
124	controller->irq_base = rc_config.intr;
125
126	for (x = 0; x < 4; x++)
127		tile_irq_activate(rc_config.intr + x,
128				  TILE_IRQ_HW_CLEAR);
129
130	if (rc_config.plx_gen1)
131		controller->plx_gen1 = 1;
132
133	return 0;
134}
135
136/*
137 * First initialization entry point, called from setup_arch().
138 *
139 * Find valid controllers and fill in pci_controller structs for each
140 * of them.
141 *
142 * Returns the number of controllers discovered.
143 */
144int __init tile_pci_init(void)
145{
146	int i;
147
148	pr_info("PCI: Searching for controllers...\n");
149
150	/* Re-init number of PCIe controllers to support hot-plug feature. */
151	num_controllers = 0;
152
153	/* Do any configuration we need before using the PCIe */
154
155	for (i = 0; i < TILE_NUM_PCIE; i++) {
156		/*
157		 * Only set up controllers that pcibios_init() has not
158		 * already scanned, to support PCIe hot-plug.
159		 */
160		if (pci_scan_flags[i] == 0) {
161			int hv_cfg_fd0 = -1;
162			int hv_cfg_fd1 = -1;
163			int hv_mem_fd = -1;
164			char name[32];
165			struct pci_controller *controller;
166
167			/*
168			 * Open the fd to the HV.  If it fails then this
169			 * device doesn't exist.
170			 */
171			hv_cfg_fd0 = tile_pcie_open(i, 0);
172			if (hv_cfg_fd0 < 0)
173				continue;
174			hv_cfg_fd1 = tile_pcie_open(i, 1);
175			if (hv_cfg_fd1 < 0) {
176				pr_err("PCI: Couldn't open config fd to HV "
177				    "for controller %d\n", i);
178				goto err_cont;
179			}
180
181			sprintf(name, "pcie/%d/mem", i);
182			hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
183			if (hv_mem_fd < 0) {
184				pr_err("PCI: Could not open mem fd to HV!\n");
185				goto err_cont;
186			}
187
188			pr_info("PCI: Found PCI controller #%d\n", i);
189
190			controller = &controllers[i];
191
192			controller->index = i;
193			controller->hv_cfg_fd[0] = hv_cfg_fd0;
194			controller->hv_cfg_fd[1] = hv_cfg_fd1;
195			controller->hv_mem_fd = hv_mem_fd;
196			controller->first_busno = 0;
197			controller->last_busno = 0xff;
198			controller->ops = &tile_cfg_ops;
199
200			num_controllers++;
201			continue;
202
203err_cont:
204			if (hv_cfg_fd0 >= 0)
205				hv_dev_close(hv_cfg_fd0);
206			if (hv_cfg_fd1 >= 0)
207				hv_dev_close(hv_cfg_fd1);
208			if (hv_mem_fd >= 0)
209				hv_dev_close(hv_mem_fd);
210			continue;
211		}
212	}
213
214	/*
215	 * Before using the PCIe, see if we need to do any platform-specific
216	 * configuration, such as the PLX switch Gen 1 issue on TILEmpower.
217	 */
218	for (i = 0; i < num_controllers; i++) {
219		struct pci_controller *controller = &controllers[i];
220
221		if (controller->plx_gen1)
222			tile_plx_gen1 = 1;
223	}
224
225	return num_controllers;
226}
227
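As a concrete illustration (editorial, assuming a part on which only port 0 is wired up), the state left behind by tile_pci_init() would be:

/*
 * controllers[0]:   hv_cfg_fd[0/1] and hv_mem_fd valid, ops set,
 *                   first_busno = 0, last_busno = 0xff
 * other entries:    left untouched (all zeroes)
 * pci_scan_flags[]: still all zero; pcibios_init() sets an entry to 1
 *                   once the bus behind that controller is scanned.
 * return value:     num_controllers == 1
 */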
228/*
229 * (pin - 1) converts from the PCI standard's [1:4] convention to
230 * a normal [0:3] range.
231 */
232static int tile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
233{
234	struct pci_controller *controller =
235		(struct pci_controller *)dev->sysdata;
236	return (pin - 1) + controller->irq_base;
237}
238
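A worked example of the pin mapping (editorial; the irq_base value is assumed purely for illustration):

/*
 * With controller->irq_base == 40 (i.e. rc_config.intr was 40):
 *   pin 1 (INTA) -> 40 + (1 - 1) = IRQ 40
 *   pin 2 (INTB) -> 40 + (2 - 1) = IRQ 41
 *   pin 3 (INTC) -> 40 + (3 - 1) = IRQ 42
 *   pin 4 (INTD) -> 40 + (4 - 1) = IRQ 43
 * These are exactly the four IRQs tile_init_irqs() activated above.
 */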
239
240static void __devinit fixup_read_and_payload_sizes(void)
241{
242	struct pci_dev *dev = NULL;
243	int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
244	int max_read_size = 0x2; /* Limit to 512 byte reads. */
245	u16 new_values;
246
247	/* Scan for the smallest maximum payload size. */
248	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
249		int pcie_caps_offset;
250		u32 devcap;
251		int max_payload;
252
253		pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
254		if (pcie_caps_offset == 0)
255			continue;
256
257		pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP,
258				      &devcap);
259		max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
260		if (max_payload < smallest_max_payload)
261			smallest_max_payload = max_payload;
262	}
263
264	/* Now, set the max_payload_size for all devices to that value. */
265	new_values = (max_read_size << 12) | (smallest_max_payload << 5);
266	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
267		int pcie_caps_offset;
268		u16 devctl;
269
270		pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
271		if (pcie_caps_offset == 0)
272			continue;
273
274		pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
275				     &devctl);
276		devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ);
277		devctl |= new_values;
278		pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
279				      devctl);
280	}
281}
282
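A worked example of the value written above (editorial): with max_read_size = 0x2 and a smallest_max_payload of 0x1, every device's Device Control register ends up with:

/*
 * new_values = (0x2 << 12) | (0x1 << 5) = 0x2020
 *   bits 14:12 (PCI_EXP_DEVCTL_READRQ)  = 2 -> 512-byte max read request
 *   bits  7:5  (PCI_EXP_DEVCTL_PAYLOAD) = 1 -> 256-byte max payload
 * The old PAYLOAD/READRQ bits are cleared from devctl first, so only
 * these two fields change.
 */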
283
284/*
285 * Second PCI initialization entry point, called by subsys_initcall.
286 *
287 * The controllers have been set up by the time we get here, by a call to
288 * tile_pci_init.
289 */
290int __init pcibios_init(void)
291{
292	int i;
293
294	pr_info("PCI: Probing PCI hardware\n");
295
296	/*
297	 * Delay a bit in case devices aren't ready.  Some devices are
298	 * known to require at least 20ms here, but we use a more
299	 * conservative value.
300	 */
301	mdelay(250);
302
303	/* Scan all of the recorded PCI controllers.  */
304	for (i = 0; i < TILE_NUM_PCIE; i++) {
305		/*
306		 * Do real pcibios init ops if the controller is initialized
307		 * by tile_pci_init() successfully and not initialized by
308		 * pcibios_init() yet to support PCIe hot-plug.
309		 */
310		if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
311			struct pci_controller *controller = &controllers[i];
312			struct pci_bus *bus;
313
314			if (tile_init_irqs(i, controller)) {
315				pr_err("PCI: Could not initialize IRQs\n");
316				continue;
317			}
318
319			pr_info("PCI: initializing controller #%d\n", i);
320
321			/*
322			 * This comes from the generic Linux PCI driver.
323			 *
324			 * It reads the PCI tree for this bus into the Linux
325			 * data structures.
326			 *
327			 * This is inlined in linux/pci.h and calls into
328			 * pci_scan_bus_parented() in probe.c.
329			 */
330			bus = pci_scan_bus(0, controller->ops, controller);
331			controller->root_bus = bus;
332			controller->last_busno = bus->subordinate;
333		}
334	}
335
336	/* Do machine dependent PCI interrupt routing */
337	pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
338
339	/*
340	 * This comes from the generic Linux PCI driver.
341	 *
342	 * It allocates all of the resources (I/O memory, etc)
343	 * associated with the devices read in above.
344	 */
345	pci_assign_unassigned_resources();
346
347	/* Configure the max_read_size and max_payload_size values. */
348	fixup_read_and_payload_sizes();
349
350	/* Record the I/O resources in the PCI controller structure. */
351	for (i = 0; i < TILE_NUM_PCIE; i++) {
352		/*
353		 * Do real pcibios init ops if the controller is initialized
354		 * by tile_pci_init() successfully and not initialized by
355		 * pcibios_init() yet to support PCIe hot-plug.
356		 */
357		if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
358			struct pci_bus *root_bus = controllers[i].root_bus;
359			struct pci_bus *next_bus;
360			struct pci_dev *dev;
361
362			list_for_each_entry(dev, &root_bus->devices, bus_list) {
363				/*
364				 * Find the PCI host controller, i.e. the 1st
365				 * bridge.
366				 */
367				if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
368					(PCI_SLOT(dev->devfn) == 0)) {
369					next_bus = dev->subordinate;
370					controllers[i].mem_resources[0] =
371						*next_bus->resource[0];
372					controllers[i].mem_resources[1] =
373						 *next_bus->resource[1];
374					controllers[i].mem_resources[2] =
375						 *next_bus->resource[2];
376
377					/* Setup flags. */
378					pci_scan_flags[i] = 1;
379
380					break;
381				}
382			}
383		}
384	}
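	/*
	 * Editorial worked example of the match in the loop above: the
	 * root port appears as the device at devfn 0x00 on the root bus
	 * with a class code such as 0x060400 (the value is illustrative),
	 * so:
	 *   dev->class >> 8  = 0x0604 == PCI_CLASS_BRIDGE_PCI
	 *   PCI_SLOT(0x00)   = 0
	 * and its secondary bus's windows are copied into
	 * controllers[i].mem_resources[0..2] before pci_scan_flags[i]
	 * is set.
	 */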
385
386	return 0;
387}
388subsys_initcall(pcibios_init);
389
390/*
391 * No bus fixups needed.
392 */
393void __devinit pcibios_fixup_bus(struct pci_bus *bus)
394{
395	/* Nothing needs to be done. */
396}
397
398void pcibios_set_master(struct pci_dev *dev)
399{
400	/* No special bus mastering setup handling. */
401}
402
403/*
404 * This can be called from the generic PCI layer, but doesn't need to
405 * do anything.
406 */
407char __devinit *pcibios_setup(char *str)
408{
409	/* Nothing needs to be done. */
410	return str;
411}
412
413/*
414 * This is called from the generic Linux layer.
415 */
416void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
417{
418	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
419}
420
421/*
422 * Enable memory and/or address decoding, as appropriate, for the
423 * device described by the 'dev' struct.
424 *
425 * This is called from the generic PCI layer, and can be called
426 * for bridges or endpoints.
427 */
428int pcibios_enable_device(struct pci_dev *dev, int mask)
429{
430	u16 cmd, old_cmd;
431	u8 header_type;
432	int i;
433	struct resource *r;
434
435	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
436
437	pci_read_config_word(dev, PCI_COMMAND, &cmd);
438	old_cmd = cmd;
439	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
440		/*
441		 * For bridges, we enable both memory and I/O decoding
442		 * in all cases.
443		 */
444		cmd |= PCI_COMMAND_IO;
445		cmd |= PCI_COMMAND_MEMORY;
446	} else {
447		/*
448		 * For endpoints, we enable memory and/or I/O decoding
449		 * only if they have a memory resource of that type.
450		 */
451		for (i = 0; i < 6; i++) {
452			r = &dev->resource[i];
453			if (r->flags & IORESOURCE_UNSET) {
454				pr_err("PCI: Device %s not available "
455				       "because of resource collisions\n",
456				       pci_name(dev));
457				return -EINVAL;
458			}
459			if (r->flags & IORESOURCE_IO)
460				cmd |= PCI_COMMAND_IO;
461			if (r->flags & IORESOURCE_MEM)
462				cmd |= PCI_COMMAND_MEMORY;
463		}
464	}
465
466	/*
467	 * We only write the command if it changed.
468	 */
469	if (cmd != old_cmd)
470		pci_write_config_word(dev, PCI_COMMAND, cmd);
471	return 0;
472}
473
474/****************************************************************
475 *
476 * Tile PCI config space read/write routines
477 *
478 ****************************************************************/
479
480/*
 481 * These are the normal read and write ops.
 482 * They are expanded with macros from pci_bus_read_config_byte() etc.
483 *
484 * devfn is the combined PCI slot & function.
485 *
486 * offset is in bytes, from the start of config space for the
487 * specified bus & slot.
488 */
489
490static int __devinit tile_cfg_read(struct pci_bus *bus,
491				   unsigned int devfn,
492				   int offset,
493				   int size,
494				   u32 *val)
495{
496	struct pci_controller *controller = bus->sysdata;
497	int busnum = bus->number & 0xff;
498	int slot = (devfn >> 3) & 0x1f;
499	int function = devfn & 0x7;
500	u32 addr;
501	int config_mode = 1;
502
503	/*
504	 * There is no bridge between the Tile and bus 0, so we
505	 * use config0 to talk to bus 0.
506	 *
507	 * If we're talking to a bus other than zero then we
508	 * must have found a bridge.
509	 */
510	if (busnum == 0) {
511		/*
512		 * We fake an empty slot for (busnum == 0) && (slot > 0),
513		 * since there is only one slot on bus 0.
514		 */
515		if (slot) {
516			*val = 0xFFFFFFFF;
517			return 0;
518		}
519		config_mode = 0;
520	}
521
522	addr = busnum << 20;		/* Bus in 27:20 */
523	addr |= slot << 15;		/* Slot (device) in 19:15 */
524	addr |= function << 12;		/* Function is in 14:12 */
525	addr |= (offset & 0xFFF);	/* byte address in 0:11 */
526
527	return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
528			    (HV_VirtAddr)(val), size, addr);
529}
530
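A worked example of the address encoding (editorial): a 4-byte read of the vendor/device ID (offset 0) of bus 1, device 2, function 0 builds:

/*
 * addr = (1 << 20) | (2 << 15) | (0 << 12) | 0x000 = 0x110000
 *          bus 1      slot 2      func 0     offset
 * Since busnum != 0, config_mode stays 1, i.e. the request goes out on
 * the controller's config1 fd; only bus 0, slot 0 uses the config0 fd.
 */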
531
532/*
533 * See tile_cfg_read() for relevant comments.
534 * Note that "val" is the value to write, not a pointer to that value.
535 */
536static int __devinit tile_cfg_write(struct pci_bus *bus,
537				    unsigned int devfn,
538				    int offset,
539				    int size,
540				    u32 val)
541{
542	struct pci_controller *controller = bus->sysdata;
543	int busnum = bus->number & 0xff;
544	int slot = (devfn >> 3) & 0x1f;
545	int function = devfn & 0x7;
546	u32 addr;
547	int config_mode = 1;
548	HV_VirtAddr valp = (HV_VirtAddr)&val;
549
550	/*
551	 * For bus 0 slot 0 we use config 0 accesses.
552	 */
553	if (busnum == 0) {
554		/*
555		 * We fake an empty slot for (busnum == 0) && (slot > 0),
556		 * since there is only one slot on bus 0.
557		 */
558		if (slot)
559			return 0;
560		config_mode = 0;
561	}
562
563	addr = busnum << 20;		/* Bus in 27:20 */
564	addr |= slot << 15;		/* Slot (device) in 19:15 */
565	addr |= function << 12;		/* Function is in 14:12 */
566	addr |= (offset & 0xFFF);	/* byte address in 0:11 */
567
568#ifdef __BIG_ENDIAN
569	/* Point to the correct part of the 32-bit "val". */
570	valp += 4 - size;
571#endif
572
573	return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
574			     valp, size, addr);
575}
576
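The __BIG_ENDIAN adjustment above deserves a worked example (editorial): for a 1-byte write of 0xAB, the value sits in the low-order byte of the 32-bit val, which on a big-endian Tile is the last of its four bytes in memory:

/*
 * val = 0x000000AB stored big-endian:
 *   byte offset:  0     1     2     3
 *   contents:     0x00  0x00  0x00  0xAB
 * valp += 4 - size  ->  valp += 3, so the hv_dev_pwrite() of 'size'
 * bytes starts at the byte that actually holds the value.  On a
 * little-endian build the value already starts at offset 0, so no
 * adjustment is needed.
 */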
577
578static struct pci_ops tile_cfg_ops = {
579	.read =         tile_cfg_read,
580	.write =        tile_cfg_write,
581};
582
583
584/*
585 * In the following, each PCI controller's mem_resources[1]
586 * represents its (non-prefetchable) PCI memory resource.
587 * mem_resources[0] and mem_resources[2] refer to its PCI I/O and
588 * prefetchable PCI memory resources, respectively.
589 * For more details, see pci_setup_bridge() in setup-bus.c.
590 * By comparing the target PCI memory address against the
591 * end address of controller 0, we can determine the controller
592 * that should accept the PCI memory access.
593 */
594#define TILE_READ(size, type)						\
595type _tile_read##size(unsigned long addr)				\
596{									\
597	type val;							\
598	int idx = 0;							\
599	if (addr > controllers[0].mem_resources[1].end &&		\
600	    addr > controllers[0].mem_resources[2].end)			\
601		idx = 1;                                                \
602	if (hv_dev_pread(controllers[idx].hv_mem_fd, 0,			\
603			 (HV_VirtAddr)(&val), sizeof(type), addr))	\
604		pr_err("PCI: read %zd bytes at 0x%lX failed\n",		\
605		       sizeof(type), addr);				\
606	return val;							\
607}									\
608EXPORT_SYMBOL(_tile_read##size)
609
610TILE_READ(b, u8);
611TILE_READ(w, u16);
612TILE_READ(l, u32);
613TILE_READ(q, u64);
614
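For readability (editorial), the TILE_READ(l, u32) instantiation above expands to roughly the following function, plus an EXPORT_SYMBOL(_tile_readl); TILE_WRITE below works the same way with hv_dev_pwrite():

u32 _tile_readl(unsigned long addr)
{
	u32 val;
	int idx = 0;

	/* Addresses beyond controller 0's windows go to controller 1. */
	if (addr > controllers[0].mem_resources[1].end &&
	    addr > controllers[0].mem_resources[2].end)
		idx = 1;
	if (hv_dev_pread(controllers[idx].hv_mem_fd, 0,
			 (HV_VirtAddr)(&val), sizeof(u32), addr))
		pr_err("PCI: read %zd bytes at 0x%lX failed\n",
		       sizeof(u32), addr);
	return val;
}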
615#define TILE_WRITE(size, type)						\
616void _tile_write##size(type val, unsigned long addr)			\
617{									\
618	int idx = 0;							\
619	if (addr > controllers[0].mem_resources[1].end &&		\
620	    addr > controllers[0].mem_resources[2].end)			\
621		idx = 1;                                                \
622	if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0,		\
623			  (HV_VirtAddr)(&val), sizeof(type), addr))	\
624		pr_err("PCI: write %zd bytes at 0x%lX failed\n",	\
625		       sizeof(type), addr);				\
626}									\
627EXPORT_SYMBOL(_tile_write##size)
628
629TILE_WRITE(b, u8);
630TILE_WRITE(w, u16);
631TILE_WRITE(l, u32);
632TILE_WRITE(q, u64);
v4.6
  1/*
  2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
  3 *
  4 *   This program is free software; you can redistribute it and/or
  5 *   modify it under the terms of the GNU General Public License
  6 *   as published by the Free Software Foundation, version 2.
  7 *
  8 *   This program is distributed in the hope that it will be useful, but
  9 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 10 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 11 *   NON INFRINGEMENT.  See the GNU General Public License for
 12 *   more details.
 13 */
 14
 15#include <linux/kernel.h>
 16#include <linux/pci.h>
 17#include <linux/delay.h>
 18#include <linux/string.h>
 19#include <linux/init.h>
 20#include <linux/capability.h>
 21#include <linux/sched.h>
 22#include <linux/errno.h>
 23#include <linux/irq.h>
 24#include <linux/io.h>
 25#include <linux/uaccess.h>
 26#include <linux/export.h>
 27
 28#include <asm/processor.h>
 29#include <asm/sections.h>
 30#include <asm/byteorder.h>
 31#include <asm/hv_driver.h>
 32#include <hv/drv_pcie_rc_intf.h>
 33
 34
 35/*
 36 * Initialization flow and process
 37 * -------------------------------
 38 *
 39 * This file contains the routines to search for PCI buses,
 40 * enumerate the buses, and configure any attached devices.
 41 *
 42 * There are two entry points here:
 43 * 1) tile_pci_init
 44 *    This sets up the pci_controller structs, and opens the
 45 *    FDs to the hypervisor.  This is called from setup_arch() early
 46 *    in the boot process.
 47 * 2) pcibios_init
 48 *    This probes the PCI bus(es) for any attached hardware.  It's
 49 *    called by subsys_initcall.  All of the real work is done by the
 50 *    generic Linux PCI layer.
 51 *
 52 */
 53
 54static int pci_probe = 1;
 55
 56/*
 57 * This flag indicates whether the platform is a TILEmpower board, which
 58 * needs special configuration for the PLX switch chip.
 59 */
 60int __write_once tile_plx_gen1;
 61
 62static struct pci_controller controllers[TILE_NUM_PCIE];
 63static int num_controllers;
 64static int pci_scan_flags[TILE_NUM_PCIE];
 65
 66static struct pci_ops tile_cfg_ops;
 67
 68
 69/*
 70 * We don't need to worry about the alignment of resources.
 71 */
 72resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 73			    resource_size_t size, resource_size_t align)
 74{
 75	return res->start;
 76}
 77EXPORT_SYMBOL(pcibios_align_resource);
 78
 79/*
 80 * Open a FD to the hypervisor PCI device.
 81 *
 82 * controller_id is the controller number; config_type is 0 or 1 for
 83 * config0 or config1 operations.
 84 */
 85static int tile_pcie_open(int controller_id, int config_type)
 86{
 87	char filename[32];
 88	int fd;
 89
 90	sprintf(filename, "pcie/%d/config%d", controller_id, config_type);
 91
 92	fd = hv_dev_open((HV_VirtAddr)filename, 0);
 93
 94	return fd;
 95}
 96
 97
 98/*
 99 * Get the IRQ numbers from the HV and set up the handlers for them.
100 */
101static int tile_init_irqs(int controller_id, struct pci_controller *controller)
102{
103	char filename[32];
104	int fd;
105	int ret;
106	int x;
107	struct pcie_rc_config rc_config;
108
109	sprintf(filename, "pcie/%d/ctl", controller_id);
110	fd = hv_dev_open((HV_VirtAddr)filename, 0);
111	if (fd < 0) {
112		pr_err("PCI: hv_dev_open(%s) failed\n", filename);
113		return -1;
114	}
115	ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
116			   sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
117	hv_dev_close(fd);
118	if (ret != sizeof(rc_config)) {
119		pr_err("PCI: wanted %zd bytes, got %d\n",
120		       sizeof(rc_config), ret);
121		return -1;
122	}
123	/* Record irq_base so that we can map INTx to IRQ # later. */
124	controller->irq_base = rc_config.intr;
125
126	for (x = 0; x < 4; x++)
127		tile_irq_activate(rc_config.intr + x,
128				  TILE_IRQ_HW_CLEAR);
129
130	if (rc_config.plx_gen1)
131		controller->plx_gen1 = 1;
132
133	return 0;
134}
135
136/*
137 * First initialization entry point, called from setup_arch().
138 *
139 * Find valid controllers and fill in pci_controller structs for each
140 * of them.
141 *
142 * Returns the number of controllers discovered.
143 */
144int __init tile_pci_init(void)
145{
146	int i;
147
148	if (!pci_probe) {
149		pr_info("PCI: disabled by boot argument\n");
150		return 0;
151	}
152
153	pr_info("PCI: Searching for controllers...\n");
154
155	/* Re-init number of PCIe controllers to support hot-plug feature. */
156	num_controllers = 0;
157
158	/* Do any configuration we need before using the PCIe */
159
160	for (i = 0; i < TILE_NUM_PCIE; i++) {
161		/*
162		 * Only set up controllers that pcibios_init() has not
163		 * already scanned, to support PCIe hot-plug.
164		 */
165		if (pci_scan_flags[i] == 0) {
166			int hv_cfg_fd0 = -1;
167			int hv_cfg_fd1 = -1;
168			int hv_mem_fd = -1;
169			char name[32];
170			struct pci_controller *controller;
171
172			/*
173			 * Open the fd to the HV.  If it fails then this
174			 * device doesn't exist.
175			 */
176			hv_cfg_fd0 = tile_pcie_open(i, 0);
177			if (hv_cfg_fd0 < 0)
178				continue;
179			hv_cfg_fd1 = tile_pcie_open(i, 1);
180			if (hv_cfg_fd1 < 0) {
181				pr_err("PCI: Couldn't open config fd to HV for controller %d\n",
182				       i);
183				goto err_cont;
184			}
185
186			sprintf(name, "pcie/%d/mem", i);
187			hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
188			if (hv_mem_fd < 0) {
189				pr_err("PCI: Could not open mem fd to HV!\n");
190				goto err_cont;
191			}
192
193			pr_info("PCI: Found PCI controller #%d\n", i);
194
195			controller = &controllers[i];
196
197			controller->index = i;
198			controller->hv_cfg_fd[0] = hv_cfg_fd0;
199			controller->hv_cfg_fd[1] = hv_cfg_fd1;
200			controller->hv_mem_fd = hv_mem_fd;
201			controller->last_busno = 0xff;
202			controller->ops = &tile_cfg_ops;
203
204			num_controllers++;
205			continue;
206
207err_cont:
208			if (hv_cfg_fd0 >= 0)
209				hv_dev_close(hv_cfg_fd0);
210			if (hv_cfg_fd1 >= 0)
211				hv_dev_close(hv_cfg_fd1);
212			if (hv_mem_fd >= 0)
213				hv_dev_close(hv_mem_fd);
214			continue;
215		}
216	}
217
218	/*
219	 * Before using the PCIe, see if we need to do any platform-specific
220	 * configuration, such as the PLX switch Gen 1 issue on TILEmpower.
221	 */
222	for (i = 0; i < num_controllers; i++) {
223		struct pci_controller *controller = &controllers[i];
224
225		if (controller->plx_gen1)
226			tile_plx_gen1 = 1;
227	}
228
229	return num_controllers;
230}
231
232/*
233 * (pin - 1) converts from the PCI standard's [1:4] convention to
234 * a normal [0:3] range.
235 */
236static int tile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
237{
238	struct pci_controller *controller =
239		(struct pci_controller *)dev->sysdata;
240	return (pin - 1) + controller->irq_base;
241}
242
243
244static void fixup_read_and_payload_sizes(void)
245{
246	struct pci_dev *dev = NULL;
247	int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
248	int max_read_size = PCI_EXP_DEVCTL_READRQ_512B;
249	u16 new_values;
250
251	/* Scan for the smallest maximum payload size. */
252	for_each_pci_dev(dev) {
253		if (!pci_is_pcie(dev))
254			continue;
255
256		if (dev->pcie_mpss < smallest_max_payload)
257			smallest_max_payload = dev->pcie_mpss;
258	}
259
260	/* Now, set the max_payload_size for all devices to that value. */
261	new_values = max_read_size | (smallest_max_payload << 5);
262	for_each_pci_dev(dev)
263		pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
264				PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
265				new_values);
266}
267
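As a cross-check against the v3.5.6 version above (editorial; the numeric value of the READRQ constant is quoted from the standard PCI_EXP_DEVCTL field layout and should be verified against pci_regs.h):

/*
 * PCI_EXP_DEVCTL_READRQ_512B is the READRQ field (bits 14:12) set to 2,
 * i.e. 0x2000, so:
 *   new_values = 0x2000 | (0x1 << 5) = 0x2020
 * the same value the older open-coded
 * (max_read_size << 12) | (smallest_max_payload << 5) produced.
 * pcie_capability_clear_and_set_word() then clears PAYLOAD and READRQ
 * and sets these bits in a single read-modify-write of Device Control.
 */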
268
269/*
270 * Second PCI initialization entry point, called by subsys_initcall.
271 *
272 * The controllers have been set up by the time we get here, by a call to
273 * tile_pci_init.
274 */
275int __init pcibios_init(void)
276{
277	int i;
278
279	pr_info("PCI: Probing PCI hardware\n");
280
281	/*
282	 * Delay a bit in case devices aren't ready.  Some devices are
283	 * known to require at least 20ms here, but we use a more
284	 * conservative value.
285	 */
286	msleep(250);
287
288	/* Scan all of the recorded PCI controllers.  */
289	for (i = 0; i < TILE_NUM_PCIE; i++) {
290		/*
291		 * Do real pcibios init ops if the controller is initialized
292		 * by tile_pci_init() successfully and not initialized by
293		 * pcibios_init() yet to support PCIe hot-plug.
294		 */
295		if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
296			struct pci_controller *controller = &controllers[i];
297			struct pci_bus *bus;
298			LIST_HEAD(resources);
299
300			if (tile_init_irqs(i, controller)) {
301				pr_err("PCI: Could not initialize IRQs\n");
302				continue;
303			}
304
305			pr_info("PCI: initializing controller #%d\n", i);
306
307			pci_add_resource(&resources, &ioport_resource);
308			pci_add_resource(&resources, &iomem_resource);
309			bus = pci_scan_root_bus(NULL, 0, controller->ops,
310						controller, &resources);
311			controller->root_bus = bus;
312			controller->last_busno = bus->busn_res.end;
313		}
314	}
315
316	/* Do machine dependent PCI interrupt routing */
317	pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
318
319	/*
320	 * This comes from the generic Linux PCI driver.
321	 *
322	 * It allocates all of the resources (I/O memory, etc)
323	 * associated with the devices read in above.
324	 */
325	pci_assign_unassigned_resources();
326
327	/* Configure the max_read_size and max_payload_size values. */
328	fixup_read_and_payload_sizes();
329
330	/* Record the I/O resources in the PCI controller structure. */
331	for (i = 0; i < TILE_NUM_PCIE; i++) {
332		/*
333		 * Do real pcibios init ops if the controller is initialized
334		 * by tile_pci_init() successfully and not initialized by
335		 * pcibios_init() yet to support PCIe hot-plug.
336		 */
337		if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
338			struct pci_bus *root_bus = controllers[i].root_bus;
339			struct pci_bus *next_bus;
340			struct pci_dev *dev;
341
342			pci_bus_add_devices(root_bus);
343
344			list_for_each_entry(dev, &root_bus->devices, bus_list) {
345				/*
346				 * Find the PCI host controller, i.e. the 1st
347				 * bridge.
348				 */
349				if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
350					(PCI_SLOT(dev->devfn) == 0)) {
351					next_bus = dev->subordinate;
352					controllers[i].mem_resources[0] =
353						*next_bus->resource[0];
354					controllers[i].mem_resources[1] =
355						 *next_bus->resource[1];
356					controllers[i].mem_resources[2] =
357						 *next_bus->resource[2];
358
359					/* Setup flags. */
360					pci_scan_flags[i] = 1;
361
362					break;
363				}
364			}
365		}
366	}
367
368	return 0;
369}
370subsys_initcall(pcibios_init);
371
372/*
373 * No bus fixups needed.
374 */
375void pcibios_fixup_bus(struct pci_bus *bus)
376{
377	/* Nothing needs to be done. */
378}
379
380void pcibios_set_master(struct pci_dev *dev)
381{
382	/* No special bus mastering setup handling. */
383}
384
385/* Process any "pci=" kernel boot arguments. */
386char *__init pcibios_setup(char *str)
387{
388	if (!strcmp(str, "off")) {
389		pci_probe = 0;
390		return NULL;
391	}
392	return str;
393}
394
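A usage note (editorial): the generic PCI layer hands any "pci=" option it does not itself recognize to pcibios_setup(), so disabling PCI from the boot command line works as follows:

/*
 * Booting with "pci=off":
 *   pcibios_setup("off") sets pci_probe = 0 and returns NULL
 *     (NULL tells the caller the option has been consumed).
 *   tile_pci_init() then prints "PCI: disabled by boot argument"
 *     and returns 0 without opening any hypervisor fds.
 */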
395/*
396 * Enable memory and/or address decoding, as appropriate, for the
397 * device described by the 'dev' struct.
398 *
399 * This is called from the generic PCI layer, and can be called
400 * for bridges or endpoints.
401 */
402int pcibios_enable_device(struct pci_dev *dev, int mask)
403{
404	u16 cmd, old_cmd;
405	u8 header_type;
406	int i;
407	struct resource *r;
408
409	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
410
411	pci_read_config_word(dev, PCI_COMMAND, &cmd);
412	old_cmd = cmd;
413	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
414		/*
415		 * For bridges, we enable both memory and I/O decoding
416		 * in all cases.
417		 */
418		cmd |= PCI_COMMAND_IO;
419		cmd |= PCI_COMMAND_MEMORY;
420	} else {
421		/*
422		 * For endpoints, we enable memory and/or I/O decoding
423		 * only if they have a memory resource of that type.
424		 */
425		for (i = 0; i < 6; i++) {
426			r = &dev->resource[i];
427			if (r->flags & IORESOURCE_UNSET) {
428				pr_err("PCI: Device %s not available because of resource collisions\n",
429				       pci_name(dev));
430				return -EINVAL;
431			}
432			if (r->flags & IORESOURCE_IO)
433				cmd |= PCI_COMMAND_IO;
434			if (r->flags & IORESOURCE_MEM)
435				cmd |= PCI_COMMAND_MEMORY;
436		}
437	}
438
439	/*
440	 * We only write the command if it changed.
441	 */
442	if (cmd != old_cmd)
443		pci_write_config_word(dev, PCI_COMMAND, cmd);
444	return 0;
445}
446
447/****************************************************************
448 *
449 * Tile PCI config space read/write routines
450 *
451 ****************************************************************/
452
453/*
454 * These are the normal read and write ops.
455 * They are expanded with macros from pci_bus_read_config_byte() etc.
456 *
457 * devfn is the combined PCI slot & function.
458 *
459 * offset is in bytes, from the start of config space for the
460 * specified bus & slot.
461 */
462
463static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
464			 int size, u32 *val)
465{
466	struct pci_controller *controller = bus->sysdata;
467	int busnum = bus->number & 0xff;
468	int slot = (devfn >> 3) & 0x1f;
469	int function = devfn & 0x7;
470	u32 addr;
471	int config_mode = 1;
472
473	/*
474	 * There is no bridge between the Tile and bus 0, so we
475	 * use config0 to talk to bus 0.
476	 *
477	 * If we're talking to a bus other than zero then we
478	 * must have found a bridge.
479	 */
480	if (busnum == 0) {
481		/*
482		 * We fake an empty slot for (busnum == 0) && (slot > 0),
483		 * since there is only one slot on bus 0.
484		 */
485		if (slot) {
486			*val = 0xFFFFFFFF;
487			return 0;
488		}
489		config_mode = 0;
490	}
491
492	addr = busnum << 20;		/* Bus in 27:20 */
493	addr |= slot << 15;		/* Slot (device) in 19:15 */
494	addr |= function << 12;		/* Function is in 14:12 */
495	addr |= (offset & 0xFFF);	/* byte address in 0:11 */
496
497	return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
498			    (HV_VirtAddr)(val), size, addr);
499}
500
501
502/*
503 * See tile_cfg_read() for relevant comments.
504 * Note that "val" is the value to write, not a pointer to that value.
505 */
506static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
507			  int size, u32 val)
508{
509	struct pci_controller *controller = bus->sysdata;
510	int busnum = bus->number & 0xff;
511	int slot = (devfn >> 3) & 0x1f;
512	int function = devfn & 0x7;
513	u32 addr;
514	int config_mode = 1;
515	HV_VirtAddr valp = (HV_VirtAddr)&val;
516
517	/*
518	 * For bus 0 slot 0 we use config 0 accesses.
519	 */
520	if (busnum == 0) {
521		/*
522		 * We fake an empty slot for (busnum == 0) && (slot > 0),
523		 * since there is only one slot on bus 0.
524		 */
525		if (slot)
526			return 0;
527		config_mode = 0;
528	}
529
530	addr = busnum << 20;		/* Bus in 27:20 */
531	addr |= slot << 15;		/* Slot (device) in 19:15 */
532	addr |= function << 12;		/* Function is in 14:12 */
533	addr |= (offset & 0xFFF);	/* byte address in 0:11 */
534
535#ifdef __BIG_ENDIAN
536	/* Point to the correct part of the 32-bit "val". */
537	valp += 4 - size;
538#endif
539
540	return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
541			     valp, size, addr);
542}
543
544
545static struct pci_ops tile_cfg_ops = {
546	.read =         tile_cfg_read,
547	.write =        tile_cfg_write,
548};
549
550
551/*
552 * In the following, each PCI controller's mem_resources[1]
553 * represents its (non-prefetchable) PCI memory resource.
554 * mem_resources[0] and mem_resources[2] refer to its PCI I/O and
555 * prefetchable PCI memory resources, respectively.
556 * For more details, see pci_setup_bridge() in setup-bus.c.
557 * By comparing the target PCI memory address against the
558 * end address of controller 0, we can determine the controller
559 * that should accept the PCI memory access.
560 */
561#define TILE_READ(size, type)						\
562type _tile_read##size(unsigned long addr)				\
563{									\
564	type val;							\
565	int idx = 0;							\
566	if (addr > controllers[0].mem_resources[1].end &&		\
567	    addr > controllers[0].mem_resources[2].end)			\
568		idx = 1;                                                \
569	if (hv_dev_pread(controllers[idx].hv_mem_fd, 0,			\
570			 (HV_VirtAddr)(&val), sizeof(type), addr))	\
571		pr_err("PCI: read %zd bytes at 0x%lX failed\n",		\
572		       sizeof(type), addr);				\
573	return val;							\
574}									\
575EXPORT_SYMBOL(_tile_read##size)
576
577TILE_READ(b, u8);
578TILE_READ(w, u16);
579TILE_READ(l, u32);
580TILE_READ(q, u64);
581
582#define TILE_WRITE(size, type)						\
583void _tile_write##size(type val, unsigned long addr)			\
584{									\
585	int idx = 0;							\
586	if (addr > controllers[0].mem_resources[1].end &&		\
587	    addr > controllers[0].mem_resources[2].end)			\
588		idx = 1;                                                \
589	if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0,		\
590			  (HV_VirtAddr)(&val), sizeof(type), addr))	\
591		pr_err("PCI: write %zd bytes at 0x%lX failed\n",	\
592		       sizeof(type), addr);				\
593}									\
594EXPORT_SYMBOL(_tile_write##size)
595
596TILE_WRITE(b, u8);
597TILE_WRITE(w, u16);
598TILE_WRITE(l, u32);
599TILE_WRITE(q, u64);