v5.4
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2005-2009 Cavium Networks
  7 */
  8#include <linux/kernel.h>
  9#include <linux/init.h>
 10#include <linux/pci.h>
 11#include <linux/interrupt.h>
 12#include <linux/time.h>
 13#include <linux/delay.h>
 14#include <linux/platform_device.h>
 15#include <linux/swiotlb.h>
 16
 17#include <asm/time.h>
 18
 19#include <asm/octeon/octeon.h>
 20#include <asm/octeon/cvmx-npi-defs.h>
 21#include <asm/octeon/cvmx-pci-defs.h>
 22#include <asm/octeon/pci-octeon.h>
 23
 24#define USE_OCTEON_INTERNAL_ARBITER
 25
 26/*
 27 * Octeon's PCI controller uses did=3, subdid=2 for PCI IO
 28 * addresses. Use PCI endian swapping 1 so no address swapping is
 29 * necessary. The Linux io routines will endian swap the data.
 30 */
 31#define OCTEON_PCI_IOSPACE_BASE	    0x80011a0400000000ull
 32#define OCTEON_PCI_IOSPACE_SIZE	    (1ull<<32)
 33
  34/* Octeon's PCI controller uses did=3, subdid=3 for PCI memory. */
 35#define OCTEON_PCI_MEMSPACE_OFFSET  (0x00011b0000000000ull)
 36
 37u64 octeon_bar1_pci_phys;
 38
 39/**
 40 * This is the bit decoding used for the Octeon PCI controller addresses
 41 */
 42union octeon_pci_address {
 43	uint64_t u64;
 44	struct {
 45		uint64_t upper:2;
 46		uint64_t reserved:13;
 47		uint64_t io:1;
 48		uint64_t did:5;
 49		uint64_t subdid:3;
 50		uint64_t reserved2:4;
 51		uint64_t endian_swap:2;
 52		uint64_t reserved3:10;
 53		uint64_t bus:8;
 54		uint64_t dev:5;
 55		uint64_t func:3;
 56		uint64_t reg:8;
 57	} s;
 58};
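    /*
     * Illustrative decoding (assumes the MSB-first bitfield layout used on
     * big-endian MIPS, which the constants above imply):
     * OCTEON_PCI_IOSPACE_BASE, 0x80011a0400000000, corresponds to upper=2,
     * io=1, did=3, subdid=2 and endian_swap=1 in this union, matching the
     * IO space comment above (did=3, subdid=2, endian swapping 1).
     */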
 59
 60int (*octeon_pcibios_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
 61enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
 62
 63/**
 64 * Map a PCI device to the appropriate interrupt line
 65 *
 66 * @dev:    The Linux PCI device structure for the device to map
 67 * @slot:   The slot number for this device on __BUS 0__. Linux
 68 *		 enumerates through all the bridges and figures out the
 69 *		 slot on Bus 0 where this device eventually hooks to.
 70 * @pin:    The PCI interrupt pin read from the device, then swizzled
 71 *		 as it goes through each bridge.
 72 * Returns Interrupt number for the device
 73 */
 74int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 75{
 76	if (octeon_pcibios_map_irq)
 77		return octeon_pcibios_map_irq(dev, slot, pin);
 78	else
 79		panic("octeon_pcibios_map_irq not set.");
 80}
 81
 82
 83/*
 84 * Called to perform platform specific PCI setup
 85 */
 86int pcibios_plat_dev_init(struct pci_dev *dev)
 87{
 88	uint16_t config;
 89	uint32_t dconfig;
 90	int pos;
 91	/*
 92	 * Force the Cache line setting to 64 bytes. The standard
 93	 * Linux bus scan doesn't seem to set it. Octeon really has
 94	 * 128 byte lines, but Intel bridges get really upset if you
 95	 * try and set values above 64 bytes. Value is specified in
 96	 * 32bit words.
 97	 */
 98	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
 99	/* Set latency timers for all devices */
100	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
101
102	/* Enable reporting System errors and parity errors on all devices */
103	/* Enable parity checking and error reporting */
104	pci_read_config_word(dev, PCI_COMMAND, &config);
105	config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
106	pci_write_config_word(dev, PCI_COMMAND, config);
107
108	if (dev->subordinate) {
109		/* Set latency timers on sub bridges */
110		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 64);
111		/* More bridge error detection */
112		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
113		config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
114		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
115	}
116
117	/* Enable the PCIe normal error reporting */
118	config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */
119	config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */
120	config |= PCI_EXP_DEVCTL_FERE;	/* Fatal Error Reporting */
121	config |= PCI_EXP_DEVCTL_URRE;	/* Unsupported Request */
122	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config);
123
124	/* Find the Advanced Error Reporting capability */
125	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
126	if (pos) {
127		/* Clear Uncorrectable Error Status */
128		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
129				      &dconfig);
130		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
131				       dconfig);
132		/* Enable reporting of all uncorrectable errors */
133		/* Uncorrectable Error Mask - turned on bits disable errors */
134		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
135		/*
136		 * Leave severity at HW default. This only controls if
137		 * errors are reported as uncorrectable or
138		 * correctable, not if the error is reported.
139		 */
140		/* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
141		/* Clear Correctable Error Status */
142		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
143		pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
144		/* Enable reporting of all correctable errors */
145		/* Correctable Error Mask - turned on bits disable errors */
146		pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
147		/* Advanced Error Capabilities */
148		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
149		/* ECRC Generation Enable */
 150		if (dconfig & PCI_ERR_CAP_ECRC_GENC)
 151			dconfig |= PCI_ERR_CAP_ECRC_GENE;
 152		/* ECRC Check Enable */
 153		if (dconfig & PCI_ERR_CAP_ECRC_CHKC)
 154			dconfig |= PCI_ERR_CAP_ECRC_CHKE;
155		pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
156		/* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
157		/* Report all errors to the root complex */
158		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
159				       PCI_ERR_ROOT_CMD_COR_EN |
160				       PCI_ERR_ROOT_CMD_NONFATAL_EN |
161				       PCI_ERR_ROOT_CMD_FATAL_EN);
162		/* Clear the Root status register */
163		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
164		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
165	}
166
167	return 0;
168}
169
170/**
171 * Return the mapping of PCI device number to IRQ line. Each
172 * character in the return string represents the interrupt
173 * line for the device at that position. Device 1 maps to the
174 * first character, etc. The characters A-D are used for PCI
175 * interrupts.
176 *
177 * Returns PCI interrupt mapping
178 */
179const char *octeon_get_pci_interrupts(void)
180{
181	/*
182	 * Returning an empty string causes the interrupts to be
183	 * routed based on the PCI specification. From the PCI spec:
184	 *
185	 * INTA# of Device Number 0 is connected to IRQW on the system
186	 * board.  (Device Number has no significance regarding being
187	 * located on the system board or in a connector.) INTA# of
188	 * Device Number 1 is connected to IRQX on the system
189	 * board. INTA# of Device Number 2 is connected to IRQY on the
190	 * system board. INTA# of Device Number 3 is connected to IRQZ
191	 * on the system board. The table below describes how each
192	 * agent's INTx# lines are connected to the system board
193	 * interrupt lines. The following equation can be used to
194	 * determine to which INTx# signal on the system board a given
195	 * device's INTx# line(s) is connected.
196	 *
197	 * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0,
198	 * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I =
199	 * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and
200	 * INTD# = 3)
201	 */
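    	/*
    	 * Worked example of the equation above (illustrative only):
    	 * Device Number 2 asserting INTB# gives D = 2 and I = 1, so
    	 * MB = (2 + 1) MOD 4 = 3, i.e. the IRQZ line on the system board.
    	 */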
202	if (of_machine_is_compatible("dlink,dsr-500n"))
203		return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
204	switch (octeon_bootinfo->board_type) {
205	case CVMX_BOARD_TYPE_NAO38:
206		/* This is really the NAC38 */
207		return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA";
208	case CVMX_BOARD_TYPE_EBH3100:
209	case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
210	case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
211		return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
212	case CVMX_BOARD_TYPE_BBGW_REF:
213		return "AABCD";
214	case CVMX_BOARD_TYPE_CUST_DSR1000N:
215		return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
216	case CVMX_BOARD_TYPE_THUNDER:
217	case CVMX_BOARD_TYPE_EBH3000:
218	default:
219		return "";
220	}
221}
222
223/**
224 * Map a PCI device to the appropriate interrupt line
225 *
226 * @dev:    The Linux PCI device structure for the device to map
227 * @slot:   The slot number for this device on __BUS 0__. Linux
228 *		 enumerates through all the bridges and figures out the
229 *		 slot on Bus 0 where this device eventually hooks to.
230 * @pin:    The PCI interrupt pin read from the device, then swizzled
231 *		 as it goes through each bridge.
232 * Returns Interrupt number for the device
233 */
234int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
235				      u8 slot, u8 pin)
236{
237	int irq_num;
238	const char *interrupts;
239	int dev_num;
240
241	/* Get the board specific interrupt mapping */
242	interrupts = octeon_get_pci_interrupts();
243
244	dev_num = dev->devfn >> 3;
245	if (dev_num < strlen(interrupts))
246		irq_num = ((interrupts[dev_num] - 'A' + pin - 1) & 3) +
247			OCTEON_IRQ_PCI_INT0;
248	else
249		irq_num = ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0;
250	return irq_num;
251}
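    /*
     * Illustrative example of the calculation above: if the board string
     * gives interrupts[dev_num] == 'D' and the device reports pin INTA#
     * (pin == 1), then ('D' - 'A' + 1 - 1) & 3 == 3, so the function
     * returns OCTEON_IRQ_PCI_INT0 + 3.
     */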
252
253
254/*
255 * Read a value from configuration space
256 */
257static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
258			      int reg, int size, u32 *val)
259{
260	union octeon_pci_address pci_addr;
261
262	pci_addr.u64 = 0;
263	pci_addr.s.upper = 2;
264	pci_addr.s.io = 1;
265	pci_addr.s.did = 3;
266	pci_addr.s.subdid = 1;
267	pci_addr.s.endian_swap = 1;
268	pci_addr.s.bus = bus->number;
269	pci_addr.s.dev = devfn >> 3;
270	pci_addr.s.func = devfn & 0x7;
271	pci_addr.s.reg = reg;
272
273	switch (size) {
274	case 4:
275		*val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
276		return PCIBIOS_SUCCESSFUL;
277	case 2:
278		*val = le16_to_cpu(cvmx_read64_uint16(pci_addr.u64));
279		return PCIBIOS_SUCCESSFUL;
280	case 1:
281		*val = cvmx_read64_uint8(pci_addr.u64);
282		return PCIBIOS_SUCCESSFUL;
283	}
284	return PCIBIOS_FUNC_NOT_SUPPORTED;
285}
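    /*
     * Illustrative address composition (under the same bitfield-layout
     * assumption noted at the union above): a read of bus 0, device 1,
     * function 0, register 0 sets upper=2, io=1, did=3, subdid=1,
     * endian_swap=1 and dev=1, so the 64-bit address handed to
     * cvmx_read64_*() works out to 0x8001190400000800.
     */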
286
287
288/*
289 * Write a value to PCI configuration space
290 */
291static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
292			       int reg, int size, u32 val)
293{
294	union octeon_pci_address pci_addr;
295
296	pci_addr.u64 = 0;
297	pci_addr.s.upper = 2;
298	pci_addr.s.io = 1;
299	pci_addr.s.did = 3;
300	pci_addr.s.subdid = 1;
301	pci_addr.s.endian_swap = 1;
302	pci_addr.s.bus = bus->number;
303	pci_addr.s.dev = devfn >> 3;
304	pci_addr.s.func = devfn & 0x7;
305	pci_addr.s.reg = reg;
306
307	switch (size) {
308	case 4:
309		cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
310		return PCIBIOS_SUCCESSFUL;
311	case 2:
312		cvmx_write64_uint16(pci_addr.u64, cpu_to_le16(val));
313		return PCIBIOS_SUCCESSFUL;
314	case 1:
315		cvmx_write64_uint8(pci_addr.u64, val);
316		return PCIBIOS_SUCCESSFUL;
317	}
318	return PCIBIOS_FUNC_NOT_SUPPORTED;
319}
320
321
322static struct pci_ops octeon_pci_ops = {
323	.read	= octeon_read_config,
324	.write	= octeon_write_config,
325};
326
327static struct resource octeon_pci_mem_resource = {
328	.start = 0,
329	.end = 0,
330	.name = "Octeon PCI MEM",
331	.flags = IORESOURCE_MEM,
332};
333
334/*
335 * PCI ports must be above 16KB so the ISA bus filtering in the PCI-X to PCI
336 * bridge
337 */
338static struct resource octeon_pci_io_resource = {
339	.start = 0x4000,
340	.end = OCTEON_PCI_IOSPACE_SIZE - 1,
341	.name = "Octeon PCI IO",
342	.flags = IORESOURCE_IO,
343};
344
345static struct pci_controller octeon_pci_controller = {
346	.pci_ops = &octeon_pci_ops,
347	.mem_resource = &octeon_pci_mem_resource,
348	.mem_offset = OCTEON_PCI_MEMSPACE_OFFSET,
349	.io_resource = &octeon_pci_io_resource,
350	.io_offset = 0,
351	.io_map_base = OCTEON_PCI_IOSPACE_BASE,
352};
353
354
355/*
356 * Low level initialize the Octeon PCI controller
357 */
358static void octeon_pci_initialize(void)
359{
360	union cvmx_pci_cfg01 cfg01;
361	union cvmx_npi_ctl_status ctl_status;
362	union cvmx_pci_ctl_status_2 ctl_status_2;
363	union cvmx_pci_cfg19 cfg19;
364	union cvmx_pci_cfg16 cfg16;
365	union cvmx_pci_cfg22 cfg22;
366	union cvmx_pci_cfg56 cfg56;
367
368	/* Reset the PCI Bus */
369	cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1);
370	cvmx_read_csr(CVMX_CIU_SOFT_PRST);
371
372	udelay(2000);		/* Hold PCI reset for 2 ms */
373
374	ctl_status.u64 = 0;	/* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */
375	ctl_status.s.max_word = 1;
376	ctl_status.s.timer = 1;
377	cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64);
378
 379	/* Deassert PCI reset and advertise PCX Host Mode Device Capability
380	   (64b) */
381	cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4);
382	cvmx_read_csr(CVMX_CIU_SOFT_PRST);
383
384	udelay(2000);		/* Wait 2 ms after deasserting PCI reset */
385
386	ctl_status_2.u32 = 0;
387	ctl_status_2.s.tsr_hwm = 1;	/* Initializes to 0.  Must be set
388					   before any PCI reads. */
389	ctl_status_2.s.bar2pres = 1;	/* Enable BAR2 */
390	ctl_status_2.s.bar2_enb = 1;
391	ctl_status_2.s.bar2_cax = 1;	/* Don't use L2 */
392	ctl_status_2.s.bar2_esx = 1;
393	ctl_status_2.s.pmo_amod = 1;	/* Round robin priority */
394	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
395		/* BAR1 hole */
396		ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS;
397		ctl_status_2.s.bb1_siz = 1;  /* BAR1 is 2GB */
398		ctl_status_2.s.bb_ca = 1;    /* Don't use L2 with big bars */
399		ctl_status_2.s.bb_es = 1;    /* Big bar in byte swap mode */
400		ctl_status_2.s.bb1 = 1;	     /* BAR1 is big */
401		ctl_status_2.s.bb0 = 1;	     /* BAR0 is big */
402	}
403
404	octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
405	udelay(2000);		/* Wait 2 ms before doing PCI reads */
406
407	ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2);
408	pr_notice("PCI Status: %s %s-bit\n",
409		  ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI",
410		  ctl_status_2.s.ap_64ad ? "64" : "32");
411
412	if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
413		union cvmx_pci_cnt_reg cnt_reg_start;
414		union cvmx_pci_cnt_reg cnt_reg_end;
415		unsigned long cycles, pci_clock;
416
417		cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
418		cycles = read_c0_cvmcount();
419		udelay(1000);
420		cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
421		cycles = read_c0_cvmcount() - cycles;
422		pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) /
423			    (cycles / (mips_hpt_frequency / 1000000));
424		pr_notice("PCI Clock: %lu MHz\n", pci_clock);
425	}
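    	/*
    	 * Illustrative arithmetic for the measurement above: the two
    	 * CNT_REG samples are taken roughly 1000 microseconds apart
    	 * (cycles / (mips_hpt_frequency / 1000000) microseconds, to be
    	 * exact), so a hypothetical PCI counter delta of about 33000 over
    	 * that window would report roughly 33 MHz.
    	 */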
426
427	/*
428	 * TDOMC must be set to one in PCI mode. TDOMC should be set to 4
429	 * in PCI-X mode to allow four outstanding splits. Otherwise,
430	 * should not change from its reset value. Don't write PCI_CFG19
431	 * in PCI mode (0x82000001 reset value), write it to 0x82000004
432	 * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
433	 * MRBCM -> must be one.
434	 */
435	if (ctl_status_2.s.ap_pcix) {
436		cfg19.u32 = 0;
437		/*
438		 * Target Delayed/Split request outstanding maximum
439		 * count. [1..31] and 0=32.  NOTE: If the user
440		 * programs these bits beyond the Designed Maximum
441		 * outstanding count, then the designed maximum table
442		 * depth will be used instead.	No additional
443		 * Deferred/Split transactions will be accepted if
444		 * this outstanding maximum count is
445		 * reached. Furthermore, no additional deferred/split
446		 * transactions will be accepted if the I/O delay/ I/O
447		 * Split Request outstanding maximum is reached.
448		 */
449		cfg19.s.tdomc = 4;
450		/*
451		 * Master Deferred Read Request Outstanding Max Count
452		 * (PCI only).	CR4C[26:24] Max SAC cycles MAX DAC
453		 * cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101
454		 * 5 2 110 6 3 111 7 3 For example, if these bits are
455		 * programmed to 100, the core can support 2 DAC
456		 * cycles, 4 SAC cycles or a combination of 1 DAC and
457		 * 2 SAC cycles. NOTE: For the PCI-X maximum
458		 * outstanding split transactions, refer to
459		 * CRE0[22:20].
460		 */
461		cfg19.s.mdrrmc = 2;
462		/*
463		 * Master Request (Memory Read) Byte Count/Byte Enable
464		 * select. 0 = Byte Enables valid. In PCI mode, a
465		 * burst transaction cannot be performed using Memory
 466		 * Read command=4'h6. 1 = DWORD Byte Count valid
467		 * (default). In PCI Mode, the memory read byte
468		 * enables are automatically generated by the
469		 * core. Note: N3 Master Request transaction sizes are
470		 * always determined through the
471		 * am_attr[<35:32>|<7:0>] field.
472		 */
473		cfg19.s.mrbcm = 1;
474		octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32);
475	}
476
477
478	cfg01.u32 = 0;
479	cfg01.s.msae = 1;	/* Memory Space Access Enable */
480	cfg01.s.me = 1;		/* Master Enable */
481	cfg01.s.pee = 1;	/* PERR# Enable */
482	cfg01.s.see = 1;	/* System Error Enable */
483	cfg01.s.fbbe = 1;	/* Fast Back to Back Transaction Enable */
484
485	octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
486
487#ifdef USE_OCTEON_INTERNAL_ARBITER
488	/*
489	 * When OCTEON is a PCI host, most systems will use OCTEON's
490	 * internal arbiter, so must enable it before any PCI/PCI-X
491	 * traffic can occur.
492	 */
493	{
494		union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg;
495
496		pci_int_arb_cfg.u64 = 0;
497		pci_int_arb_cfg.s.en = 1;	/* Internal arbiter enable */
498		cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64);
499	}
500#endif	/* USE_OCTEON_INTERNAL_ARBITER */
501
502	/*
503	 * Preferably written to 1 to set MLTD. [RDSATI,TRTAE,
504	 * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to
505	 * 1..7.
506	 */
507	cfg16.u32 = 0;
508	cfg16.s.mltd = 1;	/* Master Latency Timer Disable */
509	octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32);
510
511	/*
512	 * Should be written to 0x4ff00. MTTV -> must be zero.
513	 * FLUSH -> must be 1. MRV -> should be 0xFF.
514	 */
515	cfg22.u32 = 0;
516	/* Master Retry Value [1..255] and 0=infinite */
517	cfg22.s.mrv = 0xff;
518	/*
519	 * AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper
520	 * N3K operation.
521	 */
522	cfg22.s.flush = 1;
523	octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32);
524
525	/*
526	 * MOST Indicates the maximum number of outstanding splits (in -1
527	 * notation) when OCTEON is in PCI-X mode.  PCI-X performance is
528	 * affected by the MOST selection.  Should generally be written
529	 * with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807,
530	 * depending on the desired MOST of 3, 2, 1, or 0, respectively.
531	 */
532	cfg56.u32 = 0;
533	cfg56.s.pxcid = 7;	/* RO - PCI-X Capability ID */
534	cfg56.s.ncp = 0xe8;	/* RO - Next Capability Pointer */
535	cfg56.s.dpere = 1;	/* Data Parity Error Recovery Enable */
536	cfg56.s.roe = 1;	/* Relaxed Ordering Enable */
537	cfg56.s.mmbc = 1;	/* Maximum Memory Byte Count
538				   [0=512B,1=1024B,2=2048B,3=4096B] */
539	cfg56.s.most = 3;	/* Maximum outstanding Split transactions [0=1
540				   .. 7=32] */
541
542	octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32);
543
544	/*
545	 * Affects PCI performance when OCTEON services reads to its
546	 * BAR1/BAR2. Refer to Section 10.6.1.	The recommended values are
547	 * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
548	 * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
549	 * these values need to be changed so they won't possibly prefetch off
550	 * of the end of memory if PCI is DMAing a buffer at the end of
551	 * memory. Note that these values differ from their reset values.
552	 */
553	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21);
554	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31);
555	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31);
556}
557
558
559/*
560 * Initialize the Octeon PCI controller
561 */
562static int __init octeon_pci_setup(void)
563{
564	union cvmx_npi_mem_access_subidx mem_access;
565	int index;
566
567	/* Only these chips have PCI */
568	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
569		return 0;
570
571	if (!octeon_is_pci_host()) {
572		pr_notice("Not in host mode, PCI Controller not initialized\n");
573		return 0;
574	}
575
576	/* Point pcibios_map_irq() to the PCI version of it */
577	octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
578
579	/* Only use the big bars on chips that support it */
580	if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
581	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
582	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
583		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL;
584	else
585		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
586
587	/* PCI I/O and PCI MEM values */
588	set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
589	ioport_resource.start = 0;
590	ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1;
591
592	pr_notice("%s Octeon big bar support\n",
593		  (octeon_dma_bar_type ==
594		  OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling");
595
596	octeon_pci_initialize();
597
598	mem_access.u64 = 0;
599	mem_access.s.esr = 1;	/* Endian-Swap on read. */
600	mem_access.s.esw = 1;	/* Endian-Swap on write. */
601	mem_access.s.nsr = 0;	/* No-Snoop on read. */
602	mem_access.s.nsw = 0;	/* No-Snoop on write. */
603	mem_access.s.ror = 0;	/* Relax Read on read. */
604	mem_access.s.row = 0;	/* Relax Order on write. */
605	mem_access.s.ba = 0;	/* PCI Address bits [63:36]. */
606	cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64);
607
608	/*
609	 * Remap the Octeon BAR 2 above all 32 bit devices
610	 * (0x8000000000ul).  This is done here so it is remapped
611	 * before the readl()'s below. We don't want BAR2 overlapping
612	 * with BAR0/BAR1 during these reads.
613	 */
614	octeon_npi_write32(CVMX_NPI_PCI_CFG08,
615			   (u32)(OCTEON_BAR2_PCI_ADDRESS & 0xffffffffull));
616	octeon_npi_write32(CVMX_NPI_PCI_CFG09,
617			   (u32)(OCTEON_BAR2_PCI_ADDRESS >> 32));
618
619	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
620		/* Remap the Octeon BAR 0 to 0-2GB */
621		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0);
622		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
623
624		/*
625		 * Remap the Octeon BAR 1 to map 2GB-4GB (minus the
626		 * BAR 1 hole).
627		 */
628		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
629		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
630
631		/* BAR1 movable mappings set for identity mapping */
632		octeon_bar1_pci_phys = 0x80000000ull;
633		for (index = 0; index < 32; index++) {
634			union cvmx_pci_bar1_indexx bar1_index;
635
636			bar1_index.u32 = 0;
637			/* Address bits[35:22] sent to L2C */
638			bar1_index.s.addr_idx =
639				(octeon_bar1_pci_phys >> 22) + index;
640			/* Don't put PCI accesses in L2. */
641			bar1_index.s.ca = 1;
642			/* Endian Swap Mode */
643			bar1_index.s.end_swp = 1;
644			/* Set '1' when the selected address range is valid. */
645			bar1_index.s.addr_v = 1;
646			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
647					   bar1_index.u32);
648		}
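    		/*
    		 * Illustrative sizing: each BAR1_INDEX entry maps address
    		 * bits [35:22], i.e. a 4 MB window, so entry N written above
    		 * points at physical 0x80000000 + N * 4 MB, which is the
    		 * identity mapping the comment above refers to.
    		 */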
649
650		/* Devices go after BAR1 */
651		octeon_pci_mem_resource.start =
652			OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
653			(OCTEON_PCI_BAR1_HOLE_SIZE << 20);
654		octeon_pci_mem_resource.end =
655			octeon_pci_mem_resource.start + (1ul << 30);
656	} else {
657		/* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */
658		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20);
659		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
660
661		/* Remap the Octeon BAR 1 to map 0-128MB */
662		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
663		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
664
665		/* BAR1 movable regions contiguous to cover the swiotlb */
666		octeon_bar1_pci_phys =
667			virt_to_phys(octeon_swiotlb) & ~((1ull << 22) - 1);
668
669		for (index = 0; index < 32; index++) {
670			union cvmx_pci_bar1_indexx bar1_index;
671
672			bar1_index.u32 = 0;
673			/* Address bits[35:22] sent to L2C */
674			bar1_index.s.addr_idx =
675				(octeon_bar1_pci_phys >> 22) + index;
676			/* Don't put PCI accesses in L2. */
677			bar1_index.s.ca = 1;
678			/* Endian Swap Mode */
679			bar1_index.s.end_swp = 1;
680			/* Set '1' when the selected address range is valid. */
681			bar1_index.s.addr_v = 1;
682			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
683					   bar1_index.u32);
684		}
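    		/*
    		 * Illustrative sizing: as above, the 32 entries of 4 MB each
    		 * span the 128 MB that BAR1 is remapped to cover, here
    		 * anchored at the 4 MB-aligned physical start of the swiotlb
    		 * buffer so the bounce buffers stay reachable through BAR1.
    		 */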
685
686		/* Devices go after BAR0 */
687		octeon_pci_mem_resource.start =
688			OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
689			(4ul << 10);
690		octeon_pci_mem_resource.end =
691			octeon_pci_mem_resource.start + (1ul << 30);
692	}
693
694	register_pci_controller(&octeon_pci_controller);
695
696	/*
697	 * Clear any errors that might be pending from before the bus
 698	 * was set up properly.
699	 */
700	cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);
701
702	if (IS_ERR(platform_device_register_simple("octeon_pci_edac",
703						   -1, NULL, 0)))
704		pr_err("Registration of co_pci_edac failed!\n");
705
706	octeon_pci_dma_init();
707
708	return 0;
709}
710
711arch_initcall(octeon_pci_setup);
v3.15
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2005-2009 Cavium Networks
  7 */
  8#include <linux/kernel.h>
  9#include <linux/init.h>
 10#include <linux/pci.h>
 11#include <linux/interrupt.h>
 12#include <linux/time.h>
 13#include <linux/delay.h>
 14#include <linux/platform_device.h>
 15#include <linux/swiotlb.h>
 16
 17#include <asm/time.h>
 18
 19#include <asm/octeon/octeon.h>
 20#include <asm/octeon/cvmx-npi-defs.h>
 21#include <asm/octeon/cvmx-pci-defs.h>
 22#include <asm/octeon/pci-octeon.h>
 23
 24#include <dma-coherence.h>
 25
 26#define USE_OCTEON_INTERNAL_ARBITER
 27
 28/*
 29 * Octeon's PCI controller uses did=3, subdid=2 for PCI IO
 30 * addresses. Use PCI endian swapping 1 so no address swapping is
 31 * necessary. The Linux io routines will endian swap the data.
 32 */
 33#define OCTEON_PCI_IOSPACE_BASE	    0x80011a0400000000ull
 34#define OCTEON_PCI_IOSPACE_SIZE	    (1ull<<32)
 35
  36/* Octeon's PCI controller uses did=3, subdid=3 for PCI memory. */
 37#define OCTEON_PCI_MEMSPACE_OFFSET  (0x00011b0000000000ull)
 38
 39u64 octeon_bar1_pci_phys;
 40
 41/**
 42 * This is the bit decoding used for the Octeon PCI controller addresses
 43 */
 44union octeon_pci_address {
 45	uint64_t u64;
 46	struct {
 47		uint64_t upper:2;
 48		uint64_t reserved:13;
 49		uint64_t io:1;
 50		uint64_t did:5;
 51		uint64_t subdid:3;
 52		uint64_t reserved2:4;
 53		uint64_t endian_swap:2;
 54		uint64_t reserved3:10;
 55		uint64_t bus:8;
 56		uint64_t dev:5;
 57		uint64_t func:3;
 58		uint64_t reg:8;
 59	} s;
 60};
 61
 62int __initconst (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
 63					 u8 slot, u8 pin);
 64enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
 65
 66/**
 67 * Map a PCI device to the appropriate interrupt line
 68 *
 69 * @dev:    The Linux PCI device structure for the device to map
 70 * @slot:   The slot number for this device on __BUS 0__. Linux
 71 *		 enumerates through all the bridges and figures out the
 72 *		 slot on Bus 0 where this device eventually hooks to.
 73 * @pin:    The PCI interrupt pin read from the device, then swizzled
 74 *		 as it goes through each bridge.
 75 * Returns Interrupt number for the device
 76 */
 77int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 78{
 79	if (octeon_pcibios_map_irq)
 80		return octeon_pcibios_map_irq(dev, slot, pin);
 81	else
 82		panic("octeon_pcibios_map_irq not set.");
 83}
 84
 85
 86/*
 87 * Called to perform platform specific PCI setup
 88 */
 89int pcibios_plat_dev_init(struct pci_dev *dev)
 90{
 91	uint16_t config;
 92	uint32_t dconfig;
 93	int pos;
 94	/*
 95	 * Force the Cache line setting to 64 bytes. The standard
 96	 * Linux bus scan doesn't seem to set it. Octeon really has
 97	 * 128 byte lines, but Intel bridges get really upset if you
 98	 * try and set values above 64 bytes. Value is specified in
 99	 * 32bit words.
100	 */
101	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
102	/* Set latency timers for all devices */
103	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
104
105	/* Enable reporting System errors and parity errors on all devices */
106	/* Enable parity checking and error reporting */
107	pci_read_config_word(dev, PCI_COMMAND, &config);
108	config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
109	pci_write_config_word(dev, PCI_COMMAND, config);
110
111	if (dev->subordinate) {
112		/* Set latency timers on sub bridges */
113		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 64);
114		/* More bridge error detection */
115		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
116		config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
117		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
118	}
119
120	/* Enable the PCIe normal error reporting */
121	config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */
122	config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */
123	config |= PCI_EXP_DEVCTL_FERE;	/* Fatal Error Reporting */
124	config |= PCI_EXP_DEVCTL_URRE;	/* Unsupported Request */
125	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config);
126
127	/* Find the Advanced Error Reporting capability */
128	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
129	if (pos) {
130		/* Clear Uncorrectable Error Status */
131		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
132				      &dconfig);
133		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
134				       dconfig);
135		/* Enable reporting of all uncorrectable errors */
136		/* Uncorrectable Error Mask - turned on bits disable errors */
137		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
138		/*
139		 * Leave severity at HW default. This only controls if
140		 * errors are reported as uncorrectable or
141		 * correctable, not if the error is reported.
142		 */
143		/* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
144		/* Clear Correctable Error Status */
145		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
146		pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
147		/* Enable reporting of all correctable errors */
148		/* Correctable Error Mask - turned on bits disable errors */
149		pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
150		/* Advanced Error Capabilities */
151		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
152		/* ECRC Generation Enable */
 153		if (dconfig & PCI_ERR_CAP_ECRC_GENC)
 154			dconfig |= PCI_ERR_CAP_ECRC_GENE;
 155		/* ECRC Check Enable */
 156		if (dconfig & PCI_ERR_CAP_ECRC_CHKC)
 157			dconfig |= PCI_ERR_CAP_ECRC_CHKE;
158		pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
159		/* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
160		/* Report all errors to the root complex */
161		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
162				       PCI_ERR_ROOT_CMD_COR_EN |
163				       PCI_ERR_ROOT_CMD_NONFATAL_EN |
164				       PCI_ERR_ROOT_CMD_FATAL_EN);
165		/* Clear the Root status register */
166		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
167		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
168	}
169
170	dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;
171
172	return 0;
173}
174
175/**
176 * Return the mapping of PCI device number to IRQ line. Each
177 * character in the return string represents the interrupt
178 * line for the device at that position. Device 1 maps to the
179 * first character, etc. The characters A-D are used for PCI
180 * interrupts.
181 *
182 * Returns PCI interrupt mapping
183 */
184const char *octeon_get_pci_interrupts(void)
185{
186	/*
187	 * Returning an empty string causes the interrupts to be
188	 * routed based on the PCI specification. From the PCI spec:
189	 *
190	 * INTA# of Device Number 0 is connected to IRQW on the system
191	 * board.  (Device Number has no significance regarding being
192	 * located on the system board or in a connector.) INTA# of
193	 * Device Number 1 is connected to IRQX on the system
194	 * board. INTA# of Device Number 2 is connected to IRQY on the
195	 * system board. INTA# of Device Number 3 is connected to IRQZ
196	 * on the system board. The table below describes how each
197	 * agent's INTx# lines are connected to the system board
198	 * interrupt lines. The following equation can be used to
199	 * determine to which INTx# signal on the system board a given
200	 * device's INTx# line(s) is connected.
201	 *
202	 * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0,
203	 * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I =
204	 * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and
205	 * INTD# = 3)
206	 */
207	switch (octeon_bootinfo->board_type) {
208	case CVMX_BOARD_TYPE_NAO38:
209		/* This is really the NAC38 */
210		return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA";
211	case CVMX_BOARD_TYPE_EBH3100:
212	case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
213	case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
214		return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
215	case CVMX_BOARD_TYPE_BBGW_REF:
216		return "AABCD";
217	case CVMX_BOARD_TYPE_THUNDER:
218	case CVMX_BOARD_TYPE_EBH3000:
219	default:
220		return "";
221	}
222}
223
224/**
225 * Map a PCI device to the appropriate interrupt line
226 *
227 * @dev:    The Linux PCI device structure for the device to map
228 * @slot:   The slot number for this device on __BUS 0__. Linux
229 *		 enumerates through all the bridges and figures out the
230 *		 slot on Bus 0 where this device eventually hooks to.
231 * @pin:    The PCI interrupt pin read from the device, then swizzled
232 *		 as it goes through each bridge.
233 * Returns Interrupt number for the device
234 */
235int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
236				      u8 slot, u8 pin)
237{
238	int irq_num;
239	const char *interrupts;
240	int dev_num;
241
242	/* Get the board specific interrupt mapping */
243	interrupts = octeon_get_pci_interrupts();
244
245	dev_num = dev->devfn >> 3;
246	if (dev_num < strlen(interrupts))
247		irq_num = ((interrupts[dev_num] - 'A' + pin - 1) & 3) +
248			OCTEON_IRQ_PCI_INT0;
249	else
250		irq_num = ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0;
251	return irq_num;
252}
253
254
255/*
256 * Read a value from configuration space
257 */
258static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
259			      int reg, int size, u32 *val)
260{
261	union octeon_pci_address pci_addr;
262
263	pci_addr.u64 = 0;
264	pci_addr.s.upper = 2;
265	pci_addr.s.io = 1;
266	pci_addr.s.did = 3;
267	pci_addr.s.subdid = 1;
268	pci_addr.s.endian_swap = 1;
269	pci_addr.s.bus = bus->number;
270	pci_addr.s.dev = devfn >> 3;
271	pci_addr.s.func = devfn & 0x7;
272	pci_addr.s.reg = reg;
273
274#if PCI_CONFIG_SPACE_DELAY
275	udelay(PCI_CONFIG_SPACE_DELAY);
276#endif
277	switch (size) {
278	case 4:
279		*val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
280		return PCIBIOS_SUCCESSFUL;
281	case 2:
282		*val = le16_to_cpu(cvmx_read64_uint16(pci_addr.u64));
283		return PCIBIOS_SUCCESSFUL;
284	case 1:
285		*val = cvmx_read64_uint8(pci_addr.u64);
286		return PCIBIOS_SUCCESSFUL;
287	}
288	return PCIBIOS_FUNC_NOT_SUPPORTED;
289}
290
291
292/*
293 * Write a value to PCI configuration space
294 */
295static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
296			       int reg, int size, u32 val)
297{
298	union octeon_pci_address pci_addr;
299
300	pci_addr.u64 = 0;
301	pci_addr.s.upper = 2;
302	pci_addr.s.io = 1;
303	pci_addr.s.did = 3;
304	pci_addr.s.subdid = 1;
305	pci_addr.s.endian_swap = 1;
306	pci_addr.s.bus = bus->number;
307	pci_addr.s.dev = devfn >> 3;
308	pci_addr.s.func = devfn & 0x7;
309	pci_addr.s.reg = reg;
310
311#if PCI_CONFIG_SPACE_DELAY
312	udelay(PCI_CONFIG_SPACE_DELAY);
313#endif
314	switch (size) {
315	case 4:
316		cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
317		return PCIBIOS_SUCCESSFUL;
318	case 2:
319		cvmx_write64_uint16(pci_addr.u64, cpu_to_le16(val));
320		return PCIBIOS_SUCCESSFUL;
321	case 1:
322		cvmx_write64_uint8(pci_addr.u64, val);
323		return PCIBIOS_SUCCESSFUL;
324	}
325	return PCIBIOS_FUNC_NOT_SUPPORTED;
326}
327
328
329static struct pci_ops octeon_pci_ops = {
330	octeon_read_config,
331	octeon_write_config,
332};
333
334static struct resource octeon_pci_mem_resource = {
335	.start = 0,
336	.end = 0,
337	.name = "Octeon PCI MEM",
338	.flags = IORESOURCE_MEM,
339};
340
341/*
342 * PCI ports must be above 16KB so the ISA bus filtering in the PCI-X to PCI
343 * bridge
344 */
345static struct resource octeon_pci_io_resource = {
346	.start = 0x4000,
347	.end = OCTEON_PCI_IOSPACE_SIZE - 1,
348	.name = "Octeon PCI IO",
349	.flags = IORESOURCE_IO,
350};
351
352static struct pci_controller octeon_pci_controller = {
353	.pci_ops = &octeon_pci_ops,
354	.mem_resource = &octeon_pci_mem_resource,
355	.mem_offset = OCTEON_PCI_MEMSPACE_OFFSET,
356	.io_resource = &octeon_pci_io_resource,
357	.io_offset = 0,
358	.io_map_base = OCTEON_PCI_IOSPACE_BASE,
359};
360
361
362/*
363 * Low level initialize the Octeon PCI controller
364 */
365static void octeon_pci_initialize(void)
366{
367	union cvmx_pci_cfg01 cfg01;
368	union cvmx_npi_ctl_status ctl_status;
369	union cvmx_pci_ctl_status_2 ctl_status_2;
370	union cvmx_pci_cfg19 cfg19;
371	union cvmx_pci_cfg16 cfg16;
372	union cvmx_pci_cfg22 cfg22;
373	union cvmx_pci_cfg56 cfg56;
374
375	/* Reset the PCI Bus */
376	cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1);
377	cvmx_read_csr(CVMX_CIU_SOFT_PRST);
378
379	udelay(2000);		/* Hold PCI reset for 2 ms */
380
381	ctl_status.u64 = 0;	/* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */
382	ctl_status.s.max_word = 1;
383	ctl_status.s.timer = 1;
384	cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64);
385
 386	/* Deassert PCI reset and advertise PCX Host Mode Device Capability
387	   (64b) */
388	cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4);
389	cvmx_read_csr(CVMX_CIU_SOFT_PRST);
390
391	udelay(2000);		/* Wait 2 ms after deasserting PCI reset */
392
393	ctl_status_2.u32 = 0;
394	ctl_status_2.s.tsr_hwm = 1;	/* Initializes to 0.  Must be set
395					   before any PCI reads. */
396	ctl_status_2.s.bar2pres = 1;	/* Enable BAR2 */
397	ctl_status_2.s.bar2_enb = 1;
398	ctl_status_2.s.bar2_cax = 1;	/* Don't use L2 */
399	ctl_status_2.s.bar2_esx = 1;
400	ctl_status_2.s.pmo_amod = 1;	/* Round robin priority */
401	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
402		/* BAR1 hole */
403		ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS;
404		ctl_status_2.s.bb1_siz = 1;  /* BAR1 is 2GB */
405		ctl_status_2.s.bb_ca = 1;    /* Don't use L2 with big bars */
406		ctl_status_2.s.bb_es = 1;    /* Big bar in byte swap mode */
407		ctl_status_2.s.bb1 = 1;	     /* BAR1 is big */
408		ctl_status_2.s.bb0 = 1;	     /* BAR0 is big */
409	}
410
411	octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
412	udelay(2000);		/* Wait 2 ms before doing PCI reads */
413
414	ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2);
415	pr_notice("PCI Status: %s %s-bit\n",
416		  ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI",
417		  ctl_status_2.s.ap_64ad ? "64" : "32");
418
419	if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
420		union cvmx_pci_cnt_reg cnt_reg_start;
421		union cvmx_pci_cnt_reg cnt_reg_end;
422		unsigned long cycles, pci_clock;
423
424		cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
425		cycles = read_c0_cvmcount();
426		udelay(1000);
427		cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
428		cycles = read_c0_cvmcount() - cycles;
429		pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) /
430			    (cycles / (mips_hpt_frequency / 1000000));
431		pr_notice("PCI Clock: %lu MHz\n", pci_clock);
432	}
433
434	/*
435	 * TDOMC must be set to one in PCI mode. TDOMC should be set to 4
436	 * in PCI-X mode to allow four outstanding splits. Otherwise,
437	 * should not change from its reset value. Don't write PCI_CFG19
438	 * in PCI mode (0x82000001 reset value), write it to 0x82000004
439	 * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
440	 * MRBCM -> must be one.
441	 */
442	if (ctl_status_2.s.ap_pcix) {
443		cfg19.u32 = 0;
444		/*
445		 * Target Delayed/Split request outstanding maximum
446		 * count. [1..31] and 0=32.  NOTE: If the user
447		 * programs these bits beyond the Designed Maximum
448		 * outstanding count, then the designed maximum table
449		 * depth will be used instead.	No additional
450		 * Deferred/Split transactions will be accepted if
451		 * this outstanding maximum count is
452		 * reached. Furthermore, no additional deferred/split
453		 * transactions will be accepted if the I/O delay/ I/O
454		 * Split Request outstanding maximum is reached.
455		 */
456		cfg19.s.tdomc = 4;
457		/*
458		 * Master Deferred Read Request Outstanding Max Count
459		 * (PCI only).	CR4C[26:24] Max SAC cycles MAX DAC
460		 * cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101
461		 * 5 2 110 6 3 111 7 3 For example, if these bits are
462		 * programmed to 100, the core can support 2 DAC
463		 * cycles, 4 SAC cycles or a combination of 1 DAC and
464		 * 2 SAC cycles. NOTE: For the PCI-X maximum
465		 * outstanding split transactions, refer to
466		 * CRE0[22:20].
467		 */
468		cfg19.s.mdrrmc = 2;
469		/*
470		 * Master Request (Memory Read) Byte Count/Byte Enable
471		 * select. 0 = Byte Enables valid. In PCI mode, a
472		 * burst transaction cannot be performed using Memory
 473		 * Read command=4'h6. 1 = DWORD Byte Count valid
474		 * (default). In PCI Mode, the memory read byte
475		 * enables are automatically generated by the
476		 * core. Note: N3 Master Request transaction sizes are
477		 * always determined through the
478		 * am_attr[<35:32>|<7:0>] field.
479		 */
480		cfg19.s.mrbcm = 1;
481		octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32);
482	}
483
484
485	cfg01.u32 = 0;
486	cfg01.s.msae = 1;	/* Memory Space Access Enable */
487	cfg01.s.me = 1;		/* Master Enable */
488	cfg01.s.pee = 1;	/* PERR# Enable */
489	cfg01.s.see = 1;	/* System Error Enable */
490	cfg01.s.fbbe = 1;	/* Fast Back to Back Transaction Enable */
491
492	octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
493
494#ifdef USE_OCTEON_INTERNAL_ARBITER
495	/*
496	 * When OCTEON is a PCI host, most systems will use OCTEON's
497	 * internal arbiter, so must enable it before any PCI/PCI-X
498	 * traffic can occur.
499	 */
500	{
501		union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg;
502
503		pci_int_arb_cfg.u64 = 0;
504		pci_int_arb_cfg.s.en = 1;	/* Internal arbiter enable */
505		cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64);
506	}
507#endif	/* USE_OCTEON_INTERNAL_ARBITER */
508
509	/*
510	 * Preferably written to 1 to set MLTD. [RDSATI,TRTAE,
511	 * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to
512	 * 1..7.
513	 */
514	cfg16.u32 = 0;
515	cfg16.s.mltd = 1;	/* Master Latency Timer Disable */
516	octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32);
517
518	/*
519	 * Should be written to 0x4ff00. MTTV -> must be zero.
520	 * FLUSH -> must be 1. MRV -> should be 0xFF.
521	 */
522	cfg22.u32 = 0;
523	/* Master Retry Value [1..255] and 0=infinite */
524	cfg22.s.mrv = 0xff;
525	/*
526	 * AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper
527	 * N3K operation.
528	 */
529	cfg22.s.flush = 1;
530	octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32);
531
532	/*
533	 * MOST Indicates the maximum number of outstanding splits (in -1
534	 * notation) when OCTEON is in PCI-X mode.  PCI-X performance is
535	 * affected by the MOST selection.  Should generally be written
536	 * with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807,
537	 * depending on the desired MOST of 3, 2, 1, or 0, respectively.
538	 */
539	cfg56.u32 = 0;
540	cfg56.s.pxcid = 7;	/* RO - PCI-X Capability ID */
541	cfg56.s.ncp = 0xe8;	/* RO - Next Capability Pointer */
542	cfg56.s.dpere = 1;	/* Data Parity Error Recovery Enable */
543	cfg56.s.roe = 1;	/* Relaxed Ordering Enable */
544	cfg56.s.mmbc = 1;	/* Maximum Memory Byte Count
545				   [0=512B,1=1024B,2=2048B,3=4096B] */
546	cfg56.s.most = 3;	/* Maximum outstanding Split transactions [0=1
547				   .. 7=32] */
548
549	octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32);
550
551	/*
552	 * Affects PCI performance when OCTEON services reads to its
553	 * BAR1/BAR2. Refer to Section 10.6.1.	The recommended values are
554	 * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
555	 * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
556	 * these values need to be changed so they won't possibly prefetch off
557	 * of the end of memory if PCI is DMAing a buffer at the end of
558	 * memory. Note that these values differ from their reset values.
559	 */
560	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21);
561	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31);
562	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31);
563}
564
565
566/*
567 * Initialize the Octeon PCI controller
568 */
569static int __init octeon_pci_setup(void)
570{
571	union cvmx_npi_mem_access_subidx mem_access;
572	int index;
573
574	/* Only these chips have PCI */
575	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
576		return 0;
577
578	/* Point pcibios_map_irq() to the PCI version of it */
579	octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
580
581	/* Only use the big bars on chips that support it */
582	if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
583	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
584	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
585		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL;
586	else
587		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
588
589	if (!octeon_is_pci_host()) {
590		pr_notice("Not in host mode, PCI Controller not initialized\n");
591		return 0;
592	}
593
594	/* PCI I/O and PCI MEM values */
595	set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
596	ioport_resource.start = 0;
597	ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1;
598
599	pr_notice("%s Octeon big bar support\n",
600		  (octeon_dma_bar_type ==
601		  OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling");
602
603	octeon_pci_initialize();
604
605	mem_access.u64 = 0;
606	mem_access.s.esr = 1;	/* Endian-Swap on read. */
607	mem_access.s.esw = 1;	/* Endian-Swap on write. */
608	mem_access.s.nsr = 0;	/* No-Snoop on read. */
609	mem_access.s.nsw = 0;	/* No-Snoop on write. */
610	mem_access.s.ror = 0;	/* Relax Read on read. */
611	mem_access.s.row = 0;	/* Relax Order on write. */
612	mem_access.s.ba = 0;	/* PCI Address bits [63:36]. */
613	cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64);
614
615	/*
616	 * Remap the Octeon BAR 2 above all 32 bit devices
617	 * (0x8000000000ul).  This is done here so it is remapped
618	 * before the readl()'s below. We don't want BAR2 overlapping
619	 * with BAR0/BAR1 during these reads.
620	 */
621	octeon_npi_write32(CVMX_NPI_PCI_CFG08,
622			   (u32)(OCTEON_BAR2_PCI_ADDRESS & 0xffffffffull));
623	octeon_npi_write32(CVMX_NPI_PCI_CFG09,
624			   (u32)(OCTEON_BAR2_PCI_ADDRESS >> 32));
625
626	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
627		/* Remap the Octeon BAR 0 to 0-2GB */
628		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0);
629		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
630
631		/*
632		 * Remap the Octeon BAR 1 to map 2GB-4GB (minus the
633		 * BAR 1 hole).
634		 */
635		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
636		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
637
638		/* BAR1 movable mappings set for identity mapping */
639		octeon_bar1_pci_phys = 0x80000000ull;
640		for (index = 0; index < 32; index++) {
641			union cvmx_pci_bar1_indexx bar1_index;
642
643			bar1_index.u32 = 0;
644			/* Address bits[35:22] sent to L2C */
645			bar1_index.s.addr_idx =
646				(octeon_bar1_pci_phys >> 22) + index;
647			/* Don't put PCI accesses in L2. */
648			bar1_index.s.ca = 1;
649			/* Endian Swap Mode */
650			bar1_index.s.end_swp = 1;
651			/* Set '1' when the selected address range is valid. */
652			bar1_index.s.addr_v = 1;
653			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
654					   bar1_index.u32);
655		}
656
657		/* Devices go after BAR1 */
658		octeon_pci_mem_resource.start =
659			OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
660			(OCTEON_PCI_BAR1_HOLE_SIZE << 20);
661		octeon_pci_mem_resource.end =
662			octeon_pci_mem_resource.start + (1ul << 30);
663	} else {
664		/* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */
665		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20);
666		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
667
668		/* Remap the Octeon BAR 1 to map 0-128MB */
669		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
670		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
671
672		/* BAR1 movable regions contiguous to cover the swiotlb */
673		octeon_bar1_pci_phys =
674			virt_to_phys(octeon_swiotlb) & ~((1ull << 22) - 1);
675
676		for (index = 0; index < 32; index++) {
677			union cvmx_pci_bar1_indexx bar1_index;
678
679			bar1_index.u32 = 0;
680			/* Address bits[35:22] sent to L2C */
681			bar1_index.s.addr_idx =
682				(octeon_bar1_pci_phys >> 22) + index;
683			/* Don't put PCI accesses in L2. */
684			bar1_index.s.ca = 1;
685			/* Endian Swap Mode */
686			bar1_index.s.end_swp = 1;
687			/* Set '1' when the selected address range is valid. */
688			bar1_index.s.addr_v = 1;
689			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
690					   bar1_index.u32);
691		}
692
693		/* Devices go after BAR0 */
694		octeon_pci_mem_resource.start =
695			OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
696			(4ul << 10);
697		octeon_pci_mem_resource.end =
698			octeon_pci_mem_resource.start + (1ul << 30);
699	}
700
701	register_pci_controller(&octeon_pci_controller);
702
703	/*
704	 * Clear any errors that might be pending from before the bus
705	 * was setup properly.
706	 */
707	cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);
708
709	if (IS_ERR(platform_device_register_simple("octeon_pci_edac",
710						   -1, NULL, 0)))
 711		pr_err("Registration of co_pci_edac failed!\n");
712
713	octeon_pci_dma_init();
714
715	return 0;
716}
717
718arch_initcall(octeon_pci_setup);