[Scraped page header removed. The following is the Linux kernel source file
arch/mips/pci/pci-octeon.c; the first copy below is from kernel v3.1.]
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2005-2009 Cavium Networks
  7 */
  8#include <linux/kernel.h>
  9#include <linux/init.h>
 10#include <linux/pci.h>
 11#include <linux/interrupt.h>
 12#include <linux/time.h>
 13#include <linux/delay.h>
 
 14#include <linux/swiotlb.h>
 15
 16#include <asm/time.h>
 17
 18#include <asm/octeon/octeon.h>
 19#include <asm/octeon/cvmx-npi-defs.h>
 20#include <asm/octeon/cvmx-pci-defs.h>
 21#include <asm/octeon/pci-octeon.h>
 22
 23#include <dma-coherence.h>
 24
/* Use Octeon's on-chip PCI bus arbiter (see octeon_pci_initialize()). */
#define USE_OCTEON_INTERNAL_ARBITER

/*
 * Octeon's PCI controller uses did=3, subdid=2 for PCI IO
 * addresses. Use PCI endian swapping 1 so no address swapping is
 * necessary. The Linux io routines will endian swap the data.
 */
#define OCTEON_PCI_IOSPACE_BASE     0x80011a0400000000ull
#define OCTEON_PCI_IOSPACE_SIZE     (1ull<<32)

/* Octeon's PCI controller uses did=3, subdid=3 for PCI memory. */
#define OCTEON_PCI_MEMSPACE_OFFSET  (0x00011b0000000000ull)

/* Physical address backing the movable BAR1 mappings; set in octeon_pci_setup(). */
u64 octeon_bar1_pci_phys;
 39
/**
 * This is the bit decoding used for the Octeon PCI controller addresses
 *
 * NOTE(review): bitfield members are listed most-significant-first,
 * which matches a big-endian MIPS build — confirm if this is ever
 * compiled for another endianness.
 */
union octeon_pci_address {
	uint64_t u64;			/* raw 64-bit bus address */
	struct {
		uint64_t upper:2;	/* set to 2 by the config accessors below */
		uint64_t reserved:13;
		uint64_t io:1;		/* set to 1 for config space cycles */
		uint64_t did:5;		/* device id; 3 selects the PCI controller */
		uint64_t subdid:3;	/* sub-did; 1 used for config accesses */
		uint64_t reserved2:4;
		uint64_t endian_swap:2;	/* 1 = swap data only (see file-top comment) */
		uint64_t reserved3:10;
		uint64_t bus:8;		/* PCI bus number */
		uint64_t dev:5;		/* PCI device number (devfn >> 3) */
		uint64_t func:3;	/* PCI function number (devfn & 7) */
		uint64_t reg:8;		/* config space register offset */
	} s;
};
 60
/* Board-specific IRQ routing hook; pcibios_map_irq() panics if left unset. */
int __initdata (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
					 u8 slot, u8 pin);
/* Which DMA BAR layout is in use; selected in octeon_pci_setup(). */
enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
 64
 65/**
 66 * Map a PCI device to the appropriate interrupt line
 67 *
 68 * @dev:    The Linux PCI device structure for the device to map
 69 * @slot:   The slot number for this device on __BUS 0__. Linux
 70 *               enumerates through all the bridges and figures out the
 71 *               slot on Bus 0 where this device eventually hooks to.
 72 * @pin:    The PCI interrupt pin read from the device, then swizzled
 73 *               as it goes through each bridge.
 74 * Returns Interrupt number for the device
 75 */
 76int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 77{
 78	if (octeon_pcibios_map_irq)
 79		return octeon_pcibios_map_irq(dev, slot, pin);
 80	else
 81		panic("octeon_pcibios_map_irq not set.");
 82}
 83
 84
 85/*
 86 * Called to perform platform specific PCI setup
 87 */
 88int pcibios_plat_dev_init(struct pci_dev *dev)
 89{
 90	uint16_t config;
 91	uint32_t dconfig;
 92	int pos;
 93	/*
 94	 * Force the Cache line setting to 64 bytes. The standard
 95	 * Linux bus scan doesn't seem to set it. Octeon really has
 96	 * 128 byte lines, but Intel bridges get really upset if you
 97	 * try and set values above 64 bytes. Value is specified in
 98	 * 32bit words.
 99	 */
100	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
101	/* Set latency timers for all devices */
102	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 48);
103
104	/* Enable reporting System errors and parity errors on all devices */
105	/* Enable parity checking and error reporting */
106	pci_read_config_word(dev, PCI_COMMAND, &config);
107	config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
108	pci_write_config_word(dev, PCI_COMMAND, config);
109
110	if (dev->subordinate) {
111		/* Set latency timers on sub bridges */
112		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 48);
113		/* More bridge error detection */
114		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
115		config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
116		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
117	}
118
119	/* Enable the PCIe normal error reporting */
120	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
121	if (pos) {
122		/* Update Device Control */
123		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config);
124		/* Correctable Error Reporting */
125		config |= PCI_EXP_DEVCTL_CERE;
126		/* Non-Fatal Error Reporting */
127		config |= PCI_EXP_DEVCTL_NFERE;
128		/* Fatal Error Reporting */
129		config |= PCI_EXP_DEVCTL_FERE;
130		/* Unsupported Request */
131		config |= PCI_EXP_DEVCTL_URRE;
132		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config);
133	}
134
135	/* Find the Advanced Error Reporting capability */
136	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
137	if (pos) {
138		/* Clear Uncorrectable Error Status */
139		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
140				      &dconfig);
141		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
142				       dconfig);
143		/* Enable reporting of all uncorrectable errors */
144		/* Uncorrectable Error Mask - turned on bits disable errors */
145		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
146		/*
147		 * Leave severity at HW default. This only controls if
148		 * errors are reported as uncorrectable or
149		 * correctable, not if the error is reported.
150		 */
151		/* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
152		/* Clear Correctable Error Status */
153		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
154		pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
155		/* Enable reporting of all correctable errors */
156		/* Correctable Error Mask - turned on bits disable errors */
157		pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
158		/* Advanced Error Capabilities */
159		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
160		/* ECRC Generation Enable */
161		if (config & PCI_ERR_CAP_ECRC_GENC)
162			config |= PCI_ERR_CAP_ECRC_GENE;
163		/* ECRC Check Enable */
164		if (config & PCI_ERR_CAP_ECRC_CHKC)
165			config |= PCI_ERR_CAP_ECRC_CHKE;
166		pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
167		/* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
168		/* Report all errors to the root complex */
169		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
170				       PCI_ERR_ROOT_CMD_COR_EN |
171				       PCI_ERR_ROOT_CMD_NONFATAL_EN |
172				       PCI_ERR_ROOT_CMD_FATAL_EN);
173		/* Clear the Root status register */
174		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
175		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
176	}
177
178	dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;
179
180	return 0;
181}
182
183/**
184 * Return the mapping of PCI device number to IRQ line. Each
185 * character in the return string represents the interrupt
186 * line for the device at that position. Device 1 maps to the
187 * first character, etc. The characters A-D are used for PCI
188 * interrupts.
189 *
190 * Returns PCI interrupt mapping
191 */
192const char *octeon_get_pci_interrupts(void)
193{
194	/*
195	 * Returning an empty string causes the interrupts to be
196	 * routed based on the PCI specification. From the PCI spec:
197	 *
198	 * INTA# of Device Number 0 is connected to IRQW on the system
199	 * board.  (Device Number has no significance regarding being
200	 * located on the system board or in a connector.) INTA# of
201	 * Device Number 1 is connected to IRQX on the system
202	 * board. INTA# of Device Number 2 is connected to IRQY on the
203	 * system board. INTA# of Device Number 3 is connected to IRQZ
204	 * on the system board. The table below describes how each
205	 * agent's INTx# lines are connected to the system board
206	 * interrupt lines. The following equation can be used to
207	 * determine to which INTx# signal on the system board a given
208	 * device's INTx# line(s) is connected.
209	 *
210	 * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0,
211	 * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I =
212	 * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and
213	 * INTD# = 3)
214	 */
 
 
215	switch (octeon_bootinfo->board_type) {
216	case CVMX_BOARD_TYPE_NAO38:
217		/* This is really the NAC38 */
218		return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA";
219	case CVMX_BOARD_TYPE_EBH3100:
220	case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
221	case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
222		return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
223	case CVMX_BOARD_TYPE_BBGW_REF:
224		return "AABCD";
 
 
225	case CVMX_BOARD_TYPE_THUNDER:
226	case CVMX_BOARD_TYPE_EBH3000:
227	default:
228		return "";
229	}
230}
231
232/**
233 * Map a PCI device to the appropriate interrupt line
234 *
235 * @dev:    The Linux PCI device structure for the device to map
236 * @slot:   The slot number for this device on __BUS 0__. Linux
237 *               enumerates through all the bridges and figures out the
238 *               slot on Bus 0 where this device eventually hooks to.
239 * @pin:    The PCI interrupt pin read from the device, then swizzled
240 *               as it goes through each bridge.
241 * Returns Interrupt number for the device
242 */
243int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
244				      u8 slot, u8 pin)
245{
246	int irq_num;
247	const char *interrupts;
248	int dev_num;
249
250	/* Get the board specific interrupt mapping */
251	interrupts = octeon_get_pci_interrupts();
252
253	dev_num = dev->devfn >> 3;
254	if (dev_num < strlen(interrupts))
255		irq_num = ((interrupts[dev_num] - 'A' + pin - 1) & 3) +
256			OCTEON_IRQ_PCI_INT0;
257	else
258		irq_num = ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0;
259	return irq_num;
260}
261
262
263/*
264 * Read a value from configuration space
265 */
266static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
267			      int reg, int size, u32 *val)
268{
269	union octeon_pci_address pci_addr;
270
271	pci_addr.u64 = 0;
272	pci_addr.s.upper = 2;
273	pci_addr.s.io = 1;
274	pci_addr.s.did = 3;
275	pci_addr.s.subdid = 1;
276	pci_addr.s.endian_swap = 1;
277	pci_addr.s.bus = bus->number;
278	pci_addr.s.dev = devfn >> 3;
279	pci_addr.s.func = devfn & 0x7;
280	pci_addr.s.reg = reg;
281
282#if PCI_CONFIG_SPACE_DELAY
283	udelay(PCI_CONFIG_SPACE_DELAY);
284#endif
285	switch (size) {
286	case 4:
287		*val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
288		return PCIBIOS_SUCCESSFUL;
289	case 2:
290		*val = le16_to_cpu(cvmx_read64_uint16(pci_addr.u64));
291		return PCIBIOS_SUCCESSFUL;
292	case 1:
293		*val = cvmx_read64_uint8(pci_addr.u64);
294		return PCIBIOS_SUCCESSFUL;
295	}
296	return PCIBIOS_FUNC_NOT_SUPPORTED;
297}
298
299
300/*
301 * Write a value to PCI configuration space
302 */
303static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
304			       int reg, int size, u32 val)
305{
306	union octeon_pci_address pci_addr;
307
308	pci_addr.u64 = 0;
309	pci_addr.s.upper = 2;
310	pci_addr.s.io = 1;
311	pci_addr.s.did = 3;
312	pci_addr.s.subdid = 1;
313	pci_addr.s.endian_swap = 1;
314	pci_addr.s.bus = bus->number;
315	pci_addr.s.dev = devfn >> 3;
316	pci_addr.s.func = devfn & 0x7;
317	pci_addr.s.reg = reg;
318
319#if PCI_CONFIG_SPACE_DELAY
320	udelay(PCI_CONFIG_SPACE_DELAY);
321#endif
322	switch (size) {
323	case 4:
324		cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
325		return PCIBIOS_SUCCESSFUL;
326	case 2:
327		cvmx_write64_uint16(pci_addr.u64, cpu_to_le16(val));
328		return PCIBIOS_SUCCESSFUL;
329	case 1:
330		cvmx_write64_uint8(pci_addr.u64, val);
331		return PCIBIOS_SUCCESSFUL;
332	}
333	return PCIBIOS_FUNC_NOT_SUPPORTED;
334}
335
336
337static struct pci_ops octeon_pci_ops = {
338	octeon_read_config,
339	octeon_write_config,
340};
341
342static struct resource octeon_pci_mem_resource = {
343	.start = 0,
344	.end = 0,
345	.name = "Octeon PCI MEM",
346	.flags = IORESOURCE_MEM,
347};
348
349/*
350 * PCI ports must be above 16KB so the ISA bus filtering in the PCI-X to PCI
351 * bridge
352 */
353static struct resource octeon_pci_io_resource = {
354	.start = 0x4000,
355	.end = OCTEON_PCI_IOSPACE_SIZE - 1,
356	.name = "Octeon PCI IO",
357	.flags = IORESOURCE_IO,
358};
359
360static struct pci_controller octeon_pci_controller = {
361	.pci_ops = &octeon_pci_ops,
362	.mem_resource = &octeon_pci_mem_resource,
363	.mem_offset = OCTEON_PCI_MEMSPACE_OFFSET,
364	.io_resource = &octeon_pci_io_resource,
365	.io_offset = 0,
366	.io_map_base = OCTEON_PCI_IOSPACE_BASE,
367};
368
369
/*
 * Low level initialize the Octeon PCI controller
 *
 * Resets the bus, configures BAR2 (and the big bars when enabled),
 * prints the detected bus mode/clock, and programs the PCI/PCI-X
 * configuration registers to Cavium-recommended values.  The register
 * write ordering below is mandated by the hardware — do not reorder.
 */
static void octeon_pci_initialize(void)
{
	union cvmx_pci_cfg01 cfg01;
	union cvmx_npi_ctl_status ctl_status;
	union cvmx_pci_ctl_status_2 ctl_status_2;
	union cvmx_pci_cfg19 cfg19;
	union cvmx_pci_cfg16 cfg16;
	union cvmx_pci_cfg22 cfg22;
	union cvmx_pci_cfg56 cfg56;

	/* Reset the PCI Bus */
	cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1);
	/* Read back so the write posts before the delay starts. */
	cvmx_read_csr(CVMX_CIU_SOFT_PRST);

	udelay(2000);		/* Hold PCI reset for 2 ms */

	ctl_status.u64 = 0;	/* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */
	ctl_status.s.max_word = 1;
	ctl_status.s.timer = 1;
	cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64);

	/* Deassert PCI reset and advertize PCX Host Mode Device Capability
	   (64b) */
	cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4);
	cvmx_read_csr(CVMX_CIU_SOFT_PRST);

	udelay(2000);		/* Wait 2 ms after deasserting PCI reset */

	ctl_status_2.u32 = 0;
	ctl_status_2.s.tsr_hwm = 1;	/* Initializes to 0.  Must be set
					   before any PCI reads. */
	ctl_status_2.s.bar2pres = 1;	/* Enable BAR2 */
	ctl_status_2.s.bar2_enb = 1;
	ctl_status_2.s.bar2_cax = 1;	/* Don't use L2 */
	ctl_status_2.s.bar2_esx = 1;
	ctl_status_2.s.pmo_amod = 1;	/* Round robin priority */
	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
		/* BAR1 hole */
		ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS;
		ctl_status_2.s.bb1_siz = 1;  /* BAR1 is 2GB */
		ctl_status_2.s.bb_ca = 1;    /* Don't use L2 with big bars */
		ctl_status_2.s.bb_es = 1;    /* Big bar in byte swap mode */
		ctl_status_2.s.bb1 = 1;      /* BAR1 is big */
		ctl_status_2.s.bb0 = 1;      /* BAR0 is big */
	}

	octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
	udelay(2000);		/* Wait 2 ms before doing PCI reads */

	/* Re-read to learn what mode (PCI vs PCI-X, 32 vs 64 bit) the
	   hardware actually negotiated. */
	ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2);
	pr_notice("PCI Status: %s %s-bit\n",
		  ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI",
		  ctl_status_2.s.ap_64ad ? "64" : "32");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		union cvmx_pci_cnt_reg cnt_reg_start;
		union cvmx_pci_cnt_reg cnt_reg_end;
		unsigned long cycles, pci_clock;

		/* Estimate the PCI clock by counting PCI cycles across
		   a ~1 ms window measured in core clock cycles. */
		cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
		cycles = read_c0_cvmcount();
		udelay(1000);
		cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
		cycles = read_c0_cvmcount() - cycles;
		pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) /
			    (cycles / (mips_hpt_frequency / 1000000));
		pr_notice("PCI Clock: %lu MHz\n", pci_clock);
	}

	/*
	 * TDOMC must be set to one in PCI mode. TDOMC should be set to 4
	 * in PCI-X mode to allow four outstanding splits. Otherwise,
	 * should not change from its reset value. Don't write PCI_CFG19
	 * in PCI mode (0x82000001 reset value), write it to 0x82000004
	 * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
	 * MRBCM -> must be one.
	 */
	if (ctl_status_2.s.ap_pcix) {
		cfg19.u32 = 0;
		/*
		 * Target Delayed/Split request outstanding maximum
		 * count. [1..31] and 0=32.  NOTE: If the user
		 * programs these bits beyond the Designed Maximum
		 * outstanding count, then the designed maximum table
		 * depth will be used instead.  No additional
		 * Deferred/Split transactions will be accepted if
		 * this outstanding maximum count is
		 * reached. Furthermore, no additional deferred/split
		 * transactions will be accepted if the I/O delay/ I/O
		 * Split Request outstanding maximum is reached.
		 */
		cfg19.s.tdomc = 4;
		/*
		 * Master Deferred Read Request Outstanding Max Count
		 * (PCI only).  CR4C[26:24] Max SAC cycles MAX DAC
		 * cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101
		 * 5 2 110 6 3 111 7 3 For example, if these bits are
		 * programmed to 100, the core can support 2 DAC
		 * cycles, 4 SAC cycles or a combination of 1 DAC and
		 * 2 SAC cycles. NOTE: For the PCI-X maximum
		 * outstanding split transactions, refer to
		 * CRE0[22:20].
		 */
		cfg19.s.mdrrmc = 2;
		/*
		 * Master Request (Memory Read) Byte Count/Byte Enable
		 * select. 0 = Byte Enables valid. In PCI mode, a
		 * burst transaction cannot be performed using Memory
		 * Read command=4?h6. 1 = DWORD Byte Count valid
		 * (default). In PCI Mode, the memory read byte
		 * enables are automatically generated by the
		 * core. Note: N3 Master Request transaction sizes are
		 * always determined through the
		 * am_attr[<35:32>|<7:0>] field.
		 */
		cfg19.s.mrbcm = 1;
		octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32);
	}


	cfg01.u32 = 0;
	cfg01.s.msae = 1;	/* Memory Space Access Enable */
	cfg01.s.me = 1;		/* Master Enable */
	cfg01.s.pee = 1;	/* PERR# Enable */
	cfg01.s.see = 1;	/* System Error Enable */
	cfg01.s.fbbe = 1;	/* Fast Back to Back Transaction Enable */

	octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);

#ifdef USE_OCTEON_INTERNAL_ARBITER
	/*
	 * When OCTEON is a PCI host, most systems will use OCTEON's
	 * internal arbiter, so must enable it before any PCI/PCI-X
	 * traffic can occur.
	 */
	{
		union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg;

		pci_int_arb_cfg.u64 = 0;
		pci_int_arb_cfg.s.en = 1;	/* Internal arbiter enable */
		cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64);
	}
#endif	/* USE_OCTEON_INTERNAL_ARBITER */

	/*
	 * Preferably written to 1 to set MLTD. [RDSATI,TRTAE,
	 * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to
	 * 1..7.
	 */
	cfg16.u32 = 0;
	cfg16.s.mltd = 1;	/* Master Latency Timer Disable */
	octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32);

	/*
	 * Should be written to 0x4ff00. MTTV -> must be zero.
	 * FLUSH -> must be 1. MRV -> should be 0xFF.
	 */
	cfg22.u32 = 0;
	/* Master Retry Value [1..255] and 0=infinite */
	cfg22.s.mrv = 0xff;
	/*
	 * AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper
	 * N3K operation.
	 */
	cfg22.s.flush = 1;
	octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32);

	/*
	 * MOST Indicates the maximum number of outstanding splits (in -1
	 * notation) when OCTEON is in PCI-X mode.  PCI-X performance is
	 * affected by the MOST selection.  Should generally be written
	 * with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807,
	 * depending on the desired MOST of 3, 2, 1, or 0, respectively.
	 */
	cfg56.u32 = 0;
	cfg56.s.pxcid = 7;	/* RO - PCI-X Capability ID */
	cfg56.s.ncp = 0xe8;	/* RO - Next Capability Pointer */
	cfg56.s.dpere = 1;	/* Data Parity Error Recovery Enable */
	cfg56.s.roe = 1;	/* Relaxed Ordering Enable */
	cfg56.s.mmbc = 1;	/* Maximum Memory Byte Count
				   [0=512B,1=1024B,2=2048B,3=4096B] */
	cfg56.s.most = 3;	/* Maximum outstanding Split transactions [0=1
				   .. 7=32] */

	octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32);

	/*
	 * Affects PCI performance when OCTEON services reads to its
	 * BAR1/BAR2. Refer to Section 10.6.1.  The recommended values are
	 * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
	 * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
	 * these values need to be changed so they won't possibly prefetch off
	 * of the end of memory if PCI is DMAing a buffer at the end of
	 * memory. Note that these values differ from their reset values.
	 */
	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21);
	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31);
	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31);
}
572
573
/*
 * Initialize the Octeon PCI controller
 *
 * Registered as an arch_initcall.  Selects the DMA BAR layout for
 * this chip, performs the low-level controller bring-up, programs the
 * BAR0/BAR1/BAR2 windows and the 32 movable BAR1 sub-mappings, then
 * registers the controller with the MIPS PCI core.  Returns 0 even
 * when PCI is absent or the chip is not the bus host, so boot
 * continues normally.
 */
static int __init octeon_pci_setup(void)
{
	union cvmx_npi_mem_access_subidx mem_access;
	int index;

	/* Only these chips have PCI */
	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
		return 0;

	/* Point pcibios_map_irq() to the PCI version of it */
	octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;

	/* Only use the big bars on chips that support it */
	if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL;
	else
		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;

	/* PCI I/O and PCI MEM values */
	set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
	ioport_resource.start = 0;
	ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1;
	if (!octeon_is_pci_host()) {
		pr_notice("Not in host mode, PCI Controller not initialized\n");
		return 0;
	}

	pr_notice("%s Octeon big bar support\n",
		  (octeon_dma_bar_type ==
		  OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling");

	octeon_pci_initialize();

	/* Configure outbound memory accesses: endian-swap only, no
	   snooping or relaxed ordering. */
	mem_access.u64 = 0;
	mem_access.s.esr = 1;	/* Endian-Swap on read. */
	mem_access.s.esw = 1;	/* Endian-Swap on write. */
	mem_access.s.nsr = 0;	/* No-Snoop on read. */
	mem_access.s.nsw = 0;	/* No-Snoop on write. */
	mem_access.s.ror = 0;	/* Relax Read on read. */
	mem_access.s.row = 0;	/* Relax Order on write. */
	mem_access.s.ba = 0;	/* PCI Address bits [63:36]. */
	cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64);

	/*
	 * Remap the Octeon BAR 2 above all 32 bit devices
	 * (0x8000000000ul).  This is done here so it is remapped
	 * before the readl()'s below. We don't want BAR2 overlapping
	 * with BAR0/BAR1 during these reads.
	 */
	octeon_npi_write32(CVMX_NPI_PCI_CFG08,
			   (u32)(OCTEON_BAR2_PCI_ADDRESS & 0xffffffffull));
	octeon_npi_write32(CVMX_NPI_PCI_CFG09,
			   (u32)(OCTEON_BAR2_PCI_ADDRESS >> 32));

	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
		/* Remap the Octeon BAR 0 to 0-2GB */
		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0);
		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);

		/*
		 * Remap the Octeon BAR 1 to map 2GB-4GB (minus the
		 * BAR 1 hole).
		 */
		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);

		/* BAR1 movable mappings set for identity mapping */
		octeon_bar1_pci_phys = 0x80000000ull;
		for (index = 0; index < 32; index++) {
			union cvmx_pci_bar1_indexx bar1_index;

			bar1_index.u32 = 0;
			/* Address bits[35:22] sent to L2C */
			bar1_index.s.addr_idx =
				(octeon_bar1_pci_phys >> 22) + index;
			/* Don't put PCI accesses in L2. */
			bar1_index.s.ca = 1;
			/* Endian Swap Mode */
			bar1_index.s.end_swp = 1;
			/* Set '1' when the selected address range is valid. */
			bar1_index.s.addr_v = 1;
			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
					   bar1_index.u32);
		}

		/* Devices go after BAR1 */
		octeon_pci_mem_resource.start =
			OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
			(OCTEON_PCI_BAR1_HOLE_SIZE << 20);
		octeon_pci_mem_resource.end =
			octeon_pci_mem_resource.start + (1ul << 30);
	} else {
		/* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */
		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20);
		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);

		/* Remap the Octeon BAR 1 to map 0-128MB */
		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);

		/* BAR1 movable regions contiguous to cover the swiotlb */
		octeon_bar1_pci_phys =
			virt_to_phys(octeon_swiotlb) & ~((1ull << 22) - 1);

		for (index = 0; index < 32; index++) {
			union cvmx_pci_bar1_indexx bar1_index;

			bar1_index.u32 = 0;
			/* Address bits[35:22] sent to L2C */
			bar1_index.s.addr_idx =
				(octeon_bar1_pci_phys >> 22) + index;
			/* Don't put PCI accesses in L2. */
			bar1_index.s.ca = 1;
			/* Endian Swap Mode */
			bar1_index.s.end_swp = 1;
			/* Set '1' when the selected address range is valid. */
			bar1_index.s.addr_v = 1;
			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
					   bar1_index.u32);
		}

		/* Devices go after BAR0 */
		octeon_pci_mem_resource.start =
			OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
			(4ul << 10);
		octeon_pci_mem_resource.end =
			octeon_pci_mem_resource.start + (1ul << 30);
	}

	register_pci_controller(&octeon_pci_controller);

	/*
	 * Clear any errors that might be pending from before the bus
	 * was setup properly.
	 */
	cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);

	octeon_pci_dma_init();

	return 0;
}

arch_initcall(octeon_pci_setup);
[Second copy of the same file follows, as of kernel v6.13.7; it is truncated
partway through by the end of this chunk.]
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2005-2009 Cavium Networks
  7 */
  8#include <linux/kernel.h>
  9#include <linux/init.h>
 10#include <linux/pci.h>
 11#include <linux/interrupt.h>
 12#include <linux/time.h>
 13#include <linux/delay.h>
 14#include <linux/platform_device.h>
 15#include <linux/swiotlb.h>
 16
 17#include <asm/time.h>
 18
 19#include <asm/octeon/octeon.h>
 20#include <asm/octeon/cvmx-npi-defs.h>
 21#include <asm/octeon/cvmx-pci-defs.h>
 22#include <asm/octeon/pci-octeon.h>
 23
 
 
 24#define USE_OCTEON_INTERNAL_ARBITER
 25
 26/*
 27 * Octeon's PCI controller uses did=3, subdid=2 for PCI IO
 28 * addresses. Use PCI endian swapping 1 so no address swapping is
 29 * necessary. The Linux io routines will endian swap the data.
 30 */
 31#define OCTEON_PCI_IOSPACE_BASE	    0x80011a0400000000ull
 32#define OCTEON_PCI_IOSPACE_SIZE	    (1ull<<32)
 33
 34/* Octeon't PCI controller uses did=3, subdid=3 for PCI memory. */
 35#define OCTEON_PCI_MEMSPACE_OFFSET  (0x00011b0000000000ull)
 36
 37u64 octeon_bar1_pci_phys;
 38
/**
 * This is the bit decoding used for the Octeon PCI controller addresses
 *
 * NOTE(review): bitfield members are listed most-significant-first,
 * which matches a big-endian MIPS build — confirm if this is ever
 * compiled for another endianness.
 */
union octeon_pci_address {
	uint64_t u64;			/* raw 64-bit bus address */
	struct {
		uint64_t upper:2;	/* set to 2 by the config accessors */
		uint64_t reserved:13;
		uint64_t io:1;		/* set to 1 for config space cycles */
		uint64_t did:5;		/* device id; 3 selects the PCI controller */
		uint64_t subdid:3;	/* sub-did; 1 used for config accesses */
		uint64_t reserved2:4;
		uint64_t endian_swap:2;	/* 1 = swap data only (see file-top comment) */
		uint64_t reserved3:10;
		uint64_t bus:8;		/* PCI bus number */
		uint64_t dev:5;		/* PCI device number (devfn >> 3) */
		uint64_t func:3;	/* PCI function number (devfn & 7) */
		uint64_t reg:8;		/* config space register offset */
	} s;
};
 59
/* Board-specific IRQ routing hook; pcibios_map_irq() panics if left unset. */
int (*octeon_pcibios_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);

/* Which DMA BAR layout is in use; selected during PCI setup. */
enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
 62
 63/**
 64 * Map a PCI device to the appropriate interrupt line
 65 *
 66 * @dev:    The Linux PCI device structure for the device to map
 67 * @slot:   The slot number for this device on __BUS 0__. Linux
 68 *		 enumerates through all the bridges and figures out the
 69 *		 slot on Bus 0 where this device eventually hooks to.
 70 * @pin:    The PCI interrupt pin read from the device, then swizzled
 71 *		 as it goes through each bridge.
 72 * Returns Interrupt number for the device
 73 */
 74int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 75{
 76	if (octeon_pcibios_map_irq)
 77		return octeon_pcibios_map_irq(dev, slot, pin);
 78	else
 79		panic("octeon_pcibios_map_irq not set.");
 80}
 81
 82
 83/*
 84 * Called to perform platform specific PCI setup
 85 */
 86int pcibios_plat_dev_init(struct pci_dev *dev)
 87{
 88	uint16_t config;
 89	uint32_t dconfig;
 90	int pos;
 91	/*
 92	 * Force the Cache line setting to 64 bytes. The standard
 93	 * Linux bus scan doesn't seem to set it. Octeon really has
 94	 * 128 byte lines, but Intel bridges get really upset if you
 95	 * try and set values above 64 bytes. Value is specified in
 96	 * 32bit words.
 97	 */
 98	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
 99	/* Set latency timers for all devices */
100	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
101
102	/* Enable reporting System errors and parity errors on all devices */
103	/* Enable parity checking and error reporting */
104	pci_read_config_word(dev, PCI_COMMAND, &config);
105	config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
106	pci_write_config_word(dev, PCI_COMMAND, config);
107
108	if (dev->subordinate) {
109		/* Set latency timers on sub bridges */
110		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 64);
111		/* More bridge error detection */
112		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
113		config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
114		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
115	}
116
117	/* Enable the PCIe normal error reporting */
118	config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */
119	config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */
120	config |= PCI_EXP_DEVCTL_FERE;	/* Fatal Error Reporting */
121	config |= PCI_EXP_DEVCTL_URRE;	/* Unsupported Request */
122	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config);
 
 
 
 
 
 
 
 
 
123
124	/* Find the Advanced Error Reporting capability */
125	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
126	if (pos) {
127		/* Clear Uncorrectable Error Status */
128		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
129				      &dconfig);
130		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
131				       dconfig);
132		/* Enable reporting of all uncorrectable errors */
133		/* Uncorrectable Error Mask - turned on bits disable errors */
134		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
135		/*
136		 * Leave severity at HW default. This only controls if
137		 * errors are reported as uncorrectable or
138		 * correctable, not if the error is reported.
139		 */
140		/* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
141		/* Clear Correctable Error Status */
142		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
143		pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
144		/* Enable reporting of all correctable errors */
145		/* Correctable Error Mask - turned on bits disable errors */
146		pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
147		/* Advanced Error Capabilities */
148		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
149		/* ECRC Generation Enable */
150		if (config & PCI_ERR_CAP_ECRC_GENC)
151			config |= PCI_ERR_CAP_ECRC_GENE;
152		/* ECRC Check Enable */
153		if (config & PCI_ERR_CAP_ECRC_CHKC)
154			config |= PCI_ERR_CAP_ECRC_CHKE;
155		pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
156		/* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
157		/* Report all errors to the root complex */
158		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
159				       PCI_ERR_ROOT_CMD_COR_EN |
160				       PCI_ERR_ROOT_CMD_NONFATAL_EN |
161				       PCI_ERR_ROOT_CMD_FATAL_EN);
162		/* Clear the Root status register */
163		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
164		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
165	}
166
 
 
167	return 0;
168}
169
170/**
171 * Return the mapping of PCI device number to IRQ line. Each
172 * character in the return string represents the interrupt
173 * line for the device at that position. Device 1 maps to the
174 * first character, etc. The characters A-D are used for PCI
175 * interrupts.
176 *
177 * Returns PCI interrupt mapping
178 */
179const char *octeon_get_pci_interrupts(void)
180{
181	/*
182	 * Returning an empty string causes the interrupts to be
183	 * routed based on the PCI specification. From the PCI spec:
184	 *
185	 * INTA# of Device Number 0 is connected to IRQW on the system
186	 * board.  (Device Number has no significance regarding being
187	 * located on the system board or in a connector.) INTA# of
188	 * Device Number 1 is connected to IRQX on the system
189	 * board. INTA# of Device Number 2 is connected to IRQY on the
190	 * system board. INTA# of Device Number 3 is connected to IRQZ
191	 * on the system board. The table below describes how each
192	 * agent's INTx# lines are connected to the system board
193	 * interrupt lines. The following equation can be used to
194	 * determine to which INTx# signal on the system board a given
195	 * device's INTx# line(s) is connected.
196	 *
197	 * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0,
198	 * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I =
199	 * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and
200	 * INTD# = 3)
201	 */
202	if (of_machine_is_compatible("dlink,dsr-500n"))
203		return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
204	switch (octeon_bootinfo->board_type) {
205	case CVMX_BOARD_TYPE_NAO38:
206		/* This is really the NAC38 */
207		return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA";
208	case CVMX_BOARD_TYPE_EBH3100:
209	case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
210	case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
211		return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
212	case CVMX_BOARD_TYPE_BBGW_REF:
213		return "AABCD";
214	case CVMX_BOARD_TYPE_CUST_DSR1000N:
215		return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
216	case CVMX_BOARD_TYPE_THUNDER:
217	case CVMX_BOARD_TYPE_EBH3000:
218	default:
219		return "";
220	}
221}
222
223/**
224 * Map a PCI device to the appropriate interrupt line
225 *
226 * @dev:    The Linux PCI device structure for the device to map
227 * @slot:   The slot number for this device on __BUS 0__. Linux
228 *		 enumerates through all the bridges and figures out the
229 *		 slot on Bus 0 where this device eventually hooks to.
230 * @pin:    The PCI interrupt pin read from the device, then swizzled
231 *		 as it goes through each bridge.
232 * Returns Interrupt number for the device
233 */
234int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
235				      u8 slot, u8 pin)
236{
237	int irq_num;
238	const char *interrupts;
239	int dev_num;
240
241	/* Get the board specific interrupt mapping */
242	interrupts = octeon_get_pci_interrupts();
243
244	dev_num = dev->devfn >> 3;
245	if (dev_num < strlen(interrupts))
246		irq_num = ((interrupts[dev_num] - 'A' + pin - 1) & 3) +
247			OCTEON_IRQ_PCI_INT0;
248	else
249		irq_num = ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0;
250	return irq_num;
251}
252
253
254/*
255 * Read a value from configuration space
256 */
257static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
258			      int reg, int size, u32 *val)
259{
260	union octeon_pci_address pci_addr;
261
262	pci_addr.u64 = 0;
263	pci_addr.s.upper = 2;
264	pci_addr.s.io = 1;
265	pci_addr.s.did = 3;
266	pci_addr.s.subdid = 1;
267	pci_addr.s.endian_swap = 1;
268	pci_addr.s.bus = bus->number;
269	pci_addr.s.dev = devfn >> 3;
270	pci_addr.s.func = devfn & 0x7;
271	pci_addr.s.reg = reg;
272
 
 
 
273	switch (size) {
274	case 4:
275		*val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
276		return PCIBIOS_SUCCESSFUL;
277	case 2:
278		*val = le16_to_cpu(cvmx_read64_uint16(pci_addr.u64));
279		return PCIBIOS_SUCCESSFUL;
280	case 1:
281		*val = cvmx_read64_uint8(pci_addr.u64);
282		return PCIBIOS_SUCCESSFUL;
283	}
284	return PCIBIOS_FUNC_NOT_SUPPORTED;
285}
286
287
288/*
289 * Write a value to PCI configuration space
290 */
291static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
292			       int reg, int size, u32 val)
293{
294	union octeon_pci_address pci_addr;
295
296	pci_addr.u64 = 0;
297	pci_addr.s.upper = 2;
298	pci_addr.s.io = 1;
299	pci_addr.s.did = 3;
300	pci_addr.s.subdid = 1;
301	pci_addr.s.endian_swap = 1;
302	pci_addr.s.bus = bus->number;
303	pci_addr.s.dev = devfn >> 3;
304	pci_addr.s.func = devfn & 0x7;
305	pci_addr.s.reg = reg;
306
 
 
 
307	switch (size) {
308	case 4:
309		cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
310		return PCIBIOS_SUCCESSFUL;
311	case 2:
312		cvmx_write64_uint16(pci_addr.u64, cpu_to_le16(val));
313		return PCIBIOS_SUCCESSFUL;
314	case 1:
315		cvmx_write64_uint8(pci_addr.u64, val);
316		return PCIBIOS_SUCCESSFUL;
317	}
318	return PCIBIOS_FUNC_NOT_SUPPORTED;
319}
320
321
/* Config-space accessors handed to the generic PCI core. */
static struct pci_ops octeon_pci_ops = {
	.read	= octeon_read_config,
	.write	= octeon_write_config,
};
326
/*
 * PCI memory window. start/end are zero here and are filled in by
 * octeon_pci_setup() once the BAR layout (big vs small bars) is known.
 */
static struct resource octeon_pci_mem_resource = {
	.start = 0,
	.end = 0,
	.name = "Octeon PCI MEM",
	.flags = IORESOURCE_MEM,
};
333
334/*
335 * PCI ports must be above 16KB so the ISA bus filtering in the PCI-X to PCI
336 * bridge
337 */
static struct resource octeon_pci_io_resource = {
	.start = 0x4000,	/* 16KB; ports below this are left to the ISA filter */
	.end = OCTEON_PCI_IOSPACE_SIZE - 1,
	.name = "Octeon PCI IO",
	.flags = IORESOURCE_IO,
};
344
/*
 * Controller description registered with the MIPS PCI core. Memory
 * accesses are offset into Octeon's PCI memory space (did=3, subdid=3,
 * see OCTEON_PCI_MEMSPACE_OFFSET) and I/O accesses map linearly from
 * OCTEON_PCI_IOSPACE_BASE.
 */
static struct pci_controller octeon_pci_controller = {
	.pci_ops = &octeon_pci_ops,
	.mem_resource = &octeon_pci_mem_resource,
	.mem_offset = OCTEON_PCI_MEMSPACE_OFFSET,
	.io_resource = &octeon_pci_io_resource,
	.io_offset = 0,
	.io_map_base = OCTEON_PCI_IOSPACE_BASE,
};
353
354
355/*
356 * Low level initialize the Octeon PCI controller
357 */
static void octeon_pci_initialize(void)
{
	union cvmx_pci_cfg01 cfg01;
	union cvmx_npi_ctl_status ctl_status;
	union cvmx_pci_ctl_status_2 ctl_status_2;
	union cvmx_pci_cfg19 cfg19;
	union cvmx_pci_cfg16 cfg16;
	union cvmx_pci_cfg22 cfg22;
	union cvmx_pci_cfg56 cfg56;

	/* Reset the PCI Bus */
	cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1);
	/* NOTE(review): read-back presumably flushes the posted CSR write
	   before the delay below -- confirm against the HRM. */
	cvmx_read_csr(CVMX_CIU_SOFT_PRST);

	udelay(2000);		/* Hold PCI reset for 2 ms */

	ctl_status.u64 = 0;	/* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */
	ctl_status.s.max_word = 1;
	ctl_status.s.timer = 1;
	cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64);

	/* Deassert PCI reset and advertise PCX Host Mode Device Capability
	   (64b) */
	cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4);
	cvmx_read_csr(CVMX_CIU_SOFT_PRST);

	udelay(2000);		/* Wait 2 ms after deasserting PCI reset */

	ctl_status_2.u32 = 0;
	ctl_status_2.s.tsr_hwm = 1;	/* Initializes to 0.  Must be set
					   before any PCI reads. */
	ctl_status_2.s.bar2pres = 1;	/* Enable BAR2 */
	ctl_status_2.s.bar2_enb = 1;
	ctl_status_2.s.bar2_cax = 1;	/* Don't use L2 */
	ctl_status_2.s.bar2_esx = 1;
	ctl_status_2.s.pmo_amod = 1;	/* Round robin priority */
	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
		/* BAR1 hole */
		ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS;
		ctl_status_2.s.bb1_siz = 1;  /* BAR1 is 2GB */
		ctl_status_2.s.bb_ca = 1;    /* Don't use L2 with big bars */
		ctl_status_2.s.bb_es = 1;    /* Big bar in byte swap mode */
		ctl_status_2.s.bb1 = 1;	     /* BAR1 is big */
		ctl_status_2.s.bb0 = 1;	     /* BAR0 is big */
	}

	octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
	udelay(2000);		/* Wait 2 ms before doing PCI reads */

	/* Read back to learn the negotiated bus mode (PCI vs PCI-X, 32/64). */
	ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2);
	pr_notice("PCI Status: %s %s-bit\n",
		  ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI",
		  ctl_status_2.s.ap_64ad ? "64" : "32");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		union cvmx_pci_cnt_reg cnt_reg_start;
		union cvmx_pci_cnt_reg cnt_reg_end;
		unsigned long cycles, pci_clock;

		/* Estimate the PCI clock by counting PCI clocks against
		   core cycles across a ~1 ms window. */
		cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
		cycles = read_c0_cvmcount();
		udelay(1000);
		cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
		cycles = read_c0_cvmcount() - cycles;
		pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) /
			    (cycles / (mips_hpt_frequency / 1000000));
		pr_notice("PCI Clock: %lu MHz\n", pci_clock);
	}

	/*
	 * TDOMC must be set to one in PCI mode. TDOMC should be set to 4
	 * in PCI-X mode to allow four outstanding splits. Otherwise,
	 * should not change from its reset value. Don't write PCI_CFG19
	 * in PCI mode (0x82000001 reset value), write it to 0x82000004
	 * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
	 * MRBCM -> must be one.
	 */
	if (ctl_status_2.s.ap_pcix) {
		cfg19.u32 = 0;
		/*
		 * Target Delayed/Split request outstanding maximum
		 * count. [1..31] and 0=32.  NOTE: If the user
		 * programs these bits beyond the Designed Maximum
		 * outstanding count, then the designed maximum table
		 * depth will be used instead.	No additional
		 * Deferred/Split transactions will be accepted if
		 * this outstanding maximum count is
		 * reached. Furthermore, no additional deferred/split
		 * transactions will be accepted if the I/O delay/ I/O
		 * Split Request outstanding maximum is reached.
		 */
		cfg19.s.tdomc = 4;
		/*
		 * Master Deferred Read Request Outstanding Max Count
		 * (PCI only).	CR4C[26:24] Max SAC cycles MAX DAC
		 * cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101
		 * 5 2 110 6 3 111 7 3 For example, if these bits are
		 * programmed to 100, the core can support 2 DAC
		 * cycles, 4 SAC cycles or a combination of 1 DAC and
		 * 2 SAC cycles. NOTE: For the PCI-X maximum
		 * outstanding split transactions, refer to
		 * CRE0[22:20].
		 */
		cfg19.s.mdrrmc = 2;
		/*
		 * Master Request (Memory Read) Byte Count/Byte Enable
		 * select. 0 = Byte Enables valid. In PCI mode, a
		 * burst transaction cannot be performed using Memory
		 * Read command=4?h6. 1 = DWORD Byte Count valid
		 * (default). In PCI Mode, the memory read byte
		 * enables are automatically generated by the
		 * core. Note: N3 Master Request transaction sizes are
		 * always determined through the
		 * am_attr[<35:32>|<7:0>] field.
		 */
		cfg19.s.mrbcm = 1;
		octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32);
	}


	cfg01.u32 = 0;
	cfg01.s.msae = 1;	/* Memory Space Access Enable */
	cfg01.s.me = 1;		/* Master Enable */
	cfg01.s.pee = 1;	/* PERR# Enable */
	cfg01.s.see = 1;	/* System Error Enable */
	cfg01.s.fbbe = 1;	/* Fast Back to Back Transaction Enable */

	octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);

#ifdef USE_OCTEON_INTERNAL_ARBITER
	/*
	 * When OCTEON is a PCI host, most systems will use OCTEON's
	 * internal arbiter, so must enable it before any PCI/PCI-X
	 * traffic can occur.
	 */
	{
		union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg;

		pci_int_arb_cfg.u64 = 0;
		pci_int_arb_cfg.s.en = 1;	/* Internal arbiter enable */
		cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64);
	}
#endif	/* USE_OCTEON_INTERNAL_ARBITER */

	/*
	 * Preferably written to 1 to set MLTD. [RDSATI,TRTAE,
	 * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to
	 * 1..7.
	 */
	cfg16.u32 = 0;
	cfg16.s.mltd = 1;	/* Master Latency Timer Disable */
	octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32);

	/*
	 * Should be written to 0x4ff00. MTTV -> must be zero.
	 * FLUSH -> must be 1. MRV -> should be 0xFF.
	 */
	cfg22.u32 = 0;
	/* Master Retry Value [1..255] and 0=infinite */
	cfg22.s.mrv = 0xff;
	/*
	 * AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper
	 * N3K operation.
	 */
	cfg22.s.flush = 1;
	octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32);

	/*
	 * MOST Indicates the maximum number of outstanding splits (in -1
	 * notation) when OCTEON is in PCI-X mode.  PCI-X performance is
	 * affected by the MOST selection.  Should generally be written
	 * with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807,
	 * depending on the desired MOST of 3, 2, 1, or 0, respectively.
	 */
	cfg56.u32 = 0;
	cfg56.s.pxcid = 7;	/* RO - PCI-X Capability ID */
	cfg56.s.ncp = 0xe8;	/* RO - Next Capability Pointer */
	cfg56.s.dpere = 1;	/* Data Parity Error Recovery Enable */
	cfg56.s.roe = 1;	/* Relaxed Ordering Enable */
	cfg56.s.mmbc = 1;	/* Maximum Memory Byte Count
				   [0=512B,1=1024B,2=2048B,3=4096B] */
	cfg56.s.most = 3;	/* Maximum outstanding Split transactions [0=1
				   .. 7=32] */

	octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32);

	/*
	 * Affects PCI performance when OCTEON services reads to its
	 * BAR1/BAR2. Refer to Section 10.6.1.	The recommended values are
	 * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
	 * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
	 * these values need to be changed so they won't possibly prefetch off
	 * of the end of memory if PCI is DMAing a buffer at the end of
	 * memory. Note that these values differ from their reset values.
	 */
	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21);
	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31);
	octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31);
}
557
558
559/*
560 * Initialize the Octeon PCI controller
561 */
562static int __init octeon_pci_setup(void)
563{
564	union cvmx_npi_mem_access_subidx mem_access;
565	int index;
566
567	/* Only these chips have PCI */
568	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
569		return 0;
570
571	if (!octeon_is_pci_host()) {
572		pr_notice("Not in host mode, PCI Controller not initialized\n");
573		return 0;
574	}
575
576	/* Point pcibios_map_irq() to the PCI version of it */
577	octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
578
579	/* Only use the big bars on chips that support it */
580	if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
581	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
582	    OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
583		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL;
584	else
585		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
586
587	/* PCI I/O and PCI MEM values */
588	set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
589	ioport_resource.start = 0;
590	ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1;
 
 
 
 
591
592	pr_notice("%s Octeon big bar support\n",
593		  (octeon_dma_bar_type ==
594		  OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling");
595
596	octeon_pci_initialize();
597
598	mem_access.u64 = 0;
599	mem_access.s.esr = 1;	/* Endian-Swap on read. */
600	mem_access.s.esw = 1;	/* Endian-Swap on write. */
601	mem_access.s.nsr = 0;	/* No-Snoop on read. */
602	mem_access.s.nsw = 0;	/* No-Snoop on write. */
603	mem_access.s.ror = 0;	/* Relax Read on read. */
604	mem_access.s.row = 0;	/* Relax Order on write. */
605	mem_access.s.ba = 0;	/* PCI Address bits [63:36]. */
606	cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64);
607
608	/*
609	 * Remap the Octeon BAR 2 above all 32 bit devices
610	 * (0x8000000000ul).  This is done here so it is remapped
611	 * before the readl()'s below. We don't want BAR2 overlapping
612	 * with BAR0/BAR1 during these reads.
613	 */
614	octeon_npi_write32(CVMX_NPI_PCI_CFG08,
615			   (u32)(OCTEON_BAR2_PCI_ADDRESS & 0xffffffffull));
616	octeon_npi_write32(CVMX_NPI_PCI_CFG09,
617			   (u32)(OCTEON_BAR2_PCI_ADDRESS >> 32));
618
619	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
620		/* Remap the Octeon BAR 0 to 0-2GB */
621		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0);
622		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
623
624		/*
625		 * Remap the Octeon BAR 1 to map 2GB-4GB (minus the
626		 * BAR 1 hole).
627		 */
628		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
629		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
630
631		/* BAR1 movable mappings set for identity mapping */
632		octeon_bar1_pci_phys = 0x80000000ull;
633		for (index = 0; index < 32; index++) {
634			union cvmx_pci_bar1_indexx bar1_index;
635
636			bar1_index.u32 = 0;
637			/* Address bits[35:22] sent to L2C */
638			bar1_index.s.addr_idx =
639				(octeon_bar1_pci_phys >> 22) + index;
640			/* Don't put PCI accesses in L2. */
641			bar1_index.s.ca = 1;
642			/* Endian Swap Mode */
643			bar1_index.s.end_swp = 1;
644			/* Set '1' when the selected address range is valid. */
645			bar1_index.s.addr_v = 1;
646			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
647					   bar1_index.u32);
648		}
649
650		/* Devices go after BAR1 */
651		octeon_pci_mem_resource.start =
652			OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
653			(OCTEON_PCI_BAR1_HOLE_SIZE << 20);
654		octeon_pci_mem_resource.end =
655			octeon_pci_mem_resource.start + (1ul << 30);
656	} else {
657		/* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */
658		octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20);
659		octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
660
661		/* Remap the Octeon BAR 1 to map 0-128MB */
662		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
663		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
664
665		/* BAR1 movable regions contiguous to cover the swiotlb */
666		octeon_bar1_pci_phys =
667			default_swiotlb_base() & ~((1ull << 22) - 1);
668
669		for (index = 0; index < 32; index++) {
670			union cvmx_pci_bar1_indexx bar1_index;
671
672			bar1_index.u32 = 0;
673			/* Address bits[35:22] sent to L2C */
674			bar1_index.s.addr_idx =
675				(octeon_bar1_pci_phys >> 22) + index;
676			/* Don't put PCI accesses in L2. */
677			bar1_index.s.ca = 1;
678			/* Endian Swap Mode */
679			bar1_index.s.end_swp = 1;
680			/* Set '1' when the selected address range is valid. */
681			bar1_index.s.addr_v = 1;
682			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
683					   bar1_index.u32);
684		}
685
686		/* Devices go after BAR0 */
687		octeon_pci_mem_resource.start =
688			OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
689			(4ul << 10);
690		octeon_pci_mem_resource.end =
691			octeon_pci_mem_resource.start + (1ul << 30);
692	}
693
694	register_pci_controller(&octeon_pci_controller);
695
696	/*
697	 * Clear any errors that might be pending from before the bus
698	 * was setup properly.
699	 */
700	cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);
701
702	if (IS_ERR(platform_device_register_simple("octeon_pci_edac",
703						   -1, NULL, 0)))
704		pr_err("Registration of co_pci_edac failed!\n");
705
706	octeon_pci_dma_init();
707
708	return 0;
709}
710
711arch_initcall(octeon_pci_setup);