Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.5.6.
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * DesignWare application register space functions for Keystone PCI controller
  4 *
  5 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
  6 *		http://www.ti.com
  7 *
  8 * Author: Murali Karicheri <m-karicheri2@ti.com>
  9 */
 10
 11#include <linux/irq.h>
 12#include <linux/irqdomain.h>
 13#include <linux/irqreturn.h>
 14#include <linux/module.h>
 15#include <linux/of.h>
 16#include <linux/of_pci.h>
 17#include <linux/pci.h>
 18#include <linux/platform_device.h>
 19
 20#include "pcie-designware.h"
 21#include "pci-keystone.h"
 22
/* Application register defines */
#define LTSSM_EN_VAL		        1	/* CMD_STATUS: LTSSM (link training) enable */
#define LTSSM_STATE_MASK		0x1f	/* DEBUG0: LTSSM state field mask */
#define LTSSM_STATE_L0			0x11	/* LTSSM state meaning link up (L0) */
#define DBI_CS2_EN_VAL			0x20	/* CMD_STATUS: DBI CS2 mode (BAR mask access) */
#define OB_XLAT_EN_VAL		        2	/* CMD_STATUS: outbound translation enable */
 29
 30/* Application registers */
 31#define CMD_STATUS			0x004
 32#define CFG_SETUP			0x008
 33#define OB_SIZE				0x030
 34#define CFG_PCIM_WIN_SZ_IDX		3
 35#define CFG_PCIM_WIN_CNT		32
 36#define SPACE0_REMOTE_CFG_OFFSET	0x1000
 37#define OB_OFFSET_INDEX(n)		(0x200 + (8 * n))
 38#define OB_OFFSET_HI(n)			(0x204 + (8 * n))
 39
 40/* IRQ register defines */
 41#define IRQ_EOI				0x050
 42#define IRQ_STATUS			0x184
 43#define IRQ_ENABLE_SET			0x188
 44#define IRQ_ENABLE_CLR			0x18c
 45
 46#define MSI_IRQ				0x054
 47#define MSI0_IRQ_STATUS			0x104
 48#define MSI0_IRQ_ENABLE_SET		0x108
 49#define MSI0_IRQ_ENABLE_CLR		0x10c
 50#define IRQ_STATUS			0x184
 51#define MSI_IRQ_OFFSET			4
 52
/* Error IRQ bits */
#define ERR_AER		BIT(5)	/* ECRC error */
#define ERR_AXI		BIT(4)	/* AXI tag lookup fatal error */
#define ERR_CORR	BIT(3)	/* Correctable error */
#define ERR_NONFATAL	BIT(2)	/* Non-fatal error */
#define ERR_FATAL	BIT(1)	/* Fatal error */
#define ERR_SYS		BIT(0)	/* System (fatal, non-fatal, or correctable) */
#define ERR_IRQ_ALL	(ERR_AER | ERR_AXI | ERR_CORR | \
			 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
#define ERR_FATAL_IRQ	(ERR_FATAL | ERR_AXI)
/* Error IRQ status/enable application registers */
#define ERR_IRQ_STATUS_RAW		0x1c0	/* unmasked status */
#define ERR_IRQ_STATUS			0x1c4	/* status, bits are RW1C */
#define ERR_IRQ_ENABLE_SET		0x1c8
#define ERR_IRQ_ENABLE_CLR		0x1cc
 67
/* Config space registers */
#define DEBUG0				0x728	/* holds LTSSM state, see ks_dw_pcie_link_up() */

/* Fetch the keystone_pcie back-pointer stashed in the device's drvdata */
#define to_keystone_pcie(x)	dev_get_drvdata((x)->dev)
 72
 73static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
 74					     u32 *bit_pos)
 75{
 76	*reg_offset = offset % 8;
 77	*bit_pos = offset >> 3;
 78}
 79
 80phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
 81{
 82	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 83	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 84
 85	return ks_pcie->app.start + MSI_IRQ;
 86}
 87
 88static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
 89{
 90	return readl(ks_pcie->va_app_base + offset);
 91}
 92
 93static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
 94{
 95	writel(val, ks_pcie->va_app_base + offset);
 96}
 97
 98void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
 99{
100	struct dw_pcie *pci = ks_pcie->pci;
101	struct pcie_port *pp = &pci->pp;
102	struct device *dev = pci->dev;
103	u32 pending, vector;
104	int src, virq;
105
106	pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
107
108	/*
109	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
110	 * shows 1, 9, 17, 25 and so forth
111	 */
112	for (src = 0; src < 4; src++) {
113		if (BIT(src) & pending) {
114			vector = offset + (src << 3);
115			virq = irq_linear_revmap(pp->irq_domain, vector);
116			dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
117				src, vector, virq);
118			generic_handle_irq(virq);
119		}
120	}
121}
122
123void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
124{
125	u32 reg_offset, bit_pos;
126	struct keystone_pcie *ks_pcie;
127	struct dw_pcie *pci;
128
129	pci = to_dw_pcie_from_pp(pp);
130	ks_pcie = to_keystone_pcie(pci);
131	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
132
133	ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
134			 BIT(bit_pos));
135	ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
136}
137
138void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
139{
140	u32 reg_offset, bit_pos;
141	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
142	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
143
144	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
145	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
146			 BIT(bit_pos));
147}
148
149void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
150{
151	u32 reg_offset, bit_pos;
152	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
153	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
154
155	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
156	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
157			 BIT(bit_pos));
158}
159
/*
 * ks_dw_pcie_msi_host_init() - Set up MSI IRQ domains for this host bridge
 *
 * Thin wrapper that delegates to the DesignWare core helper; returns
 * whatever dw_pcie_allocate_domains() returns.
 */
int ks_dw_pcie_msi_host_init(struct pcie_port *pp)
{
	return dw_pcie_allocate_domains(pp);
}
164
165void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
166{
167	int i;
168
169	for (i = 0; i < PCI_NUM_INTX; i++)
170		ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
171}
172
173void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
174{
175	struct dw_pcie *pci = ks_pcie->pci;
176	struct device *dev = pci->dev;
177	u32 pending;
178	int virq;
179
180	pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
181
182	if (BIT(0) & pending) {
183		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
184		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
185		generic_handle_irq(virq);
186	}
187
188	/* EOI the INTx interrupt */
189	ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
190}
191
/* Unmask every application-level PCIe error interrupt source */
void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}
196
/*
 * ks_dw_pcie_handle_error_irq() - Handler for the application error IRQ
 *
 * Reads the raw error status, logs fatal conditions, acks what was
 * pending, and returns IRQ_NONE when none of the enabled error bits
 * are set (shared-IRQ friendly).
 */
irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
	u32 status;

	/* Only consider the error sources this driver enables */
	status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
	if (!status)
		return IRQ_NONE;

	if (status & ERR_FATAL_IRQ)
		dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n",
			status);

	/* Ack the IRQ; status bits are RW1C */
	ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
	return IRQ_HANDLED;
}
213
/*
 * No-op: the INTx EOI is issued in ks_dw_pcie_handle_legacy_irq(), so
 * there is nothing left for the irq_chip ack to do.
 */
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}
217
/* No-op: per-INTx masking is not implemented by this irq_chip */
static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
{
}
221
/* No-op counterpart to ks_dw_pcie_mask_legacy_irq() */
static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
{
}
225
/* irq_chip for legacy INTx; all callbacks are intentional no-ops */
static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_dw_pcie_ack_legacy_irq,
	.irq_mask = ks_dw_pcie_mask_legacy_irq,
	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
};
232
233static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
234				unsigned int irq, irq_hw_number_t hw_irq)
235{
236	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
237				 handle_level_irq);
238	irq_set_chip_data(irq, d->host_data);
239
240	return 0;
241}
242
/* Legacy INTx domain ops; one- and two-cell DT interrupt specifiers accepted */
static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
	.map = ks_dw_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};
247
248/**
249 * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
250 * registers
251 *
252 * Since modification of dbi_cs2 involves different clock domain, read the
253 * status back to ensure the transition is complete.
254 */
255static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
256{
257	u32 val;
258
259	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
260	ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
261
262	do {
263		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
264	} while (!(val & DBI_CS2_EN_VAL));
265}
266
267/**
268 * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
269 *
270 * Since modification of dbi_cs2 involves different clock domain, read the
271 * status back to ensure the transition is complete.
272 */
273static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
274{
275	u32 val;
276
277	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
278	ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
279
280	do {
281		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
282	} while (val & DBI_CS2_EN_VAL);
283}
284
285void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
286{
287	struct dw_pcie *pci = ks_pcie->pci;
288	struct pcie_port *pp = &pci->pp;
289	u32 start = pp->mem->start, end = pp->mem->end;
290	int i, tr_size;
291	u32 val;
292
293	/* Disable BARs for inbound access */
294	ks_dw_pcie_set_dbi_mode(ks_pcie);
295	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
296	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
297	ks_dw_pcie_clear_dbi_mode(ks_pcie);
298
299	/* Set outbound translation size per window division */
300	ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
301
302	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
303
304	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
305	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
306		ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
307		ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
308		start += tr_size;
309	}
310
311	/* Enable OB translation */
312	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
313	ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
314}
315
316/**
317 * ks_pcie_cfg_setup() - Set up configuration space address for a device
318 *
319 * @ks_pcie: ptr to keystone_pcie structure
320 * @bus: Bus number the device is residing on
321 * @devfn: device, function number info
322 *
323 * Forms and returns the address of configuration space mapped in PCIESS
324 * address space 0.  Also configures CFG_SETUP for remote configuration space
325 * access.
326 *
327 * The address space has two regions to access configuration - local and remote.
328 * We access local region for bus 0 (as RC is attached on bus 0) and remote
329 * region for others with TYPE 1 access when bus > 1.  As for device on bus = 1,
330 * we will do TYPE 0 access as it will be on our secondary bus (logical).
331 * CFG_SETUP is needed only for remote configuration access.
332 */
333static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
334				       unsigned int devfn)
335{
336	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
337	struct dw_pcie *pci = ks_pcie->pci;
338	struct pcie_port *pp = &pci->pp;
339	u32 regval;
340
341	if (bus == 0)
342		return pci->dbi_base;
343
344	regval = (bus << 16) | (device << 8) | function;
345
346	/*
347	 * Since Bus#1 will be a virtual bus, we need to have TYPE0
348	 * access only.
349	 * TYPE 1
350	 */
351	if (bus != 1)
352		regval |= BIT(24);
353
354	ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
355	return pp->va_cfg0_base;
356}
357
358int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
359			     unsigned int devfn, int where, int size, u32 *val)
360{
361	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
362	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
363	u8 bus_num = bus->number;
364	void __iomem *addr;
365
366	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
367
368	return dw_pcie_read(addr + where, size, val);
369}
370
371int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
372			     unsigned int devfn, int where, int size, u32 val)
373{
374	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
375	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
376	u8 bus_num = bus->number;
377	void __iomem *addr;
378
379	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
380
381	return dw_pcie_write(addr + where, size, val);
382}
383
/**
 * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 *
 * This sets BAR0 to enable inbound access for MSI_IRQ register
 */
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	/* Configure and set up BAR0 */
	ks_dw_pcie_set_dbi_mode(ks_pcie);

	/*
	 * Enable BAR0.  In DBI CS2 mode these writes hit the overlaid BAR
	 * mask registers rather than the BAR itself: enable, then size to
	 * 4K.  NOTE(review): presumed BAR-mask semantics of the two writes;
	 * confirm against the DW v3.65 documentation.
	 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

	ks_dw_pcie_clear_dbi_mode(ks_pcie);

	 /*
	  * For BAR0, just setting bus address for inbound writes (MSI) should
	  * be sufficient.  Use physical address to avoid any conflicts.
	  */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}
409
410/**
411 * ks_dw_pcie_link_up() - Check if link up
412 */
413int ks_dw_pcie_link_up(struct dw_pcie *pci)
414{
415	u32 val;
416
417	val = dw_pcie_readl_dbi(pci, DEBUG0);
418	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
419}
420
421void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
422{
423	u32 val;
424
425	/* Disable Link training */
426	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
427	val &= ~LTSSM_EN_VAL;
428	ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
429
430	/* Initiate Link Training */
431	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
432	ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
433}
434
/**
 * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
 *
 * Ioremap the register resources, initialize the legacy IRQ domain and
 * call dw_pcie_host_init() to initialize the Keystone PCI host
 * controller.  (The doc used to reference dw_pcie_v3_65_host_init, which
 * does not match the call below.)
 *
 * NOTE(review): @msi_intc_np is unused in this function — confirm whether
 * it can be dropped from the signature.
 */
int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
				struct device_node *msi_intc_np)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	/* Index 0 is the config reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/*
	 * cfg0 and cfg1 share the same remote config window; both are used
	 * by the rd/wr_other_conf callbacks above.
	 */
	pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

	/* Index 1 is the application reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	/* Keep a copy so app.start can be handed out as the MSI target */
	ks_pcie->app = *res;

	/* Create legacy IRQ domain */
	ks_pcie->legacy_irq_domain =
			irq_domain_add_linear(ks_pcie->legacy_intc_np,
					PCI_NUM_INTX,
					&ks_dw_pcie_legacy_irq_domain_ops,
					NULL);
	if (!ks_pcie->legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		return -EINVAL;
	}

	return dw_pcie_host_init(pp);
}