v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * AppliedMicro X-Gene SoC SATA Host Controller Driver
  4 *
  5 * Copyright (c) 2014, Applied Micro Circuits Corporation
  6 * Author: Loc Ho <lho@apm.com>
  7 *         Tuan Phan <tphan@apm.com>
  8 *         Suman Tripathi <stripathi@apm.com>
  9 *
 10 * NOTE: PM support is not currently available.
 11 */
 12#include <linux/acpi.h>
 13#include <linux/module.h>
 14#include <linux/platform_device.h>
 15#include <linux/ahci_platform.h>
 16#include <linux/of.h>
 17#include <linux/phy/phy.h>
 18#include "ahci.h"
 19
 20#define DRV_NAME "xgene-ahci"
 21
 22/* Max # of disks per controller */
 23#define MAX_AHCI_CHN_PERCTR		2
 24
 25/* MUX CSR */
 26#define SATA_ENET_CONFIG_REG		0x00000000
 27#define  CFG_SATA_ENET_SELECT_MASK	0x00000001
 28
 29/* SATA core host controller CSR */
 30#define SLVRDERRATTRIBUTES		0x00000000
 31#define SLVWRERRATTRIBUTES		0x00000004
 32#define MSTRDERRATTRIBUTES		0x00000008
 33#define MSTWRERRATTRIBUTES		0x0000000c
 34#define BUSCTLREG			0x00000014
 35#define IOFMSTRWAUX			0x00000018
 36#define INTSTATUSMASK			0x0000002c
 37#define ERRINTSTATUS			0x00000030
 38#define ERRINTSTATUSMASK		0x00000034
 39
 40/* SATA host AHCI CSR */
 41#define PORTCFG				0x000000a4
 42#define  PORTADDR_SET(dst, src) \
 43		(((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
 44#define PORTPHY1CFG		0x000000a8
 45#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
 46		(((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
 47#define PORTPHY2CFG			0x000000ac
 48#define PORTPHY3CFG			0x000000b0
 49#define PORTPHY4CFG			0x000000b4
 50#define PORTPHY5CFG			0x000000b8
 51#define SCTL0				0x0000012C
 52#define PORTPHY5CFG_RTCHG_SET(dst, src) \
 53		(((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
 54#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
 55		(((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
 56#define PORTAXICFG			0x000000bc
 57#define PORTAXICFG_OUTTRANS_SET(dst, src) \
 58		(((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
 59#define PORTRANSCFG			0x000000c8
 60#define PORTRANSCFG_RXWM_SET(dst, src)		\
 61		(((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f))
 62
 63/* SATA host controller AXI CSR */
 64#define INT_SLV_TMOMASK			0x00000010
 65
 66/* SATA diagnostic CSR */
 67#define CFG_MEM_RAM_SHUTDOWN		0x00000070
 68#define BLOCK_MEM_RDY			0x00000074
 69
 70/* Max retry for link down */
 71#define MAX_LINK_DOWN_RETRY 3
 72
 73enum xgene_ahci_version {
 74	XGENE_AHCI_V1 = 1,
 75	XGENE_AHCI_V2,
 76};
 77
 78struct xgene_ahci_context {
 79	struct ahci_host_priv *hpriv;
 80	struct device *dev;
 81	u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued */
 82	u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */
 83	void __iomem *csr_core;		/* Core CSR address of IP */
 84	void __iomem *csr_diag;		/* Diag CSR address of IP */
 85	void __iomem *csr_axi;		/* AXI CSR address of IP */
 86	void __iomem *csr_mux;		/* MUX CSR address of IP */
 87};
 88
 89static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
 90{
 91	dev_dbg(ctx->dev, "Release memory from shutdown\n");
 92	writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
 93	readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
 94	msleep(1);	/* reset may take up to 1ms */
 95	if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
 96		dev_err(ctx->dev, "failed to release memory from shutdown\n");
 97		return -ENODEV;
 98	}
 99	return 0;
100}
101
102/**
103 * xgene_ahci_poll_reg_val - Poll a register for a specific value.
104 * @ap : ATA port of interest.
105 * @reg : Register of interest.
106 * @val : Value to be attained.
107 * @interval : polling interval, in milliseconds.
108 * @timeout : timeout, in milliseconds, for the value to be reached.
109 */
110static int xgene_ahci_poll_reg_val(struct ata_port *ap,
111				   void __iomem *reg, unsigned int val,
112				   unsigned int interval, unsigned int timeout)
113{
114	unsigned long deadline;
115	unsigned int tmp;
116
117	tmp = ioread32(reg);
118	deadline = ata_deadline(jiffies, timeout);
119
120	while (tmp != val && time_before(jiffies, deadline)) {
121		ata_msleep(ap, interval);
122		tmp = ioread32(reg);
123	}
124
125	return tmp;
126}
127
128/**
129 * xgene_ahci_restart_engine - Restart the dma engine.
130 * @ap : ATA port of interest
131 *
132 * Waits for completion of multiple commands and restarts
133 * the DMA engine inside the controller.
134 */
135static int xgene_ahci_restart_engine(struct ata_port *ap)
136{
137	struct ahci_host_priv *hpriv = ap->host->private_data;
138	struct ahci_port_priv *pp = ap->private_data;
139	void __iomem *port_mmio = ahci_port_base(ap);
140	u32 fbs;
141
142	/*
143	 * In the case of a PMP, multiple IDENTIFY DEVICE commands can be
144	 * issued inside PxCI, so we need to poll PxCI for the
145	 * completion of outstanding IDENTIFY DEVICE commands before
146	 * we restart the DMA engine.
147	 */
148	if (xgene_ahci_poll_reg_val(ap, port_mmio +
149				    PORT_CMD_ISSUE, 0x0, 1, 100))
150		  return -EBUSY;
151
152	hpriv->stop_engine(ap);
153	ahci_start_fis_rx(ap);
154
155	/*
156	 * Enable the PxFBS.FBS_EN bit as it
157	 * gets cleared due to stopping the engine.
158	 */
159	if (pp->fbs_supported) {
160		fbs = readl(port_mmio + PORT_FBS);
161		writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
162		fbs = readl(port_mmio + PORT_FBS);
163	}
164
165	hpriv->start_engine(ap);
166
167	return 0;
168}
169
170/**
171 * xgene_ahci_qc_issue - Issue commands to the device
172 * @qc: Command to issue
173 *
174 * Due to a hardware erratum with the IDENTIFY DEVICE command, the controller
175 * cannot clear the BSY bit after receiving the PIO setup FIS. This causes the
176 * DMA state machine to go into the CMFatalErrorUpdate state and lock up. By
177 * restarting the DMA engine, the controller is brought out of the lockup state.
178 *
179 * Due to H/W errata, the controller is unable to save the PMP
180 * field fetched from the command header before sending the H2D FIS.
181 * When the device returns the PMP port field in the D2H FIS, there is
182 * a mismatch that results in command completion failure. The
183 * workaround is to write the pmp value to the PxFBS.DEV field before
184 * issuing any command to the PMP.
185 */
186static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
187{
188	struct ata_port *ap = qc->ap;
189	struct ahci_host_priv *hpriv = ap->host->private_data;
190	struct xgene_ahci_context *ctx = hpriv->plat_data;
191	int rc = 0;
192	u32 port_fbs;
193	void __iomem *port_mmio = ahci_port_base(ap);
194
195	/*
196	 * Write the pmp value to PxFBS.DEV
197	 * in the case of a Port Multiplier.
198	 */
199	if (ctx->class[ap->port_no] == ATA_DEV_PMP) {
200		port_fbs = readl(port_mmio + PORT_FBS);
201		port_fbs &= ~PORT_FBS_DEV_MASK;
202		port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
203		writel(port_fbs, port_mmio + PORT_FBS);
204	}
205
206	if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
207	    (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) ||
208	    (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART)))
209		xgene_ahci_restart_engine(ap);
210
211	rc = ahci_qc_issue(qc);
212
213	/* Save the last command issued */
214	ctx->last_cmd[ap->port_no] = qc->tf.command;
215
216	return rc;
217}
218
219static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
220{
221	void __iomem *diagcsr = ctx->csr_diag;
222
223	return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
224	        readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
225}
226
227/**
228 * xgene_ahci_read_id - Read ID data from the specified device
229 * @dev: device
230 * @tf: proposed taskfile
231 * @id: data buffer
232 *
233 * This custom read ID function is required because the HW does not
234 * support DEVSLP.
235 */
236static unsigned int xgene_ahci_read_id(struct ata_device *dev,
237				       struct ata_taskfile *tf, __le16 *id)
238{
239	u32 err_mask;
240
241	err_mask = ata_do_dev_read_id(dev, tf, id);
242	if (err_mask)
243		return err_mask;
244
245	/*
246	 * Mask the reserved area. Word 78 (Link Power Management) layout:
247	 * bit15-8: reserved
248	 * bit7: NCQ autosense
249	 * bit6: Software settings preservation supported
250	 * bit5: reserved
251	 * bit4: In-order SATA delivery supported
252	 * bit3: DIPM requests supported
253	 * bit2: DMA Setup FIS Auto-Activate optimization supported
254	 * bit1: DMA Setup FIS non-zero buffer offsets supported
255	 * bit0: Reserved
256	 *
257	 * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
258	 */
259	id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
260
261	return 0;
262}
263
264static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
265{
266	void __iomem *mmio = ctx->hpriv->mmio;
267	u32 val;
268
269	dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
270		mmio, channel);
271	val = readl(mmio + PORTCFG);
272	val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
273	writel(val, mmio + PORTCFG);
274	readl(mmio + PORTCFG);  /* Force a barrier */
275	/* Disable fixed rate */
276	writel(0x0001fffe, mmio + PORTPHY1CFG);
277	readl(mmio + PORTPHY1CFG); /* Force a barrier */
278	writel(0x28183219, mmio + PORTPHY2CFG);
279	readl(mmio + PORTPHY2CFG); /* Force a barrier */
280	writel(0x13081008, mmio + PORTPHY3CFG);
281	readl(mmio + PORTPHY3CFG); /* Force a barrier */
282	writel(0x00480815, mmio + PORTPHY4CFG);
283	readl(mmio + PORTPHY4CFG); /* Force a barrier */
284	/* Set window negotiation */
285	val = readl(mmio + PORTPHY5CFG);
286	val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
287	writel(val, mmio + PORTPHY5CFG);
288	readl(mmio + PORTPHY5CFG); /* Force a barrier */
289	val = readl(mmio + PORTAXICFG);
290	val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
291	val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
292	writel(val, mmio + PORTAXICFG);
293	readl(mmio + PORTAXICFG); /* Force a barrier */
294	/* Set the watermark threshold of the receive FIFO */
295	val = readl(mmio + PORTRANSCFG);
296	val = PORTRANSCFG_RXWM_SET(val, 0x30);
297	writel(val, mmio + PORTRANSCFG);
298}
299
300/**
301 * xgene_ahci_do_hardreset - Issue the actual COMRESET
302 * @link: link to reset
303 * @deadline: deadline jiffies for the operation
304 * @online: Return value to indicate if the device is online
305 *
306 * Due to a limitation of the hardware PHY, a different set of settings is
307 * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
308 * and Gen1 (1.5Gbps). Otherwise, during long IO stress tests, the PHY will
309 * report disparity errors and the like. In addition, during COMRESET, errors
310 * can be reported in the PORT_SCR_ERR register. For SERR_DISPARITY and
311 * SERR_10B_8B_ERR, the PHY receiver line must be reset. Also, during long
312 * reboot cycle regression, the PHY sometimes reports link down even though
313 * the device is present, because of speed negotiation failure, so the
314 * COMRESET must be retried to get the link up. The following algorithm is
315 * used to properly configure the hardware PHY during COMRESET:
316 *
317 * Alg Part 1:
318 * 1. Start the PHY at Gen3 speed (default setting)
319 * 2. Issue the COMRESET
320 * 3. If no link, go to Alg Part 3
321 * 4. If link up, determine if the negotiated speed matches the PHY
322 *    configured speed
323 * 5. If they match, go to Alg Part 2
324 * 6. If they do not match and this is the first attempt, configure the PHY
325 *    for the negotiated disk speed and repeat step 2
326 * 7. Go to Alg Part 2
327 *
328 * Alg Part 2:
329 * 1. On link up, if any SERR_DISPARITY or SERR_10B_8B_ERR errors are
330 *    reported in the PORT_SCR_ERR register, then reset the PHY receiver line
331 * 2. Go to Alg Part 4
332 *
333 * Alg Part 3:
334 * 1. Check PORT_SCR_STAT to see whether device presence was detected but
335 *    PHY communication establishment failed; if so, and the number of link
336 *    down attempts is less than the maximum of 3, go to Alg Part 1.
337 * 2. Go to Alg Part 4.
338 *
339 * Alg Part 4:
340 * 1. Clear any pending errors from the PORT_SCR_ERR register.
341 *
342 * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition,
343 *       until the underlying PHY supports a method to reset the receiver
344 *       line, a warning message will be printed on detection of
345 *       SERR_DISPARITY or SERR_10B_8B_ERR errors.
346 */
347static int xgene_ahci_do_hardreset(struct ata_link *link,
348				   unsigned long deadline, bool *online)
349{
350	const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
351	struct ata_port *ap = link->ap;
352	struct ahci_host_priv *hpriv = ap->host->private_data;
353	struct xgene_ahci_context *ctx = hpriv->plat_data;
354	struct ahci_port_priv *pp = ap->private_data;
355	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
356	void __iomem *port_mmio = ahci_port_base(ap);
357	struct ata_taskfile tf;
358	int link_down_retry = 0;
359	int rc;
360	u32 val, sstatus;
361
362	do {
363		/* clear D2H reception area to properly wait for D2H FIS */
364		ata_tf_init(link->device, &tf);
365		tf.status = ATA_BUSY;
366		ata_tf_to_fis(&tf, 0, 0, d2h_fis);
367		rc = sata_link_hardreset(link, timing, deadline, online,
368				 ahci_check_ready);
369		if (*online) {
370			val = readl(port_mmio + PORT_SCR_ERR);
371			if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
372				dev_warn(ctx->dev, "link has error\n");
373			break;
374		}
375
376		sata_scr_read(link, SCR_STATUS, &sstatus);
377	} while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
378		 (sstatus & 0xff) == 0x1);
379
380	/* clear all errors if any pending */
381	val = readl(port_mmio + PORT_SCR_ERR);
382	writel(val, port_mmio + PORT_SCR_ERR);
383
384	return rc;
385}
386
387static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
388				unsigned long deadline)
389{
390	struct ata_port *ap = link->ap;
391	struct ahci_host_priv *hpriv = ap->host->private_data;
392	void __iomem *port_mmio = ahci_port_base(ap);
393	bool online;
394	int rc;
395	u32 portcmd_saved;
396	u32 portclb_saved;
397	u32 portclbhi_saved;
398	u32 portrxfis_saved;
399	u32 portrxfishi_saved;
400
401	/* As hardreset resets these CSRs, save them to restore later */
402	portcmd_saved = readl(port_mmio + PORT_CMD);
403	portclb_saved = readl(port_mmio + PORT_LST_ADDR);
404	portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
405	portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
406	portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
407
408	hpriv->stop_engine(ap);
409
410	rc = xgene_ahci_do_hardreset(link, deadline, &online);
411
412	/* As controller hardreset clears them, restore them */
413	writel(portcmd_saved, port_mmio + PORT_CMD);
414	writel(portclb_saved, port_mmio + PORT_LST_ADDR);
415	writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
416	writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
417	writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);
418
419	hpriv->start_engine(ap);
420
421	if (online)
422		*class = ahci_dev_classify(ap);
423
424	return rc;
425}
426
427static void xgene_ahci_host_stop(struct ata_host *host)
428{
429	struct ahci_host_priv *hpriv = host->private_data;
430
431	ahci_platform_disable_resources(hpriv);
432}
433
434/**
435 * xgene_ahci_pmp_softreset - Issue the softreset to the drives connected
436 *                            to a Port Multiplier.
437 * @link: link to reset
438 * @class: Return value to indicate class of device
439 * @deadline: deadline jiffies for the operation
440 *
441 * Due to H/W errata, the controller is unable to save the PMP
442 * field fetched from the command header before sending the H2D FIS.
443 * When the device returns the PMP port field in the D2H FIS, there is
444 * a mismatch that results in command completion failure. The workaround
445 * is to write the pmp value to the PxFBS.DEV field before issuing any
446 * command to the PMP.
447 */
448static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
449			  unsigned long deadline)
450{
451	int pmp = sata_srst_pmp(link);
452	struct ata_port *ap = link->ap;
453	u32 rc;
454	void __iomem *port_mmio = ahci_port_base(ap);
455	u32 port_fbs;
456
457	/*
458	 * Set PxFBS.DEV field with pmp
459	 * value.
460	 */
461	port_fbs = readl(port_mmio + PORT_FBS);
462	port_fbs &= ~PORT_FBS_DEV_MASK;
463	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
464	writel(port_fbs, port_mmio + PORT_FBS);
465
466	rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
467
468	return rc;
469}
470
471/**
472 * xgene_ahci_softreset - Issue the softreset to the drive.
473 * @link: link to reset
474 * @class: Return value to indicate class of device
475 * @deadline: deadline jiffies for the operation
476 *
477 * Due to H/W errata, the controller is unable to save the PMP
478 * field fetched from the command header before sending the H2D FIS.
479 * When the device returns the PMP port field in the D2H FIS, there is
480 * a mismatch that results in command completion failure. The workaround
481 * is to write the pmp value to the PxFBS.DEV field before issuing any
482 * command to the PMP. Here is the algorithm to detect a PMP:
483 *
484 * 1. Save the PxFBS value
485 * 2. Program PxFBS.DEV with the pmp value sent by the framework. The
486 *    framework initially sends 0xF for both PMP and non-PMP devices
487 * 3. Issue the softreset
488 * 4. If the signature class is PMP, go to step 6
489 * 5. Restore the original PxFBS and go to step 3
490 * 6. Return
491 */
492static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
493			  unsigned long deadline)
494{
495	int pmp = sata_srst_pmp(link);
496	struct ata_port *ap = link->ap;
497	struct ahci_host_priv *hpriv = ap->host->private_data;
498	struct xgene_ahci_context *ctx = hpriv->plat_data;
499	void __iomem *port_mmio = ahci_port_base(ap);
500	u32 port_fbs;
501	u32 port_fbs_save;
502	u32 retry = 1;
503	u32 rc;
504
505	port_fbs_save = readl(port_mmio + PORT_FBS);
506
507	/*
508	 * Set PxFBS.DEV field with pmp
509	 * value.
510	 */
511	port_fbs = readl(port_mmio + PORT_FBS);
512	port_fbs &= ~PORT_FBS_DEV_MASK;
513	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
514	writel(port_fbs, port_mmio + PORT_FBS);
515
516softreset_retry:
517	rc = ahci_do_softreset(link, class, pmp,
518			       deadline, ahci_check_ready);
519
520	ctx->class[ap->port_no] = *class;
521	if (*class != ATA_DEV_PMP) {
522		/*
523		 * Retry for normal drives without
524		 * setting PxFBS.DEV field with pmp value.
525		 */
526		if (retry--) {
527			writel(port_fbs_save, port_mmio + PORT_FBS);
528			goto softreset_retry;
529		}
530	}
531
532	return rc;
533}
534
535/**
536 * xgene_ahci_handle_broken_edge_irq - Handle the broken edge-trigger irq.
537 * @host: Host that received the irq
538 * @irq_masked: HOST_IRQ_STAT value
539 *
540 * For hardware with a broken edge-trigger latch, the
541 * HOST_IRQ_STAT register misses the edge interrupt when the
542 * clearing of the HOST_IRQ_STAT register and the hardware's
543 * reporting of the PORT_IRQ_STAT register happen in the
544 * same clock cycle.
545 * As such, the algorithm below outlines the workaround.
546 *
547 * 1. Read HOST_IRQ_STAT register and save the state.
548 * 2. Clear the HOST_IRQ_STAT register.
549 * 3. Read back the HOST_IRQ_STAT register.
550 * 4. If the HOST_IRQ_STAT register equals zero, traverse the
551 *    PORT_IRQ_STAT register of each remaining port to check
552 *    whether an interrupt is triggered at that point; otherwise
553 *    go to step 6.
554 * 5. If the PORT_IRQ_STAT register of any remaining port is non-zero,
555 *    update the state of HOST_IRQ_STAT saved in step 1.
556 * 6. Handle port interrupts.
557 * 7. Exit
558 */
559static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
560					     u32 irq_masked)
561{
562	struct ahci_host_priv *hpriv = host->private_data;
563	void __iomem *port_mmio;
564	int i;
565
566	if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
567		for (i = 0; i < host->n_ports; i++) {
568			if (irq_masked & (1 << i))
569				continue;
570
571			port_mmio = ahci_port_base(host->ports[i]);
572			if (readl(port_mmio + PORT_IRQ_STAT))
573				irq_masked |= (1 << i);
574		}
575	}
576
577	return ahci_handle_port_intr(host, irq_masked);
578}
579
580static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
581{
582	struct ata_host *host = dev_instance;
583	struct ahci_host_priv *hpriv;
584	unsigned int rc = 0;
585	void __iomem *mmio;
586	u32 irq_stat, irq_masked;
587
588	hpriv = host->private_data;
589	mmio = hpriv->mmio;
590
591	/* sigh.  0xffffffff is a valid return from h/w */
592	irq_stat = readl(mmio + HOST_IRQ_STAT);
593	if (!irq_stat)
594		return IRQ_NONE;
595
596	irq_masked = irq_stat & hpriv->port_map;
597
598	spin_lock(&host->lock);
599
600	/*
601	 * HOST_IRQ_STAT behaves as an edge-triggered latch, meaning that
602	 * it should be cleared before all the port events are cleared.
603	 */
604	writel(irq_stat, mmio + HOST_IRQ_STAT);
605
606	rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
607
608	spin_unlock(&host->lock);
609
610	return IRQ_RETVAL(rc);
611}
612
613static struct ata_port_operations xgene_ahci_v1_ops = {
614	.inherits = &ahci_ops,
615	.host_stop = xgene_ahci_host_stop,
616	.hardreset = xgene_ahci_hardreset,
617	.read_id = xgene_ahci_read_id,
618	.qc_issue = xgene_ahci_qc_issue,
619	.softreset = xgene_ahci_softreset,
620	.pmp_softreset = xgene_ahci_pmp_softreset
621};
622
623static const struct ata_port_info xgene_ahci_v1_port_info = {
624	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
625	.pio_mask = ATA_PIO4,
626	.udma_mask = ATA_UDMA6,
627	.port_ops = &xgene_ahci_v1_ops,
628};
629
630static struct ata_port_operations xgene_ahci_v2_ops = {
631	.inherits = &ahci_ops,
632	.host_stop = xgene_ahci_host_stop,
633	.hardreset = xgene_ahci_hardreset,
634	.read_id = xgene_ahci_read_id,
635};
636
637static const struct ata_port_info xgene_ahci_v2_port_info = {
638	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
639	.pio_mask = ATA_PIO4,
640	.udma_mask = ATA_UDMA6,
641	.port_ops = &xgene_ahci_v2_ops,
642};
643
644static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
645{
646	struct xgene_ahci_context *ctx = hpriv->plat_data;
647	int i;
648	int rc;
649	u32 val;
650
651	/* Bring the IP RAM out of shutdown */
652	rc = xgene_ahci_init_memram(ctx);
653	if (rc)
654		return rc;
655
656	for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
657		xgene_ahci_set_phy_cfg(ctx, i);
658
659	/* AXI disable Mask */
660	writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
661	readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
662	writel(0, ctx->csr_core + INTSTATUSMASK);
663	val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
664	dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
665		INTSTATUSMASK, val);
666
667	writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
668	readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
669	writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
670	readl(ctx->csr_axi + INT_SLV_TMOMASK);
671
672	/* Enable AXI Interrupt */
673	writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
674	writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
675	writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
676	writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);
677
678	/* Enable coherency */
679	val = readl(ctx->csr_core + BUSCTLREG);
680	val &= ~0x00000002;     /* Enable write coherency */
681	val &= ~0x00000001;     /* Enable read coherency */
682	writel(val, ctx->csr_core + BUSCTLREG);
683
684	val = readl(ctx->csr_core + IOFMSTRWAUX);
685	val |= (1 << 3);        /* Enable read coherency */
686	val |= (1 << 9);        /* Enable write coherency */
687	writel(val, ctx->csr_core + IOFMSTRWAUX);
688	val = readl(ctx->csr_core + IOFMSTRWAUX);
689	dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
690		IOFMSTRWAUX, val);
691
692	return rc;
693}
694
695static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
696{
697	u32 val;
698
699	/* Check for optional MUX resource */
700	if (!ctx->csr_mux)
701		return 0;
702
703	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
704	val &= ~CFG_SATA_ENET_SELECT_MASK;
705	writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
706	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
707	return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
708}
709
710static const struct scsi_host_template ahci_platform_sht = {
711	AHCI_SHT(DRV_NAME),
712};
713
714#ifdef CONFIG_ACPI
715static const struct acpi_device_id xgene_ahci_acpi_match[] = {
716	{ "APMC0D0D", XGENE_AHCI_V1},
717	{ "APMC0D32", XGENE_AHCI_V2},
718	{},
719};
720MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
721#endif
722
723static const struct of_device_id xgene_ahci_of_match[] = {
724	{.compatible = "apm,xgene-ahci", .data = (void *) XGENE_AHCI_V1},
725	{.compatible = "apm,xgene-ahci-v2", .data = (void *) XGENE_AHCI_V2},
726	{ /* sentinel */ }
727};
728MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);
729
730static int xgene_ahci_probe(struct platform_device *pdev)
731{
732	struct device *dev = &pdev->dev;
733	struct ahci_host_priv *hpriv;
734	struct xgene_ahci_context *ctx;
735	struct resource *res;
736	enum xgene_ahci_version version = XGENE_AHCI_V1;
737	const struct ata_port_info *ppi[] = { &xgene_ahci_v1_port_info,
738					      &xgene_ahci_v2_port_info };
739	int rc;
740
741	hpriv = ahci_platform_get_resources(pdev, 0);
742	if (IS_ERR(hpriv))
743		return PTR_ERR(hpriv);
744
745	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
746	if (!ctx)
747		return -ENOMEM;
748
749	hpriv->plat_data = ctx;
750	ctx->hpriv = hpriv;
751	ctx->dev = dev;
752
753	/* Retrieve the IP core resource */
754	ctx->csr_core = devm_platform_ioremap_resource(pdev, 1);
755	if (IS_ERR(ctx->csr_core))
756		return PTR_ERR(ctx->csr_core);
757
758	/* Retrieve the IP diagnostic resource */
759	ctx->csr_diag = devm_platform_ioremap_resource(pdev, 2);
760	if (IS_ERR(ctx->csr_diag))
761		return PTR_ERR(ctx->csr_diag);
762
763	/* Retrieve the IP AXI resource */
764	ctx->csr_axi = devm_platform_ioremap_resource(pdev, 3);
765	if (IS_ERR(ctx->csr_axi))
766		return PTR_ERR(ctx->csr_axi);
767
768	/* Retrieve the optional IP mux resource */
769	res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
770	if (res) {
771		void __iomem *csr = devm_ioremap_resource(dev, res);
772		if (IS_ERR(csr))
773			return PTR_ERR(csr);
774
775		ctx->csr_mux = csr;
776	}
777
778	if (dev->of_node) {
779		version = (enum xgene_ahci_version)of_device_get_match_data(dev);
780	}
781#ifdef CONFIG_ACPI
782	else {
783		const struct acpi_device_id *acpi_id;
784		struct acpi_device_info *info;
785		acpi_status status;
786
787		acpi_id = acpi_match_device(xgene_ahci_acpi_match, &pdev->dev);
788		if (!acpi_id) {
789			dev_warn(&pdev->dev, "No node entry in ACPI table. Assume version1\n");
790			version = XGENE_AHCI_V1;
791		} else if (acpi_id->driver_data) {
792			version = (enum xgene_ahci_version) acpi_id->driver_data;
793			status = acpi_get_object_info(ACPI_HANDLE(&pdev->dev), &info);
794			if (ACPI_FAILURE(status)) {
795				dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
796					__func__);
797				version = XGENE_AHCI_V1;
798			} else {
799				if (info->valid & ACPI_VALID_CID)
800					version = XGENE_AHCI_V2;
801				kfree(info);
802			}
803		}
804	}
805#endif
806
807	dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
808		hpriv->mmio);
809
810	/* Select ATA */
811	if ((rc = xgene_ahci_mux_select(ctx))) {
812		dev_err(dev, "SATA mux selection failed error %d\n", rc);
813		return -ENODEV;
814	}
815
816	if (xgene_ahci_is_memram_inited(ctx)) {
817		dev_info(dev, "skip clock and PHY initialization\n");
818		goto skip_clk_phy;
819	}
820
821	/* Due to errata, HW requires full toggle transition */
822	rc = ahci_platform_enable_clks(hpriv);
823	if (rc)
824		goto disable_resources;
825	ahci_platform_disable_clks(hpriv);
826
827	rc = ahci_platform_enable_resources(hpriv);
828	if (rc)
829		goto disable_resources;
830
831	/* Configure the host controller */
832	xgene_ahci_hw_init(hpriv);
833skip_clk_phy:
834
835	switch (version) {
836	case XGENE_AHCI_V1:
837		hpriv->flags = AHCI_HFLAG_NO_NCQ;
838		break;
839	case XGENE_AHCI_V2:
840		hpriv->flags |= AHCI_HFLAG_YES_FBS;
841		hpriv->irq_handler = xgene_ahci_irq_intr;
842		break;
843	default:
844		break;
845	}
846
847	rc = ahci_platform_init_host(pdev, hpriv, ppi[version - 1],
848				     &ahci_platform_sht);
849	if (rc)
850		goto disable_resources;
851
852	dev_dbg(dev, "X-Gene SATA host controller initialized\n");
853	return 0;
854
855disable_resources:
856	ahci_platform_disable_resources(hpriv);
857	return rc;
858}
859
860static struct platform_driver xgene_ahci_driver = {
861	.probe = xgene_ahci_probe,
862	.remove = ata_platform_remove_one,
863	.driver = {
864		.name = DRV_NAME,
865		.of_match_table = xgene_ahci_of_match,
866		.acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
867	},
868};
869
870module_platform_driver(xgene_ahci_driver);
871
872MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
873MODULE_AUTHOR("Loc Ho <lho@apm.com>");
874MODULE_LICENSE("GPL");
875MODULE_VERSION("0.4");