// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AppliedMicro X-Gene SoC SATA Host Controller Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Author: Loc Ho <lho@apm.com>
 *         Tuan Phan <tphan@apm.com>
 *         Suman Tripathi <stripathi@apm.com>
 *
 * NOTE: PM support is not currently available.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ahci_platform.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include "ahci.h"

#define DRV_NAME "xgene-ahci"

/* Max # of disks per controller */
#define MAX_AHCI_CHN_PERCTR 2

/* MUX CSR */
#define SATA_ENET_CONFIG_REG 0x00000000
#define CFG_SATA_ENET_SELECT_MASK 0x00000001

/* SATA core host controller CSR */
#define SLVRDERRATTRIBUTES 0x00000000
#define SLVWRERRATTRIBUTES 0x00000004
#define MSTRDERRATTRIBUTES 0x00000008
#define MSTWRERRATTRIBUTES 0x0000000c
#define BUSCTLREG 0x00000014
#define IOFMSTRWAUX 0x00000018
#define INTSTATUSMASK 0x0000002c
#define ERRINTSTATUS 0x00000030
#define ERRINTSTATUSMASK 0x00000034

/* SATA host AHCI CSR */
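/*
 * The *_SET() helpers below clear the target bit-field in @dst and insert
 * @src at the field's offset, returning the updated register value.
 */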
#define PORTCFG 0x000000a4
#define PORTADDR_SET(dst, src) \
		(((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
#define PORTPHY1CFG 0x000000a8
#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
		(((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
#define PORTPHY2CFG 0x000000ac
#define PORTPHY3CFG 0x000000b0
#define PORTPHY4CFG 0x000000b4
#define PORTPHY5CFG 0x000000b8
#define SCTL0 0x0000012C
#define PORTPHY5CFG_RTCHG_SET(dst, src) \
		(((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
		(((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
#define PORTAXICFG 0x000000bc
#define PORTAXICFG_OUTTRANS_SET(dst, src) \
		(((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
#define PORTRANSCFG 0x000000c8
#define PORTRANSCFG_RXWM_SET(dst, src) \
		(((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f))

/* SATA host controller AXI CSR */
#define INT_SLV_TMOMASK 0x00000010

/* SATA diagnostic CSR */
#define CFG_MEM_RAM_SHUTDOWN 0x00000070
#define BLOCK_MEM_RDY 0x00000074

/* Max retry for link down */
#define MAX_LINK_DOWN_RETRY 3

enum xgene_ahci_version {
	XGENE_AHCI_V1 = 1,
	XGENE_AHCI_V2,
};

struct xgene_ahci_context {
	struct ahci_host_priv *hpriv;
	struct device *dev;
	u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued */
	u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */
	void __iomem *csr_core; /* Core CSR address of IP */
	void __iomem *csr_diag; /* Diag CSR address of IP */
	void __iomem *csr_axi; /* AXI CSR address of IP */
	void __iomem *csr_mux; /* MUX CSR address of IP */
};

static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
{
	dev_dbg(ctx->dev, "Release memory from shutdown\n");
	writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
	readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
	msleep(1); /* reset may take up to 1ms */
	if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
		dev_err(ctx->dev, "failed to release memory from shutdown\n");
		return -ENODEV;
	}
	return 0;
}

/**
 * xgene_ahci_poll_reg_val - Poll a register until it reads a specific value.
 * @ap : ATA port of interest.
 * @reg : Register of interest.
 * @val : Value to be attained.
 * @interval : waiting interval for polling.
 * @timeout : timeout for achieving the value.
 */
static int xgene_ahci_poll_reg_val(struct ata_port *ap,
				   void __iomem *reg, unsigned int val,
				   unsigned int interval, unsigned int timeout)
{
	unsigned long deadline;
	unsigned int tmp;

	tmp = ioread32(reg);
	deadline = ata_deadline(jiffies, timeout);

	while (tmp != val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

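	/*
	 * Return the last value read; callers that poll for 0 (e.g. waiting
	 * for PxCI to drain) treat a non-zero result as a timeout.
	 */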
	return tmp;
}

/**
 * xgene_ahci_restart_engine - Restart the dma engine.
 * @ap : ATA port of interest
 *
 * Waits for completion of multiple commands and restarts
 * the DMA engine inside the controller.
 */
static int xgene_ahci_restart_engine(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;

	/*
	 * In case of PMP multiple IDENTIFY DEVICE commands can be
	 * issued inside PxCI. So need to poll PxCI for the
	 * completion of outstanding IDENTIFY DEVICE commands before
	 * we restart the DMA engine.
	 */
	if (xgene_ahci_poll_reg_val(ap, port_mmio +
				    PORT_CMD_ISSUE, 0x0, 1, 100))
		return -EBUSY;

	hpriv->stop_engine(ap);
	ahci_start_fis_rx(ap);

	/*
	 * Enable the PxFBS.FBS_EN bit as it
	 * gets cleared due to stopping the engine.
	 */
	if (pp->fbs_supported) {
		fbs = readl(port_mmio + PORT_FBS);
		writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
		fbs = readl(port_mmio + PORT_FBS);
	}

	hpriv->start_engine(ap);

	return 0;
}

/**
 * xgene_ahci_qc_issue - Issue commands to the device
 * @qc: Command to issue
 *
 * Due to a hardware errata for the IDENTIFY DEVICE command, the controller
 * cannot clear the BSY bit after receiving the PIO setup FIS. This causes the
 * DMA state machine to enter the CMFatalErrorUpdate state and lock up.
 * Restarting the DMA engine brings the controller out of the locked-up state.
 *
 * Due to H/W errata, the controller is unable to save the PMP
 * field fetched from the command header before sending the H2D FIS.
 * When the device returns the PMP port field in the D2H FIS, there is
 * a mismatch that results in command completion failure. The
 * workaround is to write the pmp value to the PxFBS.DEV field before issuing
 * any command to the PMP.
 */
static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	int rc = 0;
	u32 port_fbs;
	void __iomem *port_mmio = ahci_port_base(ap);

	/*
	 * Write the pmp value to PxFBS.DEV
	 * for the case of a Port Multiplier.
	 */
	if (ctx->class[ap->port_no] == ATA_DEV_PMP) {
		port_fbs = readl(port_mmio + PORT_FBS);
		port_fbs &= ~PORT_FBS_DEV_MASK;
		port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
		writel(port_fbs, port_mmio + PORT_FBS);
	}

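	/*
	 * If the previous command on this port was one of the commands
	 * affected by the errata described above (IDENTIFY DEVICE, PACKET
	 * or SMART), restart the DMA engine before issuing the next command.
	 */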
	if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
		     (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) ||
		     (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART)))
		xgene_ahci_restart_engine(ap);

	rc = ahci_qc_issue(qc);

	/* Save the last command issued */
	ctx->last_cmd[ap->port_no] = qc->tf.command;

	return rc;
}

static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
{
	void __iomem *diagcsr = ctx->csr_diag;

	return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
		readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
}

/**
 * xgene_ahci_read_id - Read ID data from the specified device
 * @dev: device
 * @tf: proposed taskfile
 * @id: data buffer
 *
 * This custom read ID function is required because the HW
 * does not support DEVSLP.
 */
static unsigned int xgene_ahci_read_id(struct ata_device *dev,
				       struct ata_taskfile *tf, __le16 *id)
{
	u32 err_mask;

	err_mask = ata_do_dev_read_id(dev, tf, id);
	if (err_mask)
		return err_mask;

	/*
	 * Mask reserved area. Word78 spec of Link Power Management
	 * bit15-8: reserved
	 * bit7: NCQ autosense
	 * bit6: Software settings preservation supported
	 * bit5: reserved
	 * bit4: In-order SATA delivery supported
	 * bit3: DIPM requests supported
	 * bit2: DMA Setup FIS Auto-Activate optimization supported
	 * bit1: DMA Setup FIS non-zero buffer offsets supported
	 * bit0: Reserved
	 *
	 * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
	 */
	id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));

	return 0;
}

static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
{
	void __iomem *mmio = ctx->hpriv->mmio;
	u32 val;

	dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
		mmio, channel);
	val = readl(mmio + PORTCFG);
	val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
	writel(val, mmio + PORTCFG);
	readl(mmio + PORTCFG); /* Force a barrier */
	/* Disable fix rate */
	writel(0x0001fffe, mmio + PORTPHY1CFG);
	readl(mmio + PORTPHY1CFG); /* Force a barrier */
	writel(0x28183219, mmio + PORTPHY2CFG);
	readl(mmio + PORTPHY2CFG); /* Force a barrier */
	writel(0x13081008, mmio + PORTPHY3CFG);
	readl(mmio + PORTPHY3CFG); /* Force a barrier */
	writel(0x00480815, mmio + PORTPHY4CFG);
	readl(mmio + PORTPHY4CFG); /* Force a barrier */
	/* Set window negotiation */
	val = readl(mmio + PORTPHY5CFG);
	val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
	writel(val, mmio + PORTPHY5CFG);
	readl(mmio + PORTPHY5CFG); /* Force a barrier */
	val = readl(mmio + PORTAXICFG);
	val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
	val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
	writel(val, mmio + PORTAXICFG);
	readl(mmio + PORTAXICFG); /* Force a barrier */
	/* Set the watermark threshold of the receive FIFO */
	val = readl(mmio + PORTRANSCFG);
	val = PORTRANSCFG_RXWM_SET(val, 0x30);
	writel(val, mmio + PORTRANSCFG);
}

/**
 * xgene_ahci_do_hardreset - Issue the actual COMRESET
 * @link: link to reset
 * @deadline: deadline jiffies for the operation
 * @online: Return value to indicate if device online
 *
 * Due to the limitation of the hardware PHY, a different set of settings is
 * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
 * and Gen1 (1.5Gbps). Otherwise, during long IO stress tests, the PHY will
 * report disparity errors and the like. In addition, during COMRESET, errors
 * can be reported in the register PORT_SCR_ERR. For SERR_DISPARITY and
 * SERR_10B_8B_ERR, the PHY receiver line must be reset. Also, during long
 * reboot cycle regression, the PHY sometimes reports link down even if the
 * device is present because of speed negotiation failure, so we need to retry
 * the COMRESET to get the link up. The following algorithm is used to
 * properly configure the hardware PHY during COMRESET:
 *
 * Alg Part 1:
 * 1. Start the PHY at Gen3 speed (default setting)
 * 2. Issue the COMRESET
 * 3. If no link, go to Alg Part 3
 * 4. If link up, determine if the negotiated speed matches the PHY
 *    configured speed
 * 5. If they match, go to Alg Part 2
 * 6. If they do not match and this is the first attempt, configure the PHY
 *    for the negotiated disk speed and repeat step 2
 * 7. Go to Alg Part 2
 *
 * Alg Part 2:
 * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR errors
 *    reported in the register PORT_SCR_ERR, then reset the PHY receiver line
 * 2. Go to Alg Part 4
 *
 * Alg Part 3:
 * 1. Check PORT_SCR_STAT to see whether device presence was detected but PHY
 *    communication establishment failed; if so, and the number of link-down
 *    attempts is less than the maximum of 3, go to Alg Part 1.
 * 2. Go to Alg Part 4.
 *
 * Alg Part 4:
 * 1. Clear any pending errors from register PORT_SCR_ERR.
 *
 * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition,
 * until the underlying PHY supports a method to reset the receiver
 * line, on detection of SERR_DISPARITY or SERR_10B_8B_ERR errors,
 * a warning message will be printed.
 */
static int xgene_ahci_do_hardreset(struct ata_link *link,
				   unsigned long deadline, bool *online)
{
	const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	int link_down_retry = 0;
	int rc;
	u32 val, sstatus;

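	/*
	 * Retry the COMRESET while SStatus.DET reports that a device is
	 * present but PHY communication is not established (DET == 0x1),
	 * up to MAX_LINK_DOWN_RETRY times.
	 */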
	do {
		/* clear D2H reception area to properly wait for D2H FIS */
		ata_tf_init(link->device, &tf);
		tf.status = ATA_BUSY;
		ata_tf_to_fis(&tf, 0, 0, d2h_fis);
		rc = sata_link_hardreset(link, timing, deadline, online,
					 ahci_check_ready);
		if (*online) {
			val = readl(port_mmio + PORT_SCR_ERR);
			if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
				dev_warn(ctx->dev, "link has error\n");
			break;
		}

		sata_scr_read(link, SCR_STATUS, &sstatus);
	} while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
		 (sstatus & 0xff) == 0x1);

	/* clear all errors if any pending */
	val = readl(port_mmio + PORT_SCR_ERR);
	writel(val, port_mmio + PORT_SCR_ERR);

	return rc;
}

static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	bool online;
	int rc;
	u32 portcmd_saved;
	u32 portclb_saved;
	u32 portclbhi_saved;
	u32 portrxfis_saved;
	u32 portrxfishi_saved;

	/* As hardreset resets these CSRs, save them to restore later */
	portcmd_saved = readl(port_mmio + PORT_CMD);
	portclb_saved = readl(port_mmio + PORT_LST_ADDR);
	portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
	portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
	portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);

	hpriv->stop_engine(ap);

	rc = xgene_ahci_do_hardreset(link, deadline, &online);

	/* As controller hardreset clears them, restore them */
	writel(portcmd_saved, port_mmio + PORT_CMD);
	writel(portclb_saved, port_mmio + PORT_LST_ADDR);
	writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
	writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
	writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);

	hpriv->start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}

static void xgene_ahci_host_stop(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;

	ahci_platform_disable_resources(hpriv);
}

/**
 * xgene_ahci_pmp_softreset - Issue the softreset to the drives connected
 *                            to a Port Multiplier.
 * @link: link to reset
 * @class: Return value to indicate class of device
 * @deadline: deadline jiffies for the operation
 *
 * Due to H/W errata, the controller is unable to save the PMP
 * field fetched from the command header before sending the H2D FIS.
 * When the device returns the PMP port field in the D2H FIS, there is
 * a mismatch that results in command completion failure. The workaround
 * is to write the pmp value to the PxFBS.DEV field before issuing any command
 * to the PMP.
 */
static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
				    unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);
	struct ata_port *ap = link->ap;
	u32 rc;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 port_fbs;

	/*
	 * Set PxFBS.DEV field with pmp
	 * value.
	 */
	port_fbs = readl(port_mmio + PORT_FBS);
	port_fbs &= ~PORT_FBS_DEV_MASK;
	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
	writel(port_fbs, port_mmio + PORT_FBS);

	rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);

	return rc;
}

/**
 * xgene_ahci_softreset - Issue the softreset to the drive.
 * @link: link to reset
 * @class: Return value to indicate class of device
 * @deadline: deadline jiffies for the operation
 *
 * Due to H/W errata, the controller is unable to save the PMP
 * field fetched from the command header before sending the H2D FIS.
 * When the device returns the PMP port field in the D2H FIS, there is
 * a mismatch that results in command completion failure. The workaround
 * is to write the pmp value to the PxFBS.DEV field before issuing any command
 * to the PMP. Here is the algorithm to detect a PMP:
 *
 * 1. Save the PxFBS value
 * 2. Program PxFBS.DEV with the pmp value sent by the framework. The
 *    framework sends 0xF for both PMP/NON-PMP initially
 * 3. Issue softreset
 * 4. If the signature class is PMP, go to 6
 * 5. Restore the original PxFBS and go to 3
 * 6. Return
 */
static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 port_fbs;
	u32 port_fbs_save;
	u32 retry = 1;
	u32 rc;

	port_fbs_save = readl(port_mmio + PORT_FBS);

	/*
	 * Set PxFBS.DEV field with pmp
	 * value.
	 */
	port_fbs = readl(port_mmio + PORT_FBS);
	port_fbs &= ~PORT_FBS_DEV_MASK;
	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
	writel(port_fbs, port_mmio + PORT_FBS);

softreset_retry:
	rc = ahci_do_softreset(link, class, pmp,
			       deadline, ahci_check_ready);

	ctx->class[ap->port_no] = *class;
	if (*class != ATA_DEV_PMP) {
		/*
		 * Retry for normal drives without
		 * setting the PxFBS.DEV field with the pmp value.
		 */
		if (retry--) {
			writel(port_fbs_save, port_mmio + PORT_FBS);
			goto softreset_retry;
		}
	}

	return rc;
}

/**
 * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
 * @host: Host that received the irq
 * @irq_masked: HOST_IRQ_STAT value
 *
 * For hardware with a broken edge-trigger latch, the HOST_IRQ_STAT register
 * misses an edge interrupt when the clearing of HOST_IRQ_STAT and the
 * hardware's reporting into PORT_IRQ_STAT happen in the same clock cycle.
 * The algorithm below outlines the workaround.
 *
 * 1. Read the HOST_IRQ_STAT register and save the state.
 * 2. Clear the HOST_IRQ_STAT register.
 * 3. Read back the HOST_IRQ_STAT register.
 * 4. If the HOST_IRQ_STAT register equals zero, then
 *    traverse the remaining ports' PORT_IRQ_STAT registers
 *    to check whether an interrupt is pending at that point, else
 *    go to step 6.
 * 5. If the PORT_IRQ_STAT register of any remaining port is not zero,
 *    then update the HOST_IRQ_STAT state saved in step 1.
 * 6. Handle port interrupts.
 * 7. Exit
 */
static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
					     u32 irq_masked)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *port_mmio;
	int i;

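	/*
	 * HOST_IRQ_STAT has already been cleared by the interrupt handler.
	 * If it now reads zero, scan the ports not yet flagged in
	 * irq_masked for pending PORT_IRQ_STAT events that the broken edge
	 * latch may have dropped.
	 */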
	if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
		for (i = 0; i < host->n_ports; i++) {
			if (irq_masked & (1 << i))
				continue;

			port_mmio = ahci_port_base(host->ports[i]);
			if (readl(port_mmio + PORT_IRQ_STAT))
				irq_masked |= (1 << i);
		}
	}

	return ahci_handle_port_intr(host, irq_masked);
}

static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int rc = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	hpriv = host->private_data;
	mmio = hpriv->mmio;

	/* sigh. 0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	/*
	 * HOST_IRQ_STAT behaves as an edge triggered latch, meaning that
	 * it should be cleared before all the port events are cleared.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(rc);
}

static struct ata_port_operations xgene_ahci_v1_ops = {
	.inherits = &ahci_ops,
	.host_stop = xgene_ahci_host_stop,
	.hardreset = xgene_ahci_hardreset,
	.read_id = xgene_ahci_read_id,
	.qc_issue = xgene_ahci_qc_issue,
	.softreset = xgene_ahci_softreset,
	.pmp_softreset = xgene_ahci_pmp_softreset
};

static const struct ata_port_info xgene_ahci_v1_port_info = {
	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
	.pio_mask = ATA_PIO4,
	.udma_mask = ATA_UDMA6,
	.port_ops = &xgene_ahci_v1_ops,
};

static struct ata_port_operations xgene_ahci_v2_ops = {
	.inherits = &ahci_ops,
	.host_stop = xgene_ahci_host_stop,
	.hardreset = xgene_ahci_hardreset,
	.read_id = xgene_ahci_read_id,
};

static const struct ata_port_info xgene_ahci_v2_port_info = {
	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
	.pio_mask = ATA_PIO4,
	.udma_mask = ATA_UDMA6,
	.port_ops = &xgene_ahci_v2_ops,
};

static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
{
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	int i;
	int rc;
	u32 val;

	/* Remove IP RAM out of shutdown */
	rc = xgene_ahci_init_memram(ctx);
	if (rc)
		return rc;

	for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
		xgene_ahci_set_phy_cfg(ctx, i);

	/* AXI disable Mask */
	writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
	readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
	writel(0, ctx->csr_core + INTSTATUSMASK);
	val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
	dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
		INTSTATUSMASK, val);

	writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
	readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
	writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
	readl(ctx->csr_axi + INT_SLV_TMOMASK);

	/* Enable AXI Interrupt */
	writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);

	/* Enable coherency */
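	/*
	 * Per the inline comments below, the BUSCTLREG bits are
	 * coherency-disable controls (cleared to enable coherency), while
	 * the IOFMSTRWAUX bits are set to enable coherency.
	 */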
	val = readl(ctx->csr_core + BUSCTLREG);
	val &= ~0x00000002; /* Enable write coherency */
	val &= ~0x00000001; /* Enable read coherency */
	writel(val, ctx->csr_core + BUSCTLREG);

	val = readl(ctx->csr_core + IOFMSTRWAUX);
	val |= (1 << 3); /* Enable read coherency */
	val |= (1 << 9); /* Enable write coherency */
	writel(val, ctx->csr_core + IOFMSTRWAUX);
	val = readl(ctx->csr_core + IOFMSTRWAUX);
	dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
		IOFMSTRWAUX, val);

	return rc;
}

static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
{
	u32 val;

	/* Check for optional MUX resource */
	if (!ctx->csr_mux)
		return 0;

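	/*
	 * Clear the select bit so the shared SATA/ENET port is routed to
	 * SATA; if the read-back still shows the bit set, the mux switch
	 * did not take effect.
	 */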
	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
	val &= ~CFG_SATA_ENET_SELECT_MASK;
	writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
	return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
}

static const struct scsi_host_template ahci_platform_sht = {
	AHCI_SHT(DRV_NAME),
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_ahci_acpi_match[] = {
	{ "APMC0D0D", XGENE_AHCI_V1},
	{ "APMC0D32", XGENE_AHCI_V2},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
#endif

static const struct of_device_id xgene_ahci_of_match[] = {
	{.compatible = "apm,xgene-ahci", .data = (void *) XGENE_AHCI_V1},
	{.compatible = "apm,xgene-ahci-v2", .data = (void *) XGENE_AHCI_V2},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);

static int xgene_ahci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct xgene_ahci_context *ctx;
	struct resource *res;
	enum xgene_ahci_version version = XGENE_AHCI_V1;
	const struct ata_port_info *ppi[] = { &xgene_ahci_v1_port_info,
					      &xgene_ahci_v2_port_info };
	int rc;

	hpriv = ahci_platform_get_resources(pdev, 0);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	hpriv->plat_data = ctx;
	ctx->hpriv = hpriv;
	ctx->dev = dev;

	/* Retrieve the IP core resource */
	ctx->csr_core = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(ctx->csr_core))
		return PTR_ERR(ctx->csr_core);

	/* Retrieve the IP diagnostic resource */
	ctx->csr_diag = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(ctx->csr_diag))
		return PTR_ERR(ctx->csr_diag);

	/* Retrieve the IP AXI resource */
	ctx->csr_axi = devm_platform_ioremap_resource(pdev, 3);
	if (IS_ERR(ctx->csr_axi))
		return PTR_ERR(ctx->csr_axi);

	/* Retrieve the optional IP mux resource */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
	if (res) {
		void __iomem *csr = devm_ioremap_resource(dev, res);
		if (IS_ERR(csr))
			return PTR_ERR(csr);

		ctx->csr_mux = csr;
	}

	if (dev->of_node) {
		version = (enum xgene_ahci_version)of_device_get_match_data(dev);
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;
		struct acpi_device_info *info;
		acpi_status status;

		acpi_id = acpi_match_device(xgene_ahci_acpi_match, &pdev->dev);
		if (!acpi_id) {
			dev_warn(&pdev->dev, "No node entry in ACPI table. Assume version1\n");
			version = XGENE_AHCI_V1;
		} else if (acpi_id->driver_data) {
			version = (enum xgene_ahci_version) acpi_id->driver_data;
			status = acpi_get_object_info(ACPI_HANDLE(&pdev->dev), &info);
			if (ACPI_FAILURE(status)) {
				dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
					__func__);
				version = XGENE_AHCI_V1;
			} else {
				if (info->valid & ACPI_VALID_CID)
					version = XGENE_AHCI_V2;
				kfree(info);
			}
		}
	}
#endif

	dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
		hpriv->mmio);

	/* Select ATA */
	if ((rc = xgene_ahci_mux_select(ctx))) {
		dev_err(dev, "SATA mux selection failed error %d\n", rc);
		return -ENODEV;
	}

	if (xgene_ahci_is_memram_inited(ctx)) {
		dev_info(dev, "skip clock and PHY initialization\n");
		goto skip_clk_phy;
	}

	/* Due to errata, HW requires full toggle transition */
	rc = ahci_platform_enable_clks(hpriv);
	if (rc)
		goto disable_resources;
	ahci_platform_disable_clks(hpriv);

	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		goto disable_resources;

	/* Configure the host controller */
	xgene_ahci_hw_init(hpriv);
skip_clk_phy:

	switch (version) {
	case XGENE_AHCI_V1:
		hpriv->flags = AHCI_HFLAG_NO_NCQ;
		break;
	case XGENE_AHCI_V2:
		hpriv->flags |= AHCI_HFLAG_YES_FBS;
		hpriv->irq_handler = xgene_ahci_irq_intr;
		break;
	default:
		break;
	}

	rc = ahci_platform_init_host(pdev, hpriv, ppi[version - 1],
				     &ahci_platform_sht);
	if (rc)
		goto disable_resources;

	dev_dbg(dev, "X-Gene SATA host controller initialized\n");
	return 0;

disable_resources:
	ahci_platform_disable_resources(hpriv);
	return rc;
}

static struct platform_driver xgene_ahci_driver = {
	.probe = xgene_ahci_probe,
	.remove_new = ata_platform_remove_one,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = xgene_ahci_of_match,
		.acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
	},
};

module_platform_driver(xgene_ahci_driver);

MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
MODULE_AUTHOR("Loc Ho <lho@apm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.4");