Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Freescale eSDHC controller driver.
4 *
5 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
6 * Copyright (c) 2009 MontaVista Software, Inc.
7 * Copyright 2020 NXP
8 *
9 * Authors: Xiaobo Xie <X.Xie@freescale.com>
10 * Anton Vorontsov <avorontsov@ru.mvista.com>
11 */
12
13#include <linux/err.h>
14#include <linux/io.h>
15#include <linux/of.h>
16#include <linux/of_address.h>
17#include <linux/delay.h>
18#include <linux/module.h>
19#include <linux/sys_soc.h>
20#include <linux/clk.h>
21#include <linux/ktime.h>
22#include <linux/dma-mapping.h>
23#include <linux/iopoll.h>
24#include <linux/mmc/host.h>
25#include <linux/mmc/mmc.h>
26#include "sdhci-pltfm.h"
27#include "sdhci-esdhc.h"
28
29#define VENDOR_V_22 0x12
30#define VENDOR_V_23 0x13
31
32#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
33
/*
 * Per-SoC clock caps. sd_dflt_max_clk limits the default SD clock used
 * in legacy timing; max_clk[] caps the clock per MMC_TIMING_* mode.
 * A value of 0 means "no cap" (see esdhc_of_set_clock()).
 */
struct esdhc_clk_fixup {
	const unsigned int sd_dflt_max_clk;
	const unsigned int max_clk[MMC_TIMING_NUM];
};
38
/* Per-SoC maximum-clock fixup tables (all values in Hz). */
static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
	.max_clk[MMC_TIMING_SD_HS] = 46500000,
};

static const struct esdhc_clk_fixup ls1043a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 116700000,
	.max_clk[MMC_TIMING_MMC_HS200] = 116700000,
};

static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};

static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};

static const struct esdhc_clk_fixup p1010_esdhc_clk = {
	.sd_dflt_max_clk = 20000000,
	.max_clk[MMC_TIMING_LEGACY] = 20000000,
	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
	.max_clk[MMC_TIMING_SD_HS] = 40000000,
};
69
/*
 * OF match table. Entries with .data attach per-SoC clock fixup caps;
 * the generic compatibles get no fixup.
 */
static const struct of_device_id sdhci_esdhc_of_match[] = {
	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
	{ .compatible = "fsl,ls1043a-esdhc", .data = &ls1043a_esdhc_clk},
	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
	{ .compatible = "fsl,p1010-esdhc",   .data = &p1010_esdhc_clk},
	{ .compatible = "fsl,mpc8379-esdhc" },
	{ .compatible = "fsl,mpc8536-esdhc" },
	{ .compatible = "fsl,esdhc" },
	{ }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
82
/* Driver-private state, stored in the sdhci_pltfm private area. */
struct sdhci_esdhc {
	u8 vendor_ver;		/* eSDHC vendor version (0x12 = 2.2, 0x13 = 2.3) */
	u8 spec_ver;		/* SDHCI spec version reported by the controller */
	bool quirk_incorrect_hostver;		/* HOSTVER lies (T4240 R1.0/R2.0) */
	bool quirk_limited_clk_division;	/* restrict divider choice for HS400 */
	bool quirk_unreliable_pulse_detection;	/* clear DLL pulse-stretch on reset */
	bool quirk_tuning_erratum_type1;	/* always use fixed SW-tuning window */
	bool quirk_tuning_erratum_type2;	/* re-check window after HW tuning */
	bool quirk_ignore_data_inhibit;		/* mask DATA_INHIBIT in present state */
	bool quirk_delay_before_data_reset;	/* delay before data reset when DMA used */
	bool quirk_trans_complete_erratum;	/* NOTE(review): consumed outside this chunk */
	bool in_sw_tuning;	/* set during SW tuning so the writew hooks add
				 * the 1us EXTN->SMPCLKSEL delay */
	unsigned int peripheral_clock;	/* peripheral clock rate, 0 if not provided */
	const struct esdhc_clk_fixup *clk_fixup;	/* per-SoC clock caps, may be NULL */
	u32 div_ratio;		/* last programmed pre_div * div */
};
99
100/**
101 * esdhc_readl_fixup - Fixup the value read from incompatible eSDHC register
102 * to make it compatible with SD spec.
103 *
104 * @host: pointer to sdhci_host
105 * @spec_reg: SD spec register address
106 * @value: 32bit eSDHC register value on spec_reg address
107 *
108 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
109 * registers are 32 bits. There are differences in register size, register
110 * address, register function, bit position and function between eSDHC spec
111 * and SD spec.
112 *
113 * Return a fixed up register value
114 */
115static u32 esdhc_readl_fixup(struct sdhci_host *host,
116 int spec_reg, u32 value)
117{
118 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
119 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
120 u32 ret;
121
122 /*
123 * The bit of ADMA flag in eSDHC is not compatible with standard
124 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
125 * supported by eSDHC.
126 * And for many FSL eSDHC controller, the reset value of field
127 * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
128 * only these vendor version is greater than 2.2/0x12 support ADMA.
129 */
130 if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
131 if (esdhc->vendor_ver > VENDOR_V_22) {
132 ret = value | SDHCI_CAN_DO_ADMA2;
133 return ret;
134 }
135 }
136 /*
137 * The DAT[3:0] line signal levels and the CMD line signal level are
138 * not compatible with standard SDHC register. The line signal levels
139 * DAT[7:0] are at bits 31:24 and the command line signal level is at
140 * bit 23. All other bits are the same as in the standard SDHC
141 * register.
142 */
143 if (spec_reg == SDHCI_PRESENT_STATE) {
144 ret = value & 0x000fffff;
145 ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
146 ret |= (value << 1) & SDHCI_CMD_LVL;
147 return ret;
148 }
149
150 /*
151 * DTS properties of mmc host are used to enable each speed mode
152 * according to soc and board capability. So clean up
153 * SDR50/SDR104/DDR50 support bits here.
154 */
155 if (spec_reg == SDHCI_CAPABILITIES_1) {
156 ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
157 SDHCI_SUPPORT_DDR50);
158 return ret;
159 }
160
161 /*
162 * Some controllers have unreliable Data Line Active
163 * bit for commands with busy signal. This affects
164 * Command Inhibit (data) bit. Just ignore it since
165 * MMC core driver has already polled card status
166 * with CMD13 after any command with busy siganl.
167 */
168 if ((spec_reg == SDHCI_PRESENT_STATE) &&
169 (esdhc->quirk_ignore_data_inhibit == true)) {
170 ret = value & ~SDHCI_DATA_INHIBIT;
171 return ret;
172 }
173
174 ret = value;
175 return ret;
176}
177
178static u16 esdhc_readw_fixup(struct sdhci_host *host,
179 int spec_reg, u32 value)
180{
181 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
182 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
183 u16 ret;
184 int shift = (spec_reg & 0x2) * 8;
185
186 if (spec_reg == SDHCI_TRANSFER_MODE)
187 return pltfm_host->xfer_mode_shadow;
188
189 if (spec_reg == SDHCI_HOST_VERSION)
190 ret = value & 0xffff;
191 else
192 ret = (value >> shift) & 0xffff;
193 /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
194 * vendor version and spec version information.
195 */
196 if ((spec_reg == SDHCI_HOST_VERSION) &&
197 (esdhc->quirk_incorrect_hostver))
198 ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
199 return ret;
200}
201
202static u8 esdhc_readb_fixup(struct sdhci_host *host,
203 int spec_reg, u32 value)
204{
205 u8 ret;
206 u8 dma_bits;
207 int shift = (spec_reg & 0x3) * 8;
208
209 ret = (value >> shift) & 0xff;
210
211 /*
212 * "DMA select" locates at offset 0x28 in SD specification, but on
213 * P5020 or P3041, it locates at 0x29.
214 */
215 if (spec_reg == SDHCI_HOST_CONTROL) {
216 /* DMA select is 22,23 bits in Protocol Control Register */
217 dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
218 /* fixup the result */
219 ret &= ~SDHCI_CTRL_DMA_MASK;
220 ret |= dma_bits;
221 }
222 return ret;
223}
224
225/**
226 * esdhc_writel_fixup - Fixup the SD spec register value so that it could be
227 * written into eSDHC register.
228 *
229 * @host: pointer to sdhci_host
230 * @spec_reg: SD spec register address
231 * @value: 8/16/32bit SD spec register value that would be written
232 * @old_value: 32bit eSDHC register value on spec_reg address
233 *
234 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
235 * registers are 32 bits. There are differences in register size, register
236 * address, register function, bit position and function between eSDHC spec
237 * and SD spec.
238 *
239 * Return a fixed up register value
240 */
241static u32 esdhc_writel_fixup(struct sdhci_host *host,
242 int spec_reg, u32 value, u32 old_value)
243{
244 u32 ret;
245
246 /*
247 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
248 * when SYSCTL[RSTD] is set for some special operations.
249 * No any impact on other operation.
250 */
251 if (spec_reg == SDHCI_INT_ENABLE)
252 ret = value | SDHCI_INT_BLK_GAP;
253 else
254 ret = value;
255
256 return ret;
257}
258
259static u32 esdhc_writew_fixup(struct sdhci_host *host,
260 int spec_reg, u16 value, u32 old_value)
261{
262 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
263 int shift = (spec_reg & 0x2) * 8;
264 u32 ret;
265
266 switch (spec_reg) {
267 case SDHCI_TRANSFER_MODE:
268 /*
269 * Postpone this write, we must do it together with a
270 * command write that is down below. Return old value.
271 */
272 pltfm_host->xfer_mode_shadow = value;
273 return old_value;
274 case SDHCI_COMMAND:
275 ret = (value << 16) | pltfm_host->xfer_mode_shadow;
276 return ret;
277 }
278
279 ret = old_value & (~(0xffff << shift));
280 ret |= (value << shift);
281
282 if (spec_reg == SDHCI_BLOCK_SIZE) {
283 /*
284 * Two last DMA bits are reserved, and first one is used for
285 * non-standard blksz of 4096 bytes that we don't support
286 * yet. So clear the DMA boundary bits.
287 */
288 ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
289 }
290 return ret;
291}
292
/*
 * Fix up an 8-bit SD-spec write for the 32-bit eSDHC register file.
 * Returns the full 32-bit word the caller should write back (or old_value
 * unchanged when the write must be suppressed).
 */
static u32 esdhc_writeb_fixup(struct sdhci_host *host,
			      int spec_reg, u8 value, u32 old_value)
{
	u32 ret;
	u32 dma_bits;
	u8 tmp;
	int shift = (spec_reg & 0x3) * 8;

	/*
	 * eSDHC doesn't have a standard power control register, so we do
	 * nothing here to avoid incorrect operation.
	 */
	if (spec_reg == SDHCI_POWER_CONTROL)
		return old_value;
	/*
	 * "DMA select" location is offset 0x28 in SD specification, but on
	 * P5020 or P3041, it's located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/*
		 * If host control register is not standard, exit
		 * this function
		 */
		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
			return old_value;

		/* DMA select is 22,23 bits in Protocol Control Register */
		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
		/* Relocate the new DMA-select bits up to 22,23 ... */
		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
		/*
		 * ... then rebuild the low byte from the new value but keep
		 * the hardware's own bits in the spec DMA-select slot.
		 */
		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
		      (old_value & SDHCI_CTRL_DMA_MASK);
		ret = (ret & (~0xff)) | tmp;

		/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
		ret &= ~ESDHC_HOST_CONTROL_RES;
		return ret;
	}

	/* Generic path: replace the addressed byte within the 32-bit word. */
	ret = (old_value & (~(0xff << shift))) | (value << shift);
	return ret;
}
334
335static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
336{
337 u32 ret;
338 u32 value;
339
340 if (reg == SDHCI_CAPABILITIES_1)
341 value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
342 else
343 value = ioread32be(host->ioaddr + reg);
344
345 ret = esdhc_readl_fixup(host, reg, value);
346
347 return ret;
348}
349
350static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
351{
352 u32 ret;
353 u32 value;
354
355 if (reg == SDHCI_CAPABILITIES_1)
356 value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
357 else
358 value = ioread32(host->ioaddr + reg);
359
360 ret = esdhc_readl_fixup(host, reg, value);
361
362 return ret;
363}
364
365static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
366{
367 u16 ret;
368 u32 value;
369 int base = reg & ~0x3;
370
371 value = ioread32be(host->ioaddr + base);
372 ret = esdhc_readw_fixup(host, reg, value);
373 return ret;
374}
375
376static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
377{
378 u16 ret;
379 u32 value;
380 int base = reg & ~0x3;
381
382 value = ioread32(host->ioaddr + base);
383 ret = esdhc_readw_fixup(host, reg, value);
384 return ret;
385}
386
387static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
388{
389 u8 ret;
390 u32 value;
391 int base = reg & ~0x3;
392
393 value = ioread32be(host->ioaddr + base);
394 ret = esdhc_readb_fixup(host, reg, value);
395 return ret;
396}
397
398static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
399{
400 u8 ret;
401 u32 value;
402 int base = reg & ~0x3;
403
404 value = ioread32(host->ioaddr + base);
405 ret = esdhc_readb_fixup(host, reg, value);
406 return ret;
407}
408
409static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
410{
411 u32 value;
412
413 value = esdhc_writel_fixup(host, reg, val, 0);
414 iowrite32be(value, host->ioaddr + reg);
415}
416
417static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
418{
419 u32 value;
420
421 value = esdhc_writel_fixup(host, reg, val, 0);
422 iowrite32(value, host->ioaddr + reg);
423}
424
/*
 * 16-bit big-endian write: read-modify-write the aligned 32-bit word.
 * TRANSFER_MODE writes are deferred by the fixup (shadowed, written with
 * the command), so nothing is written out for them here.
 */
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32be(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us later after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		/* Only on the EXTN 0->1 transition during SW tuning */
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32be(ret, host->ioaddr + base);
		}
	}
}
450
/*
 * 16-bit little-endian write: read-modify-write the aligned 32-bit word.
 * TRANSFER_MODE writes are deferred by the fixup (shadowed, written with
 * the command), so nothing is written out for them here.
 */
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us later after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		/* Only on the EXTN 0->1 transition during SW tuning */
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32(ret, host->ioaddr + base);
		}
	}
}
476
477static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
478{
479 int base = reg & ~0x3;
480 u32 value;
481 u32 ret;
482
483 value = ioread32be(host->ioaddr + base);
484 ret = esdhc_writeb_fixup(host, reg, val, value);
485 iowrite32be(ret, host->ioaddr + base);
486}
487
488static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
489{
490 int base = reg & ~0x3;
491 u32 value;
492 u32 ret;
493
494 value = ioread32(host->ioaddr + base);
495 ret = esdhc_writeb_fixup(host, reg, val, value);
496 iowrite32(ret, host->ioaddr + base);
497}
498
/*
 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
 * and Block Gap Event(IRQSTAT[BGE]) are also set.
 * For Continue, apply soft reset for data(SYSCTL[RSTD]);
 * and re-issue the entire read transaction from beginning.
 */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	bool applicable;
	dma_addr_t dmastart;
	dma_addr_t dmanow;

	/* Workaround only applies to vendor version 2.3 controllers */
	applicable = (intmask & SDHCI_INT_DATA_END) &&
		     (intmask & SDHCI_INT_BLK_GAP) &&
		     (esdhc->vendor_ver == VENDOR_V_23);
	if (!applicable)
		return;

	/* Treat as success and restart DMA at the next boundary */
	host->data->error = 0;
	dmastart = sg_dma_address(host->data->sg);
	dmanow = dmastart + host->data->bytes_xfered;
	/*
	 * Force update to the next DMA block boundary.
	 */
	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
		 SDHCI_DEFAULT_BOUNDARY_SIZE;
	host->data->bytes_xfered = dmanow - dmastart;
	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}
531
/*
 * Enable DMA for the host: set the 40-bit DMA mask on LS1043A/LS1046A
 * and program the snoop bit to match the bus coherency of the device.
 * Returns 0 on success or the dma_set_mask_and_coherent() error.
 */
static int esdhc_of_enable_dma(struct sdhci_host *host)
{
	int ret;
	u32 value;
	struct device *dev = mmc_dev(host->mmc);

	/* These SoCs address at most 40 bits of DMA space */
	if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
	    of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
		if (ret)
			return ret;
	}

	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);

	/* Snoop only when the interconnect is cache-coherent */
	if (of_dma_is_coherent(dev->of_node))
		value |= ESDHC_DMA_SNOOP;
	else
		value &= ~ESDHC_DMA_SNOOP;

	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
	return 0;
}
555
556static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
557{
558 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
559 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
560
561 if (esdhc->peripheral_clock)
562 return esdhc->peripheral_clock;
563 else
564 return pltfm_host->clock;
565}
566
567static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
568{
569 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
570 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
571 unsigned int clock;
572
573 if (esdhc->peripheral_clock)
574 clock = esdhc->peripheral_clock;
575 else
576 clock = pltfm_host->clock;
577 return clock / 256 / 16;
578}
579
/*
 * Gate or ungate the SD card clock (and, on vendor version <= 2.2, the
 * internal IPG/HCK/PER clocks which only exist there). When enabling on
 * newer controllers, poll up to 20 ms for the clock-stable bit.
 */
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	ktime_t timeout;
	u32 val, clk_en;

	clk_en = ESDHC_CLOCK_SDCLKEN;

	/*
	 * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
	 * is 2.2 or lower.
	 */
	if (esdhc->vendor_ver <= VENDOR_V_22)
		clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
			   ESDHC_CLOCK_PEREN);

	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);

	if (enable)
		val |= clk_en;
	else
		val &= ~clk_en;

	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);

	/*
	 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
	 * wait clock stable bit which does not exist.
	 */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (esdhc->vendor_ver > VENDOR_V_22) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}
624
/*
 * Kick the asynchronous FIFO flush and poll up to 20 ms for the hardware
 * to clear the flush bit. Times out with an error message but does not
 * propagate failure (callers proceed regardless).
 */
static void esdhc_flush_async_fifo(struct sdhci_host *host)
{
	ktime_t timeout;
	u32 val;

	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	val |= ESDHC_FLUSH_ASYNC_FIFO;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		/* Hardware clears the bit when the flush completes */
		if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
		      ESDHC_FLUSH_ASYNC_FIFO))
			break;
		if (timedout) {
			pr_err("%s: flushing asynchronous FIFO timeout.\n",
			       mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}
650
/*
 * Program the SD clock: apply per-SoC clock caps, compute the prescaler
 * (pre_div) and divisor (div), write them to SYSCTL, wait for clock
 * stability, and perform the extra DLL setup required for HS400.
 */
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	unsigned int pre_div = 1, div = 1;
	unsigned int clock_fixup = 0;
	ktime_t timeout;
	u32 temp;

	/* clock == 0 means "gate the card clock" */
	if (clock == 0) {
		host->mmc->actual_clock = 0;
		esdhc_clock_enable(host, false);
		return;
	}

	/* Start pre_div at 2 for vendor version < 2.3. */
	if (esdhc->vendor_ver < VENDOR_V_23)
		pre_div = 2;

	/* Fix clock value. */
	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
		clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
	else if (esdhc->clk_fixup)
		clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];

	/* A cap of 0 means no cap; never raise the requested clock */
	if (clock_fixup == 0 || clock < clock_fixup)
		clock_fixup = clock;

	/* Calculate pre_div and div. */
	while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
		pre_div *= 2;

	while (host->max_clk / pre_div / div > clock_fixup && div < 16)
		div++;

	esdhc->div_ratio = pre_div * div;

	/* Limit clock division for HS400 200MHz clock for quirk. */
	if (esdhc->quirk_limited_clk_division &&
	    clock == MMC_HS200_MAX_DTR &&
	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
	     host->flags & SDHCI_HS400_TUNING)) {
		if (esdhc->div_ratio <= 4) {
			pre_div = 4;
			div = 1;
		} else if (esdhc->div_ratio <= 8) {
			pre_div = 4;
			div = 2;
		} else if (esdhc->div_ratio <= 12) {
			pre_div = 4;
			div = 3;
		} else {
			pr_warn("%s: using unsupported clock division.\n",
				mmc_hostname(host->mmc));
		}
		esdhc->div_ratio = pre_div * div;
	}

	host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;

	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
		clock, host->mmc->actual_clock);

	/* Set clock division into register. */
	/* Hardware encodes prescaler as value/2 and divisor as value-1 */
	pre_div >>= 1;
	div--;

	/* Clock must be gated while changing the dividers */
	esdhc_clock_enable(host, false);

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp &= ~ESDHC_CLOCK_MASK;
	temp |= ((div << ESDHC_DIVIDER_SHIFT) |
		 (pre_div << ESDHC_PREDIV_SHIFT));
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	/*
	 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
	 * wait clock stable bit which does not exist.
	 */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (esdhc->vendor_ver > VENDOR_V_22) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}

	/* Additional setting for HS400. */
	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
	    clock == MMC_HS200_MAX_DTR) {
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
		esdhc_clock_enable(host, true);

		/* Enable the DLL; select the high-frequency range at 200MHz */
		temp = sdhci_readl(host, ESDHC_DLLCFG0);
		temp |= ESDHC_DLL_ENABLE;
		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
			temp |= ESDHC_DLL_FREQ_SEL;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);

		/* Pulse DLL reset: assert, hold 1us, deassert */
		temp |= ESDHC_DLL_RESET;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);
		udelay(1);
		temp &= ~ESDHC_DLL_RESET;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);

		/* Wait max 20 ms */
		if (read_poll_timeout(sdhci_readl, temp,
				      temp & ESDHC_DLL_STS_SLV_LOCK,
				      10, 20000, false,
				      host, ESDHC_DLLSTAT0))
			pr_err("%s: timeout for delay chain lock.\n",
			       mmc_hostname(host->mmc));

		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);

		/* Flush the async FIFO with the clock gated, then re-enable */
		esdhc_clock_enable(host, false);
		esdhc_flush_async_fifo(host);
	}
	esdhc_clock_enable(host, true);
}
782
783static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
784{
785 u32 ctrl;
786
787 ctrl = sdhci_readl(host, ESDHC_PROCTL);
788 ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
789 switch (width) {
790 case MMC_BUS_WIDTH_8:
791 ctrl |= ESDHC_CTRL_8BITBUS;
792 break;
793
794 case MMC_BUS_WIDTH_4:
795 ctrl |= ESDHC_CTRL_4BITBUS;
796 break;
797
798 default:
799 break;
800 }
801
802 sdhci_writel(host, ctrl, ESDHC_PROCTL);
803}
804
/*
 * Controller reset with eSDHC-specific fixups: optional pre-reset delay,
 * bus-width/interrupt restore on old controllers, and tuning-block/DLL
 * cleanup on SDHCI 3.0+ controllers after a full reset.
 */
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val, bus_width = 0;

	/*
	 * Add delay to make sure all the DMA transfers are finished
	 * for quirk.
	 */
	if (esdhc->quirk_delay_before_data_reset &&
	    (mask & SDHCI_RESET_DATA) &&
	    (host->flags & SDHCI_REQ_USE_DMA))
		mdelay(5);

	/*
	 * Save bus-width for eSDHC whose vendor version is 2.2
	 * or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
	}

	sdhci_reset(host, mask);

	/*
	 * Restore bus-width setting and interrupt registers for eSDHC
	 * whose vendor version is 2.2 or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
		val |= bus_width;
		sdhci_writel(host, val, ESDHC_PROCTL);

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}

	/*
	 * Some bits have to be cleaned manually for eSDHC whose spec
	 * version is higher than 3.0 for all reset.
	 */
	if ((mask & SDHCI_RESET_ALL) &&
	    (esdhc->spec_ver >= SDHCI_SPEC_300)) {
		/* Disable the tuning block after a full reset */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_EN;
		sdhci_writel(host, val, ESDHC_TBCTL);

		/*
		 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
		 * 0 for quirk.
		 */
		if (esdhc->quirk_unreliable_pulse_detection) {
			val = sdhci_readl(host, ESDHC_DLLCFG1);
			val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
			sdhci_writel(host, val, ESDHC_DLLCFG1);
		}
	}
}
868
/* The SCFG, Supplemental Configuration Unit, provides SoC specific
 * configuration and status registers for the device. There is a
 * SDHC IO VSEL control register on SCFG for some platforms. It's
 * used to support SDHC IO voltage switching.
 */
static const struct of_device_id scfg_device_ids[] = {
	{ .compatible = "fsl,t1040-scfg", },
	{ .compatible = "fsl,ls1012a-scfg", },
	{ .compatible = "fsl,ls1046a-scfg", },
	{}
};

/* SDHC IO VSEL control register definition */
/* Used by esdhc_signal_voltage_switch() for the 1.8V transition. */
#define SCFG_SDHCIOVSELCR	0x408
#define SDHCIOVSELCR_TGLEN	0x80000000
#define SDHCIOVSELCR_VSELVAL	0x60000000
#define SDHCIOVSELCR_SDHC_VS	0x00000001
886
/*
 * Switch the IO signal voltage via ESDHC_PROCTL[VOLT_SEL]. For the 1.8V
 * transition on SoCs with an SCFG VSEL register, also toggle the SCFG
 * SDHCIOVSELCR sequence around the PROCTL write. Always returns 0
 * (unsupported voltages are silently accepted).
 */
static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct device_node *scfg_node;
	void __iomem *scfg_base = NULL;
	u32 sdhciovselcr;
	u32 val;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	val = sdhci_readl(host, ESDHC_PROCTL);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		val &= ~ESDHC_VOLT_SEL;
		sdhci_writel(host, val, ESDHC_PROCTL);
		return 0;
	case MMC_SIGNAL_VOLTAGE_180:
		scfg_node = of_find_matching_node(NULL, scfg_device_ids);
		if (scfg_node)
			scfg_base = of_iomap(scfg_node, 0);
		of_node_put(scfg_node);
		if (scfg_base) {
			/* Select 1.8V and enable toggling... */
			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_VSELVAL;
			iowrite32be(sdhciovselcr,
				    scfg_base + SCFG_SDHCIOVSELCR);

			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
			/* ...let the rail settle before latching */
			mdelay(5);

			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_SDHC_VS;
			iowrite32be(sdhciovselcr,
				    scfg_base + SCFG_SDHCIOVSELCR);
			iounmap(scfg_base);
		} else {
			/* No SCFG: the PROCTL bit alone does the switch */
			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
		}
		return 0;
	default:
		return 0;
	}
}
939
/* SoCs affected by tuning erratum type 1: SW tuning uses a fixed window. */
static struct soc_device_attribute soc_tuning_erratum_type1[] = {
	{ .family = "QorIQ T1023", },
	{ .family = "QorIQ T1040", },
	{ .family = "QorIQ T2080", },
	{ .family = "QorIQ LS1021A", },
	{ /* sentinel */ }
};

/* SoCs affected by tuning erratum type 2: tuning window is re-validated. */
static struct soc_device_attribute soc_tuning_erratum_type2[] = {
	{ .family = "QorIQ LS1012A", },
	{ .family = "QorIQ LS1043A", },
	{ .family = "QorIQ LS1046A", },
	{ .family = "QorIQ LS1080A", },
	{ .family = "QorIQ LS2080A", },
	{ .family = "QorIQ LA1575A", },
	{ /* sentinel */ }
};
957
958static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
959{
960 u32 val;
961
962 esdhc_clock_enable(host, false);
963 esdhc_flush_async_fifo(host);
964
965 val = sdhci_readl(host, ESDHC_TBCTL);
966 if (enable)
967 val |= ESDHC_TB_EN;
968 else
969 val &= ~ESDHC_TB_EN;
970 sdhci_writel(host, val, ESDHC_TBCTL);
971
972 esdhc_clock_enable(host, true);
973}
974
/*
 * Read back the hardware tuning window pointers from TBSTAT.
 * The TBCTL write/re-write and the double TBSTAT read follow the
 * hardware-required access sequence; do not "simplify" them.
 */
static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	u32 val;

	/* Write TBCTL[11:8]=4'h8 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~(0xf << 8);
	val |= 8 << 8;
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read TBCTL[31:0] register and rewrite again */
	val = sdhci_readl(host, ESDHC_TBCTL);
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read the TBSTAT[31:0] register twice */
	val = sdhci_readl(host, ESDHC_TBSTAT);
	val = sdhci_readl(host, ESDHC_TBSTAT);

	/* TBSTAT[7:0] = end pointer, TBSTAT[15:8] = start pointer */
	*window_end = val & 0xff;
	*window_start = (val >> 8) & 0xff;
}
1001
/*
 * Choose the SW-tuning window. Type-1 erratum parts always use the fixed
 * 5x/3x div_ratio window; otherwise the hardware-reported window is
 * validated and widened to 8x/4x when it is implausibly large.
 */
static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 start_ptr, end_ptr;

	if (esdhc->quirk_tuning_erratum_type1) {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
		return;
	}

	esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);

	/* Reset data lines by setting ESDHCCTL[RSTD] */
	sdhci_reset(host, SDHCI_RESET_DATA);
	/* Write 32'hFFFF_FFFF to IRQSTAT register */
	sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);

	/* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
	 * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
	 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
	 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
	 */

	if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
		*window_start = 8 * esdhc->div_ratio;
		*window_end = 4 * esdhc->div_ratio;
	} else {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
	}
}
1036
1037static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
1038 u8 window_start, u8 window_end)
1039{
1040 struct sdhci_host *host = mmc_priv(mmc);
1041 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1042 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1043 u32 val;
1044 int ret;
1045
1046 /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
1047 val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
1048 ESDHC_WNDW_STRT_PTR_MASK;
1049 val |= window_end & ESDHC_WNDW_END_PTR_MASK;
1050 sdhci_writel(host, val, ESDHC_TBPTR);
1051
1052 /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
1053 val = sdhci_readl(host, ESDHC_TBCTL);
1054 val &= ~ESDHC_TB_MODE_MASK;
1055 val |= ESDHC_TB_MODE_SW;
1056 sdhci_writel(host, val, ESDHC_TBCTL);
1057
1058 esdhc->in_sw_tuning = true;
1059 ret = sdhci_execute_tuning(mmc, opcode);
1060 esdhc->in_sw_tuning = false;
1061 return ret;
1062}
1063
/*
 * esdhc_execute_tuning - eSDHC-specific tuning entry point.
 *
 * Performs HW tuning first; if that fails in a way that matches the
 * type1/type2 tuning errata, falls back to SW tuning over a computed
 * window, and finally retries at a reduced clock (one extra loop
 * iteration, governed by 'retries').
 */
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 window_start, window_end;
	int ret, retries = 1;
	bool hs400_tuning;
	unsigned int clk;
	u32 val;

	/* For tuning mode, the sd clock divisor value
	 * must be larger than 3 according to reference manual.
	 */
	clk = esdhc->peripheral_clock / 3;
	if (host->clock > clk)
		esdhc_of_set_clock(host, clk);

	esdhc_tuning_block_enable(host, true);

	/*
	 * The eSDHC controller takes the data timeout value into account
	 * during tuning. If the SD card is too slow sending the response, the
	 * timer will expire and a "Buffer Read Ready" interrupt without data
	 * is triggered. This leads to tuning errors.
	 *
	 * Just set the timeout to the maximum value because the core will
	 * already take care of it in sdhci_send_tuning().
	 */
	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);

	/* Latch the flag now: sdhci_execute_tuning() clears it in host->flags. */
	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	do {
		if (esdhc->quirk_limited_clk_division &&
		    hs400_tuning)
			esdhc_of_set_clock(host, host->clock);

		/* Do HW tuning */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_MODE_MASK;
		val |= ESDHC_TB_MODE_3;
		sdhci_writel(host, val, ESDHC_TBCTL);

		ret = sdhci_execute_tuning(mmc, opcode);
		if (ret)
			break;

		/* For type2 affected platforms of the tuning erratum,
		 * tuning may succeed although eSDHC might not have
		 * tuned properly. Need to check tuning window.
		 */
		if (esdhc->quirk_tuning_erratum_type2 &&
		    !host->tuning_err) {
			esdhc_tuning_window_ptr(host, &window_start,
						&window_end);
			/* Window wider than 4*div_ratio+2 taps means the
			 * result is untrustworthy: force the SW fallback.
			 */
			if (abs(window_start - window_end) >
			    (4 * esdhc->div_ratio + 2))
				host->tuning_err = -EAGAIN;
		}

		/* If HW tuning fails and triggers erratum,
		 * try workaround.
		 */
		ret = host->tuning_err;
		if (ret == -EAGAIN &&
		    (esdhc->quirk_tuning_erratum_type1 ||
		     esdhc->quirk_tuning_erratum_type2)) {
			/* Recover HS400 tuning flag */
			if (hs400_tuning)
				host->flags |= SDHCI_HS400_TUNING;
			pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
				mmc_hostname(mmc));
			/* Do SW tuning */
			esdhc_prepare_sw_tuning(host, &window_start,
						&window_end);
			ret = esdhc_execute_sw_tuning(mmc, opcode,
						      window_start,
						      window_end);
			if (ret)
				break;

			/* Retry both HW/SW tuning with reduced clock. */
			ret = host->tuning_err;
			if (ret == -EAGAIN && retries) {
				/* Recover HS400 tuning flag */
				if (hs400_tuning)
					host->flags |= SDHCI_HS400_TUNING;

				clk = host->max_clk / (esdhc->div_ratio + 1);
				esdhc_of_set_clock(host, clk);
				pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
					mmc_hostname(mmc));
			} else {
				break;
			}
		} else {
			break;
		}
	} while (retries--);

	if (ret) {
		/* Tuning failed: leave the tuning block disabled. */
		esdhc_tuning_block_enable(host, false);
	} else if (hs400_tuning) {
		/* Tuning OK for HS400: enable flow-control background ops. */
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val |= ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
	}

	return ret;
}
1175
/*
 * esdhc_set_uhs_signaling - switch signaling/timing mode.
 *
 * Tears down any active HS400 configuration first (flow control, CMD
 * clock control, HS400 mode bit, DLL, window adjust, tuning block),
 * then either re-arms the tuning block for HS400 or defers to the
 * generic SDHCI UHS handling for every other timing.
 */
static void esdhc_set_uhs_signaling(struct sdhci_host *host,
				   unsigned int timing)
{
	u32 val;

	/*
	 * There are specific registers setting for HS400 mode.
	 * Clean all of them if controller is in HS400 mode to
	 * exit HS400 mode before re-setting any speed mode.
	 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	if (val & ESDHC_HS400_MODE) {
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val &= ~ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);

		val = sdhci_readl(host, ESDHC_SDCLKCTL);
		val &= ~ESDHC_CMD_CLK_CTL;
		sdhci_writel(host, val, ESDHC_SDCLKCTL);

		/* HS400_MODE may only be cleared while the SD clock is off. */
		esdhc_clock_enable(host, false);
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_MODE;
		sdhci_writel(host, val, ESDHC_TBCTL);
		esdhc_clock_enable(host, true);

		val = sdhci_readl(host, ESDHC_DLLCFG0);
		val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
		sdhci_writel(host, val, ESDHC_DLLCFG0);

		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_WNDW_ADJUST;
		sdhci_writel(host, val, ESDHC_TBCTL);

		esdhc_tuning_block_enable(host, false);
	}

	if (timing == MMC_TIMING_MMC_HS400)
		esdhc_tuning_block_enable(host, true);
	else
		sdhci_set_uhs_signaling(host, timing);
}
1218
1219static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
1220{
1221 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1222 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1223 u32 command;
1224
1225 if (esdhc->quirk_trans_complete_erratum) {
1226 command = SDHCI_GET_CMD(sdhci_readw(host,
1227 SDHCI_COMMAND));
1228 if (command == MMC_WRITE_MULTIPLE_BLOCK &&
1229 sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
1230 intmask & SDHCI_INT_DATA_END) {
1231 intmask &= ~SDHCI_INT_DATA_END;
1232 sdhci_writel(host, SDHCI_INT_DATA_END,
1233 SDHCI_INT_STATUS);
1234 }
1235 }
1236 return intmask;
1237}
1238
#ifdef CONFIG_PM_SLEEP
/* Saved copy of the protocol control register across suspend/resume. */
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);

	/* Unless HW retunes by itself (mode 3), force a retune after resume. */
	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	return sdhci_suspend_host(host);
}

static int esdhc_of_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret = sdhci_resume_host(host);

	if (ret == 0) {
		/* Isn't this already done by sdhci_resume_host() ? --rmk */
		esdhc_of_enable_dma(host);
		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
	}
	return ret;
}
#endif
1266
/* System-sleep PM hooks (no runtime PM). */
static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
			esdhc_of_suspend,
			esdhc_of_resume);

/* Host ops for big-endian register layouts (PowerPC-era eSDHC). */
static const struct sdhci_ops sdhci_esdhc_be_ops = {
	.read_l = esdhc_be_readl,
	.read_w = esdhc_be_readw,
	.read_b = esdhc_be_readb,
	.write_l = esdhc_be_writel,
	.write_w = esdhc_be_writew,
	.write_b = esdhc_be_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

/* Host ops for little-endian register layouts (Layerscape SoCs). */
static const struct sdhci_ops sdhci_esdhc_le_ops = {
	.read_l = esdhc_le_readl,
	.read_w = esdhc_le_readw,
	.read_b = esdhc_le_readb,
	.write_l = esdhc_le_writel,
	.write_w = esdhc_le_writew,
	.write_b = esdhc_le_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};
1306
/* Platform data for big-endian controllers; PPC additionally has broken
 * card detection.
 */
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};

/* Platform data for little-endian controllers. */
static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_ops,
};
1323
/* SoC revisions that report a bogus host version register. */
static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ /* sentinel */ }
};

/* SoC revisions limited to pre_div=4, div=1..3 for HS400 clocking. */
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ /* sentinel */ }
};

/* SoC revisions with unreliable pulse detection logic. */
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ /* sentinel */ }
};
1343
/*
 * esdhc_init - one-time controller setup at probe.
 *
 * Reads vendor/spec versions, resolves SoC- and compatible-based quirks,
 * picks up the per-SoC clock fixup table, determines the peripheral clock
 * rate from the DT clock, and programs the persistent clock-select bit.
 */
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
	const struct of_device_id *match;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	struct device_node *np;
	struct clk *clk;
	u32 val;
	u16 host_ver;

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);

	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
			     SDHCI_VENDOR_VER_SHIFT;
	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
	if (soc_device_match(soc_incorrect_hostver))
		esdhc->quirk_incorrect_hostver = true;
	else
		esdhc->quirk_incorrect_hostver = false;

	if (soc_device_match(soc_fixup_sdhc_clkdivs))
		esdhc->quirk_limited_clk_division = true;
	else
		esdhc->quirk_limited_clk_division = false;

	if (soc_device_match(soc_unreliable_pulse_detection))
		esdhc->quirk_unreliable_pulse_detection = true;
	else
		esdhc->quirk_unreliable_pulse_detection = false;

	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
	if (match)
		esdhc->clk_fixup = match->data;
	np = pdev->dev.of_node;

	/* P2020 needs the data-reset delay and transfer-complete erratum
	 * workarounds.
	 */
	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		esdhc->quirk_delay_before_data_reset = true;
		esdhc->quirk_trans_complete_erratum = true;
	}

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		/*
		 * esdhc->peripheral_clock would be assigned with a value
		 * which is eSDHC base clock when use peripheral clock.
		 * For some platforms, the clock value got by common clk
		 * API is peripheral clock while the eSDHC base clock is
		 * 1/2 peripheral clock.
		 */
		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
		else
			esdhc->peripheral_clock = clk_get_rate(clk);

		clk_put(clk);
	}

	esdhc_clock_enable(host, false);
	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	/*
	 * This bit is not able to be reset by SDHCI_RESET_ALL. Need to
	 * initialize it as 1 or 0 once, to override the different value
	 * which may be configured in bootloader.
	 */
	if (esdhc->peripheral_clock)
		val |= ESDHC_PERIPHERAL_CLK_SEL;
	else
		val &= ~ESDHC_PERIPHERAL_CLK_SEL;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
	esdhc_clock_enable(host, true);
}
1419
1420static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
1421{
1422 esdhc_tuning_block_enable(mmc_priv(mmc), false);
1423 return 0;
1424}
1425
/*
 * sdhci_esdhc_probe - platform probe.
 *
 * Chooses the LE or BE platform data from the DT endianness, installs
 * the eSDHC-specific mmc_host_ops overrides, runs esdhc_init(), applies
 * SoC/compatible-specific quirks, then registers the SDHCI host.
 */
static int sdhci_esdhc_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct device_node *np, *tp;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	int ret;

	np = pdev->dev.of_node;

	if (of_property_read_bool(np, "little-endian"))
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
					sizeof(struct sdhci_esdhc));
	else
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
					sizeof(struct sdhci_esdhc));

	if (IS_ERR(host))
		return PTR_ERR(host);

	host->mmc_host_ops.start_signal_voltage_switch =
		esdhc_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
	host->tuning_delay = 1;

	esdhc_init(pdev, host);

	sdhci_get_of_property(pdev);

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);
	if (soc_device_match(soc_tuning_erratum_type1))
		esdhc->quirk_tuning_erratum_type1 = true;
	else
		esdhc->quirk_tuning_erratum_type1 = false;

	if (soc_device_match(soc_tuning_erratum_type2))
		esdhc->quirk_tuning_erratum_type2 = true;
	else
		esdhc->quirk_tuning_erratum_type2 = false;

	/* Vendor version 2.2 cannot do CMD23; newer versions handle busy IRQ. */
	if (esdhc->vendor_ver == VENDOR_V_22)
		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;

	if (esdhc->vendor_ver > VENDOR_V_22)
		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;

	/* Applied when any P2020 eSDHC node exists, not only our own node. */
	tp = of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc");
	if (tp) {
		of_node_put(tp);
		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
	}

	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;

	esdhc->quirk_ignore_data_inhibit = false;
	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		/*
		 * Freescale messed up with P2020 as it has a non-standard
		 * host control register
		 */
		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
		esdhc->quirk_ignore_data_inhibit = true;
	}

	/* call to generic mmc_of_parse to support additional capabilities */
	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto err;

	mmc_of_parse_voltage(host->mmc, &host->ocr_mask);

	ret = sdhci_add_host(host);
	if (ret)
		goto err;

	return 0;
 err:
	sdhci_pltfm_free(pdev);
	return ret;
}
1517
/* Platform driver registration glue. */
static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};

module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Freescale eSDHC controller driver.
4 *
5 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
6 * Copyright (c) 2009 MontaVista Software, Inc.
7 *
8 * Authors: Xiaobo Xie <X.Xie@freescale.com>
9 * Anton Vorontsov <avorontsov@ru.mvista.com>
10 */
11
12#include <linux/err.h>
13#include <linux/io.h>
14#include <linux/of.h>
15#include <linux/of_address.h>
16#include <linux/delay.h>
17#include <linux/module.h>
18#include <linux/sys_soc.h>
19#include <linux/clk.h>
20#include <linux/ktime.h>
21#include <linux/dma-mapping.h>
22#include <linux/mmc/host.h>
23#include <linux/mmc/mmc.h>
24#include "sdhci-pltfm.h"
25#include "sdhci-esdhc.h"
26
/* eSDHC IP vendor versions as read from SDHCI_HOST_VERSION. */
#define VENDOR_V_22 0x12
#define VENDOR_V_23 0x13

/* Number of MMC timing modes; sizes the per-timing clock ceiling table. */
#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)

/*
 * Per-SoC clock limits: a default max clock for SD cards in legacy
 * timing plus an optional ceiling for each timing mode (0 = no limit).
 */
struct esdhc_clk_fixup {
	const unsigned int sd_dflt_max_clk;
	const unsigned int max_clk[MMC_TIMING_NUM];
};
36
/* Per-SoC clock ceiling tables, selected via the OF match table below. */
static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
	.max_clk[MMC_TIMING_SD_HS] = 46500000,
};

static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};

static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};

static const struct esdhc_clk_fixup p1010_esdhc_clk = {
	.sd_dflt_max_clk = 20000000,
	.max_clk[MMC_TIMING_LEGACY] = 20000000,
	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
	.max_clk[MMC_TIMING_SD_HS] = 40000000,
};
61
/* DT match table; .data points at the SoC's clock fixup table, if any. */
static const struct of_device_id sdhci_esdhc_of_match[] = {
	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
	{ .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
	{ .compatible = "fsl,mpc8379-esdhc" },
	{ .compatible = "fsl,mpc8536-esdhc" },
	{ .compatible = "fsl,esdhc" },
	{ }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
73
/* Driver-private state, stored in the sdhci_pltfm private area. */
struct sdhci_esdhc {
	u8 vendor_ver;				/* IP vendor version from SDHCI_HOST_VERSION */
	u8 spec_ver;				/* SDHCI spec version from SDHCI_HOST_VERSION */
	bool quirk_incorrect_hostver;		/* host version register lies (T4240 r1.0/2.0) */
	bool quirk_limited_clk_division;	/* only certain divider combos valid for HS400 */
	bool quirk_unreliable_pulse_detection;	/* pulse detection logic unreliable */
	bool quirk_fixup_tuning;		/* tuning needs fixup */
	bool quirk_ignore_data_inhibit;		/* mask Command Inhibit (data) in present state */
	unsigned int peripheral_clock;		/* eSDHC base clock from DT, 0 if none */
	const struct esdhc_clk_fixup *clk_fixup; /* per-SoC clock ceilings, may be NULL */
	u32 div_ratio;				/* last programmed pre_div * div */
};
86
87/**
88 * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
89 * to make it compatible with SD spec.
90 *
91 * @host: pointer to sdhci_host
92 * @spec_reg: SD spec register address
93 * @value: 32bit eSDHC register value on spec_reg address
94 *
95 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
96 * registers are 32 bits. There are differences in register size, register
97 * address, register function, bit position and function between eSDHC spec
98 * and SD spec.
99 *
100 * Return a fixed up register value
101 */
102static u32 esdhc_readl_fixup(struct sdhci_host *host,
103 int spec_reg, u32 value)
104{
105 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
106 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
107 u32 ret;
108
109 /*
110 * The bit of ADMA flag in eSDHC is not compatible with standard
111 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
112 * supported by eSDHC.
113 * And for many FSL eSDHC controller, the reset value of field
114 * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
115 * only these vendor version is greater than 2.2/0x12 support ADMA.
116 */
117 if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
118 if (esdhc->vendor_ver > VENDOR_V_22) {
119 ret = value | SDHCI_CAN_DO_ADMA2;
120 return ret;
121 }
122 }
123 /*
124 * The DAT[3:0] line signal levels and the CMD line signal level are
125 * not compatible with standard SDHC register. The line signal levels
126 * DAT[7:0] are at bits 31:24 and the command line signal level is at
127 * bit 23. All other bits are the same as in the standard SDHC
128 * register.
129 */
130 if (spec_reg == SDHCI_PRESENT_STATE) {
131 ret = value & 0x000fffff;
132 ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
133 ret |= (value << 1) & SDHCI_CMD_LVL;
134 return ret;
135 }
136
137 /*
138 * DTS properties of mmc host are used to enable each speed mode
139 * according to soc and board capability. So clean up
140 * SDR50/SDR104/DDR50 support bits here.
141 */
142 if (spec_reg == SDHCI_CAPABILITIES_1) {
143 ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
144 SDHCI_SUPPORT_DDR50);
145 return ret;
146 }
147
148 /*
149 * Some controllers have unreliable Data Line Active
150 * bit for commands with busy signal. This affects
151 * Command Inhibit (data) bit. Just ignore it since
152 * MMC core driver has already polled card status
153 * with CMD13 after any command with busy siganl.
154 */
155 if ((spec_reg == SDHCI_PRESENT_STATE) &&
156 (esdhc->quirk_ignore_data_inhibit == true)) {
157 ret = value & ~SDHCI_DATA_INHIBIT;
158 return ret;
159 }
160
161 ret = value;
162 return ret;
163}
164
165static u16 esdhc_readw_fixup(struct sdhci_host *host,
166 int spec_reg, u32 value)
167{
168 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
169 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
170 u16 ret;
171 int shift = (spec_reg & 0x2) * 8;
172
173 if (spec_reg == SDHCI_HOST_VERSION)
174 ret = value & 0xffff;
175 else
176 ret = (value >> shift) & 0xffff;
177 /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
178 * vendor version and spec version information.
179 */
180 if ((spec_reg == SDHCI_HOST_VERSION) &&
181 (esdhc->quirk_incorrect_hostver))
182 ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
183 return ret;
184}
185
186static u8 esdhc_readb_fixup(struct sdhci_host *host,
187 int spec_reg, u32 value)
188{
189 u8 ret;
190 u8 dma_bits;
191 int shift = (spec_reg & 0x3) * 8;
192
193 ret = (value >> shift) & 0xff;
194
195 /*
196 * "DMA select" locates at offset 0x28 in SD specification, but on
197 * P5020 or P3041, it locates at 0x29.
198 */
199 if (spec_reg == SDHCI_HOST_CONTROL) {
200 /* DMA select is 22,23 bits in Protocol Control Register */
201 dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
202 /* fixup the result */
203 ret &= ~SDHCI_CTRL_DMA_MASK;
204 ret |= dma_bits;
205 }
206 return ret;
207}
208
209/**
210 * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
211 * written into eSDHC register.
212 *
213 * @host: pointer to sdhci_host
214 * @spec_reg: SD spec register address
215 * @value: 8/16/32bit SD spec register value that would be written
216 * @old_value: 32bit eSDHC register value on spec_reg address
217 *
218 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
219 * registers are 32 bits. There are differences in register size, register
220 * address, register function, bit position and function between eSDHC spec
221 * and SD spec.
222 *
223 * Return a fixed up register value
224 */
225static u32 esdhc_writel_fixup(struct sdhci_host *host,
226 int spec_reg, u32 value, u32 old_value)
227{
228 u32 ret;
229
230 /*
231 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
232 * when SYSCTL[RSTD] is set for some special operations.
233 * No any impact on other operation.
234 */
235 if (spec_reg == SDHCI_INT_ENABLE)
236 ret = value | SDHCI_INT_BLK_GAP;
237 else
238 ret = value;
239
240 return ret;
241}
242
243static u32 esdhc_writew_fixup(struct sdhci_host *host,
244 int spec_reg, u16 value, u32 old_value)
245{
246 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
247 int shift = (spec_reg & 0x2) * 8;
248 u32 ret;
249
250 switch (spec_reg) {
251 case SDHCI_TRANSFER_MODE:
252 /*
253 * Postpone this write, we must do it together with a
254 * command write that is down below. Return old value.
255 */
256 pltfm_host->xfer_mode_shadow = value;
257 return old_value;
258 case SDHCI_COMMAND:
259 ret = (value << 16) | pltfm_host->xfer_mode_shadow;
260 return ret;
261 }
262
263 ret = old_value & (~(0xffff << shift));
264 ret |= (value << shift);
265
266 if (spec_reg == SDHCI_BLOCK_SIZE) {
267 /*
268 * Two last DMA bits are reserved, and first one is used for
269 * non-standard blksz of 4096 bytes that we don't support
270 * yet. So clear the DMA boundary bits.
271 */
272 ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
273 }
274 return ret;
275}
276
277static u32 esdhc_writeb_fixup(struct sdhci_host *host,
278 int spec_reg, u8 value, u32 old_value)
279{
280 u32 ret;
281 u32 dma_bits;
282 u8 tmp;
283 int shift = (spec_reg & 0x3) * 8;
284
285 /*
286 * eSDHC doesn't have a standard power control register, so we do
287 * nothing here to avoid incorrect operation.
288 */
289 if (spec_reg == SDHCI_POWER_CONTROL)
290 return old_value;
291 /*
292 * "DMA select" location is offset 0x28 in SD specification, but on
293 * P5020 or P3041, it's located at 0x29.
294 */
295 if (spec_reg == SDHCI_HOST_CONTROL) {
296 /*
297 * If host control register is not standard, exit
298 * this function
299 */
300 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
301 return old_value;
302
303 /* DMA select is 22,23 bits in Protocol Control Register */
304 dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
305 ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
306 tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
307 (old_value & SDHCI_CTRL_DMA_MASK);
308 ret = (ret & (~0xff)) | tmp;
309
310 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
311 ret &= ~ESDHC_HOST_CONTROL_RES;
312 return ret;
313 }
314
315 ret = (old_value & (~(0xff << shift))) | (value << shift);
316 return ret;
317}
318
319static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
320{
321 u32 ret;
322 u32 value;
323
324 if (reg == SDHCI_CAPABILITIES_1)
325 value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
326 else
327 value = ioread32be(host->ioaddr + reg);
328
329 ret = esdhc_readl_fixup(host, reg, value);
330
331 return ret;
332}
333
334static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
335{
336 u32 ret;
337 u32 value;
338
339 if (reg == SDHCI_CAPABILITIES_1)
340 value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
341 else
342 value = ioread32(host->ioaddr + reg);
343
344 ret = esdhc_readl_fixup(host, reg, value);
345
346 return ret;
347}
348
349static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
350{
351 u16 ret;
352 u32 value;
353 int base = reg & ~0x3;
354
355 value = ioread32be(host->ioaddr + base);
356 ret = esdhc_readw_fixup(host, reg, value);
357 return ret;
358}
359
360static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
361{
362 u16 ret;
363 u32 value;
364 int base = reg & ~0x3;
365
366 value = ioread32(host->ioaddr + base);
367 ret = esdhc_readw_fixup(host, reg, value);
368 return ret;
369}
370
371static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
372{
373 u8 ret;
374 u32 value;
375 int base = reg & ~0x3;
376
377 value = ioread32be(host->ioaddr + base);
378 ret = esdhc_readb_fixup(host, reg, value);
379 return ret;
380}
381
382static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
383{
384 u8 ret;
385 u32 value;
386 int base = reg & ~0x3;
387
388 value = ioread32(host->ioaddr + base);
389 ret = esdhc_readb_fixup(host, reg, value);
390 return ret;
391}
392
393static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
394{
395 u32 value;
396
397 value = esdhc_writel_fixup(host, reg, val, 0);
398 iowrite32be(value, host->ioaddr + reg);
399}
400
401static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
402{
403 u32 value;
404
405 value = esdhc_writel_fixup(host, reg, val, 0);
406 iowrite32(value, host->ioaddr + reg);
407}
408
409static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
410{
411 int base = reg & ~0x3;
412 u32 value;
413 u32 ret;
414
415 value = ioread32be(host->ioaddr + base);
416 ret = esdhc_writew_fixup(host, reg, val, value);
417 if (reg != SDHCI_TRANSFER_MODE)
418 iowrite32be(ret, host->ioaddr + base);
419}
420
421static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
422{
423 int base = reg & ~0x3;
424 u32 value;
425 u32 ret;
426
427 value = ioread32(host->ioaddr + base);
428 ret = esdhc_writew_fixup(host, reg, val, value);
429 if (reg != SDHCI_TRANSFER_MODE)
430 iowrite32(ret, host->ioaddr + base);
431}
432
433static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
434{
435 int base = reg & ~0x3;
436 u32 value;
437 u32 ret;
438
439 value = ioread32be(host->ioaddr + base);
440 ret = esdhc_writeb_fixup(host, reg, val, value);
441 iowrite32be(ret, host->ioaddr + base);
442}
443
444static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
445{
446 int base = reg & ~0x3;
447 u32 value;
448 u32 ret;
449
450 value = ioread32(host->ioaddr + base);
451 ret = esdhc_writeb_fixup(host, reg, val, value);
452 iowrite32(ret, host->ioaddr + base);
453}
454
455/*
456 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
457 * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
458 * and Block Gap Event(IRQSTAT[BGE]) are also set.
459 * For Continue, apply soft reset for data(SYSCTL[RSTD]);
460 * and re-issue the entire read transaction from beginning.
461 */
462static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
463{
464 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
465 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
466 bool applicable;
467 dma_addr_t dmastart;
468 dma_addr_t dmanow;
469
470 applicable = (intmask & SDHCI_INT_DATA_END) &&
471 (intmask & SDHCI_INT_BLK_GAP) &&
472 (esdhc->vendor_ver == VENDOR_V_23);
473 if (!applicable)
474 return;
475
476 host->data->error = 0;
477 dmastart = sg_dma_address(host->data->sg);
478 dmanow = dmastart + host->data->bytes_xfered;
479 /*
480 * Force update to the next DMA block boundary.
481 */
482 dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
483 SDHCI_DEFAULT_BOUNDARY_SIZE;
484 host->data->bytes_xfered = dmanow - dmastart;
485 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
486}
487
488static int esdhc_of_enable_dma(struct sdhci_host *host)
489{
490 u32 value;
491 struct device *dev = mmc_dev(host->mmc);
492
493 if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
494 of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
495 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
496
497 value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
498
499 if (of_dma_is_coherent(dev->of_node))
500 value |= ESDHC_DMA_SNOOP;
501 else
502 value &= ~ESDHC_DMA_SNOOP;
503
504 sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
505 return 0;
506}
507
508static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
509{
510 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
511 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
512
513 if (esdhc->peripheral_clock)
514 return esdhc->peripheral_clock;
515 else
516 return pltfm_host->clock;
517}
518
519static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
520{
521 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
522 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
523 unsigned int clock;
524
525 if (esdhc->peripheral_clock)
526 clock = esdhc->peripheral_clock;
527 else
528 clock = pltfm_host->clock;
529 return clock / 256 / 16;
530}
531
/*
 * esdhc_clock_enable - gate/ungate the SD clock (SYSCTL[SDCLKEN]) and,
 * when enabling, poll up to 20 ms for the clock-stable status bit.
 */
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
	u32 val;
	ktime_t timeout;

	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);

	if (enable)
		val |= ESDHC_CLOCK_SDCLKEN;
	else
		val &= ~ESDHC_CLOCK_SDCLKEN;

	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	val = ESDHC_CLOCK_STABLE;
	while (1) {
		/* Evaluate the deadline before the read so a slow register
		 * access cannot cause a false timeout after success.
		 */
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
				mmc_hostname(host->mmc));
			break;
		}
		udelay(10);
	}
}
562
/*
 * esdhc_of_set_clock - program the SD clock.
 *
 * Applies per-SoC clock ceilings, computes prescaler (pre_div, power of
 * two up to 256) and divisor (div, 1..16), handles the limited-divider
 * quirk and the HS400 enablement sequence, then waits for the clock to
 * stabilise before re-enabling SDCLKEN.
 */
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int pre_div = 1;
	int div = 1;
	int division;
	ktime_t timeout;
	long fixup = 0;
	u32 temp;

	host->mmc->actual_clock = 0;

	/* clock == 0 means "gate the clock and leave it off". */
	if (clock == 0) {
		esdhc_clock_enable(host, false);
		return;
	}

	/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
	if (esdhc->vendor_ver < VENDOR_V_23)
		pre_div = 2;

	/* Clamp the request to the per-SoC ceiling for the current timing. */
	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
		fixup = esdhc->clk_fixup->sd_dflt_max_clk;
	else if (esdhc->clk_fixup)
		fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];

	if (fixup && clock > fixup)
		clock = fixup;

	/* Gate all clocks and clear the divider field before reprogramming. */
	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
		  ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
		pre_div *= 2;

	while (host->max_clk / pre_div / div > clock && div < 16)
		div++;

	/* Some SoCs only support pre_div=4 with div=1..3 for HS400. */
	if (esdhc->quirk_limited_clk_division &&
	    clock == MMC_HS200_MAX_DTR &&
	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
	     host->flags & SDHCI_HS400_TUNING)) {
		division = pre_div * div;
		if (division <= 4) {
			pre_div = 4;
			div = 1;
		} else if (division <= 8) {
			pre_div = 4;
			div = 2;
		} else if (division <= 12) {
			pre_div = 4;
			div = 3;
		} else {
			pr_warn("%s: using unsupported clock division.\n",
				mmc_hostname(host->mmc));
		}
	}

	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
		clock, host->max_clk / pre_div / div);
	host->mmc->actual_clock = host->max_clk / pre_div / div;
	esdhc->div_ratio = pre_div * div;
	/* Register encodings: pre_div is stored halved, div is stored -1. */
	pre_div >>= 1;
	div--;

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
		| (div << ESDHC_DIVIDER_SHIFT)
		| (pre_div << ESDHC_PREDIV_SHIFT));
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	/* HS400 at full rate needs mode bit, CMD clock control and DLL setup. */
	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
	    clock == MMC_HS200_MAX_DTR) {
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
		esdhc_clock_enable(host, true);

		temp = sdhci_readl(host, ESDHC_DLLCFG0);
		temp |= ESDHC_DLL_ENABLE;
		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
			temp |= ESDHC_DLL_FREQ_SEL;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);

		esdhc_clock_enable(host, false);
		temp = sdhci_readl(host, ESDHC_DMA_SYSCTL);
		temp |= ESDHC_FLUSH_ASYNC_FIFO;
		sdhci_writel(host, temp, ESDHC_DMA_SYSCTL);
	}

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
				mmc_hostname(host->mmc));
			return;
		}
		udelay(10);
	}

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp |= ESDHC_CLOCK_SDCLKEN;
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
}
679
680static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
681{
682 u32 ctrl;
683
684 ctrl = sdhci_readl(host, ESDHC_PROCTL);
685 ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
686 switch (width) {
687 case MMC_BUS_WIDTH_8:
688 ctrl |= ESDHC_CTRL_8BITBUS;
689 break;
690
691 case MMC_BUS_WIDTH_4:
692 ctrl |= ESDHC_CTRL_4BITBUS;
693 break;
694
695 default:
696 break;
697 }
698
699 sdhci_writel(host, ctrl, ESDHC_PROCTL);
700}
701
702static void esdhc_reset(struct sdhci_host *host, u8 mask)
703{
704 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
705 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
706 u32 val;
707
708 sdhci_reset(host, mask);
709
710 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
711 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
712
713 if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
714 mdelay(5);
715
716 if (mask & SDHCI_RESET_ALL) {
717 val = sdhci_readl(host, ESDHC_TBCTL);
718 val &= ~ESDHC_TB_EN;
719 sdhci_writel(host, val, ESDHC_TBCTL);
720
721 if (esdhc->quirk_unreliable_pulse_detection) {
722 val = sdhci_readl(host, ESDHC_DLLCFG1);
723 val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
724 sdhci_writel(host, val, ESDHC_DLLCFG1);
725 }
726 }
727}
728
/* The SCFG, Supplemental Configuration Unit, provides SoC specific
 * configuration and status registers for the device. There is a
 * SDHC IO VSEL control register on SCFG for some platforms. It's
 * used to support SDHC IO voltage switching.
 */
static const struct of_device_id scfg_device_ids[] = {
	{ .compatible = "fsl,t1040-scfg", },
	{ .compatible = "fsl,ls1012a-scfg", },
	{ .compatible = "fsl,ls1046a-scfg", },
	{}
};

/* SDHC IO VSEL control register definition */
/* NOTE(review): bit meanings inferred from macro names and usage in
 * esdhc_signal_voltage_switch() -- verify against the SoC SCFG RM. */
#define SCFG_SDHCIOVSELCR	0x408
#define SDHCIOVSELCR_TGLEN	0x80000000
#define SDHCIOVSELCR_VSELVAL	0x60000000
#define SDHCIOVSELCR_SDHC_VS	0x00000001
746
747static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
748 struct mmc_ios *ios)
749{
750 struct sdhci_host *host = mmc_priv(mmc);
751 struct device_node *scfg_node;
752 void __iomem *scfg_base = NULL;
753 u32 sdhciovselcr;
754 u32 val;
755
756 /*
757 * Signal Voltage Switching is only applicable for Host Controllers
758 * v3.00 and above.
759 */
760 if (host->version < SDHCI_SPEC_300)
761 return 0;
762
763 val = sdhci_readl(host, ESDHC_PROCTL);
764
765 switch (ios->signal_voltage) {
766 case MMC_SIGNAL_VOLTAGE_330:
767 val &= ~ESDHC_VOLT_SEL;
768 sdhci_writel(host, val, ESDHC_PROCTL);
769 return 0;
770 case MMC_SIGNAL_VOLTAGE_180:
771 scfg_node = of_find_matching_node(NULL, scfg_device_ids);
772 if (scfg_node)
773 scfg_base = of_iomap(scfg_node, 0);
774 if (scfg_base) {
775 sdhciovselcr = SDHCIOVSELCR_TGLEN |
776 SDHCIOVSELCR_VSELVAL;
777 iowrite32be(sdhciovselcr,
778 scfg_base + SCFG_SDHCIOVSELCR);
779
780 val |= ESDHC_VOLT_SEL;
781 sdhci_writel(host, val, ESDHC_PROCTL);
782 mdelay(5);
783
784 sdhciovselcr = SDHCIOVSELCR_TGLEN |
785 SDHCIOVSELCR_SDHC_VS;
786 iowrite32be(sdhciovselcr,
787 scfg_base + SCFG_SDHCIOVSELCR);
788 iounmap(scfg_base);
789 } else {
790 val |= ESDHC_VOLT_SEL;
791 sdhci_writel(host, val, ESDHC_PROCTL);
792 }
793 return 0;
794 default:
795 return 0;
796 }
797}
798
/* SoC revisions whose hardware tuning needs the software-tuning
 * fallback; matched in probe to set esdhc->quirk_fixup_tuning. */
static struct soc_device_attribute soc_fixup_tuning[] = {
	{ .family = "QorIQ T1040", .revision = "1.0", },
	{ .family = "QorIQ T2080", .revision = "1.0", },
	{ .family = "QorIQ T1023", .revision = "1.0", },
	{ .family = "QorIQ LS1021A", .revision = "1.0", },
	{ .family = "QorIQ LS1080A", .revision = "1.0", },
	{ .family = "QorIQ LS2080A", .revision = "1.0", },
	{ .family = "QorIQ LS1012A", .revision = "1.0", },
	{ .family = "QorIQ LS1043A", .revision = "1.*", },
	{ .family = "QorIQ LS1046A", .revision = "1.0", },
	{ },
};
811
812static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
813{
814 u32 val;
815
816 esdhc_clock_enable(host, false);
817
818 val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
819 val |= ESDHC_FLUSH_ASYNC_FIFO;
820 sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
821
822 val = sdhci_readl(host, ESDHC_TBCTL);
823 if (enable)
824 val |= ESDHC_TB_EN;
825 else
826 val &= ~ESDHC_TB_EN;
827 sdhci_writel(host, val, ESDHC_TBCTL);
828
829 esdhc_clock_enable(host, true);
830}
831
/*
 * esdhc_execute_tuning - eSDHC-specific tuning entry point.
 * @mmc: MMC host
 * @opcode: tuning command opcode
 *
 * Caps the tuning clock per the reference manual, enables the tuning
 * block, then runs the generic SDHCI tuning.  On SoCs with the
 * fixup-tuning quirk a hardware-tuning failure (-EAGAIN) is retried
 * once in software tuning mode with a window derived from the current
 * divider ratio.
 */
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	bool hs400_tuning;
	unsigned int clk;
	u32 val;
	int ret;

	/* For tuning mode, the sd clock divisor value
	 * must be larger than 3 according to reference manual.
	 */
	clk = esdhc->peripheral_clock / 3;
	if (host->clock > clk)
		esdhc_of_set_clock(host, clk);

	/* Limited-division parts re-apply the (snapped) requested clock. */
	if (esdhc->quirk_limited_clk_division &&
	    host->flags & SDHCI_HS400_TUNING)
		esdhc_of_set_clock(host, host->clock);

	esdhc_tuning_block_enable(host, true);

	/* Snapshot the HS400-tuning flag before the generic tuning runs. */
	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	ret = sdhci_execute_tuning(mmc, opcode);

	if (hs400_tuning) {
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val |= ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
	}

	if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {

		/* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and
		 * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO
		 */
		val = sdhci_readl(host, ESDHC_TBPTR);
		val = (val & ~((0x7f << 8) | 0x7f)) |
		(3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
		sdhci_writel(host, val, ESDHC_TBPTR);

		/* program the software tuning mode by setting
		 * TBCTL[TB_MODE]=2'h3
		 */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val |= 0x3;
		sdhci_writel(host, val, ESDHC_TBCTL);
		/*
		 * NOTE(review): the result of this retry is discarded --
		 * @ret still reflects the first attempt.  Confirm this is
		 * intentional before relying on the return value here.
		 */
		sdhci_execute_tuning(mmc, opcode);
	}
	return ret;
}
884
885static void esdhc_set_uhs_signaling(struct sdhci_host *host,
886 unsigned int timing)
887{
888 if (timing == MMC_TIMING_MMC_HS400)
889 esdhc_tuning_block_enable(host, true);
890 else
891 sdhci_set_uhs_signaling(host, timing);
892}
893
894static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
895{
896 u32 command;
897
898 if (of_find_compatible_node(NULL, NULL,
899 "fsl,p2020-esdhc")) {
900 command = SDHCI_GET_CMD(sdhci_readw(host,
901 SDHCI_COMMAND));
902 if (command == MMC_WRITE_MULTIPLE_BLOCK &&
903 sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
904 intmask & SDHCI_INT_DATA_END) {
905 intmask &= ~SDHCI_INT_DATA_END;
906 sdhci_writel(host, SDHCI_INT_DATA_END,
907 SDHCI_INT_STATUS);
908 }
909 }
910 return intmask;
911}
912
#ifdef CONFIG_PM_SLEEP
/*
 * Protocol-control register contents saved across system sleep and
 * restored in esdhc_of_resume().  NOTE(review): a single file-scope
 * slot assumes at most one eSDHC instance -- confirm on SoCs with
 * multiple controllers.
 */
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	/* Save the host control register for restore on resume. */
	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);

	/* Tuning state is lost over suspend unless HW retains it (mode 3). */
	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	return sdhci_suspend_host(host);
}

static int esdhc_of_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret = sdhci_resume_host(host);

	if (ret == 0) {
		/* Isn't this already done by sdhci_resume_host() ? --rmk */
		esdhc_of_enable_dma(host);
		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
	}
	return ret;
}
#endif
940
/* System sleep hooks (compiled to no-ops without CONFIG_PM_SLEEP). */
static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
			esdhc_of_suspend,
			esdhc_of_resume);

/* Callback table for big-endian register layouts. */
static const struct sdhci_ops sdhci_esdhc_be_ops = {
	.read_l = esdhc_be_readl,
	.read_w = esdhc_be_readw,
	.read_b = esdhc_be_readb,
	.write_l = esdhc_be_writel,
	.write_w = esdhc_be_writew,
	.write_b = esdhc_be_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

/* Callback table for little-endian register layouts; identical to the
 * big-endian table except for the raw accessors. */
static const struct sdhci_ops sdhci_esdhc_le_ops = {
	.read_l = esdhc_le_readl,
	.read_w = esdhc_le_readw,
	.read_b = esdhc_le_readb,
	.write_l = esdhc_le_writel,
	.write_w = esdhc_le_writew,
	.write_b = esdhc_le_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

/* Platform data selected at probe for big-endian controllers. */
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};

/* Platform data selected at probe for little-endian controllers. */
static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_ops,
};
997
/* SoC revisions that report an incorrect host controller version
 * (sets esdhc->quirk_incorrect_hostver in esdhc_init()). */
static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ },
};

/* SoCs restricted to a limited set of HS400 clock divisions
 * (sets esdhc->quirk_limited_clk_division in esdhc_init()). */
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};

/* SoCs with unreliable pulse detection; esdhc_reset() clears the
 * DLL pulse-stretch selection bit on these. */
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ },
};
1015
/*
 * esdhc_init - one-time controller setup at probe.
 * @pdev: platform device being probed
 * @host: freshly allocated SDHCI host
 *
 * Reads the vendor/spec version from the host register, applies
 * SoC-revision based quirk flags, picks up the per-compatible clock
 * fixup table, and selects the peripheral clock as the eSDHC base
 * clock where one is provided in the device tree.
 */
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
	const struct of_device_id *match;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	struct device_node *np;
	struct clk *clk;
	u32 val;
	u16 host_ver;

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);

	/* Split SDHCI_HOST_VERSION into vendor and spec version fields. */
	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
			     SDHCI_VENDOR_VER_SHIFT;
	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
	if (soc_device_match(soc_incorrect_hostver))
		esdhc->quirk_incorrect_hostver = true;
	else
		esdhc->quirk_incorrect_hostver = false;

	if (soc_device_match(soc_fixup_sdhc_clkdivs))
		esdhc->quirk_limited_clk_division = true;
	else
		esdhc->quirk_limited_clk_division = false;

	if (soc_device_match(soc_unreliable_pulse_detection))
		esdhc->quirk_unreliable_pulse_detection = true;
	else
		esdhc->quirk_unreliable_pulse_detection = false;

	/* Per-compatible clock limits (see the fixup tables at file top). */
	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
	if (match)
		esdhc->clk_fixup = match->data;
	np = pdev->dev.of_node;
	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		/*
		 * esdhc->peripheral_clock would be assigned with a value
		 * which is eSDHC base clock when use periperal clock.
		 * For some platforms, the clock value got by common clk
		 * API is peripheral clock while the eSDHC base clock is
		 * 1/2 peripheral clock.
		 */
		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1028a-esdhc"))
			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
		else
			esdhc->peripheral_clock = clk_get_rate(clk);

		clk_put(clk);
	}

	/* Switch the base clock source to the peripheral clock, with the
	 * clocks gated during the change. */
	if (esdhc->peripheral_clock) {
		esdhc_clock_enable(host, false);
		val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
		val |= ESDHC_PERIPHERAL_CLK_SEL;
		sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
		esdhc_clock_enable(host, true);
	}
}
1078
1079static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
1080{
1081 esdhc_tuning_block_enable(mmc_priv(mmc), false);
1082 return 0;
1083}
1084
1085static int sdhci_esdhc_probe(struct platform_device *pdev)
1086{
1087 struct sdhci_host *host;
1088 struct device_node *np;
1089 struct sdhci_pltfm_host *pltfm_host;
1090 struct sdhci_esdhc *esdhc;
1091 int ret;
1092
1093 np = pdev->dev.of_node;
1094
1095 if (of_property_read_bool(np, "little-endian"))
1096 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
1097 sizeof(struct sdhci_esdhc));
1098 else
1099 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
1100 sizeof(struct sdhci_esdhc));
1101
1102 if (IS_ERR(host))
1103 return PTR_ERR(host);
1104
1105 host->mmc_host_ops.start_signal_voltage_switch =
1106 esdhc_signal_voltage_switch;
1107 host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
1108 host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
1109 host->tuning_delay = 1;
1110
1111 esdhc_init(pdev, host);
1112
1113 sdhci_get_of_property(pdev);
1114
1115 pltfm_host = sdhci_priv(host);
1116 esdhc = sdhci_pltfm_priv(pltfm_host);
1117 if (soc_device_match(soc_fixup_tuning))
1118 esdhc->quirk_fixup_tuning = true;
1119 else
1120 esdhc->quirk_fixup_tuning = false;
1121
1122 if (esdhc->vendor_ver == VENDOR_V_22)
1123 host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
1124
1125 if (esdhc->vendor_ver > VENDOR_V_22)
1126 host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
1127
1128 if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
1129 host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
1130 host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1131 }
1132
1133 if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
1134 of_device_is_compatible(np, "fsl,p5020-esdhc") ||
1135 of_device_is_compatible(np, "fsl,p4080-esdhc") ||
1136 of_device_is_compatible(np, "fsl,p1020-esdhc") ||
1137 of_device_is_compatible(np, "fsl,t1040-esdhc"))
1138 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1139
1140 if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
1141 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1142
1143 esdhc->quirk_ignore_data_inhibit = false;
1144 if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1145 /*
1146 * Freescale messed up with P2020 as it has a non-standard
1147 * host control register
1148 */
1149 host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
1150 esdhc->quirk_ignore_data_inhibit = true;
1151 }
1152
1153 /* call to generic mmc_of_parse to support additional capabilities */
1154 ret = mmc_of_parse(host->mmc);
1155 if (ret)
1156 goto err;
1157
1158 mmc_of_parse_voltage(np, &host->ocr_mask);
1159
1160 ret = sdhci_add_host(host);
1161 if (ret)
1162 goto err;
1163
1164 return 0;
1165 err:
1166 sdhci_pltfm_free(pdev);
1167 return ret;
1168}
1169
/* Platform driver glue: OF match table, PM ops, probe/remove. */
static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};

module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL v2");