v4.6
 
  1/*
  2 * Freescale eSDHC controller driver.
  3 *
  4 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
  5 * Copyright (c) 2009 MontaVista Software, Inc.
  6 *
  7 * Authors: Xiaobo Xie <X.Xie@freescale.com>
  8 *	    Anton Vorontsov <avorontsov@ru.mvista.com>
  9 *
 10 * This program is free software; you can redistribute it and/or modify
 11 * it under the terms of the GNU General Public License as published by
 12 * the Free Software Foundation; either version 2 of the License, or (at
 13 * your option) any later version.
 14 */
 15
 16#include <linux/err.h>
 17#include <linux/io.h>
 18#include <linux/of.h>
 19#include <linux/delay.h>
 20#include <linux/module.h>
 21#include <linux/mmc/host.h>
 22#include "sdhci-pltfm.h"
 23#include "sdhci-esdhc.h"
 24
 25#define VENDOR_V_22	0x12
 26#define VENDOR_V_23	0x13
 27
 28struct sdhci_esdhc {
 29	u8 vendor_ver;
 30	u8 spec_ver;
 31};
 32
 33/**
 34 * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
 35 *		       to make it compatible with SD spec.
 36 *
 37 * @host: pointer to sdhci_host
 38 * @spec_reg: SD spec register address
 39 * @value: 32bit eSDHC register value on spec_reg address
 40 *
 41 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
 42 * registers are 32 bits. There are differences in register size, register
 43 * address, register function, bit position and function between eSDHC spec
 44 * and SD spec.
 45 *
 46 * Return a fixed up register value
 47 */
 48static u32 esdhc_readl_fixup(struct sdhci_host *host,
 49				     int spec_reg, u32 value)
 50{
 51	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 52	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 53	u32 ret;
 54
 55	/*
 56	 * The bit of ADMA flag in eSDHC is not compatible with standard
 57	 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
 58	 * supported by eSDHC.
 59	 * Also, for many FSL eSDHC controllers the reset value of the
 60	 * SDHCI_CAN_DO_ADMA1 field is 1, but some of them can't support ADMA;
 61	 * only those whose vendor version is greater than 2.2/0x12 support ADMA.
 62	 */
 63	if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
 64		if (esdhc->vendor_ver > VENDOR_V_22) {
 65			ret = value | SDHCI_CAN_DO_ADMA2;
 66			return ret;
 67		}
 68	}
 69	ret = value;
 70	return ret;
 71}
 72
 73static u16 esdhc_readw_fixup(struct sdhci_host *host,
 74				     int spec_reg, u32 value)
 75{
 76	u16 ret;
 77	int shift = (spec_reg & 0x2) * 8;
 78
 79	if (spec_reg == SDHCI_HOST_VERSION)
 80		ret = value & 0xffff;
 81	else
 82		ret = (value >> shift) & 0xffff;
 83	return ret;
 84}
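The shift trick above can be checked in isolation. Below is a minimal standalone sketch (not part of the driver; the register value is an assumed example) showing how a 16-bit read of SDHCI_COMMAND at SD-spec offset 0x0e picks the upper half of the 32-bit eSDHC word at offset 0x0c:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int spec_reg = 0x0e;			/* SDHCI_COMMAND offset in the SD spec */
	uint32_t word = 0x12345678;		/* assumed 32-bit eSDHC value at 0x0c */
	int shift = (spec_reg & 0x2) * 8;	/* (0x0e & 0x2) * 8 = 16 */

	/* prints 0x1234, i.e. the upper 16 bits of the 32-bit word */
	printf("16-bit read: 0x%04x\n", (unsigned int)((word >> shift) & 0xffff));
	return 0;
}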
 85
 86static u8 esdhc_readb_fixup(struct sdhci_host *host,
 87				     int spec_reg, u32 value)
 88{
 89	u8 ret;
 90	u8 dma_bits;
 91	int shift = (spec_reg & 0x3) * 8;
 92
 93	ret = (value >> shift) & 0xff;
 94
 95	/*
 96	 * "DMA select" is located at offset 0x28 in the SD specification, but on
 97	 * P5020 or P3041 it is located at 0x29.
 98	 */
 99	if (spec_reg == SDHCI_HOST_CONTROL) {
100		/* DMA select is 22,23 bits in Protocol Control Register */
101		dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
102		/* fixup the result */
103		ret &= ~SDHCI_CTRL_DMA_MASK;
104		ret |= dma_bits;
105	}
106	return ret;
107}
108
109/**
110 * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
111 *			written into eSDHC register.
112 *
113 * @host: pointer to sdhci_host
114 * @spec_reg: SD spec register address
115 * @value: 8/16/32bit SD spec register value that would be written
116 * @old_value: 32bit eSDHC register value on spec_reg address
117 *
118 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
119 * registers are 32 bits. There are differences in register size, register
120 * address, register function, bit position and function between eSDHC spec
121 * and SD spec.
122 *
123 * Return a fixed up register value
124 */
125static u32 esdhc_writel_fixup(struct sdhci_host *host,
126				     int spec_reg, u32 value, u32 old_value)
127{
128	u32 ret;
129
130	/*
131	 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
132	 * when SYSCTL[RSTD] is set for some special operations.
133	 * It has no impact on other operations.
134	 */
135	if (spec_reg == SDHCI_INT_ENABLE)
136		ret = value | SDHCI_INT_BLK_GAP;
137	else
138		ret = value;
139
140	return ret;
141}
142
143static u32 esdhc_writew_fixup(struct sdhci_host *host,
144				     int spec_reg, u16 value, u32 old_value)
145{
146	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
147	int shift = (spec_reg & 0x2) * 8;
148	u32 ret;
149
150	switch (spec_reg) {
151	case SDHCI_TRANSFER_MODE:
152		/*
153		 * Postpone this write, we must do it together with a
154		 * command write that is down below. Return old value.
155		 */
156		pltfm_host->xfer_mode_shadow = value;
157		return old_value;
158	case SDHCI_COMMAND:
159		ret = (value << 16) | pltfm_host->xfer_mode_shadow;
160		return ret;
161	}
162
163	ret = old_value & (~(0xffff << shift));
164	ret |= (value << shift);
165
166	if (spec_reg == SDHCI_BLOCK_SIZE) {
167		/*
168		 * Two last DMA bits are reserved, and first one is used for
169		 * non-standard blksz of 4096 bytes that we don't support
170		 * yet. So clear the DMA boundary bits.
171		 */
172		ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
173	}
174	return ret;
175}
176
177static u32 esdhc_writeb_fixup(struct sdhci_host *host,
178				     int spec_reg, u8 value, u32 old_value)
179{
180	u32 ret;
181	u32 dma_bits;
182	u8 tmp;
183	int shift = (spec_reg & 0x3) * 8;
184
185	/*
186	 * eSDHC doesn't have a standard power control register, so we do
187	 * nothing here to avoid incorrect operation.
188	 */
189	if (spec_reg == SDHCI_POWER_CONTROL)
190		return old_value;
191	/*
192	 * "DMA select" location is offset 0x28 in SD specification, but on
193	 * P5020 or P3041, it's located at 0x29.
194	 */
195	if (spec_reg == SDHCI_HOST_CONTROL) {
196		/*
197		 * If host control register is not standard, exit
198		 * this function
199		 */
200		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
201			return old_value;
202
203		/* DMA select is 22,23 bits in Protocol Control Register */
204		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
205		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
206		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
207		      (old_value & SDHCI_CTRL_DMA_MASK);
208		ret = (ret & (~0xff)) | tmp;
209
210		/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
211		ret &= ~ESDHC_HOST_CONTROL_RES;
212		return ret;
213	}
214
215	ret = (old_value & (~(0xff << shift))) | (value << shift);
216	return ret;
217}
218
219static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
220{
221	u32 ret;
222	u32 value;
223
224	value = ioread32be(host->ioaddr + reg);
225	ret = esdhc_readl_fixup(host, reg, value);
226
227	return ret;
228}
229
230static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
231{
232	u32 ret;
233	u32 value;
234
235	value = ioread32(host->ioaddr + reg);
236	ret = esdhc_readl_fixup(host, reg, value);
237
238	return ret;
239}
240
241static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
242{
243	u16 ret;
244	u32 value;
245	int base = reg & ~0x3;
246
247	value = ioread32be(host->ioaddr + base);
248	ret = esdhc_readw_fixup(host, reg, value);
249	return ret;
250}
251
252static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
253{
254	u16 ret;
255	u32 value;
256	int base = reg & ~0x3;
257
258	value = ioread32(host->ioaddr + base);
259	ret = esdhc_readw_fixup(host, reg, value);
260	return ret;
261}
262
263static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
264{
265	u8 ret;
266	u32 value;
267	int base = reg & ~0x3;
268
269	value = ioread32be(host->ioaddr + base);
270	ret = esdhc_readb_fixup(host, reg, value);
271	return ret;
272}
273
274static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
275{
276	u8 ret;
277	u32 value;
278	int base = reg & ~0x3;
279
280	value = ioread32(host->ioaddr + base);
281	ret = esdhc_readb_fixup(host, reg, value);
282	return ret;
283}
284
285static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
286{
287	u32 value;
288
289	value = esdhc_writel_fixup(host, reg, val, 0);
290	iowrite32be(value, host->ioaddr + reg);
291}
292
293static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
294{
295	u32 value;
296
297	value = esdhc_writel_fixup(host, reg, val, 0);
298	iowrite32(value, host->ioaddr + reg);
299}
300
301static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
302{
303	int base = reg & ~0x3;
304	u32 value;
305	u32 ret;
306
307	value = ioread32be(host->ioaddr + base);
308	ret = esdhc_writew_fixup(host, reg, val, value);
309	if (reg != SDHCI_TRANSFER_MODE)
310		iowrite32be(ret, host->ioaddr + base);
311}
312
313static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
314{
315	int base = reg & ~0x3;
316	u32 value;
317	u32 ret;
318
319	value = ioread32(host->ioaddr + base);
320	ret = esdhc_writew_fixup(host, reg, val, value);
321	if (reg != SDHCI_TRANSFER_MODE)
322		iowrite32(ret, host->ioaddr + base);
323}
324
325static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
326{
327	int base = reg & ~0x3;
328	u32 value;
329	u32 ret;
330
331	value = ioread32be(host->ioaddr + base);
332	ret = esdhc_writeb_fixup(host, reg, val, value);
333	iowrite32be(ret, host->ioaddr + base);
334}
335
336static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
337{
338	int base = reg & ~0x3;
339	u32 value;
340	u32 ret;
341
342	value = ioread32(host->ioaddr + base);
343	ret = esdhc_writeb_fixup(host, reg, val, value);
344	iowrite32(ret, host->ioaddr + base);
345}
346
347/*
348 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
349 * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
350 * and Block Gap Event(IRQSTAT[BGE]) are also set.
351 * For Continue, apply soft reset for data(SYSCTL[RSTD]);
352 * and re-issue the entire read transaction from beginning.
353 */
354static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
355{
356	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
357	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
358	bool applicable;
359	dma_addr_t dmastart;
360	dma_addr_t dmanow;
361
362	applicable = (intmask & SDHCI_INT_DATA_END) &&
363		     (intmask & SDHCI_INT_BLK_GAP) &&
364		     (esdhc->vendor_ver == VENDOR_V_23);
365	if (!applicable)
366		return;
367
368	host->data->error = 0;
369	dmastart = sg_dma_address(host->data->sg);
370	dmanow = dmastart + host->data->bytes_xfered;
371	/*
372	 * Force update to the next DMA block boundary.
373	 */
374	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
375		SDHCI_DEFAULT_BOUNDARY_SIZE;
376	host->data->bytes_xfered = dmanow - dmastart;
377	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
378}
379
380static int esdhc_of_enable_dma(struct sdhci_host *host)
381{
382	u32 value;
383
384	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
385	value |= ESDHC_DMA_SNOOP;
386	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
387	return 0;
388}
389
390static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
391{
392	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
393
394	return pltfm_host->clock;
395}
396
397static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
398{
399	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
400
401	return pltfm_host->clock / 256 / 16;
402}
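As a quick worked example of the formula above (the platform clock value is an assumed example, not from any board), the minimum SD clock corresponds to the largest eSDHC division, a 256 prescaler times a 16 divisor:

#include <stdio.h>

int main(void)
{
	unsigned int pltfm_clock = 400000000;	/* assumed example pltfm_host->clock */

	/* 400 MHz / 256 / 16 = 400 MHz / 4096 = 97656 Hz, roughly 97.6 kHz */
	printf("min SD clock: %u Hz\n", pltfm_clock / 256 / 16);
	return 0;
}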
403
404static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
405{
406	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
407	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
408	int pre_div = 1;
409	int div = 1;
410	u32 temp;
411
412	host->mmc->actual_clock = 0;
413
414	if (clock == 0)
415		return;
416
417	/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
418	if (esdhc->vendor_ver < VENDOR_V_23)
419		pre_div = 2;
420
421	/* Workaround to reduce the clock frequency for p1010 esdhc */
422	if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
423		if (clock > 20000000)
424			clock -= 5000000;
425		if (clock > 40000000)
426			clock -= 5000000;
427	}
428
429	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
430	temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
431		| ESDHC_CLOCK_MASK);
432	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
433
434	while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
435		pre_div *= 2;
436
437	while (host->max_clk / pre_div / div > clock && div < 16)
438		div++;
439
440	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
441		clock, host->max_clk / pre_div / div);
442	host->mmc->actual_clock = host->max_clk / pre_div / div;
443	pre_div >>= 1;
444	div--;
445
446	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
447	temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
448		| (div << ESDHC_DIVIDER_SHIFT)
449		| (pre_div << ESDHC_PREDIV_SHIFT));
450	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
451	mdelay(1);
452}
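A standalone sketch of the pre_div/div search performed by esdhc_of_set_clock() above, using assumed example clock values (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int max_clk = 400000000;	/* assumed host->max_clk */
	unsigned int clock = 25000000;		/* requested SD clock */
	unsigned int pre_div = 2;		/* vendor version < 2.3 starts at 2 */
	unsigned int div = 1;

	while (max_clk / pre_div / 16 > clock && pre_div < 256)
		pre_div *= 2;
	while (max_clk / pre_div / div > clock && div < 16)
		div++;

	/* 400 MHz / 2 / 8 = 25 MHz; the register fields are pre_div >> 1 and div - 1 */
	printf("pre_div=%u div=%u actual=%u Hz\n",
	       pre_div, div, max_clk / pre_div / div);
	return 0;
}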
453
454static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
455{
456	u32 ctrl;
457
458	ctrl = sdhci_readl(host, ESDHC_PROCTL);
459	ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
460	switch (width) {
461	case MMC_BUS_WIDTH_8:
462		ctrl |= ESDHC_CTRL_8BITBUS;
463		break;
464
465	case MMC_BUS_WIDTH_4:
466		ctrl |= ESDHC_CTRL_4BITBUS;
467		break;
468
469	default:
470		break;
471	}
472
473	sdhci_writel(host, ctrl, ESDHC_PROCTL);
474}
475
476static void esdhc_reset(struct sdhci_host *host, u8 mask)
477{
478	sdhci_reset(host, mask);
479
480	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
481	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
482}
483
484#ifdef CONFIG_PM
485static u32 esdhc_proctl;
486static int esdhc_of_suspend(struct device *dev)
487{
488	struct sdhci_host *host = dev_get_drvdata(dev);
489
490	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
491
492	return sdhci_suspend_host(host);
493}
494
495static int esdhc_of_resume(struct device *dev)
496{
497	struct sdhci_host *host = dev_get_drvdata(dev);
498	int ret = sdhci_resume_host(host);
499
500	if (ret == 0) {
501		/* Isn't this already done by sdhci_resume_host() ? --rmk */
502		esdhc_of_enable_dma(host);
503		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
504	}
505	return ret;
506}
507
508static const struct dev_pm_ops esdhc_pmops = {
509	.suspend	= esdhc_of_suspend,
510	.resume		= esdhc_of_resume,
511};
512#define ESDHC_PMOPS (&esdhc_pmops)
513#else
514#define ESDHC_PMOPS NULL
515#endif
516
517static const struct sdhci_ops sdhci_esdhc_be_ops = {
518	.read_l = esdhc_be_readl,
519	.read_w = esdhc_be_readw,
520	.read_b = esdhc_be_readb,
521	.write_l = esdhc_be_writel,
522	.write_w = esdhc_be_writew,
523	.write_b = esdhc_be_writeb,
524	.set_clock = esdhc_of_set_clock,
525	.enable_dma = esdhc_of_enable_dma,
526	.get_max_clock = esdhc_of_get_max_clock,
527	.get_min_clock = esdhc_of_get_min_clock,
528	.adma_workaround = esdhc_of_adma_workaround,
529	.set_bus_width = esdhc_pltfm_set_bus_width,
530	.reset = esdhc_reset,
531	.set_uhs_signaling = sdhci_set_uhs_signaling,
532};
533
534static const struct sdhci_ops sdhci_esdhc_le_ops = {
535	.read_l = esdhc_le_readl,
536	.read_w = esdhc_le_readw,
537	.read_b = esdhc_le_readb,
538	.write_l = esdhc_le_writel,
539	.write_w = esdhc_le_writew,
540	.write_b = esdhc_le_writeb,
541	.set_clock = esdhc_of_set_clock,
542	.enable_dma = esdhc_of_enable_dma,
543	.get_max_clock = esdhc_of_get_max_clock,
544	.get_min_clock = esdhc_of_get_min_clock,
545	.adma_workaround = esdhc_of_adma_workaround,
546	.set_bus_width = esdhc_pltfm_set_bus_width,
547	.reset = esdhc_reset,
548	.set_uhs_signaling = sdhci_set_uhs_signaling,
549};
550
551static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
552	.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
553		| SDHCI_QUIRK_NO_CARD_NO_RESET
554		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
555	.ops = &sdhci_esdhc_be_ops,
556};
557
558static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
559	.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
560		| SDHCI_QUIRK_NO_CARD_NO_RESET
561		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
562	.ops = &sdhci_esdhc_le_ops,
563};
564
565static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
566{
567	struct sdhci_pltfm_host *pltfm_host;
568	struct sdhci_esdhc *esdhc;
569	u16 host_ver;
570
571	pltfm_host = sdhci_priv(host);
572	esdhc = sdhci_pltfm_priv(pltfm_host);
573
574	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
575	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
576			     SDHCI_VENDOR_VER_SHIFT;
577	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
578}
579
580static int sdhci_esdhc_probe(struct platform_device *pdev)
581{
582	struct sdhci_host *host;
583	struct device_node *np;
584	struct sdhci_pltfm_host *pltfm_host;
585	struct sdhci_esdhc *esdhc;
586	int ret;
587
588	np = pdev->dev.of_node;
589
590	if (of_get_property(np, "little-endian", NULL))
591		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
592					sizeof(struct sdhci_esdhc));
593	else
594		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
595					sizeof(struct sdhci_esdhc));
596
597	if (IS_ERR(host))
598		return PTR_ERR(host);
599
600	esdhc_init(pdev, host);
601
602	sdhci_get_of_property(pdev);
603
604	pltfm_host = sdhci_priv(host);
605	esdhc = sdhci_pltfm_priv(pltfm_host);
606	if (esdhc->vendor_ver == VENDOR_V_22)
607		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
608
609	if (esdhc->vendor_ver > VENDOR_V_22)
610		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
611
612	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
613	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
614	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
615	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
616	    of_device_is_compatible(np, "fsl,t1040-esdhc") ||
617	    of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
618		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
619
620	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
621		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
622
623	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
624		/*
625		 * Freescale messed up with P2020 as it has a non-standard
626		 * host control register
627		 */
628		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
629	}
630
631	/* call to generic mmc_of_parse to support additional capabilities */
632	ret = mmc_of_parse(host->mmc);
633	if (ret)
634		goto err;
635
636	mmc_of_parse_voltage(np, &host->ocr_mask);
637
638	ret = sdhci_add_host(host);
639	if (ret)
640		goto err;
641
642	return 0;
643 err:
644	sdhci_pltfm_free(pdev);
645	return ret;
646}
647
648static const struct of_device_id sdhci_esdhc_of_match[] = {
649	{ .compatible = "fsl,mpc8379-esdhc" },
650	{ .compatible = "fsl,mpc8536-esdhc" },
651	{ .compatible = "fsl,esdhc" },
652	{ }
653};
654MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
655
656static struct platform_driver sdhci_esdhc_driver = {
657	.driver = {
658		.name = "sdhci-esdhc",
659		.of_match_table = sdhci_esdhc_of_match,
660		.pm = ESDHC_PMOPS,
661	},
662	.probe = sdhci_esdhc_probe,
663	.remove = sdhci_pltfm_unregister,
664};
665
666module_platform_driver(sdhci_esdhc_driver);
667
668MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
669MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
670	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
671MODULE_LICENSE("GPL v2");
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Freescale eSDHC controller driver.
   4 *
   5 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
   6 * Copyright (c) 2009 MontaVista Software, Inc.
   7 * Copyright 2020 NXP
   8 *
   9 * Authors: Xiaobo Xie <X.Xie@freescale.com>
  10 *	    Anton Vorontsov <avorontsov@ru.mvista.com>
  11 */
  12
  13#include <linux/err.h>
  14#include <linux/io.h>
  15#include <linux/of.h>
  16#include <linux/of_address.h>
  17#include <linux/delay.h>
  18#include <linux/module.h>
  19#include <linux/sys_soc.h>
  20#include <linux/clk.h>
  21#include <linux/ktime.h>
  22#include <linux/dma-mapping.h>
  23#include <linux/iopoll.h>
  24#include <linux/mmc/host.h>
  25#include <linux/mmc/mmc.h>
  26#include "sdhci-pltfm.h"
  27#include "sdhci-esdhc.h"
  28
  29#define VENDOR_V_22	0x12
  30#define VENDOR_V_23	0x13
  31
  32#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
  33
  34struct esdhc_clk_fixup {
  35	const unsigned int sd_dflt_max_clk;
  36	const unsigned int max_clk[MMC_TIMING_NUM];
  37};
  38
  39static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
  40	.sd_dflt_max_clk = 25000000,
  41	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
  42	.max_clk[MMC_TIMING_SD_HS] = 46500000,
  43};
  44
  45static const struct esdhc_clk_fixup ls1043a_esdhc_clk = {
  46	.sd_dflt_max_clk = 25000000,
  47	.max_clk[MMC_TIMING_UHS_SDR104] = 116700000,
  48	.max_clk[MMC_TIMING_MMC_HS200] = 116700000,
  49};
  50
  51static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
  52	.sd_dflt_max_clk = 25000000,
  53	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
  54	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
  55};
  56
  57static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
  58	.sd_dflt_max_clk = 25000000,
  59	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
  60	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
  61};
  62
  63static const struct esdhc_clk_fixup p1010_esdhc_clk = {
  64	.sd_dflt_max_clk = 20000000,
  65	.max_clk[MMC_TIMING_LEGACY] = 20000000,
  66	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
  67	.max_clk[MMC_TIMING_SD_HS] = 40000000,
  68};
  69
  70static const struct of_device_id sdhci_esdhc_of_match[] = {
  71	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
  72	{ .compatible = "fsl,ls1043a-esdhc", .data = &ls1043a_esdhc_clk},
  73	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
  74	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
  75	{ .compatible = "fsl,p1010-esdhc",   .data = &p1010_esdhc_clk},
  76	{ .compatible = "fsl,mpc8379-esdhc" },
  77	{ .compatible = "fsl,mpc8536-esdhc" },
  78	{ .compatible = "fsl,esdhc" },
  79	{ }
  80};
  81MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
  82
  83struct sdhci_esdhc {
  84	u8 vendor_ver;
  85	u8 spec_ver;
  86	bool quirk_incorrect_hostver;
  87	bool quirk_limited_clk_division;
  88	bool quirk_unreliable_pulse_detection;
  89	bool quirk_tuning_erratum_type1;
  90	bool quirk_tuning_erratum_type2;
  91	bool quirk_ignore_data_inhibit;
  92	bool quirk_delay_before_data_reset;
  93	bool quirk_trans_complete_erratum;
  94	bool in_sw_tuning;
  95	unsigned int peripheral_clock;
  96	const struct esdhc_clk_fixup *clk_fixup;
  97	u32 div_ratio;
  98};
  99
 100/**
 101 * esdhc_readl_fixup - Fixup the value read from incompatible eSDHC register
 102 *		       to make it compatible with SD spec.
 103 *
 104 * @host: pointer to sdhci_host
 105 * @spec_reg: SD spec register address
 106 * @value: 32bit eSDHC register value on spec_reg address
 107 *
 108 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
 109 * registers are 32 bits. There are differences in register size, register
 110 * address, register function, bit position and function between eSDHC spec
 111 * and SD spec.
 112 *
 113 * Return a fixed up register value
 114 */
 115static u32 esdhc_readl_fixup(struct sdhci_host *host,
 116				     int spec_reg, u32 value)
 117{
 118	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 119	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 120	u32 ret;
 121
 122	/*
 123	 * The bit of ADMA flag in eSDHC is not compatible with standard
 124	 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
 125	 * supported by eSDHC.
 126	 * Also, for many FSL eSDHC controllers the reset value of the
 127	 * SDHCI_CAN_DO_ADMA1 field is 1, but some of them can't support ADMA;
 128	 * only those whose vendor version is greater than 2.2/0x12 support ADMA.
 129	 */
 130	if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
 131		if (esdhc->vendor_ver > VENDOR_V_22) {
 132			ret = value | SDHCI_CAN_DO_ADMA2;
 133			return ret;
 134		}
 135	}
 136
 137	/*
 138	 * The DAT[3:0] line signal levels and the CMD line signal level are
 139	 * not compatible with standard SDHC register. The line signal levels
 140	 * DAT[7:0] are at bits 31:24 and the command line signal level is at
 141	 * bit 23. All other bits are the same as in the standard SDHC
 142	 * register.
 143	 */
 144	if (spec_reg == SDHCI_PRESENT_STATE) {
 145		ret = value & 0x000fffff;
 146		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
 147		ret |= (value << 1) & SDHCI_CMD_LVL;
 148
 149		/*
 150		 * Some controllers have unreliable Data Line Active
 151		 * bit for commands with busy signal. This affects
 152		 * Command Inhibit (data) bit. Just ignore it since
 153		 * MMC core driver has already polled card status
 154		 * with CMD13 after any command with busy signal.
 155		 */
 156		if (esdhc->quirk_ignore_data_inhibit)
 157			ret &= ~SDHCI_DATA_INHIBIT;
 158		return ret;
 159	}
 160
 161	/*
 162	 * DTS properties of mmc host are used to enable each speed mode
 163	 * according to soc and board capability. So clean up
 164	 * SDR50/SDR104/DDR50 support bits here.
 165	 */
 166	if (spec_reg == SDHCI_CAPABILITIES_1) {
 167		ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
 168				SDHCI_SUPPORT_DDR50);
 169		return ret;
 170	}
 171
 172	ret = value;
 173	return ret;
 174}
 175
 176static u16 esdhc_readw_fixup(struct sdhci_host *host,
 177				     int spec_reg, u32 value)
 178{
 179	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 180	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 181	u16 ret;
 182	int shift = (spec_reg & 0x2) * 8;
 183
 184	if (spec_reg == SDHCI_TRANSFER_MODE)
 185		return pltfm_host->xfer_mode_shadow;
 186
 187	if (spec_reg == SDHCI_HOST_VERSION)
 188		ret = value & 0xffff;
 189	else
 190		ret = (value >> shift) & 0xffff;
 191	/* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
 192	 * vendor version and spec version information.
 193	 */
 194	if ((spec_reg == SDHCI_HOST_VERSION) &&
 195	    (esdhc->quirk_incorrect_hostver))
 196		ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
 197	return ret;
 198}
 199
 200static u8 esdhc_readb_fixup(struct sdhci_host *host,
 201				     int spec_reg, u32 value)
 202{
 203	u8 ret;
 204	u8 dma_bits;
 205	int shift = (spec_reg & 0x3) * 8;
 206
 207	ret = (value >> shift) & 0xff;
 208
 209	/*
 210	 * "DMA select" is located at offset 0x28 in the SD specification, but on
 211	 * P5020 or P3041 it is located at 0x29.
 212	 */
 213	if (spec_reg == SDHCI_HOST_CONTROL) {
 214		/* DMA select is 22,23 bits in Protocol Control Register */
 215		dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
 216		/* fixup the result */
 217		ret &= ~SDHCI_CTRL_DMA_MASK;
 218		ret |= dma_bits;
 219	}
 220	return ret;
 221}
 222
 223/**
 224 * esdhc_writel_fixup - Fixup the SD spec register value so that it could be
 225 *			written into eSDHC register.
 226 *
 227 * @host: pointer to sdhci_host
 228 * @spec_reg: SD spec register address
 229 * @value: 8/16/32bit SD spec register value that would be written
 230 * @old_value: 32bit eSDHC register value on spec_reg address
 231 *
 232 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
 233 * registers are 32 bits. There are differences in register size, register
 234 * address, register function, bit position and function between eSDHC spec
 235 * and SD spec.
 236 *
 237 * Return a fixed up register value
 238 */
 239static u32 esdhc_writel_fixup(struct sdhci_host *host,
 240				     int spec_reg, u32 value, u32 old_value)
 241{
 242	u32 ret;
 243
 244	/*
 245	 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
 246	 * when SYSCTL[RSTD] is set for some special operations.
 247	 * No any impact on other operation.
 248	 */
 249	if (spec_reg == SDHCI_INT_ENABLE)
 250		ret = value | SDHCI_INT_BLK_GAP;
 251	else
 252		ret = value;
 253
 254	return ret;
 255}
 256
 257static u32 esdhc_writew_fixup(struct sdhci_host *host,
 258				     int spec_reg, u16 value, u32 old_value)
 259{
 260	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 261	int shift = (spec_reg & 0x2) * 8;
 262	u32 ret;
 263
 264	switch (spec_reg) {
 265	case SDHCI_TRANSFER_MODE:
 266		/*
 267		 * Postpone this write, we must do it together with a
 268		 * command write that is down below. Return old value.
 269		 */
 270		pltfm_host->xfer_mode_shadow = value;
 271		return old_value;
 272	case SDHCI_COMMAND:
 273		ret = (value << 16) | pltfm_host->xfer_mode_shadow;
 274		return ret;
 275	}
 276
 277	ret = old_value & (~(0xffff << shift));
 278	ret |= (value << shift);
 279
 280	if (spec_reg == SDHCI_BLOCK_SIZE) {
 281		/*
 282		 * Two last DMA bits are reserved, and first one is used for
 283		 * non-standard blksz of 4096 bytes that we don't support
 284		 * yet. So clear the DMA boundary bits.
 285		 */
 286		ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
 287	}
 288	return ret;
 289}
 290
 291static u32 esdhc_writeb_fixup(struct sdhci_host *host,
 292				     int spec_reg, u8 value, u32 old_value)
 293{
 294	u32 ret;
 295	u32 dma_bits;
 296	u8 tmp;
 297	int shift = (spec_reg & 0x3) * 8;
 298
 299	/*
 300	 * eSDHC doesn't have a standard power control register, so we do
 301	 * nothing here to avoid incorrect operation.
 302	 */
 303	if (spec_reg == SDHCI_POWER_CONTROL)
 304		return old_value;
 305	/*
 306	 * "DMA select" location is offset 0x28 in SD specification, but on
 307	 * P5020 or P3041, it's located at 0x29.
 308	 */
 309	if (spec_reg == SDHCI_HOST_CONTROL) {
 310		/*
 311		 * If host control register is not standard, exit
 312		 * this function
 313		 */
 314		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
 315			return old_value;
 316
 317		/* DMA select is 22,23 bits in Protocol Control Register */
 318		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
 319		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
 320		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
 321		      (old_value & SDHCI_CTRL_DMA_MASK);
 322		ret = (ret & (~0xff)) | tmp;
 323
 324		/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
 325		ret &= ~ESDHC_HOST_CONTROL_RES;
 326		return ret;
 327	}
 328
 329	ret = (old_value & (~(0xff << shift))) | (value << shift);
 330	return ret;
 331}
 332
 333static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
 334{
 335	u32 ret;
 336	u32 value;
 337
 338	if (reg == SDHCI_CAPABILITIES_1)
 339		value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
 340	else
 341		value = ioread32be(host->ioaddr + reg);
 342
 343	ret = esdhc_readl_fixup(host, reg, value);
 344
 345	return ret;
 346}
 347
 348static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
 349{
 350	u32 ret;
 351	u32 value;
 352
 353	if (reg == SDHCI_CAPABILITIES_1)
 354		value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
 355	else
 356		value = ioread32(host->ioaddr + reg);
 357
 358	ret = esdhc_readl_fixup(host, reg, value);
 359
 360	return ret;
 361}
 362
 363static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
 364{
 365	u16 ret;
 366	u32 value;
 367	int base = reg & ~0x3;
 368
 369	value = ioread32be(host->ioaddr + base);
 370	ret = esdhc_readw_fixup(host, reg, value);
 371	return ret;
 372}
 373
 374static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
 375{
 376	u16 ret;
 377	u32 value;
 378	int base = reg & ~0x3;
 379
 380	value = ioread32(host->ioaddr + base);
 381	ret = esdhc_readw_fixup(host, reg, value);
 382	return ret;
 383}
 384
 385static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
 386{
 387	u8 ret;
 388	u32 value;
 389	int base = reg & ~0x3;
 390
 391	value = ioread32be(host->ioaddr + base);
 392	ret = esdhc_readb_fixup(host, reg, value);
 393	return ret;
 394}
 395
 396static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
 397{
 398	u8 ret;
 399	u32 value;
 400	int base = reg & ~0x3;
 401
 402	value = ioread32(host->ioaddr + base);
 403	ret = esdhc_readb_fixup(host, reg, value);
 404	return ret;
 405}
 406
 407static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
 408{
 409	u32 value;
 410
 411	value = esdhc_writel_fixup(host, reg, val, 0);
 412	iowrite32be(value, host->ioaddr + reg);
 413}
 414
 415static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
 416{
 417	u32 value;
 418
 419	value = esdhc_writel_fixup(host, reg, val, 0);
 420	iowrite32(value, host->ioaddr + reg);
 421}
 422
 423static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
 424{
 425	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 426	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 427	int base = reg & ~0x3;
 428	u32 value;
 429	u32 ret;
 430
 431	value = ioread32be(host->ioaddr + base);
 432	ret = esdhc_writew_fixup(host, reg, val, value);
 433	if (reg != SDHCI_TRANSFER_MODE)
 434		iowrite32be(ret, host->ioaddr + base);
 435
 436	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
 437	 * 1us later after ESDHC_EXTN is set.
 438	 */
 439	if (base == ESDHC_SYSTEM_CONTROL_2) {
 440		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
 441		    esdhc->in_sw_tuning) {
 442			udelay(1);
 443			ret |= ESDHC_SMPCLKSEL;
 444			iowrite32be(ret, host->ioaddr + base);
 445		}
 446	}
 447}
 448
 449static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
 450{
 451	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 452	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 453	int base = reg & ~0x3;
 454	u32 value;
 455	u32 ret;
 456
 457	value = ioread32(host->ioaddr + base);
 458	ret = esdhc_writew_fixup(host, reg, val, value);
 459	if (reg != SDHCI_TRANSFER_MODE)
 460		iowrite32(ret, host->ioaddr + base);
 461
 462	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
 463	 * 1us later after ESDHC_EXTN is set.
 464	 */
 465	if (base == ESDHC_SYSTEM_CONTROL_2) {
 466		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
 467		    esdhc->in_sw_tuning) {
 468			udelay(1);
 469			ret |= ESDHC_SMPCLKSEL;
 470			iowrite32(ret, host->ioaddr + base);
 471		}
 472	}
 473}
 474
 475static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
 476{
 477	int base = reg & ~0x3;
 478	u32 value;
 479	u32 ret;
 480
 481	value = ioread32be(host->ioaddr + base);
 482	ret = esdhc_writeb_fixup(host, reg, val, value);
 483	iowrite32be(ret, host->ioaddr + base);
 484}
 485
 486static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
 487{
 488	int base = reg & ~0x3;
 489	u32 value;
 490	u32 ret;
 491
 492	value = ioread32(host->ioaddr + base);
 493	ret = esdhc_writeb_fixup(host, reg, val, value);
 494	iowrite32(ret, host->ioaddr + base);
 495}
 496
 497/*
 498 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 499 * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
 500 * and Block Gap Event(IRQSTAT[BGE]) are also set.
 501 * For Continue, apply soft reset for data(SYSCTL[RSTD]);
 502 * and re-issue the entire read transaction from beginning.
 503 */
 504static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
 505{
 506	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 507	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 508	bool applicable;
 509	dma_addr_t dmastart;
 510	dma_addr_t dmanow;
 511
 512	applicable = (intmask & SDHCI_INT_DATA_END) &&
 513		     (intmask & SDHCI_INT_BLK_GAP) &&
 514		     (esdhc->vendor_ver == VENDOR_V_23);
 515	if (!applicable)
 516		return;
 517
 518	host->data->error = 0;
 519	dmastart = sg_dma_address(host->data->sg);
 520	dmanow = dmastart + host->data->bytes_xfered;
 521	/*
 522	 * Force update to the next DMA block boundary.
 523	 */
 524	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
 525		SDHCI_DEFAULT_BOUNDARY_SIZE;
 526	host->data->bytes_xfered = dmanow - dmastart;
 527	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
 528}
 529
 530static int esdhc_of_enable_dma(struct sdhci_host *host)
 531{
 532	int ret;
 533	u32 value;
 534	struct device *dev = mmc_dev(host->mmc);
 535
 536	if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
 537	    of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
 538		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
 539		if (ret)
 540			return ret;
 541	}
 542
 543	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
 544
 545	if (of_dma_is_coherent(dev->of_node))
 546		value |= ESDHC_DMA_SNOOP;
 547	else
 548		value &= ~ESDHC_DMA_SNOOP;
 549
 550	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
 551	return 0;
 552}
 553
 554static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
 555{
 556	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 557	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 558
 559	if (esdhc->peripheral_clock)
 560		return esdhc->peripheral_clock;
 561	else
 562		return pltfm_host->clock;
 563}
 564
 565static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
 566{
 567	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 568	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 569	unsigned int clock;
 570
 571	if (esdhc->peripheral_clock)
 572		clock = esdhc->peripheral_clock;
 573	else
 574		clock = pltfm_host->clock;
 575	return clock / 256 / 16;
 576}
 577
 578static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
 579{
 580	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 581	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 582	ktime_t timeout;
 583	u32 val, clk_en;
 584
 585	clk_en = ESDHC_CLOCK_SDCLKEN;
 586
 587	/*
 588	 * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
 589	 * is 2.2 or lower.
 590	 */
 591	if (esdhc->vendor_ver <= VENDOR_V_22)
 592		clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
 593			   ESDHC_CLOCK_PEREN);
 594
 595	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
 596
 597	if (enable)
 598		val |= clk_en;
 599	else
 600		val &= ~clk_en;
 601
 602	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
 603
 604	/*
 605	 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
 606	 * wait clock stable bit which does not exist.
 607	 */
 608	timeout = ktime_add_ms(ktime_get(), 20);
 609	while (esdhc->vendor_ver > VENDOR_V_22) {
 610		bool timedout = ktime_after(ktime_get(), timeout);
 611
 612		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
 613			break;
 614		if (timedout) {
 615			pr_err("%s: Internal clock never stabilised.\n",
 616				mmc_hostname(host->mmc));
 617			break;
 618		}
 619		usleep_range(10, 20);
 620	}
 621}
 622
 623static void esdhc_flush_async_fifo(struct sdhci_host *host)
 624{
 625	ktime_t timeout;
 626	u32 val;
 627
 628	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
 629	val |= ESDHC_FLUSH_ASYNC_FIFO;
 630	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
 631
 632	/* Wait max 20 ms */
 633	timeout = ktime_add_ms(ktime_get(), 20);
 634	while (1) {
 635		bool timedout = ktime_after(ktime_get(), timeout);
 636
 637		if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
 638		      ESDHC_FLUSH_ASYNC_FIFO))
 639			break;
 640		if (timedout) {
 641			pr_err("%s: flushing asynchronous FIFO timeout.\n",
 642				mmc_hostname(host->mmc));
 643			break;
 644		}
 645		usleep_range(10, 20);
 646	}
 647}
 648
 649static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
 650{
 651	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 652	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 653	unsigned int pre_div = 1, div = 1;
 654	unsigned int clock_fixup = 0;
 655	ktime_t timeout;
 656	u32 temp;
 657
 658	if (clock == 0) {
 659		host->mmc->actual_clock = 0;
 660		esdhc_clock_enable(host, false);
 661		return;
 662	}
 663
 664	/* Start pre_div at 2 for vendor version < 2.3. */
 665	if (esdhc->vendor_ver < VENDOR_V_23)
 666		pre_div = 2;
 667
 668	/* Fix clock value. */
 669	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
 670	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
 671		clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
 672	else if (esdhc->clk_fixup)
 673		clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
 674
 675	if (clock_fixup == 0 || clock < clock_fixup)
 676		clock_fixup = clock;
 677
 678	/* Calculate pre_div and div. */
 679	while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
 680		pre_div *= 2;
 681
 682	while (host->max_clk / pre_div / div > clock_fixup && div < 16)
 683		div++;
 684
 685	esdhc->div_ratio = pre_div * div;
 686
 687	/* Limit clock division for HS400 200MHz clock for quirk. */
 688	if (esdhc->quirk_limited_clk_division &&
 689	    clock == MMC_HS200_MAX_DTR &&
 690	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
 691	     host->flags & SDHCI_HS400_TUNING)) {
 692		if (esdhc->div_ratio <= 4) {
 693			pre_div = 4;
 694			div = 1;
 695		} else if (esdhc->div_ratio <= 8) {
 696			pre_div = 4;
 697			div = 2;
 698		} else if (esdhc->div_ratio <= 12) {
 699			pre_div = 4;
 700			div = 3;
 701		} else {
 702			pr_warn("%s: using unsupported clock division.\n",
 703				mmc_hostname(host->mmc));
 704		}
 705		esdhc->div_ratio = pre_div * div;
 706	}
 707
 708	host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
 709
 710	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
 711		clock, host->mmc->actual_clock);
 712
 713	/* Set clock division into register. */
 714	pre_div >>= 1;
 715	div--;
 716
 717	esdhc_clock_enable(host, false);
 718
 719	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
 720	temp &= ~ESDHC_CLOCK_MASK;
 721	temp |= ((div << ESDHC_DIVIDER_SHIFT) |
 722		(pre_div << ESDHC_PREDIV_SHIFT));
 723	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
 724
 725	/*
 726	 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
 727	 * wait clock stable bit which does not exist.
 728	 */
 729	timeout = ktime_add_ms(ktime_get(), 20);
 730	while (esdhc->vendor_ver > VENDOR_V_22) {
 731		bool timedout = ktime_after(ktime_get(), timeout);
 732
 733		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
 734			break;
 735		if (timedout) {
 736			pr_err("%s: Internal clock never stabilised.\n",
 737				mmc_hostname(host->mmc));
 738			break;
 739		}
 740		usleep_range(10, 20);
 741	}
 742
 743	/* Additional setting for HS400. */
 744	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
 745	    clock == MMC_HS200_MAX_DTR) {
 746		temp = sdhci_readl(host, ESDHC_TBCTL);
 747		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
 748		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
 749		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
 750		esdhc_clock_enable(host, true);
 751
 752		temp = sdhci_readl(host, ESDHC_DLLCFG0);
 753		temp |= ESDHC_DLL_ENABLE;
 754		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
 755			temp |= ESDHC_DLL_FREQ_SEL;
 756		sdhci_writel(host, temp, ESDHC_DLLCFG0);
 757
 758		temp |= ESDHC_DLL_RESET;
 759		sdhci_writel(host, temp, ESDHC_DLLCFG0);
 760		udelay(1);
 761		temp &= ~ESDHC_DLL_RESET;
 762		sdhci_writel(host, temp, ESDHC_DLLCFG0);
 763
 764		/* Wait max 20 ms */
 765		if (read_poll_timeout(sdhci_readl, temp,
 766				      temp & ESDHC_DLL_STS_SLV_LOCK,
 767				      10, 20000, false,
 768				      host, ESDHC_DLLSTAT0))
 769			pr_err("%s: timeout for delay chain lock.\n",
 770			       mmc_hostname(host->mmc));
 771
 772		temp = sdhci_readl(host, ESDHC_TBCTL);
 773		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
 774
 775		esdhc_clock_enable(host, false);
 776		esdhc_flush_async_fifo(host);
 777	}
 778	esdhc_clock_enable(host, true);
 779}
 780
 781static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
 782{
 783	u32 ctrl;
 784
 785	ctrl = sdhci_readl(host, ESDHC_PROCTL);
 786	ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
 787	switch (width) {
 788	case MMC_BUS_WIDTH_8:
 789		ctrl |= ESDHC_CTRL_8BITBUS;
 790		break;
 791
 792	case MMC_BUS_WIDTH_4:
 793		ctrl |= ESDHC_CTRL_4BITBUS;
 794		break;
 795
 796	default:
 797		break;
 798	}
 799
 800	sdhci_writel(host, ctrl, ESDHC_PROCTL);
 801}
 802
 803static void esdhc_reset(struct sdhci_host *host, u8 mask)
 804{
 805	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 806	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 807	u32 val, bus_width = 0;
 808
 809	/*
 810	 * Add delay to make sure all the DMA transfers are finished
 811	 * for quirk.
 812	 */
 813	if (esdhc->quirk_delay_before_data_reset &&
 814	    (mask & SDHCI_RESET_DATA) &&
 815	    (host->flags & SDHCI_REQ_USE_DMA))
 816		mdelay(5);
 817
 818	/*
 819	 * Save bus-width for eSDHC whose vendor version is 2.2
 820	 * or lower for data reset.
 821	 */
 822	if ((mask & SDHCI_RESET_DATA) &&
 823	    (esdhc->vendor_ver <= VENDOR_V_22)) {
 824		val = sdhci_readl(host, ESDHC_PROCTL);
 825		bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
 826	}
 827
 828	sdhci_reset(host, mask);
 829
 830	/*
 831	 * Restore bus-width setting and interrupt registers for eSDHC
 832	 * whose vendor version is 2.2 or lower for data reset.
 833	 */
 834	if ((mask & SDHCI_RESET_DATA) &&
 835	    (esdhc->vendor_ver <= VENDOR_V_22)) {
 836		val = sdhci_readl(host, ESDHC_PROCTL);
 837		val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
 838		val |= bus_width;
 839		sdhci_writel(host, val, ESDHC_PROCTL);
 840
 841		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
 842		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
 843	}
 844
 845	/*
 846	 * Some bits have to be cleaned manually for eSDHC whose spec
 847	 * version is higher than 3.0 for all reset.
 848	 */
 849	if ((mask & SDHCI_RESET_ALL) &&
 850	    (esdhc->spec_ver >= SDHCI_SPEC_300)) {
 851		val = sdhci_readl(host, ESDHC_TBCTL);
 852		val &= ~ESDHC_TB_EN;
 853		sdhci_writel(host, val, ESDHC_TBCTL);
 854
 855		/*
 856		 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
 857		 * 0 for quirk.
 858		 */
 859		if (esdhc->quirk_unreliable_pulse_detection) {
 860			val = sdhci_readl(host, ESDHC_DLLCFG1);
 861			val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
 862			sdhci_writel(host, val, ESDHC_DLLCFG1);
 863		}
 864	}
 865}
 866
 867/* The SCFG, Supplemental Configuration Unit, provides SoC specific
 868 * configuration and status registers for the device. There is a
 869 * SDHC IO VSEL control register on SCFG for some platforms. It's
 870 * used to support SDHC IO voltage switching.
 871 */
 872static const struct of_device_id scfg_device_ids[] = {
 873	{ .compatible = "fsl,t1040-scfg", },
 874	{ .compatible = "fsl,ls1012a-scfg", },
 875	{ .compatible = "fsl,ls1046a-scfg", },
 876	{}
 877};
 878
 879/* SDHC IO VSEL control register definition */
 880#define SCFG_SDHCIOVSELCR	0x408
 881#define SDHCIOVSELCR_TGLEN	0x80000000
 882#define SDHCIOVSELCR_VSELVAL	0x60000000
 883#define SDHCIOVSELCR_SDHC_VS	0x00000001
 884
 885static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
 886				       struct mmc_ios *ios)
 887{
 888	struct sdhci_host *host = mmc_priv(mmc);
 889	struct device_node *scfg_node;
 890	void __iomem *scfg_base = NULL;
 891	u32 sdhciovselcr;
 892	u32 val;
 893
 894	/*
 895	 * Signal Voltage Switching is only applicable for Host Controllers
 896	 * v3.00 and above.
 897	 */
 898	if (host->version < SDHCI_SPEC_300)
 899		return 0;
 900
 901	val = sdhci_readl(host, ESDHC_PROCTL);
 902
 903	switch (ios->signal_voltage) {
 904	case MMC_SIGNAL_VOLTAGE_330:
 905		val &= ~ESDHC_VOLT_SEL;
 906		sdhci_writel(host, val, ESDHC_PROCTL);
 907		return 0;
 908	case MMC_SIGNAL_VOLTAGE_180:
 909		scfg_node = of_find_matching_node(NULL, scfg_device_ids);
 910		if (scfg_node)
 911			scfg_base = of_iomap(scfg_node, 0);
 912		of_node_put(scfg_node);
 913		if (scfg_base) {
 914			sdhciovselcr = SDHCIOVSELCR_TGLEN |
 915				       SDHCIOVSELCR_VSELVAL;
 916			iowrite32be(sdhciovselcr,
 917				scfg_base + SCFG_SDHCIOVSELCR);
 918
 919			val |= ESDHC_VOLT_SEL;
 920			sdhci_writel(host, val, ESDHC_PROCTL);
 921			mdelay(5);
 922
 923			sdhciovselcr = SDHCIOVSELCR_TGLEN |
 924				       SDHCIOVSELCR_SDHC_VS;
 925			iowrite32be(sdhciovselcr,
 926				scfg_base + SCFG_SDHCIOVSELCR);
 927			iounmap(scfg_base);
 928		} else {
 929			val |= ESDHC_VOLT_SEL;
 930			sdhci_writel(host, val, ESDHC_PROCTL);
 931		}
 932		return 0;
 933	default:
 934		return 0;
 935	}
 936}
 937
 938static struct soc_device_attribute soc_tuning_erratum_type1[] = {
 939	{ .family = "QorIQ T1023", },
 940	{ .family = "QorIQ T1040", },
 941	{ .family = "QorIQ T2080", },
 942	{ .family = "QorIQ LS1021A", },
 943	{ /* sentinel */ }
 944};
 945
 946static struct soc_device_attribute soc_tuning_erratum_type2[] = {
 947	{ .family = "QorIQ LS1012A", },
 948	{ .family = "QorIQ LS1043A", },
 949	{ .family = "QorIQ LS1046A", },
 950	{ .family = "QorIQ LS1080A", },
 951	{ .family = "QorIQ LS2080A", },
 952	{ .family = "QorIQ LA1575A", },
 953	{ /* sentinel */ }
 954};
 955
 956static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
 957{
 958	u32 val;
 959
 960	esdhc_clock_enable(host, false);
 961	esdhc_flush_async_fifo(host);
 962
 963	val = sdhci_readl(host, ESDHC_TBCTL);
 964	if (enable)
 965		val |= ESDHC_TB_EN;
 966	else
 967		val &= ~ESDHC_TB_EN;
 968	sdhci_writel(host, val, ESDHC_TBCTL);
 969
 970	esdhc_clock_enable(host, true);
 971}
 972
 973static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
 974				    u8 *window_end)
 975{
 976	u32 val;
 977
 978	/* Write TBCTL[11:8]=4'h8 */
 979	val = sdhci_readl(host, ESDHC_TBCTL);
 980	val &= ~(0xf << 8);
 981	val |= 8 << 8;
 982	sdhci_writel(host, val, ESDHC_TBCTL);
 983
 984	mdelay(1);
 985
 986	/* Read TBCTL[31:0] register and rewrite again */
 987	val = sdhci_readl(host, ESDHC_TBCTL);
 988	sdhci_writel(host, val, ESDHC_TBCTL);
 989
 990	mdelay(1);
 991
 992	/* Read the TBSTAT[31:0] register twice */
 993	val = sdhci_readl(host, ESDHC_TBSTAT);
 994	val = sdhci_readl(host, ESDHC_TBSTAT);
 995
 996	*window_end = val & 0xff;
 997	*window_start = (val >> 8) & 0xff;
 998}
 999
1000static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
1001				    u8 *window_end)
1002{
1003	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1004	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1005	u8 start_ptr, end_ptr;
1006
1007	if (esdhc->quirk_tuning_erratum_type1) {
1008		*window_start = 5 * esdhc->div_ratio;
1009		*window_end = 3 * esdhc->div_ratio;
1010		return;
1011	}
1012
1013	esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
1014
1015	/* Reset data lines by setting ESDHCCTL[RSTD] */
1016	sdhci_reset(host, SDHCI_RESET_DATA);
1017	/* Write 32'hFFFF_FFFF to IRQSTAT register */
1018	sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
1019
1020	/* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
1021	 * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
1022	 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
1023	 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
1024	 */
1025
1026	if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
1027		*window_start = 8 * esdhc->div_ratio;
1028		*window_end = 4 * esdhc->div_ratio;
1029	} else {
1030		*window_start = 5 * esdhc->div_ratio;
1031		*window_end = 3 * esdhc->div_ratio;
1032	}
1033}
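A small standalone sketch of the window-pointer arithmetic above, with assumed values for div_ratio and the TBSTAT pointers (illustrative only):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int div_ratio = 4;		/* assumed example value */
	int start_ptr = 40, end_ptr = 10;	/* assumed TBSTAT window pointers */
	unsigned int window_start, window_end;

	if (abs(start_ptr - end_ptr) > (int)(4 * div_ratio + 2)) {	/* 30 > 18 */
		window_start = 8 * div_ratio;	/* 32 */
		window_end = 4 * div_ratio;	/* 16 */
	} else {
		window_start = 5 * div_ratio;	/* 20 */
		window_end = 3 * div_ratio;	/* 12 */
	}
	printf("window: start=%u end=%u\n", window_start, window_end);
	return 0;
}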
1034
1035static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
1036				   u8 window_start, u8 window_end)
1037{
1038	struct sdhci_host *host = mmc_priv(mmc);
1039	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1040	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1041	u32 val;
1042	int ret;
1043
1044	/* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
1045	val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
1046	      ESDHC_WNDW_STRT_PTR_MASK;
1047	val |= window_end & ESDHC_WNDW_END_PTR_MASK;
1048	sdhci_writel(host, val, ESDHC_TBPTR);
1049
1050	/* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
1051	val = sdhci_readl(host, ESDHC_TBCTL);
1052	val &= ~ESDHC_TB_MODE_MASK;
1053	val |= ESDHC_TB_MODE_SW;
1054	sdhci_writel(host, val, ESDHC_TBCTL);
1055
1056	esdhc->in_sw_tuning = true;
1057	ret = sdhci_execute_tuning(mmc, opcode);
1058	esdhc->in_sw_tuning = false;
1059	return ret;
1060}
1061
1062static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
1063{
1064	struct sdhci_host *host = mmc_priv(mmc);
1065	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1066	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1067	u8 window_start, window_end;
1068	int ret, retries = 1;
1069	bool hs400_tuning;
1070	unsigned int clk;
1071	u32 val;
1072
1073	/* For tuning mode, the sd clock divisor value
1074	 * must be larger than 3 according to reference manual.
1075	 */
1076	clk = esdhc->peripheral_clock / 3;
1077	if (host->clock > clk)
1078		esdhc_of_set_clock(host, clk);
1079
1080	esdhc_tuning_block_enable(host, true);
1081
1082	/*
1083	 * The eSDHC controller takes the data timeout value into account
1084	 * during tuning. If the SD card is too slow sending the response, the
1085	 * timer will expire and a "Buffer Read Ready" interrupt without data
1086	 * is triggered. This leads to tuning errors.
1087	 *
1088	 * Just set the timeout to the maximum value because the core will
1089	 * already take care of it in sdhci_send_tuning().
1090	 */
1091	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
1092
1093	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1094
1095	do {
1096		if (esdhc->quirk_limited_clk_division &&
1097		    hs400_tuning)
1098			esdhc_of_set_clock(host, host->clock);
1099
1100		/* Do HW tuning */
1101		val = sdhci_readl(host, ESDHC_TBCTL);
1102		val &= ~ESDHC_TB_MODE_MASK;
1103		val |= ESDHC_TB_MODE_3;
1104		sdhci_writel(host, val, ESDHC_TBCTL);
1105
1106		ret = sdhci_execute_tuning(mmc, opcode);
1107		if (ret)
1108			break;
1109
1110		/* For type2 affected platforms of the tuning erratum,
1111		 * tuning may succeed although eSDHC might not have
1112		 * tuned properly. Need to check tuning window.
1113		 */
1114		if (esdhc->quirk_tuning_erratum_type2 &&
1115		    !host->tuning_err) {
1116			esdhc_tuning_window_ptr(host, &window_start,
1117						&window_end);
1118			if (abs(window_start - window_end) >
1119			    (4 * esdhc->div_ratio + 2))
1120				host->tuning_err = -EAGAIN;
1121		}
1122
1123		/* If HW tuning fails and triggers erratum,
1124		 * try workaround.
1125		 */
1126		ret = host->tuning_err;
1127		if (ret == -EAGAIN &&
1128		    (esdhc->quirk_tuning_erratum_type1 ||
1129		     esdhc->quirk_tuning_erratum_type2)) {
1130			/* Recover HS400 tuning flag */
1131			if (hs400_tuning)
1132				host->flags |= SDHCI_HS400_TUNING;
1133			pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
1134				mmc_hostname(mmc));
1135			/* Do SW tuning */
1136			esdhc_prepare_sw_tuning(host, &window_start,
1137						&window_end);
1138			ret = esdhc_execute_sw_tuning(mmc, opcode,
1139						      window_start,
1140						      window_end);
1141			if (ret)
1142				break;
1143
1144			/* Retry both HW/SW tuning with reduced clock. */
1145			ret = host->tuning_err;
1146			if (ret == -EAGAIN && retries) {
1147				/* Recover HS400 tuning flag */
1148				if (hs400_tuning)
1149					host->flags |= SDHCI_HS400_TUNING;
1150
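				/* Lower the clock by stepping to the next divider ratio. */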
1151				clk = host->max_clk / (esdhc->div_ratio + 1);
1152				esdhc_of_set_clock(host, clk);
1153				pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
1154					mmc_hostname(mmc));
1155			} else {
1156				break;
1157			}
1158		} else {
1159			break;
1160		}
1161	} while (retries--);
1162
1163	if (ret) {
1164		esdhc_tuning_block_enable(host, false);
1165	} else if (hs400_tuning) {
1166		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
1167		val |= ESDHC_FLW_CTL_BG;
1168		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
1169	}
1170
1171	return ret;
1172}
1173
1174static void esdhc_set_uhs_signaling(struct sdhci_host *host,
1175				   unsigned int timing)
1176{
1177	u32 val;
1178
1179	/*
1180	 * There are specific register settings for HS400 mode.
1181	 * If the controller is currently in HS400 mode, clear all of
1182	 * them to exit HS400 mode before setting any other speed mode.
1183	 */
1184	val = sdhci_readl(host, ESDHC_TBCTL);
1185	if (val & ESDHC_HS400_MODE) {
1186		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
1187		val &= ~ESDHC_FLW_CTL_BG;
1188		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
1189
1190		val = sdhci_readl(host, ESDHC_SDCLKCTL);
1191		val &= ~ESDHC_CMD_CLK_CTL;
1192		sdhci_writel(host, val, ESDHC_SDCLKCTL);
1193
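		/* Clear the HS400 mode bit while the SD clock is gated. */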
1194		esdhc_clock_enable(host, false);
1195		val = sdhci_readl(host, ESDHC_TBCTL);
1196		val &= ~ESDHC_HS400_MODE;
1197		sdhci_writel(host, val, ESDHC_TBCTL);
1198		esdhc_clock_enable(host, true);
1199
1200		val = sdhci_readl(host, ESDHC_DLLCFG0);
1201		val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
1202		sdhci_writel(host, val, ESDHC_DLLCFG0);
1203
1204		val = sdhci_readl(host, ESDHC_TBCTL);
1205		val &= ~ESDHC_HS400_WNDW_ADJUST;
1206		sdhci_writel(host, val, ESDHC_TBCTL);
1207
1208		esdhc_tuning_block_enable(host, false);
1209	}
1210
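	/*
	 * For HS400 timing, only enable the tuning block here; all other
	 * timings use the standard UHS signaling setup.
	 */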
1211	if (timing == MMC_TIMING_MMC_HS400)
1212		esdhc_tuning_block_enable(host, true);
1213	else
1214		sdhci_set_uhs_signaling(host, timing);
1215}
1216
1217static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
1218{
1219	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1220	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1221	u32 command;
1222
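	/*
	 * On affected controllers, a transfer complete interrupt can be
	 * raised for a multiple-block write while blocks are still
	 * outstanding, so clear and ignore it in that case.
	 */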
1223	if (esdhc->quirk_trans_complete_erratum) {
1224		command = SDHCI_GET_CMD(sdhci_readw(host,
1225					SDHCI_COMMAND));
1226		if (command == MMC_WRITE_MULTIPLE_BLOCK &&
1227				sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
1228				intmask & SDHCI_INT_DATA_END) {
1229			intmask &= ~SDHCI_INT_DATA_END;
1230			sdhci_writel(host, SDHCI_INT_DATA_END,
1231					SDHCI_INT_STATUS);
1232		}
1233	}
1234	return intmask;
1235}
1236
1237#ifdef CONFIG_PM_SLEEP
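/* Protocol control register value saved across suspend/resume. */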
1238static u32 esdhc_proctl;
1239static int esdhc_of_suspend(struct device *dev)
1240{
1241	struct sdhci_host *host = dev_get_drvdata(dev);
1242
1243	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
1244
1245	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
1246		mmc_retune_needed(host->mmc);
1247
1248	return sdhci_suspend_host(host);
1249}
1250
1251static int esdhc_of_resume(struct device *dev)
1252{
1253	struct sdhci_host *host = dev_get_drvdata(dev);
1254	int ret = sdhci_resume_host(host);
1255
1256	if (ret == 0) {
1257		/* Isn't this already done by sdhci_resume_host()? --rmk */
1258		esdhc_of_enable_dma(host);
1259		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
1260	}
1261	return ret;
1262}
1263#endif
1264
1265static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
1266			esdhc_of_suspend,
1267			esdhc_of_resume);
1268
1269static const struct sdhci_ops sdhci_esdhc_be_ops = {
1270	.read_l = esdhc_be_readl,
1271	.read_w = esdhc_be_readw,
1272	.read_b = esdhc_be_readb,
1273	.write_l = esdhc_be_writel,
1274	.write_w = esdhc_be_writew,
1275	.write_b = esdhc_be_writeb,
1276	.set_clock = esdhc_of_set_clock,
1277	.enable_dma = esdhc_of_enable_dma,
1278	.get_max_clock = esdhc_of_get_max_clock,
1279	.get_min_clock = esdhc_of_get_min_clock,
1280	.adma_workaround = esdhc_of_adma_workaround,
1281	.set_bus_width = esdhc_pltfm_set_bus_width,
1282	.reset = esdhc_reset,
1283	.set_uhs_signaling = esdhc_set_uhs_signaling,
1284	.irq = esdhc_irq,
1285};
1286
1287static const struct sdhci_ops sdhci_esdhc_le_ops = {
1288	.read_l = esdhc_le_readl,
1289	.read_w = esdhc_le_readw,
1290	.read_b = esdhc_le_readb,
1291	.write_l = esdhc_le_writel,
1292	.write_w = esdhc_le_writew,
1293	.write_b = esdhc_le_writeb,
1294	.set_clock = esdhc_of_set_clock,
1295	.enable_dma = esdhc_of_enable_dma,
1296	.get_max_clock = esdhc_of_get_max_clock,
1297	.get_min_clock = esdhc_of_get_min_clock,
1298	.adma_workaround = esdhc_of_adma_workaround,
1299	.set_bus_width = esdhc_pltfm_set_bus_width,
1300	.reset = esdhc_reset,
1301	.set_uhs_signaling = esdhc_set_uhs_signaling,
1302	.irq = esdhc_irq,
1303};
1304
1305static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
1306	.quirks = ESDHC_DEFAULT_QUIRKS |
1307#ifdef CONFIG_PPC
1308		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
1309#endif
1310		  SDHCI_QUIRK_NO_CARD_NO_RESET |
1311		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1312	.ops = &sdhci_esdhc_be_ops,
1313};
1314
1315static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
1316	.quirks = ESDHC_DEFAULT_QUIRKS |
1317		  SDHCI_QUIRK_NO_CARD_NO_RESET |
1318		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1319	.ops = &sdhci_esdhc_le_ops,
1320};
1321
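/* SoCs whose host version register reports an incorrect value. */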
1322static struct soc_device_attribute soc_incorrect_hostver[] = {
1323	{ .family = "QorIQ T4240", .revision = "1.0", },
1324	{ .family = "QorIQ T4240", .revision = "2.0", },
1325	{ /* sentinel */ }
1326};
1327
1328static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
1329	{ .family = "QorIQ LX2160A", .revision = "1.0", },
1330	{ .family = "QorIQ LX2160A", .revision = "2.0", },
1331	{ .family = "QorIQ LS1028A", .revision = "1.0", },
1332	{ /* sentinel */ }
1333};
1334
1335static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
1336	{ .family = "QorIQ LX2160A", .revision = "1.0", },
1337	{ .family = "QorIQ LX2160A", .revision = "2.0", },
1338	{ .family = "QorIQ LS1028A", .revision = "1.0", },
1339	{ /* sentinel */ }
1340};
1341
1342static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
1343{
1344	const struct of_device_id *match;
1345	struct sdhci_pltfm_host *pltfm_host;
1346	struct sdhci_esdhc *esdhc;
1347	struct device_node *np;
1348	struct clk *clk;
1349	u32 val;
1350	u16 host_ver;
1351
1352	pltfm_host = sdhci_priv(host);
1353	esdhc = sdhci_pltfm_priv(pltfm_host);
1354
1355	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
1356	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
1357			     SDHCI_VENDOR_VER_SHIFT;
1358	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
1359	if (soc_device_match(soc_incorrect_hostver))
1360		esdhc->quirk_incorrect_hostver = true;
1361	else
1362		esdhc->quirk_incorrect_hostver = false;
1363
1364	if (soc_device_match(soc_fixup_sdhc_clkdivs))
1365		esdhc->quirk_limited_clk_division = true;
1366	else
1367		esdhc->quirk_limited_clk_division = false;
1368
1369	if (soc_device_match(soc_unreliable_pulse_detection))
1370		esdhc->quirk_unreliable_pulse_detection = true;
1371	else
1372		esdhc->quirk_unreliable_pulse_detection = false;
1373
1374	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
1375	if (match)
1376		esdhc->clk_fixup = match->data;
1377	np = pdev->dev.of_node;
1378
1379	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1380		esdhc->quirk_delay_before_data_reset = true;
1381		esdhc->quirk_trans_complete_erratum = true;
1382	}
1383
1384	clk = of_clk_get(np, 0);
1385	if (!IS_ERR(clk)) {
1386		/*
1387		 * esdhc->peripheral_clock holds the eSDHC base clock rate
1388		 * used when the peripheral clock is selected.
1389		 * On some platforms, the rate reported by the common clk
1390		 * API is the peripheral clock, while the eSDHC base clock
1391		 * is half of the peripheral clock.
1392		 */
1393		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
1394		    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
1395		    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
1396			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
1397		else
1398			esdhc->peripheral_clock = clk_get_rate(clk);
1399
1400		clk_put(clk);
1401	}
1402
1403	esdhc_clock_enable(host, false);
1404	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
1405	/*
1406	 * This bit cannot be reset by SDHCI_RESET_ALL, so initialize it
1407	 * explicitly to 1 or 0 once here to override whatever value the
1408	 * bootloader may have configured.
1409	 */
1410	if (esdhc->peripheral_clock)
1411		val |= ESDHC_PERIPHERAL_CLK_SEL;
1412	else
1413		val &= ~ESDHC_PERIPHERAL_CLK_SEL;
1414	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
1415	esdhc_clock_enable(host, true);
1416}
1417
1418static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
1419{
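	/*
	 * Disable the tuning block before the DDR transition that is part
	 * of the HS400 preparation sequence.
	 */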
1420	esdhc_tuning_block_enable(mmc_priv(mmc), false);
1421	return 0;
1422}
1423
1424static int sdhci_esdhc_probe(struct platform_device *pdev)
1425{
1426	struct sdhci_host *host;
1427	struct device_node *np, *tp;
1428	struct sdhci_pltfm_host *pltfm_host;
1429	struct sdhci_esdhc *esdhc;
1430	int ret;
1431
1432	np = pdev->dev.of_node;
1433
1434	if (of_property_read_bool(np, "little-endian"))
1435		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
1436					sizeof(struct sdhci_esdhc));
1437	else
1438		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
1439					sizeof(struct sdhci_esdhc));
1440
1441	if (IS_ERR(host))
1442		return PTR_ERR(host);
1443
1444	host->mmc_host_ops.start_signal_voltage_switch =
1445		esdhc_signal_voltage_switch;
1446	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
1447	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
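	/* Ask the SDHCI core to delay between tuning cycles (value in ms). */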
1448	host->tuning_delay = 1;
1449
1450	esdhc_init(pdev, host);
1451
1452	sdhci_get_of_property(pdev);
1453
1454	pltfm_host = sdhci_priv(host);
1455	esdhc = sdhci_pltfm_priv(pltfm_host);
1456	if (soc_device_match(soc_tuning_erratum_type1))
1457		esdhc->quirk_tuning_erratum_type1 = true;
1458	else
1459		esdhc->quirk_tuning_erratum_type1 = false;
1460
1461	if (soc_device_match(soc_tuning_erratum_type2))
1462		esdhc->quirk_tuning_erratum_type2 = true;
1463	else
1464		esdhc->quirk_tuning_erratum_type2 = false;
1465
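	/*
	 * Vendor version 2.2 cannot use CMD23; versions newer than 2.2
	 * can use the busy interrupt.
	 */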
1466	if (esdhc->vendor_ver == VENDOR_V_22)
1467		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
1468
1469	if (esdhc->vendor_ver > VENDOR_V_22)
1470		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
1471
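	/*
	 * Any board with a P2020 eSDHC needs a reset after each request
	 * and has a broken timeout value.
	 */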
1472	tp = of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc");
1473	if (tp) {
1474		of_node_put(tp);
1475		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
1476		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1477	}
1478
1479	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
1480	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
1481	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
1482	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
1483	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
1484		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1485
1486	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
1487		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1488
1489	esdhc->quirk_ignore_data_inhibit = false;
1490	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1491		/*
1492		 * The P2020 has a non-standard host control register, so
1493		 * treat host control as broken and ignore data inhibit.
1494		 */
1495		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
1496		esdhc->quirk_ignore_data_inhibit = true;
1497	}
1498
1499	/* Call the generic mmc_of_parse() to support additional capabilities. */
1500	ret = mmc_of_parse(host->mmc);
1501	if (ret)
1502		goto err;
1503
1504	mmc_of_parse_voltage(host->mmc, &host->ocr_mask);
1505
1506	ret = sdhci_add_host(host);
1507	if (ret)
1508		goto err;
1509
1510	return 0;
1511 err:
1512	sdhci_pltfm_free(pdev);
1513	return ret;
1514}
1515
1516static struct platform_driver sdhci_esdhc_driver = {
1517	.driver = {
1518		.name = "sdhci-esdhc",
1519		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1520		.of_match_table = sdhci_esdhc_of_match,
1521		.pm = &esdhc_of_dev_pm_ops,
1522	},
1523	.probe = sdhci_esdhc_probe,
1524	.remove_new = sdhci_pltfm_remove,
1525};
1526
1527module_platform_driver(sdhci_esdhc_driver);
1528
1529MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
1530MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
1531	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
1532MODULE_LICENSE("GPL v2");