   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2010 Google, Inc.
   4 */
   5
   6#include <linux/bitfield.h>
   7#include <linux/clk.h>
   8#include <linux/delay.h>
   9#include <linux/dma-mapping.h>
  10#include <linux/err.h>
  11#include <linux/gpio/consumer.h>
  12#include <linux/init.h>
  13#include <linux/io.h>
  14#include <linux/iommu.h>
  15#include <linux/iopoll.h>
  16#include <linux/ktime.h>
  17#include <linux/mmc/card.h>
  18#include <linux/mmc/host.h>
  19#include <linux/mmc/mmc.h>
  20#include <linux/mmc/slot-gpio.h>
  21#include <linux/module.h>
  22#include <linux/of_device.h>
  23#include <linux/of.h>
  24#include <linux/pinctrl/consumer.h>
  25#include <linux/platform_device.h>
  26#include <linux/pm_opp.h>
  27#include <linux/pm_runtime.h>
  28#include <linux/regulator/consumer.h>
  29#include <linux/reset.h>
  30
  31#include <soc/tegra/common.h>
  32
  33#include "sdhci-cqhci.h"
  34#include "sdhci-pltfm.h"
  35#include "cqhci.h"
  36
  37/* Tegra SDHOST controller vendor register definitions */
  38#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
  39#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
  40#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
  41#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
  42#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
  43#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
  44#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
  45#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
  46
  47#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
  48#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
  49
  50#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
  51#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
  52#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
  53
  54#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
  55#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
  56#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
  57#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
  58#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
  59#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
  60
  61#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
  62#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
  63
  64#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
  65#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
  66
  67#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
  68#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
  69#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
  70#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
  71#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
  72#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
  73#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x0000e000
  74#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
  75#define TRIES_128					2
  76#define TRIES_256					4
  77#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
  78
  79#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
  80#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
  81#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
  82#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
  83#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
  84#define TUNING_WORD_BIT_SIZE				32
  85
  86#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
  87#define SDHCI_AUTO_CAL_START				BIT(31)
  88#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
  89#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
  90
  91#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
  92#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
  93#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
  94#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
  95#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
  96
  97#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
  98#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
  99
 100#define SDHCI_TEGRA_CIF2AXI_CTRL_0			0x1fc
 101
 102#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
 103#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
 104#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
 105#define NVQUIRK_ENABLE_SDR50				BIT(3)
 106#define NVQUIRK_ENABLE_SDR104				BIT(4)
 107#define NVQUIRK_ENABLE_DDR50				BIT(5)
 108/*
 109 * HAS_PADCALIB NVQUIRK is for SoCs supporting auto-calibration of pad
 110 * drive strength.
 111 */
 112#define NVQUIRK_HAS_PADCALIB				BIT(6)
 113/*
 114 * NEEDS_PAD_CONTROL NVQUIRK is for SoCs having separate 3V3 and 1V8 pads.
 115 * 3V3/1V8 pad selection happens through pinctrl state selection depending
 116 * on the signaling mode.
 117 */
 118#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
 119#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
 120#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
 121
 122/*
 123 * NVQUIRK_HAS_TMCLK is for SoCs having a separate timeout clock for Tegra
 124 * SDMMC hardware data timeout.
 125 */
 126#define NVQUIRK_HAS_TMCLK				BIT(10)
 127
 128#define NVQUIRK_HAS_ANDROID_GPT_SECTOR			BIT(11)
 129#define NVQUIRK_PROGRAM_STREAMID			BIT(12)
 130
 131/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
 132#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
 133
 134#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
 135					 SDHCI_TRNS_BLK_CNT_EN | \
 136					 SDHCI_TRNS_DMA)
 137
 138struct sdhci_tegra_soc_data {
 139	const struct sdhci_pltfm_data *pdata;
 140	u64 dma_mask;
 141	u32 nvquirks;
 142	u8 min_tap_delay;
 143	u8 max_tap_delay;
 144};
 145
 146/* Magic pull up and pull down pad calibration offsets */
 147struct sdhci_tegra_autocal_offsets {
 148	u32 pull_up_3v3;
 149	u32 pull_down_3v3;
 150	u32 pull_up_3v3_timeout;
 151	u32 pull_down_3v3_timeout;
 152	u32 pull_up_1v8;
 153	u32 pull_down_1v8;
 154	u32 pull_up_1v8_timeout;
 155	u32 pull_down_1v8_timeout;
 156	u32 pull_up_sdr104;
 157	u32 pull_down_sdr104;
 158	u32 pull_up_hs400;
 159	u32 pull_down_hs400;
 160};
 161
 162struct sdhci_tegra {
 163	const struct sdhci_tegra_soc_data *soc_data;
 164	struct gpio_desc *power_gpio;
 165	struct clk *tmclk;
 166	bool ddr_signaling;
 167	bool pad_calib_required;
 168	bool pad_control_available;
 169
 170	struct reset_control *rst;
 171	struct pinctrl *pinctrl_sdmmc;
 172	struct pinctrl_state *pinctrl_state_3v3;
 173	struct pinctrl_state *pinctrl_state_1v8;
 174	struct pinctrl_state *pinctrl_state_3v3_drv;
 175	struct pinctrl_state *pinctrl_state_1v8_drv;
 176
 177	struct sdhci_tegra_autocal_offsets autocal_offsets;
 178	ktime_t last_calib;
 179
 180	u32 default_tap;
 181	u32 default_trim;
 182	u32 dqs_trim;
 183	bool enable_hwcq;
 184	unsigned long curr_clk_rate;
 185	u8 tuned_tap_delay;
 186	u32 stream_id;
 187};
 188
 189static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
 190{
 191	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 192	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 193	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 194
 195	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
 196			(reg == SDHCI_HOST_VERSION))) {
 197		/* Erratum: Version register is invalid in HW. */
 198		return SDHCI_SPEC_200;
 199	}
 200
 201	return readw(host->ioaddr + reg);
 202}
 203
 204static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 205{
 206	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 207
 208	switch (reg) {
 209	case SDHCI_TRANSFER_MODE:
 210		/*
 211		 * Postpone this write; it must be done together with the
 212		 * command write handled below.
 213		 */
 214		pltfm_host->xfer_mode_shadow = val;
 215		return;
 216	case SDHCI_COMMAND:
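		/*
		 * The shadowed transfer mode lands in the low 16 bits and the
		 * command in the high 16 bits of this single 32-bit write, so
		 * both registers are updated in one access.
		 */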
 217		writel((val << 16) | pltfm_host->xfer_mode_shadow,
 218			host->ioaddr + SDHCI_TRANSFER_MODE);
 219		return;
 220	}
 221
 222	writew(val, host->ioaddr + reg);
 223}
 224
 225static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
 226{
 227	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 228	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 229	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 230
 231	/* We seem to get spurious timeout and CRC errors, so disable
 232	 * signalling of them. In case of real errors, software timers
 233	 * should take care of eventually detecting them.
 234	 */
 235	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
 236		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
 237
 238	writel(val, host->ioaddr + reg);
 239
 240	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
 241			(reg == SDHCI_INT_ENABLE))) {
 242		/* Erratum: Must enable block gap interrupt detection */
 243		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 244		if (val & SDHCI_INT_CARD_INT)
 245			gap_ctrl |= 0x8;
 246		else
 247			gap_ctrl &= ~0x8;
 248		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 249	}
 250}
 251
 252static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
 253{
 254	bool status;
 255	u32 reg;
 256
 257	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
 258	status = !!(reg & SDHCI_CLOCK_CARD_EN);
 259
 260	if (status == enable)
 261		return status;
 262
 263	if (enable)
 264		reg |= SDHCI_CLOCK_CARD_EN;
 265	else
 266		reg &= ~SDHCI_CLOCK_CARD_EN;
 267
 268	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
 269
 270	return status;
 271}
 272
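/*
 * Tegra210: gate the card clock while a tuning command is written and
 * reset the CMD/DATA lines afterwards.
 */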
 273static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 274{
 275	bool is_tuning_cmd = false;
 276	bool clk_enabled;
 277
 278	if (reg == SDHCI_COMMAND)
 279		is_tuning_cmd = mmc_op_tuning(SDHCI_GET_CMD(val));
 280
 281	if (is_tuning_cmd)
 282		clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 283
 284	writew(val, host->ioaddr + reg);
 285
 286	if (is_tuning_cmd) {
 287		udelay(1);
 288		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 289		tegra_sdhci_configure_card_clk(host, clk_enabled);
 290	}
 291}
 292
 293static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
 294{
 295	/*
 296	 * Write-enable shall be assumed if GPIO is missing in a board's
 297	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
 298	 * Tegra.
 299	 */
 300	return mmc_gpio_get_ro(host->mmc);
 301}
 302
 303static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
 304{
 305	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 306	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 307	int has_1v8, has_3v3;
 308
 309	/*
 310	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
 311	 * voltage configuration in order to perform voltage switching. This
 312	 * means that valid pinctrl info is required on SDHCI instances capable
 313	 * of performing voltage switching. Whether or not an SDHCI instance is
 314	 * capable of voltage switching is determined based on the regulator.
 315	 */
 316
 317	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 318		return true;
 319
 320	if (IS_ERR(host->mmc->supply.vqmmc))
 321		return false;
 322
 323	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 324						 1700000, 1950000);
 325
 326	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 327						 2700000, 3600000);
 328
 329	if (has_1v8 == 1 && has_3v3 == 1)
 330		return tegra_host->pad_control_available;
 331
 332	/* Fixed voltage, no pad control required. */
 333	return true;
 334}
 335
 336static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
 337{
 338	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 339	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 340	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 341	bool card_clk_enabled = false;
 342	u32 reg;
 343
 344	/*
 345	 * Touching the tap values is a bit tricky on some SoC generations.
 346	 * The quirk enables a workaround for a glitch that sometimes occurs if
 347	 * the tap values are changed.
 348	 */
 349
 350	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
 351		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 352
 353	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 354	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
 355	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
 356	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 357
 358	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
 359	    card_clk_enabled) {
 360		udelay(1);
 361		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 362		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 363	}
 364}
 365
 366static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
 367{
 368	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 369	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 370	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 371	u32 misc_ctrl, clk_ctrl, pad_ctrl;
 372
 373	sdhci_and_cqhci_reset(host, mask);
 374
 375	if (!(mask & SDHCI_RESET_ALL))
 376		return;
 377
 378	tegra_sdhci_set_tap(host, tegra_host->default_tap);
 379
 380	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 381	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 382
 383	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
 384		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
 385		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
 386		       SDHCI_MISC_CTRL_ENABLE_SDR104);
 387
 388	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
 389		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
 390
 391	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
 392		/* Erratum: Enable SDHCI spec v3.00 support */
 393		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
 394			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
 395		/* Advertise UHS modes as supported by host */
 396		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 397			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
 398		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
 399			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
 400		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
 401			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
 402		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 403			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
 404	}
 405
 406	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
 407
 408	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 409	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 410
 411	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
 412		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 413		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
 414		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
 415		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 416
 417		tegra_host->pad_calib_required = true;
 418	}
 419
 420	tegra_host->ddr_signaling = false;
 421}
 422
 423static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
 424{
 425	u32 val;
 426
 427	/*
 428	 * Enable or disable the additional I/O pad used by the drive strength
 429	 * calibration process.
 430	 */
 431	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 432
 433	if (enable)
 434		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 435	else
 436		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 437
 438	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 439
 440	if (enable)
 441		usleep_range(1, 2);
 442}
 443
 444static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
 445					       u16 pdpu)
 446{
 447	u32 reg;
 448
 449	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 450	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
 451	reg |= pdpu;
 452	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 453}
 454
 455static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
 456				   bool state_drvupdn)
 457{
 458	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 459	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 460	struct sdhci_tegra_autocal_offsets *offsets =
 461						&tegra_host->autocal_offsets;
 462	struct pinctrl_state *pinctrl_drvupdn = NULL;
 463	int ret = 0;
 464	u8 drvup = 0, drvdn = 0;
 465	u32 reg;
 466
 467	if (!state_drvupdn) {
 468		/* PADS Drive Strength */
 469		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 470			if (tegra_host->pinctrl_state_1v8_drv) {
 471				pinctrl_drvupdn =
 472					tegra_host->pinctrl_state_1v8_drv;
 473			} else {
 474				drvup = offsets->pull_up_1v8_timeout;
 475				drvdn = offsets->pull_down_1v8_timeout;
 476			}
 477		} else {
 478			if (tegra_host->pinctrl_state_3v3_drv) {
 479				pinctrl_drvupdn =
 480					tegra_host->pinctrl_state_3v3_drv;
 481			} else {
 482				drvup = offsets->pull_up_3v3_timeout;
 483				drvdn = offsets->pull_down_3v3_timeout;
 484			}
 485		}
 486
 487		if (pinctrl_drvupdn != NULL) {
 488			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 489							pinctrl_drvupdn);
 490			if (ret < 0)
 491				dev_err(mmc_dev(host->mmc),
 492					"failed pads drvupdn, ret: %d\n", ret);
 493		} else if ((drvup) || (drvdn)) {
 494			reg = sdhci_readl(host,
 495					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 496			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
 497			reg |= (drvup << 20) | (drvdn << 12);
 498			sdhci_writel(host, reg,
 499					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 500		}
 501
 502	} else {
 503		/* Dual Voltage PADS Voltage selection */
 504		if (!tegra_host->pad_control_available)
 505			return 0;
 506
 507		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 508			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 509						tegra_host->pinctrl_state_1v8);
 510			if (ret < 0)
 511				dev_err(mmc_dev(host->mmc),
 512					"setting 1.8V failed, ret: %d\n", ret);
 513		} else {
 514			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 515						tegra_host->pinctrl_state_3v3);
 516			if (ret < 0)
 517				dev_err(mmc_dev(host->mmc),
 518					"setting 3.3V failed, ret: %d\n", ret);
 519		}
 520	}
 521
 522	return ret;
 523}
 524
 525static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
 526{
 527	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 528	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 529	struct sdhci_tegra_autocal_offsets offsets =
 530			tegra_host->autocal_offsets;
 531	struct mmc_ios *ios = &host->mmc->ios;
 532	bool card_clk_enabled;
 533	u16 pdpu;
 534	u32 reg;
 535	int ret;
 536
 537	switch (ios->timing) {
 538	case MMC_TIMING_UHS_SDR104:
 539		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
 540		break;
 541	case MMC_TIMING_MMC_HS400:
 542		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
 543		break;
 544	default:
 545		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
 546			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
 547		else
 548			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
 549	}
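	/*
	 * pdpu packs the pull-down offset into bits [15:8] and the pull-up
	 * offset into bits [7:0], matching SDHCI_AUTO_CAL_PDPU_OFFSET_MASK.
	 */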
 550
 551	/* Set initial offset before auto-calibration */
 552	tegra_sdhci_set_pad_autocal_offset(host, pdpu);
 553
 554	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 555
 556	tegra_sdhci_configure_cal_pad(host, true);
 557
 558	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 559	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
 560	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 561
 562	usleep_range(1, 2);
 563	/* 10 ms timeout */
 564	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
 565				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
 566				 1000, 10000);
 567
 568	tegra_sdhci_configure_cal_pad(host, false);
 569
 570	tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 571
 572	if (ret) {
 573		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
 574
 575		/* Disable automatic cal and use fixed Drive Strengths */
 576		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 577		reg &= ~SDHCI_AUTO_CAL_ENABLE;
 578		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 579
 580		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
 581		if (ret < 0)
 582			dev_err(mmc_dev(host->mmc),
 583				"Setting drive strengths failed: %d\n", ret);
 584	}
 585}
 586
 587static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
 588{
 589	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 590	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 591	struct sdhci_tegra_autocal_offsets *autocal =
 592			&tegra_host->autocal_offsets;
 593	int err;
 594
 595	err = device_property_read_u32(mmc_dev(host->mmc),
 596			"nvidia,pad-autocal-pull-up-offset-3v3",
 597			&autocal->pull_up_3v3);
 598	if (err)
 599		autocal->pull_up_3v3 = 0;
 600
 601	err = device_property_read_u32(mmc_dev(host->mmc),
 602			"nvidia,pad-autocal-pull-down-offset-3v3",
 603			&autocal->pull_down_3v3);
 604	if (err)
 605		autocal->pull_down_3v3 = 0;
 606
 607	err = device_property_read_u32(mmc_dev(host->mmc),
 608			"nvidia,pad-autocal-pull-up-offset-1v8",
 609			&autocal->pull_up_1v8);
 610	if (err)
 611		autocal->pull_up_1v8 = 0;
 612
 613	err = device_property_read_u32(mmc_dev(host->mmc),
 614			"nvidia,pad-autocal-pull-down-offset-1v8",
 615			&autocal->pull_down_1v8);
 616	if (err)
 617		autocal->pull_down_1v8 = 0;
 618
 619	err = device_property_read_u32(mmc_dev(host->mmc),
 620			"nvidia,pad-autocal-pull-up-offset-sdr104",
 621			&autocal->pull_up_sdr104);
 622	if (err)
 623		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
 624
 625	err = device_property_read_u32(mmc_dev(host->mmc),
 626			"nvidia,pad-autocal-pull-down-offset-sdr104",
 627			&autocal->pull_down_sdr104);
 628	if (err)
 629		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
 630
 631	err = device_property_read_u32(mmc_dev(host->mmc),
 632			"nvidia,pad-autocal-pull-up-offset-hs400",
 633			&autocal->pull_up_hs400);
 634	if (err)
 635		autocal->pull_up_hs400 = autocal->pull_up_1v8;
 636
 637	err = device_property_read_u32(mmc_dev(host->mmc),
 638			"nvidia,pad-autocal-pull-down-offset-hs400",
 639			&autocal->pull_down_hs400);
 640	if (err)
 641		autocal->pull_down_hs400 = autocal->pull_down_1v8;
 642
 643	/*
 644	 * Different fail-safe drive strength values based on the signaling
 645	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
 646	 * So, avoid reading the device tree properties below for SoCs that don't
 647	 * have NVQUIRK_NEEDS_PAD_CONTROL.
 648	 */
 649	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 650		return;
 651
 652	err = device_property_read_u32(mmc_dev(host->mmc),
 653			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
 654			&autocal->pull_up_3v3_timeout);
 655	if (err) {
 656		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 657			(tegra_host->pinctrl_state_3v3_drv == NULL))
 658			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 659				mmc_hostname(host->mmc));
 660		autocal->pull_up_3v3_timeout = 0;
 661	}
 662
 663	err = device_property_read_u32(mmc_dev(host->mmc),
 664			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
 665			&autocal->pull_down_3v3_timeout);
 666	if (err) {
 667		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 668			(tegra_host->pinctrl_state_3v3_drv == NULL))
 669			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 670				mmc_hostname(host->mmc));
 671		autocal->pull_down_3v3_timeout = 0;
 672	}
 673
 674	err = device_property_read_u32(mmc_dev(host->mmc),
 675			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
 676			&autocal->pull_up_1v8_timeout);
 677	if (err) {
 678		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 679			(tegra_host->pinctrl_state_1v8_drv == NULL))
 680			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 681				mmc_hostname(host->mmc));
 682		autocal->pull_up_1v8_timeout = 0;
 683	}
 684
 685	err = device_property_read_u32(mmc_dev(host->mmc),
 686			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
 687			&autocal->pull_down_1v8_timeout);
 688	if (err) {
 689		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 690			(tegra_host->pinctrl_state_1v8_drv == NULL))
 691			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 692				mmc_hostname(host->mmc));
 693		autocal->pull_down_1v8_timeout = 0;
 694	}
 695}
 696
 697static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 698{
 699	struct sdhci_host *host = mmc_priv(mmc);
 700	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 701	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 702	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
 703
 704	/* 100 ms calibration interval is specified in the TRM */
 705	if (ktime_to_ms(since_calib) > 100) {
 706		tegra_sdhci_pad_autocalib(host);
 707		tegra_host->last_calib = ktime_get();
 708	}
 709
 710	sdhci_request(mmc, mrq);
 711}
 712
 713static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
 714{
 715	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 716	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 717	int err;
 718
 719	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
 720				       &tegra_host->default_tap);
 721	if (err)
 722		tegra_host->default_tap = 0;
 723
 724	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
 725				       &tegra_host->default_trim);
 726	if (err)
 727		tegra_host->default_trim = 0;
 728
 729	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
 730				       &tegra_host->dqs_trim);
 731	if (err)
 732		tegra_host->dqs_trim = 0x11;
 733}
 734
 735static void tegra_sdhci_parse_dt(struct sdhci_host *host)
 736{
 737	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 738	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 739
 740	if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
 741		tegra_host->enable_hwcq = true;
 742	else
 743		tegra_host->enable_hwcq = false;
 744
 745	tegra_sdhci_parse_pad_autocal_dt(host);
 746	tegra_sdhci_parse_tap_and_trim(host);
 747}
 748
 749static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 750{
 751	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 752	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 753	struct device *dev = mmc_dev(host->mmc);
 754	unsigned long host_clk;
 755	int err;
 756
 757	if (!clock)
 758		return sdhci_set_clock(host, clock);
 759
 760	/*
 761	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
 762	 * divider to be configured to divide the host clock by two. The SDHCI
 763	 * clock divider is calculated as part of sdhci_set_clock() by
 764	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
 765	 * the requested clock rate.
 766	 *
 767	 * By setting the host->max_clk to clock * 2 the divider calculation
 768	 * will always result in the correct value for DDR50/52 modes,
 769	 * regardless of clock rate rounding, which may happen if the value
 770	 * from clk_get_rate() is used.
 771	 */
 772	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
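	/*
	 * For example, a 52 MHz DDR52 request sets the OPP/clock and
	 * host->max_clk to 104 MHz below, so sdhci_calc_clk() ends up with a
	 * divider of two for the requested 52 MHz.
	 */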
 773
 774	err = dev_pm_opp_set_rate(dev, host_clk);
 775	if (err)
 776		dev_err(dev, "failed to set clk rate to %luHz: %d\n",
 777			host_clk, err);
 778
 779	tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
 780	if (tegra_host->ddr_signaling)
 781		host->max_clk = host_clk;
 782	else
 783		host->max_clk = clk_get_rate(pltfm_host->clk);
 784
 785	sdhci_set_clock(host, clock);
 786
 787	if (tegra_host->pad_calib_required) {
 788		tegra_sdhci_pad_autocalib(host);
 789		tegra_host->pad_calib_required = false;
 790	}
 791}
 792
 793static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
 794					      struct mmc_ios *ios)
 795{
 796	struct sdhci_host *host = mmc_priv(mmc);
 797	u32 val;
 798
 799	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 800
 801	if (ios->enhanced_strobe) {
 802		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 803		/*
 804		 * When CMD13 is sent from mmc_select_hs400es() after
 805		 * switching to HS400ES mode, the bus is operating at
 806		 * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
 807		 * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
 808		 * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
 809		 * controller CAR clock and the interface clock are rate matched.
 810		 */
 811		tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
 812	} else {
 813		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 814	}
 815
 816	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 817}
 818
 819static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
 820{
 821	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 822
 823	return clk_round_rate(pltfm_host->clk, UINT_MAX);
 824}
 825
 826static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
 827{
 828	u32 val;
 829
 830	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 831	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
 832	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
 833	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 834}
 835
 836static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
 837{
 838	u32 reg;
 839	int err;
 840
 841	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 842	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
 843	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 844
 845	/* 1 ms sleep, 5 ms timeout */
 846	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
 847				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
 848				 1000, 5000);
 849	if (err)
 850		dev_err(mmc_dev(host->mmc),
 851			"HS400 delay line calibration timed out\n");
 852}
 853
 854static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
 855				       u8 thd_low, u8 fixed_tap)
 856{
 857	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 858	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 859	u32 val, tun_status;
 860	u8 word, bit, edge1, tap, window;
 861	bool tap_result;
 862	bool start_fail = false;
 863	bool start_pass = false;
 864	bool end_pass = false;
 865	bool first_fail = false;
 866	bool first_pass = false;
 867	u8 start_pass_tap = 0;
 868	u8 end_pass_tap = 0;
 869	u8 first_fail_tap = 0;
 870	u8 first_pass_tap = 0;
 871	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
 872
 873	/*
 874	 * Read the auto-tuned results and extract a good, valid passing window
 875	 * by filtering out unwanted bubble/partial/merged windows.
 876	 */
 877	for (word = 0; word < total_tuning_words; word++) {
 878		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
 879		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
 880		val |= word;
 881		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
 882		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
 883		bit = 0;
 884		while (bit < TUNING_WORD_BIT_SIZE) {
 885			tap = word * TUNING_WORD_BIT_SIZE + bit;
 886			tap_result = tun_status & (1 << bit);
 887			if (!tap_result && !start_fail) {
 888				start_fail = true;
 889				if (!first_fail) {
 890					first_fail_tap = tap;
 891					first_fail = true;
 892				}
 893
 894			} else if (tap_result && start_fail && !start_pass) {
 895				start_pass_tap = tap;
 896				start_pass = true;
 897				if (!first_pass) {
 898					first_pass_tap = tap;
 899					first_pass = true;
 900				}
 901
 902			} else if (!tap_result && start_fail && start_pass &&
 903				   !end_pass) {
 904				end_pass_tap = tap - 1;
 905				end_pass = true;
 906			} else if (tap_result && start_pass && start_fail &&
 907				   end_pass) {
 908				window = end_pass_tap - start_pass_tap;
 909				/* discard merged window and bubble window */
 910				if (window >= thd_up || window < thd_low) {
 911					start_pass_tap = tap;
 912					end_pass = false;
 913				} else {
 914					/* set tap at middle of valid window */
 915					tap = start_pass_tap + window / 2;
 916					tegra_host->tuned_tap_delay = tap;
 917					return;
 918				}
 919			}
 920
 921			bit++;
 922		}
 923	}
 924
 925	if (!first_fail) {
 926		WARN(1, "no edge detected, continue with hw tuned delay.\n");
 927	} else if (first_pass) {
 928		/* set tap location at fixed tap relative to the first edge */
 929		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
 930		if (edge1 - 1 > fixed_tap)
 931			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
 932		else
 933			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
 934	}
 935}
 936
 937static void tegra_sdhci_post_tuning(struct sdhci_host *host)
 938{
 939	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 940	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 941	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 942	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
 943	u8 fixed_tap, start_tap, end_tap, window_width;
 944	u8 thdupper, thdlower;
 945	u8 num_iter;
 946	u32 clk_rate_mhz, period_ps, bestcase, worstcase;
 947
 948	/* retain the HW-tuned tap to use in case no correction is needed */
 949	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 950	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
 951				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
 952	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
 953		min_tap_dly = soc_data->min_tap_delay;
 954		max_tap_dly = soc_data->max_tap_delay;
 955		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
 956		period_ps = USEC_PER_SEC / clk_rate_mhz;
 957		bestcase = period_ps / min_tap_dly;
 958		worstcase = period_ps / max_tap_dly;
 959		/*
 960		 * Upper and Lower bound thresholds used to detect merged and
 961		 * bubble windows
 962		 */
 963		thdupper = (2 * worstcase + bestcase) / 2;
 964		thdlower = worstcase / 4;
 965		/*
 966		 * The fixed tap is used when the HW tuning result contains a single
 967		 * edge; the tap is then set at a fixed delay from that edge.
 968		 */
 969		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
 970		fixed_tap = avg_tap_dly / 2;
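		/*
		 * Illustrative numbers, assuming the Tegra194 tap delays
		 * (96/139 ps) and a 200 MHz tuning clock: period_ps = 5000,
		 * bestcase = 52, worstcase = 35, thdupper = 61, thdlower = 8,
		 * avg_tap_dly = 42, fixed_tap = 21.
		 */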
 971
 972		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
 973		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 974		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
 975			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 976		window_width = end_tap - start_tap;
 977		num_iter = host->tuning_loop_count;
 978		/*
 979		 * A partial window includes the edges of the tuning range. A merged
 980		 * window includes more taps, so its width is higher than the upper
 981		 * threshold.
 982		 */
 983		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
 984		    (end_tap == num_iter - 2) || window_width >= thdupper) {
 985			pr_debug("%s: Apply tuning correction\n",
 986				 mmc_hostname(host->mmc));
 987			tegra_sdhci_tap_correction(host, thdupper, thdlower,
 988						   fixed_tap);
 989		}
 990	}
 991
 992	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
 993}
 994
 995static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
 996{
 997	struct sdhci_host *host = mmc_priv(mmc);
 998	int err;
 999
1000	err = sdhci_execute_tuning(mmc, opcode);
1001	if (!err && !host->tuning_err)
1002		tegra_sdhci_post_tuning(host);
1003
1004	return err;
1005}
1006
1007static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
1008					  unsigned timing)
1009{
1010	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1011	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1012	bool set_default_tap = false;
1013	bool set_dqs_trim = false;
1014	bool do_hs400_dll_cal = false;
1015	u8 iter = TRIES_256;
1016	u32 val;
1017
1018	tegra_host->ddr_signaling = false;
1019	switch (timing) {
1020	case MMC_TIMING_UHS_SDR50:
1021		break;
1022	case MMC_TIMING_UHS_SDR104:
1023	case MMC_TIMING_MMC_HS200:
1024		/* Don't set default tap on tunable modes. */
1025		iter = TRIES_128;
1026		break;
1027	case MMC_TIMING_MMC_HS400:
1028		set_dqs_trim = true;
1029		do_hs400_dll_cal = true;
1030		iter = TRIES_128;
1031		break;
1032	case MMC_TIMING_MMC_DDR52:
1033	case MMC_TIMING_UHS_DDR50:
1034		tegra_host->ddr_signaling = true;
1035		set_default_tap = true;
1036		break;
1037	default:
1038		set_default_tap = true;
1039		break;
1040	}
1041
1042	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1043	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
1044		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1045		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1046	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1047		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1048		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1049	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1050	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1051
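	/*
	 * TRIES_128 (2) and TRIES_256 (4) are the TUN_ITER field encodings
	 * selecting 128 or 256 hardware tuning iterations, hence the loop
	 * count chosen here.
	 */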
1052	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1053
1054	sdhci_set_uhs_signaling(host, timing);
1055
1056	tegra_sdhci_pad_autocalib(host);
1057
1058	if (tegra_host->tuned_tap_delay && !set_default_tap)
1059		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1060	else
1061		tegra_sdhci_set_tap(host, tegra_host->default_tap);
1062
1063	if (set_dqs_trim)
1064		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1065
1066	if (do_hs400_dll_cal)
1067		tegra_sdhci_hs400_dll_cal(host);
1068}
1069
1070static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1071{
1072	unsigned int min, max;
1073
1074	/*
1075	 * Start search for minimum tap value at 10, as smaller values
1076	 * may wrongly be reported as working but fail at higher speeds,
1077	 * according to the TRM.
1078	 */
1079	min = 10;
1080	while (min < 255) {
1081		tegra_sdhci_set_tap(host, min);
1082		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1083			break;
1084		min++;
1085	}
1086
1087	/* Find the maximum tap value that still passes. */
1088	max = min + 1;
1089	while (max < 255) {
1090		tegra_sdhci_set_tap(host, max);
1091		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1092			max--;
1093			break;
1094		}
1095		max++;
1096	}
1097
1098	/* The TRM states the ideal tap value is at 75% in the passing range. */
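	/* E.g. with min = 10 and max = 130 this picks tap 10 + 90 = 100. */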
1099	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1100
1101	return mmc_send_tuning(host->mmc, opcode, NULL);
1102}
1103
1104static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1105						   struct mmc_ios *ios)
1106{
1107	struct sdhci_host *host = mmc_priv(mmc);
1108	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1109	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1110	int ret = 0;
1111
1112	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1113		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1114		if (ret < 0)
1115			return ret;
1116		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1117	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1118		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1119		if (ret < 0)
1120			return ret;
1121		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1122	}
1123
1124	if (tegra_host->pad_calib_required)
1125		tegra_sdhci_pad_autocalib(host);
1126
1127	return ret;
1128}
1129
1130static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1131					 struct sdhci_tegra *tegra_host)
1132{
1133	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1134	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1135		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1136			PTR_ERR(tegra_host->pinctrl_sdmmc));
1137		return -1;
1138	}
1139
1140	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1141				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1142	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1143		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1144			tegra_host->pinctrl_state_1v8_drv = NULL;
1145	}
1146
1147	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1148				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1149	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1150		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1151			tegra_host->pinctrl_state_3v3_drv = NULL;
1152	}
1153
1154	tegra_host->pinctrl_state_3v3 =
1155		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1156	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1157		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1158			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1159		return -1;
1160	}
1161
1162	tegra_host->pinctrl_state_1v8 =
1163		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1164	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1165		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1166			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1167		return -1;
1168	}
1169
1170	tegra_host->pad_control_available = true;
1171
1172	return 0;
1173}
1174
1175static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1176{
1177	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1178	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1179	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1180
1181	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1182		tegra_host->pad_calib_required = true;
1183}
1184
1185static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1186{
1187	struct mmc_host *mmc = cq_host->mmc;
1188	struct sdhci_host *host = mmc_priv(mmc);
1189	u8 ctrl;
1190	ktime_t timeout;
1191	bool timed_out;
1192
1193	/*
1194	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
1195	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
1196	 * to be re-configured.
1197	 * Tegra CQHCI/SDHCI prevents write access to the block size register
1198	 * when CQE is unhalted, so handle the CQE resume sequence here to
1199	 * configure the SDHCI block registers prior to exiting the CQE halt state.
1200	 */
1201	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1202	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1203		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1204		sdhci_cqe_enable(mmc);
1205		writel(val, cq_host->mmio + reg);
1206		timeout = ktime_add_us(ktime_get(), 50);
1207		while (1) {
1208			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1209			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1210			if (!(ctrl & CQHCI_HALT) || timed_out)
1211				break;
1212		}
1213		/*
1214		 * CQE usually resumes very quickly, but in case the Tegra CQE
1215		 * doesn't resume, retry the unhalt.
1216		 */
1217		if (timed_out)
1218			writel(val, cq_host->mmio + reg);
1219	} else {
1220		writel(val, cq_host->mmio + reg);
1221	}
1222}
1223
1224static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1225					 struct mmc_request *mrq, u64 *data)
1226{
1227	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1228	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1229	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1230
1231	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1232	    mrq->cmd->flags & MMC_RSP_R1B)
1233		*data |= CQHCI_CMD_TIMING(1);
1234}
1235
1236static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1237{
1238	struct cqhci_host *cq_host = mmc->cqe_private;
1239	struct sdhci_host *host = mmc_priv(mmc);
1240	u32 val;
1241
1242	/*
1243	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
1244	 * register when CQE is enabled and unhalted.
1245	 * The CQHCI driver enables CQE prior to activation, so disable CQE before
1246	 * programming the block size in the SDHCI controller, then re-enable it.
1247	 */
1248	if (!cq_host->activated) {
1249		val = cqhci_readl(cq_host, CQHCI_CFG);
1250		if (val & CQHCI_ENABLE)
1251			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1252				     CQHCI_CFG);
1253		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1254		sdhci_cqe_enable(mmc);
1255		if (val & CQHCI_ENABLE)
1256			cqhci_writel(cq_host, val, CQHCI_CFG);
1257	}
1258
1259	/*
1260	 * CMD CRC errors are sometimes seen with some eMMC devices when the
1261	 * status command is sent during transfer of the last data block, which
1262	 * is the default case as the send-status-command block counter (CBC)
1263	 * is 1. The recommended fix is to set CBC to 0, allowing the status
1264	 * command only when the data lines are idle.
1265	 */
1266	val = cqhci_readl(cq_host, CQHCI_SSC1);
1267	val &= ~CQHCI_SSC1_CBC_MASK;
1268	cqhci_writel(cq_host, val, CQHCI_SSC1);
1269}
1270
1271static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1272{
1273	sdhci_dumpregs(mmc_priv(mmc));
1274}
1275
1276static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1277{
1278	int cmd_error = 0;
1279	int data_error = 0;
1280
1281	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1282		return intmask;
1283
1284	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1285
1286	return 0;
1287}
1288
1289static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1290				    struct mmc_command *cmd)
1291{
1292	u32 val;
1293
1294	/*
1295	 * HW busy detection timeout is based on programmed data timeout
1296	 * counter and maximum supported timeout is 11s which may not be
1297	 * enough for long operations like cache flush, sleep awake, erase.
1298	 *
1299	 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
1300	 * host controller to wait for busy state until the card is busy
1301	 * without HW timeout.
1302	 *
1303	 * So, use infinite busy wait mode for operations that may take more
1304	 * than the maximum HW busy timeout of 11s; otherwise use finite busy
1305	 * wait mode.
1306	 */
1307	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1308	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
1309		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1310	else
1311		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1312	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1313
1314	__sdhci_set_timeout(host, cmd);
1315}
1316
1317static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
1318{
1319	struct cqhci_host *cq_host = mmc->cqe_private;
1320	u32 reg;
1321
1322	reg = cqhci_readl(cq_host, CQHCI_CFG);
1323	reg |= CQHCI_ENABLE;
1324	cqhci_writel(cq_host, reg, CQHCI_CFG);
1325}
1326
1327static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
1328{
1329	struct cqhci_host *cq_host = mmc->cqe_private;
1330	struct sdhci_host *host = mmc_priv(mmc);
1331	u32 reg;
1332
1333	reg = cqhci_readl(cq_host, CQHCI_CFG);
1334	reg &= ~CQHCI_ENABLE;
1335	cqhci_writel(cq_host, reg, CQHCI_CFG);
1336	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1337}
1338
1339static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1340	.write_l    = tegra_cqhci_writel,
1341	.enable	= sdhci_tegra_cqe_enable,
1342	.disable = sdhci_cqe_disable,
1343	.dumpregs = sdhci_tegra_dumpregs,
1344	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1345	.pre_enable = sdhci_tegra_cqe_pre_enable,
1346	.post_disable = sdhci_tegra_cqe_post_disable,
1347};
1348
1349static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1350{
1351	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1352	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1353	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1354	struct device *dev = mmc_dev(host->mmc);
1355
1356	if (soc->dma_mask)
1357		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1358
1359	return 0;
1360}
1361
1362static const struct sdhci_ops tegra_sdhci_ops = {
1363	.get_ro     = tegra_sdhci_get_ro,
1364	.read_w     = tegra_sdhci_readw,
1365	.write_l    = tegra_sdhci_writel,
1366	.set_clock  = tegra_sdhci_set_clock,
1367	.set_dma_mask = tegra_sdhci_set_dma_mask,
1368	.set_bus_width = sdhci_set_bus_width,
1369	.reset      = tegra_sdhci_reset,
1370	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1371	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1372	.voltage_switch = tegra_sdhci_voltage_switch,
1373	.get_max_clock = tegra_sdhci_get_max_clock,
1374};
1375
1376static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1377	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1378		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1379		  SDHCI_QUIRK_NO_HISPD_BIT |
1380		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1381		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1382	.ops  = &tegra_sdhci_ops,
1383};
1384
1385static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1386	.pdata = &sdhci_tegra20_pdata,
1387	.dma_mask = DMA_BIT_MASK(32),
1388	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1389		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1390		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1391};
1392
1393static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1394	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1395		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1396		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1397		  SDHCI_QUIRK_NO_HISPD_BIT |
1398		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1399		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1400	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1401		   SDHCI_QUIRK2_BROKEN_HS200 |
1402		   /*
1403		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1404		    * though no command operation was in progress."
1405		    *
1406		    * The exact reason is unknown, as the same hardware seems
1407		    * to support Auto CMD23 on a downstream 3.1 kernel.
1408		    */
1409		   SDHCI_QUIRK2_ACMD23_BROKEN,
1410	.ops  = &tegra_sdhci_ops,
1411};
1412
1413static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1414	.pdata = &sdhci_tegra30_pdata,
1415	.dma_mask = DMA_BIT_MASK(32),
1416	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1417		    NVQUIRK_ENABLE_SDR50 |
1418		    NVQUIRK_ENABLE_SDR104 |
1419		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1420		    NVQUIRK_HAS_PADCALIB,
1421};
1422
1423static const struct sdhci_ops tegra114_sdhci_ops = {
1424	.get_ro     = tegra_sdhci_get_ro,
1425	.read_w     = tegra_sdhci_readw,
1426	.write_w    = tegra_sdhci_writew,
1427	.write_l    = tegra_sdhci_writel,
1428	.set_clock  = tegra_sdhci_set_clock,
1429	.set_dma_mask = tegra_sdhci_set_dma_mask,
1430	.set_bus_width = sdhci_set_bus_width,
1431	.reset      = tegra_sdhci_reset,
1432	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1433	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1434	.voltage_switch = tegra_sdhci_voltage_switch,
1435	.get_max_clock = tegra_sdhci_get_max_clock,
1436};
1437
1438static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1439	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1440		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1441		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1442		  SDHCI_QUIRK_NO_HISPD_BIT |
1443		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1444		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1445	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1446	.ops  = &tegra114_sdhci_ops,
1447};
1448
1449static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1450	.pdata = &sdhci_tegra114_pdata,
1451	.dma_mask = DMA_BIT_MASK(32),
1452	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1453};
1454
1455static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1456	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1457		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1458		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1459		  SDHCI_QUIRK_NO_HISPD_BIT |
1460		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1461		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1462	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1463	.ops  = &tegra114_sdhci_ops,
1464};
1465
1466static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1467	.pdata = &sdhci_tegra124_pdata,
1468	.dma_mask = DMA_BIT_MASK(34),
1469	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1470};
1471
1472static const struct sdhci_ops tegra210_sdhci_ops = {
1473	.get_ro     = tegra_sdhci_get_ro,
1474	.read_w     = tegra_sdhci_readw,
1475	.write_w    = tegra210_sdhci_writew,
1476	.write_l    = tegra_sdhci_writel,
1477	.set_clock  = tegra_sdhci_set_clock,
1478	.set_dma_mask = tegra_sdhci_set_dma_mask,
1479	.set_bus_width = sdhci_set_bus_width,
1480	.reset      = tegra_sdhci_reset,
1481	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1482	.voltage_switch = tegra_sdhci_voltage_switch,
1483	.get_max_clock = tegra_sdhci_get_max_clock,
1484	.set_timeout = tegra_sdhci_set_timeout,
1485};
1486
1487static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1488	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1489		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1490		  SDHCI_QUIRK_NO_HISPD_BIT |
1491		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1492		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1493	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1494	.ops  = &tegra210_sdhci_ops,
1495};
1496
1497static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1498	.pdata = &sdhci_tegra210_pdata,
1499	.dma_mask = DMA_BIT_MASK(34),
1500	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1501		    NVQUIRK_HAS_PADCALIB |
1502		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1503		    NVQUIRK_ENABLE_SDR50 |
1504		    NVQUIRK_ENABLE_SDR104 |
1505		    NVQUIRK_HAS_TMCLK,
1506	.min_tap_delay = 106,
1507	.max_tap_delay = 185,
1508};
1509
1510static const struct sdhci_ops tegra186_sdhci_ops = {
1511	.get_ro     = tegra_sdhci_get_ro,
1512	.read_w     = tegra_sdhci_readw,
1513	.write_l    = tegra_sdhci_writel,
1514	.set_clock  = tegra_sdhci_set_clock,
1515	.set_dma_mask = tegra_sdhci_set_dma_mask,
1516	.set_bus_width = sdhci_set_bus_width,
1517	.reset      = tegra_sdhci_reset,
1518	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1519	.voltage_switch = tegra_sdhci_voltage_switch,
1520	.get_max_clock = tegra_sdhci_get_max_clock,
1521	.irq = sdhci_tegra_cqhci_irq,
1522	.set_timeout = tegra_sdhci_set_timeout,
1523};
1524
1525static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1526	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1527		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1528		  SDHCI_QUIRK_NO_HISPD_BIT |
1529		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1530		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1531	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1532		   SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
1533	.ops  = &tegra186_sdhci_ops,
1534};
1535
1536static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1537	.pdata = &sdhci_tegra186_pdata,
1538	.dma_mask = DMA_BIT_MASK(40),
1539	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1540		    NVQUIRK_HAS_PADCALIB |
1541		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1542		    NVQUIRK_ENABLE_SDR50 |
1543		    NVQUIRK_ENABLE_SDR104 |
1544		    NVQUIRK_HAS_TMCLK |
1545		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1546	.min_tap_delay = 84,
1547	.max_tap_delay = 136,
1548};
1549
1550static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1551	.pdata = &sdhci_tegra186_pdata,
1552	.dma_mask = DMA_BIT_MASK(39),
1553	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1554		    NVQUIRK_HAS_PADCALIB |
1555		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1556		    NVQUIRK_ENABLE_SDR50 |
1557		    NVQUIRK_ENABLE_SDR104 |
1558		    NVQUIRK_HAS_TMCLK,
1559	.min_tap_delay = 96,
1560	.max_tap_delay = 139,
1561};
1562
1563static const struct sdhci_tegra_soc_data soc_data_tegra234 = {
1564	.pdata = &sdhci_tegra186_pdata,
1565	.dma_mask = DMA_BIT_MASK(39),
1566	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1567		    NVQUIRK_HAS_PADCALIB |
1568		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1569		    NVQUIRK_ENABLE_SDR50 |
1570		    NVQUIRK_ENABLE_SDR104 |
1571		    NVQUIRK_PROGRAM_STREAMID |
1572		    NVQUIRK_HAS_TMCLK,
1573	.min_tap_delay = 95,
1574	.max_tap_delay = 111,
1575};
1576
1577static const struct of_device_id sdhci_tegra_dt_match[] = {
1578	{ .compatible = "nvidia,tegra234-sdhci", .data = &soc_data_tegra234 },
1579	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1580	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1581	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1582	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1583	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1584	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1585	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1586	{}
1587};
1588MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1589
1590static int sdhci_tegra_add_host(struct sdhci_host *host)
1591{
1592	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1593	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1594	struct cqhci_host *cq_host;
1595	bool dma64;
1596	int ret;
1597
1598	if (!tegra_host->enable_hwcq)
1599		return sdhci_add_host(host);
1600
1601	sdhci_enable_v4_mode(host);
1602
1603	ret = sdhci_setup_host(host);
1604	if (ret)
1605		return ret;
1606
1607	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1608
1609	cq_host = devm_kzalloc(mmc_dev(host->mmc),
1610				sizeof(*cq_host), GFP_KERNEL);
1611	if (!cq_host) {
1612		ret = -ENOMEM;
1613		goto cleanup;
1614	}
1615
1616	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1617	cq_host->ops = &sdhci_tegra_cqhci_ops;
1618
1619	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
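	/* 64-bit DMA addressing needs the larger 128-bit task descriptors. */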
1620	if (dma64)
1621		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1622
1623	ret = cqhci_init(cq_host, host->mmc, dma64);
1624	if (ret)
1625		goto cleanup;
1626
1627	ret = __sdhci_add_host(host);
1628	if (ret)
1629		goto cleanup;
1630
1631	return 0;
1632
1633cleanup:
1634	sdhci_cleanup_host(host);
1635	return ret;
1636}
1637
1638/* Program MC streamID for DMA transfers */
1639static void sdhci_tegra_program_stream_id(struct sdhci_host *host)
1640{
1641	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1642	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1643
1644	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID) {
1645		tegra_sdhci_writel(host, FIELD_PREP(GENMASK(15, 8), tegra_host->stream_id) |
1646					 FIELD_PREP(GENMASK(7, 0), tegra_host->stream_id),
1647					 SDHCI_TEGRA_CIF2AXI_CTRL_0);
1648	}
1649}
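
As an illustration of the write above: the same stream ID is packed into bits [15:8] and bits [7:0] of SDHCI_TEGRA_CIF2AXI_CTRL_0, so the 0x7f fallback ID used later in probe ends up as 0x7f7f. A minimal standalone sketch of that packing (pack_stream_id is a hypothetical helper, not part of the driver):

#include <stdio.h>

/* Mirrors FIELD_PREP(GENMASK(15, 8), id) | FIELD_PREP(GENMASK(7, 0), id) */
static unsigned int pack_stream_id(unsigned int stream_id)
{
	return ((stream_id & 0xff) << 8) | (stream_id & 0xff);
}

int main(void)
{
	printf("0x%04x\n", pack_stream_id(0x7f));	/* prints 0x7f7f */
	return 0;
}
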
1650
1651static int sdhci_tegra_probe(struct platform_device *pdev)
1652{
1653	const struct sdhci_tegra_soc_data *soc_data;
1654	struct sdhci_host *host;
1655	struct sdhci_pltfm_host *pltfm_host;
1656	struct sdhci_tegra *tegra_host;
1657	struct clk *clk;
1658	int rc;
1659
1660	soc_data = of_device_get_match_data(&pdev->dev);
1661	if (!soc_data)
1662		return -EINVAL;
1663
1664	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1665	if (IS_ERR(host))
1666		return PTR_ERR(host);
1667	pltfm_host = sdhci_priv(host);
1668
1669	tegra_host = sdhci_pltfm_priv(pltfm_host);
1670	tegra_host->ddr_signaling = false;
1671	tegra_host->pad_calib_required = false;
1672	tegra_host->pad_control_available = false;
1673	tegra_host->soc_data = soc_data;
1674
1675	if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
1676		host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;
1677
1678	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1679		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1680		if (rc == 0)
1681			host->mmc_host_ops.start_signal_voltage_switch =
1682				sdhci_tegra_start_signal_voltage_switch;
1683	}
1684
1685	/* Hook to periodically rerun pad calibration */
1686	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1687		host->mmc_host_ops.request = tegra_sdhci_request;
1688
1689	host->mmc_host_ops.hs400_enhanced_strobe =
1690			tegra_sdhci_hs400_enhanced_strobe;
1691
1692	if (!host->ops->platform_execute_tuning)
1693		host->mmc_host_ops.execute_tuning =
1694				tegra_sdhci_execute_hw_tuning;
1695
1696	rc = mmc_of_parse(host->mmc);
1697	if (rc)
1698		goto err_parse_dt;
1699
1700	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1701		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1702
1703	/* HW busy detection is supported, but R1B responses are required. */
1704	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1705
1706	/* GPIO CD can be set as a wakeup source */
1707	host->mmc->caps |= MMC_CAP_CD_WAKE;
1708
1709	tegra_sdhci_parse_dt(host);
1710
1711	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID &&
1712	    !tegra_dev_iommu_get_stream_id(&pdev->dev, &tegra_host->stream_id)) {
1713		dev_warn(mmc_dev(host->mmc), "missing IOMMU stream ID\n");
1714		tegra_host->stream_id = 0x7f;
1715	}
1716
1717	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1718							 GPIOD_OUT_HIGH);
1719	if (IS_ERR(tegra_host->power_gpio)) {
1720		rc = PTR_ERR(tegra_host->power_gpio);
1721		goto err_power_req;
1722	}
1723
1724	/*
1725	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used for host
1726	 * timeout clock and SW can choose TMCLK or SDCLK for hardware
1727	 * data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of
1728	 * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL.
1729	 *
1730	 * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, so SDMMC uses the
1731	 * 12 MHz TMCLK, which is advertised in the host capability register.
1732	 * With a 12 MHz TMCLK the maximum achievable data timeout period is
1733	 * 11 s, which is better than what SDCLK provides for the data timeout.
1734	 *
1735	 * So TMCLK is set to 12 MHz and kept enabled all the time on SoCs
1736	 * supporting a separate TMCLK.
1737	 */
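	/*
	 * Worked example (illustration only, not driver code): the largest
	 * SDHCI data timeout setting counts 2^27 timeout-clock cycles, so a
	 * 12 MHz TMCLK gives 134217728 / 12000000 ~= 11.18 s, which is where
	 * the 11 s figure above comes from.
	 */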
1738
1739	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1740		clk = devm_clk_get(&pdev->dev, "tmclk");
1741		if (IS_ERR(clk)) {
1742			rc = PTR_ERR(clk);
1743			if (rc == -EPROBE_DEFER)
1744				goto err_power_req;
1745
1746			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
1747			clk = NULL;
1748		}
1749
1750		clk_set_rate(clk, 12000000);
1751		rc = clk_prepare_enable(clk);
1752		if (rc) {
1753			dev_err(&pdev->dev,
1754				"failed to enable tmclk: %d\n", rc);
1755			goto err_power_req;
1756		}
1757
1758		tegra_host->tmclk = clk;
1759	}
1760
1761	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1762	if (IS_ERR(clk)) {
1763		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
1764				   "failed to get clock\n");
1765		goto err_clk_get;
1766	}
1767	pltfm_host->clk = clk;
1768
1769	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1770							   "sdhci");
1771	if (IS_ERR(tegra_host->rst)) {
1772		rc = PTR_ERR(tegra_host->rst);
1773		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1774		goto err_rst_get;
1775	}
1776
1777	rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
1778	if (rc)
1779		goto err_rst_get;
1780
1781	pm_runtime_enable(&pdev->dev);
1782	rc = pm_runtime_resume_and_get(&pdev->dev);
1783	if (rc)
1784		goto err_pm_get;
1785
1786	rc = reset_control_assert(tegra_host->rst);
1787	if (rc)
1788		goto err_rst_assert;
1789
1790	usleep_range(2000, 4000);
1791
1792	rc = reset_control_deassert(tegra_host->rst);
1793	if (rc)
1794		goto err_rst_assert;
1795
1796	usleep_range(2000, 4000);
1797
1798	rc = sdhci_tegra_add_host(host);
1799	if (rc)
1800		goto err_add_host;
1801
1802	sdhci_tegra_program_stream_id(host);
1803
1804	return 0;
1805
1806err_add_host:
1807	reset_control_assert(tegra_host->rst);
1808err_rst_assert:
1809	pm_runtime_put_sync_suspend(&pdev->dev);
1810err_pm_get:
1811	pm_runtime_disable(&pdev->dev);
1812err_rst_get:
1813err_clk_get:
1814	clk_disable_unprepare(tegra_host->tmclk);
1815err_power_req:
1816err_parse_dt:
1817	sdhci_pltfm_free(pdev);
1818	return rc;
1819}
1820
1821static int sdhci_tegra_remove(struct platform_device *pdev)
1822{
1823	struct sdhci_host *host = platform_get_drvdata(pdev);
1824	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1825	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1826
1827	sdhci_remove_host(host, 0);
1828
1829	reset_control_assert(tegra_host->rst);
1830	usleep_range(2000, 4000);
1831
1832	pm_runtime_put_sync_suspend(&pdev->dev);
1833	pm_runtime_force_suspend(&pdev->dev);
1834
1835	clk_disable_unprepare(tegra_host->tmclk);
1836	sdhci_pltfm_free(pdev);
1837
1838	return 0;
1839}
1840
1841static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
1842{
1843	struct sdhci_host *host = dev_get_drvdata(dev);
1844	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1845
1846	clk_disable_unprepare(pltfm_host->clk);
1847
1848	return 0;
1849}
1850
1851static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
1852{
1853	struct sdhci_host *host = dev_get_drvdata(dev);
1854	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1855
1856	return clk_prepare_enable(pltfm_host->clk);
1857}
1858
1859#ifdef CONFIG_PM_SLEEP
1860static int sdhci_tegra_suspend(struct device *dev)
1861{
1862	struct sdhci_host *host = dev_get_drvdata(dev);
1863	int ret;
1864
1865	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1866		ret = cqhci_suspend(host->mmc);
1867		if (ret)
1868			return ret;
1869	}
1870
1871	ret = sdhci_suspend_host(host);
1872	if (ret) {
1873		cqhci_resume(host->mmc);
1874		return ret;
1875	}
1876
1877	ret = pm_runtime_force_suspend(dev);
1878	if (ret) {
1879		sdhci_resume_host(host);
1880		cqhci_resume(host->mmc);
1881		return ret;
1882	}
1883
1884	return mmc_gpio_set_cd_wake(host->mmc, true);
1885}
1886
1887static int sdhci_tegra_resume(struct device *dev)
1888{
1889	struct sdhci_host *host = dev_get_drvdata(dev);
1890	int ret;
1891
1892	ret = mmc_gpio_set_cd_wake(host->mmc, false);
1893	if (ret)
1894		return ret;
1895
1896	ret = pm_runtime_force_resume(dev);
1897	if (ret)
1898		return ret;
1899
1900	sdhci_tegra_program_stream_id(host);
1901
1902	ret = sdhci_resume_host(host);
1903	if (ret)
1904		goto disable_clk;
1905
1906	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1907		ret = cqhci_resume(host->mmc);
1908		if (ret)
1909			goto suspend_host;
1910	}
1911
1912	return 0;
1913
1914suspend_host:
1915	sdhci_suspend_host(host);
1916disable_clk:
1917	pm_runtime_force_suspend(dev);
1918	return ret;
1919}
1920#endif
1921
1922static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
1923	SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
1924			   NULL)
1925	SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
1926};
1927
1928static struct platform_driver sdhci_tegra_driver = {
1929	.driver		= {
1930		.name	= "sdhci-tegra",
1931		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1932		.of_match_table = sdhci_tegra_dt_match,
1933		.pm	= &sdhci_tegra_dev_pm_ops,
1934	},
1935	.probe		= sdhci_tegra_probe,
1936	.remove		= sdhci_tegra_remove,
1937};
1938
1939module_platform_driver(sdhci_tegra_driver);
1940
1941MODULE_DESCRIPTION("SDHCI driver for Tegra");
1942MODULE_AUTHOR("Google, Inc.");
1943MODULE_LICENSE("GPL v2");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2010 Google, Inc.
   4 */
   5
   6#include <linux/delay.h>
   7#include <linux/dma-mapping.h>
   8#include <linux/err.h>
   9#include <linux/module.h>
  10#include <linux/init.h>
  11#include <linux/iopoll.h>
  12#include <linux/platform_device.h>
  13#include <linux/clk.h>
  14#include <linux/io.h>
  15#include <linux/of.h>
  16#include <linux/of_device.h>
  17#include <linux/pinctrl/consumer.h>
  18#include <linux/regulator/consumer.h>
  19#include <linux/reset.h>
  20#include <linux/mmc/card.h>
  21#include <linux/mmc/host.h>
  22#include <linux/mmc/mmc.h>
  23#include <linux/mmc/slot-gpio.h>
  24#include <linux/gpio/consumer.h>
  25#include <linux/ktime.h>
  26
  27#include "sdhci-pltfm.h"
  28#include "cqhci.h"
  29
  30/* Tegra SDHOST controller vendor register definitions */
  31#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
  32#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
  33#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
  34#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
  35#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
  36#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
  37#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
  38#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
  39
  40#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
  41#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
  42
  43#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
  44#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
  45#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
  46
  47#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
  48#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
  49#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
  50#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
  51#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
  52#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
  53
  54#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
  55#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
  56
  57#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
  58#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
  59
  60#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
  61#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
  62#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
  63#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
  64#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
  65#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
  66#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
  67#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
  68#define TRIES_128					2
  69#define TRIES_256					4
  70#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
  71
  72#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
  73#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
  74#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
  75#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
  76#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
  77#define TUNING_WORD_BIT_SIZE				32
  78
  79#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
  80#define SDHCI_AUTO_CAL_START				BIT(31)
  81#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
  82#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
  83
  84#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
  85#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
  86#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
  87#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
  88#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
  89
  90#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
  91#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
  92
  93#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
  94#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
  95#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
  96#define NVQUIRK_ENABLE_SDR50				BIT(3)
  97#define NVQUIRK_ENABLE_SDR104				BIT(4)
  98#define NVQUIRK_ENABLE_DDR50				BIT(5)
  99/*
 100 * HAS_PADCALIB NVQUIRK is for SoC's supporting auto calibration of pads
 101 * drive strength.
 102 */
 103#define NVQUIRK_HAS_PADCALIB				BIT(6)
 104/*
 105 * NEEDS_PAD_CONTROL NVQUIRK is for SoC's having separate 3V3 and 1V8 pads.
 106 * 3V3/1V8 pad selection happens through pinctrl state selection depending
 107 * on the signaling mode.
 108 */
 109#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
 110#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
 111#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
 112
 113/*
 114 * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra
 115 * SDMMC hardware data timeout.
 116 */
 117#define NVQUIRK_HAS_TMCLK				BIT(10)
 118
 119/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
 120#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
 121
 122#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
 123					 SDHCI_TRNS_BLK_CNT_EN | \
 124					 SDHCI_TRNS_DMA)
 125
 126struct sdhci_tegra_soc_data {
 127	const struct sdhci_pltfm_data *pdata;
 128	u64 dma_mask;
 129	u32 nvquirks;
 130	u8 min_tap_delay;
 131	u8 max_tap_delay;
 132};
 133
 134/* Magic pull up and pull down pad calibration offsets */
 135struct sdhci_tegra_autocal_offsets {
 136	u32 pull_up_3v3;
 137	u32 pull_down_3v3;
 138	u32 pull_up_3v3_timeout;
 139	u32 pull_down_3v3_timeout;
 140	u32 pull_up_1v8;
 141	u32 pull_down_1v8;
 142	u32 pull_up_1v8_timeout;
 143	u32 pull_down_1v8_timeout;
 144	u32 pull_up_sdr104;
 145	u32 pull_down_sdr104;
 146	u32 pull_up_hs400;
 147	u32 pull_down_hs400;
 148};
 149
 150struct sdhci_tegra {
 151	const struct sdhci_tegra_soc_data *soc_data;
 152	struct gpio_desc *power_gpio;
 153	struct clk *tmclk;
 154	bool ddr_signaling;
 155	bool pad_calib_required;
 156	bool pad_control_available;
 157
 158	struct reset_control *rst;
 159	struct pinctrl *pinctrl_sdmmc;
 160	struct pinctrl_state *pinctrl_state_3v3;
 161	struct pinctrl_state *pinctrl_state_1v8;
 162	struct pinctrl_state *pinctrl_state_3v3_drv;
 163	struct pinctrl_state *pinctrl_state_1v8_drv;
 164
 165	struct sdhci_tegra_autocal_offsets autocal_offsets;
 166	ktime_t last_calib;
 167
 168	u32 default_tap;
 169	u32 default_trim;
 170	u32 dqs_trim;
 171	bool enable_hwcq;
 172	unsigned long curr_clk_rate;
 173	u8 tuned_tap_delay;
 174};
 175
 176static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
 177{
 178	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 179	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 180	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 181
 182	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
 183			(reg == SDHCI_HOST_VERSION))) {
 184		/* Erratum: Version register is invalid in HW. */
 185		return SDHCI_SPEC_200;
 186	}
 187
 188	return readw(host->ioaddr + reg);
 189}
 190
 191static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 192{
 193	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 194
 195	switch (reg) {
 196	case SDHCI_TRANSFER_MODE:
 197		/*
 198		 * Postpone this write, we must do it together with a
 199		 * command write that is down below.
 200		 */
 201		pltfm_host->xfer_mode_shadow = val;
 202		return;
 203	case SDHCI_COMMAND:
 204		writel((val << 16) | pltfm_host->xfer_mode_shadow,
 205			host->ioaddr + SDHCI_TRANSFER_MODE);
 206		return;
 207	}
 208
 209	writew(val, host->ioaddr + reg);
 210}
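
The shadowing above means the 16-bit transfer-mode and command values are issued as a single 32-bit write to SDHCI_TRANSFER_MODE, with the command in the upper half-word. A toy sketch with made-up register values (0x0023 and 0x123a are arbitrary examples, not values taken from the driver):

#include <stdio.h>

int main(void)
{
	unsigned int xfer_mode = 0x0023;	/* example shadowed transfer mode */
	unsigned int command = 0x123a;		/* example SDHCI_COMMAND value */
	unsigned int combined = (command << 16) | xfer_mode;

	printf("0x%08x\n", combined);		/* prints 0x123a0023 */
	return 0;
}
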
 211
 212static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
 213{
 214	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 215	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 216	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 217
 218	/* Seems like we're getting spurious timeout and crc errors, so
 219	 * disable signalling of them. In case of real errors software
 220	 * timers should take care of eventually detecting them.
 221	 */
 222	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
 223		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
 224
 225	writel(val, host->ioaddr + reg);
 226
 227	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
 228			(reg == SDHCI_INT_ENABLE))) {
 229		/* Erratum: Must enable block gap interrupt detection */
 230		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 231		if (val & SDHCI_INT_CARD_INT)
 232			gap_ctrl |= 0x8;
 233		else
 234			gap_ctrl &= ~0x8;
 235		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 236	}
 237}
 238
 239static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
 240{
 241	bool status;
 242	u32 reg;
 243
 244	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
 245	status = !!(reg & SDHCI_CLOCK_CARD_EN);
 246
 247	if (status == enable)
 248		return status;
 249
 250	if (enable)
 251		reg |= SDHCI_CLOCK_CARD_EN;
 252	else
 253		reg &= ~SDHCI_CLOCK_CARD_EN;
 254
 255	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
 256
 257	return status;
 258}
 259
 260static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 261{
 262	bool is_tuning_cmd = false;
 263	bool clk_enabled;
 264	u8 cmd;
 265
 266	if (reg == SDHCI_COMMAND) {
 267		cmd = SDHCI_GET_CMD(val);
 268		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
 269				cmd == MMC_SEND_TUNING_BLOCK_HS200;
 270	}
 271
 272	if (is_tuning_cmd)
 273		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);
 274
 275	writew(val, host->ioaddr + reg);
 276
 277	if (is_tuning_cmd) {
 278		udelay(1);
 279		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 280		tegra_sdhci_configure_card_clk(host, clk_enabled);
 281	}
 282}
 283
 284static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
 285{
 286	/*
 287	 * Write-enable shall be assumed if GPIO is missing in a board's
 288	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
 289	 * Tegra.
 290	 */
 291	return mmc_gpio_get_ro(host->mmc);
 292}
 293
 294static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
 295{
 296	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 297	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 298	int has_1v8, has_3v3;
 299
 300	/*
 301	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
 302	 * voltage configuration in order to perform voltage switching. This
 303	 * means that valid pinctrl info is required on SDHCI instances capable
 304	 * of performing voltage switching. Whether or not an SDHCI instance is
 305	 * capable of voltage switching is determined based on the regulator.
 306	 */
 307
 308	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 309		return true;
 310
 311	if (IS_ERR(host->mmc->supply.vqmmc))
 312		return false;
 313
 314	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 315						 1700000, 1950000);
 316
 317	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 318						 2700000, 3600000);
 319
 320	if (has_1v8 == 1 && has_3v3 == 1)
 321		return tegra_host->pad_control_available;
 322
 323	/* Fixed voltage, no pad control required. */
 324	return true;
 325}
 326
 327static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
 328{
 329	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 330	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 331	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 332	bool card_clk_enabled = false;
 333	u32 reg;
 334
 335	/*
 336	 * Touching the tap values is a bit tricky on some SoC generations.
 337	 * The quirk enables a workaround for a glitch that sometimes occurs if
 338	 * the tap values are changed.
 339	 */
 340
 341	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
 342		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 343
 344	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 345	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
 346	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
 347	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 348
 349	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
 350	    card_clk_enabled) {
 351		udelay(1);
 352		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 353		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 354	}
 355}
 356
 357static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
 358					      struct mmc_ios *ios)
 359{
 360	struct sdhci_host *host = mmc_priv(mmc);
 361	u32 val;
 362
 363	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 364
 365	if (ios->enhanced_strobe)
 366		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 367	else
 368		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 369
 370	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 371
 372}
 373
 374static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
 375{
 376	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 377	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 378	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 379	u32 misc_ctrl, clk_ctrl, pad_ctrl;
 380
 381	sdhci_reset(host, mask);
 382
 383	if (!(mask & SDHCI_RESET_ALL))
 384		return;
 385
 386	tegra_sdhci_set_tap(host, tegra_host->default_tap);
 387
 388	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 389	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 390
 391	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
 392		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
 393		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
 394		       SDHCI_MISC_CTRL_ENABLE_SDR104);
 395
 396	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
 397		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
 398
 399	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
 400		/* Erratum: Enable SDHCI spec v3.00 support */
 401		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
 402			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
 403		/* Advertise UHS modes as supported by host */
 404		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 405			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
 406		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
 407			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
 408		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
 409			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
 410		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 411			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
 412	}
 413
 414	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
 415
 416	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 417	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 418
 419	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
 420		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 421		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
 422		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
 423		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 424
 425		tegra_host->pad_calib_required = true;
 426	}
 427
 428	tegra_host->ddr_signaling = false;
 429}
 430
 431static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
 432{
 433	u32 val;
 434
 435	/*
 436	 * Enable or disable the additional I/O pad used by the drive strength
 437	 * calibration process.
 438	 */
 439	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 440
 441	if (enable)
 442		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 443	else
 444		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 445
 446	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 447
 448	if (enable)
 449		usleep_range(1, 2);
 450}
 451
 452static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
 453					       u16 pdpu)
 454{
 455	u32 reg;
 456
 457	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 458	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
 459	reg |= pdpu;
 460	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 461}
 462
 463static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
 464				   bool state_drvupdn)
 465{
 466	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 467	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 468	struct sdhci_tegra_autocal_offsets *offsets =
 469						&tegra_host->autocal_offsets;
 470	struct pinctrl_state *pinctrl_drvupdn = NULL;
 471	int ret = 0;
 472	u8 drvup = 0, drvdn = 0;
 473	u32 reg;
 474
 475	if (!state_drvupdn) {
 476		/* PADS Drive Strength */
 477		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 478			if (tegra_host->pinctrl_state_1v8_drv) {
 479				pinctrl_drvupdn =
 480					tegra_host->pinctrl_state_1v8_drv;
 481			} else {
 482				drvup = offsets->pull_up_1v8_timeout;
 483				drvdn = offsets->pull_down_1v8_timeout;
 484			}
 485		} else {
 486			if (tegra_host->pinctrl_state_3v3_drv) {
 487				pinctrl_drvupdn =
 488					tegra_host->pinctrl_state_3v3_drv;
 489			} else {
 490				drvup = offsets->pull_up_3v3_timeout;
 491				drvdn = offsets->pull_down_3v3_timeout;
 492			}
 493		}
 494
 495		if (pinctrl_drvupdn != NULL) {
 496			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 497							pinctrl_drvupdn);
 498			if (ret < 0)
 499				dev_err(mmc_dev(host->mmc),
 500					"failed pads drvupdn, ret: %d\n", ret);
 501		} else if ((drvup) || (drvdn)) {
 502			reg = sdhci_readl(host,
 503					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 504			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
 505			reg |= (drvup << 20) | (drvdn << 12);
 506			sdhci_writel(host, reg,
 507					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 508		}
 509
 510	} else {
 511		/* Dual Voltage PADS Voltage selection */
 512		if (!tegra_host->pad_control_available)
 513			return 0;
 514
 515		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 516			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 517						tegra_host->pinctrl_state_1v8);
 518			if (ret < 0)
 519				dev_err(mmc_dev(host->mmc),
 520					"setting 1.8V failed, ret: %d\n", ret);
 521		} else {
 522			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 523						tegra_host->pinctrl_state_3v3);
 524			if (ret < 0)
 525				dev_err(mmc_dev(host->mmc),
 526					"setting 3.3V failed, ret: %d\n", ret);
 527		}
 528	}
 529
 530	return ret;
 531}
 532
 533static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
 534{
 535	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 536	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 537	struct sdhci_tegra_autocal_offsets offsets =
 538			tegra_host->autocal_offsets;
 539	struct mmc_ios *ios = &host->mmc->ios;
 540	bool card_clk_enabled;
 541	u16 pdpu;
 542	u32 reg;
 543	int ret;
 544
 545	switch (ios->timing) {
 546	case MMC_TIMING_UHS_SDR104:
 547		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
 548		break;
 549	case MMC_TIMING_MMC_HS400:
 550		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
 551		break;
 552	default:
 553		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
 554			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
 555		else
 556			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
 557	}
 558
 559	/* Set initial offset before auto-calibration */
 560	tegra_sdhci_set_pad_autocal_offset(host, pdpu);
 561
 562	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 563
 564	tegra_sdhci_configure_cal_pad(host, true);
 565
 566	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 567	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
 568	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 569
 570	usleep_range(1, 2);
 571	/* 10 ms timeout */
 572	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
 573				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
 574				 1000, 10000);
 575
 576	tegra_sdhci_configure_cal_pad(host, false);
 577
 578	tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 579
 580	if (ret) {
 581		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
 582
 583		/* Disable automatic cal and use fixed Drive Strengths */
 584		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 585		reg &= ~SDHCI_AUTO_CAL_ENABLE;
 586		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 587
 588		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
 589		if (ret < 0)
 590			dev_err(mmc_dev(host->mmc),
 591				"Setting drive strengths failed: %d\n", ret);
 592	}
 593}
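
For reference, the pdpu value computed above packs the pull-down offset into bits [15:8] and the pull-up offset into bits [7:0] of the SDHCI_AUTO_CAL_PDPU_OFFSET_MASK field. A quick sketch with hypothetical offsets (0x7d and 0x00 are made-up numbers, not values from any Tegra device tree):

#include <stdio.h>

int main(void)
{
	unsigned int pull_down_3v3 = 0x7d;	/* hypothetical DT offset */
	unsigned int pull_up_3v3 = 0x00;	/* hypothetical DT offset */
	unsigned int pdpu = pull_down_3v3 << 8 | pull_up_3v3;

	printf("pdpu = 0x%04x\n", pdpu);	/* prints 0x7d00 */
	return 0;
}
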
 594
 595static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
 596{
 597	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 598	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 599	struct sdhci_tegra_autocal_offsets *autocal =
 600			&tegra_host->autocal_offsets;
 601	int err;
 602
 603	err = device_property_read_u32(mmc_dev(host->mmc),
 604			"nvidia,pad-autocal-pull-up-offset-3v3",
 605			&autocal->pull_up_3v3);
 606	if (err)
 607		autocal->pull_up_3v3 = 0;
 608
 609	err = device_property_read_u32(mmc_dev(host->mmc),
 610			"nvidia,pad-autocal-pull-down-offset-3v3",
 611			&autocal->pull_down_3v3);
 612	if (err)
 613		autocal->pull_down_3v3 = 0;
 614
 615	err = device_property_read_u32(mmc_dev(host->mmc),
 616			"nvidia,pad-autocal-pull-up-offset-1v8",
 617			&autocal->pull_up_1v8);
 618	if (err)
 619		autocal->pull_up_1v8 = 0;
 620
 621	err = device_property_read_u32(mmc_dev(host->mmc),
 622			"nvidia,pad-autocal-pull-down-offset-1v8",
 623			&autocal->pull_down_1v8);
 624	if (err)
 625		autocal->pull_down_1v8 = 0;
 626
 627	err = device_property_read_u32(mmc_dev(host->mmc),
 628			"nvidia,pad-autocal-pull-up-offset-sdr104",
 629			&autocal->pull_up_sdr104);
 630	if (err)
 631		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
 632
 633	err = device_property_read_u32(mmc_dev(host->mmc),
 634			"nvidia,pad-autocal-pull-down-offset-sdr104",
 635			&autocal->pull_down_sdr104);
 636	if (err)
 637		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
 638
 639	err = device_property_read_u32(mmc_dev(host->mmc),
 640			"nvidia,pad-autocal-pull-up-offset-hs400",
 641			&autocal->pull_up_hs400);
 642	if (err)
 643		autocal->pull_up_hs400 = autocal->pull_up_1v8;
 644
 645	err = device_property_read_u32(mmc_dev(host->mmc),
 646			"nvidia,pad-autocal-pull-down-offset-hs400",
 647			&autocal->pull_down_hs400);
 648	if (err)
 649		autocal->pull_down_hs400 = autocal->pull_down_1v8;
 650
 651	/*
 652	 * Different fail-safe drive strength values based on the signaling
 653	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
 654	 * So, avoid reading below device tree properties for SoCs that don't
 655	 * have NVQUIRK_NEEDS_PAD_CONTROL.
 656	 */
 657	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 658		return;
 659
 660	err = device_property_read_u32(mmc_dev(host->mmc),
 661			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
 662			&autocal->pull_up_3v3_timeout);
 663	if (err) {
 664		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 665			(tegra_host->pinctrl_state_3v3_drv == NULL))
 666			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 667				mmc_hostname(host->mmc));
 668		autocal->pull_up_3v3_timeout = 0;
 669	}
 670
 671	err = device_property_read_u32(mmc_dev(host->mmc),
 672			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
 673			&autocal->pull_down_3v3_timeout);
 674	if (err) {
 675		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 676			(tegra_host->pinctrl_state_3v3_drv == NULL))
 677			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 678				mmc_hostname(host->mmc));
 679		autocal->pull_down_3v3_timeout = 0;
 680	}
 681
 682	err = device_property_read_u32(mmc_dev(host->mmc),
 683			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
 684			&autocal->pull_up_1v8_timeout);
 685	if (err) {
 686		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 687			(tegra_host->pinctrl_state_1v8_drv == NULL))
 688			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 689				mmc_hostname(host->mmc));
 690		autocal->pull_up_1v8_timeout = 0;
 691	}
 692
 693	err = device_property_read_u32(mmc_dev(host->mmc),
 694			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
 695			&autocal->pull_down_1v8_timeout);
 696	if (err) {
 697		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 698			(tegra_host->pinctrl_state_1v8_drv == NULL))
 699			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 700				mmc_hostname(host->mmc));
 701		autocal->pull_down_1v8_timeout = 0;
 702	}
 703}
 704
 705static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 706{
 707	struct sdhci_host *host = mmc_priv(mmc);
 708	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 709	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 710	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
 711
 712	/* 100 ms calibration interval is specified in the TRM */
 713	if (ktime_to_ms(since_calib) > 100) {
 714		tegra_sdhci_pad_autocalib(host);
 715		tegra_host->last_calib = ktime_get();
 716	}
 717
 718	sdhci_request(mmc, mrq);
 719}
 720
 721static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
 722{
 723	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 724	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 725	int err;
 726
 727	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
 728				       &tegra_host->default_tap);
 729	if (err)
 730		tegra_host->default_tap = 0;
 731
 732	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
 733				       &tegra_host->default_trim);
 734	if (err)
 735		tegra_host->default_trim = 0;
 736
 737	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
 738				       &tegra_host->dqs_trim);
 739	if (err)
 740		tegra_host->dqs_trim = 0x11;
 741}
 742
 743static void tegra_sdhci_parse_dt(struct sdhci_host *host)
 744{
 745	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 746	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 747
 748	if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
 749		tegra_host->enable_hwcq = true;
 750	else
 751		tegra_host->enable_hwcq = false;
 752
 753	tegra_sdhci_parse_pad_autocal_dt(host);
 754	tegra_sdhci_parse_tap_and_trim(host);
 755}
 756
 757static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 758{
 759	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 760	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 761	unsigned long host_clk;
 762
 763	if (!clock)
 764		return sdhci_set_clock(host, clock);
 765
 766	/*
 767	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
 768	 * divider to be configured to divide the host clock by two. The SDHCI
 769	 * clock divider is calculated as part of sdhci_set_clock() by
 770	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
 771	 * the requested clock rate.
 772	 *
 773	 * By setting the host->max_clk to clock * 2 the divider calculation
 774	 * will always result in the correct value for DDR50/52 modes,
 775	 * regardless of clock rate rounding, which may happen if the value
 776	 * from clk_get_rate() is used.
 777	 */
 778	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
 779	clk_set_rate(pltfm_host->clk, host_clk);
 780	tegra_host->curr_clk_rate = host_clk;
 781	if (tegra_host->ddr_signaling)
 782		host->max_clk = host_clk;
 783	else
 784		host->max_clk = clk_get_rate(pltfm_host->clk);
 785
 786	sdhci_set_clock(host, clock);
 787
 788	if (tegra_host->pad_calib_required) {
 789		tegra_sdhci_pad_autocalib(host);
 790		tegra_host->pad_calib_required = false;
 791	}
 792}
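
To make the DDR50/52 trick above concrete: reporting host->max_clk as twice the requested rate forces the SDHCI divider to 2, so the card still sees the requested rate. A worked example assuming a 52 MHz DDR52 request (the 52 MHz figure is an assumption for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int clock = 52000000;		/* requested card clock */
	unsigned int host_clk = clock * 2;	/* rate programmed into the SDMMC clock */
	unsigned int div = host_clk / clock;	/* divider sdhci_calc_clk() ends up with */

	printf("host_clk=%u div=%u card_clock=%u\n",
	       host_clk, div, host_clk / div);	/* 104000000 2 52000000 */
	return 0;
}
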
 793
 794static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
 795{
 796	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 797
 798	return clk_round_rate(pltfm_host->clk, UINT_MAX);
 799}
 800
 801static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
 802{
 803	u32 val;
 804
 805	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 806	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
 807	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
 808	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 809}
 810
 811static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
 812{
 813	u32 reg;
 814	int err;
 815
 816	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 817	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
 818	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 819
 820	/* 1 ms sleep, 5 ms timeout */
 821	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
 822				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
 823				 1000, 5000);
 824	if (err)
 825		dev_err(mmc_dev(host->mmc),
 826			"HS400 delay line calibration timed out\n");
 827}
 828
 829static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
 830				       u8 thd_low, u8 fixed_tap)
 831{
 832	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 833	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 834	u32 val, tun_status;
 835	u8 word, bit, edge1, tap, window;
 836	bool tap_result;
 837	bool start_fail = false;
 838	bool start_pass = false;
 839	bool end_pass = false;
 840	bool first_fail = false;
 841	bool first_pass = false;
 842	u8 start_pass_tap = 0;
 843	u8 end_pass_tap = 0;
 844	u8 first_fail_tap = 0;
 845	u8 first_pass_tap = 0;
 846	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
 847
 848	/*
 849	 * Read the auto-tuned results and extract a good, valid passing window
 850	 * by filtering out unwanted bubble/partial/merged windows.
 851	 */
 852	for (word = 0; word < total_tuning_words; word++) {
 853		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
 854		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
 855		val |= word;
 856		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
 857		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
 858		bit = 0;
 859		while (bit < TUNING_WORD_BIT_SIZE) {
 860			tap = word * TUNING_WORD_BIT_SIZE + bit;
 861			tap_result = tun_status & (1 << bit);
 862			if (!tap_result && !start_fail) {
 863				start_fail = true;
 864				if (!first_fail) {
 865					first_fail_tap = tap;
 866					first_fail = true;
 867				}
 868
 869			} else if (tap_result && start_fail && !start_pass) {
 870				start_pass_tap = tap;
 871				start_pass = true;
 872				if (!first_pass) {
 873					first_pass_tap = tap;
 874					first_pass = true;
 875				}
 876
 877			} else if (!tap_result && start_fail && start_pass &&
 878				   !end_pass) {
 879				end_pass_tap = tap - 1;
 880				end_pass = true;
 881			} else if (tap_result && start_pass && start_fail &&
 882				   end_pass) {
 883				window = end_pass_tap - start_pass_tap;
 884				/* discard merged window and bubble window */
 885				if (window >= thd_up || window < thd_low) {
 886					start_pass_tap = tap;
 887					end_pass = false;
 888				} else {
 889					/* set tap at middle of valid window */
 890					tap = start_pass_tap + window / 2;
 891					tegra_host->tuned_tap_delay = tap;
 892					return;
 893				}
 894			}
 895
 896			bit++;
 897		}
 898	}
 899
 900	if (!first_fail) {
 901		WARN(1, "no edge detected, continue with hw tuned delay.\n");
 902	} else if (first_pass) {
 903		/* set tap location at fixed tap relative to the first edge */
 904		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
 905		if (edge1 - 1 > fixed_tap)
 906			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
 907		else
 908			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
 909	}
 910}
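
To make the filtering above easier to follow, here is a stripped-down, self-contained sketch that scans a single 32-bit tuning-status word for a fail/pass/fail pattern and returns the middle tap of the first passing window that is neither a bubble (narrower than thd_low) nor a merged window (at least thd_up wide). It only illustrates the idea; the driver's version also walks multiple status words and handles the single-edge case. pick_tap and the example status word are hypothetical, not part of the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Return the tap in the middle of the first valid passing window, or -1. */
static int pick_tap(uint32_t tun_status, unsigned int thd_up, unsigned int thd_low)
{
	int start_pass = -1, first_fail = -1;
	unsigned int tap;

	for (tap = 0; tap < 32; tap++) {
		bool pass = tun_status & (1u << tap);

		if (!pass && first_fail < 0) {
			first_fail = tap;		/* need a failing edge first */
		} else if (pass && first_fail >= 0 && start_pass < 0) {
			start_pass = tap;		/* passing window opens */
		} else if (!pass && start_pass >= 0) {
			unsigned int window = tap - 1 - start_pass;

			if (window >= thd_up || window < thd_low)
				start_pass = -1;	/* merged or bubble: discard */
			else
				return start_pass + window / 2;
		}
	}

	return -1;
}

int main(void)
{
	/* taps 0-2 pass, 3-5 fail, 6-20 pass, 21-31 fail: window width 14 */
	uint32_t status = 0x001fffc7;

	printf("tuned tap = %d\n", pick_tap(status, 40, 4));	/* prints 13 */
	return 0;
}
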
 911
 912static void tegra_sdhci_post_tuning(struct sdhci_host *host)
 913{
 914	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 915	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 916	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 917	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
 918	u8 fixed_tap, start_tap, end_tap, window_width;
 919	u8 thdupper, thdlower;
 920	u8 num_iter;
 921	u32 clk_rate_mhz, period_ps, bestcase, worstcase;
 922
 923	/* retain the HW-tuned tap to use in case no correction is needed */
 924	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 925	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
 926				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
 927	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
 928		min_tap_dly = soc_data->min_tap_delay;
 929		max_tap_dly = soc_data->max_tap_delay;
 930		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
 931		period_ps = USEC_PER_SEC / clk_rate_mhz;
 932		bestcase = period_ps / min_tap_dly;
 933		worstcase = period_ps / max_tap_dly;
 934		/*
 935		 * Upper and Lower bound thresholds used to detect merged and
 936		 * bubble windows
 937		 */
 938		thdupper = (2 * worstcase + bestcase) / 2;
 939		thdlower = worstcase / 4;
 940		/*
 941		 * fixed tap is used when HW tuning result contains single edge
 942		 * and tap is set at fixed tap delay relative to the first edge
 943		 */
 944		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
 945		fixed_tap = avg_tap_dly / 2;
 946
 947		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
 948		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 949		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
 950			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 951		window_width = end_tap - start_tap;
 952		num_iter = host->tuning_loop_count;
 953		/*
 954		 * A partial window includes the edges of the tuning range.
 955		 * A merged window includes more taps, so its width is higher
 956		 * than the upper threshold.
 957		 */
 958		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
 959		    (end_tap == num_iter - 2) || window_width >= thdupper) {
 960			pr_debug("%s: Apply tuning correction\n",
 961				 mmc_hostname(host->mmc));
 962			tegra_sdhci_tap_correction(host, thdupper, thdlower,
 963						   fixed_tap);
 964		}
 965	}
 966
 967	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
 968}
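
The threshold arithmetic above is easier to see with numbers. Using the Tegra194 bounds from soc_data_tegra194 (min_tap_delay 96, max_tap_delay 139) and assuming a 200 MHz tuning clock (the clock rate is an assumption for illustration), the same integer math gives:

#include <stdio.h>

int main(void)
{
	unsigned int min_tap_dly = 96, max_tap_dly = 139;	/* Tegra194, in ps */
	unsigned int clk_rate_mhz = 200000000 / 1000000;	/* assumed 200 MHz */
	unsigned int period_ps = 1000000 / clk_rate_mhz;	/* 5000 */
	unsigned int bestcase = period_ps / min_tap_dly;	/* 52 taps/period */
	unsigned int worstcase = period_ps / max_tap_dly;	/* 35 taps/period */
	unsigned int thdupper = (2 * worstcase + bestcase) / 2;	/* 61 */
	unsigned int thdlower = worstcase / 4;			/* 8 */
	unsigned int avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);	/* 42 */
	unsigned int fixed_tap = avg_tap_dly / 2;		/* 21 */

	printf("thdupper=%u thdlower=%u fixed_tap=%u\n",
	       thdupper, thdlower, fixed_tap);
	return 0;
}
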
 969
 970static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
 971{
 972	struct sdhci_host *host = mmc_priv(mmc);
 973	int err;
 974
 975	err = sdhci_execute_tuning(mmc, opcode);
 976	if (!err && !host->tuning_err)
 977		tegra_sdhci_post_tuning(host);
 978
 979	return err;
 980}
 981
 982static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
 983					  unsigned timing)
 984{
 985	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 986	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 987	bool set_default_tap = false;
 988	bool set_dqs_trim = false;
 989	bool do_hs400_dll_cal = false;
 990	u8 iter = TRIES_256;
 991	u32 val;
 992
 993	tegra_host->ddr_signaling = false;
 994	switch (timing) {
 995	case MMC_TIMING_UHS_SDR50:
 996		break;
 997	case MMC_TIMING_UHS_SDR104:
 998	case MMC_TIMING_MMC_HS200:
 999		/* Don't set default tap on tunable modes. */
1000		iter = TRIES_128;
1001		break;
1002	case MMC_TIMING_MMC_HS400:
1003		set_dqs_trim = true;
1004		do_hs400_dll_cal = true;
1005		iter = TRIES_128;
1006		break;
1007	case MMC_TIMING_MMC_DDR52:
1008	case MMC_TIMING_UHS_DDR50:
1009		tegra_host->ddr_signaling = true;
1010		set_default_tap = true;
1011		break;
1012	default:
1013		set_default_tap = true;
1014		break;
1015	}
1016
1017	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1018	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
1019		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1020		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1021	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1022		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1023		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1024	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1025	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1026
1027	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1028
1029	sdhci_set_uhs_signaling(host, timing);
1030
1031	tegra_sdhci_pad_autocalib(host);
1032
1033	if (tegra_host->tuned_tap_delay && !set_default_tap)
1034		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1035	else
1036		tegra_sdhci_set_tap(host, tegra_host->default_tap);
1037
1038	if (set_dqs_trim)
1039		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1040
1041	if (do_hs400_dll_cal)
1042		tegra_sdhci_hs400_dll_cal(host);
1043}
1044
1045static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1046{
1047	unsigned int min, max;
1048
1049	/*
1050	 * Start the search for the minimum tap value at 10, as smaller values
1051	 * may wrongly be reported as working but fail at higher speeds,
1052	 * according to the TRM.
1053	 */
1054	min = 10;
1055	while (min < 255) {
1056		tegra_sdhci_set_tap(host, min);
1057		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1058			break;
1059		min++;
1060	}
1061
1062	/* Find the maximum tap value that still passes. */
1063	max = min + 1;
1064	while (max < 255) {
1065		tegra_sdhci_set_tap(host, max);
1066		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1067			max--;
1068			break;
1069		}
1070		max++;
1071	}
1072
1073	/* The TRM states the ideal tap value is at 75% in the passing range. */
1074	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1075
1076	return mmc_send_tuning(host->mmc, opcode, NULL);
1077}
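
As a worked example of the 75% rule above: if the scan finds that taps 40 through 80 pass, min ends up at 40 and max at 80, and the tap programmed is 40 + (80 - 40) * 3 / 4 = 70, i.e. three quarters of the way into the passing range (the 40/80 figures are assumptions for illustration).
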
1078
1079static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1080						   struct mmc_ios *ios)
1081{
1082	struct sdhci_host *host = mmc_priv(mmc);
1083	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1084	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1085	int ret = 0;
1086
1087	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1088		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1089		if (ret < 0)
1090			return ret;
1091		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1092	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1093		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1094		if (ret < 0)
1095			return ret;
1096		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1097	}
1098
1099	if (tegra_host->pad_calib_required)
1100		tegra_sdhci_pad_autocalib(host);
1101
1102	return ret;
1103}
1104
1105static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1106					 struct sdhci_tegra *tegra_host)
1107{
1108	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1109	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1110		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1111			PTR_ERR(tegra_host->pinctrl_sdmmc));
1112		return -1;
1113	}
1114
1115	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1116				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1117	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1118		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1119			tegra_host->pinctrl_state_1v8_drv = NULL;
1120	}
1121
1122	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1123				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1124	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1125		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1126			tegra_host->pinctrl_state_3v3_drv = NULL;
1127	}
1128
1129	tegra_host->pinctrl_state_3v3 =
1130		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1131	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1132		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1133			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1134		return -1;
1135	}
1136
1137	tegra_host->pinctrl_state_1v8 =
1138		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1139	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1140		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1141			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1142		return -1;
1143	}
1144
1145	tegra_host->pad_control_available = true;
1146
1147	return 0;
1148}
1149
1150static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1151{
1152	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1153	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1154	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1155
1156	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1157		tegra_host->pad_calib_required = true;
1158}
1159
1160static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1161{
1162	struct mmc_host *mmc = cq_host->mmc;
1163	struct sdhci_host *host = mmc_priv(mmc);
1164	u8 ctrl;
1165	ktime_t timeout;
1166	bool timed_out;
1167
1168	/*
1169	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
1170	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
1171	 * to be re-configured.
1172	 * Tegra CQHCI/SDHCI prevents write access to the block size register
1173	 * when CQE is unhalted. So handle the CQE resume sequence here to
1174	 * configure the SDHCI block registers prior to exiting the CQE halt state.
1175	 */
1176	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1177	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1178		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1179		sdhci_cqe_enable(mmc);
1180		writel(val, cq_host->mmio + reg);
1181		timeout = ktime_add_us(ktime_get(), 50);
1182		while (1) {
1183			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1184			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1185			if (!(ctrl & CQHCI_HALT) || timed_out)
1186				break;
1187		}
1188		/*
1189		 * CQE usually resumes very quickly, but in case the Tegra CQE
1190		 * doesn't resume, retry the unhalt.
1191		 */
1192		if (timed_out)
1193			writel(val, cq_host->mmio + reg);
1194	} else {
1195		writel(val, cq_host->mmio + reg);
1196	}
1197}
1198
1199static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1200					 struct mmc_request *mrq, u64 *data)
1201{
1202	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1203	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1204	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1205
1206	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1207	    mrq->cmd->flags & MMC_RSP_R1B)
1208		*data |= CQHCI_CMD_TIMING(1);
1209}
1210
1211static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1212{
1213	struct cqhci_host *cq_host = mmc->cqe_private;
1214	struct sdhci_host *host = mmc_priv(mmc);
1215	u32 val;
1216
1217	/*
1218	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
1219	 * register when CQE is enabled and unhalted.
1220	 * The CQHCI driver enables CQE prior to activation, so disable CQE before
1221	 * programming the block size in the SDHCI controller, then re-enable it.
1222	 */
1223	if (!cq_host->activated) {
1224		val = cqhci_readl(cq_host, CQHCI_CFG);
1225		if (val & CQHCI_ENABLE)
1226			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1227				     CQHCI_CFG);
1228		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1229		sdhci_cqe_enable(mmc);
1230		if (val & CQHCI_ENABLE)
1231			cqhci_writel(cq_host, val, CQHCI_CFG);
1232	}
1233
1234	/*
1235	 * CMD CRC errors are sometimes seen with some eMMC devices when the
1236	 * status command is sent during transfer of the last data block, which
1237	 * is the default case since the send-status-command block counter (CBC)
1238	 * is 1. The recommended fix is to set CBC to 0, allowing the status
1239	 * command only when the data lines are idle.
1240	 */
1241	val = cqhci_readl(cq_host, CQHCI_SSC1);
1242	val &= ~CQHCI_SSC1_CBC_MASK;
1243	cqhci_writel(cq_host, val, CQHCI_SSC1);
1244}
1245
1246static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1247{
1248	sdhci_dumpregs(mmc_priv(mmc));
1249}
1250
1251static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1252{
1253	int cmd_error = 0;
1254	int data_error = 0;
1255
1256	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1257		return intmask;
1258
1259	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1260
1261	return 0;
1262}
1263
1264static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1265				    struct mmc_command *cmd)
1266{
1267	u32 val;
1268
1269	/*
1270	 * HW busy detection timeout is based on programmed data timeout
1271	 * counter and maximum supported timeout is 11s which may not be
1272	 * enough for long operations like cache flush, sleep awake, erase.
1273	 *
1274	 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
1275	 * host controller to wait for busy state until the card is busy
1276	 * without HW timeout.
1277	 *
1278	 * So, use infinite busy wait mode for operations that may take
1279	 * more than maximum HW busy timeout of 11s otherwise use finite
1280	 * busy wait mode.
1281	 */
1282	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1283	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
1284		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1285	else
1286		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1287	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1288
1289	__sdhci_set_timeout(host, cmd);
1290}
1291
1292static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
1293{
1294	struct cqhci_host *cq_host = mmc->cqe_private;
1295	u32 reg;
1296
1297	reg = cqhci_readl(cq_host, CQHCI_CFG);
1298	reg |= CQHCI_ENABLE;
1299	cqhci_writel(cq_host, reg, CQHCI_CFG);
1300}
1301
1302static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
1303{
1304	struct cqhci_host *cq_host = mmc->cqe_private;
1305	struct sdhci_host *host = mmc_priv(mmc);
1306	u32 reg;
1307
1308	reg = cqhci_readl(cq_host, CQHCI_CFG);
1309	reg &= ~CQHCI_ENABLE;
1310	cqhci_writel(cq_host, reg, CQHCI_CFG);
1311	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1312}
1313
1314static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1315	.write_l    = tegra_cqhci_writel,
1316	.enable	= sdhci_tegra_cqe_enable,
1317	.disable = sdhci_cqe_disable,
1318	.dumpregs = sdhci_tegra_dumpregs,
1319	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1320	.pre_enable = sdhci_tegra_cqe_pre_enable,
1321	.post_disable = sdhci_tegra_cqe_post_disable,
1322};
1323
1324static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1325{
1326	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1327	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1328	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1329	struct device *dev = mmc_dev(host->mmc);
1330
1331	if (soc->dma_mask)
1332		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1333
1334	return 0;
1335}
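
For reference, dma_mask here is the per-SoC addressable range (e.g. DMA_BIT_MASK(34) on Tegra124/210, DMA_BIT_MASK(39) or (40) on later chips), and for n < 64 DMA_BIT_MASK(n) is simply the n-bit all-ones value. A tiny standalone sketch (BIT_MASK_N is a hypothetical stand-in for the kernel macro):

#include <inttypes.h>
#include <stdio.h>

#define BIT_MASK_N(n)	((1ULL << (n)) - 1)	/* DMA_BIT_MASK(n) shape for n < 64 */

int main(void)
{
	printf("34-bit: 0x%" PRIx64 "\n", (uint64_t)BIT_MASK_N(34));	/* 0x3ffffffff */
	printf("39-bit: 0x%" PRIx64 "\n", (uint64_t)BIT_MASK_N(39));	/* 0x7fffffffff */
	printf("40-bit: 0x%" PRIx64 "\n", (uint64_t)BIT_MASK_N(40));	/* 0xffffffffff */
	return 0;
}
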
1336
1337static const struct sdhci_ops tegra_sdhci_ops = {
1338	.get_ro     = tegra_sdhci_get_ro,
1339	.read_w     = tegra_sdhci_readw,
1340	.write_l    = tegra_sdhci_writel,
1341	.set_clock  = tegra_sdhci_set_clock,
1342	.set_dma_mask = tegra_sdhci_set_dma_mask,
1343	.set_bus_width = sdhci_set_bus_width,
1344	.reset      = tegra_sdhci_reset,
1345	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1346	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1347	.voltage_switch = tegra_sdhci_voltage_switch,
1348	.get_max_clock = tegra_sdhci_get_max_clock,
1349};
1350
1351static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1352	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1353		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1354		  SDHCI_QUIRK_NO_HISPD_BIT |
1355		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1356		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1357	.ops  = &tegra_sdhci_ops,
1358};
1359
1360static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1361	.pdata = &sdhci_tegra20_pdata,
1362	.dma_mask = DMA_BIT_MASK(32),
1363	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1364		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1365};
1366
1367static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1368	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1369		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1370		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1371		  SDHCI_QUIRK_NO_HISPD_BIT |
1372		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1373		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1374	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1375		   SDHCI_QUIRK2_BROKEN_HS200 |
1376		   /*
1377		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1378		    * though no command operation was in progress."
1379		    *
1380		    * The exact reason is unknown, as the same hardware seems
1381		    * to support Auto CMD23 on a downstream 3.1 kernel.
1382		    */
1383		   SDHCI_QUIRK2_ACMD23_BROKEN,
1384	.ops  = &tegra_sdhci_ops,
1385};
1386
1387static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1388	.pdata = &sdhci_tegra30_pdata,
1389	.dma_mask = DMA_BIT_MASK(32),
1390	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1391		    NVQUIRK_ENABLE_SDR50 |
1392		    NVQUIRK_ENABLE_SDR104 |
 
1393		    NVQUIRK_HAS_PADCALIB,
1394};
1395
1396static const struct sdhci_ops tegra114_sdhci_ops = {
1397	.get_ro     = tegra_sdhci_get_ro,
1398	.read_w     = tegra_sdhci_readw,
1399	.write_w    = tegra_sdhci_writew,
1400	.write_l    = tegra_sdhci_writel,
1401	.set_clock  = tegra_sdhci_set_clock,
1402	.set_dma_mask = tegra_sdhci_set_dma_mask,
1403	.set_bus_width = sdhci_set_bus_width,
1404	.reset      = tegra_sdhci_reset,
1405	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1406	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1407	.voltage_switch = tegra_sdhci_voltage_switch,
1408	.get_max_clock = tegra_sdhci_get_max_clock,
1409};
1410
1411static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1412	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1413		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1414		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1415		  SDHCI_QUIRK_NO_HISPD_BIT |
1416		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1417		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1418	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1419	.ops  = &tegra114_sdhci_ops,
1420};
1421
1422static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1423	.pdata = &sdhci_tegra114_pdata,
1424	.dma_mask = DMA_BIT_MASK(32),
 
1425};
1426
1427static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1428	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1429		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1430		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1431		  SDHCI_QUIRK_NO_HISPD_BIT |
1432		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1433		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1434	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1435	.ops  = &tegra114_sdhci_ops,
1436};
1437
1438static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1439	.pdata = &sdhci_tegra124_pdata,
1440	.dma_mask = DMA_BIT_MASK(34),
 
1441};
1442
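/*
 * Tegra210 and later use the hardware tuning engine: these ops deliberately
 * omit .platform_execute_tuning, so probe wires up
 * tegra_sdhci_execute_hw_tuning() as the mmc_host_ops tuning callback instead.
 */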
static const struct sdhci_ops tegra210_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_w    = tegra210_sdhci_writew,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra210_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
	.pdata = &sdhci_tegra210_pdata,
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 106,
	.max_tap_delay = 185,
};

static const struct sdhci_ops tegra186_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.irq = sdhci_tegra_cqhci_irq,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra186_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(40),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK |
		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
	.min_tap_delay = 84,
	.max_tap_delay = 136,
};

static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(39),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 96,
	.max_tap_delay = 139,
};

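/*
 * OF match table: the per-SoC data selects the platform data, NVQUIRK flags
 * and DMA mask applied at probe time.
 */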
static const struct of_device_id sdhci_tegra_dt_match[] = {
	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
	{}
};
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);

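/*
 * Register the SDHCI host. When hardware command queueing is enabled for this
 * host (tegra_host->enable_hwcq), bring the host up in two stages
 * (sdhci_setup_host() + __sdhci_add_host()) so a CQHCI context can be
 * initialized in between.
 */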
static int sdhci_tegra_add_host(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	int ret;

	if (!tegra_host->enable_hwcq)
		return sdhci_add_host(host);

	sdhci_enable_v4_mode(host);

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	cq_host = devm_kzalloc(mmc_dev(host->mmc),
				sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host) {
		ret = -ENOMEM;
		goto cleanup;
	}

	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
	cq_host->ops = &sdhci_tegra_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	if (dma64)
		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}

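/*
 * Probe: match the SoC data, then acquire the resources the controller needs:
 * an optional "power" GPIO, the module clock, the "sdhci" reset and, on SoCs
 * with NVQUIRK_HAS_TMCLK, the separate "tmclk" timeout clock.
 */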
static int sdhci_tegra_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct sdhci_tegra_soc_data *soc_data;
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_tegra *tegra_host;
	struct clk *clk;
	int rc;

	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
	if (!match)
		return -EINVAL;
	soc_data = match->data;

	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
	if (IS_ERR(host))
		return PTR_ERR(host);
	pltfm_host = sdhci_priv(host);

	tegra_host = sdhci_pltfm_priv(pltfm_host);
	tegra_host->ddr_signaling = false;
	tegra_host->pad_calib_required = false;
	tegra_host->pad_control_available = false;
	tegra_host->soc_data = soc_data;

	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
		if (rc == 0)
			host->mmc_host_ops.start_signal_voltage_switch =
				sdhci_tegra_start_signal_voltage_switch;
	}

	/* Hook to periodically rerun pad calibration */
	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
		host->mmc_host_ops.request = tegra_sdhci_request;

	host->mmc_host_ops.hs400_enhanced_strobe =
			tegra_sdhci_hs400_enhanced_strobe;

	if (!host->ops->platform_execute_tuning)
		host->mmc_host_ops.execute_tuning =
				tegra_sdhci_execute_hw_tuning;

	rc = mmc_of_parse(host->mmc);
	if (rc)
		goto err_parse_dt;

	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
		host->mmc->caps |= MMC_CAP_1_8V_DDR;

	/* HW busy detection is supported, but R1B responses are required. */
	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	tegra_sdhci_parse_dt(host);

	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
							 GPIOD_OUT_HIGH);
	if (IS_ERR(tegra_host->power_gpio)) {
		rc = PTR_ERR(tegra_host->power_gpio);
		goto err_power_req;
	}

	/*
	 * Tegra210 has a separate SDMMC_LEGACY_TM clock that is used as the
	 * host timeout clock. Software can select either TMCLK or SDCLK for
	 * the hardware data timeout via the USE_TMCLK_FOR_DATA_TIMEOUT bit
	 * of the SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
	 *
	 * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, so SDMMC uses the
	 * 12 MHz TMCLK, which is advertised in the host capability register.
	 * With a 12 MHz TMCLK the maximum achievable data timeout period is
	 * about 11 s, which is better than what SDCLK can provide.
	 *
	 * TMCLK is therefore set to 12 MHz and kept enabled at all times on
	 * SoCs that support the separate TMCLK.
	 */

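	/*
	 * Only the TMCLK rate is programmed below; this path appears to rely
	 * on the power-on default of USE_TMCLK_FOR_DATA_TIMEOUT (1) rather
	 * than writing that bit explicitly.
	 */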
	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
		clk = devm_clk_get(&pdev->dev, "tmclk");
		if (IS_ERR(clk)) {
			rc = PTR_ERR(clk);
			if (rc == -EPROBE_DEFER)
				goto err_power_req;

			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
			clk = NULL;
		}

		clk_set_rate(clk, 12000000);
		rc = clk_prepare_enable(clk);
		if (rc) {
			dev_err(&pdev->dev,
				"failed to enable tmclk: %d\n", rc);
			goto err_power_req;
		}

		tegra_host->tmclk = clk;
	}

	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
	if (IS_ERR(clk)) {
		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
				   "failed to get clock\n");
		goto err_clk_get;
	}
	clk_prepare_enable(clk);
	pltfm_host->clk = clk;

	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
							   "sdhci");
	if (IS_ERR(tegra_host->rst)) {
		rc = PTR_ERR(tegra_host->rst);
		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
		goto err_rst_get;
	}

	rc = reset_control_assert(tegra_host->rst);
	if (rc)
		goto err_rst_get;

	usleep_range(2000, 4000);

	rc = reset_control_deassert(tegra_host->rst);
	if (rc)
		goto err_rst_get;

	usleep_range(2000, 4000);

	rc = sdhci_tegra_add_host(host);
	if (rc)
		goto err_add_host;

	return 0;

err_add_host:
	reset_control_assert(tegra_host->rst);
err_rst_get:
	clk_disable_unprepare(pltfm_host->clk);
err_clk_get:
	clk_disable_unprepare(tegra_host->tmclk);
err_power_req:
err_parse_dt:
	sdhci_pltfm_free(pdev);
	return rc;
}

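/*
 * Tear down in reverse order: unregister the host, put the controller back
 * into reset and gate its clocks.
 */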
static int sdhci_tegra_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_remove_host(host, 0);

	reset_control_assert(tegra_host->rst);
	usleep_range(2000, 4000);
	clk_disable_unprepare(pltfm_host->clk);
	clk_disable_unprepare(tegra_host->tmclk);

	sdhci_pltfm_free(pdev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
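/*
 * System sleep: quiesce CQHCI (if enabled) before the SDHCI core, then gate
 * the module clock; resume reverses the order.
 */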
static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(host->mmc);
		if (ret)
			return ret;
	}

	ret = sdhci_suspend_host(host);
	if (ret) {
		cqhci_resume(host->mmc);
		return ret;
	}

	clk_disable_unprepare(pltfm_host->clk);
	return 0;
}

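/* Restore the module clock first; the controller cannot be touched without it. */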
static int __maybe_unused sdhci_tegra_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	ret = clk_prepare_enable(pltfm_host->clk);
	if (ret)
		return ret;

	ret = sdhci_resume_host(host);
	if (ret)
		goto disable_clk;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_resume(host->mmc);
		if (ret)
			goto suspend_host;
	}

	return 0;

suspend_host:
	sdhci_suspend_host(host);
disable_clk:
	clk_disable_unprepare(pltfm_host->clk);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
			 sdhci_tegra_resume);

static struct platform_driver sdhci_tegra_driver = {
	.driver		= {
		.name	= "sdhci-tegra",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_tegra_dt_match,
		.pm	= &sdhci_tegra_dev_pm_ops,
	},
	.probe		= sdhci_tegra_probe,
	.remove		= sdhci_tegra_remove,
};

module_platform_driver(sdhci_tegra_driver);

MODULE_DESCRIPTION("SDHCI driver for Tegra");
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL v2");