   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2010 Google, Inc.
   4 */
   5
   6#include <linux/bitfield.h>
   7#include <linux/clk.h>
   8#include <linux/delay.h>
   9#include <linux/dma-mapping.h>
  10#include <linux/err.h>
  11#include <linux/gpio/consumer.h>
  12#include <linux/init.h>
  13#include <linux/io.h>
  14#include <linux/iommu.h>
  15#include <linux/iopoll.h>
  16#include <linux/ktime.h>
  17#include <linux/mmc/card.h>
  18#include <linux/mmc/host.h>
  19#include <linux/mmc/mmc.h>
  20#include <linux/mmc/slot-gpio.h>
  21#include <linux/module.h>
  22#include <linux/of_device.h>
  23#include <linux/of.h>
  24#include <linux/pinctrl/consumer.h>
  25#include <linux/platform_device.h>
  26#include <linux/pm_opp.h>
  27#include <linux/pm_runtime.h>
  28#include <linux/regulator/consumer.h>
  29#include <linux/reset.h>
  30
  31#include <soc/tegra/common.h>
  32
  33#include "sdhci-cqhci.h"
  34#include "sdhci-pltfm.h"
  35#include "cqhci.h"
  36
  37/* Tegra SDHOST controller vendor register definitions */
  38#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
  39#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
  40#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
  41#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
  42#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
  43#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
  44#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
  45#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
  46
  47#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
  48#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
  49
  50#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
  51#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
  52#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
  53
  54#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
  55#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
  56#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
  57#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
  58#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
  59#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
  60
  61#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
  62#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
  63
  64#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
  65#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
  66
  67#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
  68#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
  69#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
  70#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
  71#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
  72#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
  73#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
  74#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
  75#define TRIES_128					2
  76#define TRIES_256					4
  77#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
  78
  79#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
  80#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
  81#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
  82#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
  83#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
  84#define TUNING_WORD_BIT_SIZE				32
  85
  86#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
  87#define SDHCI_AUTO_CAL_START				BIT(31)
  88#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
  89#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
  90
  91#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
  92#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
  93#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
  94#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
  95#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
  96
  97#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
  98#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
  99
 100#define SDHCI_TEGRA_CIF2AXI_CTRL_0			0x1fc
 101
 102#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
 103#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
 104#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
 105#define NVQUIRK_ENABLE_SDR50				BIT(3)
 106#define NVQUIRK_ENABLE_SDR104				BIT(4)
 107#define NVQUIRK_ENABLE_DDR50				BIT(5)
 108/*
  109 * HAS_PADCALIB NVQUIRK is for SoCs that support automatic calibration of
  110 * pad drive strength.
 111 */
 112#define NVQUIRK_HAS_PADCALIB				BIT(6)
 113/*
  114 * NEEDS_PAD_CONTROL NVQUIRK is for SoCs that have separate 3V3 and 1V8 pads.
 115 * 3V3/1V8 pad selection happens through pinctrl state selection depending
 116 * on the signaling mode.
 117 */
 118#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
 119#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
 120#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
 121
 122/*
  123 * NVQUIRK_HAS_TMCLK is for SoCs that have a separate timeout clock for the
  124 * Tegra SDMMC hardware data timeout.
 125 */
 126#define NVQUIRK_HAS_TMCLK				BIT(10)
 127
 128#define NVQUIRK_HAS_ANDROID_GPT_SECTOR			BIT(11)
 129#define NVQUIRK_PROGRAM_STREAMID			BIT(12)
 130
 131/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
 132#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
 133
 134#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
 135					 SDHCI_TRNS_BLK_CNT_EN | \
 136					 SDHCI_TRNS_DMA)
 137
 138struct sdhci_tegra_soc_data {
 139	const struct sdhci_pltfm_data *pdata;
 140	u64 dma_mask;
 141	u32 nvquirks;
 142	u8 min_tap_delay;
 143	u8 max_tap_delay;
 144};
 145
 146/* Magic pull up and pull down pad calibration offsets */
 147struct sdhci_tegra_autocal_offsets {
 148	u32 pull_up_3v3;
 149	u32 pull_down_3v3;
 150	u32 pull_up_3v3_timeout;
 151	u32 pull_down_3v3_timeout;
 152	u32 pull_up_1v8;
 153	u32 pull_down_1v8;
 154	u32 pull_up_1v8_timeout;
 155	u32 pull_down_1v8_timeout;
 156	u32 pull_up_sdr104;
 157	u32 pull_down_sdr104;
 158	u32 pull_up_hs400;
 159	u32 pull_down_hs400;
 160};
 161
 162struct sdhci_tegra {
 163	const struct sdhci_tegra_soc_data *soc_data;
 164	struct gpio_desc *power_gpio;
 165	struct clk *tmclk;
 166	bool ddr_signaling;
 167	bool pad_calib_required;
 168	bool pad_control_available;
 169
 170	struct reset_control *rst;
 171	struct pinctrl *pinctrl_sdmmc;
 172	struct pinctrl_state *pinctrl_state_3v3;
 173	struct pinctrl_state *pinctrl_state_1v8;
 174	struct pinctrl_state *pinctrl_state_3v3_drv;
 175	struct pinctrl_state *pinctrl_state_1v8_drv;
 176
 177	struct sdhci_tegra_autocal_offsets autocal_offsets;
 178	ktime_t last_calib;
 179
 180	u32 default_tap;
 181	u32 default_trim;
 182	u32 dqs_trim;
 183	bool enable_hwcq;
 184	unsigned long curr_clk_rate;
 185	u8 tuned_tap_delay;
 186	u32 stream_id;
 187};
 188
 189static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
 190{
 191	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 192	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 193	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 194
 195	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
 196			(reg == SDHCI_HOST_VERSION))) {
 197		/* Erratum: Version register is invalid in HW. */
 198		return SDHCI_SPEC_200;
 199	}
 200
 201	return readw(host->ioaddr + reg);
 202}
 203
 204static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 205{
 206	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 207
 208	switch (reg) {
 209	case SDHCI_TRANSFER_MODE:
 210		/*
 211		 * Postpone this write, we must do it together with a
 212		 * command write that is down below.
 213		 */
 214		pltfm_host->xfer_mode_shadow = val;
 215		return;
 216	case SDHCI_COMMAND:
 217		writel((val << 16) | pltfm_host->xfer_mode_shadow,
 218			host->ioaddr + SDHCI_TRANSFER_MODE);
 219		return;
 220	}
 221
 222	writew(val, host->ioaddr + reg);
 223}
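/*
 * Editorial note (illustrative, not from the upstream source): the shadowed
 * 16-bit TRANSFER_MODE value only reaches the hardware when COMMAND is
 * written, as a single combined 32-bit access. For a hypothetical shadowed
 * transfer mode of 0x0023 and command value of 0x113a, the write above is
 * equivalent to:
 *
 *	writel((0x113a << 16) | 0x0023, host->ioaddr + SDHCI_TRANSFER_MODE);
 */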
 224
 225static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
 226{
 227	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 228	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 229	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 230
  231	/* We seem to get spurious timeout and CRC errors, so disable
  232	 * signalling of them. In case of real errors, software timers
  233	 * should take care of eventually detecting them.
 234	 */
 235	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
 236		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
 237
 238	writel(val, host->ioaddr + reg);
 239
 240	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
 241			(reg == SDHCI_INT_ENABLE))) {
 242		/* Erratum: Must enable block gap interrupt detection */
 243		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 244		if (val & SDHCI_INT_CARD_INT)
 245			gap_ctrl |= 0x8;
 246		else
 247			gap_ctrl &= ~0x8;
 248		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 249	}
 250}
 251
 252static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
 253{
 254	bool status;
 255	u32 reg;
 256
 257	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
 258	status = !!(reg & SDHCI_CLOCK_CARD_EN);
 259
 260	if (status == enable)
 261		return status;
 262
 263	if (enable)
 264		reg |= SDHCI_CLOCK_CARD_EN;
 265	else
 266		reg &= ~SDHCI_CLOCK_CARD_EN;
 267
 268	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
 269
 270	return status;
 271}
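/*
 * Usage note (editorial): the helper above returns the previous card-clock
 * state, so callers can bracket a sensitive operation and then restore it,
 * e.g.:
 *
 *	bool was_on = tegra_sdhci_configure_card_clk(host, false);
 *	// ...reprogram tap values or send a tuning command...
 *	tegra_sdhci_configure_card_clk(host, was_on);
 */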
 272
 273static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 274{
  275	bool is_tuning_cmd = false;
 276	bool clk_enabled;
 277
 278	if (reg == SDHCI_COMMAND)
 279		is_tuning_cmd = mmc_op_tuning(SDHCI_GET_CMD(val));
 280
 281	if (is_tuning_cmd)
  282		clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 283
 284	writew(val, host->ioaddr + reg);
 285
 286	if (is_tuning_cmd) {
 287		udelay(1);
 288		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 289		tegra_sdhci_configure_card_clk(host, clk_enabled);
 290	}
 291}
 292
 293static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
 294{
 295	/*
 296	 * Write-enable shall be assumed if GPIO is missing in a board's
 297	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
 298	 * Tegra.
 299	 */
 300	return mmc_gpio_get_ro(host->mmc);
 301}
 302
 303static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
 304{
 305	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 306	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 307	int has_1v8, has_3v3;
 308
 309	/*
 310	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
 311	 * voltage configuration in order to perform voltage switching. This
 312	 * means that valid pinctrl info is required on SDHCI instances capable
 313	 * of performing voltage switching. Whether or not an SDHCI instance is
 314	 * capable of voltage switching is determined based on the regulator.
 315	 */
 316
 317	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 318		return true;
 319
 320	if (IS_ERR(host->mmc->supply.vqmmc))
 321		return false;
 322
 323	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 324						 1700000, 1950000);
 325
 326	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 327						 2700000, 3600000);
 328
 329	if (has_1v8 == 1 && has_3v3 == 1)
 330		return tegra_host->pad_control_available;
 331
 332	/* Fixed voltage, no pad control required. */
 333	return true;
 334}
 335
 336static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
 337{
 338	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 339	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 340	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 341	bool card_clk_enabled = false;
 342	u32 reg;
 343
 344	/*
 345	 * Touching the tap values is a bit tricky on some SoC generations.
 346	 * The quirk enables a workaround for a glitch that sometimes occurs if
 347	 * the tap values are changed.
 348	 */
 349
 350	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
 351		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 352
 353	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 354	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
 355	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
 356	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 357
 358	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
 359	    card_clk_enabled) {
 360		udelay(1);
 361		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 362		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 363	}
 364}
 365
 366static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
 367{
 368	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 369	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 370	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 371	u32 misc_ctrl, clk_ctrl, pad_ctrl;
 372
 373	sdhci_and_cqhci_reset(host, mask);
 374
 375	if (!(mask & SDHCI_RESET_ALL))
 376		return;
 377
 378	tegra_sdhci_set_tap(host, tegra_host->default_tap);
 379
 380	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 381	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 382
 383	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
 384		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
 385		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
 386		       SDHCI_MISC_CTRL_ENABLE_SDR104);
 387
 388	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
 389		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
 390
 391	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
 392		/* Erratum: Enable SDHCI spec v3.00 support */
 393		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
 394			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
 395		/* Advertise UHS modes as supported by host */
 396		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 397			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
 398		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
 399			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
 400		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
 401			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
 402		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 403			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
 404	}
 405
 406	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
 407
 408	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 409	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 410
 411	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
 412		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 413		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
 414		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
 415		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 416
 417		tegra_host->pad_calib_required = true;
 418	}
 419
 420	tegra_host->ddr_signaling = false;
 421}
 422
 423static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
 424{
 425	u32 val;
 426
 427	/*
 428	 * Enable or disable the additional I/O pad used by the drive strength
 429	 * calibration process.
 430	 */
 431	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 432
 433	if (enable)
 434		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 435	else
 436		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 437
 438	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 439
 440	if (enable)
 441		usleep_range(1, 2);
 442}
 443
 444static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
 445					       u16 pdpu)
 446{
 447	u32 reg;
 448
 449	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 450	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
 451	reg |= pdpu;
 452	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 453}
 454
 455static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
 456				   bool state_drvupdn)
 457{
 458	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 459	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 460	struct sdhci_tegra_autocal_offsets *offsets =
 461						&tegra_host->autocal_offsets;
 462	struct pinctrl_state *pinctrl_drvupdn = NULL;
 463	int ret = 0;
 464	u8 drvup = 0, drvdn = 0;
 465	u32 reg;
 466
 467	if (!state_drvupdn) {
 468		/* PADS Drive Strength */
 469		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 470			if (tegra_host->pinctrl_state_1v8_drv) {
 471				pinctrl_drvupdn =
 472					tegra_host->pinctrl_state_1v8_drv;
 473			} else {
 474				drvup = offsets->pull_up_1v8_timeout;
 475				drvdn = offsets->pull_down_1v8_timeout;
 476			}
 477		} else {
 478			if (tegra_host->pinctrl_state_3v3_drv) {
 479				pinctrl_drvupdn =
 480					tegra_host->pinctrl_state_3v3_drv;
 481			} else {
 482				drvup = offsets->pull_up_3v3_timeout;
 483				drvdn = offsets->pull_down_3v3_timeout;
 484			}
 485		}
 486
 487		if (pinctrl_drvupdn != NULL) {
 488			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 489							pinctrl_drvupdn);
 490			if (ret < 0)
 491				dev_err(mmc_dev(host->mmc),
 492					"failed pads drvupdn, ret: %d\n", ret);
 493		} else if ((drvup) || (drvdn)) {
 494			reg = sdhci_readl(host,
 495					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 496			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
 497			reg |= (drvup << 20) | (drvdn << 12);
 498			sdhci_writel(host, reg,
 499					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 500		}
 501
 502	} else {
 503		/* Dual Voltage PADS Voltage selection */
 504		if (!tegra_host->pad_control_available)
 505			return 0;
 506
 507		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 508			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 509						tegra_host->pinctrl_state_1v8);
 510			if (ret < 0)
 511				dev_err(mmc_dev(host->mmc),
 512					"setting 1.8V failed, ret: %d\n", ret);
 513		} else {
 514			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 515						tegra_host->pinctrl_state_3v3);
 516			if (ret < 0)
 517				dev_err(mmc_dev(host->mmc),
 518					"setting 3.3V failed, ret: %d\n", ret);
 519		}
 520	}
 521
 522	return ret;
 523}
 524
 525static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
 526{
 527	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 528	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 529	struct sdhci_tegra_autocal_offsets offsets =
 530			tegra_host->autocal_offsets;
 531	struct mmc_ios *ios = &host->mmc->ios;
 532	bool card_clk_enabled;
 533	u16 pdpu;
 534	u32 reg;
 535	int ret;
 536
 537	switch (ios->timing) {
 538	case MMC_TIMING_UHS_SDR104:
 539		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
 540		break;
 541	case MMC_TIMING_MMC_HS400:
 542		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
 543		break;
 544	default:
 545		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
 546			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
 547		else
 548			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
 549	}
 550
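	/*
	 * Illustrative example (editorial): pdpu packs the pull-down offset
	 * into bits 15:8 and the pull-up offset into bits 7:0. With
	 * hypothetical DT values pull-down = 0x0a and pull-up = 0x05:
	 *
	 *	pdpu = (0x0a << 8) | 0x05;	// 0x0a05
	 */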
 551	/* Set initial offset before auto-calibration */
 552	tegra_sdhci_set_pad_autocal_offset(host, pdpu);
 553
 554	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 555
 556	tegra_sdhci_configure_cal_pad(host, true);
 557
 558	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 559	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
 560	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 561
 562	usleep_range(1, 2);
 563	/* 10 ms timeout */
 564	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
 565				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
 566				 1000, 10000);
 567
 568	tegra_sdhci_configure_cal_pad(host, false);
 569
 570	tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 571
 572	if (ret) {
 573		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
 574
 575		/* Disable automatic cal and use fixed Drive Strengths */
 576		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 577		reg &= ~SDHCI_AUTO_CAL_ENABLE;
 578		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 579
 580		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
 581		if (ret < 0)
 582			dev_err(mmc_dev(host->mmc),
 583				"Setting drive strengths failed: %d\n", ret);
 584	}
 585}
 586
 587static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
 588{
 589	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 590	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 591	struct sdhci_tegra_autocal_offsets *autocal =
 592			&tegra_host->autocal_offsets;
 593	int err;
 594
 595	err = device_property_read_u32(mmc_dev(host->mmc),
 596			"nvidia,pad-autocal-pull-up-offset-3v3",
 597			&autocal->pull_up_3v3);
 598	if (err)
 599		autocal->pull_up_3v3 = 0;
 600
 601	err = device_property_read_u32(mmc_dev(host->mmc),
 602			"nvidia,pad-autocal-pull-down-offset-3v3",
 603			&autocal->pull_down_3v3);
 604	if (err)
 605		autocal->pull_down_3v3 = 0;
 606
 607	err = device_property_read_u32(mmc_dev(host->mmc),
 608			"nvidia,pad-autocal-pull-up-offset-1v8",
 609			&autocal->pull_up_1v8);
 610	if (err)
 611		autocal->pull_up_1v8 = 0;
 612
 613	err = device_property_read_u32(mmc_dev(host->mmc),
 614			"nvidia,pad-autocal-pull-down-offset-1v8",
 615			&autocal->pull_down_1v8);
 616	if (err)
 617		autocal->pull_down_1v8 = 0;
 618
 619	err = device_property_read_u32(mmc_dev(host->mmc),
 620			"nvidia,pad-autocal-pull-up-offset-sdr104",
 621			&autocal->pull_up_sdr104);
 622	if (err)
 623		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
 624
 625	err = device_property_read_u32(mmc_dev(host->mmc),
 626			"nvidia,pad-autocal-pull-down-offset-sdr104",
 627			&autocal->pull_down_sdr104);
 628	if (err)
 629		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
 630
 631	err = device_property_read_u32(mmc_dev(host->mmc),
 632			"nvidia,pad-autocal-pull-up-offset-hs400",
 633			&autocal->pull_up_hs400);
 634	if (err)
 635		autocal->pull_up_hs400 = autocal->pull_up_1v8;
 636
 637	err = device_property_read_u32(mmc_dev(host->mmc),
 638			"nvidia,pad-autocal-pull-down-offset-hs400",
 639			&autocal->pull_down_hs400);
 640	if (err)
 641		autocal->pull_down_hs400 = autocal->pull_down_1v8;
 642
 643	/*
 644	 * Different fail-safe drive strength values based on the signaling
 645	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
 646	 * So, avoid reading below device tree properties for SoCs that don't
 647	 * have NVQUIRK_NEEDS_PAD_CONTROL.
 648	 */
 649	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 650		return;
 651
 652	err = device_property_read_u32(mmc_dev(host->mmc),
 653			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
 654			&autocal->pull_up_3v3_timeout);
 655	if (err) {
 656		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 657			(tegra_host->pinctrl_state_3v3_drv == NULL))
 658			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 659				mmc_hostname(host->mmc));
 660		autocal->pull_up_3v3_timeout = 0;
 661	}
 662
 663	err = device_property_read_u32(mmc_dev(host->mmc),
 664			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
 665			&autocal->pull_down_3v3_timeout);
 666	if (err) {
 667		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 668			(tegra_host->pinctrl_state_3v3_drv == NULL))
 669			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 670				mmc_hostname(host->mmc));
 671		autocal->pull_down_3v3_timeout = 0;
 672	}
 673
 674	err = device_property_read_u32(mmc_dev(host->mmc),
 675			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
 676			&autocal->pull_up_1v8_timeout);
 677	if (err) {
 678		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 679			(tegra_host->pinctrl_state_1v8_drv == NULL))
 680			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 681				mmc_hostname(host->mmc));
 682		autocal->pull_up_1v8_timeout = 0;
 683	}
 684
 685	err = device_property_read_u32(mmc_dev(host->mmc),
 686			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
 687			&autocal->pull_down_1v8_timeout);
 688	if (err) {
 689		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 690			(tegra_host->pinctrl_state_1v8_drv == NULL))
 691			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 692				mmc_hostname(host->mmc));
 693		autocal->pull_down_1v8_timeout = 0;
 694	}
 695}
 696
 697static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 698{
 699	struct sdhci_host *host = mmc_priv(mmc);
 700	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 701	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 702	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
 703
 704	/* 100 ms calibration interval is specified in the TRM */
 705	if (ktime_to_ms(since_calib) > 100) {
 706		tegra_sdhci_pad_autocalib(host);
 707		tegra_host->last_calib = ktime_get();
 708	}
 709
 710	sdhci_request(mmc, mrq);
 711}
 712
 713static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
 714{
 715	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 716	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 717	int err;
 718
 719	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
 720				       &tegra_host->default_tap);
 721	if (err)
 722		tegra_host->default_tap = 0;
 723
 724	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
 725				       &tegra_host->default_trim);
 726	if (err)
 727		tegra_host->default_trim = 0;
 728
 729	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
 730				       &tegra_host->dqs_trim);
 731	if (err)
 732		tegra_host->dqs_trim = 0x11;
 733}
 734
 735static void tegra_sdhci_parse_dt(struct sdhci_host *host)
 736{
 737	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 738	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 739
 740	if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
 741		tegra_host->enable_hwcq = true;
 742	else
 743		tegra_host->enable_hwcq = false;
 744
 745	tegra_sdhci_parse_pad_autocal_dt(host);
 746	tegra_sdhci_parse_tap_and_trim(host);
 747}
 748
 749static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 750{
 751	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 752	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 753	struct device *dev = mmc_dev(host->mmc);
 754	unsigned long host_clk;
 755	int err;
 756
 757	if (!clock)
 758		return sdhci_set_clock(host, clock);
 759
 760	/*
 761	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
  762	 * divider to be configured to divide the host clock by two. The SDHCI
 763	 * clock divider is calculated as part of sdhci_set_clock() by
 764	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
 765	 * the requested clock rate.
 766	 *
 767	 * By setting the host->max_clk to clock * 2 the divider calculation
 768	 * will always result in the correct value for DDR50/52 modes,
 769	 * regardless of clock rate rounding, which may happen if the value
 770	 * from clk_get_rate() is used.
 771	 */
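	/*
	 * Worked example (editorial): for a DDR52 request of 52 MHz,
	 * host_clk = 104 MHz, host->max_clk is set to 104 MHz below, and
	 * sdhci_calc_clk() then picks a divisor of 2, yielding the required
	 * 52 MHz card clock regardless of clk_get_rate() rounding.
	 */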
 772	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
 773
 774	err = dev_pm_opp_set_rate(dev, host_clk);
 775	if (err)
 776		dev_err(dev, "failed to set clk rate to %luHz: %d\n",
 777			host_clk, err);
 778
 779	tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
 780	if (tegra_host->ddr_signaling)
 781		host->max_clk = host_clk;
 782	else
 783		host->max_clk = clk_get_rate(pltfm_host->clk);
 784
 785	sdhci_set_clock(host, clock);
 786
 787	if (tegra_host->pad_calib_required) {
 788		tegra_sdhci_pad_autocalib(host);
 789		tegra_host->pad_calib_required = false;
 790	}
 791}
 792
 793static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
 794					      struct mmc_ios *ios)
 795{
 796	struct sdhci_host *host = mmc_priv(mmc);
 797	u32 val;
 798
 799	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 800
 801	if (ios->enhanced_strobe) {
 802		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 803		/*
 804		 * When CMD13 is sent from mmc_select_hs400es() after
 805		 * switching to HS400ES mode, the bus is operating at
 806		 * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
  807		 * To meet the Tegra SDHCI requirement in HS400ES mode, force the
  808		 * SDHCI interface clock to MMC_HS200_MAX_DTR (200 MHz) so that the
  809		 * host controller CAR clock and the interface clock are rate matched.
 810		 */
 811		tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
 812	} else {
 813		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 814	}
 815
 816	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 817}
 818
 819static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
 820{
 821	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 822
 823	return clk_round_rate(pltfm_host->clk, UINT_MAX);
 824}
 825
 826static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
 827{
 828	u32 val;
 829
 830	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 831	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
 832	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
 833	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 834}
 835
 836static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
 837{
 838	u32 reg;
 839	int err;
 840
 841	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 842	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
 843	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 844
 845	/* 1 ms sleep, 5 ms timeout */
 846	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
 847				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
 848				 1000, 5000);
 849	if (err)
 850		dev_err(mmc_dev(host->mmc),
 851			"HS400 delay line calibration timed out\n");
 852}
 853
 854static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
 855				       u8 thd_low, u8 fixed_tap)
 856{
 857	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 858	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 859	u32 val, tun_status;
 860	u8 word, bit, edge1, tap, window;
 861	bool tap_result;
 862	bool start_fail = false;
 863	bool start_pass = false;
 864	bool end_pass = false;
 865	bool first_fail = false;
 866	bool first_pass = false;
 867	u8 start_pass_tap = 0;
 868	u8 end_pass_tap = 0;
 869	u8 first_fail_tap = 0;
 870	u8 first_pass_tap = 0;
 871	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
 872
 873	/*
  874	 * Read auto-tuned results and extract a good valid passing window by
  875	 * filtering out unwanted bubble/partial/merged windows.
 876	 */
 877	for (word = 0; word < total_tuning_words; word++) {
 878		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
 879		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
 880		val |= word;
 881		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
 882		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
 883		bit = 0;
 884		while (bit < TUNING_WORD_BIT_SIZE) {
 885			tap = word * TUNING_WORD_BIT_SIZE + bit;
 886			tap_result = tun_status & (1 << bit);
 887			if (!tap_result && !start_fail) {
 888				start_fail = true;
 889				if (!first_fail) {
 890					first_fail_tap = tap;
 891					first_fail = true;
 892				}
 893
 894			} else if (tap_result && start_fail && !start_pass) {
 895				start_pass_tap = tap;
 896				start_pass = true;
 897				if (!first_pass) {
 898					first_pass_tap = tap;
 899					first_pass = true;
 900				}
 901
 902			} else if (!tap_result && start_fail && start_pass &&
 903				   !end_pass) {
 904				end_pass_tap = tap - 1;
 905				end_pass = true;
 906			} else if (tap_result && start_pass && start_fail &&
 907				   end_pass) {
 908				window = end_pass_tap - start_pass_tap;
 909				/* discard merged window and bubble window */
 910				if (window >= thd_up || window < thd_low) {
 911					start_pass_tap = tap;
 912					end_pass = false;
 913				} else {
 914					/* set tap at middle of valid window */
 915					tap = start_pass_tap + window / 2;
 916					tegra_host->tuned_tap_delay = tap;
 917					return;
 918				}
 919			}
 920
 921			bit++;
 922		}
 923	}
 924
 925	if (!first_fail) {
 926		WARN(1, "no edge detected, continue with hw tuned delay.\n");
 927	} else if (first_pass) {
 928		/* set tap location at fixed tap relative to the first edge */
 929		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
 930		if (edge1 - 1 > fixed_tap)
 931			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
 932		else
 933			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
 934	}
 935}
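/*
 * Worked example (editorial): suppose the tuning status bits fail at taps
 * 0-3, pass at taps 4-20 and fail again at tap 21. Then start_pass_tap = 4
 * and end_pass_tap = 20, so window = 16; when the next passing tap is seen,
 * 16 is checked against [thd_low, thd_up) and, if valid, the tuned tap is
 * set to the middle of the window, 4 + 16 / 2 = 12. Windows outside that
 * range are discarded as bubble or merged windows and the scan continues.
 */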
 936
 937static void tegra_sdhci_post_tuning(struct sdhci_host *host)
 938{
 939	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 940	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 941	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 942	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
 943	u8 fixed_tap, start_tap, end_tap, window_width;
 944	u8 thdupper, thdlower;
 945	u8 num_iter;
 946	u32 clk_rate_mhz, period_ps, bestcase, worstcase;
 947
  948	/* retain the HW-tuned tap to use in case no correction is needed */
 949	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 950	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
 951				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
 952	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
 953		min_tap_dly = soc_data->min_tap_delay;
 954		max_tap_dly = soc_data->max_tap_delay;
 955		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
 956		period_ps = USEC_PER_SEC / clk_rate_mhz;
 957		bestcase = period_ps / min_tap_dly;
 958		worstcase = period_ps / max_tap_dly;
 959		/*
 960		 * Upper and Lower bound thresholds used to detect merged and
 961		 * bubble windows
 962		 */
 963		thdupper = (2 * worstcase + bestcase) / 2;
 964		thdlower = worstcase / 4;
 965		/*
  966		 * fixed tap is used when the HW tuning result contains a single edge
  967		 * and the tap is set at a fixed tap delay relative to the first edge
 968		 */
 969		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
 970		fixed_tap = avg_tap_dly / 2;
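		/*
		 * Worked example (editorial): on Tegra210 (min_tap_delay = 106,
		 * max_tap_delay = 185) at a 200 MHz clock, period_ps = 5000,
		 * bestcase = 47, worstcase = 27, thdupper = 50, thdlower = 6,
		 * avg_tap_dly = 34 and fixed_tap = 17.
		 */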
 971
 972		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
 973		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 974		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
 975			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 976		window_width = end_tap - start_tap;
 977		num_iter = host->tuning_loop_count;
 978		/*
  979		 * A partial window includes the edges of the tuning range.
  980		 * A merged window includes more taps, so its width is higher
  981		 * than the upper threshold.
 982		 */
 983		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
 984		    (end_tap == num_iter - 2) || window_width >= thdupper) {
 985			pr_debug("%s: Apply tuning correction\n",
 986				 mmc_hostname(host->mmc));
 987			tegra_sdhci_tap_correction(host, thdupper, thdlower,
 988						   fixed_tap);
 989		}
 990	}
 991
 992	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
 993}
 994
 995static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
 996{
 997	struct sdhci_host *host = mmc_priv(mmc);
 998	int err;
 999
1000	err = sdhci_execute_tuning(mmc, opcode);
1001	if (!err && !host->tuning_err)
1002		tegra_sdhci_post_tuning(host);
1003
1004	return err;
1005}
1006
1007static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
1008					  unsigned timing)
1009{
1010	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1011	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1012	bool set_default_tap = false;
1013	bool set_dqs_trim = false;
1014	bool do_hs400_dll_cal = false;
1015	u8 iter = TRIES_256;
1016	u32 val;
1017
1018	tegra_host->ddr_signaling = false;
1019	switch (timing) {
1020	case MMC_TIMING_UHS_SDR50:
1021		break;
1022	case MMC_TIMING_UHS_SDR104:
1023	case MMC_TIMING_MMC_HS200:
1024		/* Don't set default tap on tunable modes. */
1025		iter = TRIES_128;
1026		break;
1027	case MMC_TIMING_MMC_HS400:
1028		set_dqs_trim = true;
1029		do_hs400_dll_cal = true;
1030		iter = TRIES_128;
1031		break;
1032	case MMC_TIMING_MMC_DDR52:
1033	case MMC_TIMING_UHS_DDR50:
1034		tegra_host->ddr_signaling = true;
1035		set_default_tap = true;
1036		break;
1037	default:
1038		set_default_tap = true;
1039		break;
1040	}
1041
1042	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1043	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
1044		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1045		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1046	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1047		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1048		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1049	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1050	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1051
1052	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1053
1054	sdhci_set_uhs_signaling(host, timing);
1055
1056	tegra_sdhci_pad_autocalib(host);
1057
1058	if (tegra_host->tuned_tap_delay && !set_default_tap)
1059		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1060	else
1061		tegra_sdhci_set_tap(host, tegra_host->default_tap);
1062
1063	if (set_dqs_trim)
1064		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1065
1066	if (do_hs400_dll_cal)
1067		tegra_sdhci_hs400_dll_cal(host);
1068}
1069
1070static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1071{
1072	unsigned int min, max;
1073
1074	/*
 1075	 * Start the search for the minimum tap value at 10, as smaller values
 1076	 * may wrongly be reported as working but fail at higher speeds,
1077	 * according to the TRM.
1078	 */
1079	min = 10;
1080	while (min < 255) {
1081		tegra_sdhci_set_tap(host, min);
1082		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1083			break;
1084		min++;
1085	}
1086
1087	/* Find the maximum tap value that still passes. */
1088	max = min + 1;
1089	while (max < 255) {
1090		tegra_sdhci_set_tap(host, max);
1091		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1092			max--;
1093			break;
1094		}
1095		max++;
1096	}
1097
1098	/* The TRM states the ideal tap value is at 75% in the passing range. */
1099	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1100
1101	return mmc_send_tuning(host->mmc, opcode, NULL);
1102}
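/*
 * Worked example (editorial): if the scan above finds the first passing tap
 * at min = 20 and the last passing tap at max = 60, the tap is programmed to
 * 20 + (40 * 3 / 4) = 50, i.e. 75% of the way through the passing window,
 * and the final mmc_send_tuning() call verifies that this tap works.
 */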
1103
1104static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1105						   struct mmc_ios *ios)
1106{
1107	struct sdhci_host *host = mmc_priv(mmc);
1108	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1109	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1110	int ret = 0;
1111
1112	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1113		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1114		if (ret < 0)
1115			return ret;
1116		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1117	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1118		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1119		if (ret < 0)
1120			return ret;
1121		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1122	}
1123
1124	if (tegra_host->pad_calib_required)
1125		tegra_sdhci_pad_autocalib(host);
1126
1127	return ret;
1128}
1129
1130static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1131					 struct sdhci_tegra *tegra_host)
1132{
1133	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1134	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1135		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1136			PTR_ERR(tegra_host->pinctrl_sdmmc));
1137		return -1;
1138	}
1139
1140	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1141				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1142	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1143		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1144			tegra_host->pinctrl_state_1v8_drv = NULL;
1145	}
1146
1147	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1148				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1149	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1150		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1151			tegra_host->pinctrl_state_3v3_drv = NULL;
1152	}
1153
1154	tegra_host->pinctrl_state_3v3 =
1155		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1156	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1157		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1158			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1159		return -1;
1160	}
1161
1162	tegra_host->pinctrl_state_1v8 =
1163		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1164	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1165		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1166			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1167		return -1;
1168	}
1169
1170	tegra_host->pad_control_available = true;
1171
1172	return 0;
1173}
1174
1175static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1176{
1177	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1178	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1179	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1180
1181	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1182		tegra_host->pad_calib_required = true;
1183}
1184
1185static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1186{
1187	struct mmc_host *mmc = cq_host->mmc;
1188	struct sdhci_host *host = mmc_priv(mmc);
1189	u8 ctrl;
1190	ktime_t timeout;
1191	bool timed_out;
1192
1193	/*
 1194	 * During CQE resume/unhalt, the CQHCI driver unhalts CQE before the
 1195	 * cqhci_host_ops ->enable() callback, where the SDHCI DMA and BLOCK_SIZE
 1196	 * registers need to be re-configured.
 1197	 * Tegra CQHCI/SDHCI prevents write access to the block size register
 1198	 * while CQE is unhalted, so handle the CQE resume sequence here and
 1199	 * configure the SDHCI block registers before exiting the CQE halt
 1200	 * state.
1201	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1202	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1203		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1204		sdhci_cqe_enable(mmc);
1205		writel(val, cq_host->mmio + reg);
1206		timeout = ktime_add_us(ktime_get(), 50);
1207		while (1) {
1208			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1209			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1210			if (!(ctrl & CQHCI_HALT) || timed_out)
1211				break;
1212		}
1213		/*
 1214		 * CQE usually resumes very quickly, but in case the Tegra CQE
 1215		 * doesn't resume, retry the unhalt.
1216		 */
1217		if (timed_out)
1218			writel(val, cq_host->mmio + reg);
1219	} else {
1220		writel(val, cq_host->mmio + reg);
1221	}
1222}
1223
1224static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1225					 struct mmc_request *mrq, u64 *data)
1226{
1227	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1228	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1229	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1230
1231	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1232	    mrq->cmd->flags & MMC_RSP_R1B)
1233		*data |= CQHCI_CMD_TIMING(1);
1234}
1235
1236static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1237{
1238	struct cqhci_host *cq_host = mmc->cqe_private;
1239	struct sdhci_host *host = mmc_priv(mmc);
1240	u32 val;
1241
1242	/*
 1243	 * The Tegra CQHCI/SDMMC design prevents write access to the SDHCI block
 1244	 * size register when CQE is enabled and unhalted.
 1245	 * The CQHCI driver enables CQE prior to activation, so disable CQE before
 1246	 * programming the block size in the SDHCI controller and then re-enable it.
1247	 */
1248	if (!cq_host->activated) {
1249		val = cqhci_readl(cq_host, CQHCI_CFG);
1250		if (val & CQHCI_ENABLE)
1251			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1252				     CQHCI_CFG);
1253		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1254		sdhci_cqe_enable(mmc);
1255		if (val & CQHCI_ENABLE)
1256			cqhci_writel(cq_host, val, CQHCI_CFG);
1257	}
1258
1259	/*
 1260	 * CMD CRC errors are sometimes seen with some eMMC devices when the
 1261	 * status command is sent during transfer of the last data block, which
 1262	 * is the default case since the send status command block counter (CBC)
 1263	 * is 1. The recommended fix is to set CBC to 0, allowing the status
 1264	 * command to be sent only when the data lines are idle.
1265	 */
1266	val = cqhci_readl(cq_host, CQHCI_SSC1);
1267	val &= ~CQHCI_SSC1_CBC_MASK;
1268	cqhci_writel(cq_host, val, CQHCI_SSC1);
1269}
1270
1271static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1272{
1273	sdhci_dumpregs(mmc_priv(mmc));
1274}
1275
1276static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1277{
1278	int cmd_error = 0;
1279	int data_error = 0;
1280
1281	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1282		return intmask;
1283
1284	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1285
1286	return 0;
1287}
1288
1289static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1290				    struct mmc_command *cmd)
1291{
1292	u32 val;
1293
1294	/*
 1295	 * The HW busy detection timeout is based on the programmed data timeout
 1296	 * counter; the maximum supported timeout is 11 s, which may not be
 1297	 * enough for long operations like cache flush, sleep/awake or erase.
 1298	 *
 1299	 * The ERASE_TIMEOUT_LIMIT bit of the VENDOR_MISC_CTRL register allows
 1300	 * the host controller to wait in the busy state for as long as the card
 1301	 * is busy, without a HW timeout.
 1302	 *
 1303	 * So, use infinite busy wait mode for operations that may take more
 1304	 * than the maximum HW busy timeout of 11 s, otherwise use finite busy
 1305	 * wait mode.
1306	 */
1307	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1308	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
1309		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1310	else
1311		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1312	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1313
1314	__sdhci_set_timeout(host, cmd);
1315}
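/*
 * Usage note (editorial): for example, an erase whose cmd->busy_timeout is
 * 30000 ms exceeds 11 * MSEC_PER_SEC, so ERASE_TIMEOUT_LIMIT is set and the
 * controller waits for busy without a HW timeout; a command with a shorter
 * busy timeout keeps the finite HW data timeout.
 */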
1316
1317static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
1318{
1319	struct cqhci_host *cq_host = mmc->cqe_private;
1320	u32 reg;
1321
1322	reg = cqhci_readl(cq_host, CQHCI_CFG);
1323	reg |= CQHCI_ENABLE;
1324	cqhci_writel(cq_host, reg, CQHCI_CFG);
1325}
1326
1327static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
1328{
1329	struct cqhci_host *cq_host = mmc->cqe_private;
1330	struct sdhci_host *host = mmc_priv(mmc);
1331	u32 reg;
1332
1333	reg = cqhci_readl(cq_host, CQHCI_CFG);
1334	reg &= ~CQHCI_ENABLE;
1335	cqhci_writel(cq_host, reg, CQHCI_CFG);
1336	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1337}
1338
1339static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1340	.write_l    = tegra_cqhci_writel,
1341	.enable	= sdhci_tegra_cqe_enable,
1342	.disable = sdhci_cqe_disable,
1343	.dumpregs = sdhci_tegra_dumpregs,
1344	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1345	.pre_enable = sdhci_tegra_cqe_pre_enable,
1346	.post_disable = sdhci_tegra_cqe_post_disable,
1347};
1348
1349static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1350{
1351	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1352	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1353	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1354	struct device *dev = mmc_dev(host->mmc);
1355
1356	if (soc->dma_mask)
1357		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1358
1359	return 0;
1360}
1361
1362static const struct sdhci_ops tegra_sdhci_ops = {
1363	.get_ro     = tegra_sdhci_get_ro,
1364	.read_w     = tegra_sdhci_readw,
1365	.write_l    = tegra_sdhci_writel,
1366	.set_clock  = tegra_sdhci_set_clock,
1367	.set_dma_mask = tegra_sdhci_set_dma_mask,
1368	.set_bus_width = sdhci_set_bus_width,
1369	.reset      = tegra_sdhci_reset,
1370	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1371	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1372	.voltage_switch = tegra_sdhci_voltage_switch,
1373	.get_max_clock = tegra_sdhci_get_max_clock,
1374};
1375
1376static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1377	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1378		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1379		  SDHCI_QUIRK_NO_HISPD_BIT |
1380		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1381		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1382	.ops  = &tegra_sdhci_ops,
1383};
1384
1385static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1386	.pdata = &sdhci_tegra20_pdata,
1387	.dma_mask = DMA_BIT_MASK(32),
1388	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1389		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1390		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1391};
1392
1393static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1394	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1395		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1396		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1397		  SDHCI_QUIRK_NO_HISPD_BIT |
1398		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1399		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1400	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1401		   SDHCI_QUIRK2_BROKEN_HS200 |
1402		   /*
1403		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1404		    * though no command operation was in progress."
1405		    *
1406		    * The exact reason is unknown, as the same hardware seems
1407		    * to support Auto CMD23 on a downstream 3.1 kernel.
1408		    */
1409		   SDHCI_QUIRK2_ACMD23_BROKEN,
1410	.ops  = &tegra_sdhci_ops,
1411};
1412
1413static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1414	.pdata = &sdhci_tegra30_pdata,
1415	.dma_mask = DMA_BIT_MASK(32),
1416	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1417		    NVQUIRK_ENABLE_SDR50 |
1418		    NVQUIRK_ENABLE_SDR104 |
1419		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1420		    NVQUIRK_HAS_PADCALIB,
1421};
1422
1423static const struct sdhci_ops tegra114_sdhci_ops = {
1424	.get_ro     = tegra_sdhci_get_ro,
1425	.read_w     = tegra_sdhci_readw,
1426	.write_w    = tegra_sdhci_writew,
1427	.write_l    = tegra_sdhci_writel,
1428	.set_clock  = tegra_sdhci_set_clock,
1429	.set_dma_mask = tegra_sdhci_set_dma_mask,
1430	.set_bus_width = sdhci_set_bus_width,
1431	.reset      = tegra_sdhci_reset,
1432	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1433	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1434	.voltage_switch = tegra_sdhci_voltage_switch,
1435	.get_max_clock = tegra_sdhci_get_max_clock,
1436};
1437
1438static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1439	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1440		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1441		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1442		  SDHCI_QUIRK_NO_HISPD_BIT |
1443		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1444		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1445	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1446	.ops  = &tegra114_sdhci_ops,
1447};
1448
1449static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1450	.pdata = &sdhci_tegra114_pdata,
1451	.dma_mask = DMA_BIT_MASK(32),
1452	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1453};
1454
1455static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1456	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1457		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1458		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1459		  SDHCI_QUIRK_NO_HISPD_BIT |
1460		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1461		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1462	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1463	.ops  = &tegra114_sdhci_ops,
1464};
1465
1466static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1467	.pdata = &sdhci_tegra124_pdata,
1468	.dma_mask = DMA_BIT_MASK(34),
1469	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1470};
1471
1472static const struct sdhci_ops tegra210_sdhci_ops = {
1473	.get_ro     = tegra_sdhci_get_ro,
1474	.read_w     = tegra_sdhci_readw,
1475	.write_w    = tegra210_sdhci_writew,
1476	.write_l    = tegra_sdhci_writel,
1477	.set_clock  = tegra_sdhci_set_clock,
1478	.set_dma_mask = tegra_sdhci_set_dma_mask,
1479	.set_bus_width = sdhci_set_bus_width,
1480	.reset      = tegra_sdhci_reset,
1481	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1482	.voltage_switch = tegra_sdhci_voltage_switch,
1483	.get_max_clock = tegra_sdhci_get_max_clock,
1484	.set_timeout = tegra_sdhci_set_timeout,
1485};
1486
1487static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1488	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1489		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1490		  SDHCI_QUIRK_NO_HISPD_BIT |
1491		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1492		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1493	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1494	.ops  = &tegra210_sdhci_ops,
1495};
1496
1497static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1498	.pdata = &sdhci_tegra210_pdata,
1499	.dma_mask = DMA_BIT_MASK(34),
1500	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1501		    NVQUIRK_HAS_PADCALIB |
1502		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1503		    NVQUIRK_ENABLE_SDR50 |
1504		    NVQUIRK_ENABLE_SDR104 |
1505		    NVQUIRK_HAS_TMCLK,
1506	.min_tap_delay = 106,
1507	.max_tap_delay = 185,
1508};
1509
1510static const struct sdhci_ops tegra186_sdhci_ops = {
1511	.get_ro     = tegra_sdhci_get_ro,
1512	.read_w     = tegra_sdhci_readw,
1513	.write_l    = tegra_sdhci_writel,
1514	.set_clock  = tegra_sdhci_set_clock,
1515	.set_dma_mask = tegra_sdhci_set_dma_mask,
1516	.set_bus_width = sdhci_set_bus_width,
1517	.reset      = tegra_sdhci_reset,
1518	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1519	.voltage_switch = tegra_sdhci_voltage_switch,
1520	.get_max_clock = tegra_sdhci_get_max_clock,
1521	.irq = sdhci_tegra_cqhci_irq,
1522	.set_timeout = tegra_sdhci_set_timeout,
1523};
1524
1525static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1526	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1527		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1528		  SDHCI_QUIRK_NO_HISPD_BIT |
1529		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1530		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1531	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1532		   SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
1533	.ops  = &tegra186_sdhci_ops,
1534};
1535
1536static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1537	.pdata = &sdhci_tegra186_pdata,
1538	.dma_mask = DMA_BIT_MASK(40),
1539	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1540		    NVQUIRK_HAS_PADCALIB |
1541		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1542		    NVQUIRK_ENABLE_SDR50 |
1543		    NVQUIRK_ENABLE_SDR104 |
1544		    NVQUIRK_HAS_TMCLK |
1545		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1546	.min_tap_delay = 84,
1547	.max_tap_delay = 136,
1548};
1549
1550static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1551	.pdata = &sdhci_tegra186_pdata,
1552	.dma_mask = DMA_BIT_MASK(39),
1553	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1554		    NVQUIRK_HAS_PADCALIB |
1555		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1556		    NVQUIRK_ENABLE_SDR50 |
1557		    NVQUIRK_ENABLE_SDR104 |
1558		    NVQUIRK_HAS_TMCLK,
1559	.min_tap_delay = 96,
1560	.max_tap_delay = 139,
1561};
1562
1563static const struct sdhci_tegra_soc_data soc_data_tegra234 = {
1564	.pdata = &sdhci_tegra186_pdata,
1565	.dma_mask = DMA_BIT_MASK(39),
1566	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1567		    NVQUIRK_HAS_PADCALIB |
1568		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1569		    NVQUIRK_ENABLE_SDR50 |
1570		    NVQUIRK_ENABLE_SDR104 |
1571		    NVQUIRK_PROGRAM_STREAMID |
1572		    NVQUIRK_HAS_TMCLK,
1573	.min_tap_delay = 95,
1574	.max_tap_delay = 111,
1575};
1576
1577static const struct of_device_id sdhci_tegra_dt_match[] = {
1578	{ .compatible = "nvidia,tegra234-sdhci", .data = &soc_data_tegra234 },
1579	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1580	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1581	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1582	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1583	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1584	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1585	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1586	{}
1587};
1588MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1589
1590static int sdhci_tegra_add_host(struct sdhci_host *host)
1591{
1592	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1593	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1594	struct cqhci_host *cq_host;
1595	bool dma64;
1596	int ret;
1597
1598	if (!tegra_host->enable_hwcq)
1599		return sdhci_add_host(host);
1600
1601	sdhci_enable_v4_mode(host);
1602
1603	ret = sdhci_setup_host(host);
1604	if (ret)
1605		return ret;
1606
1607	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1608
1609	cq_host = devm_kzalloc(mmc_dev(host->mmc),
1610				sizeof(*cq_host), GFP_KERNEL);
1611	if (!cq_host) {
1612		ret = -ENOMEM;
1613		goto cleanup;
1614	}
1615
1616	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1617	cq_host->ops = &sdhci_tegra_cqhci_ops;
1618
1619	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1620	if (dma64)
1621		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1622
1623	ret = cqhci_init(cq_host, host->mmc, dma64);
1624	if (ret)
1625		goto cleanup;
1626
1627	ret = __sdhci_add_host(host);
1628	if (ret)
1629		goto cleanup;
1630
1631	return 0;
1632
1633cleanup:
1634	sdhci_cleanup_host(host);
1635	return ret;
1636}
1637
1638/* Program MC streamID for DMA transfers */
1639static void sdhci_tegra_program_stream_id(struct sdhci_host *host)
1640{
1641	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1642	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1643
1644	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID) {
1645		tegra_sdhci_writel(host, FIELD_PREP(GENMASK(15, 8), tegra_host->stream_id) |
1646					 FIELD_PREP(GENMASK(7, 0), tegra_host->stream_id),
1647					 SDHCI_TEGRA_CIF2AXI_CTRL_0);
1648	}
1649}
1650
1651static int sdhci_tegra_probe(struct platform_device *pdev)
1652{
1653	const struct sdhci_tegra_soc_data *soc_data;
1654	struct sdhci_host *host;
1655	struct sdhci_pltfm_host *pltfm_host;
1656	struct sdhci_tegra *tegra_host;
1657	struct clk *clk;
1658	int rc;
1659
1660	soc_data = of_device_get_match_data(&pdev->dev);
1661	if (!soc_data)
1662		return -EINVAL;
1663
1664	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1665	if (IS_ERR(host))
1666		return PTR_ERR(host);
1667	pltfm_host = sdhci_priv(host);
1668
1669	tegra_host = sdhci_pltfm_priv(pltfm_host);
1670	tegra_host->ddr_signaling = false;
1671	tegra_host->pad_calib_required = false;
1672	tegra_host->pad_control_available = false;
1673	tegra_host->soc_data = soc_data;
1674
1675	if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
1676		host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;
1677
1678	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1679		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1680		if (rc == 0)
1681			host->mmc_host_ops.start_signal_voltage_switch =
1682				sdhci_tegra_start_signal_voltage_switch;
1683	}
1684
1685	/* Hook to periodically rerun pad calibration */
1686	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1687		host->mmc_host_ops.request = tegra_sdhci_request;
1688
1689	host->mmc_host_ops.hs400_enhanced_strobe =
1690			tegra_sdhci_hs400_enhanced_strobe;
1691
1692	if (!host->ops->platform_execute_tuning)
1693		host->mmc_host_ops.execute_tuning =
1694				tegra_sdhci_execute_hw_tuning;
1695
1696	rc = mmc_of_parse(host->mmc);
1697	if (rc)
1698		goto err_parse_dt;
1699
1700	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1701		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1702
1703	/* HW busy detection is supported, but R1B responses are required. */
1704	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1705
1706	/* GPIO CD can be set as a wakeup source */
1707	host->mmc->caps |= MMC_CAP_CD_WAKE;
1708
1709	tegra_sdhci_parse_dt(host);
1710
1711	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID &&
1712	    !tegra_dev_iommu_get_stream_id(&pdev->dev, &tegra_host->stream_id)) {
1713		dev_warn(mmc_dev(host->mmc), "missing IOMMU stream ID\n");
1714		tegra_host->stream_id = 0x7f;
1715	}
1716
1717	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1718							 GPIOD_OUT_HIGH);
1719	if (IS_ERR(tegra_host->power_gpio)) {
1720		rc = PTR_ERR(tegra_host->power_gpio);
1721		goto err_power_req;
1722	}
1723
1724	/*
1725	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used for the host
1726	 * timeout clock, and SW can choose either TMCLK or SDCLK for the
1727	 * hardware data timeout through the USE_TMCLK_FOR_DATA_TIMEOUT bit
1728	 * of the SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
1729	 *
1730	 * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, and SDMMC uses
1731	 * the 12 MHz TMCLK that is advertised in the host capability register.
1732	 * With a 12 MHz TMCLK the maximum achievable data timeout period is
1733	 * about 11 s, which is better than using SDCLK for the data timeout.
1734	 *
1735	 * So, TMCLK is set to 12 MHz and kept enabled all the time on SoCs
1736	 * supporting a separate TMCLK.
1737	 */
1738
1739	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1740		clk = devm_clk_get(&pdev->dev, "tmclk");
1741		if (IS_ERR(clk)) {
1742			rc = PTR_ERR(clk);
1743			if (rc == -EPROBE_DEFER)
1744				goto err_power_req;
1745
1746			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
1747			clk = NULL;
1748		}
1749
1750		clk_set_rate(clk, 12000000);
1751		rc = clk_prepare_enable(clk);
1752		if (rc) {
1753			dev_err(&pdev->dev,
1754				"failed to enable tmclk: %d\n", rc);
1755			goto err_power_req;
1756		}
1757
1758		tegra_host->tmclk = clk;
1759	}
1760
1761	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1762	if (IS_ERR(clk)) {
1763		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
1764				   "failed to get clock\n");
1765		goto err_clk_get;
1766	}
1767	pltfm_host->clk = clk;
1768
1769	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1770							   "sdhci");
1771	if (IS_ERR(tegra_host->rst)) {
1772		rc = PTR_ERR(tegra_host->rst);
1773		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1774		goto err_rst_get;
1775	}
1776
1777	rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
1778	if (rc)
1779		goto err_rst_get;
1780
1781	pm_runtime_enable(&pdev->dev);
1782	rc = pm_runtime_resume_and_get(&pdev->dev);
1783	if (rc)
1784		goto err_pm_get;
1785
1786	rc = reset_control_assert(tegra_host->rst);
1787	if (rc)
1788		goto err_rst_assert;
1789
1790	usleep_range(2000, 4000);
1791
1792	rc = reset_control_deassert(tegra_host->rst);
1793	if (rc)
1794		goto err_rst_assert;
1795
1796	usleep_range(2000, 4000);
1797
1798	rc = sdhci_tegra_add_host(host);
1799	if (rc)
1800		goto err_add_host;
1801
1802	sdhci_tegra_program_stream_id(host);
1803
1804	return 0;
1805
1806err_add_host:
1807	reset_control_assert(tegra_host->rst);
1808err_rst_assert:
1809	pm_runtime_put_sync_suspend(&pdev->dev);
1810err_pm_get:
1811	pm_runtime_disable(&pdev->dev);
1812err_rst_get:
1813err_clk_get:
1814	clk_disable_unprepare(tegra_host->tmclk);
1815err_power_req:
1816err_parse_dt:
1817	sdhci_pltfm_free(pdev);
1818	return rc;
1819}
1820
1821static int sdhci_tegra_remove(struct platform_device *pdev)
1822{
1823	struct sdhci_host *host = platform_get_drvdata(pdev);
1824	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1825	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1826
1827	sdhci_remove_host(host, 0);
1828
1829	reset_control_assert(tegra_host->rst);
1830	usleep_range(2000, 4000);
1831
1832	pm_runtime_put_sync_suspend(&pdev->dev);
1833	pm_runtime_force_suspend(&pdev->dev);
1834
1835	clk_disable_unprepare(tegra_host->tmclk);
1836	sdhci_pltfm_free(pdev);
1837
1838	return 0;
1839}
1840
1841static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
1842{
1843	struct sdhci_host *host = dev_get_drvdata(dev);
1844	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1845
1846	clk_disable_unprepare(pltfm_host->clk);
1847
1848	return 0;
1849}
1850
1851static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
1852{
1853	struct sdhci_host *host = dev_get_drvdata(dev);
1854	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1855
1856	return clk_prepare_enable(pltfm_host->clk);
1857}
1858
1859#ifdef CONFIG_PM_SLEEP
1860static int sdhci_tegra_suspend(struct device *dev)
1861{
1862	struct sdhci_host *host = dev_get_drvdata(dev);
1863	int ret;
1864
1865	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1866		ret = cqhci_suspend(host->mmc);
1867		if (ret)
1868			return ret;
1869	}
1870
1871	ret = sdhci_suspend_host(host);
1872	if (ret) {
1873		cqhci_resume(host->mmc);
1874		return ret;
1875	}
1876
1877	ret = pm_runtime_force_suspend(dev);
1878	if (ret) {
1879		sdhci_resume_host(host);
1880		cqhci_resume(host->mmc);
1881		return ret;
1882	}
1883
1884	return mmc_gpio_set_cd_wake(host->mmc, true);
1885}
1886
1887static int sdhci_tegra_resume(struct device *dev)
1888{
1889	struct sdhci_host *host = dev_get_drvdata(dev);
1890	int ret;
1891
1892	ret = mmc_gpio_set_cd_wake(host->mmc, false);
1893	if (ret)
1894		return ret;
1895
1896	ret = pm_runtime_force_resume(dev);
1897	if (ret)
1898		return ret;
1899
1900	sdhci_tegra_program_stream_id(host);
1901
1902	ret = sdhci_resume_host(host);
1903	if (ret)
1904		goto disable_clk;
1905
1906	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1907		ret = cqhci_resume(host->mmc);
1908		if (ret)
1909			goto suspend_host;
1910	}
1911
1912	return 0;
1913
1914suspend_host:
1915	sdhci_suspend_host(host);
1916disable_clk:
1917	pm_runtime_force_suspend(dev);
1918	return ret;
1919}
1920#endif
1921
1922static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
1923	SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
1924			   NULL)
1925	SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
1926};
1927
1928static struct platform_driver sdhci_tegra_driver = {
1929	.driver		= {
1930		.name	= "sdhci-tegra",
1931		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1932		.of_match_table = sdhci_tegra_dt_match,
1933		.pm	= &sdhci_tegra_dev_pm_ops,
1934	},
1935	.probe		= sdhci_tegra_probe,
1936	.remove		= sdhci_tegra_remove,
1937};
1938
1939module_platform_driver(sdhci_tegra_driver);
1940
1941MODULE_DESCRIPTION("SDHCI driver for Tegra");
1942MODULE_AUTHOR("Google, Inc.");
1943MODULE_LICENSE("GPL v2");
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2010 Google, Inc.
   4 */
   5
 
 
   6#include <linux/delay.h>
   7#include <linux/dma-mapping.h>
   8#include <linux/err.h>
   9#include <linux/module.h>
  10#include <linux/init.h>
 
 
  11#include <linux/iopoll.h>
  12#include <linux/platform_device.h>
  13#include <linux/clk.h>
  14#include <linux/io.h>
 
 
 
 
  15#include <linux/of.h>
  16#include <linux/of_device.h>
  17#include <linux/pinctrl/consumer.h>
 
 
 
  18#include <linux/regulator/consumer.h>
  19#include <linux/reset.h>
  20#include <linux/mmc/card.h>
  21#include <linux/mmc/host.h>
  22#include <linux/mmc/mmc.h>
  23#include <linux/mmc/slot-gpio.h>
  24#include <linux/gpio/consumer.h>
  25#include <linux/ktime.h>
  26
 
 
 
  27#include "sdhci-pltfm.h"
  28#include "cqhci.h"
  29
  30/* Tegra SDHOST controller vendor register definitions */
  31#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
  32#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
  33#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
  34#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
  35#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
  36#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
  37#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
  38#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
  39
  40#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
  41#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
  42
  43#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
  44#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
  45#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
  46
  47#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
  48#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
  49#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
  50#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
  51#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
  52#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
  53
  54#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
  55#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
  56
  57#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
  58#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
  59
  60#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
  61#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
  62#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
  63#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
  64#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
  65#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
  66#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
  67#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
  68#define TRIES_128					2
  69#define TRIES_256					4
  70#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
  71
  72#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
  73#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
  74#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
  75#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
  76#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
  77#define TUNING_WORD_BIT_SIZE				32
  78
  79#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
  80#define SDHCI_AUTO_CAL_START				BIT(31)
  81#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
  82#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
  83
  84#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
  85#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
  86#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
  87#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
  88#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
  89
  90#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
  91#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
  92
 
 
  93#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
  94#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
  95#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
  96#define NVQUIRK_ENABLE_SDR50				BIT(3)
  97#define NVQUIRK_ENABLE_SDR104				BIT(4)
  98#define NVQUIRK_ENABLE_DDR50				BIT(5)
  99/*
 100 * HAS_PADCALIB NVQUIRK is for SoC's supporting auto calibration of pads
 101 * drive strength.
 102 */
 103#define NVQUIRK_HAS_PADCALIB				BIT(6)
 104/*
 105 * NEEDS_PAD_CONTROL NVQUIRK is for SoC's having separate 3V3 and 1V8 pads.
 106 * 3V3/1V8 pad selection happens through pinctrl state selection depending
 107 * on the signaling mode.
 108 */
 109#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
 110#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
 111#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
 112
 113/*
 114 * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra
 115 * SDMMC hardware data timeout.
 116 */
 117#define NVQUIRK_HAS_TMCLK				BIT(10)
 118
 
 
 
 119/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
 120#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
 121
 
 
 
 
 122struct sdhci_tegra_soc_data {
 123	const struct sdhci_pltfm_data *pdata;
 124	u64 dma_mask;
 125	u32 nvquirks;
 126	u8 min_tap_delay;
 127	u8 max_tap_delay;
 128};
 129
 130/* Magic pull up and pull down pad calibration offsets */
 131struct sdhci_tegra_autocal_offsets {
 132	u32 pull_up_3v3;
 133	u32 pull_down_3v3;
 134	u32 pull_up_3v3_timeout;
 135	u32 pull_down_3v3_timeout;
 136	u32 pull_up_1v8;
 137	u32 pull_down_1v8;
 138	u32 pull_up_1v8_timeout;
 139	u32 pull_down_1v8_timeout;
 140	u32 pull_up_sdr104;
 141	u32 pull_down_sdr104;
 142	u32 pull_up_hs400;
 143	u32 pull_down_hs400;
 144};
 145
 146struct sdhci_tegra {
 147	const struct sdhci_tegra_soc_data *soc_data;
 148	struct gpio_desc *power_gpio;
 149	struct clk *tmclk;
 150	bool ddr_signaling;
 151	bool pad_calib_required;
 152	bool pad_control_available;
 153
 154	struct reset_control *rst;
 155	struct pinctrl *pinctrl_sdmmc;
 156	struct pinctrl_state *pinctrl_state_3v3;
 157	struct pinctrl_state *pinctrl_state_1v8;
 158	struct pinctrl_state *pinctrl_state_3v3_drv;
 159	struct pinctrl_state *pinctrl_state_1v8_drv;
 160
 161	struct sdhci_tegra_autocal_offsets autocal_offsets;
 162	ktime_t last_calib;
 163
 164	u32 default_tap;
 165	u32 default_trim;
 166	u32 dqs_trim;
 167	bool enable_hwcq;
 168	unsigned long curr_clk_rate;
 169	u8 tuned_tap_delay;
 
 170};
 171
 172static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
 173{
 174	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 175	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 176	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 177
 178	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
 179			(reg == SDHCI_HOST_VERSION))) {
 180		/* Erratum: Version register is invalid in HW. */
 181		return SDHCI_SPEC_200;
 182	}
 183
 184	return readw(host->ioaddr + reg);
 185}
 186
 187static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 188{
 189	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 190
 191	switch (reg) {
 192	case SDHCI_TRANSFER_MODE:
 193		/*
 194		 * Postpone this write, we must do it together with a
 195		 * command write that is down below.
 196		 */
 197		pltfm_host->xfer_mode_shadow = val;
 198		return;
 199	case SDHCI_COMMAND:
 200		writel((val << 16) | pltfm_host->xfer_mode_shadow,
 201			host->ioaddr + SDHCI_TRANSFER_MODE);
 202		return;
 203	}
 204
 205	writew(val, host->ioaddr + reg);
 206}
 207
 208static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
 209{
 210	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 211	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 212	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 213
 214	/* Seems like we're getting spurious timeout and crc errors, so
 215	 * disable signalling of them. In case of real errors software
 216	 * timers should take care of eventually detecting them.
 217	 */
 218	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
 219		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
 220
 221	writel(val, host->ioaddr + reg);
 222
 223	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
 224			(reg == SDHCI_INT_ENABLE))) {
 225		/* Erratum: Must enable block gap interrupt detection */
 226		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 227		if (val & SDHCI_INT_CARD_INT)
 228			gap_ctrl |= 0x8;
 229		else
 230			gap_ctrl &= ~0x8;
 231		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 232	}
 233}
 234
 235static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
 236{
 237	bool status;
 238	u32 reg;
 239
 240	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
 241	status = !!(reg & SDHCI_CLOCK_CARD_EN);
 242
 243	if (status == enable)
 244		return status;
 245
 246	if (enable)
 247		reg |= SDHCI_CLOCK_CARD_EN;
 248	else
 249		reg &= ~SDHCI_CLOCK_CARD_EN;
 250
 251	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
 252
 253	return status;
 254}
 255
 256static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 257{
 258	bool is_tuning_cmd = 0;
 259	bool clk_enabled;
 260	u8 cmd;
 261
 262	if (reg == SDHCI_COMMAND) {
 263		cmd = SDHCI_GET_CMD(val);
 264		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
 265				cmd == MMC_SEND_TUNING_BLOCK_HS200;
 266	}
 267
 268	if (is_tuning_cmd)
 269		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);
 270
 271	writew(val, host->ioaddr + reg);
 272
 273	if (is_tuning_cmd) {
 274		udelay(1);
 275		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 276		tegra_sdhci_configure_card_clk(host, clk_enabled);
 277	}
 278}
 279
 280static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
 281{
 282	/*
 283	 * Write-enable shall be assumed if GPIO is missing in a board's
 284	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
 285	 * Tegra.
 286	 */
 287	return mmc_gpio_get_ro(host->mmc);
 288}
 289
 290static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
 291{
 292	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 293	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 294	int has_1v8, has_3v3;
 295
 296	/*
 297	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
 298	 * voltage configuration in order to perform voltage switching. This
 299	 * means that valid pinctrl info is required on SDHCI instances capable
 300	 * of performing voltage switching. Whether or not an SDHCI instance is
 301	 * capable of voltage switching is determined based on the regulator.
 302	 */
 303
 304	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 305		return true;
 306
 307	if (IS_ERR(host->mmc->supply.vqmmc))
 308		return false;
 309
 310	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 311						 1700000, 1950000);
 312
 313	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 314						 2700000, 3600000);
 315
 316	if (has_1v8 == 1 && has_3v3 == 1)
 317		return tegra_host->pad_control_available;
 318
 319	/* Fixed voltage, no pad control required. */
 320	return true;
 321}
 322
 323static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
 324{
 325	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 326	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 327	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 328	bool card_clk_enabled = false;
 329	u32 reg;
 330
 331	/*
 332	 * Touching the tap values is a bit tricky on some SoC generations.
 333	 * The quirk enables a workaround for a glitch that sometimes occurs if
 334	 * the tap values are changed.
 335	 */
 336
 337	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
 338		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 339
 340	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 341	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
 342	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
 343	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 344
 345	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
 346	    card_clk_enabled) {
 347		udelay(1);
 348		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 349		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 350	}
 351}
 352
 353static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
 354					      struct mmc_ios *ios)
 355{
 356	struct sdhci_host *host = mmc_priv(mmc);
 357	u32 val;
 358
 359	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 360
 361	if (ios->enhanced_strobe)
 362		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 363	else
 364		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 365
 366	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 367
 368}
 369
 370static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
 371{
 372	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 373	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 374	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 375	u32 misc_ctrl, clk_ctrl, pad_ctrl;
 376
 377	sdhci_reset(host, mask);
 378
 379	if (!(mask & SDHCI_RESET_ALL))
 380		return;
 381
 382	tegra_sdhci_set_tap(host, tegra_host->default_tap);
 383
 384	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 385	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 386
 387	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
 388		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
 389		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
 390		       SDHCI_MISC_CTRL_ENABLE_SDR104);
 391
 392	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
 393		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
 394
 395	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
 396		/* Erratum: Enable SDHCI spec v3.00 support */
 397		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
 398			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
 399		/* Advertise UHS modes as supported by host */
 400		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 401			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
 402		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
 403			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
 404		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
 405			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
 406		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 407			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
 408	}
 409
 410	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
 411
 412	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 413	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 414
 415	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
 416		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 417		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
 418		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
 419		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 420
 421		tegra_host->pad_calib_required = true;
 422	}
 423
 424	tegra_host->ddr_signaling = false;
 425}
 426
 427static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
 428{
 429	u32 val;
 430
 431	/*
 432	 * Enable or disable the additional I/O pad used by the drive strength
 433	 * calibration process.
 434	 */
 435	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 436
 437	if (enable)
 438		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 439	else
 440		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 441
 442	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 443
 444	if (enable)
 445		usleep_range(1, 2);
 446}
 447
 448static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
 449					       u16 pdpu)
 450{
 451	u32 reg;
 452
 453	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 454	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
 455	reg |= pdpu;
 456	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 457}
 458
 459static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
 460				   bool state_drvupdn)
 461{
 462	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 463	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 464	struct sdhci_tegra_autocal_offsets *offsets =
 465						&tegra_host->autocal_offsets;
 466	struct pinctrl_state *pinctrl_drvupdn = NULL;
 467	int ret = 0;
 468	u8 drvup = 0, drvdn = 0;
 469	u32 reg;
 470
 471	if (!state_drvupdn) {
 472		/* PADS Drive Strength */
 473		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 474			if (tegra_host->pinctrl_state_1v8_drv) {
 475				pinctrl_drvupdn =
 476					tegra_host->pinctrl_state_1v8_drv;
 477			} else {
 478				drvup = offsets->pull_up_1v8_timeout;
 479				drvdn = offsets->pull_down_1v8_timeout;
 480			}
 481		} else {
 482			if (tegra_host->pinctrl_state_3v3_drv) {
 483				pinctrl_drvupdn =
 484					tegra_host->pinctrl_state_3v3_drv;
 485			} else {
 486				drvup = offsets->pull_up_3v3_timeout;
 487				drvdn = offsets->pull_down_3v3_timeout;
 488			}
 489		}
 490
 491		if (pinctrl_drvupdn != NULL) {
 492			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 493							pinctrl_drvupdn);
 494			if (ret < 0)
 495				dev_err(mmc_dev(host->mmc),
 496					"failed pads drvupdn, ret: %d\n", ret);
 497		} else if ((drvup) || (drvdn)) {
 498			reg = sdhci_readl(host,
 499					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 500			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
 501			reg |= (drvup << 20) | (drvdn << 12);
 502			sdhci_writel(host, reg,
 503					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 504		}
 505
 506	} else {
 507		/* Dual Voltage PADS Voltage selection */
 508		if (!tegra_host->pad_control_available)
 509			return 0;
 510
 511		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 512			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 513						tegra_host->pinctrl_state_1v8);
 514			if (ret < 0)
 515				dev_err(mmc_dev(host->mmc),
 516					"setting 1.8V failed, ret: %d\n", ret);
 517		} else {
 518			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 519						tegra_host->pinctrl_state_3v3);
 520			if (ret < 0)
 521				dev_err(mmc_dev(host->mmc),
 522					"setting 3.3V failed, ret: %d\n", ret);
 523		}
 524	}
 525
 526	return ret;
 527}
 528
 529static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
 530{
 531	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 532	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 533	struct sdhci_tegra_autocal_offsets offsets =
 534			tegra_host->autocal_offsets;
 535	struct mmc_ios *ios = &host->mmc->ios;
 536	bool card_clk_enabled;
 537	u16 pdpu;
 538	u32 reg;
 539	int ret;
 540
 541	switch (ios->timing) {
 542	case MMC_TIMING_UHS_SDR104:
 543		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
 544		break;
 545	case MMC_TIMING_MMC_HS400:
 546		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
 547		break;
 548	default:
 549		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
 550			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
 551		else
 552			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
 553	}
 554
 555	/* Set initial offset before auto-calibration */
 556	tegra_sdhci_set_pad_autocal_offset(host, pdpu);
 557
 558	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 559
 560	tegra_sdhci_configure_cal_pad(host, true);
 561
 562	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 563	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
 564	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 565
 566	usleep_range(1, 2);
 567	/* 10 ms timeout */
 568	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
 569				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
 570				 1000, 10000);
 571
 572	tegra_sdhci_configure_cal_pad(host, false);
 573
 574	tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 575
 576	if (ret) {
 577		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
 578
 579		/* Disable automatic cal and use fixed Drive Strengths */
 580		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 581		reg &= ~SDHCI_AUTO_CAL_ENABLE;
 582		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 583
 584		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
 585		if (ret < 0)
 586			dev_err(mmc_dev(host->mmc),
 587				"Setting drive strengths failed: %d\n", ret);
 588	}
 589}
 590
 591static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
 592{
 593	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 594	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 595	struct sdhci_tegra_autocal_offsets *autocal =
 596			&tegra_host->autocal_offsets;
 597	int err;
 598
 599	err = device_property_read_u32(host->mmc->parent,
 600			"nvidia,pad-autocal-pull-up-offset-3v3",
 601			&autocal->pull_up_3v3);
 602	if (err)
 603		autocal->pull_up_3v3 = 0;
 604
 605	err = device_property_read_u32(host->mmc->parent,
 606			"nvidia,pad-autocal-pull-down-offset-3v3",
 607			&autocal->pull_down_3v3);
 608	if (err)
 609		autocal->pull_down_3v3 = 0;
 610
 611	err = device_property_read_u32(host->mmc->parent,
 612			"nvidia,pad-autocal-pull-up-offset-1v8",
 613			&autocal->pull_up_1v8);
 614	if (err)
 615		autocal->pull_up_1v8 = 0;
 616
 617	err = device_property_read_u32(host->mmc->parent,
 618			"nvidia,pad-autocal-pull-down-offset-1v8",
 619			&autocal->pull_down_1v8);
 620	if (err)
 621		autocal->pull_down_1v8 = 0;
 622
 623	err = device_property_read_u32(host->mmc->parent,
 624			"nvidia,pad-autocal-pull-up-offset-sdr104",
 625			&autocal->pull_up_sdr104);
 626	if (err)
 627		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
 628
 629	err = device_property_read_u32(host->mmc->parent,
 630			"nvidia,pad-autocal-pull-down-offset-sdr104",
 631			&autocal->pull_down_sdr104);
 632	if (err)
 633		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
 634
 635	err = device_property_read_u32(host->mmc->parent,
 636			"nvidia,pad-autocal-pull-up-offset-hs400",
 637			&autocal->pull_up_hs400);
 638	if (err)
 639		autocal->pull_up_hs400 = autocal->pull_up_1v8;
 640
 641	err = device_property_read_u32(host->mmc->parent,
 642			"nvidia,pad-autocal-pull-down-offset-hs400",
 643			&autocal->pull_down_hs400);
 644	if (err)
 645		autocal->pull_down_hs400 = autocal->pull_down_1v8;
 646
 647	/*
 648	 * Different fail-safe drive strength values based on the signaling
 649	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
 650	 * So, avoid reading below device tree properties for SoCs that don't
 651	 * have NVQUIRK_NEEDS_PAD_CONTROL.
 652	 */
 653	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 654		return;
 655
 656	err = device_property_read_u32(host->mmc->parent,
 657			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
 658			&autocal->pull_up_3v3_timeout);
 659	if (err) {
 660		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 661			(tegra_host->pinctrl_state_3v3_drv == NULL))
 662			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 663				mmc_hostname(host->mmc));
 664		autocal->pull_up_3v3_timeout = 0;
 665	}
 666
 667	err = device_property_read_u32(host->mmc->parent,
 668			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
 669			&autocal->pull_down_3v3_timeout);
 670	if (err) {
 671		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 672			(tegra_host->pinctrl_state_3v3_drv == NULL))
 673			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 674				mmc_hostname(host->mmc));
 675		autocal->pull_down_3v3_timeout = 0;
 676	}
 677
 678	err = device_property_read_u32(host->mmc->parent,
 679			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
 680			&autocal->pull_up_1v8_timeout);
 681	if (err) {
 682		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 683			(tegra_host->pinctrl_state_1v8_drv == NULL))
 684			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 685				mmc_hostname(host->mmc));
 686		autocal->pull_up_1v8_timeout = 0;
 687	}
 688
 689	err = device_property_read_u32(host->mmc->parent,
 690			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
 691			&autocal->pull_down_1v8_timeout);
 692	if (err) {
 693		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 694			(tegra_host->pinctrl_state_1v8_drv == NULL))
 695			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 696				mmc_hostname(host->mmc));
 697		autocal->pull_down_1v8_timeout = 0;
 698	}
 699}
 700
 701static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 702{
 703	struct sdhci_host *host = mmc_priv(mmc);
 704	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 705	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 706	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
 707
 708	/* 100 ms calibration interval is specified in the TRM */
 709	if (ktime_to_ms(since_calib) > 100) {
 710		tegra_sdhci_pad_autocalib(host);
 711		tegra_host->last_calib = ktime_get();
 712	}
 713
 714	sdhci_request(mmc, mrq);
 715}
 716
 717static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
 718{
 719	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 720	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 721	int err;
 722
 723	err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap",
 724				       &tegra_host->default_tap);
 725	if (err)
 726		tegra_host->default_tap = 0;
 727
 728	err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim",
 729				       &tegra_host->default_trim);
 730	if (err)
 731		tegra_host->default_trim = 0;
 732
 733	err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim",
 734				       &tegra_host->dqs_trim);
 735	if (err)
 736		tegra_host->dqs_trim = 0x11;
 737}
 738
 739static void tegra_sdhci_parse_dt(struct sdhci_host *host)
 740{
 741	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 742	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 743
 744	if (device_property_read_bool(host->mmc->parent, "supports-cqe"))
 745		tegra_host->enable_hwcq = true;
 746	else
 747		tegra_host->enable_hwcq = false;
 748
 749	tegra_sdhci_parse_pad_autocal_dt(host);
 750	tegra_sdhci_parse_tap_and_trim(host);
 751}
 752
 753static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 754{
 755	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 756	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 
 757	unsigned long host_clk;
 
 758
 759	if (!clock)
 760		return sdhci_set_clock(host, clock);
 761
 762	/*
 763	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
 764	 * divider to be configured to divided the host clock by two. The SDHCI
 765	 * clock divider is calculated as part of sdhci_set_clock() by
 766	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
 767	 * the requested clock rate.
 768	 *
 769	 * By setting the host->max_clk to clock * 2 the divider calculation
 770	 * will always result in the correct value for DDR50/52 modes,
 771	 * regardless of clock rate rounding, which may happen if the value
 772	 * from clk_get_rate() is used.
 773	 */
 774	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
 775	clk_set_rate(pltfm_host->clk, host_clk);
 776	tegra_host->curr_clk_rate = host_clk;
 
 
 
 
 
 777	if (tegra_host->ddr_signaling)
 778		host->max_clk = host_clk;
 779	else
 780		host->max_clk = clk_get_rate(pltfm_host->clk);
 781
 782	sdhci_set_clock(host, clock);
 783
 784	if (tegra_host->pad_calib_required) {
 785		tegra_sdhci_pad_autocalib(host);
 786		tegra_host->pad_calib_required = false;
 787	}
 788}
 789
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 790static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
 791{
 792	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 793
 794	return clk_round_rate(pltfm_host->clk, UINT_MAX);
 795}
 796
 797static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
 798{
 799	u32 val;
 800
 801	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 802	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
 803	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
 804	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 805}
 806
 807static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
 808{
 809	u32 reg;
 810	int err;
 811
 812	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 813	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
 814	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 815
 816	/* 1 ms sleep, 5 ms timeout */
 817	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
 818				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
 819				 1000, 5000);
 820	if (err)
 821		dev_err(mmc_dev(host->mmc),
 822			"HS400 delay line calibration timed out\n");
 823}
 824
 825static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
 826				       u8 thd_low, u8 fixed_tap)
 827{
 828	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 829	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 830	u32 val, tun_status;
 831	u8 word, bit, edge1, tap, window;
 832	bool tap_result;
 833	bool start_fail = false;
 834	bool start_pass = false;
 835	bool end_pass = false;
 836	bool first_fail = false;
 837	bool first_pass = false;
 838	u8 start_pass_tap = 0;
 839	u8 end_pass_tap = 0;
 840	u8 first_fail_tap = 0;
 841	u8 first_pass_tap = 0;
 842	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
 843
 844	/*
 845	 * Read auto-tuned results and extract good valid passing window by
 846	 * filtering out un-wanted bubble/partial/merged windows.
 847	 */
 848	for (word = 0; word < total_tuning_words; word++) {
 849		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
 850		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
 851		val |= word;
 852		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
 853		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
 854		bit = 0;
 855		while (bit < TUNING_WORD_BIT_SIZE) {
 856			tap = word * TUNING_WORD_BIT_SIZE + bit;
 857			tap_result = tun_status & (1 << bit);
 858			if (!tap_result && !start_fail) {
 859				start_fail = true;
 860				if (!first_fail) {
 861					first_fail_tap = tap;
 862					first_fail = true;
 863				}
 864
 865			} else if (tap_result && start_fail && !start_pass) {
 866				start_pass_tap = tap;
 867				start_pass = true;
 868				if (!first_pass) {
 869					first_pass_tap = tap;
 870					first_pass = true;
 871				}
 872
 873			} else if (!tap_result && start_fail && start_pass &&
 874				   !end_pass) {
 875				end_pass_tap = tap - 1;
 876				end_pass = true;
 877			} else if (tap_result && start_pass && start_fail &&
 878				   end_pass) {
 879				window = end_pass_tap - start_pass_tap;
 880				/* discard merged window and bubble window */
 881				if (window >= thd_up || window < thd_low) {
 882					start_pass_tap = tap;
 883					end_pass = false;
 884				} else {
 885					/* set tap at middle of valid window */
 886					tap = start_pass_tap + window / 2;
 887					tegra_host->tuned_tap_delay = tap;
 888					return;
 889				}
 890			}
 891
 892			bit++;
 893		}
 894	}
 895
 896	if (!first_fail) {
 897		WARN(1, "no edge detected, continue with hw tuned delay.\n");
 898	} else if (first_pass) {
 899		/* set tap location at fixed tap relative to the first edge */
 900		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
 901		if (edge1 - 1 > fixed_tap)
 902			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
 903		else
 904			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
 905	}
 906}
 907
 908static void tegra_sdhci_post_tuning(struct sdhci_host *host)
 909{
 910	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 911	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 912	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 913	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
 914	u8 fixed_tap, start_tap, end_tap, window_width;
 915	u8 thdupper, thdlower;
 916	u8 num_iter;
 917	u32 clk_rate_mhz, period_ps, bestcase, worstcase;
 918
 919	/* retain HW tuned tap to use incase if no correction is needed */
 920	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 921	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
 922				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
 923	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
 924		min_tap_dly = soc_data->min_tap_delay;
 925		max_tap_dly = soc_data->max_tap_delay;
 926		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
 927		period_ps = USEC_PER_SEC / clk_rate_mhz;
 928		bestcase = period_ps / min_tap_dly;
 929		worstcase = period_ps / max_tap_dly;
 930		/*
 931		 * Upper and Lower bound thresholds used to detect merged and
 932		 * bubble windows
 933		 */
 934		thdupper = (2 * worstcase + bestcase) / 2;
 935		thdlower = worstcase / 4;
 936		/*
 937		 * fixed tap is used when HW tuning result contains single edge
 938		 * and tap is set at fixed tap delay relative to the first edge
 939		 */
 940		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
 941		fixed_tap = avg_tap_dly / 2;
 942
 943		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
 944		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 945		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
 946			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 947		window_width = end_tap - start_tap;
 948		num_iter = host->tuning_loop_count;
 949		/*
 950		 * partial window includes edges of the tuning range.
 951		 * merged window includes more taps so window width is higher
 952		 * than upper threshold.
 953		 */
 954		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
 955		    (end_tap == num_iter - 2) || window_width >= thdupper) {
 956			pr_debug("%s: Apply tuning correction\n",
 957				 mmc_hostname(host->mmc));
 958			tegra_sdhci_tap_correction(host, thdupper, thdlower,
 959						   fixed_tap);
 960		}
 961	}
 962
 963	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
 964}
 965
 966static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
 967{
 968	struct sdhci_host *host = mmc_priv(mmc);
 969	int err;
 970
 971	err = sdhci_execute_tuning(mmc, opcode);
 972	if (!err && !host->tuning_err)
 973		tegra_sdhci_post_tuning(host);
 974
 975	return err;
 976}
 977
 978static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
 979					  unsigned timing)
 980{
 981	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 982	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 983	bool set_default_tap = false;
 984	bool set_dqs_trim = false;
 985	bool do_hs400_dll_cal = false;
 986	u8 iter = TRIES_256;
 987	u32 val;
 988
 989	tegra_host->ddr_signaling = false;
 990	switch (timing) {
 991	case MMC_TIMING_UHS_SDR50:
 992		break;
 993	case MMC_TIMING_UHS_SDR104:
 994	case MMC_TIMING_MMC_HS200:
 995		/* Don't set default tap on tunable modes. */
 996		iter = TRIES_128;
 997		break;
 998	case MMC_TIMING_MMC_HS400:
 999		set_dqs_trim = true;
1000		do_hs400_dll_cal = true;
1001		iter = TRIES_128;
1002		break;
1003	case MMC_TIMING_MMC_DDR52:
1004	case MMC_TIMING_UHS_DDR50:
1005		tegra_host->ddr_signaling = true;
1006		set_default_tap = true;
1007		break;
1008	default:
1009		set_default_tap = true;
1010		break;
1011	}
1012
1013	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1014	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
1015		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1016		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1017	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1018		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1019		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1020	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1021	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1022
1023	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1024
1025	sdhci_set_uhs_signaling(host, timing);
1026
1027	tegra_sdhci_pad_autocalib(host);
1028
1029	if (tegra_host->tuned_tap_delay && !set_default_tap)
1030		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1031	else
1032		tegra_sdhci_set_tap(host, tegra_host->default_tap);
1033
1034	if (set_dqs_trim)
1035		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1036
1037	if (do_hs400_dll_cal)
1038		tegra_sdhci_hs400_dll_cal(host);
1039}
1040
1041static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1042{
1043	unsigned int min, max;
1044
1045	/*
1046	 * Start search for minimum tap value at 10, as smaller values are
1047	 * may wrongly be reported as working but fail at higher speeds,
1048	 * according to the TRM.
1049	 */
1050	min = 10;
1051	while (min < 255) {
1052		tegra_sdhci_set_tap(host, min);
1053		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1054			break;
1055		min++;
1056	}
1057
1058	/* Find the maximum tap value that still passes. */
1059	max = min + 1;
1060	while (max < 255) {
1061		tegra_sdhci_set_tap(host, max);
1062		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1063			max--;
1064			break;
1065		}
1066		max++;
1067	}
1068
1069	/* The TRM states the ideal tap value is at 75% in the passing range. */
1070	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1071
1072	return mmc_send_tuning(host->mmc, opcode, NULL);
1073}
1074
1075static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1076						   struct mmc_ios *ios)
1077{
1078	struct sdhci_host *host = mmc_priv(mmc);
1079	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1080	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1081	int ret = 0;
1082
1083	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1084		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1085		if (ret < 0)
1086			return ret;
1087		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1088	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1089		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1090		if (ret < 0)
1091			return ret;
1092		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1093	}
1094
1095	if (tegra_host->pad_calib_required)
1096		tegra_sdhci_pad_autocalib(host);
1097
1098	return ret;
1099}
1100
1101static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1102					 struct sdhci_tegra *tegra_host)
1103{
1104	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1105	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1106		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1107			PTR_ERR(tegra_host->pinctrl_sdmmc));
1108		return -1;
1109	}
1110
1111	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1112				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1113	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1114		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1115			tegra_host->pinctrl_state_1v8_drv = NULL;
1116	}
1117
1118	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1119				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1120	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1121		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1122			tegra_host->pinctrl_state_3v3_drv = NULL;
1123	}
1124
1125	tegra_host->pinctrl_state_3v3 =
1126		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1127	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1128		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1129			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1130		return -1;
1131	}
1132
1133	tegra_host->pinctrl_state_1v8 =
1134		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1135	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1136		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1137			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1138		return -1;
1139	}
1140
1141	tegra_host->pad_control_available = true;
1142
1143	return 0;
1144}
1145
1146static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1147{
1148	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1149	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1150	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1151
1152	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1153		tegra_host->pad_calib_required = true;
1154}
1155
1156static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1157{
1158	struct mmc_host *mmc = cq_host->mmc;
 
1159	u8 ctrl;
1160	ktime_t timeout;
1161	bool timed_out;
1162
1163	/*
1164	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
1165	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
1166	 * to be re-configured.
1167	 * Tegra CQHCI/SDHCI prevents write access to block size register when
1168	 * CQE is unhalted. So handling CQE resume sequence here to configure
1169	 * SDHCI block registers prior to exiting CQE halt state.
1170	 */
1171	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1172	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
 
1173		sdhci_cqe_enable(mmc);
1174		writel(val, cq_host->mmio + reg);
1175		timeout = ktime_add_us(ktime_get(), 50);
1176		while (1) {
1177			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1178			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1179			if (!(ctrl & CQHCI_HALT) || timed_out)
1180				break;
1181		}
1182		/*
1183		 * CQE usually resumes very quick, but incase if Tegra CQE
1184		 * doesn't resume retry unhalt.
1185		 */
1186		if (timed_out)
1187			writel(val, cq_host->mmio + reg);
1188	} else {
1189		writel(val, cq_host->mmio + reg);
1190	}
1191}
1192
1193static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1194					 struct mmc_request *mrq, u64 *data)
1195{
1196	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1197	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1198	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1199
1200	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1201	    mrq->cmd->flags & MMC_RSP_R1B)
1202		*data |= CQHCI_CMD_TIMING(1);
1203}
1204
1205static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1206{
1207	struct cqhci_host *cq_host = mmc->cqe_private;
 
1208	u32 val;
1209
1210	/*
1211	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
1212	 * register when CQE is enabled and unhalted.
1213	 * CQHCI driver enables CQE prior to activation, so disable CQE before
1214	 * programming block size in sdhci controller and enable it back.
1215	 */
1216	if (!cq_host->activated) {
1217		val = cqhci_readl(cq_host, CQHCI_CFG);
1218		if (val & CQHCI_ENABLE)
1219			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1220				     CQHCI_CFG);
 
1221		sdhci_cqe_enable(mmc);
1222		if (val & CQHCI_ENABLE)
1223			cqhci_writel(cq_host, val, CQHCI_CFG);
1224	}
1225
1226	/*
1227	 * CMD CRC errors are seen sometimes with some eMMC devices when status
1228	 * command is sent during transfer of last data block which is the
1229	 * default case as send status command block counter (CBC) is 1.
1230	 * Recommended fix to set CBC to 0 allowing send status command only
1231	 * when data lines are idle.
1232	 */
1233	val = cqhci_readl(cq_host, CQHCI_SSC1);
1234	val &= ~CQHCI_SSC1_CBC_MASK;
1235	cqhci_writel(cq_host, val, CQHCI_SSC1);
1236}
1237
1238static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1239{
1240	sdhci_dumpregs(mmc_priv(mmc));
1241}
1242
1243static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1244{
1245	int cmd_error = 0;
1246	int data_error = 0;
1247
1248	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1249		return intmask;
1250
1251	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1252
1253	return 0;
1254}
1255
1256static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1257				    struct mmc_command *cmd)
1258{
1259	u32 val;
1260
1261	/*
1262	 * HW busy detection timeout is based on programmed data timeout
1263	 * counter and maximum supported timeout is 11s which may not be
1264	 * enough for long operations like cache flush, sleep awake, erase.
1265	 *
1266	 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
1267	 * host controller to wait for busy state until the card is busy
1268	 * without HW timeout.
1269	 *
1270	 * So, use infinite busy wait mode for operations that may take
1271	 * more than maximum HW busy timeout of 11s otherwise use finite
1272	 * busy wait mode.
1273	 */
1274	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1275	if (cmd && cmd->busy_timeout >= 11 * HZ)
1276		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1277	else
1278		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1279	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1280
1281	__sdhci_set_timeout(host, cmd);
1282}
1283
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1284static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1285	.write_l    = tegra_cqhci_writel,
1286	.enable	= sdhci_tegra_cqe_enable,
1287	.disable = sdhci_cqe_disable,
1288	.dumpregs = sdhci_tegra_dumpregs,
1289	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
 
 
1290};
1291
1292static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1293{
1294	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1295	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1296	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1297	struct device *dev = mmc_dev(host->mmc);
1298
1299	if (soc->dma_mask)
1300		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1301
1302	return 0;
1303}
1304
1305static const struct sdhci_ops tegra_sdhci_ops = {
1306	.get_ro     = tegra_sdhci_get_ro,
1307	.read_w     = tegra_sdhci_readw,
1308	.write_l    = tegra_sdhci_writel,
1309	.set_clock  = tegra_sdhci_set_clock,
1310	.set_dma_mask = tegra_sdhci_set_dma_mask,
1311	.set_bus_width = sdhci_set_bus_width,
1312	.reset      = tegra_sdhci_reset,
1313	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1314	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1315	.voltage_switch = tegra_sdhci_voltage_switch,
1316	.get_max_clock = tegra_sdhci_get_max_clock,
1317};
1318
1319static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1320	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1321		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1322		  SDHCI_QUIRK_NO_HISPD_BIT |
1323		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1324		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1325	.ops  = &tegra_sdhci_ops,
1326};
1327
1328static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1329	.pdata = &sdhci_tegra20_pdata,
1330	.dma_mask = DMA_BIT_MASK(32),
1331	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
 
1332		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1333};
1334
1335static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1336	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1337		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1338		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1339		  SDHCI_QUIRK_NO_HISPD_BIT |
1340		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1341		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1342	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1343		   SDHCI_QUIRK2_BROKEN_HS200 |
1344		   /*
1345		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1346		    * though no command operation was in progress."
1347		    *
1348		    * The exact reason is unknown, as the same hardware seems
1349		    * to support Auto CMD23 on a downstream 3.1 kernel.
1350		    */
1351		   SDHCI_QUIRK2_ACMD23_BROKEN,
1352	.ops  = &tegra_sdhci_ops,
1353};
1354
1355static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1356	.pdata = &sdhci_tegra30_pdata,
1357	.dma_mask = DMA_BIT_MASK(32),
1358	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1359		    NVQUIRK_ENABLE_SDR50 |
1360		    NVQUIRK_ENABLE_SDR104 |
 
1361		    NVQUIRK_HAS_PADCALIB,
1362};
1363
1364static const struct sdhci_ops tegra114_sdhci_ops = {
1365	.get_ro     = tegra_sdhci_get_ro,
1366	.read_w     = tegra_sdhci_readw,
1367	.write_w    = tegra_sdhci_writew,
1368	.write_l    = tegra_sdhci_writel,
1369	.set_clock  = tegra_sdhci_set_clock,
1370	.set_dma_mask = tegra_sdhci_set_dma_mask,
1371	.set_bus_width = sdhci_set_bus_width,
1372	.reset      = tegra_sdhci_reset,
1373	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1374	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1375	.voltage_switch = tegra_sdhci_voltage_switch,
1376	.get_max_clock = tegra_sdhci_get_max_clock,
1377};
1378
1379static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1380	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1381		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1382		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1383		  SDHCI_QUIRK_NO_HISPD_BIT |
1384		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1385		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1386	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1387	.ops  = &tegra114_sdhci_ops,
1388};
1389
1390static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1391	.pdata = &sdhci_tegra114_pdata,
1392	.dma_mask = DMA_BIT_MASK(32),
1393};
1394
1395static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1396	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1397		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1398		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1399		  SDHCI_QUIRK_NO_HISPD_BIT |
1400		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1401		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1402	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1403	.ops  = &tegra114_sdhci_ops,
1404};
1405
1406static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1407	.pdata = &sdhci_tegra124_pdata,
1408	.dma_mask = DMA_BIT_MASK(34),
1409};
1410
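/*
 * Tegra210 replaces the 16-bit write hook with tegra210_sdhci_writew and
 * drops .platform_execute_tuning; tuning is instead handled through the
 * mmc_host_ops.execute_tuning hook installed in sdhci_tegra_probe().
 */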
1411static const struct sdhci_ops tegra210_sdhci_ops = {
1412	.get_ro     = tegra_sdhci_get_ro,
1413	.read_w     = tegra_sdhci_readw,
1414	.write_w    = tegra210_sdhci_writew,
1415	.write_l    = tegra_sdhci_writel,
1416	.set_clock  = tegra_sdhci_set_clock,
1417	.set_dma_mask = tegra_sdhci_set_dma_mask,
1418	.set_bus_width = sdhci_set_bus_width,
1419	.reset      = tegra_sdhci_reset,
1420	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1421	.voltage_switch = tegra_sdhci_voltage_switch,
1422	.get_max_clock = tegra_sdhci_get_max_clock,
1423	.set_timeout = tegra_sdhci_set_timeout,
1424};
1425
1426static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1427	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1428		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1429		  SDHCI_QUIRK_NO_HISPD_BIT |
1430		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1431		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1432	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1433	.ops  = &tegra210_sdhci_ops,
1434};
1435
1436static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1437	.pdata = &sdhci_tegra210_pdata,
1438	.dma_mask = DMA_BIT_MASK(34),
1439	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1440		    NVQUIRK_HAS_PADCALIB |
1441		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1442		    NVQUIRK_ENABLE_SDR50 |
1443		    NVQUIRK_ENABLE_SDR104 |
1444		    NVQUIRK_HAS_TMCLK,
1445	.min_tap_delay = 106,
1446	.max_tap_delay = 185,
1447};
1448
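/*
 * Tegra186 and Tegra194 share this ops table; it adds the CQHCI interrupt
 * handler used when command queueing is enabled.
 */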
1449static const struct sdhci_ops tegra186_sdhci_ops = {
1450	.get_ro     = tegra_sdhci_get_ro,
1451	.read_w     = tegra_sdhci_readw,
1452	.write_l    = tegra_sdhci_writel,
1453	.set_clock  = tegra_sdhci_set_clock,
1454	.set_dma_mask = tegra_sdhci_set_dma_mask,
1455	.set_bus_width = sdhci_set_bus_width,
1456	.reset      = tegra_sdhci_reset,
1457	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1458	.voltage_switch = tegra_sdhci_voltage_switch,
1459	.get_max_clock = tegra_sdhci_get_max_clock,
1460	.irq = sdhci_tegra_cqhci_irq,
1461	.set_timeout = tegra_sdhci_set_timeout,
1462};
1463
1464static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1465	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1466		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1467		  SDHCI_QUIRK_NO_HISPD_BIT |
1468		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1469		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1470	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1471	.ops  = &tegra186_sdhci_ops,
1472};
1473
1474static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1475	.pdata = &sdhci_tegra186_pdata,
1476	.dma_mask = DMA_BIT_MASK(40),
1477	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1478		    NVQUIRK_HAS_PADCALIB |
1479		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1480		    NVQUIRK_ENABLE_SDR50 |
1481		    NVQUIRK_ENABLE_SDR104 |
1482		    NVQUIRK_HAS_TMCLK |
1483		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1484	.min_tap_delay = 84,
1485	.max_tap_delay = 136,
1486};
1487
1488static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1489	.pdata = &sdhci_tegra186_pdata,
1490	.dma_mask = DMA_BIT_MASK(39),
1491	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1492		    NVQUIRK_HAS_PADCALIB |
1493		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1494		    NVQUIRK_ENABLE_SDR50 |
1495		    NVQUIRK_ENABLE_SDR104 |
1496		    NVQUIRK_HAS_TMCLK,
1497	.min_tap_delay = 96,
1498	.max_tap_delay = 139,
1499};
1500
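/*
 * Device tree match table, ordered from newest to oldest SoC; probe uses
 * the matching entry to pick the per-SoC configuration above.
 */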
1501static const struct of_device_id sdhci_tegra_dt_match[] = {
1502	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1503	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1504	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1505	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1506	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1507	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1508	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1509	{}
1510};
1511MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1512
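/*
 * Register the SDHCI host. When hardware command queueing is enabled
 * (enable_hwcq), switch the controller to v4 mode and set up the CQHCI
 * engine at SDHCI_TEGRA_CQE_BASE_ADDR before completing registration;
 * otherwise fall back to plain sdhci_add_host().
 */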
1513static int sdhci_tegra_add_host(struct sdhci_host *host)
1514{
1515	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1516	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1517	struct cqhci_host *cq_host;
1518	bool dma64;
1519	int ret;
1520
1521	if (!tegra_host->enable_hwcq)
1522		return sdhci_add_host(host);
1523
1524	sdhci_enable_v4_mode(host);
1525
1526	ret = sdhci_setup_host(host);
1527	if (ret)
1528		return ret;
1529
1530	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1531
1532	cq_host = devm_kzalloc(host->mmc->parent,
1533				sizeof(*cq_host), GFP_KERNEL);
1534	if (!cq_host) {
1535		ret = -ENOMEM;
1536		goto cleanup;
1537	}
1538
1539	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1540	cq_host->ops = &sdhci_tegra_cqhci_ops;
1541
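	/* With 64-bit DMA, use the larger CQE task descriptor format. */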
1542	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1543	if (dma64)
1544		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1545
1546	ret = cqhci_init(cq_host, host->mmc, dma64);
1547	if (ret)
1548		goto cleanup;
1549
1550	ret = __sdhci_add_host(host);
1551	if (ret)
1552		goto cleanup;
1553
1554	return 0;
1555
1556cleanup:
1557	sdhci_cleanup_host(host);
1558	return ret;
1559}
1560
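/*
 * Probe: look up the per-SoC data from the device tree match, set up the
 * platform host, wire up optional pad control and pad calibration hooks,
 * parse DT properties, and acquire the power GPIO, clocks and reset
 * control before cycling the controller through reset and registering it.
 */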
1561static int sdhci_tegra_probe(struct platform_device *pdev)
1562{
1563	const struct of_device_id *match;
1564	const struct sdhci_tegra_soc_data *soc_data;
1565	struct sdhci_host *host;
1566	struct sdhci_pltfm_host *pltfm_host;
1567	struct sdhci_tegra *tegra_host;
1568	struct clk *clk;
1569	int rc;
1570
1571	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
1572	if (!match)
1573		return -EINVAL;
1574	soc_data = match->data;
1575
1576	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1577	if (IS_ERR(host))
1578		return PTR_ERR(host);
1579	pltfm_host = sdhci_priv(host);
1580
1581	tegra_host = sdhci_pltfm_priv(pltfm_host);
1582	tegra_host->ddr_signaling = false;
1583	tegra_host->pad_calib_required = false;
1584	tegra_host->pad_control_available = false;
1585	tegra_host->soc_data = soc_data;
1586
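	/*
	 * Only install the custom signal voltage switch handler when the
	 * pad control pinctrl information could be set up successfully.
	 */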
1587	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1588		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1589		if (rc == 0)
1590			host->mmc_host_ops.start_signal_voltage_switch =
1591				sdhci_tegra_start_signal_voltage_switch;
1592	}
1593
1594	/* Hook to periodically rerun pad calibration */
1595	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1596		host->mmc_host_ops.request = tegra_sdhci_request;
1597
1598	host->mmc_host_ops.hs400_enhanced_strobe =
1599			tegra_sdhci_hs400_enhanced_strobe;
1600
1601	if (!host->ops->platform_execute_tuning)
1602		host->mmc_host_ops.execute_tuning =
1603				tegra_sdhci_execute_hw_tuning;
1604
1605	rc = mmc_of_parse(host->mmc);
1606	if (rc)
1607		goto err_parse_dt;
1608
1609	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1610		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1611
1612	/* HW busy detection is supported, but R1B responses are required. */
1613	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1614
1615	tegra_sdhci_parse_dt(host);
1616
1617	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1618							 GPIOD_OUT_HIGH);
1619	if (IS_ERR(tegra_host->power_gpio)) {
1620		rc = PTR_ERR(tegra_host->power_gpio);
1621		goto err_power_req;
1622	}
1623
1624	/*
1625	 * Tegra210 has a separate SDMMC_LEGACY_TM clock that serves as the
1626	 * host timeout clock; software selects either TMCLK or SDCLK for the
1627	 * hardware data timeout via the USE_TMCLK_FOR_DATA_TIMEOUT bit of the
1628	 * SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
1629	 *
1630	 * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, so SDMMC uses the
1631	 * 12 MHz TMCLK, which is advertised in the host capability register.
1632	 * A 12 MHz TMCLK allows a maximum data timeout period of about 11 s,
1633	 * which is better than what can be achieved with SDCLK.
1634	 *
1635	 * TMCLK is therefore set to 12 MHz and kept enabled at all times on
1636	 * SoCs that support a separate TMCLK.
1637	 */
1638
1639	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1640		clk = devm_clk_get(&pdev->dev, "tmclk");
1641		if (IS_ERR(clk)) {
1642			rc = PTR_ERR(clk);
1643			if (rc == -EPROBE_DEFER)
1644				goto err_power_req;
1645
1646			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
1647			clk = NULL;
1648		}
1649
1650		clk_set_rate(clk, 12000000);
1651		rc = clk_prepare_enable(clk);
1652		if (rc) {
1653			dev_err(&pdev->dev,
1654				"failed to enable tmclk: %d\n", rc);
1655			goto err_power_req;
1656		}
1657
1658		tegra_host->tmclk = clk;
1659	}
1660
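	/* The module clock is required; bail out (or defer) if it is missing. */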
1661	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1662	if (IS_ERR(clk)) {
1663		rc = PTR_ERR(clk);
1664
1665		if (rc != -EPROBE_DEFER)
1666			dev_err(&pdev->dev, "failed to get clock: %d\n", rc);
1667
1668		goto err_clk_get;
1669	}
1670	clk_prepare_enable(clk);
1671	pltfm_host->clk = clk;
1672
1673	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1674							   "sdhci");
1675	if (IS_ERR(tegra_host->rst)) {
1676		rc = PTR_ERR(tegra_host->rst);
1677		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1678		goto err_rst_get;
1679	}
1680
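	/*
	 * Cycle the controller through reset and give it time to settle
	 * before registering the host.
	 */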
1681	rc = reset_control_assert(tegra_host->rst);
1682	if (rc)
1683		goto err_rst_get;
1684
1685	usleep_range(2000, 4000);
1686
1687	rc = reset_control_deassert(tegra_host->rst);
1688	if (rc)
1689		goto err_rst_get;
1690
1691	usleep_range(2000, 4000);
1692
1693	rc = sdhci_tegra_add_host(host);
1694	if (rc)
1695		goto err_add_host;
1696
1697	return 0;
1698
1699err_add_host:
1700	reset_control_assert(tegra_host->rst);
1701err_rst_get:
1702	clk_disable_unprepare(pltfm_host->clk);
1703err_clk_get:
1704	clk_disable_unprepare(tegra_host->tmclk);
1705err_power_req:
1706err_parse_dt:
1707	sdhci_pltfm_free(pdev);
1708	return rc;
1709}
1710
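/*
 * Tear down in reverse order of probe: unregister the host, put the
 * controller back into reset and disable its clocks before freeing the
 * platform data.
 */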
1711static int sdhci_tegra_remove(struct platform_device *pdev)
1712{
1713	struct sdhci_host *host = platform_get_drvdata(pdev);
1714	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1715	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1716
1717	sdhci_remove_host(host, 0);
1718
1719	reset_control_assert(tegra_host->rst);
1720	usleep_range(2000, 4000);
1721	clk_disable_unprepare(pltfm_host->clk);
1722	clk_disable_unprepare(tegra_host->tmclk);
1723
1724	sdhci_pltfm_free(pdev);
1725
1726	return 0;
1727}
1728
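/*
 * System sleep: quiesce the CQE (when command queueing is in use) and the
 * SDHCI host, then gate the module clock; resume restores them in the
 * opposite order.
 */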
1729#ifdef CONFIG_PM_SLEEP
1730static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
1731{
1732	struct sdhci_host *host = dev_get_drvdata(dev);
1733	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1734	int ret;
1735
1736	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1737		ret = cqhci_suspend(host->mmc);
1738		if (ret)
1739			return ret;
1740	}
1741
1742	ret = sdhci_suspend_host(host);
1743	if (ret) {
1744		cqhci_resume(host->mmc);
1745		return ret;
1746	}
1747
1748	clk_disable_unprepare(pltfm_host->clk);
1749	return 0;
1750}
1751
1752static int __maybe_unused sdhci_tegra_resume(struct device *dev)
1753{
1754	struct sdhci_host *host = dev_get_drvdata(dev);
1755	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1756	int ret;
1757
1758	ret = clk_prepare_enable(pltfm_host->clk);
1759	if (ret)
1760		return ret;
1761
1762	ret = sdhci_resume_host(host);
1763	if (ret)
1764		goto disable_clk;
1765
1766	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1767		ret = cqhci_resume(host->mmc);
1768		if (ret)
1769			goto suspend_host;
1770	}
1771
1772	return 0;
1773
1774suspend_host:
1775	sdhci_suspend_host(host);
1776disable_clk:
1777	clk_disable_unprepare(pltfm_host->clk);
1778	return ret;
1779}
1780#endif
1781
1782static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
1783			 sdhci_tegra_resume);
1784
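/* Platform driver glue, matched against sdhci_tegra_dt_match above. */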
1785static struct platform_driver sdhci_tegra_driver = {
1786	.driver		= {
1787		.name	= "sdhci-tegra",
1788		.of_match_table = sdhci_tegra_dt_match,
1789		.pm	= &sdhci_tegra_dev_pm_ops,
1790	},
1791	.probe		= sdhci_tegra_probe,
1792	.remove		= sdhci_tegra_remove,
1793};
1794
1795module_platform_driver(sdhci_tegra_driver);
1796
1797MODULE_DESCRIPTION("SDHCI driver for Tegra");
1798MODULE_AUTHOR("Google, Inc.");
1799MODULE_LICENSE("GPL v2");