Linux Audio

Check our new training course

Loading...
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2010 Google, Inc.
   4 */
   5
   6#include <linux/bitfield.h>
   7#include <linux/clk.h>
   8#include <linux/delay.h>
   9#include <linux/dma-mapping.h>
  10#include <linux/err.h>
  11#include <linux/gpio/consumer.h>
  12#include <linux/init.h>
  13#include <linux/io.h>
  14#include <linux/iommu.h>
  15#include <linux/iopoll.h>
  16#include <linux/ktime.h>
  17#include <linux/mmc/card.h>
  18#include <linux/mmc/host.h>
  19#include <linux/mmc/mmc.h>
  20#include <linux/mmc/slot-gpio.h>
  21#include <linux/module.h>
  22#include <linux/of_device.h>
  23#include <linux/of.h>
  24#include <linux/pinctrl/consumer.h>
  25#include <linux/platform_device.h>
  26#include <linux/pm_opp.h>
  27#include <linux/pm_runtime.h>
  28#include <linux/regulator/consumer.h>
  29#include <linux/reset.h>
  30
  31#include <soc/tegra/common.h>
  32
  33#include "sdhci-cqhci.h"
  34#include "sdhci-pltfm.h"
  35#include "cqhci.h"
  36
  37/* Tegra SDHOST controller vendor register definitions */
  38#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
  39#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
  40#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
  41#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
  42#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
  43#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
  44#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
  45#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
  46
  47#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
  48#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
  49
  50#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
  51#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
  52#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
  53
  54#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
  55#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
  56#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
  57#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
  58#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
  59#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
  60
  61#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
  62#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
  63
  64#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
  65#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
  66
  67#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
  68#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
  69#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
  70#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
  71#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
  72#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
  73#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
  74#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
  75#define TRIES_128					2
  76#define TRIES_256					4
  77#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
  78
  79#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
  80#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
  81#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
  82#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
  83#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
  84#define TUNING_WORD_BIT_SIZE				32
  85
  86#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
  87#define SDHCI_AUTO_CAL_START				BIT(31)
  88#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
  89#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
  90
  91#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
  92#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
  93#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
  94#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
  95#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
  96
  97#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
  98#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
  99
 100#define SDHCI_TEGRA_CIF2AXI_CTRL_0			0x1fc
 101
 102#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
 103#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
 104#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
 105#define NVQUIRK_ENABLE_SDR50				BIT(3)
 106#define NVQUIRK_ENABLE_SDR104				BIT(4)
 107#define NVQUIRK_ENABLE_DDR50				BIT(5)
 108/*
 109 * HAS_PADCALIB NVQUIRK is for SoC's supporting auto calibration of pads
 110 * drive strength.
 111 */
 112#define NVQUIRK_HAS_PADCALIB				BIT(6)
 113/*
 114 * NEEDS_PAD_CONTROL NVQUIRK is for SoC's having separate 3V3 and 1V8 pads.
 115 * 3V3/1V8 pad selection happens through pinctrl state selection depending
 116 * on the signaling mode.
 117 */
 118#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
 119#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
 120#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
 121
 122/*
 123 * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra
 124 * SDMMC hardware data timeout.
 125 */
 126#define NVQUIRK_HAS_TMCLK				BIT(10)
 127
 128#define NVQUIRK_HAS_ANDROID_GPT_SECTOR			BIT(11)
 129#define NVQUIRK_PROGRAM_STREAMID			BIT(12)
 130
 131/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
 132#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
 133
 134#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
 135					 SDHCI_TRNS_BLK_CNT_EN | \
 136					 SDHCI_TRNS_DMA)
 137
 138struct sdhci_tegra_soc_data {
 139	const struct sdhci_pltfm_data *pdata;
 140	u64 dma_mask;
 141	u32 nvquirks;
 142	u8 min_tap_delay;
 143	u8 max_tap_delay;
 144};
 145
 146/* Magic pull up and pull down pad calibration offsets */
 147struct sdhci_tegra_autocal_offsets {
 148	u32 pull_up_3v3;
 149	u32 pull_down_3v3;
 150	u32 pull_up_3v3_timeout;
 151	u32 pull_down_3v3_timeout;
 152	u32 pull_up_1v8;
 153	u32 pull_down_1v8;
 154	u32 pull_up_1v8_timeout;
 155	u32 pull_down_1v8_timeout;
 156	u32 pull_up_sdr104;
 157	u32 pull_down_sdr104;
 158	u32 pull_up_hs400;
 159	u32 pull_down_hs400;
 160};
 161
 162struct sdhci_tegra {
 163	const struct sdhci_tegra_soc_data *soc_data;
 164	struct gpio_desc *power_gpio;
 165	struct clk *tmclk;
 166	bool ddr_signaling;
 167	bool pad_calib_required;
 168	bool pad_control_available;
 169
 170	struct reset_control *rst;
 171	struct pinctrl *pinctrl_sdmmc;
 172	struct pinctrl_state *pinctrl_state_3v3;
 173	struct pinctrl_state *pinctrl_state_1v8;
 174	struct pinctrl_state *pinctrl_state_3v3_drv;
 175	struct pinctrl_state *pinctrl_state_1v8_drv;
 176
 177	struct sdhci_tegra_autocal_offsets autocal_offsets;
 178	ktime_t last_calib;
 179
 180	u32 default_tap;
 181	u32 default_trim;
 182	u32 dqs_trim;
 183	bool enable_hwcq;
 184	unsigned long curr_clk_rate;
 185	u8 tuned_tap_delay;
 186	u32 stream_id;
 187};
 188
 189static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
 190{
 191	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 192	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 193	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 194
 195	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
 196			(reg == SDHCI_HOST_VERSION))) {
 197		/* Erratum: Version register is invalid in HW. */
 198		return SDHCI_SPEC_200;
 199	}
 200
 201	return readw(host->ioaddr + reg);
 202}
 203
 204static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 205{
 206	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 207
 208	switch (reg) {
 209	case SDHCI_TRANSFER_MODE:
 210		/*
 211		 * Postpone this write, we must do it together with a
 212		 * command write that is down below.
 213		 */
 214		pltfm_host->xfer_mode_shadow = val;
 215		return;
 216	case SDHCI_COMMAND:
 217		writel((val << 16) | pltfm_host->xfer_mode_shadow,
 218			host->ioaddr + SDHCI_TRANSFER_MODE);
 219		return;
 220	}
 221
 222	writew(val, host->ioaddr + reg);
 223}
 224
 225static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
 226{
 227	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 228	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 229	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 230
 231	/* Seems like we're getting spurious timeout and crc errors, so
 232	 * disable signalling of them. In case of real errors software
 233	 * timers should take care of eventually detecting them.
 234	 */
 235	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
 236		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
 237
 238	writel(val, host->ioaddr + reg);
 239
 240	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
 241			(reg == SDHCI_INT_ENABLE))) {
 242		/* Erratum: Must enable block gap interrupt detection */
 243		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 244		if (val & SDHCI_INT_CARD_INT)
 245			gap_ctrl |= 0x8;
 246		else
 247			gap_ctrl &= ~0x8;
 248		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 249	}
 250}
 251
 252static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
 253{
 254	bool status;
 255	u32 reg;
 256
 257	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
 258	status = !!(reg & SDHCI_CLOCK_CARD_EN);
 259
 260	if (status == enable)
 261		return status;
 262
 263	if (enable)
 264		reg |= SDHCI_CLOCK_CARD_EN;
 265	else
 266		reg &= ~SDHCI_CLOCK_CARD_EN;
 267
 268	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
 269
 270	return status;
 271}
 272
 273static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 274{
 275	bool is_tuning_cmd = 0;
 276	bool clk_enabled;
 277
 278	if (reg == SDHCI_COMMAND)
 279		is_tuning_cmd = mmc_op_tuning(SDHCI_GET_CMD(val));
 280
 281	if (is_tuning_cmd)
 282		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);
 283
 284	writew(val, host->ioaddr + reg);
 285
 286	if (is_tuning_cmd) {
 287		udelay(1);
 288		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 289		tegra_sdhci_configure_card_clk(host, clk_enabled);
 290	}
 291}
 292
 293static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
 294{
 295	/*
 296	 * Write-enable shall be assumed if GPIO is missing in a board's
 297	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
 298	 * Tegra.
 299	 */
 300	return mmc_gpio_get_ro(host->mmc);
 301}
 302
 303static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
 304{
 305	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 306	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 307	int has_1v8, has_3v3;
 308
 309	/*
 310	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
 311	 * voltage configuration in order to perform voltage switching. This
 312	 * means that valid pinctrl info is required on SDHCI instances capable
 313	 * of performing voltage switching. Whether or not an SDHCI instance is
 314	 * capable of voltage switching is determined based on the regulator.
 315	 */
 316
 317	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 318		return true;
 319
 320	if (IS_ERR(host->mmc->supply.vqmmc))
 321		return false;
 322
 323	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 324						 1700000, 1950000);
 325
 326	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 327						 2700000, 3600000);
 328
 329	if (has_1v8 == 1 && has_3v3 == 1)
 330		return tegra_host->pad_control_available;
 331
 332	/* Fixed voltage, no pad control required. */
 333	return true;
 334}
 335
 336static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
 337{
 338	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 339	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 340	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 341	bool card_clk_enabled = false;
 342	u32 reg;
 343
 344	/*
 345	 * Touching the tap values is a bit tricky on some SoC generations.
 346	 * The quirk enables a workaround for a glitch that sometimes occurs if
 347	 * the tap values are changed.
 348	 */
 349
 350	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
 351		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 352
 353	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 354	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
 355	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
 356	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 357
 358	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
 359	    card_clk_enabled) {
 360		udelay(1);
 361		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 362		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 363	}
 364}
 365
 366static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
 367{
 368	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 369	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 370	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 371	u32 misc_ctrl, clk_ctrl, pad_ctrl;
 372
 373	sdhci_and_cqhci_reset(host, mask);
 374
 375	if (!(mask & SDHCI_RESET_ALL))
 376		return;
 377
 378	tegra_sdhci_set_tap(host, tegra_host->default_tap);
 379
 380	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 381	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 382
 383	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
 384		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
 385		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
 386		       SDHCI_MISC_CTRL_ENABLE_SDR104);
 387
 388	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
 389		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
 390
 391	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
 392		/* Erratum: Enable SDHCI spec v3.00 support */
 393		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
 394			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
 395		/* Advertise UHS modes as supported by host */
 396		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 397			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
 398		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
 399			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
 400		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
 401			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
 402		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 403			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
 404	}
 405
 406	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
 407
 408	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 409	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 410
 411	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
 412		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 413		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
 414		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
 415		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 416
 417		tegra_host->pad_calib_required = true;
 418	}
 419
 420	tegra_host->ddr_signaling = false;
 421}
 422
 423static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
 424{
 425	u32 val;
 426
 427	/*
 428	 * Enable or disable the additional I/O pad used by the drive strength
 429	 * calibration process.
 430	 */
 431	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 432
 433	if (enable)
 434		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 435	else
 436		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 437
 438	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 439
 440	if (enable)
 441		usleep_range(1, 2);
 442}
 443
 444static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
 445					       u16 pdpu)
 446{
 447	u32 reg;
 448
 449	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 450	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
 451	reg |= pdpu;
 452	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 453}
 454
 455static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
 456				   bool state_drvupdn)
 457{
 458	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 459	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 460	struct sdhci_tegra_autocal_offsets *offsets =
 461						&tegra_host->autocal_offsets;
 462	struct pinctrl_state *pinctrl_drvupdn = NULL;
 463	int ret = 0;
 464	u8 drvup = 0, drvdn = 0;
 465	u32 reg;
 466
 467	if (!state_drvupdn) {
 468		/* PADS Drive Strength */
 469		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 470			if (tegra_host->pinctrl_state_1v8_drv) {
 471				pinctrl_drvupdn =
 472					tegra_host->pinctrl_state_1v8_drv;
 473			} else {
 474				drvup = offsets->pull_up_1v8_timeout;
 475				drvdn = offsets->pull_down_1v8_timeout;
 476			}
 477		} else {
 478			if (tegra_host->pinctrl_state_3v3_drv) {
 479				pinctrl_drvupdn =
 480					tegra_host->pinctrl_state_3v3_drv;
 481			} else {
 482				drvup = offsets->pull_up_3v3_timeout;
 483				drvdn = offsets->pull_down_3v3_timeout;
 484			}
 485		}
 486
 487		if (pinctrl_drvupdn != NULL) {
 488			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 489							pinctrl_drvupdn);
 490			if (ret < 0)
 491				dev_err(mmc_dev(host->mmc),
 492					"failed pads drvupdn, ret: %d\n", ret);
 493		} else if ((drvup) || (drvdn)) {
 494			reg = sdhci_readl(host,
 495					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 496			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
 497			reg |= (drvup << 20) | (drvdn << 12);
 498			sdhci_writel(host, reg,
 499					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 500		}
 501
 502	} else {
 503		/* Dual Voltage PADS Voltage selection */
 504		if (!tegra_host->pad_control_available)
 505			return 0;
 506
 507		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 508			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 509						tegra_host->pinctrl_state_1v8);
 510			if (ret < 0)
 511				dev_err(mmc_dev(host->mmc),
 512					"setting 1.8V failed, ret: %d\n", ret);
 513		} else {
 514			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 515						tegra_host->pinctrl_state_3v3);
 516			if (ret < 0)
 517				dev_err(mmc_dev(host->mmc),
 518					"setting 3.3V failed, ret: %d\n", ret);
 519		}
 520	}
 521
 522	return ret;
 523}
 524
 525static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
 526{
 527	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 528	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 529	struct sdhci_tegra_autocal_offsets offsets =
 530			tegra_host->autocal_offsets;
 531	struct mmc_ios *ios = &host->mmc->ios;
 532	bool card_clk_enabled;
 533	u16 pdpu;
 534	u32 reg;
 535	int ret;
 536
 537	switch (ios->timing) {
 538	case MMC_TIMING_UHS_SDR104:
 539		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
 540		break;
 541	case MMC_TIMING_MMC_HS400:
 542		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
 543		break;
 544	default:
 545		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
 546			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
 547		else
 548			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
 549	}
 550
 551	/* Set initial offset before auto-calibration */
 552	tegra_sdhci_set_pad_autocal_offset(host, pdpu);
 553
 554	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 555
 556	tegra_sdhci_configure_cal_pad(host, true);
 557
 558	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 559	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
 560	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 561
 562	usleep_range(1, 2);
 563	/* 10 ms timeout */
 564	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
 565				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
 566				 1000, 10000);
 567
 568	tegra_sdhci_configure_cal_pad(host, false);
 569
 570	tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 571
 572	if (ret) {
 573		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
 574
 575		/* Disable automatic cal and use fixed Drive Strengths */
 576		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 577		reg &= ~SDHCI_AUTO_CAL_ENABLE;
 578		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 579
 580		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
 581		if (ret < 0)
 582			dev_err(mmc_dev(host->mmc),
 583				"Setting drive strengths failed: %d\n", ret);
 584	}
 585}
 586
 587static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
 588{
 589	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 590	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 591	struct sdhci_tegra_autocal_offsets *autocal =
 592			&tegra_host->autocal_offsets;
 593	int err;
 594
 595	err = device_property_read_u32(mmc_dev(host->mmc),
 596			"nvidia,pad-autocal-pull-up-offset-3v3",
 597			&autocal->pull_up_3v3);
 598	if (err)
 599		autocal->pull_up_3v3 = 0;
 600
 601	err = device_property_read_u32(mmc_dev(host->mmc),
 602			"nvidia,pad-autocal-pull-down-offset-3v3",
 603			&autocal->pull_down_3v3);
 604	if (err)
 605		autocal->pull_down_3v3 = 0;
 606
 607	err = device_property_read_u32(mmc_dev(host->mmc),
 608			"nvidia,pad-autocal-pull-up-offset-1v8",
 609			&autocal->pull_up_1v8);
 610	if (err)
 611		autocal->pull_up_1v8 = 0;
 612
 613	err = device_property_read_u32(mmc_dev(host->mmc),
 614			"nvidia,pad-autocal-pull-down-offset-1v8",
 615			&autocal->pull_down_1v8);
 616	if (err)
 617		autocal->pull_down_1v8 = 0;
 618
 619	err = device_property_read_u32(mmc_dev(host->mmc),
 620			"nvidia,pad-autocal-pull-up-offset-sdr104",
 621			&autocal->pull_up_sdr104);
 622	if (err)
 623		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
 624
 625	err = device_property_read_u32(mmc_dev(host->mmc),
 626			"nvidia,pad-autocal-pull-down-offset-sdr104",
 627			&autocal->pull_down_sdr104);
 628	if (err)
 629		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
 630
 631	err = device_property_read_u32(mmc_dev(host->mmc),
 632			"nvidia,pad-autocal-pull-up-offset-hs400",
 633			&autocal->pull_up_hs400);
 634	if (err)
 635		autocal->pull_up_hs400 = autocal->pull_up_1v8;
 636
 637	err = device_property_read_u32(mmc_dev(host->mmc),
 638			"nvidia,pad-autocal-pull-down-offset-hs400",
 639			&autocal->pull_down_hs400);
 640	if (err)
 641		autocal->pull_down_hs400 = autocal->pull_down_1v8;
 642
 643	/*
 644	 * Different fail-safe drive strength values based on the signaling
 645	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
 646	 * So, avoid reading below device tree properties for SoCs that don't
 647	 * have NVQUIRK_NEEDS_PAD_CONTROL.
 648	 */
 649	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 650		return;
 651
 652	err = device_property_read_u32(mmc_dev(host->mmc),
 653			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
 654			&autocal->pull_up_3v3_timeout);
 655	if (err) {
 656		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 657			(tegra_host->pinctrl_state_3v3_drv == NULL))
 658			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 659				mmc_hostname(host->mmc));
 660		autocal->pull_up_3v3_timeout = 0;
 661	}
 662
 663	err = device_property_read_u32(mmc_dev(host->mmc),
 664			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
 665			&autocal->pull_down_3v3_timeout);
 666	if (err) {
 667		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 668			(tegra_host->pinctrl_state_3v3_drv == NULL))
 669			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 670				mmc_hostname(host->mmc));
 671		autocal->pull_down_3v3_timeout = 0;
 672	}
 673
 674	err = device_property_read_u32(mmc_dev(host->mmc),
 675			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
 676			&autocal->pull_up_1v8_timeout);
 677	if (err) {
 678		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 679			(tegra_host->pinctrl_state_1v8_drv == NULL))
 680			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 681				mmc_hostname(host->mmc));
 682		autocal->pull_up_1v8_timeout = 0;
 683	}
 684
 685	err = device_property_read_u32(mmc_dev(host->mmc),
 686			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
 687			&autocal->pull_down_1v8_timeout);
 688	if (err) {
 689		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 690			(tegra_host->pinctrl_state_1v8_drv == NULL))
 691			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 692				mmc_hostname(host->mmc));
 693		autocal->pull_down_1v8_timeout = 0;
 694	}
 695}
 696
 697static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 698{
 699	struct sdhci_host *host = mmc_priv(mmc);
 700	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 701	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 702	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
 703
 704	/* 100 ms calibration interval is specified in the TRM */
 705	if (ktime_to_ms(since_calib) > 100) {
 706		tegra_sdhci_pad_autocalib(host);
 707		tegra_host->last_calib = ktime_get();
 708	}
 709
 710	sdhci_request(mmc, mrq);
 711}
 712
 713static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
 714{
 715	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 716	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 717	int err;
 718
 719	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
 720				       &tegra_host->default_tap);
 721	if (err)
 722		tegra_host->default_tap = 0;
 723
 724	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
 725				       &tegra_host->default_trim);
 726	if (err)
 727		tegra_host->default_trim = 0;
 728
 729	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
 730				       &tegra_host->dqs_trim);
 731	if (err)
 732		tegra_host->dqs_trim = 0x11;
 733}
 734
 735static void tegra_sdhci_parse_dt(struct sdhci_host *host)
 736{
 737	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 738	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 739
 740	if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
 741		tegra_host->enable_hwcq = true;
 742	else
 743		tegra_host->enable_hwcq = false;
 744
 745	tegra_sdhci_parse_pad_autocal_dt(host);
 746	tegra_sdhci_parse_tap_and_trim(host);
 747}
 748
 749static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 750{
 751	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 752	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 753	struct device *dev = mmc_dev(host->mmc);
 754	unsigned long host_clk;
 755	int err;
 756
 757	if (!clock)
 758		return sdhci_set_clock(host, clock);
 759
 760	/*
 761	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
 762	 * divider to be configured to divided the host clock by two. The SDHCI
 763	 * clock divider is calculated as part of sdhci_set_clock() by
 764	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
 765	 * the requested clock rate.
 766	 *
 767	 * By setting the host->max_clk to clock * 2 the divider calculation
 768	 * will always result in the correct value for DDR50/52 modes,
 769	 * regardless of clock rate rounding, which may happen if the value
 770	 * from clk_get_rate() is used.
 771	 */
 772	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
 773
 774	err = dev_pm_opp_set_rate(dev, host_clk);
 775	if (err)
 776		dev_err(dev, "failed to set clk rate to %luHz: %d\n",
 777			host_clk, err);
 778
 779	tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
 780	if (tegra_host->ddr_signaling)
 781		host->max_clk = host_clk;
 782	else
 783		host->max_clk = clk_get_rate(pltfm_host->clk);
 784
 785	sdhci_set_clock(host, clock);
 786
 787	if (tegra_host->pad_calib_required) {
 788		tegra_sdhci_pad_autocalib(host);
 789		tegra_host->pad_calib_required = false;
 790	}
 791}
 792
 793static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
 794					      struct mmc_ios *ios)
 795{
 796	struct sdhci_host *host = mmc_priv(mmc);
 797	u32 val;
 798
 799	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 800
 801	if (ios->enhanced_strobe) {
 802		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 803		/*
 804		 * When CMD13 is sent from mmc_select_hs400es() after
 805		 * switching to HS400ES mode, the bus is operating at
 806		 * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
 807		 * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
 808		 * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
 809		 * controller CAR clock and the interface clock are rate matched.
 810		 */
 811		tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
 812	} else {
 813		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 814	}
 815
 816	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 817}
 818
 819static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
 820{
 821	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 822
 823	return clk_round_rate(pltfm_host->clk, UINT_MAX);
 824}
 825
 826static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
 827{
 828	u32 val;
 829
 830	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 831	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
 832	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
 833	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 834}
 835
 836static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
 837{
 838	u32 reg;
 839	int err;
 840
 841	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 842	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
 843	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 844
 845	/* 1 ms sleep, 5 ms timeout */
 846	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
 847				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
 848				 1000, 5000);
 849	if (err)
 850		dev_err(mmc_dev(host->mmc),
 851			"HS400 delay line calibration timed out\n");
 852}
 853
 854static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
 855				       u8 thd_low, u8 fixed_tap)
 856{
 857	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 858	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 859	u32 val, tun_status;
 860	u8 word, bit, edge1, tap, window;
 861	bool tap_result;
 862	bool start_fail = false;
 863	bool start_pass = false;
 864	bool end_pass = false;
 865	bool first_fail = false;
 866	bool first_pass = false;
 867	u8 start_pass_tap = 0;
 868	u8 end_pass_tap = 0;
 869	u8 first_fail_tap = 0;
 870	u8 first_pass_tap = 0;
 871	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
 872
 873	/*
 874	 * Read auto-tuned results and extract good valid passing window by
 875	 * filtering out un-wanted bubble/partial/merged windows.
 876	 */
 877	for (word = 0; word < total_tuning_words; word++) {
 878		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
 879		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
 880		val |= word;
 881		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
 882		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
 883		bit = 0;
 884		while (bit < TUNING_WORD_BIT_SIZE) {
 885			tap = word * TUNING_WORD_BIT_SIZE + bit;
 886			tap_result = tun_status & (1 << bit);
 887			if (!tap_result && !start_fail) {
 888				start_fail = true;
 889				if (!first_fail) {
 890					first_fail_tap = tap;
 891					first_fail = true;
 892				}
 893
 894			} else if (tap_result && start_fail && !start_pass) {
 895				start_pass_tap = tap;
 896				start_pass = true;
 897				if (!first_pass) {
 898					first_pass_tap = tap;
 899					first_pass = true;
 900				}
 901
 902			} else if (!tap_result && start_fail && start_pass &&
 903				   !end_pass) {
 904				end_pass_tap = tap - 1;
 905				end_pass = true;
 906			} else if (tap_result && start_pass && start_fail &&
 907				   end_pass) {
 908				window = end_pass_tap - start_pass_tap;
 909				/* discard merged window and bubble window */
 910				if (window >= thd_up || window < thd_low) {
 911					start_pass_tap = tap;
 912					end_pass = false;
 913				} else {
 914					/* set tap at middle of valid window */
 915					tap = start_pass_tap + window / 2;
 916					tegra_host->tuned_tap_delay = tap;
 917					return;
 918				}
 919			}
 920
 921			bit++;
 922		}
 923	}
 924
 925	if (!first_fail) {
 926		WARN(1, "no edge detected, continue with hw tuned delay.\n");
 927	} else if (first_pass) {
 928		/* set tap location at fixed tap relative to the first edge */
 929		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
 930		if (edge1 - 1 > fixed_tap)
 931			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
 932		else
 933			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
 934	}
 935}
 936
 937static void tegra_sdhci_post_tuning(struct sdhci_host *host)
 938{
 939	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 940	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 941	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 942	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
 943	u8 fixed_tap, start_tap, end_tap, window_width;
 944	u8 thdupper, thdlower;
 945	u8 num_iter;
 946	u32 clk_rate_mhz, period_ps, bestcase, worstcase;
 947
 948	/* retain HW tuned tap to use incase if no correction is needed */
 949	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 950	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
 951				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
 952	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
 953		min_tap_dly = soc_data->min_tap_delay;
 954		max_tap_dly = soc_data->max_tap_delay;
 955		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
 956		period_ps = USEC_PER_SEC / clk_rate_mhz;
 957		bestcase = period_ps / min_tap_dly;
 958		worstcase = period_ps / max_tap_dly;
 959		/*
 960		 * Upper and Lower bound thresholds used to detect merged and
 961		 * bubble windows
 962		 */
 963		thdupper = (2 * worstcase + bestcase) / 2;
 964		thdlower = worstcase / 4;
 965		/*
 966		 * fixed tap is used when HW tuning result contains single edge
 967		 * and tap is set at fixed tap delay relative to the first edge
 968		 */
 969		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
 970		fixed_tap = avg_tap_dly / 2;
 971
 972		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
 973		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 974		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
 975			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 976		window_width = end_tap - start_tap;
 977		num_iter = host->tuning_loop_count;
 978		/*
 979		 * partial window includes edges of the tuning range.
 980		 * merged window includes more taps so window width is higher
 981		 * than upper threshold.
 982		 */
 983		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
 984		    (end_tap == num_iter - 2) || window_width >= thdupper) {
 985			pr_debug("%s: Apply tuning correction\n",
 986				 mmc_hostname(host->mmc));
 987			tegra_sdhci_tap_correction(host, thdupper, thdlower,
 988						   fixed_tap);
 989		}
 990	}
 991
 992	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
 993}
 994
 995static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
 996{
 997	struct sdhci_host *host = mmc_priv(mmc);
 998	int err;
 999
1000	err = sdhci_execute_tuning(mmc, opcode);
1001	if (!err && !host->tuning_err)
1002		tegra_sdhci_post_tuning(host);
1003
1004	return err;
1005}
1006
1007static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
1008					  unsigned timing)
1009{
1010	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1011	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1012	bool set_default_tap = false;
1013	bool set_dqs_trim = false;
1014	bool do_hs400_dll_cal = false;
1015	u8 iter = TRIES_256;
1016	u32 val;
1017
1018	tegra_host->ddr_signaling = false;
1019	switch (timing) {
1020	case MMC_TIMING_UHS_SDR50:
1021		break;
1022	case MMC_TIMING_UHS_SDR104:
1023	case MMC_TIMING_MMC_HS200:
1024		/* Don't set default tap on tunable modes. */
1025		iter = TRIES_128;
1026		break;
1027	case MMC_TIMING_MMC_HS400:
1028		set_dqs_trim = true;
1029		do_hs400_dll_cal = true;
1030		iter = TRIES_128;
1031		break;
1032	case MMC_TIMING_MMC_DDR52:
1033	case MMC_TIMING_UHS_DDR50:
1034		tegra_host->ddr_signaling = true;
1035		set_default_tap = true;
1036		break;
1037	default:
1038		set_default_tap = true;
1039		break;
1040	}
1041
1042	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1043	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
1044		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1045		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1046	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1047		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1048		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1049	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1050	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1051
1052	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1053
1054	sdhci_set_uhs_signaling(host, timing);
1055
1056	tegra_sdhci_pad_autocalib(host);
1057
1058	if (tegra_host->tuned_tap_delay && !set_default_tap)
1059		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1060	else
1061		tegra_sdhci_set_tap(host, tegra_host->default_tap);
1062
1063	if (set_dqs_trim)
1064		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1065
1066	if (do_hs400_dll_cal)
1067		tegra_sdhci_hs400_dll_cal(host);
1068}
1069
1070static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1071{
1072	unsigned int min, max;
1073
1074	/*
1075	 * Start search for minimum tap value at 10, as smaller values are
1076	 * may wrongly be reported as working but fail at higher speeds,
1077	 * according to the TRM.
1078	 */
1079	min = 10;
1080	while (min < 255) {
1081		tegra_sdhci_set_tap(host, min);
1082		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1083			break;
1084		min++;
1085	}
1086
1087	/* Find the maximum tap value that still passes. */
1088	max = min + 1;
1089	while (max < 255) {
1090		tegra_sdhci_set_tap(host, max);
1091		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1092			max--;
1093			break;
1094		}
1095		max++;
1096	}
1097
1098	/* The TRM states the ideal tap value is at 75% in the passing range. */
1099	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1100
1101	return mmc_send_tuning(host->mmc, opcode, NULL);
1102}
1103
1104static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1105						   struct mmc_ios *ios)
1106{
1107	struct sdhci_host *host = mmc_priv(mmc);
1108	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1109	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1110	int ret = 0;
1111
1112	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1113		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1114		if (ret < 0)
1115			return ret;
1116		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1117	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1118		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1119		if (ret < 0)
1120			return ret;
1121		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1122	}
1123
1124	if (tegra_host->pad_calib_required)
1125		tegra_sdhci_pad_autocalib(host);
1126
1127	return ret;
1128}
1129
1130static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1131					 struct sdhci_tegra *tegra_host)
1132{
1133	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1134	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1135		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1136			PTR_ERR(tegra_host->pinctrl_sdmmc));
1137		return -1;
1138	}
1139
1140	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1141				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1142	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1143		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1144			tegra_host->pinctrl_state_1v8_drv = NULL;
1145	}
1146
1147	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1148				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1149	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1150		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1151			tegra_host->pinctrl_state_3v3_drv = NULL;
1152	}
1153
1154	tegra_host->pinctrl_state_3v3 =
1155		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1156	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1157		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1158			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1159		return -1;
1160	}
1161
1162	tegra_host->pinctrl_state_1v8 =
1163		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1164	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1165		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1166			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1167		return -1;
1168	}
1169
1170	tegra_host->pad_control_available = true;
1171
1172	return 0;
1173}
1174
1175static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1176{
1177	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1178	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1179	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1180
1181	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1182		tegra_host->pad_calib_required = true;
1183}
1184
1185static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1186{
1187	struct mmc_host *mmc = cq_host->mmc;
1188	struct sdhci_host *host = mmc_priv(mmc);
1189	u8 ctrl;
1190	ktime_t timeout;
1191	bool timed_out;
1192
1193	/*
1194	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
1195	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
1196	 * to be re-configured.
1197	 * Tegra CQHCI/SDHCI prevents write access to block size register when
1198	 * CQE is unhalted. So handling CQE resume sequence here to configure
1199	 * SDHCI block registers prior to exiting CQE halt state.
1200	 */
1201	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1202	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1203		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1204		sdhci_cqe_enable(mmc);
1205		writel(val, cq_host->mmio + reg);
1206		timeout = ktime_add_us(ktime_get(), 50);
1207		while (1) {
1208			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1209			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1210			if (!(ctrl & CQHCI_HALT) || timed_out)
1211				break;
1212		}
1213		/*
1214		 * CQE usually resumes very quick, but incase if Tegra CQE
1215		 * doesn't resume retry unhalt.
1216		 */
1217		if (timed_out)
1218			writel(val, cq_host->mmio + reg);
1219	} else {
1220		writel(val, cq_host->mmio + reg);
1221	}
1222}
1223
1224static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1225					 struct mmc_request *mrq, u64 *data)
1226{
1227	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1228	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1229	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1230
1231	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1232	    mrq->cmd->flags & MMC_RSP_R1B)
1233		*data |= CQHCI_CMD_TIMING(1);
1234}
1235
1236static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1237{
1238	struct cqhci_host *cq_host = mmc->cqe_private;
1239	struct sdhci_host *host = mmc_priv(mmc);
1240	u32 val;
1241
1242	/*
1243	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
1244	 * register when CQE is enabled and unhalted.
1245	 * CQHCI driver enables CQE prior to activation, so disable CQE before
1246	 * programming block size in sdhci controller and enable it back.
1247	 */
1248	if (!cq_host->activated) {
1249		val = cqhci_readl(cq_host, CQHCI_CFG);
1250		if (val & CQHCI_ENABLE)
1251			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1252				     CQHCI_CFG);
1253		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1254		sdhci_cqe_enable(mmc);
1255		if (val & CQHCI_ENABLE)
1256			cqhci_writel(cq_host, val, CQHCI_CFG);
1257	}
1258
1259	/*
1260	 * CMD CRC errors are seen sometimes with some eMMC devices when status
1261	 * command is sent during transfer of last data block which is the
1262	 * default case as send status command block counter (CBC) is 1.
1263	 * Recommended fix to set CBC to 0 allowing send status command only
1264	 * when data lines are idle.
1265	 */
1266	val = cqhci_readl(cq_host, CQHCI_SSC1);
1267	val &= ~CQHCI_SSC1_CBC_MASK;
1268	cqhci_writel(cq_host, val, CQHCI_SSC1);
1269}
1270
1271static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1272{
1273	sdhci_dumpregs(mmc_priv(mmc));
1274}
1275
1276static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1277{
1278	int cmd_error = 0;
1279	int data_error = 0;
1280
1281	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1282		return intmask;
1283
1284	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1285
1286	return 0;
1287}
1288
1289static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1290				    struct mmc_command *cmd)
1291{
1292	u32 val;
1293
1294	/*
1295	 * HW busy detection timeout is based on programmed data timeout
1296	 * counter and maximum supported timeout is 11s which may not be
1297	 * enough for long operations like cache flush, sleep awake, erase.
1298	 *
1299	 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
1300	 * host controller to wait for busy state until the card is busy
1301	 * without HW timeout.
1302	 *
1303	 * So, use infinite busy wait mode for operations that may take
1304	 * more than maximum HW busy timeout of 11s otherwise use finite
1305	 * busy wait mode.
1306	 */
1307	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1308	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
1309		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1310	else
1311		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1312	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1313
1314	__sdhci_set_timeout(host, cmd);
1315}
1316
1317static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
1318{
1319	struct cqhci_host *cq_host = mmc->cqe_private;
1320	u32 reg;
1321
1322	reg = cqhci_readl(cq_host, CQHCI_CFG);
1323	reg |= CQHCI_ENABLE;
1324	cqhci_writel(cq_host, reg, CQHCI_CFG);
1325}
1326
1327static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
1328{
1329	struct cqhci_host *cq_host = mmc->cqe_private;
1330	struct sdhci_host *host = mmc_priv(mmc);
1331	u32 reg;
1332
1333	reg = cqhci_readl(cq_host, CQHCI_CFG);
1334	reg &= ~CQHCI_ENABLE;
1335	cqhci_writel(cq_host, reg, CQHCI_CFG);
1336	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1337}
1338
1339static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1340	.write_l    = tegra_cqhci_writel,
1341	.enable	= sdhci_tegra_cqe_enable,
1342	.disable = sdhci_cqe_disable,
1343	.dumpregs = sdhci_tegra_dumpregs,
1344	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1345	.pre_enable = sdhci_tegra_cqe_pre_enable,
1346	.post_disable = sdhci_tegra_cqe_post_disable,
1347};
1348
1349static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1350{
1351	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1352	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1353	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1354	struct device *dev = mmc_dev(host->mmc);
1355
1356	if (soc->dma_mask)
1357		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1358
1359	return 0;
1360}
1361
1362static const struct sdhci_ops tegra_sdhci_ops = {
1363	.get_ro     = tegra_sdhci_get_ro,
1364	.read_w     = tegra_sdhci_readw,
1365	.write_l    = tegra_sdhci_writel,
1366	.set_clock  = tegra_sdhci_set_clock,
1367	.set_dma_mask = tegra_sdhci_set_dma_mask,
1368	.set_bus_width = sdhci_set_bus_width,
1369	.reset      = tegra_sdhci_reset,
1370	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1371	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1372	.voltage_switch = tegra_sdhci_voltage_switch,
1373	.get_max_clock = tegra_sdhci_get_max_clock,
1374};
1375
1376static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1377	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1378		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1379		  SDHCI_QUIRK_NO_HISPD_BIT |
1380		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1381		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1382	.ops  = &tegra_sdhci_ops,
1383};
1384
1385static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1386	.pdata = &sdhci_tegra20_pdata,
1387	.dma_mask = DMA_BIT_MASK(32),
1388	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1389		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1390		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1391};
1392
1393static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1394	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1395		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1396		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1397		  SDHCI_QUIRK_NO_HISPD_BIT |
1398		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1399		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1400	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1401		   SDHCI_QUIRK2_BROKEN_HS200 |
1402		   /*
1403		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1404		    * though no command operation was in progress."
1405		    *
1406		    * The exact reason is unknown, as the same hardware seems
1407		    * to support Auto CMD23 on a downstream 3.1 kernel.
1408		    */
1409		   SDHCI_QUIRK2_ACMD23_BROKEN,
1410	.ops  = &tegra_sdhci_ops,
1411};
1412
1413static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1414	.pdata = &sdhci_tegra30_pdata,
1415	.dma_mask = DMA_BIT_MASK(32),
1416	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1417		    NVQUIRK_ENABLE_SDR50 |
1418		    NVQUIRK_ENABLE_SDR104 |
1419		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1420		    NVQUIRK_HAS_PADCALIB,
1421};
1422
1423static const struct sdhci_ops tegra114_sdhci_ops = {
1424	.get_ro     = tegra_sdhci_get_ro,
1425	.read_w     = tegra_sdhci_readw,
1426	.write_w    = tegra_sdhci_writew,
1427	.write_l    = tegra_sdhci_writel,
1428	.set_clock  = tegra_sdhci_set_clock,
1429	.set_dma_mask = tegra_sdhci_set_dma_mask,
1430	.set_bus_width = sdhci_set_bus_width,
1431	.reset      = tegra_sdhci_reset,
1432	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1433	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1434	.voltage_switch = tegra_sdhci_voltage_switch,
1435	.get_max_clock = tegra_sdhci_get_max_clock,
1436};
1437
1438static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1439	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1440		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1441		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1442		  SDHCI_QUIRK_NO_HISPD_BIT |
1443		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1444		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1445	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1446	.ops  = &tegra114_sdhci_ops,
1447};
1448
1449static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1450	.pdata = &sdhci_tegra114_pdata,
1451	.dma_mask = DMA_BIT_MASK(32),
1452	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1453};
1454
1455static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1456	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1457		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1458		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1459		  SDHCI_QUIRK_NO_HISPD_BIT |
1460		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1461		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1462	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1463	.ops  = &tegra114_sdhci_ops,
1464};
1465
1466static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1467	.pdata = &sdhci_tegra124_pdata,
1468	.dma_mask = DMA_BIT_MASK(34),
1469	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1470};
1471
1472static const struct sdhci_ops tegra210_sdhci_ops = {
1473	.get_ro     = tegra_sdhci_get_ro,
1474	.read_w     = tegra_sdhci_readw,
1475	.write_w    = tegra210_sdhci_writew,
1476	.write_l    = tegra_sdhci_writel,
1477	.set_clock  = tegra_sdhci_set_clock,
1478	.set_dma_mask = tegra_sdhci_set_dma_mask,
1479	.set_bus_width = sdhci_set_bus_width,
1480	.reset      = tegra_sdhci_reset,
1481	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1482	.voltage_switch = tegra_sdhci_voltage_switch,
1483	.get_max_clock = tegra_sdhci_get_max_clock,
1484	.set_timeout = tegra_sdhci_set_timeout,
1485};
1486
1487static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1488	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1489		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1490		  SDHCI_QUIRK_NO_HISPD_BIT |
1491		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1492		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1493	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1494	.ops  = &tegra210_sdhci_ops,
1495};
1496
1497static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1498	.pdata = &sdhci_tegra210_pdata,
1499	.dma_mask = DMA_BIT_MASK(34),
1500	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1501		    NVQUIRK_HAS_PADCALIB |
1502		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1503		    NVQUIRK_ENABLE_SDR50 |
1504		    NVQUIRK_ENABLE_SDR104 |
1505		    NVQUIRK_HAS_TMCLK,
1506	.min_tap_delay = 106,
1507	.max_tap_delay = 185,
1508};
1509
1510static const struct sdhci_ops tegra186_sdhci_ops = {
1511	.get_ro     = tegra_sdhci_get_ro,
1512	.read_w     = tegra_sdhci_readw,
1513	.write_l    = tegra_sdhci_writel,
1514	.set_clock  = tegra_sdhci_set_clock,
1515	.set_dma_mask = tegra_sdhci_set_dma_mask,
1516	.set_bus_width = sdhci_set_bus_width,
1517	.reset      = tegra_sdhci_reset,
1518	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1519	.voltage_switch = tegra_sdhci_voltage_switch,
1520	.get_max_clock = tegra_sdhci_get_max_clock,
1521	.irq = sdhci_tegra_cqhci_irq,
1522	.set_timeout = tegra_sdhci_set_timeout,
1523};
1524
1525static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1526	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1527		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1528		  SDHCI_QUIRK_NO_HISPD_BIT |
1529		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1530		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1531	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1532		   SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
1533	.ops  = &tegra186_sdhci_ops,
1534};
1535
1536static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1537	.pdata = &sdhci_tegra186_pdata,
1538	.dma_mask = DMA_BIT_MASK(40),
1539	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1540		    NVQUIRK_HAS_PADCALIB |
1541		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1542		    NVQUIRK_ENABLE_SDR50 |
1543		    NVQUIRK_ENABLE_SDR104 |
1544		    NVQUIRK_HAS_TMCLK |
1545		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1546	.min_tap_delay = 84,
1547	.max_tap_delay = 136,
1548};
1549
1550static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1551	.pdata = &sdhci_tegra186_pdata,
1552	.dma_mask = DMA_BIT_MASK(39),
1553	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1554		    NVQUIRK_HAS_PADCALIB |
1555		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1556		    NVQUIRK_ENABLE_SDR50 |
1557		    NVQUIRK_ENABLE_SDR104 |
1558		    NVQUIRK_HAS_TMCLK,
1559	.min_tap_delay = 96,
1560	.max_tap_delay = 139,
1561};
1562
1563static const struct sdhci_tegra_soc_data soc_data_tegra234 = {
1564	.pdata = &sdhci_tegra186_pdata,
1565	.dma_mask = DMA_BIT_MASK(39),
1566	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1567		    NVQUIRK_HAS_PADCALIB |
1568		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1569		    NVQUIRK_ENABLE_SDR50 |
1570		    NVQUIRK_ENABLE_SDR104 |
1571		    NVQUIRK_PROGRAM_STREAMID |
1572		    NVQUIRK_HAS_TMCLK,
1573	.min_tap_delay = 95,
1574	.max_tap_delay = 111,
1575};
1576
1577static const struct of_device_id sdhci_tegra_dt_match[] = {
1578	{ .compatible = "nvidia,tegra234-sdhci", .data = &soc_data_tegra234 },
1579	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1580	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1581	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1582	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1583	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1584	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1585	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1586	{}
1587};
1588MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1589
1590static int sdhci_tegra_add_host(struct sdhci_host *host)
1591{
1592	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1593	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1594	struct cqhci_host *cq_host;
1595	bool dma64;
1596	int ret;
1597
1598	if (!tegra_host->enable_hwcq)
1599		return sdhci_add_host(host);
1600
1601	sdhci_enable_v4_mode(host);
1602
1603	ret = sdhci_setup_host(host);
1604	if (ret)
1605		return ret;
1606
1607	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1608
1609	cq_host = devm_kzalloc(mmc_dev(host->mmc),
1610				sizeof(*cq_host), GFP_KERNEL);
1611	if (!cq_host) {
1612		ret = -ENOMEM;
1613		goto cleanup;
1614	}
1615
1616	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1617	cq_host->ops = &sdhci_tegra_cqhci_ops;
1618
1619	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1620	if (dma64)
1621		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1622
1623	ret = cqhci_init(cq_host, host->mmc, dma64);
1624	if (ret)
1625		goto cleanup;
1626
1627	ret = __sdhci_add_host(host);
1628	if (ret)
1629		goto cleanup;
1630
1631	return 0;
1632
1633cleanup:
1634	sdhci_cleanup_host(host);
1635	return ret;
1636}
1637
1638/* Program MC streamID for DMA transfers */
1639static void sdhci_tegra_program_stream_id(struct sdhci_host *host)
1640{
1641	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1642	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1643
1644	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID) {
1645		tegra_sdhci_writel(host, FIELD_PREP(GENMASK(15, 8), tegra_host->stream_id) |
1646					 FIELD_PREP(GENMASK(7, 0), tegra_host->stream_id),
1647					 SDHCI_TEGRA_CIF2AXI_CTRL_0);
1648	}
1649}
1650
1651static int sdhci_tegra_probe(struct platform_device *pdev)
1652{
1653	const struct sdhci_tegra_soc_data *soc_data;
1654	struct sdhci_host *host;
1655	struct sdhci_pltfm_host *pltfm_host;
1656	struct sdhci_tegra *tegra_host;
1657	struct clk *clk;
1658	int rc;
1659
1660	soc_data = of_device_get_match_data(&pdev->dev);
1661	if (!soc_data)
1662		return -EINVAL;
1663
1664	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1665	if (IS_ERR(host))
1666		return PTR_ERR(host);
1667	pltfm_host = sdhci_priv(host);
1668
1669	tegra_host = sdhci_pltfm_priv(pltfm_host);
1670	tegra_host->ddr_signaling = false;
1671	tegra_host->pad_calib_required = false;
1672	tegra_host->pad_control_available = false;
1673	tegra_host->soc_data = soc_data;
1674
1675	if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
1676		host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;
1677
1678	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1679		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1680		if (rc == 0)
1681			host->mmc_host_ops.start_signal_voltage_switch =
1682				sdhci_tegra_start_signal_voltage_switch;
1683	}
1684
1685	/* Hook to periodically rerun pad calibration */
1686	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1687		host->mmc_host_ops.request = tegra_sdhci_request;
1688
1689	host->mmc_host_ops.hs400_enhanced_strobe =
1690			tegra_sdhci_hs400_enhanced_strobe;
1691
1692	if (!host->ops->platform_execute_tuning)
1693		host->mmc_host_ops.execute_tuning =
1694				tegra_sdhci_execute_hw_tuning;
1695
1696	rc = mmc_of_parse(host->mmc);
1697	if (rc)
1698		goto err_parse_dt;
1699
1700	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1701		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1702
1703	/* HW busy detection is supported, but R1B responses are required. */
1704	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1705
1706	/* GPIO CD can be set as a wakeup source */
1707	host->mmc->caps |= MMC_CAP_CD_WAKE;
1708
1709	tegra_sdhci_parse_dt(host);
1710
1711	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID &&
1712	    !tegra_dev_iommu_get_stream_id(&pdev->dev, &tegra_host->stream_id)) {
1713		dev_warn(mmc_dev(host->mmc), "missing IOMMU stream ID\n");
1714		tegra_host->stream_id = 0x7f;
1715	}
1716
1717	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1718							 GPIOD_OUT_HIGH);
1719	if (IS_ERR(tegra_host->power_gpio)) {
1720		rc = PTR_ERR(tegra_host->power_gpio);
1721		goto err_power_req;
1722	}
1723
1724	/*
1725	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used for host
1726	 * timeout clock and SW can choose TMCLK or SDCLK for hardware
1727	 * data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of
1728	 * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL.
1729	 *
1730	 * USE_TMCLK_FOR_DATA_TIMEOUT bit default is set to 1 and SDMMC uses
1731	 * 12Mhz TMCLK which is advertised in host capability register.
1732	 * With TMCLK of 12Mhz provides maximum data timeout period that can
1733	 * be achieved is 11s better than using SDCLK for data timeout.
1734	 *
1735	 * So, TMCLK is set to 12Mhz and kept enabled all the time on SoC's
1736	 * supporting separate TMCLK.
1737	 */
1738
1739	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1740		clk = devm_clk_get(&pdev->dev, "tmclk");
1741		if (IS_ERR(clk)) {
1742			rc = PTR_ERR(clk);
1743			if (rc == -EPROBE_DEFER)
1744				goto err_power_req;
1745
1746			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
1747			clk = NULL;
1748		}
1749
1750		clk_set_rate(clk, 12000000);
1751		rc = clk_prepare_enable(clk);
1752		if (rc) {
1753			dev_err(&pdev->dev,
1754				"failed to enable tmclk: %d\n", rc);
1755			goto err_power_req;
1756		}
1757
1758		tegra_host->tmclk = clk;
1759	}
1760
1761	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1762	if (IS_ERR(clk)) {
1763		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
1764				   "failed to get clock\n");
1765		goto err_clk_get;
1766	}
1767	pltfm_host->clk = clk;
1768
1769	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1770							   "sdhci");
1771	if (IS_ERR(tegra_host->rst)) {
1772		rc = PTR_ERR(tegra_host->rst);
1773		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1774		goto err_rst_get;
1775	}
1776
1777	rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
1778	if (rc)
1779		goto err_rst_get;
1780
1781	pm_runtime_enable(&pdev->dev);
1782	rc = pm_runtime_resume_and_get(&pdev->dev);
1783	if (rc)
1784		goto err_pm_get;
1785
1786	rc = reset_control_assert(tegra_host->rst);
1787	if (rc)
1788		goto err_rst_assert;
1789
1790	usleep_range(2000, 4000);
1791
1792	rc = reset_control_deassert(tegra_host->rst);
1793	if (rc)
1794		goto err_rst_assert;
1795
1796	usleep_range(2000, 4000);
1797
1798	rc = sdhci_tegra_add_host(host);
1799	if (rc)
1800		goto err_add_host;
1801
1802	sdhci_tegra_program_stream_id(host);
1803
1804	return 0;
1805
1806err_add_host:
1807	reset_control_assert(tegra_host->rst);
1808err_rst_assert:
1809	pm_runtime_put_sync_suspend(&pdev->dev);
1810err_pm_get:
1811	pm_runtime_disable(&pdev->dev);
1812err_rst_get:
1813err_clk_get:
1814	clk_disable_unprepare(tegra_host->tmclk);
1815err_power_req:
1816err_parse_dt:
1817	sdhci_pltfm_free(pdev);
1818	return rc;
1819}
1820
1821static int sdhci_tegra_remove(struct platform_device *pdev)
1822{
1823	struct sdhci_host *host = platform_get_drvdata(pdev);
1824	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1825	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1826
1827	sdhci_remove_host(host, 0);
1828
1829	reset_control_assert(tegra_host->rst);
1830	usleep_range(2000, 4000);
1831
1832	pm_runtime_put_sync_suspend(&pdev->dev);
1833	pm_runtime_force_suspend(&pdev->dev);
1834
1835	clk_disable_unprepare(tegra_host->tmclk);
1836	sdhci_pltfm_free(pdev);
1837
1838	return 0;
1839}
1840
1841static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
1842{
1843	struct sdhci_host *host = dev_get_drvdata(dev);
1844	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1845
1846	clk_disable_unprepare(pltfm_host->clk);
1847
1848	return 0;
1849}
1850
1851static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
1852{
1853	struct sdhci_host *host = dev_get_drvdata(dev);
1854	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1855
1856	return clk_prepare_enable(pltfm_host->clk);
1857}
1858
1859#ifdef CONFIG_PM_SLEEP
1860static int sdhci_tegra_suspend(struct device *dev)
1861{
1862	struct sdhci_host *host = dev_get_drvdata(dev);
1863	int ret;
1864
1865	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1866		ret = cqhci_suspend(host->mmc);
1867		if (ret)
1868			return ret;
1869	}
1870
1871	ret = sdhci_suspend_host(host);
1872	if (ret) {
1873		cqhci_resume(host->mmc);
1874		return ret;
1875	}
1876
1877	ret = pm_runtime_force_suspend(dev);
1878	if (ret) {
1879		sdhci_resume_host(host);
1880		cqhci_resume(host->mmc);
1881		return ret;
1882	}
1883
1884	return mmc_gpio_set_cd_wake(host->mmc, true);
1885}
1886
1887static int sdhci_tegra_resume(struct device *dev)
1888{
1889	struct sdhci_host *host = dev_get_drvdata(dev);
1890	int ret;
1891
1892	ret = mmc_gpio_set_cd_wake(host->mmc, false);
1893	if (ret)
1894		return ret;
1895
1896	ret = pm_runtime_force_resume(dev);
1897	if (ret)
1898		return ret;
1899
1900	sdhci_tegra_program_stream_id(host);
1901
1902	ret = sdhci_resume_host(host);
1903	if (ret)
1904		goto disable_clk;
1905
1906	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1907		ret = cqhci_resume(host->mmc);
1908		if (ret)
1909			goto suspend_host;
1910	}
1911
1912	return 0;
1913
1914suspend_host:
1915	sdhci_suspend_host(host);
1916disable_clk:
1917	pm_runtime_force_suspend(dev);
1918	return ret;
1919}
1920#endif
1921
1922static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
1923	SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
1924			   NULL)
1925	SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
1926};
1927
1928static struct platform_driver sdhci_tegra_driver = {
1929	.driver		= {
1930		.name	= "sdhci-tegra",
1931		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1932		.of_match_table = sdhci_tegra_dt_match,
1933		.pm	= &sdhci_tegra_dev_pm_ops,
1934	},
1935	.probe		= sdhci_tegra_probe,
1936	.remove		= sdhci_tegra_remove,
1937};
1938
1939module_platform_driver(sdhci_tegra_driver);
1940
1941MODULE_DESCRIPTION("SDHCI driver for Tegra");
1942MODULE_AUTHOR("Google, Inc.");
1943MODULE_LICENSE("GPL v2");
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2010 Google, Inc.
   4 */
   5
   6#include <linux/bitfield.h>
   7#include <linux/clk.h>
   8#include <linux/delay.h>
   9#include <linux/dma-mapping.h>
  10#include <linux/err.h>
  11#include <linux/gpio/consumer.h>
  12#include <linux/init.h>
  13#include <linux/io.h>
  14#include <linux/iommu.h>
  15#include <linux/iopoll.h>
  16#include <linux/ktime.h>
  17#include <linux/mmc/card.h>
  18#include <linux/mmc/host.h>
  19#include <linux/mmc/mmc.h>
  20#include <linux/mmc/slot-gpio.h>
  21#include <linux/module.h>
 
  22#include <linux/of.h>
  23#include <linux/pinctrl/consumer.h>
  24#include <linux/platform_device.h>
  25#include <linux/pm_opp.h>
  26#include <linux/pm_runtime.h>
  27#include <linux/regulator/consumer.h>
  28#include <linux/reset.h>
  29
  30#include <soc/tegra/common.h>
  31
  32#include "sdhci-cqhci.h"
  33#include "sdhci-pltfm.h"
  34#include "cqhci.h"
  35
  36/* Tegra SDHOST controller vendor register definitions */
  37#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
  38#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
  39#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
  40#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
  41#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
  42#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
  43#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
  44#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
  45
  46#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
  47#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
  48
  49#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
  50#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
  51#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
  52
  53#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
  54#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
  55#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
  56#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
  57#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
  58#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
  59
  60#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
  61#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
  62
  63#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
  64#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
  65
  66#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
  67#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
  68#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
  69#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
  70#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
  71#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
  72#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
  73#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
  74#define TRIES_128					2
  75#define TRIES_256					4
  76#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
  77
  78#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
  79#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
  80#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
  81#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
  82#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
  83#define TUNING_WORD_BIT_SIZE				32
  84
  85#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
  86#define SDHCI_AUTO_CAL_START				BIT(31)
  87#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
  88#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
  89
  90#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
  91#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
  92#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
  93#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
  94#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
  95
  96#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
  97#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
  98
  99#define SDHCI_TEGRA_CIF2AXI_CTRL_0			0x1fc
 100
 101#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
 102#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
 103#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
 104#define NVQUIRK_ENABLE_SDR50				BIT(3)
 105#define NVQUIRK_ENABLE_SDR104				BIT(4)
 106#define NVQUIRK_ENABLE_DDR50				BIT(5)
 107/*
 108 * HAS_PADCALIB NVQUIRK is for SoC's supporting auto calibration of pads
 109 * drive strength.
 110 */
 111#define NVQUIRK_HAS_PADCALIB				BIT(6)
 112/*
 113 * NEEDS_PAD_CONTROL NVQUIRK is for SoC's having separate 3V3 and 1V8 pads.
 114 * 3V3/1V8 pad selection happens through pinctrl state selection depending
 115 * on the signaling mode.
 116 */
 117#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
 118#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
 119#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
 120
 121/*
 122 * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra
 123 * SDMMC hardware data timeout.
 124 */
 125#define NVQUIRK_HAS_TMCLK				BIT(10)
 126
 127#define NVQUIRK_HAS_ANDROID_GPT_SECTOR			BIT(11)
 128#define NVQUIRK_PROGRAM_STREAMID			BIT(12)
 129
 130/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
 131#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
 132
 133#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
 134					 SDHCI_TRNS_BLK_CNT_EN | \
 135					 SDHCI_TRNS_DMA)
 136
 137struct sdhci_tegra_soc_data {
 138	const struct sdhci_pltfm_data *pdata;
 139	u64 dma_mask;
 140	u32 nvquirks;
 141	u8 min_tap_delay;
 142	u8 max_tap_delay;
 143};
 144
 145/* Magic pull up and pull down pad calibration offsets */
 146struct sdhci_tegra_autocal_offsets {
 147	u32 pull_up_3v3;
 148	u32 pull_down_3v3;
 149	u32 pull_up_3v3_timeout;
 150	u32 pull_down_3v3_timeout;
 151	u32 pull_up_1v8;
 152	u32 pull_down_1v8;
 153	u32 pull_up_1v8_timeout;
 154	u32 pull_down_1v8_timeout;
 155	u32 pull_up_sdr104;
 156	u32 pull_down_sdr104;
 157	u32 pull_up_hs400;
 158	u32 pull_down_hs400;
 159};
 160
 161struct sdhci_tegra {
 162	const struct sdhci_tegra_soc_data *soc_data;
 163	struct gpio_desc *power_gpio;
 164	struct clk *tmclk;
 165	bool ddr_signaling;
 166	bool pad_calib_required;
 167	bool pad_control_available;
 168
 169	struct reset_control *rst;
 170	struct pinctrl *pinctrl_sdmmc;
 171	struct pinctrl_state *pinctrl_state_3v3;
 172	struct pinctrl_state *pinctrl_state_1v8;
 173	struct pinctrl_state *pinctrl_state_3v3_drv;
 174	struct pinctrl_state *pinctrl_state_1v8_drv;
 175
 176	struct sdhci_tegra_autocal_offsets autocal_offsets;
 177	ktime_t last_calib;
 178
 179	u32 default_tap;
 180	u32 default_trim;
 181	u32 dqs_trim;
 182	bool enable_hwcq;
 183	unsigned long curr_clk_rate;
 184	u8 tuned_tap_delay;
 185	u32 stream_id;
 186};
 187
 188static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
 189{
 190	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 191	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 192	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 193
 194	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
 195			(reg == SDHCI_HOST_VERSION))) {
 196		/* Erratum: Version register is invalid in HW. */
 197		return SDHCI_SPEC_200;
 198	}
 199
 200	return readw(host->ioaddr + reg);
 201}
 202
 203static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 204{
 205	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 206
 207	switch (reg) {
 208	case SDHCI_TRANSFER_MODE:
 209		/*
 210		 * Postpone this write, we must do it together with a
 211		 * command write that is down below.
 212		 */
 213		pltfm_host->xfer_mode_shadow = val;
 214		return;
 215	case SDHCI_COMMAND:
 216		writel((val << 16) | pltfm_host->xfer_mode_shadow,
 217			host->ioaddr + SDHCI_TRANSFER_MODE);
 218		return;
 219	}
 220
 221	writew(val, host->ioaddr + reg);
 222}
 223
 224static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
 225{
 226	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 227	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 228	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 229
 230	/* Seems like we're getting spurious timeout and crc errors, so
 231	 * disable signalling of them. In case of real errors software
 232	 * timers should take care of eventually detecting them.
 233	 */
 234	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
 235		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
 236
 237	writel(val, host->ioaddr + reg);
 238
 239	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
 240			(reg == SDHCI_INT_ENABLE))) {
 241		/* Erratum: Must enable block gap interrupt detection */
 242		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 243		if (val & SDHCI_INT_CARD_INT)
 244			gap_ctrl |= 0x8;
 245		else
 246			gap_ctrl &= ~0x8;
 247		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
 248	}
 249}
 250
 251static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
 252{
 253	bool status;
 254	u32 reg;
 255
 256	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
 257	status = !!(reg & SDHCI_CLOCK_CARD_EN);
 258
 259	if (status == enable)
 260		return status;
 261
 262	if (enable)
 263		reg |= SDHCI_CLOCK_CARD_EN;
 264	else
 265		reg &= ~SDHCI_CLOCK_CARD_EN;
 266
 267	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
 268
 269	return status;
 270}
 271
 272static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 273{
 274	bool is_tuning_cmd = 0;
 275	bool clk_enabled;
 276
 277	if (reg == SDHCI_COMMAND)
 278		is_tuning_cmd = mmc_op_tuning(SDHCI_GET_CMD(val));
 279
 280	if (is_tuning_cmd)
 281		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);
 282
 283	writew(val, host->ioaddr + reg);
 284
 285	if (is_tuning_cmd) {
 286		udelay(1);
 287		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 288		tegra_sdhci_configure_card_clk(host, clk_enabled);
 289	}
 290}
 291
 292static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
 293{
 294	/*
 295	 * Write-enable shall be assumed if GPIO is missing in a board's
 296	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
 297	 * Tegra.
 298	 */
 299	return mmc_gpio_get_ro(host->mmc);
 300}
 301
 302static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
 303{
 304	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 305	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 306	int has_1v8, has_3v3;
 307
 308	/*
 309	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
 310	 * voltage configuration in order to perform voltage switching. This
 311	 * means that valid pinctrl info is required on SDHCI instances capable
 312	 * of performing voltage switching. Whether or not an SDHCI instance is
 313	 * capable of voltage switching is determined based on the regulator.
 314	 */
 315
 316	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 317		return true;
 318
 319	if (IS_ERR(host->mmc->supply.vqmmc))
 320		return false;
 321
 322	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 323						 1700000, 1950000);
 324
 325	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
 326						 2700000, 3600000);
 327
 328	if (has_1v8 == 1 && has_3v3 == 1)
 329		return tegra_host->pad_control_available;
 330
 331	/* Fixed voltage, no pad control required. */
 332	return true;
 333}
 334
 335static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
 336{
 337	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 338	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 339	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 340	bool card_clk_enabled = false;
 341	u32 reg;
 342
 343	/*
 344	 * Touching the tap values is a bit tricky on some SoC generations.
 345	 * The quirk enables a workaround for a glitch that sometimes occurs if
 346	 * the tap values are changed.
 347	 */
 348
 349	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
 350		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 351
 352	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 353	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
 354	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
 355	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 356
 357	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
 358	    card_clk_enabled) {
 359		udelay(1);
 360		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 361		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 362	}
 363}
 364
 365static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
 366{
 367	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 368	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 369	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 370	u32 misc_ctrl, clk_ctrl, pad_ctrl;
 371
 372	sdhci_and_cqhci_reset(host, mask);
 373
 374	if (!(mask & SDHCI_RESET_ALL))
 375		return;
 376
 377	tegra_sdhci_set_tap(host, tegra_host->default_tap);
 378
 379	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 380	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 381
 382	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
 383		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
 384		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
 385		       SDHCI_MISC_CTRL_ENABLE_SDR104);
 386
 387	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
 388		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
 389
 390	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
 391		/* Erratum: Enable SDHCI spec v3.00 support */
 392		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
 393			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
 394		/* Advertise UHS modes as supported by host */
 395		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 396			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
 397		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
 398			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
 399		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
 400			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
 401		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
 402			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
 403	}
 404
 405	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
 406
 407	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
 408	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 409
 410	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
 411		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 412		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
 413		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
 414		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 415
 416		tegra_host->pad_calib_required = true;
 417	}
 418
 419	tegra_host->ddr_signaling = false;
 420}
 421
 422static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
 423{
 424	u32 val;
 425
 426	/*
 427	 * Enable or disable the additional I/O pad used by the drive strength
 428	 * calibration process.
 429	 */
 430	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 431
 432	if (enable)
 433		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 434	else
 435		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
 436
 437	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 438
 439	if (enable)
 440		usleep_range(1, 2);
 441}
 442
 443static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
 444					       u16 pdpu)
 445{
 446	u32 reg;
 447
 448	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 449	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
 450	reg |= pdpu;
 451	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 452}
 453
 454static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
 455				   bool state_drvupdn)
 456{
 457	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 458	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 459	struct sdhci_tegra_autocal_offsets *offsets =
 460						&tegra_host->autocal_offsets;
 461	struct pinctrl_state *pinctrl_drvupdn = NULL;
 462	int ret = 0;
 463	u8 drvup = 0, drvdn = 0;
 464	u32 reg;
 465
 466	if (!state_drvupdn) {
 467		/* PADS Drive Strength */
 468		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 469			if (tegra_host->pinctrl_state_1v8_drv) {
 470				pinctrl_drvupdn =
 471					tegra_host->pinctrl_state_1v8_drv;
 472			} else {
 473				drvup = offsets->pull_up_1v8_timeout;
 474				drvdn = offsets->pull_down_1v8_timeout;
 475			}
 476		} else {
 477			if (tegra_host->pinctrl_state_3v3_drv) {
 478				pinctrl_drvupdn =
 479					tegra_host->pinctrl_state_3v3_drv;
 480			} else {
 481				drvup = offsets->pull_up_3v3_timeout;
 482				drvdn = offsets->pull_down_3v3_timeout;
 483			}
 484		}
 485
 486		if (pinctrl_drvupdn != NULL) {
 487			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 488							pinctrl_drvupdn);
 489			if (ret < 0)
 490				dev_err(mmc_dev(host->mmc),
 491					"failed pads drvupdn, ret: %d\n", ret);
 492		} else if ((drvup) || (drvdn)) {
 493			reg = sdhci_readl(host,
 494					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 495			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
 496			reg |= (drvup << 20) | (drvdn << 12);
 497			sdhci_writel(host, reg,
 498					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
 499		}
 500
 501	} else {
 502		/* Dual Voltage PADS Voltage selection */
 503		if (!tegra_host->pad_control_available)
 504			return 0;
 505
 506		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
 507			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 508						tegra_host->pinctrl_state_1v8);
 509			if (ret < 0)
 510				dev_err(mmc_dev(host->mmc),
 511					"setting 1.8V failed, ret: %d\n", ret);
 512		} else {
 513			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
 514						tegra_host->pinctrl_state_3v3);
 515			if (ret < 0)
 516				dev_err(mmc_dev(host->mmc),
 517					"setting 3.3V failed, ret: %d\n", ret);
 518		}
 519	}
 520
 521	return ret;
 522}
 523
 524static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
 525{
 526	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 527	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 528	struct sdhci_tegra_autocal_offsets offsets =
 529			tegra_host->autocal_offsets;
 530	struct mmc_ios *ios = &host->mmc->ios;
 531	bool card_clk_enabled;
 532	u16 pdpu;
 533	u32 reg;
 534	int ret;
 535
 536	switch (ios->timing) {
 537	case MMC_TIMING_UHS_SDR104:
 538		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
 539		break;
 540	case MMC_TIMING_MMC_HS400:
 541		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
 542		break;
 543	default:
 544		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
 545			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
 546		else
 547			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
 548	}
 549
 550	/* Set initial offset before auto-calibration */
 551	tegra_sdhci_set_pad_autocal_offset(host, pdpu);
 552
 553	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
 554
 555	tegra_sdhci_configure_cal_pad(host, true);
 556
 557	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 558	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
 559	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 560
 561	usleep_range(1, 2);
 562	/* 10 ms timeout */
 563	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
 564				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
 565				 1000, 10000);
 566
 567	tegra_sdhci_configure_cal_pad(host, false);
 568
 569	tegra_sdhci_configure_card_clk(host, card_clk_enabled);
 570
 571	if (ret) {
 572		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
 573
 574		/* Disable automatic cal and use fixed Drive Strengths */
 575		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 576		reg &= ~SDHCI_AUTO_CAL_ENABLE;
 577		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
 578
 579		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
 580		if (ret < 0)
 581			dev_err(mmc_dev(host->mmc),
 582				"Setting drive strengths failed: %d\n", ret);
 583	}
 584}
 585
 586static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
 587{
 588	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 589	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 590	struct sdhci_tegra_autocal_offsets *autocal =
 591			&tegra_host->autocal_offsets;
 592	int err;
 593
 594	err = device_property_read_u32(mmc_dev(host->mmc),
 595			"nvidia,pad-autocal-pull-up-offset-3v3",
 596			&autocal->pull_up_3v3);
 597	if (err)
 598		autocal->pull_up_3v3 = 0;
 599
 600	err = device_property_read_u32(mmc_dev(host->mmc),
 601			"nvidia,pad-autocal-pull-down-offset-3v3",
 602			&autocal->pull_down_3v3);
 603	if (err)
 604		autocal->pull_down_3v3 = 0;
 605
 606	err = device_property_read_u32(mmc_dev(host->mmc),
 607			"nvidia,pad-autocal-pull-up-offset-1v8",
 608			&autocal->pull_up_1v8);
 609	if (err)
 610		autocal->pull_up_1v8 = 0;
 611
 612	err = device_property_read_u32(mmc_dev(host->mmc),
 613			"nvidia,pad-autocal-pull-down-offset-1v8",
 614			&autocal->pull_down_1v8);
 615	if (err)
 616		autocal->pull_down_1v8 = 0;
 617
 618	err = device_property_read_u32(mmc_dev(host->mmc),
 619			"nvidia,pad-autocal-pull-up-offset-sdr104",
 620			&autocal->pull_up_sdr104);
 621	if (err)
 622		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
 623
 624	err = device_property_read_u32(mmc_dev(host->mmc),
 625			"nvidia,pad-autocal-pull-down-offset-sdr104",
 626			&autocal->pull_down_sdr104);
 627	if (err)
 628		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
 629
 630	err = device_property_read_u32(mmc_dev(host->mmc),
 631			"nvidia,pad-autocal-pull-up-offset-hs400",
 632			&autocal->pull_up_hs400);
 633	if (err)
 634		autocal->pull_up_hs400 = autocal->pull_up_1v8;
 635
 636	err = device_property_read_u32(mmc_dev(host->mmc),
 637			"nvidia,pad-autocal-pull-down-offset-hs400",
 638			&autocal->pull_down_hs400);
 639	if (err)
 640		autocal->pull_down_hs400 = autocal->pull_down_1v8;
 641
 642	/*
 643	 * Different fail-safe drive strength values based on the signaling
 644	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
 645	 * So, avoid reading below device tree properties for SoCs that don't
 646	 * have NVQUIRK_NEEDS_PAD_CONTROL.
 647	 */
 648	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
 649		return;
 650
 651	err = device_property_read_u32(mmc_dev(host->mmc),
 652			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
 653			&autocal->pull_up_3v3_timeout);
 654	if (err) {
 655		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 656			(tegra_host->pinctrl_state_3v3_drv == NULL))
 657			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 658				mmc_hostname(host->mmc));
 659		autocal->pull_up_3v3_timeout = 0;
 660	}
 661
 662	err = device_property_read_u32(mmc_dev(host->mmc),
 663			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
 664			&autocal->pull_down_3v3_timeout);
 665	if (err) {
 666		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
 667			(tegra_host->pinctrl_state_3v3_drv == NULL))
 668			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
 669				mmc_hostname(host->mmc));
 670		autocal->pull_down_3v3_timeout = 0;
 671	}
 672
 673	err = device_property_read_u32(mmc_dev(host->mmc),
 674			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
 675			&autocal->pull_up_1v8_timeout);
 676	if (err) {
 677		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 678			(tegra_host->pinctrl_state_1v8_drv == NULL))
 679			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 680				mmc_hostname(host->mmc));
 681		autocal->pull_up_1v8_timeout = 0;
 682	}
 683
 684	err = device_property_read_u32(mmc_dev(host->mmc),
 685			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
 686			&autocal->pull_down_1v8_timeout);
 687	if (err) {
 688		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
 689			(tegra_host->pinctrl_state_1v8_drv == NULL))
 690			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
 691				mmc_hostname(host->mmc));
 692		autocal->pull_down_1v8_timeout = 0;
 693	}
 694}
 695
 696static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 697{
 698	struct sdhci_host *host = mmc_priv(mmc);
 699	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 700	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 701	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
 702
 703	/* 100 ms calibration interval is specified in the TRM */
 704	if (ktime_to_ms(since_calib) > 100) {
 705		tegra_sdhci_pad_autocalib(host);
 706		tegra_host->last_calib = ktime_get();
 707	}
 708
 709	sdhci_request(mmc, mrq);
 710}
 711
 712static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
 713{
 714	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 715	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 716	int err;
 717
 718	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
 719				       &tegra_host->default_tap);
 720	if (err)
 721		tegra_host->default_tap = 0;
 722
 723	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
 724				       &tegra_host->default_trim);
 725	if (err)
 726		tegra_host->default_trim = 0;
 727
 728	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
 729				       &tegra_host->dqs_trim);
 730	if (err)
 731		tegra_host->dqs_trim = 0x11;
 732}
 733
 734static void tegra_sdhci_parse_dt(struct sdhci_host *host)
 735{
 736	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 737	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 738
 739	if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
 740		tegra_host->enable_hwcq = true;
 741	else
 742		tegra_host->enable_hwcq = false;
 743
 744	tegra_sdhci_parse_pad_autocal_dt(host);
 745	tegra_sdhci_parse_tap_and_trim(host);
 746}
 747
 748static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 749{
 750	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 751	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 752	struct device *dev = mmc_dev(host->mmc);
 753	unsigned long host_clk;
 754	int err;
 755
 756	if (!clock)
 757		return sdhci_set_clock(host, clock);
 758
 759	/*
 760	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
 761	 * divider to be configured to divided the host clock by two. The SDHCI
 762	 * clock divider is calculated as part of sdhci_set_clock() by
 763	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
 764	 * the requested clock rate.
 765	 *
 766	 * By setting the host->max_clk to clock * 2 the divider calculation
 767	 * will always result in the correct value for DDR50/52 modes,
 768	 * regardless of clock rate rounding, which may happen if the value
 769	 * from clk_get_rate() is used.
 770	 */
 771	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
 772
 773	err = dev_pm_opp_set_rate(dev, host_clk);
 774	if (err)
 775		dev_err(dev, "failed to set clk rate to %luHz: %d\n",
 776			host_clk, err);
 777
 778	tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
 779	if (tegra_host->ddr_signaling)
 780		host->max_clk = host_clk;
 781	else
 782		host->max_clk = clk_get_rate(pltfm_host->clk);
 783
 784	sdhci_set_clock(host, clock);
 785
 786	if (tegra_host->pad_calib_required) {
 787		tegra_sdhci_pad_autocalib(host);
 788		tegra_host->pad_calib_required = false;
 789	}
 790}
 791
 792static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
 793					      struct mmc_ios *ios)
 794{
 795	struct sdhci_host *host = mmc_priv(mmc);
 796	u32 val;
 797
 798	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 799
 800	if (ios->enhanced_strobe) {
 801		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 802		/*
 803		 * When CMD13 is sent from mmc_select_hs400es() after
 804		 * switching to HS400ES mode, the bus is operating at
 805		 * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
 806		 * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
 807		 * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
 808		 * controller CAR clock and the interface clock are rate matched.
 809		 */
 810		tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
 811	} else {
 812		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
 813	}
 814
 815	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
 816}
 817
 818static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
 819{
 820	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 821
 822	return clk_round_rate(pltfm_host->clk, UINT_MAX);
 823}
 824
 825static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
 826{
 827	u32 val;
 828
 829	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 830	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
 831	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
 832	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
 833}
 834
 835static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
 836{
 837	u32 reg;
 838	int err;
 839
 840	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 841	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
 842	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
 843
 844	/* 1 ms sleep, 5 ms timeout */
 845	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
 846				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
 847				 1000, 5000);
 848	if (err)
 849		dev_err(mmc_dev(host->mmc),
 850			"HS400 delay line calibration timed out\n");
 851}
 852
 853static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
 854				       u8 thd_low, u8 fixed_tap)
 855{
 856	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 857	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 858	u32 val, tun_status;
 859	u8 word, bit, edge1, tap, window;
 860	bool tap_result;
 861	bool start_fail = false;
 862	bool start_pass = false;
 863	bool end_pass = false;
 864	bool first_fail = false;
 865	bool first_pass = false;
 866	u8 start_pass_tap = 0;
 867	u8 end_pass_tap = 0;
 868	u8 first_fail_tap = 0;
 869	u8 first_pass_tap = 0;
 870	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
 871
 872	/*
 873	 * Read auto-tuned results and extract good valid passing window by
 874	 * filtering out un-wanted bubble/partial/merged windows.
 875	 */
 876	for (word = 0; word < total_tuning_words; word++) {
 877		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
 878		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
 879		val |= word;
 880		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
 881		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
 882		bit = 0;
 883		while (bit < TUNING_WORD_BIT_SIZE) {
 884			tap = word * TUNING_WORD_BIT_SIZE + bit;
 885			tap_result = tun_status & (1 << bit);
 886			if (!tap_result && !start_fail) {
 887				start_fail = true;
 888				if (!first_fail) {
 889					first_fail_tap = tap;
 890					first_fail = true;
 891				}
 892
 893			} else if (tap_result && start_fail && !start_pass) {
 894				start_pass_tap = tap;
 895				start_pass = true;
 896				if (!first_pass) {
 897					first_pass_tap = tap;
 898					first_pass = true;
 899				}
 900
 901			} else if (!tap_result && start_fail && start_pass &&
 902				   !end_pass) {
 903				end_pass_tap = tap - 1;
 904				end_pass = true;
 905			} else if (tap_result && start_pass && start_fail &&
 906				   end_pass) {
 907				window = end_pass_tap - start_pass_tap;
 908				/* discard merged window and bubble window */
 909				if (window >= thd_up || window < thd_low) {
 910					start_pass_tap = tap;
 911					end_pass = false;
 912				} else {
 913					/* set tap at middle of valid window */
 914					tap = start_pass_tap + window / 2;
 915					tegra_host->tuned_tap_delay = tap;
 916					return;
 917				}
 918			}
 919
 920			bit++;
 921		}
 922	}
 923
 924	if (!first_fail) {
 925		WARN(1, "no edge detected, continue with hw tuned delay.\n");
 926	} else if (first_pass) {
 927		/* set tap location at fixed tap relative to the first edge */
 928		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
 929		if (edge1 - 1 > fixed_tap)
 930			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
 931		else
 932			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
 933	}
 934}
 935
 936static void tegra_sdhci_post_tuning(struct sdhci_host *host)
 937{
 938	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 939	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
 940	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 941	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
 942	u8 fixed_tap, start_tap, end_tap, window_width;
 943	u8 thdupper, thdlower;
 944	u8 num_iter;
 945	u32 clk_rate_mhz, period_ps, bestcase, worstcase;
 946
 947	/* retain HW tuned tap to use incase if no correction is needed */
 948	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
 949	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
 950				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
 951	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
 952		min_tap_dly = soc_data->min_tap_delay;
 953		max_tap_dly = soc_data->max_tap_delay;
 954		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
 955		period_ps = USEC_PER_SEC / clk_rate_mhz;
 956		bestcase = period_ps / min_tap_dly;
 957		worstcase = period_ps / max_tap_dly;
 958		/*
 959		 * Upper and Lower bound thresholds used to detect merged and
 960		 * bubble windows
 961		 */
 962		thdupper = (2 * worstcase + bestcase) / 2;
 963		thdlower = worstcase / 4;
 964		/*
 965		 * fixed tap is used when HW tuning result contains single edge
 966		 * and tap is set at fixed tap delay relative to the first edge
 967		 */
 968		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
 969		fixed_tap = avg_tap_dly / 2;
 970
 971		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
 972		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 973		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
 974			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
 975		window_width = end_tap - start_tap;
 976		num_iter = host->tuning_loop_count;
 977		/*
 978		 * partial window includes edges of the tuning range.
 979		 * merged window includes more taps so window width is higher
 980		 * than upper threshold.
 981		 */
 982		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
 983		    (end_tap == num_iter - 2) || window_width >= thdupper) {
 984			pr_debug("%s: Apply tuning correction\n",
 985				 mmc_hostname(host->mmc));
 986			tegra_sdhci_tap_correction(host, thdupper, thdlower,
 987						   fixed_tap);
 988		}
 989	}
 990
 991	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
 992}
 993
 994static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
 995{
 996	struct sdhci_host *host = mmc_priv(mmc);
 997	int err;
 998
 999	err = sdhci_execute_tuning(mmc, opcode);
1000	if (!err && !host->tuning_err)
1001		tegra_sdhci_post_tuning(host);
1002
1003	return err;
1004}
1005
1006static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
1007					  unsigned timing)
1008{
1009	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1010	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1011	bool set_default_tap = false;
1012	bool set_dqs_trim = false;
1013	bool do_hs400_dll_cal = false;
1014	u8 iter = TRIES_256;
1015	u32 val;
1016
1017	tegra_host->ddr_signaling = false;
1018	switch (timing) {
1019	case MMC_TIMING_UHS_SDR50:
1020		break;
1021	case MMC_TIMING_UHS_SDR104:
1022	case MMC_TIMING_MMC_HS200:
1023		/* Don't set default tap on tunable modes. */
1024		iter = TRIES_128;
1025		break;
1026	case MMC_TIMING_MMC_HS400:
1027		set_dqs_trim = true;
1028		do_hs400_dll_cal = true;
1029		iter = TRIES_128;
1030		break;
1031	case MMC_TIMING_MMC_DDR52:
1032	case MMC_TIMING_UHS_DDR50:
1033		tegra_host->ddr_signaling = true;
1034		set_default_tap = true;
1035		break;
1036	default:
1037		set_default_tap = true;
1038		break;
1039	}
1040
1041	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1042	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
1043		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1044		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1045	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1046		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1047		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1048	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1049	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1050
1051	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1052
1053	sdhci_set_uhs_signaling(host, timing);
1054
1055	tegra_sdhci_pad_autocalib(host);
1056
1057	if (tegra_host->tuned_tap_delay && !set_default_tap)
1058		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1059	else
1060		tegra_sdhci_set_tap(host, tegra_host->default_tap);
1061
1062	if (set_dqs_trim)
1063		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1064
1065	if (do_hs400_dll_cal)
1066		tegra_sdhci_hs400_dll_cal(host);
1067}
1068
1069static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1070{
1071	unsigned int min, max;
1072
1073	/*
1074	 * Start search for minimum tap value at 10, as smaller values are
1075	 * may wrongly be reported as working but fail at higher speeds,
1076	 * according to the TRM.
1077	 */
1078	min = 10;
1079	while (min < 255) {
1080		tegra_sdhci_set_tap(host, min);
1081		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1082			break;
1083		min++;
1084	}
1085
1086	/* Find the maximum tap value that still passes. */
1087	max = min + 1;
1088	while (max < 255) {
1089		tegra_sdhci_set_tap(host, max);
1090		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1091			max--;
1092			break;
1093		}
1094		max++;
1095	}
1096
1097	/* The TRM states the ideal tap value is at 75% in the passing range. */
1098	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1099
1100	return mmc_send_tuning(host->mmc, opcode, NULL);
1101}
1102
1103static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1104						   struct mmc_ios *ios)
1105{
1106	struct sdhci_host *host = mmc_priv(mmc);
1107	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1108	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1109	int ret = 0;
1110
1111	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1112		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1113		if (ret < 0)
1114			return ret;
1115		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1116	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1117		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1118		if (ret < 0)
1119			return ret;
1120		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1121	}
1122
1123	if (tegra_host->pad_calib_required)
1124		tegra_sdhci_pad_autocalib(host);
1125
1126	return ret;
1127}
1128
1129static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1130					 struct sdhci_tegra *tegra_host)
1131{
1132	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1133	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1134		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1135			PTR_ERR(tegra_host->pinctrl_sdmmc));
1136		return -1;
1137	}
1138
1139	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1140				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1141	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1142		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1143			tegra_host->pinctrl_state_1v8_drv = NULL;
1144	}
1145
1146	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1147				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1148	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1149		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1150			tegra_host->pinctrl_state_3v3_drv = NULL;
1151	}
1152
1153	tegra_host->pinctrl_state_3v3 =
1154		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1155	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1156		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1157			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1158		return -1;
1159	}
1160
1161	tegra_host->pinctrl_state_1v8 =
1162		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1163	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1164		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1165			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1166		return -1;
1167	}
1168
1169	tegra_host->pad_control_available = true;
1170
1171	return 0;
1172}
1173
1174static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1175{
1176	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1177	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1178	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1179
1180	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1181		tegra_host->pad_calib_required = true;
1182}
1183
1184static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1185{
1186	struct mmc_host *mmc = cq_host->mmc;
1187	struct sdhci_host *host = mmc_priv(mmc);
1188	u8 ctrl;
1189	ktime_t timeout;
1190	bool timed_out;
1191
1192	/*
1193	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
1194	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
1195	 * to be re-configured.
1196	 * Tegra CQHCI/SDHCI prevents write access to block size register when
1197	 * CQE is unhalted. So handling CQE resume sequence here to configure
1198	 * SDHCI block registers prior to exiting CQE halt state.
1199	 */
1200	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1201	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1202		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1203		sdhci_cqe_enable(mmc);
1204		writel(val, cq_host->mmio + reg);
1205		timeout = ktime_add_us(ktime_get(), 50);
1206		while (1) {
1207			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1208			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1209			if (!(ctrl & CQHCI_HALT) || timed_out)
1210				break;
1211		}
1212		/*
1213		 * CQE usually resumes very quick, but incase if Tegra CQE
1214		 * doesn't resume retry unhalt.
1215		 */
1216		if (timed_out)
1217			writel(val, cq_host->mmio + reg);
1218	} else {
1219		writel(val, cq_host->mmio + reg);
1220	}
1221}
1222
1223static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1224					 struct mmc_request *mrq, u64 *data)
1225{
1226	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1227	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1228	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1229
1230	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1231	    mrq->cmd->flags & MMC_RSP_R1B)
1232		*data |= CQHCI_CMD_TIMING(1);
1233}
1234
1235static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1236{
1237	struct cqhci_host *cq_host = mmc->cqe_private;
1238	struct sdhci_host *host = mmc_priv(mmc);
1239	u32 val;
1240
1241	/*
1242	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
1243	 * register when CQE is enabled and unhalted.
1244	 * CQHCI driver enables CQE prior to activation, so disable CQE before
1245	 * programming block size in sdhci controller and enable it back.
1246	 */
1247	if (!cq_host->activated) {
1248		val = cqhci_readl(cq_host, CQHCI_CFG);
1249		if (val & CQHCI_ENABLE)
1250			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1251				     CQHCI_CFG);
1252		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1253		sdhci_cqe_enable(mmc);
1254		if (val & CQHCI_ENABLE)
1255			cqhci_writel(cq_host, val, CQHCI_CFG);
1256	}
1257
1258	/*
1259	 * CMD CRC errors are seen sometimes with some eMMC devices when status
1260	 * command is sent during transfer of last data block which is the
1261	 * default case as send status command block counter (CBC) is 1.
1262	 * Recommended fix to set CBC to 0 allowing send status command only
1263	 * when data lines are idle.
1264	 */
1265	val = cqhci_readl(cq_host, CQHCI_SSC1);
1266	val &= ~CQHCI_SSC1_CBC_MASK;
1267	cqhci_writel(cq_host, val, CQHCI_SSC1);
1268}
1269
1270static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1271{
1272	sdhci_dumpregs(mmc_priv(mmc));
1273}
1274
1275static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1276{
1277	int cmd_error = 0;
1278	int data_error = 0;
1279
1280	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1281		return intmask;
1282
1283	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1284
1285	return 0;
1286}
1287
1288static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1289				    struct mmc_command *cmd)
1290{
1291	u32 val;
1292
1293	/*
1294	 * HW busy detection timeout is based on programmed data timeout
1295	 * counter and maximum supported timeout is 11s which may not be
1296	 * enough for long operations like cache flush, sleep awake, erase.
1297	 *
1298	 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
1299	 * host controller to wait for busy state until the card is busy
1300	 * without HW timeout.
1301	 *
1302	 * So, use infinite busy wait mode for operations that may take
1303	 * more than maximum HW busy timeout of 11s otherwise use finite
1304	 * busy wait mode.
1305	 */
1306	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1307	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
1308		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1309	else
1310		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1311	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1312
1313	__sdhci_set_timeout(host, cmd);
1314}
1315
1316static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
1317{
1318	struct cqhci_host *cq_host = mmc->cqe_private;
1319	u32 reg;
1320
1321	reg = cqhci_readl(cq_host, CQHCI_CFG);
1322	reg |= CQHCI_ENABLE;
1323	cqhci_writel(cq_host, reg, CQHCI_CFG);
1324}
1325
1326static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
1327{
1328	struct cqhci_host *cq_host = mmc->cqe_private;
1329	struct sdhci_host *host = mmc_priv(mmc);
1330	u32 reg;
1331
1332	reg = cqhci_readl(cq_host, CQHCI_CFG);
1333	reg &= ~CQHCI_ENABLE;
1334	cqhci_writel(cq_host, reg, CQHCI_CFG);
1335	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1336}
1337
1338static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1339	.write_l    = tegra_cqhci_writel,
1340	.enable	= sdhci_tegra_cqe_enable,
1341	.disable = sdhci_cqe_disable,
1342	.dumpregs = sdhci_tegra_dumpregs,
1343	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1344	.pre_enable = sdhci_tegra_cqe_pre_enable,
1345	.post_disable = sdhci_tegra_cqe_post_disable,
1346};
1347
1348static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1349{
1350	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1351	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1352	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1353	struct device *dev = mmc_dev(host->mmc);
1354
1355	if (soc->dma_mask)
1356		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1357
1358	return 0;
1359}
1360
1361static const struct sdhci_ops tegra_sdhci_ops = {
1362	.get_ro     = tegra_sdhci_get_ro,
1363	.read_w     = tegra_sdhci_readw,
1364	.write_l    = tegra_sdhci_writel,
1365	.set_clock  = tegra_sdhci_set_clock,
1366	.set_dma_mask = tegra_sdhci_set_dma_mask,
1367	.set_bus_width = sdhci_set_bus_width,
1368	.reset      = tegra_sdhci_reset,
1369	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1370	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1371	.voltage_switch = tegra_sdhci_voltage_switch,
1372	.get_max_clock = tegra_sdhci_get_max_clock,
1373};
1374
1375static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1376	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1377		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1378		  SDHCI_QUIRK_NO_HISPD_BIT |
1379		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1380		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1381	.ops  = &tegra_sdhci_ops,
1382};
1383
1384static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1385	.pdata = &sdhci_tegra20_pdata,
1386	.dma_mask = DMA_BIT_MASK(32),
1387	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1388		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1389		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1390};
1391
1392static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1393	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1394		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1395		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1396		  SDHCI_QUIRK_NO_HISPD_BIT |
1397		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1398		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1399	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1400		   SDHCI_QUIRK2_BROKEN_HS200 |
1401		   /*
1402		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1403		    * though no command operation was in progress."
1404		    *
1405		    * The exact reason is unknown, as the same hardware seems
1406		    * to support Auto CMD23 on a downstream 3.1 kernel.
1407		    */
1408		   SDHCI_QUIRK2_ACMD23_BROKEN,
1409	.ops  = &tegra_sdhci_ops,
1410};
1411
1412static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1413	.pdata = &sdhci_tegra30_pdata,
1414	.dma_mask = DMA_BIT_MASK(32),
1415	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1416		    NVQUIRK_ENABLE_SDR50 |
1417		    NVQUIRK_ENABLE_SDR104 |
1418		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1419		    NVQUIRK_HAS_PADCALIB,
1420};
1421
1422static const struct sdhci_ops tegra114_sdhci_ops = {
1423	.get_ro     = tegra_sdhci_get_ro,
1424	.read_w     = tegra_sdhci_readw,
1425	.write_w    = tegra_sdhci_writew,
1426	.write_l    = tegra_sdhci_writel,
1427	.set_clock  = tegra_sdhci_set_clock,
1428	.set_dma_mask = tegra_sdhci_set_dma_mask,
1429	.set_bus_width = sdhci_set_bus_width,
1430	.reset      = tegra_sdhci_reset,
1431	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1432	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1433	.voltage_switch = tegra_sdhci_voltage_switch,
1434	.get_max_clock = tegra_sdhci_get_max_clock,
1435};
1436
1437static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1438	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1439		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1440		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1441		  SDHCI_QUIRK_NO_HISPD_BIT |
1442		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1443		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1444	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1445	.ops  = &tegra114_sdhci_ops,
1446};
1447
1448static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1449	.pdata = &sdhci_tegra114_pdata,
1450	.dma_mask = DMA_BIT_MASK(32),
1451	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1452};
1453
1454static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1455	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1456		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1457		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1458		  SDHCI_QUIRK_NO_HISPD_BIT |
1459		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1460		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1461	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1462	.ops  = &tegra114_sdhci_ops,
1463};
1464
1465static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1466	.pdata = &sdhci_tegra124_pdata,
1467	.dma_mask = DMA_BIT_MASK(34),
1468	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1469};
1470
1471static const struct sdhci_ops tegra210_sdhci_ops = {
1472	.get_ro     = tegra_sdhci_get_ro,
1473	.read_w     = tegra_sdhci_readw,
1474	.write_w    = tegra210_sdhci_writew,
1475	.write_l    = tegra_sdhci_writel,
1476	.set_clock  = tegra_sdhci_set_clock,
1477	.set_dma_mask = tegra_sdhci_set_dma_mask,
1478	.set_bus_width = sdhci_set_bus_width,
1479	.reset      = tegra_sdhci_reset,
1480	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1481	.voltage_switch = tegra_sdhci_voltage_switch,
1482	.get_max_clock = tegra_sdhci_get_max_clock,
1483	.set_timeout = tegra_sdhci_set_timeout,
1484};
1485
1486static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1487	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1488		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1489		  SDHCI_QUIRK_NO_HISPD_BIT |
1490		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1491		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1492	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1493	.ops  = &tegra210_sdhci_ops,
1494};
1495
1496static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1497	.pdata = &sdhci_tegra210_pdata,
1498	.dma_mask = DMA_BIT_MASK(34),
1499	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1500		    NVQUIRK_HAS_PADCALIB |
1501		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1502		    NVQUIRK_ENABLE_SDR50 |
1503		    NVQUIRK_ENABLE_SDR104 |
1504		    NVQUIRK_HAS_TMCLK,
1505	.min_tap_delay = 106,
1506	.max_tap_delay = 185,
1507};
1508
1509static const struct sdhci_ops tegra186_sdhci_ops = {
1510	.get_ro     = tegra_sdhci_get_ro,
1511	.read_w     = tegra_sdhci_readw,
1512	.write_l    = tegra_sdhci_writel,
1513	.set_clock  = tegra_sdhci_set_clock,
1514	.set_dma_mask = tegra_sdhci_set_dma_mask,
1515	.set_bus_width = sdhci_set_bus_width,
1516	.reset      = tegra_sdhci_reset,
1517	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1518	.voltage_switch = tegra_sdhci_voltage_switch,
1519	.get_max_clock = tegra_sdhci_get_max_clock,
1520	.irq = sdhci_tegra_cqhci_irq,
1521	.set_timeout = tegra_sdhci_set_timeout,
1522};
1523
1524static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1525	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1526		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1527		  SDHCI_QUIRK_NO_HISPD_BIT |
1528		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1529		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1530	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1531		   SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
1532	.ops  = &tegra186_sdhci_ops,
1533};
1534
1535static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1536	.pdata = &sdhci_tegra186_pdata,
1537	.dma_mask = DMA_BIT_MASK(40),
1538	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1539		    NVQUIRK_HAS_PADCALIB |
1540		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1541		    NVQUIRK_ENABLE_SDR50 |
1542		    NVQUIRK_ENABLE_SDR104 |
1543		    NVQUIRK_HAS_TMCLK |
1544		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1545	.min_tap_delay = 84,
1546	.max_tap_delay = 136,
1547};
1548
1549static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1550	.pdata = &sdhci_tegra186_pdata,
1551	.dma_mask = DMA_BIT_MASK(39),
1552	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1553		    NVQUIRK_HAS_PADCALIB |
1554		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1555		    NVQUIRK_ENABLE_SDR50 |
1556		    NVQUIRK_ENABLE_SDR104 |
1557		    NVQUIRK_HAS_TMCLK,
1558	.min_tap_delay = 96,
1559	.max_tap_delay = 139,
1560};
1561
1562static const struct sdhci_tegra_soc_data soc_data_tegra234 = {
1563	.pdata = &sdhci_tegra186_pdata,
1564	.dma_mask = DMA_BIT_MASK(39),
1565	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1566		    NVQUIRK_HAS_PADCALIB |
1567		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1568		    NVQUIRK_ENABLE_SDR50 |
1569		    NVQUIRK_ENABLE_SDR104 |
1570		    NVQUIRK_PROGRAM_STREAMID |
1571		    NVQUIRK_HAS_TMCLK,
1572	.min_tap_delay = 95,
1573	.max_tap_delay = 111,
1574};
1575
1576static const struct of_device_id sdhci_tegra_dt_match[] = {
1577	{ .compatible = "nvidia,tegra234-sdhci", .data = &soc_data_tegra234 },
1578	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1579	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1580	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1581	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1582	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1583	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1584	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1585	{}
1586};
1587MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1588
1589static int sdhci_tegra_add_host(struct sdhci_host *host)
1590{
1591	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1592	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1593	struct cqhci_host *cq_host;
1594	bool dma64;
1595	int ret;
1596
1597	if (!tegra_host->enable_hwcq)
1598		return sdhci_add_host(host);
1599
1600	sdhci_enable_v4_mode(host);
1601
1602	ret = sdhci_setup_host(host);
1603	if (ret)
1604		return ret;
1605
1606	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1607
1608	cq_host = devm_kzalloc(mmc_dev(host->mmc),
1609				sizeof(*cq_host), GFP_KERNEL);
1610	if (!cq_host) {
1611		ret = -ENOMEM;
1612		goto cleanup;
1613	}
1614
1615	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1616	cq_host->ops = &sdhci_tegra_cqhci_ops;
1617
1618	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1619	if (dma64)
1620		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1621
1622	ret = cqhci_init(cq_host, host->mmc, dma64);
1623	if (ret)
1624		goto cleanup;
1625
1626	ret = __sdhci_add_host(host);
1627	if (ret)
1628		goto cleanup;
1629
1630	return 0;
1631
1632cleanup:
1633	sdhci_cleanup_host(host);
1634	return ret;
1635}
1636
1637/* Program MC streamID for DMA transfers */
1638static void sdhci_tegra_program_stream_id(struct sdhci_host *host)
1639{
1640	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1641	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1642
1643	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID) {
1644		tegra_sdhci_writel(host, FIELD_PREP(GENMASK(15, 8), tegra_host->stream_id) |
1645					 FIELD_PREP(GENMASK(7, 0), tegra_host->stream_id),
1646					 SDHCI_TEGRA_CIF2AXI_CTRL_0);
1647	}
1648}
1649
1650static int sdhci_tegra_probe(struct platform_device *pdev)
1651{
1652	const struct sdhci_tegra_soc_data *soc_data;
1653	struct sdhci_host *host;
1654	struct sdhci_pltfm_host *pltfm_host;
1655	struct sdhci_tegra *tegra_host;
1656	struct clk *clk;
1657	int rc;
1658
1659	soc_data = of_device_get_match_data(&pdev->dev);
1660	if (!soc_data)
1661		return -EINVAL;
1662
1663	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1664	if (IS_ERR(host))
1665		return PTR_ERR(host);
1666	pltfm_host = sdhci_priv(host);
1667
1668	tegra_host = sdhci_pltfm_priv(pltfm_host);
1669	tegra_host->ddr_signaling = false;
1670	tegra_host->pad_calib_required = false;
1671	tegra_host->pad_control_available = false;
1672	tegra_host->soc_data = soc_data;
1673
1674	if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
1675		host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;
1676
1677	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1678		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1679		if (rc == 0)
1680			host->mmc_host_ops.start_signal_voltage_switch =
1681				sdhci_tegra_start_signal_voltage_switch;
1682	}
1683
1684	/* Hook to periodically rerun pad calibration */
1685	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1686		host->mmc_host_ops.request = tegra_sdhci_request;
1687
1688	host->mmc_host_ops.hs400_enhanced_strobe =
1689			tegra_sdhci_hs400_enhanced_strobe;
1690
1691	if (!host->ops->platform_execute_tuning)
1692		host->mmc_host_ops.execute_tuning =
1693				tegra_sdhci_execute_hw_tuning;
1694
1695	rc = mmc_of_parse(host->mmc);
1696	if (rc)
1697		goto err_parse_dt;
1698
1699	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1700		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1701
1702	/* HW busy detection is supported, but R1B responses are required. */
1703	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1704
1705	/* GPIO CD can be set as a wakeup source */
1706	host->mmc->caps |= MMC_CAP_CD_WAKE;
1707
1708	tegra_sdhci_parse_dt(host);
1709
1710	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID &&
1711	    !tegra_dev_iommu_get_stream_id(&pdev->dev, &tegra_host->stream_id)) {
1712		dev_warn(mmc_dev(host->mmc), "missing IOMMU stream ID\n");
1713		tegra_host->stream_id = 0x7f;
1714	}
1715
1716	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1717							 GPIOD_OUT_HIGH);
1718	if (IS_ERR(tegra_host->power_gpio)) {
1719		rc = PTR_ERR(tegra_host->power_gpio);
1720		goto err_power_req;
1721	}
1722
1723	/*
1724	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used for host
1725	 * timeout clock and SW can choose TMCLK or SDCLK for hardware
1726	 * data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of
1727	 * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL.
1728	 *
1729	 * USE_TMCLK_FOR_DATA_TIMEOUT bit default is set to 1 and SDMMC uses
1730	 * 12Mhz TMCLK which is advertised in host capability register.
1731	 * With TMCLK of 12Mhz provides maximum data timeout period that can
1732	 * be achieved is 11s better than using SDCLK for data timeout.
1733	 *
1734	 * So, TMCLK is set to 12Mhz and kept enabled all the time on SoC's
1735	 * supporting separate TMCLK.
1736	 */
1737
1738	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1739		clk = devm_clk_get(&pdev->dev, "tmclk");
1740		if (IS_ERR(clk)) {
1741			rc = PTR_ERR(clk);
1742			if (rc == -EPROBE_DEFER)
1743				goto err_power_req;
1744
1745			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
1746			clk = NULL;
1747		}
1748
1749		clk_set_rate(clk, 12000000);
1750		rc = clk_prepare_enable(clk);
1751		if (rc) {
1752			dev_err(&pdev->dev,
1753				"failed to enable tmclk: %d\n", rc);
1754			goto err_power_req;
1755		}
1756
1757		tegra_host->tmclk = clk;
1758	}
1759
1760	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1761	if (IS_ERR(clk)) {
1762		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
1763				   "failed to get clock\n");
1764		goto err_clk_get;
1765	}
1766	pltfm_host->clk = clk;
1767
1768	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1769							   "sdhci");
1770	if (IS_ERR(tegra_host->rst)) {
1771		rc = PTR_ERR(tegra_host->rst);
1772		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1773		goto err_rst_get;
1774	}
1775
1776	rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
1777	if (rc)
1778		goto err_rst_get;
1779
1780	pm_runtime_enable(&pdev->dev);
1781	rc = pm_runtime_resume_and_get(&pdev->dev);
1782	if (rc)
1783		goto err_pm_get;
1784
1785	rc = reset_control_assert(tegra_host->rst);
1786	if (rc)
1787		goto err_rst_assert;
1788
1789	usleep_range(2000, 4000);
1790
1791	rc = reset_control_deassert(tegra_host->rst);
1792	if (rc)
1793		goto err_rst_assert;
1794
1795	usleep_range(2000, 4000);
1796
1797	rc = sdhci_tegra_add_host(host);
1798	if (rc)
1799		goto err_add_host;
1800
1801	sdhci_tegra_program_stream_id(host);
1802
1803	return 0;
1804
1805err_add_host:
1806	reset_control_assert(tegra_host->rst);
1807err_rst_assert:
1808	pm_runtime_put_sync_suspend(&pdev->dev);
1809err_pm_get:
1810	pm_runtime_disable(&pdev->dev);
1811err_rst_get:
1812err_clk_get:
1813	clk_disable_unprepare(tegra_host->tmclk);
1814err_power_req:
1815err_parse_dt:
1816	sdhci_pltfm_free(pdev);
1817	return rc;
1818}
1819
1820static void sdhci_tegra_remove(struct platform_device *pdev)
1821{
1822	struct sdhci_host *host = platform_get_drvdata(pdev);
1823	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1824	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1825
1826	sdhci_remove_host(host, 0);
1827
1828	reset_control_assert(tegra_host->rst);
1829	usleep_range(2000, 4000);
1830
1831	pm_runtime_put_sync_suspend(&pdev->dev);
1832	pm_runtime_force_suspend(&pdev->dev);
1833
1834	clk_disable_unprepare(tegra_host->tmclk);
1835	sdhci_pltfm_free(pdev);
 
 
1836}
1837
1838static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
1839{
1840	struct sdhci_host *host = dev_get_drvdata(dev);
1841	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1842
1843	clk_disable_unprepare(pltfm_host->clk);
1844
1845	return 0;
1846}
1847
1848static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
1849{
1850	struct sdhci_host *host = dev_get_drvdata(dev);
1851	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1852
1853	return clk_prepare_enable(pltfm_host->clk);
1854}
1855
1856#ifdef CONFIG_PM_SLEEP
1857static int sdhci_tegra_suspend(struct device *dev)
1858{
1859	struct sdhci_host *host = dev_get_drvdata(dev);
1860	int ret;
1861
1862	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1863		ret = cqhci_suspend(host->mmc);
1864		if (ret)
1865			return ret;
1866	}
1867
1868	ret = sdhci_suspend_host(host);
1869	if (ret) {
1870		cqhci_resume(host->mmc);
1871		return ret;
1872	}
1873
1874	ret = pm_runtime_force_suspend(dev);
1875	if (ret) {
1876		sdhci_resume_host(host);
1877		cqhci_resume(host->mmc);
1878		return ret;
1879	}
1880
1881	return mmc_gpio_set_cd_wake(host->mmc, true);
1882}
1883
1884static int sdhci_tegra_resume(struct device *dev)
1885{
1886	struct sdhci_host *host = dev_get_drvdata(dev);
1887	int ret;
1888
1889	ret = mmc_gpio_set_cd_wake(host->mmc, false);
1890	if (ret)
1891		return ret;
1892
1893	ret = pm_runtime_force_resume(dev);
1894	if (ret)
1895		return ret;
1896
1897	sdhci_tegra_program_stream_id(host);
1898
1899	ret = sdhci_resume_host(host);
1900	if (ret)
1901		goto disable_clk;
1902
1903	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1904		ret = cqhci_resume(host->mmc);
1905		if (ret)
1906			goto suspend_host;
1907	}
1908
1909	return 0;
1910
1911suspend_host:
1912	sdhci_suspend_host(host);
1913disable_clk:
1914	pm_runtime_force_suspend(dev);
1915	return ret;
1916}
1917#endif
1918
1919static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
1920	SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
1921			   NULL)
1922	SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
1923};
1924
1925static struct platform_driver sdhci_tegra_driver = {
1926	.driver		= {
1927		.name	= "sdhci-tegra",
1928		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1929		.of_match_table = sdhci_tegra_dt_match,
1930		.pm	= &sdhci_tegra_dev_pm_ops,
1931	},
1932	.probe		= sdhci_tegra_probe,
1933	.remove_new	= sdhci_tegra_remove,
1934};
1935
1936module_platform_driver(sdhci_tegra_driver);
1937
1938MODULE_DESCRIPTION("SDHCI driver for Tegra");
1939MODULE_AUTHOR("Google, Inc.");
1940MODULE_LICENSE("GPL v2");