// SPDX-License-Identifier: GPL-2.0-only
/*
 * Amlogic SD/eMMC driver for the GX/S905 family SoCs
 *
 * Copyright (c) 2016 BayLibre, SAS.
 * Author: Kevin Hilman <khilman@baylibre.com>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/interrupt.h>
#include <linux/bitfield.h>
#include <linux/pinctrl/consumer.h>

#define DRIVER_NAME "meson-gx-mmc"

#define SD_EMMC_CLOCK 0x0
#define   CLK_DIV_MASK GENMASK(5, 0)
#define   CLK_SRC_MASK GENMASK(7, 6)
#define   CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define   CLK_TX_PHASE_MASK GENMASK(11, 10)
#define   CLK_RX_PHASE_MASK GENMASK(13, 12)
#define   CLK_PHASE_0 0
#define   CLK_PHASE_180 2
#define   CLK_V2_TX_DELAY_MASK GENMASK(19, 16)
#define   CLK_V2_RX_DELAY_MASK GENMASK(23, 20)
#define   CLK_V2_ALWAYS_ON BIT(24)

#define   CLK_V3_TX_DELAY_MASK GENMASK(21, 16)
#define   CLK_V3_RX_DELAY_MASK GENMASK(27, 22)
#define   CLK_V3_ALWAYS_ON BIT(28)

#define   CLK_TX_DELAY_MASK(h)		(h->data->tx_delay_mask)
#define   CLK_RX_DELAY_MASK(h)		(h->data->rx_delay_mask)
#define   CLK_ALWAYS_ON(h)		(h->data->always_on)

#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define   ADJUST_ADJ_DELAY_MASK GENMASK(21, 16)
#define   ADJUST_DS_EN BIT(15)
#define   ADJUST_ADJ_EN BIT(13)

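/*
 * On v3 controllers (AXG and later), the 0x4 and 0x8 offsets carry two
 * delay registers instead, and the adjust register moves to 0xc.
 */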
#define SD_EMMC_DELAY1 0x4
#define SD_EMMC_DELAY2 0x8
#define SD_EMMC_V3_ADJUST 0xc

#define SD_EMMC_CALOUT 0x10
#define SD_EMMC_START 0x40
#define   START_DESC_INIT BIT(0)
#define   START_DESC_BUSY BIT(1)
#define   START_DESC_ADDR_MASK GENMASK(31, 2)

#define SD_EMMC_CFG 0x44
#define   CFG_BUS_WIDTH_MASK GENMASK(1, 0)
#define   CFG_BUS_WIDTH_1 0x0
#define   CFG_BUS_WIDTH_4 0x1
#define   CFG_BUS_WIDTH_8 0x2
#define   CFG_DDR BIT(2)
#define   CFG_BLK_LEN_MASK GENMASK(7, 4)
#define   CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
#define   CFG_RC_CC_MASK GENMASK(15, 12)
#define   CFG_STOP_CLOCK BIT(22)
#define   CFG_CLK_ALWAYS_ON BIT(18)
#define   CFG_CHK_DS BIT(20)
#define   CFG_AUTO_CLK BIT(23)
#define   CFG_ERR_ABORT BIT(27)

#define SD_EMMC_STATUS 0x48
#define   STATUS_BUSY BIT(31)
#define   STATUS_DESC_BUSY BIT(30)
#define   STATUS_DATI GENMASK(23, 16)

#define SD_EMMC_IRQ_EN 0x4c
#define   IRQ_RXD_ERR_MASK GENMASK(7, 0)
#define   IRQ_TXD_ERR BIT(8)
#define   IRQ_DESC_ERR BIT(9)
#define   IRQ_RESP_ERR BIT(10)
#define   IRQ_CRC_ERR \
	(IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | IRQ_DESC_ERR | IRQ_RESP_ERR)
#define   IRQ_RESP_TIMEOUT BIT(11)
#define   IRQ_DESC_TIMEOUT BIT(12)
#define   IRQ_TIMEOUTS \
	(IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT)
#define   IRQ_END_OF_CHAIN BIT(13)
#define   IRQ_RESP_STATUS BIT(14)
#define   IRQ_SDIO BIT(15)
#define   IRQ_EN_MASK \
	(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\
	 IRQ_SDIO)

#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68

#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_SRAM_DATA_BUF_LEN 1536
#define SD_EMMC_SRAM_DATA_BUF_OFF 0x200

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE

#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)

#define MUX_CLK_NUM_PARENTS 2

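/*
 * Per-SoC data: the v2 (GX) and v3 (AXG) controllers use different bit
 * masks in SD_EMMC_CLOCK and a different offset for the adjust register.
 */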
struct meson_mmc_data {
	unsigned int tx_delay_mask;
	unsigned int rx_delay_mask;
	unsigned int always_on;
	unsigned int adjust;
};

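/* Hardware DMA descriptor: four 32-bit words, chained back-to-back in memory */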
struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};

struct meson_host {
	struct	device		*dev;
	struct	meson_mmc_data *data;
	struct	mmc_host	*mmc;
	struct	mmc_command	*cmd;

	void __iomem *regs;
	struct clk *core_clk;
	struct clk *mux_clk;
	struct clk *mmc_clk;
	unsigned long req_rate;
	bool ddr;

	bool dram_access_quirk;

	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_clk_gate;

	unsigned int bounce_buf_size;
	void *bounce_buf;
	dma_addr_t bounce_dma_addr;
	struct sd_emmc_desc *descs;
	dma_addr_t descs_dma_addr;

	int irq;

	bool vqmmc_enabled;
};

#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

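/*
 * The low bits of the descriptor data and response pointers are not
 * address bits: they flag SRAM (vs. DDR) addressing and big-endian data.
 */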
#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)

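/*
 * The hardware stores the data timeout as log2(ms) in a 4-bit field, so
 * round the requested timeout up to a power of two and cap it at 2^15 ms.
 */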
static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
{
	unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;

	if (!timeout)
		return SD_EMMC_CMD_TIMEOUT_DATA;

	timeout = roundup_pow_of_two(timeout);

	return min(timeout, 32768U); /* max. 2^15 ms */
}

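/*
 * Pick the command to issue next: the main command after a successful
 * SET_BLOCK_COUNT, the stop command after a failed (or sbc-less)
 * multi-block transfer, or NULL when the request is complete.
 */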
static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd)
{
	if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
		return cmd->mrq->cmd;
	else if (mmc_op_multi(cmd->opcode) &&
		 (!cmd->mrq->sbc || cmd->error || cmd->data->error))
		return cmd->mrq->stop;
	else
		return NULL;
}

static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
					struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct scatterlist *sg;
	int i;
	bool use_desc_chain_mode = true;

	/*
	 * When the controller DMA cannot directly access DDR memory,
	 * disable chain mode and use the internal SRAM through the
	 * bounce buffer instead.
	 */
	if (host->dram_access_quirk)
		return;

	/*
	 * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
	 * reported. For some strange reason this occurs in descriptor
	 * chain mode only. So let's fall back to bounce buffer mode
	 * for command SD_IO_RW_EXTENDED.
	 */
	if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
		return;

	for_each_sg(data->sg, sg, data->sg_len, i)
		/* check for 8 byte alignment */
		if (sg->offset & 7) {
			WARN_ONCE(1, "unaligned scatterlist buffer\n");
			use_desc_chain_mode = false;
			break;
		}

	if (use_desc_chain_mode)
		data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
{
	return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data)
{
	return data && data->flags & MMC_DATA_READ &&
	       !meson_mmc_desc_chain_mode(data);
}

static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	meson_mmc_get_transfer_mode(mmc, mrq);
	data->host_cookie |= SD_EMMC_PRE_REQ_DONE;

	if (!meson_mmc_desc_chain_mode(data))
		return;

	data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
				    mmc_get_dma_dir(data));
	if (!data->sg_count)
		dev_err(mmc_dev(mmc), "dma_map_sg failed");
}

static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       int err)
{
	struct mmc_data *data = mrq->data;

	if (data && meson_mmc_desc_chain_mode(data) && data->sg_count)
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
}

/*
 * Gating the clock on this controller is tricky.  It seems the mmc clock
 * is also used by the controller.  It may crash during some operation if the
 * clock is stopped.  The safest thing to do, whenever possible, is to keep
 * the clock running and stop it at the pad using the pinmux.
 */
static void meson_mmc_clk_gate(struct meson_host *host)
{
	u32 cfg;

	if (host->pins_clk_gate) {
		pinctrl_select_state(host->pinctrl, host->pins_clk_gate);
	} else {
		/*
		 * If the pinmux is not provided - default to the classic and
		 * unsafe method
		 */
		cfg = readl(host->regs + SD_EMMC_CFG);
		cfg |= CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}
}

static void meson_mmc_clk_ungate(struct meson_host *host)
{
	u32 cfg;

	if (host->pins_clk_gate)
		pinctrl_select_state(host->pinctrl, host->pins_default);

	/* Make sure the clock is not stopped in the controller */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);
}

static int meson_mmc_clk_set(struct meson_host *host, unsigned long rate,
			     bool ddr)
{
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 cfg;

	/* Same request - bail-out */
	if (host->ddr == ddr && host->req_rate == rate)
		return 0;

	/* stop clock */
	meson_mmc_clk_gate(host);
	host->req_rate = 0;
	mmc->actual_clock = 0;

	/* return with clock being stopped */
	if (!rate)
		return 0;

	/* Stop the clock during rate change to avoid glitches */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg |= CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	if (ddr) {
		/* DDR modes require higher module clock */
		rate <<= 1;
		cfg |= CFG_DDR;
	} else {
		cfg &= ~CFG_DDR;
	}
	writel(cfg, host->regs + SD_EMMC_CFG);
	host->ddr = ddr;

	ret = clk_set_rate(host->mmc_clk, rate);
	if (ret) {
		dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			rate, ret);
		return ret;
	}

	host->req_rate = rate;
	mmc->actual_clock = clk_get_rate(host->mmc_clk);

	/* We should report the real output frequency of the controller */
	if (ddr) {
		host->req_rate >>= 1;
		mmc->actual_clock >>= 1;
	}

	dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock);
	if (rate != mmc->actual_clock)
		dev_dbg(host->dev, "requested rate was %lu\n", rate);

	/* (re)start clock */
	meson_mmc_clk_ungate(host);

	return 0;
}

/*
 * The SD/eMMC IP block has an internal mux and divider used for
 * generating the MMC clock.  Use the clock framework to create and
 * manage these clocks.
 */
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	struct clk_mux *mux;
	struct clk_divider *div;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	const char *clk_parent[1];
	u32 clk_reg;

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = CLK_ALWAYS_ON(host);
	clk_reg |= CLK_DIV_MASK;
	clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
	clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
	clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);

	/* get the mux parents */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		struct clk *clk;
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		clk = devm_clk_get(host->dev, name);
		if (IS_ERR(clk)) {
			if (clk != ERR_PTR(-EPROBE_DEFER))
				dev_err(host->dev, "Missing clock %s\n", name);
			return PTR_ERR(clk);
		}

		mux_parent_names[i] = __clk_get_name(clk);
	}

	/* create the mux */
	mux = devm_kzalloc(host->dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
	init.num_parents = MUX_CLK_NUM_PARENTS;

	mux->reg = host->regs + SD_EMMC_CLOCK;
	mux->shift = __ffs(CLK_SRC_MASK);
	mux->mask = CLK_SRC_MASK >> mux->shift;
	mux->hw.init = &init;

	host->mux_clk = devm_clk_register(host->dev, &mux->hw);
	if (WARN_ON(IS_ERR(host->mux_clk)))
		return PTR_ERR(host->mux_clk);

	/* create the divider */
	div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_parent[0] = __clk_get_name(host->mux_clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	div->reg = host->regs + SD_EMMC_CLOCK;
	div->shift = __ffs(CLK_DIV_MASK);
	div->width = __builtin_popcountl(CLK_DIV_MASK);
	div->hw.init = &init;
	div->flags = CLK_DIVIDER_ONE_BASED;

	host->mmc_clk = devm_clk_register(host->dev, &div->hw);
	if (WARN_ON(IS_ERR(host->mmc_clk)))
		return PTR_ERR(host->mmc_clk);

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000);
	ret = clk_set_rate(host->mmc_clk, host->mmc->f_min);
	if (ret)
		return ret;

	return clk_prepare_enable(host->mmc_clk);
}

static void meson_mmc_disable_resampling(struct meson_host *host)
{
	unsigned int val = readl(host->regs + host->data->adjust);

	val &= ~ADJUST_ADJ_EN;
	writel(val, host->regs + host->data->adjust);
}

static void meson_mmc_reset_resampling(struct meson_host *host)
{
	unsigned int val;

	meson_mmc_disable_resampling(host);

	val = readl(host->regs + host->data->adjust);
	val &= ~ADJUST_ADJ_DELAY_MASK;
	writel(val, host->regs + host->data->adjust);
}

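/*
 * Tuning sweeps the resampling delay, one source clock period at a time,
 * and keeps the first setting for which the tuning command succeeds.
 */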
static int meson_mmc_resampling_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct meson_host *host = mmc_priv(mmc);
	unsigned int val, dly, max_dly, i;
	int ret;

	/* Resampling is done using the source clock */
	max_dly = DIV_ROUND_UP(clk_get_rate(host->mux_clk),
			       clk_get_rate(host->mmc_clk));

	val = readl(host->regs + host->data->adjust);
	val |= ADJUST_ADJ_EN;
	writel(val, host->regs + host->data->adjust);

	if (mmc->doing_retune)
		dly = FIELD_GET(ADJUST_ADJ_DELAY_MASK, val) + 1;
	else
		dly = 0;

	for (i = 0; i < max_dly; i++) {
		val &= ~ADJUST_ADJ_DELAY_MASK;
		val |= FIELD_PREP(ADJUST_ADJ_DELAY_MASK, (dly + i) % max_dly);
		writel(val, host->regs + host->data->adjust);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (!ret) {
			dev_dbg(mmc_dev(mmc), "resampling delay: %u\n",
				(dly + i) % max_dly);
			return 0;
		}
	}

	meson_mmc_reset_resampling(host);
	return -EIO;
}

static int meson_mmc_prepare_ios_clock(struct meson_host *host,
				       struct mmc_ios *ios)
{
	bool ddr;

	switch (ios->timing) {
	case MMC_TIMING_MMC_DDR52:
	case MMC_TIMING_UHS_DDR50:
		ddr = true;
		break;

	default:
		ddr = false;
		break;
	}

	return meson_mmc_clk_set(host, ios->clock, ddr);
}

static void meson_mmc_check_resampling(struct meson_host *host,
				       struct mmc_ios *ios)
{
	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
	case MMC_TIMING_MMC_DDR52:
		meson_mmc_disable_resampling(host);
		break;
	}
}

static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 bus_width, val;
	int err;

	/*
	 * GPIO regulator, only controls switching between 1v8 and
	 * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
	 */
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;

	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		break;

	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			int ret = regulator_enable(mmc->supply.vqmmc);

			if (ret < 0)
				dev_err(host->dev,
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		break;
	}

	/* Bus width */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = CFG_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = CFG_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = CFG_BUS_WIDTH_8;
		break;
	default:
		dev_err(host->dev, "Invalid ios->bus_width: %u.  Setting to 4.\n",
			ios->bus_width);
		bus_width = CFG_BUS_WIDTH_4;
	}

	val = readl(host->regs + SD_EMMC_CFG);
	val &= ~CFG_BUS_WIDTH_MASK;
	val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);
	writel(val, host->regs + SD_EMMC_CFG);

	meson_mmc_check_resampling(host, ios);
	err = meson_mmc_prepare_ios_clock(host, ios);
	if (err)
		dev_err(host->dev, "Failed to set clock: %d\n", err);

	dev_dbg(host->dev, "SD_EMMC_CFG:  0x%08x\n", val);
}

static void meson_mmc_request_done(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	host->cmd = NULL;
	mmc_request_done(host->mmc, mrq);
}

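/* CFG_BLK_LEN holds log2 of the block size, hence the power-of-2 check */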
static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 cfg, blksz_old;

	cfg = readl(host->regs + SD_EMMC_CFG);
	blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);

	if (!is_power_of_2(blksz))
		dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);

	blksz = ilog2(blksz);

	/* check if block-size matches, if not update */
	if (blksz == blksz_old)
		return;

	dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
		blksz_old, blksz);

	cfg &= ~CFG_BLK_LEN_MASK;
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
	writel(cfg, host->regs + SD_EMMC_CFG);
}

static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
{
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			*cmd_cfg |= CMD_CFG_RESP_128;
		*cmd_cfg |= CMD_CFG_RESP_NUM;

		if (!(cmd->flags & MMC_RSP_CRC))
			*cmd_cfg |= CMD_CFG_RESP_NOCRC;

		if (cmd->flags & MMC_RSP_BUSY)
			*cmd_cfg |= CMD_CFG_R1B;
	} else {
		*cmd_cfg |= CMD_CFG_NO_RESP;
	}
}

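/*
 * Build one descriptor per DMA segment. Only the first descriptor sends
 * the command; the last one ends the chain, and the write to
 * SD_EMMC_START kicks the DMA engine.
 */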
static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc = host->descs;
	struct mmc_data *data = host->cmd->data;
	struct scatterlist *sg;
	u32 start;
	int i;

	if (data->flags & MMC_DATA_WRITE)
		cmd_cfg |= CMD_CFG_DATA_WR;

	if (data->blocks > 1) {
		cmd_cfg |= CMD_CFG_BLOCK_MODE;
		meson_mmc_set_blksz(mmc, data->blksz);
	}

	for_each_sg(data->sg, sg, data->sg_count, i) {
		unsigned int len = sg_dma_len(sg);

		if (data->blocks > 1)
			len /= data->blksz;

		desc[i].cmd_cfg = cmd_cfg;
		desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
		if (i > 0)
			desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
		desc[i].cmd_arg = host->cmd->arg;
		desc[i].cmd_resp = 0;
		desc[i].cmd_data = sg_dma_address(sg);
	}
	desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;

	dma_wmb(); /* ensure descriptor is written before kicked */
	start = host->descs_dma_addr | START_DESC_BUSY;
	writel(start, host->regs + SD_EMMC_START);
}

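/*
 * Issue a single command, either through the descriptor chain prepared
 * in pre_req or, as a fallback, through the bounce buffer with the
 * descriptor programmed directly into the CMD_* registers.
 */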
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = cmd->data;
	u32 cmd_cfg = 0, cmd_data = 0;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();

	host->cmd = cmd;

	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
	cmd_cfg |= CMD_CFG_OWNER;  /* owned by CPU */
	cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */

	meson_mmc_set_response_bits(cmd, &cmd_cfg);

	/* data? */
	if (data) {
		data->bytes_xfered = 0;
		cmd_cfg |= CMD_CFG_DATA_IO;
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(meson_mmc_get_timeout_msecs(data)));

		if (meson_mmc_desc_chain_mode(data)) {
			meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
			return;
		}

		if (data->blocks > 1) {
			cmd_cfg |= CMD_CFG_BLOCK_MODE;
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
					      data->blocks);
			meson_mmc_set_blksz(mmc, data->blksz);
		} else {
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
		}

		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_WRITE) {
			cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buf, xfer_bytes);
			dma_wmb();
		}

		cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
	} else {
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(SD_EMMC_CMD_TIMEOUT));
	}

	/* Last descriptor */
	cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(0, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}

static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	bool needs_pre_post_req = mrq->data &&
			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

	if (needs_pre_post_req) {
		meson_mmc_get_transfer_mode(mmc, mrq);
		if (!meson_mmc_desc_chain_mode(mrq->data))
			needs_pre_post_req = false;
	}

	if (needs_pre_post_req)
		meson_mmc_pre_req(mmc, mrq);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);

	if (needs_pre_post_req)
		meson_mmc_post_req(mmc, mrq, 0);
}

static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
		cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
		cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
		cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
	}
}

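/*
 * Hard IRQ handler: ack the raised interrupts, latch errors into the
 * command and wake the threaded handler for bounce buffer reads, command
 * chaining and error recovery.
 */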
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *cmd;
	struct mmc_data *data;
	u32 irq_en, status, raw_status;
	irqreturn_t ret = IRQ_NONE;

	irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
	raw_status = readl(host->regs + SD_EMMC_STATUS);
	status = raw_status & irq_en;

	if (!status) {
		dev_dbg(host->dev,
			"Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n",
			irq_en, raw_status);
		return IRQ_NONE;
	}

	if (WARN_ON(!host) || WARN_ON(!host->cmd))
		return IRQ_NONE;

	/* ack all raised interrupts */
	writel(status, host->regs + SD_EMMC_STATUS);

	cmd = host->cmd;
	data = cmd->data;
	cmd->error = 0;
	if (status & IRQ_CRC_ERR) {
		dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status);
		cmd->error = -EILSEQ;
		ret = IRQ_WAKE_THREAD;
		goto out;
	}

	if (status & IRQ_TIMEOUTS) {
		dev_dbg(host->dev, "Timeout - status 0x%08x\n", status);
		cmd->error = -ETIMEDOUT;
		ret = IRQ_WAKE_THREAD;
		goto out;
	}

	meson_mmc_read_resp(host->mmc, cmd);

	if (status & IRQ_SDIO) {
		dev_dbg(host->dev, "IRQ: SDIO TODO.\n");
		ret = IRQ_HANDLED;
	}

	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
		if (data && !cmd->error)
			data->bytes_xfered = data->blksz * data->blocks;
		if (meson_mmc_bounce_buf_read(data) ||
		    meson_mmc_get_next_command(cmd))
			ret = IRQ_WAKE_THREAD;
		else
			ret = IRQ_HANDLED;
	}

out:
	if (cmd->error) {
		/* Stop desc in case of errors */
		u32 start = readl(host->regs + SD_EMMC_START);

		start &= ~START_DESC_BUSY;
		writel(start, host->regs + SD_EMMC_START);
	}

	if (ret == IRQ_HANDLED)
		meson_mmc_request_done(host->mmc, cmd->mrq);

	return ret;
}

static int meson_mmc_wait_desc_stop(struct meson_host *host)
{
	u32 status;

	/*
	 * It may sometimes take a while for it to actually halt. Here, we
	 * are giving it 5ms to comply
	 *
	 * If we don't confirm the descriptor is stopped, it might raise new
	 * IRQs after we have called mmc_request_done() which is bad.
	 */

	return readl_poll_timeout(host->regs + SD_EMMC_STATUS, status,
				  !(status & (STATUS_BUSY | STATUS_DESC_BUSY)),
				  100, 5000);
}

static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *next_cmd, *cmd = host->cmd;
	struct mmc_data *data;
	unsigned int xfer_bytes;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	if (cmd->error) {
		meson_mmc_wait_desc_stop(host);
		meson_mmc_request_done(host->mmc, cmd->mrq);

		return IRQ_HANDLED;
	}

	data = cmd->data;
	if (meson_mmc_bounce_buf_read(data)) {
		xfer_bytes = data->blksz * data->blocks;
		WARN_ON(xfer_bytes > host->bounce_buf_size);
		sg_copy_from_buffer(data->sg, data->sg_len,
				    host->bounce_buf, xfer_bytes);
	}

	next_cmd = meson_mmc_get_next_command(cmd);
	if (next_cmd)
		meson_mmc_start_cmd(host->mmc, next_cmd);
	else
		meson_mmc_request_done(host->mmc, cmd->mrq);

	return IRQ_HANDLED;
}

/*
 * NOTE: we only need this until the GPIO/pinctrl driver can handle
 * interrupts.  For now, the MMC core will use this for polling.
 */
static int meson_mmc_get_cd(struct mmc_host *mmc)
{
	int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS)
		return 1; /* assume present */

	return status;
}

static void meson_mmc_cfg_init(struct meson_host *host)
{
	u32 cfg = 0;

	cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
			  ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
	cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));

	/* abort chain on R/W errors */
	cfg |= CFG_ERR_ABORT;

	writel(cfg, host->regs + SD_EMMC_CFG);
}

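/* The card signals busy on DAT0; report busy while data lines 0-3 all read low */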
static int meson_mmc_card_busy(struct mmc_host *mmc)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 regval;

	regval = readl(host->regs + SD_EMMC_STATUS);

	/* We are only interested in lines 0 to 3, so mask the other ones */
	return !(FIELD_GET(STATUS_DATI, regval) & 0xf);
}

static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	/* vqmmc regulator is available */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		/*
		 * The usual amlogic setup uses a GPIO to switch from one
		 * regulator to the other. While the voltage ramp up is
		 * pretty fast, care must be taken when switching from 3.3v
		 * to 1.8v. Please make sure the regulator framework is aware
		 * of your own regulator constraints
		 */
		return mmc_regulator_set_vqmmc(mmc, ios);
	}

	/* no vqmmc regulator, assume fixed regulator at 3/3.3V */
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return 0;

	return -EINVAL;
}

static const struct mmc_host_ops meson_mmc_ops = {
	.request	= meson_mmc_request,
	.set_ios	= meson_mmc_set_ios,
	.get_cd		= meson_mmc_get_cd,
	.pre_req	= meson_mmc_pre_req,
	.post_req	= meson_mmc_post_req,
	.execute_tuning = meson_mmc_resampling_tuning,
	.card_busy	= meson_mmc_card_busy,
	.start_signal_voltage_switch = meson_mmc_voltage_switch,
};

static int meson_mmc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct meson_host *host;
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, host);

	/* The G12A SDIO Controller needs an SRAM bounce buffer */
	host->dram_access_quirk = device_property_read_bool(&pdev->dev,
					"amlogic,dram-access-quirk");

	/* Get regulators and the supported OCR mask */
	host->vqmmc_enabled = false;
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto free_host;

	ret = mmc_of_parse(mmc);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
		goto free_host;
	}

	host->data = (struct meson_mmc_data *)
		of_device_get_match_data(&pdev->dev);
	if (!host->data) {
		ret = -EINVAL;
		goto free_host;
	}

	ret = device_reset_optional(&pdev->dev);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "device reset failed: %d\n", ret);

		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->regs)) {
		ret = PTR_ERR(host->regs);
		goto free_host;
	}

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq <= 0) {
		ret = -EINVAL;
		goto free_host;
	}

	host->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(host->pinctrl)) {
		ret = PTR_ERR(host->pinctrl);
		goto free_host;
	}

	host->pins_default = pinctrl_lookup_state(host->pinctrl,
						  PINCTRL_STATE_DEFAULT);
	if (IS_ERR(host->pins_default)) {
		ret = PTR_ERR(host->pins_default);
		goto free_host;
	}

	host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
						   "clk-gate");
	if (IS_ERR(host->pins_clk_gate)) {
		dev_warn(&pdev->dev,
			 "can't get clk-gate pinctrl, using clk_stop bit\n");
		host->pins_clk_gate = NULL;
	}

	host->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(host->core_clk)) {
		ret = PTR_ERR(host->core_clk);
		goto free_host;
	}

	ret = clk_prepare_enable(host->core_clk);
	if (ret)
		goto free_host;

	ret = meson_mmc_clk_init(host);
	if (ret)
		goto err_core_clk;

	/* set config to sane default */
	meson_mmc_cfg_init(host);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	/* clear, ack and enable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
	       host->regs + SD_EMMC_STATUS);
	writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
	       host->regs + SD_EMMC_IRQ_EN);

	ret = request_threaded_irq(host->irq, meson_mmc_irq,
				   meson_mmc_irq_thread, IRQF_ONESHOT,
				   dev_name(&pdev->dev), host);
	if (ret)
		goto err_init_clk;

	mmc->caps |= MMC_CAP_CMD23;
	if (host->dram_access_quirk) {
		/* Limit to the available sram memory */
		mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size;
		mmc->max_blk_count = mmc->max_segs;
	} else {
		mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
		mmc->max_segs = SD_EMMC_DESC_BUF_LEN /
				sizeof(struct sd_emmc_desc);
	}
	mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * At the moment, we don't know how to reliably enable HS400.
	 * From the different datasheets, it is not even clear if this mode
	 * is officially supported by any of the SoCs
	 */
	mmc->caps2 &= ~MMC_CAP2_HS400;

	if (host->dram_access_quirk) {
		/*
		 * The MMC controller embeds 1.5KiB of internal SRAM
		 * that can be used as a bounce buffer. In the case of
		 * the G12A SDIO controller, use it instead of DDR
		 * memory.
		 */
		host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
		host->bounce_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
		host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
	} else {
		/* data bounce buffer */
		host->bounce_buf_size = mmc->max_req_size;
		host->bounce_buf =
			dma_alloc_coherent(host->dev, host->bounce_buf_size,
					   &host->bounce_dma_addr, GFP_KERNEL);
		if (host->bounce_buf == NULL) {
			dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
			ret = -ENOMEM;
			goto err_free_irq;
		}
	}

	host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
		      &host->descs_dma_addr, GFP_KERNEL);
	if (!host->descs) {
		dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
		ret = -ENOMEM;
		goto err_bounce_buf;
	}

	mmc->ops = &meson_mmc_ops;
	mmc_add_host(mmc);

	return 0;

err_bounce_buf:
	if (!host->dram_access_quirk)
		dma_free_coherent(host->dev, host->bounce_buf_size,
				  host->bounce_buf, host->bounce_dma_addr);
err_free_irq:
	free_irq(host->irq, host);
err_init_clk:
	clk_disable_unprepare(host->mmc_clk);
err_core_clk:
	clk_disable_unprepare(host->core_clk);
free_host:
	mmc_free_host(mmc);
	return ret;
}

static int meson_mmc_remove(struct platform_device *pdev)
{
	struct meson_host *host = dev_get_drvdata(&pdev->dev);

	mmc_remove_host(host->mmc);

	/* disable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	free_irq(host->irq, host);

	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
			  host->descs, host->descs_dma_addr);

	if (!host->dram_access_quirk)
		dma_free_coherent(host->dev, host->bounce_buf_size,
				  host->bounce_buf, host->bounce_dma_addr);

	clk_disable_unprepare(host->mmc_clk);
	clk_disable_unprepare(host->core_clk);

	mmc_free_host(host->mmc);
	return 0;
}

static const struct meson_mmc_data meson_gx_data = {
	.tx_delay_mask	= CLK_V2_TX_DELAY_MASK,
	.rx_delay_mask	= CLK_V2_RX_DELAY_MASK,
	.always_on	= CLK_V2_ALWAYS_ON,
	.adjust		= SD_EMMC_ADJUST,
};

static const struct meson_mmc_data meson_axg_data = {
	.tx_delay_mask	= CLK_V3_TX_DELAY_MASK,
	.rx_delay_mask	= CLK_V3_RX_DELAY_MASK,
	.always_on	= CLK_V3_ALWAYS_ON,
	.adjust		= SD_EMMC_V3_ADJUST,
};

static const struct of_device_id meson_mmc_of_match[] = {
	{ .compatible = "amlogic,meson-gx-mmc",		.data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxbb-mmc",	.data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxl-mmc",	.data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxm-mmc",	.data = &meson_gx_data },
	{ .compatible = "amlogic,meson-axg-mmc",	.data = &meson_axg_data },
	{}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);

static struct platform_driver meson_mmc_driver = {
	.probe		= meson_mmc_probe,
	.remove		= meson_mmc_remove,
	.driver		= {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(meson_mmc_of_match),
	},
};

module_platform_driver(meson_mmc_driver);

MODULE_DESCRIPTION("Amlogic S905*/GX*/AXG SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");