// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
 * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
 * Copyright (C) 2012 Avionic Design GmbH
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#define COMMAND					0x00
#define   COMMAND_GO				BIT(31)
#define   COMMAND_CLE				BIT(30)
#define   COMMAND_ALE				BIT(29)
#define   COMMAND_PIO				BIT(28)
#define   COMMAND_TX				BIT(27)
#define   COMMAND_RX				BIT(26)
#define   COMMAND_SEC_CMD			BIT(25)
#define   COMMAND_AFT_DAT			BIT(24)
#define   COMMAND_TRANS_SIZE(size)		((((size) - 1) & 0xf) << 20)
#define   COMMAND_A_VALID			BIT(19)
#define   COMMAND_B_VALID			BIT(18)
#define   COMMAND_RD_STATUS_CHK			BIT(17)
#define   COMMAND_RBSY_CHK			BIT(16)
#define   COMMAND_CE(x)				BIT(8 + ((x) & 0x7))
#define   COMMAND_CLE_SIZE(size)		((((size) - 1) & 0x3) << 4)
#define   COMMAND_ALE_SIZE(size)		((((size) - 1) & 0xf) << 0)

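/*
 * For illustration: tegra_nand_cmd() below gathers these fields into a
 * single COMMAND write. A one-command, one-address-cycle operation on
 * chip-select 0 ends up issuing something like
 *
 *   COMMAND_GO | COMMAND_CLE | COMMAND_ALE | COMMAND_ALE_SIZE(1) |
 *   COMMAND_CE(0)
 *
 * with the opcode placed in CMD_REG1 and the address byte in ADDR_REG1.
 */
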
#define STATUS					0x04

#define ISR					0x08
#define   ISR_CORRFAIL_ERR			BIT(24)
#define   ISR_UND				BIT(7)
#define   ISR_OVR				BIT(6)
#define   ISR_CMD_DONE				BIT(5)
#define   ISR_ECC_ERR				BIT(4)

#define IER					0x0c
#define   IER_ERR_TRIG_VAL(x)			(((x) & 0xf) << 16)
#define   IER_UND				BIT(7)
#define   IER_OVR				BIT(6)
#define   IER_CMD_DONE				BIT(5)
#define   IER_ECC_ERR				BIT(4)
#define   IER_GIE				BIT(0)

#define CONFIG					0x10
#define   CONFIG_HW_ECC				BIT(31)
#define   CONFIG_ECC_SEL			BIT(30)
#define   CONFIG_ERR_COR			BIT(29)
#define   CONFIG_PIPE_EN			BIT(28)
#define   CONFIG_TVAL_4				(0 << 24)
#define   CONFIG_TVAL_6				(1 << 24)
#define   CONFIG_TVAL_8				(2 << 24)
#define   CONFIG_SKIP_SPARE			BIT(23)
#define   CONFIG_BUS_WIDTH_16			BIT(21)
#define   CONFIG_COM_BSY			BIT(20)
#define   CONFIG_PS_256				(0 << 16)
#define   CONFIG_PS_512				(1 << 16)
#define   CONFIG_PS_1024			(2 << 16)
#define   CONFIG_PS_2048			(3 << 16)
#define   CONFIG_PS_4096			(4 << 16)
#define   CONFIG_SKIP_SPARE_SIZE_4		(0 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_8		(1 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_12		(2 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_16		(3 << 14)
#define   CONFIG_TAG_BYTE_SIZE(x)			((x) & 0xff)

#define TIMING_1				0x14
#define   TIMING_TRP_RESP(x)			(((x) & 0xf) << 28)
#define   TIMING_TWB(x)				(((x) & 0xf) << 24)
#define   TIMING_TCR_TAR_TRR(x)			(((x) & 0xf) << 20)
#define   TIMING_TWHR(x)			(((x) & 0xf) << 16)
#define   TIMING_TCS(x)				(((x) & 0x3) << 14)
#define   TIMING_TWH(x)				(((x) & 0x3) << 12)
#define   TIMING_TWP(x)				(((x) & 0xf) <<  8)
#define   TIMING_TRH(x)				(((x) & 0x3) <<  4)
#define   TIMING_TRP(x)				(((x) & 0xf) <<  0)

#define RESP					0x18

#define TIMING_2				0x1c
#define   TIMING_TADL(x)			((x) & 0xf)

#define CMD_REG1				0x20
#define CMD_REG2				0x24
#define ADDR_REG1				0x28
#define ADDR_REG2				0x2c

#define DMA_MST_CTRL				0x30
#define   DMA_MST_CTRL_GO			BIT(31)
#define   DMA_MST_CTRL_IN			(0 << 30)
#define   DMA_MST_CTRL_OUT			BIT(30)
#define   DMA_MST_CTRL_PERF_EN			BIT(29)
#define   DMA_MST_CTRL_IE_DONE			BIT(28)
#define   DMA_MST_CTRL_REUSE			BIT(27)
#define   DMA_MST_CTRL_BURST_1			(2 << 24)
#define   DMA_MST_CTRL_BURST_4			(3 << 24)
#define   DMA_MST_CTRL_BURST_8			(4 << 24)
#define   DMA_MST_CTRL_BURST_16			(5 << 24)
#define   DMA_MST_CTRL_IS_DONE			BIT(20)
#define   DMA_MST_CTRL_EN_A			BIT(2)
#define   DMA_MST_CTRL_EN_B			BIT(1)

#define DMA_CFG_A				0x34
#define DMA_CFG_B				0x38

#define FIFO_CTRL				0x3c
#define   FIFO_CTRL_CLR_ALL			BIT(3)

#define DATA_PTR				0x40
#define TAG_PTR					0x44
#define ECC_PTR					0x48

#define DEC_STATUS				0x4c
#define   DEC_STATUS_A_ECC_FAIL			BIT(1)
#define   DEC_STATUS_ERR_COUNT_MASK		0x00ff0000
#define   DEC_STATUS_ERR_COUNT_SHIFT		16

#define HWSTATUS_CMD				0x50
#define HWSTATUS_MASK				0x54
#define   HWSTATUS_RDSTATUS_MASK(x)		(((x) & 0xff) << 24)
#define   HWSTATUS_RDSTATUS_VALUE(x)		(((x) & 0xff) << 16)
#define   HWSTATUS_RBSY_MASK(x)			(((x) & 0xff) << 8)
#define   HWSTATUS_RBSY_VALUE(x)		(((x) & 0xff) << 0)

#define BCH_CONFIG				0xcc
#define   BCH_ENABLE				BIT(0)
#define   BCH_TVAL_4				(0 << 4)
#define   BCH_TVAL_8				(1 << 4)
#define   BCH_TVAL_14				(2 << 4)
#define   BCH_TVAL_16				(3 << 4)

#define DEC_STAT_RESULT				0xd0
#define DEC_STAT_BUF				0xd4
#define   DEC_STAT_BUF_FAIL_SEC_FLAG_MASK	0xff000000
#define   DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT	24
#define   DEC_STAT_BUF_CORR_SEC_FLAG_MASK	0x00ff0000
#define   DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT	16
#define   DEC_STAT_BUF_MAX_CORR_CNT_MASK	0x00001f00
#define   DEC_STAT_BUF_MAX_CORR_CNT_SHIFT	8

#define OFFSET(val, off)	((val) < (off) ? 0 : (val) - (off))

#define SKIP_SPARE_BYTES	4
#define BITS_PER_STEP_RS	18
#define BITS_PER_STEP_BCH	13

#define INT_MASK		(IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
#define HWSTATUS_CMD_DEFAULT	NAND_STATUS_READY
#define HWSTATUS_MASK_DEFAULT	(HWSTATUS_RDSTATUS_MASK(1) | \
				HWSTATUS_RDSTATUS_VALUE(0) | \
				HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
				HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))

struct tegra_nand_controller {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct completion command_complete;
	struct completion dma_complete;
	bool last_read_error;
	int cur_cs;
	struct nand_chip *chip;
};

struct tegra_nand_chip {
	struct nand_chip chip;
	struct gpio_desc *wp_gpio;
	struct mtd_oob_region ecc;
	u32 config;
	u32 config_ecc;
	u32 bch_config;
	int cs[1];
};

static inline struct tegra_nand_controller *
			to_tegra_ctrl(struct nand_controller *hw_ctrl)
{
	return container_of(hw_ctrl, struct tegra_nand_controller, controller);
}

static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
{
	return container_of(chip, struct tegra_nand_chip, chip);
}

static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}
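/*
 * Example: with a 2048 byte page (four 512 byte steps) and RS strength 6,
 * bytes_per_step = DIV_ROUND_UP(18 * 6, 8) = 14, so the ECC region spans
 * round_up(14 * 4, 4) = 56 bytes starting at offset 4, leaving the first
 * SKIP_SPARE_BYTES of the spare area untouched.
 */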

static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	return -ERANGE;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
	.ecc = tegra_nand_ooblayout_rs_ecc,
	.free = tegra_nand_ooblayout_no_free,
};

static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
	.ecc = tegra_nand_ooblayout_bch_ecc,
	.free = tegra_nand_ooblayout_no_free,
};

static irqreturn_t tegra_nand_irq(int irq, void *data)
{
	struct tegra_nand_controller *ctrl = data;
	u32 isr, dma;

	isr = readl_relaxed(ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	dev_dbg(ctrl->dev, "isr %08x\n", isr);

	if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
		return IRQ_NONE;
	/*
	 * The bit name is somewhat misleading: this is also set when
	 * HW ECC was successful. The data sheet states:
	 * Correctable OR Un-correctable errors occurred in the DMA transfer...
	 */
	if (isr & ISR_CORRFAIL_ERR)
		ctrl->last_read_error = true;

	if (isr & ISR_CMD_DONE)
		complete(&ctrl->command_complete);

	if (isr & ISR_UND)
		dev_err(ctrl->dev, "FIFO underrun\n");

	if (isr & ISR_OVR)
		dev_err(ctrl->dev, "FIFO overrun\n");

	/* handle DMA interrupts */
	if (dma & DMA_MST_CTRL_IS_DONE) {
		writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
		complete(&ctrl->dma_complete);
	}

	/* clear interrupts */
	writel_relaxed(isr, ctrl->regs + ISR);

	return IRQ_HANDLED;
}

static const char * const tegra_nand_reg_names[] = {
	"COMMAND",
	"STATUS",
	"ISR",
	"IER",
	"CONFIG",
	"TIMING",
	NULL,
	"TIMING2",
	"CMD_REG1",
	"CMD_REG2",
	"ADDR_REG1",
	"ADDR_REG2",
	"DMA_MST_CTRL",
	"DMA_CFG_A",
	"DMA_CFG_B",
	"FIFO_CTRL",
};

static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
{
	u32 reg;
	int i;

	dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
	for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
		const char *reg_name = tegra_nand_reg_names[i];

		if (!reg_name)
			continue;

		reg = readl_relaxed(ctrl->regs + (i * 4));
		dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
	}
}

static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
{
	u32 isr, dma;

	disable_irq(ctrl->irq);

	/* Abort current command/DMA operation */
	writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(0, ctrl->regs + COMMAND);

	/* clear interrupts */
	isr = readl_relaxed(ctrl->regs + ISR);
	writel_relaxed(isr, ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);

	reinit_completion(&ctrl->command_complete);
	reinit_completion(&ctrl->dma_complete);

	enable_irq(ctrl->irq);
}

static int tegra_nand_cmd(struct nand_chip *chip,
			  const struct nand_subop *subop)
{
	const struct nand_op_instr *instr;
	const struct nand_op_instr *instr_data_in = NULL;
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	unsigned int op_id, size = 0, offset = 0;
	bool first_cmd = true;
	u32 reg, cmd = 0;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int naddrs, i;
		const u8 *addrs;
		u32 addr1 = 0, addr2 = 0;

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (first_cmd) {
				cmd |= COMMAND_CLE;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG1);
			} else {
				cmd |= COMMAND_SEC_CMD;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG2);
			}
			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];

			cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr1 |= *addrs++ << (BITS_PER_BYTE * i);
			naddrs -= i;
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr2 |= *addrs++ << (BITS_PER_BYTE * i);

			writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
			writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
			break;

		case NAND_OP_DATA_IN_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
				COMMAND_RX | COMMAND_A_VALID;

			instr_data_in = instr;
			break;

		case NAND_OP_DATA_OUT_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
				COMMAND_TX | COMMAND_A_VALID;
			memcpy(&reg, instr->ctx.data.buf.out + offset, size);

			writel_relaxed(reg, ctrl->regs + RESP);
			break;

		case NAND_OP_WAITRDY_INSTR:
			cmd |= COMMAND_RBSY_CHK;
			break;
		}
	}

	cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
	writel_relaxed(cmd, ctrl->regs + COMMAND);
	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		return -ETIMEDOUT;
	}

	if (instr_data_in) {
		reg = readl_relaxed(ctrl->regs + RESP);
		memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
	}

	return 0;
}

static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
	);
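/*
 * The PIO path in tegra_nand_cmd() can move at most 4 bytes through the
 * 32-bit RESP register, hence the 4-byte limit on the data elements above.
 * A READID access (command + one address cycle + data in), for instance,
 * matches the last pattern with the optional second command and wait-ready
 * elements left out; the ID bytes are then copied back out of RESP once
 * the command completes.
 */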

static void tegra_nand_select_target(struct nand_chip *chip,
				     unsigned int die_nr)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);

	ctrl->cur_cs = nand->cs[die_nr];
}

static int tegra_nand_exec_op(struct nand_chip *chip,
			      const struct nand_operation *op,
			      bool check_only)
{
	if (!check_only)
		tegra_nand_select_target(chip, op->cs);

	return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
				      check_only);
}

static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
			      struct nand_chip *chip, bool enable)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);

	if (chip->ecc.algo == NAND_ECC_BCH && enable)
		writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
	else
		writel_relaxed(0, ctrl->regs + BCH_CONFIG);

	if (enable)
		writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
	else
		writel_relaxed(nand->config, ctrl->regs + CONFIG);
}

static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
				void *buf, void *oob_buf, int oob_len, int page,
				bool read)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	dma_addr_t dma_addr = 0, dma_addr_oob = 0;
	u32 addr1, cmd, dma_ctrl;
	int ret;

	tegra_nand_select_target(chip, chip->cur_cs);

	if (read) {
		writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
	} else {
		writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
	}
	cmd = COMMAND_CLE | COMMAND_SEC_CMD;

	/* Lower 16-bits are column, by default 0 */
	addr1 = page << 16;

	if (!buf)
		addr1 |= mtd->writesize;
	writel_relaxed(addr1, ctrl->regs + ADDR_REG1);

	if (chip->options & NAND_ROW_ADDR_3) {
		writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
	} else {
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
	}

	if (buf) {
		dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			return -EINVAL;
		}

		writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
		writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
	}

	if (oob_buf) {
		dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
					      dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			ret = -EINVAL;
			goto err_unmap_dma_page;
		}

		writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
		writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
	}

	dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
		   DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
		   DMA_MST_CTRL_BURST_16;

	if (buf)
		dma_ctrl |= DMA_MST_CTRL_EN_A;
	if (oob_buf)
		dma_ctrl |= DMA_MST_CTRL_EN_B;

	if (read)
		dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
	else
		dma_ctrl |= DMA_MST_CTRL_OUT;

	writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);

	cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
	       COMMAND_CE(ctrl->cur_cs);

	if (buf)
		cmd |= COMMAND_A_VALID;
	if (oob_buf)
		cmd |= COMMAND_B_VALID;

	if (read)
		cmd |= COMMAND_RX;
	else
		cmd |= COMMAND_TX | COMMAND_AFT_DAT;

	writel_relaxed(cmd, ctrl->regs + COMMAND);

	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}

	ret = wait_for_completion_timeout(&ctrl->dma_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "DMA timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}
	ret = 0;

err_unmap_dma:
	if (oob_buf)
		dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
err_unmap_dma_page:
	if (buf)
		dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);

	return ret;
}
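/*
 * Example: a read of page 5 on a 2048+64 byte device programs
 * ADDR_REG1 = 5 << 16 (column 0) and DMA_CFG_A = 2047; the raw accessors
 * below pass oob_len = mtd->oobsize, so DMA_CFG_B = 63 when the spare area
 * is transferred as well. An OOB-only transfer (buf == NULL) instead
 * starts at column 2048, i.e. right behind the main data area.
 */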

static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
				     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				     mtd->oobsize, page, false);
}

static int tegra_nand_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, false);
}

static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
				      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	u32 dec_stat, max_corr_cnt;
	unsigned long fail_sec_flag;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
	tegra_nand_hw_ecc(ctrl, chip, false);
	if (ret)
		return ret;

	/* No correctable or un-correctable errors, page must have 0 bitflips */
	if (!ctrl->last_read_error)
		return 0;

	/*
	 * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
	 * which contains information for all ECC selections.
	 *
	 * Note that since we do not use Command Queues DEC_RESULT does not
	 * state the number of pages we can read from the DEC_STAT_BUF. But
	 * since CORRFAIL_ERR did occur during page read we do have a valid
	 * result in DEC_STAT_BUF.
	 */
	ctrl->last_read_error = false;
	dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);

	fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
			DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;

	max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
		       DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;

	if (fail_sec_flag) {
		int bit, max_bitflips = 0;

		/*
		 * Since we do not support subpage writes, a complete page
		 * is either written or not. We can take a shortcut here by
		 * checking whether any of the sectors has been read
		 * successfully. If at least one sector has been read
		 * successfully, the page must have been written previously.
		 * It cannot be an erased page.
		 *
		 * E.g. the controller might return fail_sec_flag with 0x4,
		 * which would mean only the third sector failed to correct.
		 * The page must have been written and the third sector is
		 * really not correctable anymore.
		 */
		if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
			mtd->ecc_stats.failed += hweight8(fail_sec_flag);
			return max_corr_cnt;
		}

		/*
		 * All sectors failed to correct, but the ECC isn't smart
		 * enough to figure out if a page is really just erased.
		 * Read OOB data and check whether data/OOB is completely
		 * erased or if error correction just failed for all sub-
		 * pages.
		 */
		ret = tegra_nand_read_oob(chip, page);
		if (ret < 0)
			return ret;

		for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
			u8 *data = buf + (chip->ecc.size * bit);
			u8 *oob = chip->oob_poi + nand->ecc.offset +
				  (chip->ecc.bytes * bit);

			ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
							  oob, chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);
			if (ret < 0) {
				mtd->ecc_stats.failed++;
			} else {
				mtd->ecc_stats.corrected += ret;
				max_bitflips = max(ret, max_bitflips);
			}
		}

		return max_t(unsigned int, max_corr_cnt, max_bitflips);
	} else {
		int corr_sec_flag;

		corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
				DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;

		/*
		 * The value returned in the register is the maximum of
		 * bitflips encountered in any of the ECC regions. As there is
		 * no way to get the number of bitflips in a specific region,
		 * we are not able to deliver correct stats but instead
		 * overestimate the number of corrected bitflips by assuming
		 * that all regions where errors have been corrected
		 * encountered the maximum number of bitflips.
		 */
		mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);

		return max_corr_cnt;
	}
}
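/*
 * Example decode of DEC_STAT_BUF: a value of 0x040f0200 yields
 * fail_sec_flag = 0x04 (bit 2, so the third sector is uncorrectable),
 * corr_sec_flag = 0x0f and max_corr_cnt = 2. Since not every sector
 * failed, the page cannot be an erased page, ecc_stats.failed is
 * incremented by hweight8(0x04) = 1 and max_corr_cnt is returned.
 */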

static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				   0, page, false);
	tegra_nand_hw_ecc(ctrl, chip, false);

	return ret;
}

static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
				    const struct nand_sdr_timings *timings)
{
	/*
	 * The period (and all other timings in this function) is in ps,
	 * so need to take care here to avoid integer overflows.
	 */
	unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
	unsigned int period = DIV_ROUND_UP(1000000, rate);
	u32 val, reg = 0;

	val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
				timings->tRC_min), period);
	reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));

	val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
			       max(timings->tALS_min, timings->tALH_min)),
			   period);
	reg |= TIMING_TCS(OFFSET(val, 2));

	val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
			   period);
	reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));

	reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
	reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
	reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
	reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
	reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));

	writel_relaxed(reg, ctrl->regs + TIMING_1);

	val = DIV_ROUND_UP(timings->tADL_min, period);
	reg = TIMING_TADL(OFFSET(val, 3));

	writel_relaxed(reg, ctrl->regs + TIMING_2);
}
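/*
 * Worked example with assumed numbers: at a 166 MHz controller clock,
 * period = DIV_ROUND_UP(1000000, 166) = 6025 ps. For a device with
 * tRP_min = 50000 ps and tREA_max = 40000 ps this gives
 * val = DIV_ROUND_UP(50000 + 6000, 6025) = 10 cycles, programmed as
 * TIMING_TRP(OFFSET(10, 1)) = TIMING_TRP(9).
 */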

static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
				      const struct nand_interface_config *conf)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	const struct nand_sdr_timings *timings;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	tegra_nand_setup_timing(ctrl, timings);

	return 0;
}

static const int rs_strength_bootable[] = { 4 };
static const int rs_strength[] = { 4, 6, 8 };
static const int bch_strength_bootable[] = { 8, 16 };
static const int bch_strength[] = { 4, 8, 14, 16 };

static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
				   int strength_len, int bits_per_step,
				   int oobsize)
{
	bool maximize = chip->ecc.options & NAND_ECC_MAXIMIZE;
	int i;

	/*
	 * Loop through available strengths. Backwards in case we try to
	 * maximize the BCH strength.
	 */
	for (i = 0; i < strength_len; i++) {
		int strength_sel, bytes_per_step, bytes_per_page;

		if (maximize) {
			strength_sel = strength[strength_len - i - 1];
		} else {
			strength_sel = strength[i];

			if (strength_sel < chip->base.eccreq.strength)
				continue;
		}

		bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
					      BITS_PER_BYTE);
		bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);

		/* Check whether strength fits OOB */
		if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
			return strength_sel;
	}

	return -EINVAL;
}
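/*
 * Example: on a 2048+64 byte chip (chip->ecc.steps = 4), BCH strength 8
 * needs DIV_ROUND_UP(13 * 8, 8) = 13 bytes per step, i.e.
 * round_up(13 * 4, 4) = 52 bytes per page, which fits into the
 * 64 - 4 = 60 spare bytes left after SKIP_SPARE_BYTES. Strength 16 would
 * need 104 bytes and be rejected.
 */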

static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
{
	const int *strength;
	int strength_len, bits_per_step;

	switch (chip->ecc.algo) {
	case NAND_ECC_RS:
		bits_per_step = BITS_PER_STEP_RS;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = rs_strength_bootable;
			strength_len = ARRAY_SIZE(rs_strength_bootable);
		} else {
			strength = rs_strength;
			strength_len = ARRAY_SIZE(rs_strength);
		}
		break;
	case NAND_ECC_BCH:
		bits_per_step = BITS_PER_STEP_BCH;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = bch_strength_bootable;
			strength_len = ARRAY_SIZE(bch_strength_bootable);
		} else {
			strength = bch_strength;
			strength_len = ARRAY_SIZE(bch_strength);
		}
		break;
	default:
		return -EINVAL;
	}

	return tegra_nand_get_strength(chip, strength, strength_len,
				       bits_per_step, oobsize);
}

static int tegra_nand_attach_chip(struct nand_chip *chip)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bits_per_step;
	int ret;

	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;

	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = 512;
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	if (chip->base.eccreq.step_size != 512) {
		dev_err(ctrl->dev, "Unsupported step size %d\n",
			chip->base.eccreq.step_size);
		return -EINVAL;
	}

	chip->ecc.read_page = tegra_nand_read_page_hwecc;
	chip->ecc.write_page = tegra_nand_write_page_hwecc;
	chip->ecc.read_page_raw = tegra_nand_read_page_raw;
	chip->ecc.write_page_raw = tegra_nand_write_page_raw;
	chip->ecc.read_oob = tegra_nand_read_oob;
	chip->ecc.write_oob = tegra_nand_write_oob;

	if (chip->options & NAND_BUSWIDTH_16)
		nand->config |= CONFIG_BUS_WIDTH_16;

	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
		if (mtd->writesize < 2048)
			chip->ecc.algo = NAND_ECC_RS;
		else
			chip->ecc.algo = NAND_ECC_BCH;
	}

	if (chip->ecc.algo == NAND_ECC_BCH && mtd->writesize < 2048) {
		dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
		return -EINVAL;
	}

	if (!chip->ecc.strength) {
		ret = tegra_nand_select_strength(chip, mtd->oobsize);
		if (ret < 0) {
			dev_err(ctrl->dev,
				"No valid strength found, minimum %d\n",
				chip->base.eccreq.strength);
			return ret;
		}

		chip->ecc.strength = ret;
	}

	nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
			   CONFIG_SKIP_SPARE_SIZE_4;

	switch (chip->ecc.algo) {
	case NAND_ECC_RS:
		bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
		nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
				    CONFIG_ERR_COR;
		switch (chip->ecc.strength) {
		case 4:
			nand->config_ecc |= CONFIG_TVAL_4;
			break;
		case 6:
			nand->config_ecc |= CONFIG_TVAL_6;
			break;
		case 8:
			nand->config_ecc |= CONFIG_TVAL_8;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	case NAND_ECC_BCH:
		bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
		nand->bch_config = BCH_ENABLE;
		switch (chip->ecc.strength) {
		case 4:
			nand->bch_config |= BCH_TVAL_4;
			break;
		case 8:
			nand->bch_config |= BCH_TVAL_8;
			break;
		case 14:
			nand->bch_config |= BCH_TVAL_14;
			break;
		case 16:
			nand->bch_config |= BCH_TVAL_16;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	default:
		dev_err(ctrl->dev, "ECC algorithm not supported\n");
		return -EINVAL;
	}

	dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
		 chip->ecc.algo == NAND_ECC_BCH ? "BCH" : "RS",
		 chip->ecc.strength);

	chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);

	switch (mtd->writesize) {
	case 256:
		nand->config |= CONFIG_PS_256;
		break;
	case 512:
		nand->config |= CONFIG_PS_512;
		break;
	case 1024:
		nand->config |= CONFIG_PS_1024;
		break;
	case 2048:
		nand->config |= CONFIG_PS_2048;
		break;
	case 4096:
		nand->config |= CONFIG_PS_4096;
		break;
	default:
		dev_err(ctrl->dev, "Unsupported writesize %d\n",
			mtd->writesize);
		return -ENODEV;
	}

	/* Store complete configuration for HW ECC in config_ecc */
	nand->config_ecc |= nand->config;

	/* Non-HW ECC read/writes complete OOB */
	nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
	writel_relaxed(nand->config, ctrl->regs + CONFIG);

	return 0;
}

static const struct nand_controller_ops tegra_nand_controller_ops = {
	.attach_chip = &tegra_nand_attach_chip,
	.exec_op = tegra_nand_exec_op,
	.setup_interface = tegra_nand_setup_interface,
};

static int tegra_nand_chips_init(struct device *dev,
				 struct tegra_nand_controller *ctrl)
{
	struct device_node *np = dev->of_node;
	struct device_node *np_nand;
	int nsels, nchips = of_get_child_count(np);
	struct tegra_nand_chip *nand;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;
	u32 cs;

	if (nchips != 1) {
		dev_err(dev, "Currently only one NAND chip supported\n");
		return -EINVAL;
	}

	np_nand = of_get_next_child(np, NULL);

	nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
	if (nsels != 1) {
		dev_err(dev, "Missing/invalid reg property\n");
		return -EINVAL;
	}

	/* Retrieve CS id, currently only single die NAND supported */
	ret = of_property_read_u32(np_nand, "reg", &cs);
	if (ret) {
		dev_err(dev, "could not retrieve reg property: %d\n", ret);
		return ret;
	}

	nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	nand->cs[0] = cs;

	nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);

	if (IS_ERR(nand->wp_gpio)) {
		ret = PTR_ERR(nand->wp_gpio);
		dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
		return ret;
	}

	chip = &nand->chip;
	chip->controller = &ctrl->controller;

	mtd = nand_to_mtd(chip);

	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	nand_set_flash_node(chip, np_nand);

	if (!mtd->name)
		mtd->name = "tegra_nand";

	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	mtd_ooblayout_ecc(mtd, 0, &nand->ecc);

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "Failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	ctrl->chip = chip;

	return 0;
}
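/*
 * A minimal device tree sketch of what tegra_nand_chips_init() expects:
 * a single child node whose "reg" holds the chip-select, an optional
 * write-protect GPIO in "wp-gpios", and generic NAND properties parsed by
 * the core. Node name, GPIO specifier and ECC settings below are
 * illustrative only:
 *
 *	nand-controller {
 *		compatible = "nvidia,tegra20-nand";
 *
 *		nand@0 {
 *			reg = <0>;
 *			nand-ecc-algo = "bch";
 *			wp-gpios = <&gpio 59 GPIO_ACTIVE_LOW>;
 *		};
 *	};
 */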

static int tegra_nand_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	struct tegra_nand_controller *ctrl;
	struct resource *res;
	int err = 0;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->dev = &pdev->dev;
	nand_controller_init(&ctrl->controller);
	ctrl->controller.ops = &tegra_nand_controller_ops;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctrl->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ctrl->regs))
		return PTR_ERR(ctrl->regs);

	rst = devm_reset_control_get(&pdev->dev, "nand");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ctrl->clk = devm_clk_get(&pdev->dev, "nand");
	if (IS_ERR(ctrl->clk))
		return PTR_ERR(ctrl->clk);

	err = clk_prepare_enable(ctrl->clk);
	if (err)
		return err;

	err = reset_control_reset(rst);
	if (err) {
		dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
		goto err_disable_clk;
	}

	writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
	writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
	writel_relaxed(INT_MASK, ctrl->regs + IER);

	init_completion(&ctrl->command_complete);
	init_completion(&ctrl->dma_complete);

	ctrl->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
			       dev_name(&pdev->dev), ctrl);
	if (err) {
		dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
		goto err_disable_clk;
	}

	writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);

	err = tegra_nand_chips_init(ctrl->dev, ctrl);
	if (err)
		goto err_disable_clk;

	platform_set_drvdata(pdev, ctrl);

	return 0;

err_disable_clk:
	clk_disable_unprepare(ctrl->clk);
	return err;
}

static int tegra_nand_remove(struct platform_device *pdev)
{
	struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
	struct nand_chip *chip = ctrl->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	nand_cleanup(chip);

	clk_disable_unprepare(ctrl->clk);

	return 0;
}

static const struct of_device_id tegra_nand_of_match[] = {
	{ .compatible = "nvidia,tegra20-nand" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_nand_of_match);

static struct platform_driver tegra_nand_driver = {
	.driver = {
		.name = "tegra-nand",
		.of_match_table = tegra_nand_of_match,
	},
	.probe = tegra_nand_probe,
	.remove = tegra_nand_remove,
};
module_platform_driver(tegra_nand_driver);

MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
MODULE_LICENSE("GPL v2");
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
   4 * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
   5 * Copyright (C) 2012 Avionic Design GmbH
   6 */
   7
   8#include <linux/clk.h>
   9#include <linux/completion.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/err.h>
  12#include <linux/gpio/consumer.h>
  13#include <linux/interrupt.h>
  14#include <linux/io.h>
  15#include <linux/module.h>
  16#include <linux/mtd/partitions.h>
  17#include <linux/mtd/rawnand.h>
  18#include <linux/of.h>
  19#include <linux/platform_device.h>
  20#include <linux/pm_runtime.h>
  21#include <linux/reset.h>
  22
  23#include <soc/tegra/common.h>
  24
  25#define COMMAND					0x00
  26#define   COMMAND_GO				BIT(31)
  27#define   COMMAND_CLE				BIT(30)
  28#define   COMMAND_ALE				BIT(29)
  29#define   COMMAND_PIO				BIT(28)
  30#define   COMMAND_TX				BIT(27)
  31#define   COMMAND_RX				BIT(26)
  32#define   COMMAND_SEC_CMD			BIT(25)
  33#define   COMMAND_AFT_DAT			BIT(24)
  34#define   COMMAND_TRANS_SIZE(size)		((((size) - 1) & 0xf) << 20)
  35#define   COMMAND_A_VALID			BIT(19)
  36#define   COMMAND_B_VALID			BIT(18)
  37#define   COMMAND_RD_STATUS_CHK			BIT(17)
  38#define   COMMAND_RBSY_CHK			BIT(16)
  39#define   COMMAND_CE(x)				BIT(8 + ((x) & 0x7))
  40#define   COMMAND_CLE_SIZE(size)		((((size) - 1) & 0x3) << 4)
  41#define   COMMAND_ALE_SIZE(size)		((((size) - 1) & 0xf) << 0)
  42
  43#define STATUS					0x04
  44
  45#define ISR					0x08
  46#define   ISR_CORRFAIL_ERR			BIT(24)
  47#define   ISR_UND				BIT(7)
  48#define   ISR_OVR				BIT(6)
  49#define   ISR_CMD_DONE				BIT(5)
  50#define   ISR_ECC_ERR				BIT(4)
  51
  52#define IER					0x0c
  53#define   IER_ERR_TRIG_VAL(x)			(((x) & 0xf) << 16)
  54#define   IER_UND				BIT(7)
  55#define   IER_OVR				BIT(6)
  56#define   IER_CMD_DONE				BIT(5)
  57#define   IER_ECC_ERR				BIT(4)
  58#define   IER_GIE				BIT(0)
  59
  60#define CONFIG					0x10
  61#define   CONFIG_HW_ECC				BIT(31)
  62#define   CONFIG_ECC_SEL			BIT(30)
  63#define   CONFIG_ERR_COR			BIT(29)
  64#define   CONFIG_PIPE_EN			BIT(28)
  65#define   CONFIG_TVAL_4				(0 << 24)
  66#define   CONFIG_TVAL_6				(1 << 24)
  67#define   CONFIG_TVAL_8				(2 << 24)
  68#define   CONFIG_SKIP_SPARE			BIT(23)
  69#define   CONFIG_BUS_WIDTH_16			BIT(21)
  70#define   CONFIG_COM_BSY			BIT(20)
  71#define   CONFIG_PS_256				(0 << 16)
  72#define   CONFIG_PS_512				(1 << 16)
  73#define   CONFIG_PS_1024			(2 << 16)
  74#define   CONFIG_PS_2048			(3 << 16)
  75#define   CONFIG_PS_4096			(4 << 16)
  76#define   CONFIG_SKIP_SPARE_SIZE_4		(0 << 14)
  77#define   CONFIG_SKIP_SPARE_SIZE_8		(1 << 14)
  78#define   CONFIG_SKIP_SPARE_SIZE_12		(2 << 14)
  79#define   CONFIG_SKIP_SPARE_SIZE_16		(3 << 14)
  80#define   CONFIG_TAG_BYTE_SIZE(x)			((x) & 0xff)
  81
  82#define TIMING_1				0x14
  83#define   TIMING_TRP_RESP(x)			(((x) & 0xf) << 28)
  84#define   TIMING_TWB(x)				(((x) & 0xf) << 24)
  85#define   TIMING_TCR_TAR_TRR(x)			(((x) & 0xf) << 20)
  86#define   TIMING_TWHR(x)			(((x) & 0xf) << 16)
  87#define   TIMING_TCS(x)				(((x) & 0x3) << 14)
  88#define   TIMING_TWH(x)				(((x) & 0x3) << 12)
  89#define   TIMING_TWP(x)				(((x) & 0xf) <<  8)
  90#define   TIMING_TRH(x)				(((x) & 0x3) <<  4)
  91#define   TIMING_TRP(x)				(((x) & 0xf) <<  0)
  92
  93#define RESP					0x18
  94
  95#define TIMING_2				0x1c
  96#define   TIMING_TADL(x)			((x) & 0xf)
  97
  98#define CMD_REG1				0x20
  99#define CMD_REG2				0x24
 100#define ADDR_REG1				0x28
 101#define ADDR_REG2				0x2c
 102
 103#define DMA_MST_CTRL				0x30
 104#define   DMA_MST_CTRL_GO			BIT(31)
 105#define   DMA_MST_CTRL_IN			(0 << 30)
 106#define   DMA_MST_CTRL_OUT			BIT(30)
 107#define   DMA_MST_CTRL_PERF_EN			BIT(29)
 108#define   DMA_MST_CTRL_IE_DONE			BIT(28)
 109#define   DMA_MST_CTRL_REUSE			BIT(27)
 110#define   DMA_MST_CTRL_BURST_1			(2 << 24)
 111#define   DMA_MST_CTRL_BURST_4			(3 << 24)
 112#define   DMA_MST_CTRL_BURST_8			(4 << 24)
 113#define   DMA_MST_CTRL_BURST_16			(5 << 24)
 114#define   DMA_MST_CTRL_IS_DONE			BIT(20)
 115#define   DMA_MST_CTRL_EN_A			BIT(2)
 116#define   DMA_MST_CTRL_EN_B			BIT(1)
 117
 118#define DMA_CFG_A				0x34
 119#define DMA_CFG_B				0x38
 120
 121#define FIFO_CTRL				0x3c
 122#define   FIFO_CTRL_CLR_ALL			BIT(3)
 123
 124#define DATA_PTR				0x40
 125#define TAG_PTR					0x44
 126#define ECC_PTR					0x48
 127
 128#define DEC_STATUS				0x4c
 129#define   DEC_STATUS_A_ECC_FAIL			BIT(1)
 130#define   DEC_STATUS_ERR_COUNT_MASK		0x00ff0000
 131#define   DEC_STATUS_ERR_COUNT_SHIFT		16
 132
 133#define HWSTATUS_CMD				0x50
 134#define HWSTATUS_MASK				0x54
 135#define   HWSTATUS_RDSTATUS_MASK(x)		(((x) & 0xff) << 24)
 136#define   HWSTATUS_RDSTATUS_VALUE(x)		(((x) & 0xff) << 16)
 137#define   HWSTATUS_RBSY_MASK(x)			(((x) & 0xff) << 8)
 138#define   HWSTATUS_RBSY_VALUE(x)		(((x) & 0xff) << 0)
 139
 140#define BCH_CONFIG				0xcc
 141#define   BCH_ENABLE				BIT(0)
 142#define   BCH_TVAL_4				(0 << 4)
 143#define   BCH_TVAL_8				(1 << 4)
 144#define   BCH_TVAL_14				(2 << 4)
 145#define   BCH_TVAL_16				(3 << 4)
 146
 147#define DEC_STAT_RESULT				0xd0
 148#define DEC_STAT_BUF				0xd4
 149#define   DEC_STAT_BUF_FAIL_SEC_FLAG_MASK	0xff000000
 150#define   DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT	24
 151#define   DEC_STAT_BUF_CORR_SEC_FLAG_MASK	0x00ff0000
 152#define   DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT	16
 153#define   DEC_STAT_BUF_MAX_CORR_CNT_MASK	0x00001f00
 154#define   DEC_STAT_BUF_MAX_CORR_CNT_SHIFT	8
 155
 156#define OFFSET(val, off)	((val) < (off) ? 0 : (val) - (off))
 157
 158#define SKIP_SPARE_BYTES	4
 159#define BITS_PER_STEP_RS	18
 160#define BITS_PER_STEP_BCH	13
 161
 162#define INT_MASK		(IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
 163#define HWSTATUS_CMD_DEFAULT	NAND_STATUS_READY
 164#define HWSTATUS_MASK_DEFAULT	(HWSTATUS_RDSTATUS_MASK(1) | \
 165				HWSTATUS_RDSTATUS_VALUE(0) | \
 166				HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
 167				HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))
 168
 169struct tegra_nand_controller {
 170	struct nand_controller controller;
 171	struct device *dev;
 172	void __iomem *regs;
 173	int irq;
 174	struct clk *clk;
 175	struct completion command_complete;
 176	struct completion dma_complete;
 177	bool last_read_error;
 178	int cur_cs;
 179	struct nand_chip *chip;
 180};
 181
 182struct tegra_nand_chip {
 183	struct nand_chip chip;
 184	struct gpio_desc *wp_gpio;
 185	struct mtd_oob_region ecc;
 186	u32 config;
 187	u32 config_ecc;
 188	u32 bch_config;
 189	int cs[1];
 190};
 191
 192static inline struct tegra_nand_controller *
 193			to_tegra_ctrl(struct nand_controller *hw_ctrl)
 194{
 195	return container_of(hw_ctrl, struct tegra_nand_controller, controller);
 196}
 197
 198static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
 199{
 200	return container_of(chip, struct tegra_nand_chip, chip);
 201}
 202
 203static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
 204				       struct mtd_oob_region *oobregion)
 205{
 206	struct nand_chip *chip = mtd_to_nand(mtd);
 207	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
 208					  BITS_PER_BYTE);
 209
 210	if (section > 0)
 211		return -ERANGE;
 212
 213	oobregion->offset = SKIP_SPARE_BYTES;
 214	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
 215
 216	return 0;
 217}
 218
 219static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
 220					struct mtd_oob_region *oobregion)
 221{
 222	return -ERANGE;
 223}
 224
 225static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
 226	.ecc = tegra_nand_ooblayout_rs_ecc,
 227	.free = tegra_nand_ooblayout_no_free,
 228};
 229
 230static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
 231					struct mtd_oob_region *oobregion)
 232{
 233	struct nand_chip *chip = mtd_to_nand(mtd);
 234	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
 235					  BITS_PER_BYTE);
 236
 237	if (section > 0)
 238		return -ERANGE;
 239
 240	oobregion->offset = SKIP_SPARE_BYTES;
 241	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
 242
 243	return 0;
 244}
 245
 246static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
 247	.ecc = tegra_nand_ooblayout_bch_ecc,
 248	.free = tegra_nand_ooblayout_no_free,
 249};
 250
 251static irqreturn_t tegra_nand_irq(int irq, void *data)
 252{
 253	struct tegra_nand_controller *ctrl = data;
 254	u32 isr, dma;
 255
 256	isr = readl_relaxed(ctrl->regs + ISR);
 257	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
 258	dev_dbg(ctrl->dev, "isr %08x\n", isr);
 259
 260	if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
 261		return IRQ_NONE;
 262
 263	/*
 264	 * The bit name is somewhat missleading: This is also set when
 265	 * HW ECC was successful. The data sheet states:
 266	 * Correctable OR Un-correctable errors occurred in the DMA transfer...
 267	 */
 268	if (isr & ISR_CORRFAIL_ERR)
 269		ctrl->last_read_error = true;
 270
 271	if (isr & ISR_CMD_DONE)
 272		complete(&ctrl->command_complete);
 273
 274	if (isr & ISR_UND)
 275		dev_err(ctrl->dev, "FIFO underrun\n");
 276
 277	if (isr & ISR_OVR)
 278		dev_err(ctrl->dev, "FIFO overrun\n");
 279
 280	/* handle DMA interrupts */
 281	if (dma & DMA_MST_CTRL_IS_DONE) {
 282		writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
 283		complete(&ctrl->dma_complete);
 284	}
 285
 286	/* clear interrupts */
 287	writel_relaxed(isr, ctrl->regs + ISR);
 288
 289	return IRQ_HANDLED;
 290}
 291
 292static const char * const tegra_nand_reg_names[] = {
 293	"COMMAND",
 294	"STATUS",
 295	"ISR",
 296	"IER",
 297	"CONFIG",
 298	"TIMING",
 299	NULL,
 300	"TIMING2",
 301	"CMD_REG1",
 302	"CMD_REG2",
 303	"ADDR_REG1",
 304	"ADDR_REG2",
 305	"DMA_MST_CTRL",
 306	"DMA_CFG_A",
 307	"DMA_CFG_B",
 308	"FIFO_CTRL",
 309};
 310
 311static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
 312{
 313	u32 reg;
 314	int i;
 315
 316	dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
 317	for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
 318		const char *reg_name = tegra_nand_reg_names[i];
 319
 320		if (!reg_name)
 321			continue;
 322
 323		reg = readl_relaxed(ctrl->regs + (i * 4));
 324		dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
 325	}
 326}
 327
 328static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
 329{
 330	u32 isr, dma;
 331
 332	disable_irq(ctrl->irq);
 333
 334	/* Abort current command/DMA operation */
 335	writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
 336	writel_relaxed(0, ctrl->regs + COMMAND);
 337
 338	/* clear interrupts */
 339	isr = readl_relaxed(ctrl->regs + ISR);
 340	writel_relaxed(isr, ctrl->regs + ISR);
 341	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
 342	writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
 343
 344	reinit_completion(&ctrl->command_complete);
 345	reinit_completion(&ctrl->dma_complete);
 346
 347	enable_irq(ctrl->irq);
 348}
 349
 350static int tegra_nand_cmd(struct nand_chip *chip,
 351			  const struct nand_subop *subop)
 352{
 353	const struct nand_op_instr *instr;
 354	const struct nand_op_instr *instr_data_in = NULL;
 355	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
 356	unsigned int op_id, size = 0, offset = 0;
 357	bool first_cmd = true;
 358	u32 reg, cmd = 0;
 359	int ret;
 360
 361	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
 362		unsigned int naddrs, i;
 363		const u8 *addrs;
 364		u32 addr1 = 0, addr2 = 0;
 365
 366		instr = &subop->instrs[op_id];
 367
 368		switch (instr->type) {
 369		case NAND_OP_CMD_INSTR:
 370			if (first_cmd) {
 371				cmd |= COMMAND_CLE;
 372				writel_relaxed(instr->ctx.cmd.opcode,
 373					       ctrl->regs + CMD_REG1);
 374			} else {
 375				cmd |= COMMAND_SEC_CMD;
 376				writel_relaxed(instr->ctx.cmd.opcode,
 377					       ctrl->regs + CMD_REG2);
 378			}
 379			first_cmd = false;
 380			break;
 381
 382		case NAND_OP_ADDR_INSTR:
 383			offset = nand_subop_get_addr_start_off(subop, op_id);
 384			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
 385			addrs = &instr->ctx.addr.addrs[offset];
 386
 387			cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
 388			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
 389				addr1 |= *addrs++ << (BITS_PER_BYTE * i);
 390			naddrs -= i;
 391			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
 392				addr2 |= *addrs++ << (BITS_PER_BYTE * i);
 393
 394			writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
 395			writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
 396			break;
 397
 398		case NAND_OP_DATA_IN_INSTR:
 399			size = nand_subop_get_data_len(subop, op_id);
 400			offset = nand_subop_get_data_start_off(subop, op_id);
 401
 402			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
 403				COMMAND_RX | COMMAND_A_VALID;
 404
 405			instr_data_in = instr;
 406			break;
 407
 408		case NAND_OP_DATA_OUT_INSTR:
 409			size = nand_subop_get_data_len(subop, op_id);
 410			offset = nand_subop_get_data_start_off(subop, op_id);
 411
 412			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
 413				COMMAND_TX | COMMAND_A_VALID;
 414			memcpy(&reg, instr->ctx.data.buf.out + offset, size);
 415
 416			writel_relaxed(reg, ctrl->regs + RESP);
 417			break;
 418
 419		case NAND_OP_WAITRDY_INSTR:
 420			cmd |= COMMAND_RBSY_CHK;
 421			break;
 422		}
 423	}
 424
 425	cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
 426	writel_relaxed(cmd, ctrl->regs + COMMAND);
 427	ret = wait_for_completion_timeout(&ctrl->command_complete,
 428					  msecs_to_jiffies(500));
 429	if (!ret) {
 430		dev_err(ctrl->dev, "COMMAND timeout\n");
 431		tegra_nand_dump_reg(ctrl);
 432		tegra_nand_controller_abort(ctrl);
 433		return -ETIMEDOUT;
 434	}
 435
 436	if (instr_data_in) {
 437		reg = readl_relaxed(ctrl->regs + RESP);
 438		memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
 439	}
 440
 441	return 0;
 442}
 443
 444static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
 445	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
 446		NAND_OP_PARSER_PAT_CMD_ELEM(true),
 447		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
 448		NAND_OP_PARSER_PAT_CMD_ELEM(true),
 449		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
 450	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
 451		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
 452	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
 453		NAND_OP_PARSER_PAT_CMD_ELEM(true),
 454		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
 455		NAND_OP_PARSER_PAT_CMD_ELEM(true),
 456		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
 457		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
 458	);
 459
 460static void tegra_nand_select_target(struct nand_chip *chip,
 461				     unsigned int die_nr)
 462{
 463	struct tegra_nand_chip *nand = to_tegra_chip(chip);
 464	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
 465
 466	ctrl->cur_cs = nand->cs[die_nr];
 467}
 468
 469static int tegra_nand_exec_op(struct nand_chip *chip,
 470			      const struct nand_operation *op,
 471			      bool check_only)
 472{
 473	if (!check_only)
 474		tegra_nand_select_target(chip, op->cs);
 475
 476	return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
 477				      check_only);
 478}
 479
 480static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
 481			      struct nand_chip *chip, bool enable)
 482{
 483	struct tegra_nand_chip *nand = to_tegra_chip(chip);
 484
 485	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && enable)
 486		writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
 487	else
 488		writel_relaxed(0, ctrl->regs + BCH_CONFIG);
 489
 490	if (enable)
 491		writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
 492	else
 493		writel_relaxed(nand->config, ctrl->regs + CONFIG);
 494}
 495
 496static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
 497				void *buf, void *oob_buf, int oob_len, int page,
 498				bool read)
 499{
 500	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
 501	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 502	dma_addr_t dma_addr = 0, dma_addr_oob = 0;
 503	u32 addr1, cmd, dma_ctrl;
 504	int ret;
 505
 506	tegra_nand_select_target(chip, chip->cur_cs);
 507
 508	if (read) {
 509		writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
 510		writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
 511	} else {
 512		writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
 513		writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
 514	}
 515	cmd = COMMAND_CLE | COMMAND_SEC_CMD;
 516
 517	/* Lower 16-bits are column, by default 0 */
 518	addr1 = page << 16;
 519
 520	if (!buf)
 521		addr1 |= mtd->writesize;
 522	writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
 523
 524	if (chip->options & NAND_ROW_ADDR_3) {
 525		writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
 526		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
 527	} else {
 528		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
 529	}
 530
 531	if (buf) {
 532		dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
 533		ret = dma_mapping_error(ctrl->dev, dma_addr);
 534		if (ret) {
 535			dev_err(ctrl->dev, "dma mapping error\n");
 536			return -EINVAL;
 537		}
 538
 539		writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
 540		writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
 541	}
 542
 543	if (oob_buf) {
 544		dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
 545					      dir);
 546		ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
 547		if (ret) {
 548			dev_err(ctrl->dev, "dma mapping error\n");
 549			ret = -EINVAL;
 550			goto err_unmap_dma_page;
 551		}
 552
 553		writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
 554		writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
 555	}
 556
 557	dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
 558		   DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
 559		   DMA_MST_CTRL_BURST_16;
 560
 561	if (buf)
 562		dma_ctrl |= DMA_MST_CTRL_EN_A;
 563	if (oob_buf)
 564		dma_ctrl |= DMA_MST_CTRL_EN_B;
 565
 566	if (read)
 567		dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
 568	else
 569		dma_ctrl |= DMA_MST_CTRL_OUT;
 570
 571	writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);
 572
 573	cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
 574	       COMMAND_CE(ctrl->cur_cs);
 575
 576	if (buf)
 577		cmd |= COMMAND_A_VALID;
 578	if (oob_buf)
 579		cmd |= COMMAND_B_VALID;
 580
 581	if (read)
 582		cmd |= COMMAND_RX;
 583	else
 584		cmd |= COMMAND_TX | COMMAND_AFT_DAT;
 585
 586	writel_relaxed(cmd, ctrl->regs + COMMAND);
 587
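    	/*
    	 * The interrupt handler signals command_complete first, then
    	 * dma_complete once the DMA transfer has finished; allow 500 ms
    	 * for each.
    	 */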
 588	ret = wait_for_completion_timeout(&ctrl->command_complete,
 589					  msecs_to_jiffies(500));
 590	if (!ret) {
 591		dev_err(ctrl->dev, "COMMAND timeout\n");
 592		tegra_nand_dump_reg(ctrl);
 593		tegra_nand_controller_abort(ctrl);
 594		ret = -ETIMEDOUT;
 595		goto err_unmap_dma;
 596	}
 597
 598	ret = wait_for_completion_timeout(&ctrl->dma_complete,
 599					  msecs_to_jiffies(500));
 600	if (!ret) {
 601		dev_err(ctrl->dev, "DMA timeout\n");
 602		tegra_nand_dump_reg(ctrl);
 603		tegra_nand_controller_abort(ctrl);
 604		ret = -ETIMEDOUT;
 605		goto err_unmap_dma;
 606	}
 607	ret = 0;
 608
 609err_unmap_dma:
 610	if (oob_buf)
 611		dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
 612err_unmap_dma_page:
 613	if (buf)
 614		dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);
 615
 616	return ret;
 617}
 618
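    /*
     * The raw accessors below transfer the full page and, when requested,
     * the complete OOB area; hardware ECC stays disabled on these paths.
     */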
 619static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
 620				    int oob_required, int page)
 621{
 622	struct mtd_info *mtd = nand_to_mtd(chip);
 623	void *oob_buf = oob_required ? chip->oob_poi : NULL;
 624
 625	return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
 626				    mtd->oobsize, page, true);
 627}
 628
 629static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
 630				     int oob_required, int page)
 631{
 632	struct mtd_info *mtd = nand_to_mtd(chip);
 633	void *oob_buf = oob_required ? chip->oob_poi : NULL;
 634
 635	return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
 636				     mtd->oobsize, page, false);
 637}
 638
 639static int tegra_nand_read_oob(struct nand_chip *chip, int page)
 640{
 641	struct mtd_info *mtd = nand_to_mtd(chip);
 642
 643	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
 644				    mtd->oobsize, page, true);
 645}
 646
 647static int tegra_nand_write_oob(struct nand_chip *chip, int page)
 648{
 649	struct mtd_info *mtd = nand_to_mtd(chip);
 650
 651	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
 652				    mtd->oobsize, page, false);
 653}
 654
 655static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
 656				      int oob_required, int page)
 657{
 658	struct mtd_info *mtd = nand_to_mtd(chip);
 659	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
 660	struct tegra_nand_chip *nand = to_tegra_chip(chip);
 661	void *oob_buf = oob_required ? chip->oob_poi : NULL;
 662	u32 dec_stat, max_corr_cnt;
 663	unsigned long fail_sec_flag;
 664	int ret;
 665
 666	tegra_nand_hw_ecc(ctrl, chip, true);
 667	ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
 668	tegra_nand_hw_ecc(ctrl, chip, false);
 669	if (ret)
 670		return ret;
 671
 672	/* Neither correctable nor uncorrectable errors: the page has 0 bitflips */
 673	if (!ctrl->last_read_error)
 674		return 0;
 675
 676	/*
 677	 * Correctable or uncorrectable errors occurred. Use DEC_STAT_BUF,
 678	 * which contains information for all ECC selections.
 679	 *
 680	 * Note that since we do not use command queues, DEC_RESULT does
 681	 * not state how many pages can be read from DEC_STAT_BUF. But
 682	 * since CORRFAIL_ERR did occur during the page read, we do have a
 683	 * valid result in DEC_STAT_BUF.
 684	 */
 685	ctrl->last_read_error = false;
 686	dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);
 687
 688	fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
 689			DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;
 690
 691	max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
 692		       DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;
 693
 694	if (fail_sec_flag) {
 695		int bit, max_bitflips = 0;
 696
 697		/*
 698		 * Since we do not support subpage writes, a complete page
 699		 * is either written or not. We can take a shortcut here by
 700		 * checking whether any of the sectors has been read
 701		 * successfully. If at least one sector has been read
 702		 * successfully, the page must have been written previously;
 703		 * it cannot be an erased page.
 704		 *
 705		 * E.g. the controller might return a fail_sec_flag of 0x4,
 706		 * which means only the third sector failed to correct. The
 707		 * page must have been written, and the third sector really
 708		 * is not correctable anymore.
 709		 */
 710		if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
 711			mtd->ecc_stats.failed += hweight8(fail_sec_flag);
 712			return max_corr_cnt;
 713		}
 714
 715		/*
 716		 * All sectors failed to correct, but the ECC isn't smart
 717		 * enough to figure out if a page is really just erased.
 718		 * Read OOB data and check whether data/OOB is completely
 719		 * erased or if error correction just failed for all sub-
 720		 * pages.
 721		 */
 722		ret = tegra_nand_read_oob(chip, page);
 723		if (ret < 0)
 724			return ret;
 725
 726		for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
 727			u8 *data = buf + (chip->ecc.size * bit);
 728			u8 *oob = chip->oob_poi + nand->ecc.offset +
 729				  (chip->ecc.bytes * bit);
 730
 731			ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
 732							  oob, chip->ecc.bytes,
 733							  NULL, 0,
 734							  chip->ecc.strength);
 735			if (ret < 0) {
 736				mtd->ecc_stats.failed++;
 737			} else {
 738				mtd->ecc_stats.corrected += ret;
 739				max_bitflips = max(ret, max_bitflips);
 740			}
 741		}
 742
 743		return max_t(unsigned int, max_corr_cnt, max_bitflips);
 744	} else {
 745		int corr_sec_flag;
 746
 747		corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
 748				DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;
 749
 750		/*
 751		 * The value returned in the register is the maximum of
 752		 * bitflips encountered in any of the ECC regions. As there
 753		 * is no way to get the number of bitflips in a specific
 754		 * region, we cannot deliver exact stats; instead we
 755		 * overestimate the number of corrected bitflips by assuming
 756		 * that all regions where errors have been corrected
 757		 * encountered the maximum number of bitflips.
 758		 */
 759		mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);
 760
 761		return max_corr_cnt;
 762	}
 763}
 764
 765static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
 766				       int oob_required, int page)
 767{
 768	struct mtd_info *mtd = nand_to_mtd(chip);
 769	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
 770	void *oob_buf = oob_required ? chip->oob_poi : NULL;
 771	int ret;
 772
 773	tegra_nand_hw_ecc(ctrl, chip, true);
 774	ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
 775				   0, page, false);
 776	tegra_nand_hw_ecc(ctrl, chip, false);
 777
 778	return ret;
 779}
 780
 781static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
 782				    const struct nand_sdr_timings *timings)
 783{
 784	/*
 785	 * The period (and all other timings in this function) is in ps,
 786	 * so need to take care here to avoid integer overflows.
 787	 */
 788	unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
 789	unsigned int period = DIV_ROUND_UP(1000000, rate);
 790	u32 val, reg = 0;
 791
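    	/*
    	 * Each TIMING field stores a cycle count minus a field-specific
    	 * offset; the OFFSET() helper (defined earlier in this file)
    	 * subtracts the hardware's built-in cycles, clamping at zero so
    	 * a small cycle count cannot underflow the register field.
    	 */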
 792	val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
 793				timings->tRC_min), period);
 794	reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));
 795
 796	val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
 797			       max(timings->tALS_min, timings->tALH_min)),
 798			   period);
 799	reg |= TIMING_TCS(OFFSET(val, 2));
 800
 801	val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
 802			   period);
 803	reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));
 804
 805	reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
 806	reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
 807	reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
 808	reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
 809	reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));
 810
 811	writel_relaxed(reg, ctrl->regs + TIMING_1);
 812
 813	val = DIV_ROUND_UP(timings->tADL_min, period);
 814	reg = TIMING_TADL(OFFSET(val, 3));
 815
 816	writel_relaxed(reg, ctrl->regs + TIMING_2);
 817}
 818
 819static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
 820				      const struct nand_interface_config *conf)
 821{
 822	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
 823	const struct nand_sdr_timings *timings;
 824
 825	timings = nand_get_sdr_timings(conf);
 826	if (IS_ERR(timings))
 827		return PTR_ERR(timings);
 828
 829	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
 830		return 0;
 831
 832	tegra_nand_setup_timing(ctrl, timings);
 833
 834	return 0;
 835}
 836
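    /*
     * ECC strengths in bits per 512-byte step. Boot media are restricted
     * to the smaller "bootable" sets, i.e. the strengths a boot ROM is
     * assumed to be able to read.
     */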
 837static const int rs_strength_bootable[] = { 4 };
 838static const int rs_strength[] = { 4, 6, 8 };
 839static const int bch_strength_bootable[] = { 8, 16 };
 840static const int bch_strength[] = { 4, 8, 14, 16 };
 841
 842static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
 843				   int strength_len, int bits_per_step,
 844				   int oobsize)
 845{
 846	struct nand_device *base = mtd_to_nanddev(nand_to_mtd(chip));
 847	const struct nand_ecc_props *requirements =
 848		nanddev_get_ecc_requirements(base);
 849	bool maximize = base->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
 850	int i;
 851
 852	/*
 853	 * Loop through the available strengths; iterate backwards when we
 854	 * are asked to maximize the (BCH) strength.
 855	 */
 856	for (i = 0; i < strength_len; i++) {
 857		int strength_sel, bytes_per_step, bytes_per_page;
 858
 859		if (maximize) {
 860			strength_sel = strength[strength_len - i - 1];
 861		} else {
 862			strength_sel = strength[i];
 863
 864			if (strength_sel < requirements->strength)
 865				continue;
 866		}
 867
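    		/*
    		 * Parity bytes per step; the per-page total is rounded up
    		 * to a 4-byte boundary.
    		 */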
 868		bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
 869					      BITS_PER_BYTE);
 870		bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);
 871
 872		/* Check whether strength fits OOB */
 873		if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
 874			return strength_sel;
 875	}
 876
 877	return -EINVAL;
 878}
 879
 880static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
 881{
 882	const int *strength;
 883	int strength_len, bits_per_step;
 884
 885	switch (chip->ecc.algo) {
 886	case NAND_ECC_ALGO_RS:
 887		bits_per_step = BITS_PER_STEP_RS;
 888		if (chip->options & NAND_IS_BOOT_MEDIUM) {
 889			strength = rs_strength_bootable;
 890			strength_len = ARRAY_SIZE(rs_strength_bootable);
 891		} else {
 892			strength = rs_strength;
 893			strength_len = ARRAY_SIZE(rs_strength);
 894		}
 895		break;
 896	case NAND_ECC_ALGO_BCH:
 897		bits_per_step = BITS_PER_STEP_BCH;
 898		if (chip->options & NAND_IS_BOOT_MEDIUM) {
 899			strength = bch_strength_bootable;
 900			strength_len = ARRAY_SIZE(bch_strength_bootable);
 901		} else {
 902			strength = bch_strength;
 903			strength_len = ARRAY_SIZE(bch_strength);
 904		}
 905		break;
 906	default:
 907		return -EINVAL;
 908	}
 909
 910	return tegra_nand_get_strength(chip, strength, strength_len,
 911				       bits_per_step, oobsize);
 912}
 913
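    /*
     * Called once the NAND chip has been identified: pick the ECC
     * algorithm and strength, derive the per-chip CONFIG/BCH_CONFIG
     * values and install the matching OOB layout.
     */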
 914static int tegra_nand_attach_chip(struct nand_chip *chip)
 915{
 916	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
 917	const struct nand_ecc_props *requirements =
 918		nanddev_get_ecc_requirements(&chip->base);
 919	struct tegra_nand_chip *nand = to_tegra_chip(chip);
 920	struct mtd_info *mtd = nand_to_mtd(chip);
 921	int bits_per_step;
 922	int ret;
 923
 924	if (chip->bbt_options & NAND_BBT_USE_FLASH)
 925		chip->bbt_options |= NAND_BBT_NO_OOB;
 926
 927	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 928	chip->ecc.size = 512;
 929	chip->ecc.steps = mtd->writesize / chip->ecc.size;
 930	if (requirements->step_size != 512) {
 931		dev_err(ctrl->dev, "Unsupported step size %d\n",
 932			requirements->step_size);
 933		return -EINVAL;
 934	}
 935
 936	chip->ecc.read_page = tegra_nand_read_page_hwecc;
 937	chip->ecc.write_page = tegra_nand_write_page_hwecc;
 938	chip->ecc.read_page_raw = tegra_nand_read_page_raw;
 939	chip->ecc.write_page_raw = tegra_nand_write_page_raw;
 940	chip->ecc.read_oob = tegra_nand_read_oob;
 941	chip->ecc.write_oob = tegra_nand_write_oob;
 942
 943	if (chip->options & NAND_BUSWIDTH_16)
 944		nand->config |= CONFIG_BUS_WIDTH_16;
 945
 946	if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
 947		if (mtd->writesize < 2048)
 948			chip->ecc.algo = NAND_ECC_ALGO_RS;
 949		else
 950			chip->ecc.algo = NAND_ECC_ALGO_BCH;
 951	}
 952
 953	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && mtd->writesize < 2048) {
 954		dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
 955		return -EINVAL;
 956	}
 957
 958	if (!chip->ecc.strength) {
 959		ret = tegra_nand_select_strength(chip, mtd->oobsize);
 960		if (ret < 0) {
 961			dev_err(ctrl->dev,
 962				"No valid strength found, minimum %d\n",
 963				requirements->strength);
 964			return ret;
 965		}
 966
 967		chip->ecc.strength = ret;
 968	}
 969
 970	nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
 971			   CONFIG_SKIP_SPARE_SIZE_4;
 972
 973	switch (chip->ecc.algo) {
 974	case NAND_ECC_ALGO_RS:
 975		bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
 976		mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
 977		nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
 978				    CONFIG_ERR_COR;
 979		switch (chip->ecc.strength) {
 980		case 4:
 981			nand->config_ecc |= CONFIG_TVAL_4;
 982			break;
 983		case 6:
 984			nand->config_ecc |= CONFIG_TVAL_6;
 985			break;
 986		case 8:
 987			nand->config_ecc |= CONFIG_TVAL_8;
 988			break;
 989		default:
 990			dev_err(ctrl->dev, "ECC strength %d not supported\n",
 991				chip->ecc.strength);
 992			return -EINVAL;
 993		}
 994		break;
 995	case NAND_ECC_ALGO_BCH:
 996		bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
 997		mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
 998		nand->bch_config = BCH_ENABLE;
 999		switch (chip->ecc.strength) {
1000		case 4:
1001			nand->bch_config |= BCH_TVAL_4;
1002			break;
1003		case 8:
1004			nand->bch_config |= BCH_TVAL_8;
1005			break;
1006		case 14:
1007			nand->bch_config |= BCH_TVAL_14;
1008			break;
1009		case 16:
1010			nand->bch_config |= BCH_TVAL_16;
1011			break;
1012		default:
1013			dev_err(ctrl->dev, "ECC strength %d not supported\n",
1014				chip->ecc.strength);
1015			return -EINVAL;
1016		}
1017		break;
1018	default:
1019		dev_err(ctrl->dev, "ECC algorithm not supported\n");
1020		return -EINVAL;
1021	}
1022
1023	dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
1024		 chip->ecc.algo == NAND_ECC_ALGO_BCH ? "BCH" : "RS",
1025		 chip->ecc.strength);
1026
1027	chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);
1028
1029	switch (mtd->writesize) {
1030	case 256:
1031		nand->config |= CONFIG_PS_256;
1032		break;
1033	case 512:
1034		nand->config |= CONFIG_PS_512;
1035		break;
1036	case 1024:
1037		nand->config |= CONFIG_PS_1024;
1038		break;
1039	case 2048:
1040		nand->config |= CONFIG_PS_2048;
1041		break;
1042	case 4096:
1043		nand->config |= CONFIG_PS_4096;
1044		break;
1045	default:
1046		dev_err(ctrl->dev, "Unsupported writesize %d\n",
1047			mtd->writesize);
1048		return -ENODEV;
1049	}
1050
1051	/* Store complete configuration for HW ECC in config_ecc */
1052	nand->config_ecc |= nand->config;
1053
1054	/* Raw (non-HW-ECC) reads/writes transfer the complete OOB area */
1055	nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
1056	writel_relaxed(nand->config, ctrl->regs + CONFIG);
1057
1058	return 0;
1059}
1060
1061static const struct nand_controller_ops tegra_nand_controller_ops = {
1062	.attach_chip = &tegra_nand_attach_chip,
1063	.exec_op = tegra_nand_exec_op,
1064	.setup_interface = tegra_nand_setup_interface,
1065};
1066
1067static int tegra_nand_chips_init(struct device *dev,
1068				 struct tegra_nand_controller *ctrl)
1069{
1070	struct device_node *np = dev->of_node;
1071	struct device_node *np_nand;
1072	int nsels, nchips = of_get_child_count(np);
1073	struct tegra_nand_chip *nand;
1074	struct mtd_info *mtd;
1075	struct nand_chip *chip;
1076	int ret;
1077	u32 cs;
1078
1079	if (nchips != 1) {
1080		dev_err(dev, "Currently only one NAND chip supported\n");
1081		return -EINVAL;
1082	}
1083
1084	np_nand = of_get_next_child(np, NULL);
1085
1086	nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
1087	if (nsels != 1) {
1088		dev_err(dev, "Missing/invalid reg property\n");
1089		return -EINVAL;
1090	}
1091
1092	/* Retrieve the CS id; currently only single-die NAND is supported */
1093	ret = of_property_read_u32(np_nand, "reg", &cs);
1094	if (ret) {
1095		dev_err(dev, "could not retrieve reg property: %d\n", ret);
1096		return ret;
1097	}
1098
1099	nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
1100	if (!nand)
1101		return -ENOMEM;
1102
1103	nand->cs[0] = cs;
1104
1105	nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
1106
1107	if (IS_ERR(nand->wp_gpio)) {
1108		ret = PTR_ERR(nand->wp_gpio);
1109		dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
1110		return ret;
1111	}
1112
1113	chip = &nand->chip;
1114	chip->controller = &ctrl->controller;
1115
1116	mtd = nand_to_mtd(chip);
1117
1118	mtd->dev.parent = dev;
1119	mtd->owner = THIS_MODULE;
1120
1121	nand_set_flash_node(chip, np_nand);
1122
1123	if (!mtd->name)
1124		mtd->name = "tegra_nand";
1125
1126	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;
1127
1128	ret = nand_scan(chip, 1);
1129	if (ret)
1130		return ret;
1131
1132	mtd_ooblayout_ecc(mtd, 0, &nand->ecc);
1133
1134	ret = mtd_device_register(mtd, NULL, 0);
1135	if (ret) {
1136		dev_err(dev, "Failed to register mtd device: %d\n", ret);
1137		nand_cleanup(chip);
1138		return ret;
1139	}
1140
1141	ctrl->chip = chip;
1142
1143	return 0;
1144}
1145
1146static int tegra_nand_probe(struct platform_device *pdev)
1147{
1148	struct reset_control *rst;
1149	struct tegra_nand_controller *ctrl;
1150	int err = 0;
1151
1152	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
1153	if (!ctrl)
1154		return -ENOMEM;
1155
1156	ctrl->dev = &pdev->dev;
1157	platform_set_drvdata(pdev, ctrl);
1158	nand_controller_init(&ctrl->controller);
1159	ctrl->controller.ops = &tegra_nand_controller_ops;
1160
1161	ctrl->regs = devm_platform_ioremap_resource(pdev, 0);
1162	if (IS_ERR(ctrl->regs))
1163		return PTR_ERR(ctrl->regs);
1164
1165	rst = devm_reset_control_get(&pdev->dev, "nand");
1166	if (IS_ERR(rst))
1167		return PTR_ERR(rst);
1168
1169	ctrl->clk = devm_clk_get(&pdev->dev, "nand");
1170	if (IS_ERR(ctrl->clk))
1171		return PTR_ERR(ctrl->clk);
1172
1173	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
1174	if (err)
1175		return err;
1176
1177	/*
1178	 * This driver doesn't support active power management yet,
1179	 * so we simply keep the device resumed.
1180	 */
1181	pm_runtime_enable(&pdev->dev);
1182	err = pm_runtime_resume_and_get(&pdev->dev);
1183	if (err)
1184		goto err_dis_pm;
1185
1186	err = reset_control_reset(rst);
1187	if (err) {
1188		dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
1189		goto err_put_pm;
1190	}
1191
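    	/*
    	 * Program the defaults for the controller's hardware status
    	 * checks and unmask the interrupts this driver handles.
    	 */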
1192	writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
1193	writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
1194	writel_relaxed(INT_MASK, ctrl->regs + IER);
1195
1196	init_completion(&ctrl->command_complete);
1197	init_completion(&ctrl->dma_complete);
1198
1199	ctrl->irq = platform_get_irq(pdev, 0);
    	/* platform_get_irq() returns a negative errno on failure */
    	if (ctrl->irq < 0) {
    		err = ctrl->irq;
    		goto err_put_pm;
    	}
    
1200	err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
1201			       dev_name(&pdev->dev), ctrl);
1202	if (err) {
1203		dev_err(ctrl->dev, "Failed to request IRQ: %d\n", err);
1204		goto err_put_pm;
1205	}
1206
1207	writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);
1208
1209	err = tegra_nand_chips_init(ctrl->dev, ctrl);
1210	if (err)
1211		goto err_put_pm;
1212
1213	return 0;
1214
1215err_put_pm:
1216	pm_runtime_put_sync_suspend(ctrl->dev);
1217	pm_runtime_force_suspend(ctrl->dev);
1218err_dis_pm:
1219	pm_runtime_disable(&pdev->dev);
1220	return err;
1221}
1222
1223static int tegra_nand_remove(struct platform_device *pdev)
1224{
1225	struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
1226	struct nand_chip *chip = ctrl->chip;
1227	struct mtd_info *mtd = nand_to_mtd(chip);
1228
1229	WARN_ON(mtd_device_unregister(mtd));
1230
1231	nand_cleanup(chip);
1232
1233	pm_runtime_put_sync_suspend(ctrl->dev);
1234	pm_runtime_force_suspend(ctrl->dev);
1235
1236	return 0;
1237}
1238
1239static int __maybe_unused tegra_nand_runtime_resume(struct device *dev)
1240{
1241	struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
1242	int err;
1243
1244	err = clk_prepare_enable(ctrl->clk);
1245	if (err) {
1246		dev_err(dev, "Failed to enable clock: %d\n", err);
1247		return err;
1248	}
1249
1250	return 0;
1251}
1252
1253static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev)
1254{
1255	struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
1256
1257	clk_disable_unprepare(ctrl->clk);
1258
1259	return 0;
1260}
1261
1262static const struct dev_pm_ops tegra_nand_pm = {
1263	SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume,
1264			   NULL)
1265};
1266
1267static const struct of_device_id tegra_nand_of_match[] = {
1268	{ .compatible = "nvidia,tegra20-nand" },
1269	{ /* sentinel */ }
1270};
1271MODULE_DEVICE_TABLE(of, tegra_nand_of_match);
1272
1273static struct platform_driver tegra_nand_driver = {
1274	.driver = {
1275		.name = "tegra-nand",
1276		.of_match_table = tegra_nand_of_match,
1277		.pm = &tegra_nand_pm,
1278	},
1279	.probe = tegra_nand_probe,
1280	.remove = tegra_nand_remove,
1281};
1282module_platform_driver(tegra_nand_driver);
1283
1284MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
1285MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
1286MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
1287MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
1288MODULE_LICENSE("GPL v2");