/*
 * MTK NAND Flash controller driver.
 * Copyright (C) 2016 MediaTek Inc.
 * Authors:	Xiaolei Li		<xiaolei.li@mediatek.com>
 *		Jorge Ramirez-Ortiz	<jorge.ramirez-ortiz@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include "mtk_ecc.h"

/* NAND controller register definition */
#define NFI_CNFG		(0x00)
#define		CNFG_AHB		BIT(0)
#define		CNFG_READ_EN		BIT(1)
#define		CNFG_DMA_BURST_EN	BIT(2)
#define		CNFG_BYTE_RW		BIT(6)
#define		CNFG_HW_ECC_EN		BIT(8)
#define		CNFG_AUTO_FMT_EN	BIT(9)
#define		CNFG_OP_CUST		(6 << 12)
#define NFI_PAGEFMT		(0x04)
#define		PAGEFMT_FDM_ECC_SHIFT	(12)
#define		PAGEFMT_FDM_SHIFT	(8)
#define		PAGEFMT_SPARE_16	(0)
#define		PAGEFMT_SPARE_26	(1)
#define		PAGEFMT_SPARE_27	(2)
#define		PAGEFMT_SPARE_28	(3)
#define		PAGEFMT_SPARE_32	(4)
#define		PAGEFMT_SPARE_36	(5)
#define		PAGEFMT_SPARE_40	(6)
#define		PAGEFMT_SPARE_44	(7)
#define		PAGEFMT_SPARE_48	(8)
#define		PAGEFMT_SPARE_49	(9)
#define		PAGEFMT_SPARE_50	(0xa)
#define		PAGEFMT_SPARE_51	(0xb)
#define		PAGEFMT_SPARE_52	(0xc)
#define		PAGEFMT_SPARE_62	(0xd)
#define		PAGEFMT_SPARE_63	(0xe)
#define		PAGEFMT_SPARE_64	(0xf)
#define		PAGEFMT_SPARE_SHIFT	(4)
#define		PAGEFMT_SEC_SEL_512	BIT(2)
#define		PAGEFMT_512_2K		(0)
#define		PAGEFMT_2K_4K		(1)
#define		PAGEFMT_4K_8K		(2)
#define		PAGEFMT_8K_16K		(3)
/* NFI control */
#define NFI_CON			(0x08)
#define		CON_FIFO_FLUSH		BIT(0)
#define		CON_NFI_RST		BIT(1)
#define		CON_BRD			BIT(8)	/* burst read */
#define		CON_BWR			BIT(9)	/* burst write */
#define		CON_SEC_SHIFT		(12)
/* Timing control register */
#define NFI_ACCCON		(0x0C)
#define NFI_INTR_EN		(0x10)
#define		INTR_AHB_DONE_EN	BIT(6)
#define NFI_INTR_STA		(0x14)
#define NFI_CMD			(0x20)
#define NFI_ADDRNOB		(0x30)
#define NFI_COLADDR		(0x34)
#define NFI_ROWADDR		(0x38)
#define NFI_STRDATA		(0x40)
#define		STAR_EN			(1)
#define		STAR_DE			(0)
#define NFI_CNRNB		(0x44)
#define NFI_DATAW		(0x50)
#define NFI_DATAR		(0x54)
#define NFI_PIO_DIRDY		(0x58)
#define		PIO_DI_RDY		(0x01)
#define NFI_STA			(0x60)
#define		STA_CMD			BIT(0)
#define		STA_ADDR		BIT(1)
#define		STA_BUSY		BIT(8)
#define		STA_EMP_PAGE		BIT(12)
#define		NFI_FSM_CUSTDATA	(0xe << 16)
#define		NFI_FSM_MASK		(0xf << 16)
#define NFI_ADDRCNTR		(0x70)
#define		CNTR_MASK		GENMASK(16, 12)
#define		ADDRCNTR_SEC_SHIFT	(12)
#define		ADDRCNTR_SEC(val) \
		(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
#define NFI_STRADDR		(0x80)
#define NFI_BYTELEN		(0x84)
#define NFI_CSEL		(0x90)
#define NFI_FDML(x)		(0xA0 + (x) * sizeof(u32) * 2)
#define NFI_FDMM(x)		(0xA4 + (x) * sizeof(u32) * 2)
#define NFI_FDM_MAX_SIZE	(8)
#define NFI_FDM_MIN_SIZE	(1)
#define NFI_MASTER_STA		(0x224)
#define		MASTER_STA_MASK		(0x0FFF)
#define NFI_EMPTY_THRESH	(0x23C)

#define MTK_NAME		"mtk-nand"
#define KB(x)			((x) * 1024UL)
#define MB(x)			(KB(x) * 1024UL)

#define MTK_TIMEOUT		(500000)
#define MTK_RESET_TIMEOUT	(1000000)
#define MTK_MAX_SECTOR		(16)
#define MTK_NAND_MAX_NSELS	(2)

struct mtk_nfc_bad_mark_ctl {
	void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
	u32 sec;
	u32 pos;
};

/*
 * FDM: region used to store free OOB data
 */
struct mtk_nfc_fdm {
	u32 reg_size;
	u32 ecc_size;
};

struct mtk_nfc_nand_chip {
	struct list_head node;
	struct nand_chip nand;

	struct mtk_nfc_bad_mark_ctl bad_mark;
	struct mtk_nfc_fdm fdm;
	u32 spare_per_sector;

	int nsels;
	u8 sels[0];
	/* nothing after this field */
};

struct mtk_nfc_clk {
	struct clk *nfi_clk;
	struct clk *pad_clk;
};

struct mtk_nfc {
	struct nand_hw_control controller;
	struct mtk_ecc_config ecc_cfg;
	struct mtk_nfc_clk clk;
	struct mtk_ecc *ecc;

	struct device *dev;
	void __iomem *regs;

	struct completion done;
	struct list_head chips;

	u8 *buffer;
};

static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
	return container_of(nand, struct mtk_nfc_nand_chip, nand);
}

static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
{
	return (u8 *)p + i * chip->ecc.size;
}

static inline u8 *oob_ptr(struct nand_chip *chip, int i)
{
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	u8 *poi;

	/* map the sector's FDM data to free oob:
	 * the beginning of the oob area stores the FDM data of bad mark sectors
	 */

	if (i < mtk_nand->bad_mark.sec)
		poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
	else if (i == mtk_nand->bad_mark.sec)
		poi = chip->oob_poi;
	else
		poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;

	return poi;
}

static inline int mtk_data_len(struct nand_chip *chip)
{
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);

	return chip->ecc.size + mtk_nand->spare_per_sector;
}

static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);

	return nfc->buffer + i * mtk_data_len(chip);
}

static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);

	return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
}

static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
{
	writel(val, nfc->regs + reg);
}

static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
{
	writew(val, nfc->regs + reg);
}

static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
{
	writeb(val, nfc->regs + reg);
}

static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
{
	return readl_relaxed(nfc->regs + reg);
}

static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
{
	return readw_relaxed(nfc->regs + reg);
}

static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
{
	return readb_relaxed(nfc->regs + reg);
}

static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	/* reset all registers and force the NFI master to terminate */
	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);

	/* wait for the master to finish the last transaction */
	ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
				 !(val & MASTER_STA_MASK), 50,
				 MTK_RESET_TIMEOUT);
	if (ret)
		dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
			 NFI_MASTER_STA, val);

	/* ensure any status register affected by the NFI master is reset */
	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
	nfi_writew(nfc, STAR_DE, NFI_STRDATA);
}

static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	nfi_writel(nfc, command, NFI_CMD);

	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
					!(val & STA_CMD), 10, MTK_TIMEOUT);
	if (ret) {
		dev_warn(dev, "nfi core timed out entering command mode\n");
		return -EIO;
	}

	return 0;
}

static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	nfi_writel(nfc, addr, NFI_COLADDR);
	nfi_writel(nfc, 0, NFI_ROWADDR);
	nfi_writew(nfc, 1, NFI_ADDRNOB);

	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
					!(val & STA_ADDR), 10, MTK_TIMEOUT);
	if (ret) {
		dev_warn(dev, "nfi core timed out entering address mode\n");
		return -EIO;
	}

	return 0;
}

static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	u32 fmt, spare;

	if (!mtd->writesize)
		return 0;

	spare = mtk_nand->spare_per_sector;

	switch (mtd->writesize) {
	case 512:
		fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
		break;
	case KB(2):
		if (chip->ecc.size == 512)
			fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
		else
			fmt = PAGEFMT_512_2K;
		break;
	case KB(4):
		if (chip->ecc.size == 512)
			fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
		else
			fmt = PAGEFMT_2K_4K;
		break;
	case KB(8):
		if (chip->ecc.size == 512)
			fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
		else
			fmt = PAGEFMT_4K_8K;
		break;
	case KB(16):
		fmt = PAGEFMT_8K_16K;
		break;
	default:
		dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
		return -EINVAL;
	}

	/*
	 * the hardware will double the value for this eccsize, so we need to
	 * halve it
	 */
	if (chip->ecc.size == 1024)
		spare >>= 1;

	switch (spare) {
	case 16:
		fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
		break;
	case 26:
		fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
		break;
	case 27:
		fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
		break;
	case 28:
		fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
		break;
	case 32:
		fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
		break;
	case 36:
		fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
		break;
	case 40:
		fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
		break;
	case 44:
		fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
		break;
	case 48:
		fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
		break;
	case 49:
		fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
		break;
	case 50:
		fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
		break;
	case 51:
		fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
		break;
	case 52:
		fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
		break;
	case 62:
		fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
		break;
	case 63:
		fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
		break;
	case 64:
		fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
		break;
	default:
		dev_err(nfc->dev, "invalid spare per sector %d\n", spare);
		return -EINVAL;
	}

	fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
	fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
	nfi_writew(nfc, fmt, NFI_PAGEFMT);

	nfc->ecc_cfg.strength = chip->ecc.strength;
	nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;

	return 0;
}

static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(nand);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);

	if (chip < 0)
		return;

	mtk_nfc_hw_runtime_config(mtd);

	nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
}

static int mtk_nfc_dev_ready(struct mtd_info *mtd)
{
	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));

	if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
		return 0;

	return 1;
}

static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));

	if (ctrl & NAND_ALE) {
		mtk_nfc_send_address(nfc, dat);
	} else if (ctrl & NAND_CLE) {
		mtk_nfc_hw_reset(nfc);

		nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
		mtk_nfc_send_command(nfc, dat);
	}
}

static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
{
	int rc;
	u8 val;

	rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
				       val & PIO_DI_RDY, 10, MTK_TIMEOUT);
	if (rc < 0)
		dev_err(nfc->dev, "data not ready\n");
}

static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	u32 reg;

	/* after each byte read, the NFI_STA reg is reset by the hardware */
	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
	if (reg != NFI_FSM_CUSTDATA) {
		reg = nfi_readw(nfc, NFI_CNFG);
		reg |= CNFG_BYTE_RW | CNFG_READ_EN;
		nfi_writew(nfc, reg, NFI_CNFG);

		/*
		 * set to max sector to allow the HW to continue reading over
		 * unaligned accesses
		 */
		reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
		nfi_writel(nfc, reg, NFI_CON);

		/* trigger to fetch data */
		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
	}

	mtk_nfc_wait_ioready(nfc);

	return nfi_readb(nfc, NFI_DATAR);
}

static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = mtk_nfc_read_byte(mtd);
}

static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
{
	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
	u32 reg;

	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;

	if (reg != NFI_FSM_CUSTDATA) {
		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
		nfi_writew(nfc, reg, NFI_CNFG);

		reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
		nfi_writel(nfc, reg, NFI_CON);

		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
	}

	mtk_nfc_wait_ioready(nfc);
	nfi_writeb(nfc, byte, NFI_DATAW);
}

static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		mtk_nfc_write_byte(mtd, buf[i]);
}

static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	int size = chip->ecc.size + mtk_nand->fdm.reg_size;

	nfc->ecc_cfg.mode = ECC_DMA_MODE;
	nfc->ecc_cfg.op = ECC_ENCODE;

	return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
}

static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
{
	/* nop */
}

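/*
 * The factory bad block marker sits at a fixed column of the physical page.
 * Since the controller interleaves each sector's data with its spare bytes,
 * that column falls inside one sector's data area, so the byte there is
 * swapped with the first OOB byte whenever a page image is built or parsed.
 */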
static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
	u32 bad_pos = nand->bad_mark.pos;

	if (raw)
		bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
	else
		bad_pos += nand->bad_mark.sec * chip->ecc.size;

	swap(chip->oob_poi[0], buf[bad_pos]);
}

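/*
 * Prepare a subpage write in the bounce buffer: copy every sector's data,
 * but attach FDM bytes and compute ECC parity only for the sectors that
 * fall inside the requested [offset, offset + len) range.
 */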
static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
				  u32 len, const u8 *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 start, end;
	int i, ret;

	start = offset / chip->ecc.size;
	end = DIV_ROUND_UP(offset + len, chip->ecc.size);

	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	for (i = 0; i < chip->ecc.steps; i++) {
		memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
		       chip->ecc.size);

		if (start > i || i >= end)
			continue;

		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);

		/* program the ECC parity back into the OOB area */
		ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
		if (ret < 0)
			return ret;
	}

	return 0;
}

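/*
 * Build a raw page image in the bounce buffer: each sector's data (when a
 * data buffer is given) followed by its FDM bytes, with the bad block mark
 * swap applied to the sector that holds the marker.
 */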
static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 i;

	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	for (i = 0; i < chip->ecc.steps; i++) {
		if (buf)
			memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
			       chip->ecc.size);

		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
	}
}

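/*
 * Copy each sector's FDM (free OOB) bytes out of its FDML/FDMM register
 * pair: FDML holds bytes 0-3, FDMM holds bytes 4-7.
 */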
static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
				    u32 sectors)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 vall, valm;
	u8 *oobptr;
	int i, j;

	for (i = 0; i < sectors; i++) {
		oobptr = oob_ptr(chip, start + i);
		vall = nfi_readl(nfc, NFI_FDML(i));
		valm = nfi_readl(nfc, NFI_FDMM(i));

		for (j = 0; j < fdm->reg_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
	}
}

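/*
 * Pack each sector's FDM bytes into its FDML/FDMM register pair, padding
 * any bytes beyond fdm->reg_size with 0xff.
 */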
static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 vall, valm;
	u8 *oobptr;
	int i, j;

	for (i = 0; i < chip->ecc.steps; i++) {
		oobptr = oob_ptr(chip, i);
		vall = 0;
		valm = 0;
		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
						<< (j * 8);
			else
				valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
						<< ((j - 4) * 8);
		}
		nfi_writel(nfc, vall, NFI_FDML(i));
		nfi_writel(nfc, valm, NFI_FDMM(i));
	}
}

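/*
 * DMA the prepared page image to the controller, then wait for the AHB done
 * interrupt and for the sector counter to reach ecc.steps.
 */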
static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const u8 *buf, int page, int len)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct device *dev = nfc->dev;
	dma_addr_t addr;
	u32 reg;
	int ret;

	addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
	ret = dma_mapping_error(nfc->dev, addr);
	if (ret) {
		dev_err(nfc->dev, "dma mapping error\n");
		return -EINVAL;
	}

	reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
	nfi_writew(nfc, reg, NFI_CNFG);

	nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);

	init_completion(&nfc->done);

	reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
	nfi_writel(nfc, reg, NFI_CON);
	nfi_writew(nfc, STAR_EN, NFI_STRDATA);

	ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
	if (!ret) {
		dev_err(dev, "program ahb done timeout\n");
		nfi_writew(nfc, 0, NFI_INTR_EN);
		ret = -ETIMEDOUT;
		goto timeout;
	}

	ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
					ADDRCNTR_SEC(reg) >= chip->ecc.steps,
					10, MTK_TIMEOUT);
	if (ret)
		dev_err(dev, "hwecc write timeout\n");

timeout:

	dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
	nfi_writel(nfc, 0, NFI_CON);

	return ret;
}

static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			      const u8 *buf, int page, int raw)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	size_t len;
	const u8 *bufpoi;
	u32 reg;
	int ret;

	if (!raw) {
		/* OOB => FDM: from register, ECC: from HW */
		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
		nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);

		nfc->ecc_cfg.op = ECC_ENCODE;
		nfc->ecc_cfg.mode = ECC_NFI_MODE;
		ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
		if (ret) {
			/* clear NFI config */
			reg = nfi_readw(nfc, NFI_CNFG);
			reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
			nfi_writew(nfc, reg, NFI_CNFG);

			return ret;
		}

		memcpy(nfc->buffer, buf, mtd->writesize);
		mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
		bufpoi = nfc->buffer;

		/* write OOB into the FDM registers (OOB area in MTK NAND) */
		mtk_nfc_write_fdm(chip);
	} else {
		bufpoi = buf;
	}

	len = mtd->writesize + (raw ? mtd->oobsize : 0);
	ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);

	if (!raw)
		mtk_ecc_disable(nfc->ecc);

	return ret;
}

static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
				    struct nand_chip *chip, const u8 *buf,
				    int oob_on, int page)
{
	return mtk_nfc_write_page(mtd, chip, buf, page, 0);
}

static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				  const u8 *buf, int oob_on, int pg)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);

	mtk_nfc_format_page(mtd, buf);
	return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
}

static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
				       struct nand_chip *chip, u32 offset,
				       u32 data_len, const u8 *buf,
				       int oob_on, int page)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	int ret;

	ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
	if (ret < 0)
		return ret;

	/* use the data in the private buffer (now with FDM and ECC parity) */
	return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
}

static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
				 int page)
{
	int ret;

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);

	ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
	if (ret < 0)
		return -EIO;

	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	ret = chip->waitfunc(mtd, chip);

	return ret & NAND_STATUS_FAIL ? -EIO : 0;
}

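/*
 * Account the ECC engine's correction results for this read. If the
 * controller flagged the page as empty (STA_EMP_PAGE), return clean 0xff
 * data and OOB instead.
 */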
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_ecc_stats stats;
	int rc, i;

	rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
	if (rc) {
		memset(buf, 0xff, sectors * chip->ecc.size);
		for (i = 0; i < sectors; i++)
			memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
		return 0;
	}

	mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
	mtd->ecc_stats.corrected += stats.corrected;
	mtd->ecc_stats.failed += stats.failed;

	return stats.bitflips;
}

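/*
 * Core read path: DMA a run of consecutive sectors into bufpoi. With ECC
 * enabled the engine decodes in NFI mode and the FDM bytes are read back
 * from the FDM registers; in raw mode data and spare come back untouched.
 */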
static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
				u32 data_offs, u32 readlen,
				u8 *bufpoi, int page, int raw)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	u32 spare = mtk_nand->spare_per_sector;
	u32 column, sectors, start, end, reg;
	dma_addr_t addr;
	int bitflips;
	size_t len;
	u8 *buf;
	int rc;

	start = data_offs / chip->ecc.size;
	end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);

	sectors = end - start;
	column = start * (chip->ecc.size + spare);

	len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
	buf = bufpoi + start * chip->ecc.size;

	if (column != 0)
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);

	addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
	rc = dma_mapping_error(nfc->dev, addr);
	if (rc) {
		dev_err(nfc->dev, "dma mapping error\n");

		return -EINVAL;
	}

	reg = nfi_readw(nfc, NFI_CNFG);
	reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
	if (!raw) {
		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
		nfi_writew(nfc, reg, NFI_CNFG);

		nfc->ecc_cfg.mode = ECC_NFI_MODE;
		nfc->ecc_cfg.sectors = sectors;
		nfc->ecc_cfg.op = ECC_DECODE;
		rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
		if (rc) {
			dev_err(nfc->dev, "ecc enable\n");
			/* clear NFI_CNFG */
			reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
				CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
			nfi_writew(nfc, reg, NFI_CNFG);
			dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

			return rc;
		}
	} else {
		nfi_writew(nfc, reg, NFI_CNFG);
	}

	nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);

	init_completion(&nfc->done);
	reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
	nfi_writel(nfc, reg, NFI_CON);
	nfi_writew(nfc, STAR_EN, NFI_STRDATA);

	rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
	if (!rc)
		dev_warn(nfc->dev, "read ahb/dma done timeout\n");

	rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
				       ADDRCNTR_SEC(reg) >= sectors, 10,
				       MTK_TIMEOUT);
	if (rc < 0) {
		dev_err(nfc->dev, "subpage done timeout\n");
		bitflips = -EIO;
	} else {
		bitflips = 0;
		if (!raw) {
			rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
			bitflips = rc < 0 ? -ETIMEDOUT :
				mtk_nfc_update_ecc_stats(mtd, buf, sectors);
			mtk_nfc_read_fdm(chip, start, sectors);
		}
	}

	dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

	if (raw)
		goto done;

	mtk_ecc_disable(nfc->ecc);

	if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
		mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
done:
	nfi_writel(nfc, 0, NFI_CON);

	return bitflips;
}

static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
				      struct nand_chip *chip, u32 off,
				      u32 len, u8 *p, int pg)
{
	return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
}

static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
				   struct nand_chip *chip, u8 *p,
				   int oob_on, int pg)
{
	return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
}

static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 u8 *buf, int oob_on, int page)
{
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	int i, ret;

	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
				   page, 1);
	if (ret < 0)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++) {
		memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);

		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		if (buf)
			memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
			       chip->ecc.size);
	}

	return ret;
}

static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
}

static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
{
	/*
	 * ACCON: access timing control register
	 * -------------------------------------
	 * 31:28: minimum required time for CS post pulling down after accessing
	 *	the device
	 * 27:22: minimum required time for CS pre pulling down before accessing
	 *	the device
	 * 21:16: minimum required time from NCEB low to NREB low
	 * 15:12: minimum required time from NWEB high to NREB low.
	 * 11:08: write enable hold time
	 * 07:04: write wait states
	 * 03:00: read wait states
	 */
	nfi_writel(nfc, 0x10804211, NFI_ACCCON);

	/*
	 * CNRNB: nand ready/busy register
	 * -------------------------------
	 * 7:4: timeout register for polling the NAND busy/ready signal
	 * 0  : poll the status of the busy/ready signal after [7:4]*16 cycles.
	 */
	nfi_writew(nfc, 0xf1, NFI_CNRNB);
	nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);

	mtk_nfc_hw_reset(nfc);

	nfi_readl(nfc, NFI_INTR_STA);
	nfi_writel(nfc, 0, NFI_INTR_EN);
}

static irqreturn_t mtk_nfc_irq(int irq, void *id)
{
	struct mtk_nfc *nfc = id;
	u16 sta, ien;

	sta = nfi_readw(nfc, NFI_INTR_STA);
	ien = nfi_readw(nfc, NFI_INTR_EN);

	if (!(sta & ien))
		return IRQ_NONE;

	nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
	complete(&nfc->done);

	return IRQ_HANDLED;
}

static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk->nfi_clk);
	if (ret) {
		dev_err(dev, "failed to enable nfi clk\n");
		return ret;
	}

	ret = clk_prepare_enable(clk->pad_clk);
	if (ret) {
		dev_err(dev, "failed to enable pad clk\n");
		clk_disable_unprepare(clk->nfi_clk);
		return ret;
	}

	return 0;
}

static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
{
	clk_disable_unprepare(clk->nfi_clk);
	clk_disable_unprepare(clk->pad_clk);
}

static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oob_region)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 eccsteps;

	eccsteps = mtd->writesize / chip->ecc.size;

	if (section >= eccsteps)
		return -ERANGE;

	oob_region->length = fdm->reg_size - fdm->ecc_size;
	oob_region->offset = section * fdm->reg_size + fdm->ecc_size;

	return 0;
}

static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oob_region)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	u32 eccsteps;

	if (section)
		return -ERANGE;

	eccsteps = mtd->writesize / chip->ecc.size;
	oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
	oob_region->length = mtd->oobsize - oob_region->offset;

	return 0;
}

static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
	.free = mtk_nfc_ooblayout_free,
	.ecc = mtk_nfc_ooblayout_ecc,
};

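/*
 * Size the per-sector FDM region: whatever spare space is left after the
 * ECC parity bytes, capped at NFI_FDM_MAX_SIZE. The first FDM byte is also
 * covered by ECC and is used for bad block mark storage.
 */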
static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
	u32 ecc_bytes;

	ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);

	fdm->reg_size = chip->spare_per_sector - ecc_bytes;
	if (fdm->reg_size > NFI_FDM_MAX_SIZE)
		fdm->reg_size = NFI_FDM_MAX_SIZE;

	/* bad block mark storage */
	fdm->ecc_size = 1;
}

static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
				     struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);

	if (mtd->writesize == 512) {
		bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
	} else {
		bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
		bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
		bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
	}
}

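/*
 * Round the OOB space available per sector down to the nearest spare size
 * supported by the controller. The supported sizes are defined per 512-byte
 * sector, so the value is halved before the lookup and doubled again
 * afterwards when the ECC sector size is 1024 bytes.
 */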
static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
			48, 49, 50, 51, 52, 62, 63, 64};
	u32 eccsteps, i;

	eccsteps = mtd->writesize / nand->ecc.size;
	*sps = mtd->oobsize / eccsteps;

	if (nand->ecc.size == 1024)
		*sps >>= 1;

	for (i = 0; i < ARRAY_SIZE(spare); i++) {
		if (*sps <= spare[i]) {
			if (!i)
				*sps = spare[i];
			else if (*sps != spare[i])
				*sps = spare[i - 1];
			break;
		}
	}

	if (i >= ARRAY_SIZE(spare))
		*sps = spare[ARRAY_SIZE(spare) - 1];

	if (nand->ecc.size == 1024)
		*sps <<= 1;
}

static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	u32 spare;
	int free;

	/* support only ecc hw mode */
	if (nand->ecc.mode != NAND_ECC_HW) {
		dev_err(dev, "ecc.mode not supported\n");
		return -EINVAL;
	}

	/* if optional dt settings not present */
	if (!nand->ecc.size || !nand->ecc.strength) {
		/* use datasheet requirements */
		nand->ecc.strength = nand->ecc_strength_ds;
		nand->ecc.size = nand->ecc_step_ds;

		/*
		 * align eccstrength and eccsize
		 * this controller only supports 512 and 1024 sizes
		 */
		if (nand->ecc.size < 1024) {
			if (mtd->writesize > 512) {
				nand->ecc.size = 1024;
				nand->ecc.strength <<= 1;
			} else {
				nand->ecc.size = 512;
			}
		} else {
			nand->ecc.size = 1024;
		}

		mtk_nfc_set_spare_per_sector(&spare, mtd);

		/* calculate oob bytes except ecc parity data */
		free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
		free = spare - free;

		/*
		 * enhance ecc strength if oob left is bigger than max FDM size
		 * or reduce ecc strength if oob size is not enough for ecc
		 * parity data.
		 */
		if (free > NFI_FDM_MAX_SIZE) {
			spare -= NFI_FDM_MAX_SIZE;
			nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
		} else if (free < 0) {
			spare -= NFI_FDM_MIN_SIZE;
			nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
		}
	}

	mtk_ecc_adjust_strength(&nand->ecc.strength);

	dev_info(dev, "eccsize %d eccstrength %d\n",
		 nand->ecc.size, nand->ecc.strength);

	return 0;
}

static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
				  struct device_node *np)
{
	struct mtk_nfc_nand_chip *chip;
	struct nand_chip *nand;
	struct mtd_info *mtd;
	int nsels, len;
	u32 tmp;
	int ret;
	int i;

	if (!of_get_property(np, "reg", &nsels))
		return -ENODEV;

	nsels /= sizeof(u32);
	if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
		dev_err(dev, "invalid reg property size %d\n", nsels);
		return -EINVAL;
	}

	chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
			    GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->nsels = nsels;
	for (i = 0; i < nsels; i++) {
		ret = of_property_read_u32_index(np, "reg", i, &tmp);
		if (ret) {
			dev_err(dev, "reg property failure : %d\n", ret);
			return ret;
		}
		chip->sels[i] = tmp;
	}

	nand = &chip->nand;
	nand->controller = &nfc->controller;

	nand_set_flash_node(nand, np);
	nand_set_controller_data(nand, nfc);

	nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
	nand->dev_ready = mtk_nfc_dev_ready;
	nand->select_chip = mtk_nfc_select_chip;
	nand->write_byte = mtk_nfc_write_byte;
	nand->write_buf = mtk_nfc_write_buf;
	nand->read_byte = mtk_nfc_read_byte;
	nand->read_buf = mtk_nfc_read_buf;
	nand->cmd_ctrl = mtk_nfc_cmd_ctrl;

	/* set default mode in case dt entry is missing */
	nand->ecc.mode = NAND_ECC_HW;

	nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
	nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
	nand->ecc.write_page = mtk_nfc_write_page_hwecc;
	nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
	nand->ecc.write_oob = mtk_nfc_write_oob_std;

	nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
	nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
	nand->ecc.read_page = mtk_nfc_read_page_hwecc;
	nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
	nand->ecc.read_oob = mtk_nfc_read_oob_std;

	mtd = nand_to_mtd(nand);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;
	mtd->name = MTK_NAME;
	mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);

	mtk_nfc_hw_init(nfc);

	ret = nand_scan_ident(mtd, nsels, NULL);
	if (ret)
		return ret;

	/* store bbt magic in page, because the OOB is not protected */
	if (nand->bbt_options & NAND_BBT_USE_FLASH)
		nand->bbt_options |= NAND_BBT_NO_OOB;

	ret = mtk_nfc_ecc_init(dev, mtd);
	if (ret)
		return -EINVAL;

	if (nand->options & NAND_BUSWIDTH_16) {
		dev_err(dev, "16bits buswidth not supported");
		return -EINVAL;
	}

	mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
	mtk_nfc_set_fdm(&chip->fdm, mtd);
	mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);

	len = mtd->writesize + mtd->oobsize;
	nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
	if (!nfc->buffer)
		return -ENOMEM;

	ret = nand_scan_tail(mtd);
	if (ret)
		return ret;

	ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
	if (ret) {
		dev_err(dev, "mtd parse partition error\n");
		nand_release(mtd);
		return ret;
	}

	list_add_tail(&chip->node, &nfc->chips);

	return 0;
}

static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
{
	struct device_node *np = dev->of_node;
	struct device_node *nand_np;
	int ret;

	for_each_child_of_node(np, nand_np) {
		ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
		if (ret) {
			of_node_put(nand_np);
			return ret;
		}
	}

	return 0;
}

static int mtk_nfc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct mtk_nfc *nfc;
	struct resource *res;
	int ret, irq;

	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	spin_lock_init(&nfc->controller.lock);
	init_waitqueue_head(&nfc->controller.wq);
	INIT_LIST_HEAD(&nfc->chips);

	/* probe defer if not ready */
	nfc->ecc = of_mtk_ecc_get(np);
	if (IS_ERR(nfc->ecc))
		return PTR_ERR(nfc->ecc);
	else if (!nfc->ecc)
		return -ENODEV;

	nfc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nfc->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(nfc->regs)) {
		ret = PTR_ERR(nfc->regs);
		dev_err(dev, "no nfi base\n");
		goto release_ecc;
	}

	nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
	if (IS_ERR(nfc->clk.nfi_clk)) {
		dev_err(dev, "no clk\n");
		ret = PTR_ERR(nfc->clk.nfi_clk);
		goto release_ecc;
	}

	nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
	if (IS_ERR(nfc->clk.pad_clk)) {
		dev_err(dev, "no pad clk\n");
		ret = PTR_ERR(nfc->clk.pad_clk);
		goto release_ecc;
	}

	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
	if (ret)
		goto release_ecc;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no nfi irq resource\n");
		ret = -EINVAL;
		goto clk_disable;
	}

	ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
	if (ret) {
		dev_err(dev, "failed to request nfi irq\n");
		goto clk_disable;
	}

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "failed to set dma mask\n");
		goto clk_disable;
	}

	platform_set_drvdata(pdev, nfc);

	ret = mtk_nfc_nand_chips_init(dev, nfc);
	if (ret) {
		dev_err(dev, "failed to init nand chips\n");
		goto clk_disable;
	}

	return 0;

clk_disable:
	mtk_nfc_disable_clk(&nfc->clk);

release_ecc:
	mtk_ecc_release(nfc->ecc);

	return ret;
}

static int mtk_nfc_remove(struct platform_device *pdev)
{
	struct mtk_nfc *nfc = platform_get_drvdata(pdev);
	struct mtk_nfc_nand_chip *chip;

	while (!list_empty(&nfc->chips)) {
		chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
					node);
		nand_release(nand_to_mtd(&chip->nand));
		list_del(&chip->node);
	}

	mtk_ecc_release(nfc->ecc);
	mtk_nfc_disable_clk(&nfc->clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_nfc_suspend(struct device *dev)
{
	struct mtk_nfc *nfc = dev_get_drvdata(dev);

	mtk_nfc_disable_clk(&nfc->clk);

	return 0;
}

static int mtk_nfc_resume(struct device *dev)
{
	struct mtk_nfc *nfc = dev_get_drvdata(dev);
	struct mtk_nfc_nand_chip *chip;
	struct nand_chip *nand;
	struct mtd_info *mtd;
	int ret;
	u32 i;

	udelay(200);

	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
	if (ret)
		return ret;

	mtk_nfc_hw_init(nfc);

	/* reset NAND chip if VCC was powered off */
	list_for_each_entry(chip, &nfc->chips, node) {
		nand = &chip->nand;
		mtd = nand_to_mtd(nand);
		for (i = 0; i < chip->nsels; i++) {
			nand->select_chip(mtd, i);
			nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
		}
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
#endif

static const struct of_device_id mtk_nfc_id_table[] = {
	{ .compatible = "mediatek,mt2701-nfc" },
	{}
};
MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);

static struct platform_driver mtk_nfc_driver = {
	.probe  = mtk_nfc_probe,
	.remove = mtk_nfc_remove,
	.driver = {
		.name  = MTK_NAME,
		.of_match_table = mtk_nfc_id_table,
#ifdef CONFIG_PM_SLEEP
		.pm = &mtk_nfc_pm_ops,
#endif
	},
};

module_platform_driver(mtk_nfc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");