   1/*
   2 *  Overview:
   3 *   This is the generic MTD driver for NAND flash devices. It should be
   4 *   capable of working with almost all NAND chips currently available.
   5 *
   6 *	Additional technical information is available on
   7 *	http://www.linux-mtd.infradead.org/doc/nand.html
   8 *
   9 *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
  10 *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
  11 *
  12 *  Credits:
  13 *	David Woodhouse for adding multichip support
  14 *
  15 *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
  16 *	rework for 2K page size chips
  17 *
  18 *  TODO:
  19 *	Enable cached programming for 2k page size chips
   20 *	Check if mtd->ecctype should be set to MTD_ECC_HW
   21 *	when we have HW ECC support.
   22 *	The BBT is not serialized; this has to be fixed.
  23 *
  24 * This program is free software; you can redistribute it and/or modify
  25 * it under the terms of the GNU General Public License version 2 as
  26 * published by the Free Software Foundation.
  27 *
  28 */
  29
  30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  31
  32#include <linux/module.h>
  33#include <linux/delay.h>
  34#include <linux/errno.h>
  35#include <linux/err.h>
  36#include <linux/sched.h>
  37#include <linux/slab.h>
  38#include <linux/mm.h>
  39#include <linux/types.h>
  40#include <linux/mtd/mtd.h>
  41#include <linux/mtd/nand.h>
  42#include <linux/mtd/nand_ecc.h>
  43#include <linux/mtd/nand_bch.h>
  44#include <linux/interrupt.h>
  45#include <linux/bitops.h>
  46#include <linux/io.h>
  47#include <linux/mtd/partitions.h>
  48#include <linux/of.h>
  49
  50static int nand_get_device(struct mtd_info *mtd, int new_state);
  51
  52static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
  53			     struct mtd_oob_ops *ops);
  54
  55/* Define default oob placement schemes for large and small page devices */
  56static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
  57				 struct mtd_oob_region *oobregion)
  58{
  59	struct nand_chip *chip = mtd_to_nand(mtd);
  60	struct nand_ecc_ctrl *ecc = &chip->ecc;
  61
  62	if (section > 1)
  63		return -ERANGE;
  64
  65	if (!section) {
  66		oobregion->offset = 0;
  67		oobregion->length = 4;
  68	} else {
  69		oobregion->offset = 6;
  70		oobregion->length = ecc->total - 4;
  71	}
  72
  73	return 0;
  74}
  75
  76static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
  77				  struct mtd_oob_region *oobregion)
  78{
  79	if (section > 1)
  80		return -ERANGE;
  81
  82	if (mtd->oobsize == 16) {
  83		if (section)
  84			return -ERANGE;
  85
  86		oobregion->length = 8;
  87		oobregion->offset = 8;
  88	} else {
  89		oobregion->length = 2;
  90		if (!section)
  91			oobregion->offset = 3;
  92		else
  93			oobregion->offset = 6;
  94	}
  95
  96	return 0;
  97}
  98
  99const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
 100	.ecc = nand_ooblayout_ecc_sp,
 101	.free = nand_ooblayout_free_sp,
 102};
 103EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
 104
 105static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
 106				 struct mtd_oob_region *oobregion)
 107{
 108	struct nand_chip *chip = mtd_to_nand(mtd);
 109	struct nand_ecc_ctrl *ecc = &chip->ecc;
 110
 111	if (section)
 112		return -ERANGE;
 113
 114	oobregion->length = ecc->total;
 115	oobregion->offset = mtd->oobsize - oobregion->length;
 116
 117	return 0;
 118}
 119
 120static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
 121				  struct mtd_oob_region *oobregion)
 122{
 123	struct nand_chip *chip = mtd_to_nand(mtd);
 124	struct nand_ecc_ctrl *ecc = &chip->ecc;
 125
 126	if (section)
 127		return -ERANGE;
 128
 129	oobregion->length = mtd->oobsize - ecc->total - 2;
 130	oobregion->offset = 2;
 131
 132	return 0;
 133}
 134
 135const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
 136	.ecc = nand_ooblayout_ecc_lp,
 137	.free = nand_ooblayout_free_lp,
 138};
 139EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
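/*
 * Editorial sketch (not part of the original driver): how a caller could walk
 * the ECC and free OOB regions that the layout callbacks above expose through
 * the generic mtd_ooblayout_ecc()/mtd_ooblayout_free() accessors. Iteration
 * stops once a section index past the last region returns -ERANGE. The
 * function name is hypothetical.
 */
static void __maybe_unused example_dump_oob_layout(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section;

	for (section = 0; !mtd_ooblayout_ecc(mtd, section, &region); section++)
		pr_debug("ECC region %d: offset %u, length %u\n",
			 section, region.offset, region.length);

	for (section = 0; !mtd_ooblayout_free(mtd, section, &region); section++)
		pr_debug("free region %d: offset %u, length %u\n",
			 section, region.offset, region.length);
}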
 140
 141static int check_offs_len(struct mtd_info *mtd,
 142					loff_t ofs, uint64_t len)
 143{
 144	struct nand_chip *chip = mtd_to_nand(mtd);
 145	int ret = 0;
 146
 147	/* Start address must align on block boundary */
 148	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
 149		pr_debug("%s: unaligned address\n", __func__);
 150		ret = -EINVAL;
 151	}
 152
 153	/* Length must align on block boundary */
 154	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
 155		pr_debug("%s: length not block aligned\n", __func__);
 156		ret = -EINVAL;
 157	}
 158
 159	return ret;
 160}
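/*
 * Worked example (editorial note): with 128KiB erase blocks,
 * chip->phys_erase_shift is 17 and the alignment mask is
 * (1ULL << 17) - 1 = 0x1ffff. An offset of 0x40000 passes the check
 * (0x40000 & 0x1ffff == 0), while 0x41000 does not (the AND yields 0x1000),
 * so check_offs_len() returns -EINVAL for the latter.
 */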
 161
 162/**
 163 * nand_release_device - [GENERIC] release chip
 164 * @mtd: MTD device structure
 165 *
 166 * Release chip lock and wake up anyone waiting on the device.
 167 */
 168static void nand_release_device(struct mtd_info *mtd)
 169{
 170	struct nand_chip *chip = mtd_to_nand(mtd);
 171
 172	/* Release the controller and the chip */
 173	spin_lock(&chip->controller->lock);
 174	chip->controller->active = NULL;
 175	chip->state = FL_READY;
 176	wake_up(&chip->controller->wq);
 177	spin_unlock(&chip->controller->lock);
 178}
 179
 180/**
 181 * nand_read_byte - [DEFAULT] read one byte from the chip
 182 * @mtd: MTD device structure
 183 *
 184 * Default read function for 8bit buswidth
 185 */
 186static uint8_t nand_read_byte(struct mtd_info *mtd)
 187{
 188	struct nand_chip *chip = mtd_to_nand(mtd);
 189	return readb(chip->IO_ADDR_R);
 190}
 191
 192/**
  193 * nand_read_byte16 - [DEFAULT] read one byte from the chip, endianness aware
 194 * @mtd: MTD device structure
 195 *
 196 * Default read function for 16bit buswidth with endianness conversion.
 197 *
 198 */
 199static uint8_t nand_read_byte16(struct mtd_info *mtd)
 200{
 201	struct nand_chip *chip = mtd_to_nand(mtd);
 202	return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
 203}
 204
 205/**
 206 * nand_read_word - [DEFAULT] read one word from the chip
 207 * @mtd: MTD device structure
 208 *
 209 * Default read function for 16bit buswidth without endianness conversion.
 210 */
 211static u16 nand_read_word(struct mtd_info *mtd)
 212{
 213	struct nand_chip *chip = mtd_to_nand(mtd);
 214	return readw(chip->IO_ADDR_R);
 215}
 216
 217/**
 218 * nand_select_chip - [DEFAULT] control CE line
 219 * @mtd: MTD device structure
 220 * @chipnr: chipnumber to select, -1 for deselect
 221 *
 222 * Default select function for 1 chip devices.
 223 */
 224static void nand_select_chip(struct mtd_info *mtd, int chipnr)
 225{
 226	struct nand_chip *chip = mtd_to_nand(mtd);
 227
 228	switch (chipnr) {
 229	case -1:
 230		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
 231		break;
 232	case 0:
 233		break;
 234
 235	default:
 236		BUG();
 237	}
 238}
 239
 240/**
 241 * nand_write_byte - [DEFAULT] write single byte to chip
 242 * @mtd: MTD device structure
 243 * @byte: value to write
 244 *
 245 * Default function to write a byte to I/O[7:0]
 246 */
 247static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
 248{
 249	struct nand_chip *chip = mtd_to_nand(mtd);
 250
 251	chip->write_buf(mtd, &byte, 1);
 252}
 253
 254/**
 255 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
 256 * @mtd: MTD device structure
 257 * @byte: value to write
 258 *
 259 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
 260 */
 261static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
 262{
 263	struct nand_chip *chip = mtd_to_nand(mtd);
 264	uint16_t word = byte;
 265
 266	/*
 267	 * It's not entirely clear what should happen to I/O[15:8] when writing
 268	 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
 269	 *
 270	 *    When the host supports a 16-bit bus width, only data is
 271	 *    transferred at the 16-bit width. All address and command line
 272	 *    transfers shall use only the lower 8-bits of the data bus. During
 273	 *    command transfers, the host may place any value on the upper
 274	 *    8-bits of the data bus. During address transfers, the host shall
 275	 *    set the upper 8-bits of the data bus to 00h.
 276	 *
 277	 * One user of the write_byte callback is nand_onfi_set_features. The
 278	 * four parameters are specified to be written to I/O[7:0], but this is
 279	 * neither an address nor a command transfer. Let's assume a 0 on the
 280	 * upper I/O lines is OK.
 281	 */
 282	chip->write_buf(mtd, (uint8_t *)&word, 2);
 283}
 284
 285/**
 286 * nand_write_buf - [DEFAULT] write buffer to chip
 287 * @mtd: MTD device structure
 288 * @buf: data buffer
 289 * @len: number of bytes to write
 290 *
 291 * Default write function for 8bit buswidth.
 292 */
 293static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 294{
 295	struct nand_chip *chip = mtd_to_nand(mtd);
 296
 297	iowrite8_rep(chip->IO_ADDR_W, buf, len);
 298}
 299
 300/**
 301 * nand_read_buf - [DEFAULT] read chip data into buffer
 302 * @mtd: MTD device structure
  303 * @buf: buffer to store data
 304 * @len: number of bytes to read
 305 *
 306 * Default read function for 8bit buswidth.
 307 */
 308static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 309{
 310	struct nand_chip *chip = mtd_to_nand(mtd);
 311
 312	ioread8_rep(chip->IO_ADDR_R, buf, len);
 313}
 314
 315/**
 316 * nand_write_buf16 - [DEFAULT] write buffer to chip
 317 * @mtd: MTD device structure
 318 * @buf: data buffer
 319 * @len: number of bytes to write
 320 *
 321 * Default write function for 16bit buswidth.
 322 */
 323static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
 324{
 325	struct nand_chip *chip = mtd_to_nand(mtd);
 326	u16 *p = (u16 *) buf;
 327
 328	iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
 329}
 330
 331/**
 332 * nand_read_buf16 - [DEFAULT] read chip data into buffer
 333 * @mtd: MTD device structure
  334 * @buf: buffer to store data
 335 * @len: number of bytes to read
 336 *
 337 * Default read function for 16bit buswidth.
 338 */
 339static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
 340{
 341	struct nand_chip *chip = mtd_to_nand(mtd);
 342	u16 *p = (u16 *) buf;
 343
 344	ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
 345}
 346
 347/**
 348 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 349 * @mtd: MTD device structure
 350 * @ofs: offset from device start
 351 *
  352 * Check if the block is bad.
 353 */
 354static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
 355{
 356	int page, res = 0, i = 0;
 357	struct nand_chip *chip = mtd_to_nand(mtd);
 358	u16 bad;
 359
 360	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
 361		ofs += mtd->erasesize - mtd->writesize;
 362
 363	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
 364
 365	do {
 366		if (chip->options & NAND_BUSWIDTH_16) {
 367			chip->cmdfunc(mtd, NAND_CMD_READOOB,
 368					chip->badblockpos & 0xFE, page);
 369			bad = cpu_to_le16(chip->read_word(mtd));
 370			if (chip->badblockpos & 0x1)
 371				bad >>= 8;
 372			else
 373				bad &= 0xFF;
 374		} else {
 375			chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
 376					page);
 377			bad = chip->read_byte(mtd);
 378		}
 379
 380		if (likely(chip->badblockbits == 8))
 381			res = bad != 0xFF;
 382		else
 383			res = hweight8(bad) < chip->badblockbits;
 384		ofs += mtd->writesize;
 385		page = (int)(ofs >> chip->page_shift) & chip->pagemask;
 386		i++;
 387	} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
 388
 389	return res;
 390}
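/*
 * Worked example (editorial note): on a small-page chip with a 16-bit bus
 * and chip->badblockpos = 5, the column sent with READOOB is rounded down to
 * the even offset 4 (badblockpos & 0xFE). The 16-bit word read back covers
 * OOB bytes 4 and 5; since badblockpos is odd, the marker is taken from the
 * upper byte (bad >>= 8) before it is compared against 0xFF.
 */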
 391
 392/**
 393 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 394 * @mtd: MTD device structure
 395 * @ofs: offset from device start
 396 *
 397 * This is the default implementation, which can be overridden by a hardware
 398 * specific driver. It provides the details for writing a bad block marker to a
 399 * block.
 400 */
 401static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
 402{
 403	struct nand_chip *chip = mtd_to_nand(mtd);
 404	struct mtd_oob_ops ops;
 405	uint8_t buf[2] = { 0, 0 };
 406	int ret = 0, res, i = 0;
 407
 408	memset(&ops, 0, sizeof(ops));
 409	ops.oobbuf = buf;
 410	ops.ooboffs = chip->badblockpos;
 411	if (chip->options & NAND_BUSWIDTH_16) {
 412		ops.ooboffs &= ~0x01;
 413		ops.len = ops.ooblen = 2;
 414	} else {
 415		ops.len = ops.ooblen = 1;
 416	}
 417	ops.mode = MTD_OPS_PLACE_OOB;
 418
 419	/* Write to first/last page(s) if necessary */
 420	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
 421		ofs += mtd->erasesize - mtd->writesize;
 422	do {
 423		res = nand_do_write_oob(mtd, ofs, &ops);
 424		if (!ret)
 425			ret = res;
 426
 427		i++;
 428		ofs += mtd->writesize;
 429	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
 430
 431	return ret;
 432}
 433
 434/**
 435 * nand_block_markbad_lowlevel - mark a block bad
 436 * @mtd: MTD device structure
 437 * @ofs: offset from device start
 438 *
 439 * This function performs the generic NAND bad block marking steps (i.e., bad
 440 * block table(s) and/or marker(s)). We only allow the hardware driver to
 441 * specify how to write bad block markers to OOB (chip->block_markbad).
 442 *
 443 * We try operations in the following order:
 444 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 445 *  (2) write bad block marker to OOB area of affected block (unless flag
 446 *      NAND_BBT_NO_OOB_BBM is present)
 447 *  (3) update the BBT
  448 * Note that we retain the first error encountered in (2) or (3), finish the
  449 * procedures, and return that error at the end.
  450 */
 451static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
 452{
 453	struct nand_chip *chip = mtd_to_nand(mtd);
 454	int res, ret = 0;
 455
 456	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
 457		struct erase_info einfo;
 458
 459		/* Attempt erase before marking OOB */
 460		memset(&einfo, 0, sizeof(einfo));
 461		einfo.mtd = mtd;
 462		einfo.addr = ofs;
 463		einfo.len = 1ULL << chip->phys_erase_shift;
 464		nand_erase_nand(mtd, &einfo, 0);
 465
 466		/* Write bad block marker to OOB */
 467		nand_get_device(mtd, FL_WRITING);
 468		ret = chip->block_markbad(mtd, ofs);
 469		nand_release_device(mtd);
 470	}
 471
 472	/* Mark block bad in BBT */
 473	if (chip->bbt) {
 474		res = nand_markbad_bbt(mtd, ofs);
 475		if (!ret)
 476			ret = res;
 477	}
 478
 479	if (!ret)
 480		mtd->ecc_stats.badblocks++;
 481
 482	return ret;
 483}
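/*
 * Editorial sketch: a higher layer that hits a persistent write failure would
 * typically retire the block through the public MTD API; mtd_block_markbad()
 * ends up calling into the NAND marking path shown above. The function name
 * and the rounding of @ofs are illustrative assumptions.
 */
static int __maybe_unused example_retire_block(struct mtd_info *mtd, loff_t ofs)
{
	int ret;

	/* Round down to the start of the erase block containing @ofs */
	ofs &= ~((loff_t)mtd->erasesize - 1);

	ret = mtd_block_markbad(mtd, ofs);
	if (ret)
		pr_warn("failed to mark block at 0x%llx bad: %d\n",
			(unsigned long long)ofs, ret);

	return ret;
}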
 484
 485/**
 486 * nand_check_wp - [GENERIC] check if the chip is write protected
 487 * @mtd: MTD device structure
 488 *
  489 * Check if the device is write protected. The function expects that the
  490 * device is already selected.
 491 */
 492static int nand_check_wp(struct mtd_info *mtd)
 493{
 494	struct nand_chip *chip = mtd_to_nand(mtd);
 495
 496	/* Broken xD cards report WP despite being writable */
 497	if (chip->options & NAND_BROKEN_XD)
 498		return 0;
 499
 500	/* Check the WP bit */
 501	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
 502	return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
 503}
 504
 505/**
 506 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
 507 * @mtd: MTD device structure
 508 * @ofs: offset from device start
 509 *
 510 * Check if the block is marked as reserved.
 511 */
 512static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
 513{
 514	struct nand_chip *chip = mtd_to_nand(mtd);
 515
 516	if (!chip->bbt)
 517		return 0;
 518	/* Return info from the table */
 519	return nand_isreserved_bbt(mtd, ofs);
 520}
 521
 522/**
 523 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
 524 * @mtd: MTD device structure
 525 * @ofs: offset from device start
  526 * @allowbbt: 1 if it is allowed to access the BBT area
 527 *
  528 * Check if the block is bad, either by reading the bad block table or by
  529 * calling the scan function.
 530 */
 531static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
 532{
 533	struct nand_chip *chip = mtd_to_nand(mtd);
 534
 535	if (!chip->bbt)
 536		return chip->block_bad(mtd, ofs);
 537
 538	/* Return info from the table */
 539	return nand_isbad_bbt(mtd, ofs, allowbbt);
 540}
 541
 542/**
 543 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
 544 * @mtd: MTD device structure
 545 * @timeo: Timeout
 546 *
 547 * Helper function for nand_wait_ready used when needing to wait in interrupt
 548 * context.
 549 */
 550static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
 551{
 552	struct nand_chip *chip = mtd_to_nand(mtd);
 553	int i;
 554
 555	/* Wait for the device to get ready */
 556	for (i = 0; i < timeo; i++) {
 557		if (chip->dev_ready(mtd))
 558			break;
 559		touch_softlockup_watchdog();
 560		mdelay(1);
 561	}
 562}
 563
 564/**
 565 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
 566 * @mtd: MTD device structure
 567 *
 568 * Wait for the ready pin after a command, and warn if a timeout occurs.
 569 */
 570void nand_wait_ready(struct mtd_info *mtd)
 571{
 572	struct nand_chip *chip = mtd_to_nand(mtd);
 573	unsigned long timeo = 400;
 574
 575	if (in_interrupt() || oops_in_progress)
 576		return panic_nand_wait_ready(mtd, timeo);
 577
 578	/* Wait until command is processed or timeout occurs */
 579	timeo = jiffies + msecs_to_jiffies(timeo);
 580	do {
 581		if (chip->dev_ready(mtd))
 582			return;
 583		cond_resched();
 584	} while (time_before(jiffies, timeo));
 585
 586	if (!chip->dev_ready(mtd))
 587		pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
 588}
 589EXPORT_SYMBOL_GPL(nand_wait_ready);
 590
 591/**
 592 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
 593 * @mtd: MTD device structure
 594 * @timeo: Timeout in ms
 595 *
 596 * Wait for status ready (i.e. command done) or timeout.
 597 */
 598static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
 599{
 600	register struct nand_chip *chip = mtd_to_nand(mtd);
 601
 602	timeo = jiffies + msecs_to_jiffies(timeo);
 603	do {
 604		if ((chip->read_byte(mtd) & NAND_STATUS_READY))
 605			break;
 606		touch_softlockup_watchdog();
 607	} while (time_before(jiffies, timeo));
  608}
 609
 610/**
 611 * nand_command - [DEFAULT] Send command to NAND device
 612 * @mtd: MTD device structure
 613 * @command: the command to be sent
 614 * @column: the column address for this command, -1 if none
 615 * @page_addr: the page address for this command, -1 if none
 616 *
 617 * Send command to NAND device. This function is used for small page devices
 618 * (512 Bytes per page).
 619 */
 620static void nand_command(struct mtd_info *mtd, unsigned int command,
 621			 int column, int page_addr)
 622{
 623	register struct nand_chip *chip = mtd_to_nand(mtd);
 624	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
 625
 626	/* Write out the command to the device */
 627	if (command == NAND_CMD_SEQIN) {
 628		int readcmd;
 629
 630		if (column >= mtd->writesize) {
 631			/* OOB area */
 632			column -= mtd->writesize;
 633			readcmd = NAND_CMD_READOOB;
 634		} else if (column < 256) {
 635			/* First 256 bytes --> READ0 */
 636			readcmd = NAND_CMD_READ0;
 637		} else {
 638			column -= 256;
 639			readcmd = NAND_CMD_READ1;
 640		}
 641		chip->cmd_ctrl(mtd, readcmd, ctrl);
 642		ctrl &= ~NAND_CTRL_CHANGE;
 643	}
 644	chip->cmd_ctrl(mtd, command, ctrl);
 645
 646	/* Address cycle, when necessary */
 647	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
 648	/* Serially input address */
 649	if (column != -1) {
 650		/* Adjust columns for 16 bit buswidth */
 651		if (chip->options & NAND_BUSWIDTH_16 &&
 652				!nand_opcode_8bits(command))
 653			column >>= 1;
 654		chip->cmd_ctrl(mtd, column, ctrl);
 655		ctrl &= ~NAND_CTRL_CHANGE;
 656	}
 657	if (page_addr != -1) {
 658		chip->cmd_ctrl(mtd, page_addr, ctrl);
 659		ctrl &= ~NAND_CTRL_CHANGE;
 660		chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
 661		/* One more address cycle for devices > 32MiB */
 662		if (chip->chipsize > (32 << 20))
 663			chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
 664	}
 665	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
 666
 667	/*
  668	 * Program and erase have their own busy handlers; status and sequential
  669	 * in need no delay.
 670	 */
 671	switch (command) {
 672
 673	case NAND_CMD_PAGEPROG:
 674	case NAND_CMD_ERASE1:
 675	case NAND_CMD_ERASE2:
 676	case NAND_CMD_SEQIN:
 677	case NAND_CMD_STATUS:
 678		return;
 679
 680	case NAND_CMD_RESET:
 681		if (chip->dev_ready)
 682			break;
 683		udelay(chip->chip_delay);
 684		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
 685			       NAND_CTRL_CLE | NAND_CTRL_CHANGE);
 686		chip->cmd_ctrl(mtd,
 687			       NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
  688		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
 689		nand_wait_status_ready(mtd, 250);
 690		return;
 691
 692		/* This applies to read commands */
 693	default:
 694		/*
 695		 * If we don't have access to the busy pin, we apply the given
 696		 * command delay
 697		 */
 698		if (!chip->dev_ready) {
 699			udelay(chip->chip_delay);
 700			return;
 701		}
 702	}
 703	/*
 704	 * Apply this short delay always to ensure that we do wait tWB in
 705	 * any case on any machine.
 706	 */
 707	ndelay(100);
 708
 709	nand_wait_ready(mtd);
 710}
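/*
 * Worked example (editorial note): on a small-page device larger than 32MiB
 * with an 8-bit bus, chip->cmdfunc(mtd, NAND_CMD_READOOB, 5, 0x1234) makes
 * nand_command() latch the READOOB opcode and then drive the address cycles
 * 0x05 (column) followed by 0x34, 0x12, 0x00 (page address, low byte first).
 * It then waits for the ready pin via nand_wait_ready() when a ->dev_ready
 * callback is available, or applies chip->chip_delay otherwise.
 */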
 711
 712static void nand_ccs_delay(struct nand_chip *chip)
 713{
 714	/*
 715	 * The controller already takes care of waiting for tCCS when the RNDIN
 716	 * or RNDOUT command is sent, return directly.
 717	 */
 718	if (!(chip->options & NAND_WAIT_TCCS))
 719		return;
 720
 721	/*
 722	 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
 723	 * (which should be safe for all NANDs).
 724	 */
 725	if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
 726		ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
 727	else
 728		ndelay(500);
 729}
 730
 731/**
 732 * nand_command_lp - [DEFAULT] Send command to NAND large page device
 733 * @mtd: MTD device structure
 734 * @command: the command to be sent
 735 * @column: the column address for this command, -1 if none
 736 * @page_addr: the page address for this command, -1 if none
 737 *
 738 * Send command to NAND device. This is the version for the new large page
 739 * devices. We don't have the separate regions as we have in the small page
 740 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
 741 */
 742static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
 743			    int column, int page_addr)
 744{
 745	register struct nand_chip *chip = mtd_to_nand(mtd);
 746
 747	/* Emulate NAND_CMD_READOOB */
 748	if (command == NAND_CMD_READOOB) {
 749		column += mtd->writesize;
 750		command = NAND_CMD_READ0;
 751	}
 752
 753	/* Command latch cycle */
 754	chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
 755
 756	if (column != -1 || page_addr != -1) {
 757		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
 758
 759		/* Serially input address */
 760		if (column != -1) {
 761			/* Adjust columns for 16 bit buswidth */
 762			if (chip->options & NAND_BUSWIDTH_16 &&
 763					!nand_opcode_8bits(command))
 764				column >>= 1;
 765			chip->cmd_ctrl(mtd, column, ctrl);
 766			ctrl &= ~NAND_CTRL_CHANGE;
 767
 768			/* Only output a single addr cycle for 8bits opcodes. */
 769			if (!nand_opcode_8bits(command))
 770				chip->cmd_ctrl(mtd, column >> 8, ctrl);
 771		}
 772		if (page_addr != -1) {
 773			chip->cmd_ctrl(mtd, page_addr, ctrl);
 774			chip->cmd_ctrl(mtd, page_addr >> 8,
 775				       NAND_NCE | NAND_ALE);
 776			/* One more address cycle for devices > 128MiB */
 777			if (chip->chipsize > (128 << 20))
 778				chip->cmd_ctrl(mtd, page_addr >> 16,
 779					       NAND_NCE | NAND_ALE);
 780		}
 781	}
 782	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
 783
 784	/*
  785	 * Program and erase have their own busy handlers; status and sequential
  786	 * in need no delay.
 787	 */
 788	switch (command) {
 789
 790	case NAND_CMD_CACHEDPROG:
 791	case NAND_CMD_PAGEPROG:
 792	case NAND_CMD_ERASE1:
 793	case NAND_CMD_ERASE2:
 794	case NAND_CMD_SEQIN:
 795	case NAND_CMD_STATUS:
 796		return;
 797
 798	case NAND_CMD_RNDIN:
 799		nand_ccs_delay(chip);
 800		return;
 801
 802	case NAND_CMD_RESET:
 803		if (chip->dev_ready)
 804			break;
 805		udelay(chip->chip_delay);
 806		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
 807			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
 808		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
 809			       NAND_NCE | NAND_CTRL_CHANGE);
  810		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
 811		nand_wait_status_ready(mtd, 250);
 812		return;
 813
 814	case NAND_CMD_RNDOUT:
 815		/* No ready / busy check necessary */
 816		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
 817			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
 818		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
 819			       NAND_NCE | NAND_CTRL_CHANGE);
 820
 821		nand_ccs_delay(chip);
 822		return;
 823
 824	case NAND_CMD_READ0:
 825		chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
 826			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
 827		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
 828			       NAND_NCE | NAND_CTRL_CHANGE);
 829
 830		/* This applies to read commands */
 831	default:
 832		/*
 833		 * If we don't have access to the busy pin, we apply the given
 834		 * command delay.
 835		 */
 836		if (!chip->dev_ready) {
 837			udelay(chip->chip_delay);
 838			return;
 839		}
 840	}
 841
 842	/*
 843	 * Apply this short delay always to ensure that we do wait tWB in
 844	 * any case on any machine.
 845	 */
 846	ndelay(100);
 847
 848	nand_wait_ready(mtd);
 849}
 850
 851/**
 852 * panic_nand_get_device - [GENERIC] Get chip for selected access
 853 * @chip: the nand chip descriptor
 854 * @mtd: MTD device structure
 855 * @new_state: the state which is requested
 856 *
 857 * Used when in panic, no locks are taken.
 858 */
 859static void panic_nand_get_device(struct nand_chip *chip,
 860		      struct mtd_info *mtd, int new_state)
 861{
 862	/* Hardware controller shared among independent devices */
 863	chip->controller->active = chip;
 864	chip->state = new_state;
 865}
 866
 867/**
 868 * nand_get_device - [GENERIC] Get chip for selected access
 869 * @mtd: MTD device structure
 870 * @new_state: the state which is requested
 871 *
 872 * Get the device and lock it for exclusive access
 873 */
 874static int
 875nand_get_device(struct mtd_info *mtd, int new_state)
 876{
 877	struct nand_chip *chip = mtd_to_nand(mtd);
 878	spinlock_t *lock = &chip->controller->lock;
 879	wait_queue_head_t *wq = &chip->controller->wq;
 880	DECLARE_WAITQUEUE(wait, current);
 881retry:
 882	spin_lock(lock);
 883
 884	/* Hardware controller shared among independent devices */
 885	if (!chip->controller->active)
 886		chip->controller->active = chip;
 887
 888	if (chip->controller->active == chip && chip->state == FL_READY) {
 889		chip->state = new_state;
 890		spin_unlock(lock);
 891		return 0;
 892	}
 893	if (new_state == FL_PM_SUSPENDED) {
 894		if (chip->controller->active->state == FL_PM_SUSPENDED) {
 895			chip->state = FL_PM_SUSPENDED;
 896			spin_unlock(lock);
 897			return 0;
 898		}
 899	}
 900	set_current_state(TASK_UNINTERRUPTIBLE);
 901	add_wait_queue(wq, &wait);
 902	spin_unlock(lock);
 903	schedule();
 904	remove_wait_queue(wq, &wait);
 905	goto retry;
 906}
 907
 908/**
 909 * panic_nand_wait - [GENERIC] wait until the command is done
 910 * @mtd: MTD device structure
 911 * @chip: NAND chip structure
 912 * @timeo: timeout
 913 *
 914 * Wait for command done. This is a helper function for nand_wait used when
 915 * we are in interrupt context. May happen when in panic and trying to write
 916 * an oops through mtdoops.
 917 */
 918static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
 919			    unsigned long timeo)
 920{
 921	int i;
 922	for (i = 0; i < timeo; i++) {
 923		if (chip->dev_ready) {
 924			if (chip->dev_ready(mtd))
 925				break;
 926		} else {
 927			if (chip->read_byte(mtd) & NAND_STATUS_READY)
 928				break;
 929		}
 930		mdelay(1);
 931	}
 932}
 933
 934/**
 935 * nand_wait - [DEFAULT] wait until the command is done
 936 * @mtd: MTD device structure
 937 * @chip: NAND chip structure
 938 *
 939 * Wait for command done. This applies to erase and program only.
 940 */
 941static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
 942{
 943
 944	int status;
 945	unsigned long timeo = 400;
 946
 947	/*
 948	 * Apply this short delay always to ensure that we do wait tWB in any
 949	 * case on any machine.
 950	 */
 951	ndelay(100);
 952
 953	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
 954
 955	if (in_interrupt() || oops_in_progress)
 956		panic_nand_wait(mtd, chip, timeo);
 957	else {
 958		timeo = jiffies + msecs_to_jiffies(timeo);
 959		do {
 960			if (chip->dev_ready) {
 961				if (chip->dev_ready(mtd))
 962					break;
 963			} else {
 964				if (chip->read_byte(mtd) & NAND_STATUS_READY)
 965					break;
 966			}
 967			cond_resched();
 968		} while (time_before(jiffies, timeo));
 969	}
 970
 971	status = (int)chip->read_byte(mtd);
  972	/* This can happen in case of a timeout or a buggy dev_ready */
 973	WARN_ON(!(status & NAND_STATUS_READY));
 974	return status;
 975}
 976
 977/**
 978 * nand_reset_data_interface - Reset data interface and timings
 979 * @chip: The NAND chip
 980 *
 981 * Reset the Data interface and timings to ONFI mode 0.
 982 *
 983 * Returns 0 for success or negative error code otherwise.
 984 */
 985static int nand_reset_data_interface(struct nand_chip *chip)
 986{
 987	struct mtd_info *mtd = nand_to_mtd(chip);
 988	const struct nand_data_interface *conf;
 989	int ret;
 990
 991	if (!chip->setup_data_interface)
 992		return 0;
 993
 994	/*
 995	 * The ONFI specification says:
 996	 * "
 997	 * To transition from NV-DDR or NV-DDR2 to the SDR data
 998	 * interface, the host shall use the Reset (FFh) command
 999	 * using SDR timing mode 0. A device in any timing mode is
1000	 * required to recognize Reset (FFh) command issued in SDR
1001	 * timing mode 0.
1002	 * "
1003	 *
1004	 * Configure the data interface in SDR mode and set the
1005	 * timings to timing mode 0.
1006	 */
1007
1008	conf = nand_get_default_data_interface();
1009	ret = chip->setup_data_interface(mtd, conf, false);
1010	if (ret)
1011		pr_err("Failed to configure data interface to SDR timing mode 0\n");
1012
1013	return ret;
1014}
1015
1016/**
1017 * nand_setup_data_interface - Setup the best data interface and timings
1018 * @chip: The NAND chip
1019 *
1020 * Find and configure the best data interface and NAND timings supported by
1021 * the chip and the driver.
1022 * First tries to retrieve supported timing modes from ONFI information,
1023 * and if the NAND chip does not support ONFI, relies on the
1024 * ->onfi_timing_mode_default specified in the nand_ids table.
1025 *
1026 * Returns 0 for success or negative error code otherwise.
1027 */
1028static int nand_setup_data_interface(struct nand_chip *chip)
1029{
1030	struct mtd_info *mtd = nand_to_mtd(chip);
1031	int ret;
1032
1033	if (!chip->setup_data_interface || !chip->data_interface)
1034		return 0;
1035
1036	/*
1037	 * Ensure the timing mode has been changed on the chip side
1038	 * before changing timings on the controller side.
1039	 */
1040	if (chip->onfi_version) {
1041		u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1042			chip->onfi_timing_mode_default,
1043		};
1044
1045		ret = chip->onfi_set_features(mtd, chip,
1046				ONFI_FEATURE_ADDR_TIMING_MODE,
1047				tmode_param);
1048		if (ret)
1049			goto err;
1050	}
1051
1052	ret = chip->setup_data_interface(mtd, chip->data_interface, false);
1053err:
1054	return ret;
1055}
1056
1057/**
1058 * nand_init_data_interface - find the best data interface and timings
1059 * @chip: The NAND chip
1060 *
1061 * Find the best data interface and NAND timings supported by the chip
1062 * and the driver.
1063 * First tries to retrieve supported timing modes from ONFI information,
1064 * and if the NAND chip does not support ONFI, relies on the
1065 * ->onfi_timing_mode_default specified in the nand_ids table. After this
1066 * function nand_chip->data_interface is initialized with the best timing mode
1067 * available.
1068 *
1069 * Returns 0 for success or negative error code otherwise.
1070 */
1071static int nand_init_data_interface(struct nand_chip *chip)
1072{
1073	struct mtd_info *mtd = nand_to_mtd(chip);
1074	int modes, mode, ret;
1075
1076	if (!chip->setup_data_interface)
1077		return 0;
1078
1079	/*
1080	 * First try to identify the best timings from ONFI parameters and
1081	 * if the NAND does not support ONFI, fallback to the default ONFI
1082	 * timing mode.
1083	 */
1084	modes = onfi_get_async_timing_mode(chip);
1085	if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1086		if (!chip->onfi_timing_mode_default)
1087			return 0;
1088
1089		modes = GENMASK(chip->onfi_timing_mode_default, 0);
1090	}
1091
1092	chip->data_interface = kzalloc(sizeof(*chip->data_interface),
1093				       GFP_KERNEL);
1094	if (!chip->data_interface)
1095		return -ENOMEM;
1096
1097	for (mode = fls(modes) - 1; mode >= 0; mode--) {
1098		ret = onfi_init_data_interface(chip, chip->data_interface,
1099					       NAND_SDR_IFACE, mode);
1100		if (ret)
1101			continue;
1102
1103		ret = chip->setup_data_interface(mtd, chip->data_interface,
1104						 true);
1105		if (!ret) {
1106			chip->onfi_timing_mode_default = mode;
1107			break;
1108		}
1109	}
1110
1111	return 0;
1112}
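/*
 * Worked example (editorial note): if the ONFI parameter page advertises
 * asynchronous timing modes 0-4, onfi_get_async_timing_mode() returns the
 * bitmask 0x1f; fls(0x1f) - 1 = 4, so mode 4 is tried first and the fastest
 * mode the controller accepts in the check_only pass becomes
 * chip->onfi_timing_mode_default. Non-ONFI chips fall back to the mask
 * GENMASK(chip->onfi_timing_mode_default, 0) from the nand_ids table.
 */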
1113
1114static void nand_release_data_interface(struct nand_chip *chip)
1115{
1116	kfree(chip->data_interface);
1117}
1118
1119/**
1120 * nand_reset - Reset and initialize a NAND device
1121 * @chip: The NAND chip
1122 * @chipnr: Internal die id
1123 *
1124 * Returns 0 for success or negative error code otherwise
1125 */
1126int nand_reset(struct nand_chip *chip, int chipnr)
1127{
1128	struct mtd_info *mtd = nand_to_mtd(chip);
1129	int ret;
1130
1131	ret = nand_reset_data_interface(chip);
1132	if (ret)
1133		return ret;
1134
1135	/*
1136	 * The CS line has to be released before we can apply the new NAND
1137	 * interface settings, hence this weird ->select_chip() dance.
1138	 */
1139	chip->select_chip(mtd, chipnr);
1140	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1141	chip->select_chip(mtd, -1);
1142
1143	chip->select_chip(mtd, chipnr);
1144	ret = nand_setup_data_interface(chip);
1145	chip->select_chip(mtd, -1);
1146	if (ret)
1147		return ret;
1148
1149	return 0;
1150}
1151
1152/**
1153 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1154 * @mtd: mtd info
1155 * @ofs: offset to start unlock from
1156 * @len: length to unlock
1157 * @invert: when = 0, unlock the range of blocks within the lower and
1158 *                    upper boundary address
1159 *          when = 1, unlock the range of blocks outside the boundaries
1160 *                    of the lower and upper boundary address
1161 *
 1162 * Returns unlock status.
1163 */
1164static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
1165					uint64_t len, int invert)
1166{
1167	int ret = 0;
1168	int status, page;
1169	struct nand_chip *chip = mtd_to_nand(mtd);
1170
1171	/* Submit address of first page to unlock */
1172	page = ofs >> chip->page_shift;
1173	chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
1174
1175	/* Submit address of last page to unlock */
1176	page = (ofs + len) >> chip->page_shift;
1177	chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
1178				(page | invert) & chip->pagemask);
1179
1180	/* Call wait ready function */
1181	status = chip->waitfunc(mtd, chip);
1182	/* See if device thinks it succeeded */
1183	if (status & NAND_STATUS_FAIL) {
1184		pr_debug("%s: error status = 0x%08x\n",
1185					__func__, status);
1186		ret = -EIO;
1187	}
1188
1189	return ret;
1190}
1191
1192/**
1193 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1194 * @mtd: mtd info
1195 * @ofs: offset to start unlock from
1196 * @len: length to unlock
1197 *
1198 * Returns unlock status.
1199 */
1200int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1201{
1202	int ret = 0;
1203	int chipnr;
1204	struct nand_chip *chip = mtd_to_nand(mtd);
1205
1206	pr_debug("%s: start = 0x%012llx, len = %llu\n",
1207			__func__, (unsigned long long)ofs, len);
1208
1209	if (check_offs_len(mtd, ofs, len))
1210		return -EINVAL;
1211
1212	/* Align to last block address if size addresses end of the device */
1213	if (ofs + len == mtd->size)
1214		len -= mtd->erasesize;
1215
1216	nand_get_device(mtd, FL_UNLOCKING);
1217
1218	/* Shift to get chip number */
1219	chipnr = ofs >> chip->chip_shift;
1220
1221	/*
1222	 * Reset the chip.
 1223	 * If we want to check write protection through READ STATUS (bit 7), we
 1224	 * must reset the chip first: some operations, e.g. erasing or
 1225	 * programming a locked block, can also clear bit 7 of the status
 1226	 * register.
1227	 */
1228	nand_reset(chip, chipnr);
1229
1230	chip->select_chip(mtd, chipnr);
1231
1232	/* Check, if it is write protected */
1233	if (nand_check_wp(mtd)) {
1234		pr_debug("%s: device is write protected!\n",
1235					__func__);
1236		ret = -EIO;
1237		goto out;
1238	}
1239
1240	ret = __nand_unlock(mtd, ofs, len, 0);
1241
1242out:
1243	chip->select_chip(mtd, -1);
1244	nand_release_device(mtd);
1245
1246	return ret;
1247}
1248EXPORT_SYMBOL(nand_unlock);
1249
1250/**
1251 * nand_lock - [REPLACEABLE] locks all blocks present in the device
1252 * @mtd: mtd info
 1253 * @ofs: offset to start lock from
 1254 * @len: length to lock
1255 *
 1256 * This feature is not supported in many NAND parts. 'Micron' NAND parts do
 1257 * have this feature, but it only allows locking all blocks, not a specified
 1258 * range of blocks. For now, the 'lock' feature is implemented by making use
 1259 * of 'unlock'.
1260 *
1261 * Returns lock status.
1262 */
1263int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1264{
1265	int ret = 0;
1266	int chipnr, status, page;
1267	struct nand_chip *chip = mtd_to_nand(mtd);
1268
1269	pr_debug("%s: start = 0x%012llx, len = %llu\n",
1270			__func__, (unsigned long long)ofs, len);
1271
1272	if (check_offs_len(mtd, ofs, len))
1273		return -EINVAL;
1274
1275	nand_get_device(mtd, FL_LOCKING);
1276
1277	/* Shift to get chip number */
1278	chipnr = ofs >> chip->chip_shift;
1279
1280	/*
1281	 * Reset the chip.
 1282	 * If we want to check write protection through READ STATUS (bit 7), we
 1283	 * must reset the chip first: some operations, e.g. erasing or
 1284	 * programming a locked block, can also clear bit 7 of the status
 1285	 * register.
1286	 */
1287	nand_reset(chip, chipnr);
1288
1289	chip->select_chip(mtd, chipnr);
1290
1291	/* Check, if it is write protected */
1292	if (nand_check_wp(mtd)) {
1293		pr_debug("%s: device is write protected!\n",
1294					__func__);
1295		status = MTD_ERASE_FAILED;
1296		ret = -EIO;
1297		goto out;
1298	}
1299
1300	/* Submit address of first page to lock */
1301	page = ofs >> chip->page_shift;
1302	chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
1303
1304	/* Call wait ready function */
1305	status = chip->waitfunc(mtd, chip);
1306	/* See if device thinks it succeeded */
1307	if (status & NAND_STATUS_FAIL) {
1308		pr_debug("%s: error status = 0x%08x\n",
1309					__func__, status);
1310		ret = -EIO;
1311		goto out;
1312	}
1313
1314	ret = __nand_unlock(mtd, ofs, len, 0x1);
1315
1316out:
1317	chip->select_chip(mtd, -1);
1318	nand_release_device(mtd);
1319
1320	return ret;
1321}
1322EXPORT_SYMBOL(nand_lock);
1323
1324/**
1325 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
1326 * @buf: buffer to test
1327 * @len: buffer length
1328 * @bitflips_threshold: maximum number of bitflips
1329 *
1330 * Check if a buffer contains only 0xff, which means the underlying region
1331 * has been erased and is ready to be programmed.
 1332 * The bitflips_threshold specifies the maximum number of bitflips before
 1333 * the region is considered not erased.
 1334 * Note: The logic of this function has been extracted from the memweight
 1335 * implementation, except that nand_check_erased_buf exits before testing
 1336 * the whole buffer if the number of bitflips exceeds the
 1337 * bitflips_threshold value.
1338 *
1339 * Returns a positive number of bitflips less than or equal to
1340 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1341 * threshold.
1342 */
1343static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1344{
1345	const unsigned char *bitmap = buf;
1346	int bitflips = 0;
1347	int weight;
1348
1349	for (; len && ((uintptr_t)bitmap) % sizeof(long);
1350	     len--, bitmap++) {
1351		weight = hweight8(*bitmap);
1352		bitflips += BITS_PER_BYTE - weight;
1353		if (unlikely(bitflips > bitflips_threshold))
1354			return -EBADMSG;
1355	}
1356
1357	for (; len >= sizeof(long);
1358	     len -= sizeof(long), bitmap += sizeof(long)) {
1359		weight = hweight_long(*((unsigned long *)bitmap));
1360		bitflips += BITS_PER_LONG - weight;
1361		if (unlikely(bitflips > bitflips_threshold))
1362			return -EBADMSG;
1363	}
1364
1365	for (; len > 0; len--, bitmap++) {
1366		weight = hweight8(*bitmap);
1367		bitflips += BITS_PER_BYTE - weight;
1368		if (unlikely(bitflips > bitflips_threshold))
1369			return -EBADMSG;
1370	}
1371
1372	return bitflips;
1373}
1374
1375/**
1376 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
1377 *				 0xff data
1378 * @data: data buffer to test
1379 * @datalen: data length
1380 * @ecc: ECC buffer
1381 * @ecclen: ECC length
1382 * @extraoob: extra OOB buffer
1383 * @extraooblen: extra OOB length
1384 * @bitflips_threshold: maximum number of bitflips
1385 *
1386 * Check if a data buffer and its associated ECC and OOB data contains only
1387 * 0xff pattern, which means the underlying region has been erased and is
1388 * ready to be programmed.
 1389 * The bitflips_threshold specifies the maximum number of bitflips before
 1390 * the region is considered not erased.
1391 *
1392 * Note:
1393 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
1394 *    different from the NAND page size. When fixing bitflips, ECC engines will
1395 *    report the number of errors per chunk, and the NAND core infrastructure
 1396 *    expects you to return the maximum number of bitflips for the whole page.
1397 *    This is why you should always use this function on a single chunk and
1398 *    not on the whole page. After checking each chunk you should update your
1399 *    max_bitflips value accordingly.
1400 * 2/ When checking for bitflips in erased pages you should not only check
1401 *    the payload data but also their associated ECC data, because a user might
 1402 *    have programmed all but a few bits to 1. In this case, we
 1403 *    shouldn't consider the chunk as erased, and checking the ECC bytes
 1404 *    prevents this misinterpretation.
1405 * 3/ The extraoob argument is optional, and should be used if some of your OOB
1406 *    data are protected by the ECC engine.
1407 *    It could also be used if you support subpages and want to attach some
1408 *    extra OOB data to an ECC chunk.
1409 *
1410 * Returns a positive number of bitflips less than or equal to
1411 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1412 * threshold. In case of success, the passed buffers are filled with 0xff.
1413 */
1414int nand_check_erased_ecc_chunk(void *data, int datalen,
1415				void *ecc, int ecclen,
1416				void *extraoob, int extraooblen,
1417				int bitflips_threshold)
1418{
1419	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
1420
1421	data_bitflips = nand_check_erased_buf(data, datalen,
1422					      bitflips_threshold);
1423	if (data_bitflips < 0)
1424		return data_bitflips;
1425
1426	bitflips_threshold -= data_bitflips;
1427
1428	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
1429	if (ecc_bitflips < 0)
1430		return ecc_bitflips;
1431
1432	bitflips_threshold -= ecc_bitflips;
1433
1434	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
1435						  bitflips_threshold);
1436	if (extraoob_bitflips < 0)
1437		return extraoob_bitflips;
1438
1439	if (data_bitflips)
1440		memset(data, 0xff, datalen);
1441
1442	if (ecc_bitflips)
1443		memset(ecc, 0xff, ecclen);
1444
1445	if (extraoob_bitflips)
1446		memset(extraoob, 0xff, extraooblen);
1447
1448	return data_bitflips + ecc_bitflips + extraoob_bitflips;
1449}
1450EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
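/*
 * Editorial sketch mirroring note 1/ above: a read_page implementation calls
 * the helper on one ECC chunk at a time when ->correct() reports an
 * uncorrectable error, and folds the result into its running max_bitflips.
 * The function name and buffer parameters are illustrative assumptions.
 */
static int __maybe_unused example_check_chunk(struct mtd_info *mtd,
					      struct nand_chip *chip,
					      uint8_t *data, uint8_t *ecc,
					      unsigned int *max_bitflips)
{
	int stat;

	stat = nand_check_erased_ecc_chunk(data, chip->ecc.size,
					   ecc, chip->ecc.bytes,
					   NULL, 0, chip->ecc.strength);
	if (stat < 0) {
		mtd->ecc_stats.failed++;
		return stat;
	}

	mtd->ecc_stats.corrected += stat;
	*max_bitflips = max_t(unsigned int, *max_bitflips, stat);
	return stat;
}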
1451
1452/**
1453 * nand_read_page_raw - [INTERN] read raw page data without ecc
1454 * @mtd: mtd info structure
1455 * @chip: nand chip info structure
1456 * @buf: buffer to store read data
1457 * @oob_required: caller requires OOB data read to chip->oob_poi
1458 * @page: page number to read
1459 *
1460 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1461 */
1462static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1463			      uint8_t *buf, int oob_required, int page)
1464{
1465	chip->read_buf(mtd, buf, mtd->writesize);
1466	if (oob_required)
1467		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1468	return 0;
1469}
1470
1471/**
1472 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1473 * @mtd: mtd info structure
1474 * @chip: nand chip info structure
1475 * @buf: buffer to store read data
1476 * @oob_required: caller requires OOB data read to chip->oob_poi
1477 * @page: page number to read
1478 *
1479 * We need a special oob layout and handling even when OOB isn't used.
1480 */
1481static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1482				       struct nand_chip *chip, uint8_t *buf,
1483				       int oob_required, int page)
1484{
1485	int eccsize = chip->ecc.size;
1486	int eccbytes = chip->ecc.bytes;
1487	uint8_t *oob = chip->oob_poi;
1488	int steps, size;
1489
1490	for (steps = chip->ecc.steps; steps > 0; steps--) {
1491		chip->read_buf(mtd, buf, eccsize);
1492		buf += eccsize;
1493
1494		if (chip->ecc.prepad) {
1495			chip->read_buf(mtd, oob, chip->ecc.prepad);
1496			oob += chip->ecc.prepad;
1497		}
1498
1499		chip->read_buf(mtd, oob, eccbytes);
1500		oob += eccbytes;
1501
1502		if (chip->ecc.postpad) {
1503			chip->read_buf(mtd, oob, chip->ecc.postpad);
1504			oob += chip->ecc.postpad;
1505		}
1506	}
1507
1508	size = mtd->oobsize - (oob - chip->oob_poi);
1509	if (size)
1510		chip->read_buf(mtd, oob, size);
1511
1512	return 0;
1513}
1514
1515/**
1516 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1517 * @mtd: mtd info structure
1518 * @chip: nand chip info structure
1519 * @buf: buffer to store read data
1520 * @oob_required: caller requires OOB data read to chip->oob_poi
1521 * @page: page number to read
1522 */
1523static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1524				uint8_t *buf, int oob_required, int page)
1525{
1526	int i, eccsize = chip->ecc.size, ret;
1527	int eccbytes = chip->ecc.bytes;
1528	int eccsteps = chip->ecc.steps;
1529	uint8_t *p = buf;
1530	uint8_t *ecc_calc = chip->buffers->ecccalc;
1531	uint8_t *ecc_code = chip->buffers->ecccode;
1532	unsigned int max_bitflips = 0;
1533
1534	chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1535
1536	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1537		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1538
1539	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1540					 chip->ecc.total);
1541	if (ret)
1542		return ret;
1543
1544	eccsteps = chip->ecc.steps;
1545	p = buf;
1546
1547	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1548		int stat;
1549
1550		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1551		if (stat < 0) {
1552			mtd->ecc_stats.failed++;
1553		} else {
1554			mtd->ecc_stats.corrected += stat;
1555			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1556		}
1557	}
1558	return max_bitflips;
1559}
1560
1561/**
1562 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
1563 * @mtd: mtd info structure
1564 * @chip: nand chip info structure
1565 * @data_offs: offset of requested data within the page
1566 * @readlen: data length
1567 * @bufpoi: buffer to store read data
1568 * @page: page number to read
1569 */
1570static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1571			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1572			int page)
1573{
1574	int start_step, end_step, num_steps, ret;
1575	uint8_t *p;
1576	int data_col_addr, i, gaps = 0;
1577	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1578	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1579	int index, section = 0;
1580	unsigned int max_bitflips = 0;
1581	struct mtd_oob_region oobregion = { };
1582
 1583	/* Column address within the page aligned to ECC size (256 bytes) */
1584	start_step = data_offs / chip->ecc.size;
1585	end_step = (data_offs + readlen - 1) / chip->ecc.size;
1586	num_steps = end_step - start_step + 1;
1587	index = start_step * chip->ecc.bytes;
1588
1589	/* Data size aligned to ECC ecc.size */
1590	datafrag_len = num_steps * chip->ecc.size;
1591	eccfrag_len = num_steps * chip->ecc.bytes;
1592
1593	data_col_addr = start_step * chip->ecc.size;
 1594	/* If the read is not page aligned */
1595	if (data_col_addr != 0)
1596		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1597
1598	p = bufpoi + data_col_addr;
1599	chip->read_buf(mtd, p, datafrag_len);
1600
1601	/* Calculate ECC */
1602	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1603		chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1604
1605	/*
 1606	 * Performance is better if we position offsets according to
1607	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1608	 */
1609	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
1610	if (ret)
1611		return ret;
1612
1613	if (oobregion.length < eccfrag_len)
1614		gaps = 1;
1615
1616	if (gaps) {
1617		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1618		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1619	} else {
1620		/*
 1621		 * Send the command to read the particular ECC bytes; take care
 1622		 * of buswidth alignment in read_buf.
1623		 */
1624		aligned_pos = oobregion.offset & ~(busw - 1);
1625		aligned_len = eccfrag_len;
1626		if (oobregion.offset & (busw - 1))
1627			aligned_len++;
1628		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
1629		    (busw - 1))
1630			aligned_len++;
1631
1632		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1633			      mtd->writesize + aligned_pos, -1);
1634		chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1635	}
1636
1637	ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
1638					 chip->oob_poi, index, eccfrag_len);
1639	if (ret)
1640		return ret;
1641
1642	p = bufpoi + data_col_addr;
1643	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1644		int stat;
1645
1646		stat = chip->ecc.correct(mtd, p,
1647			&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1648		if (stat == -EBADMSG &&
1649		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1650			/* check for empty pages with bitflips */
1651			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1652						&chip->buffers->ecccode[i],
1653						chip->ecc.bytes,
1654						NULL, 0,
1655						chip->ecc.strength);
1656		}
1657
1658		if (stat < 0) {
1659			mtd->ecc_stats.failed++;
1660		} else {
1661			mtd->ecc_stats.corrected += stat;
1662			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1663		}
1664	}
1665	return max_bitflips;
1666}
1667
1668/**
1669 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1670 * @mtd: mtd info structure
1671 * @chip: nand chip info structure
1672 * @buf: buffer to store read data
1673 * @oob_required: caller requires OOB data read to chip->oob_poi
1674 * @page: page number to read
1675 *
1676 * Not for syndrome calculating ECC controllers which need a special oob layout.
1677 */
1678static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1679				uint8_t *buf, int oob_required, int page)
1680{
1681	int i, eccsize = chip->ecc.size, ret;
1682	int eccbytes = chip->ecc.bytes;
1683	int eccsteps = chip->ecc.steps;
1684	uint8_t *p = buf;
1685	uint8_t *ecc_calc = chip->buffers->ecccalc;
1686	uint8_t *ecc_code = chip->buffers->ecccode;
1687	unsigned int max_bitflips = 0;
1688
1689	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1690		chip->ecc.hwctl(mtd, NAND_ECC_READ);
1691		chip->read_buf(mtd, p, eccsize);
1692		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1693	}
1694	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1695
1696	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1697					 chip->ecc.total);
1698	if (ret)
1699		return ret;
1700
1701	eccsteps = chip->ecc.steps;
1702	p = buf;
1703
1704	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1705		int stat;
1706
1707		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1708		if (stat == -EBADMSG &&
1709		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1710			/* check for empty pages with bitflips */
1711			stat = nand_check_erased_ecc_chunk(p, eccsize,
1712						&ecc_code[i], eccbytes,
1713						NULL, 0,
1714						chip->ecc.strength);
1715		}
1716
1717		if (stat < 0) {
1718			mtd->ecc_stats.failed++;
1719		} else {
1720			mtd->ecc_stats.corrected += stat;
1721			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1722		}
1723	}
1724	return max_bitflips;
1725}
1726
1727/**
1728 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1729 * @mtd: mtd info structure
1730 * @chip: nand chip info structure
1731 * @buf: buffer to store read data
1732 * @oob_required: caller requires OOB data read to chip->oob_poi
1733 * @page: page number to read
1734 *
 1735 * Hardware ECC for large page chips, which requires the OOB to be read
 1736 * first. For this ECC mode, the write_page method is re-used from ECC_HW.
 1737 * These methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
 1738 * support with multiple ECC steps, which follows the "infix ECC" scheme and
 1739 * reads/writes ECC from the data area, overwriting manufacturer bad block markers.
1740 */
1741static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1742	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1743{
1744	int i, eccsize = chip->ecc.size, ret;
1745	int eccbytes = chip->ecc.bytes;
1746	int eccsteps = chip->ecc.steps;
1747	uint8_t *p = buf;
1748	uint8_t *ecc_code = chip->buffers->ecccode;
1749	uint8_t *ecc_calc = chip->buffers->ecccalc;
1750	unsigned int max_bitflips = 0;
1751
1752	/* Read the OOB area first */
1753	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1754	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1755	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1756
1757	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1758					 chip->ecc.total);
1759	if (ret)
1760		return ret;
1761
1762	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1763		int stat;
1764
1765		chip->ecc.hwctl(mtd, NAND_ECC_READ);
1766		chip->read_buf(mtd, p, eccsize);
1767		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1768
1769		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1770		if (stat == -EBADMSG &&
1771		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1772			/* check for empty pages with bitflips */
1773			stat = nand_check_erased_ecc_chunk(p, eccsize,
1774						&ecc_code[i], eccbytes,
1775						NULL, 0,
1776						chip->ecc.strength);
1777		}
1778
1779		if (stat < 0) {
1780			mtd->ecc_stats.failed++;
1781		} else {
1782			mtd->ecc_stats.corrected += stat;
1783			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1784		}
1785	}
1786	return max_bitflips;
1787}
1788
1789/**
1790 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1791 * @mtd: mtd info structure
1792 * @chip: nand chip info structure
1793 * @buf: buffer to store read data
1794 * @oob_required: caller requires OOB data read to chip->oob_poi
1795 * @page: page number to read
1796 *
1797 * The hw generator calculates the error syndrome automatically. Therefore we
1798 * need a special oob layout and handling.
1799 */
1800static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1801				   uint8_t *buf, int oob_required, int page)
1802{
1803	int i, eccsize = chip->ecc.size;
1804	int eccbytes = chip->ecc.bytes;
1805	int eccsteps = chip->ecc.steps;
1806	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
1807	uint8_t *p = buf;
1808	uint8_t *oob = chip->oob_poi;
1809	unsigned int max_bitflips = 0;
1810
1811	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1812		int stat;
1813
1814		chip->ecc.hwctl(mtd, NAND_ECC_READ);
1815		chip->read_buf(mtd, p, eccsize);
1816
1817		if (chip->ecc.prepad) {
1818			chip->read_buf(mtd, oob, chip->ecc.prepad);
1819			oob += chip->ecc.prepad;
1820		}
1821
1822		chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
1823		chip->read_buf(mtd, oob, eccbytes);
1824		stat = chip->ecc.correct(mtd, p, oob, NULL);
1825
1826		oob += eccbytes;
1827
1828		if (chip->ecc.postpad) {
1829			chip->read_buf(mtd, oob, chip->ecc.postpad);
1830			oob += chip->ecc.postpad;
1831		}
1832
1833		if (stat == -EBADMSG &&
1834		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1835			/* check for empty pages with bitflips */
1836			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1837							   oob - eccpadbytes,
1838							   eccpadbytes,
1839							   NULL, 0,
1840							   chip->ecc.strength);
1841		}
1842
1843		if (stat < 0) {
1844			mtd->ecc_stats.failed++;
1845		} else {
1846			mtd->ecc_stats.corrected += stat;
1847			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1848		}
1849	}
1850
1851	/* Calculate remaining oob bytes */
1852	i = mtd->oobsize - (oob - chip->oob_poi);
1853	if (i)
1854		chip->read_buf(mtd, oob, i);
1855
1856	return max_bitflips;
1857}
1858
1859/**
1860 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1861 * @mtd: mtd info structure
1862 * @oob: oob destination address
1863 * @ops: oob ops structure
1864 * @len: size of oob to transfer
1865 */
1866static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
1867				  struct mtd_oob_ops *ops, size_t len)
1868{
1869	struct nand_chip *chip = mtd_to_nand(mtd);
1870	int ret;
1871
1872	switch (ops->mode) {
1873
1874	case MTD_OPS_PLACE_OOB:
1875	case MTD_OPS_RAW:
1876		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1877		return oob + len;
1878
1879	case MTD_OPS_AUTO_OOB:
1880		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
1881						  ops->ooboffs, len);
1882		BUG_ON(ret);
1883		return oob + len;
1884
1885	default:
1886		BUG();
1887	}
1888	return NULL;
1889}
1890
1891/**
1892 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
1893 * @mtd: MTD device structure
1894 * @retry_mode: the retry mode to use
1895 *
1896 * Some vendors supply a special command to shift the Vt threshold, to be used
1897 * when there are too many bitflips in a page (i.e., ECC error). After setting
1898 * a new threshold, the host should retry reading the page.
1899 */
1900static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
1901{
1902	struct nand_chip *chip = mtd_to_nand(mtd);
1903
1904	pr_debug("setting READ RETRY mode %d\n", retry_mode);
1905
1906	if (retry_mode >= chip->read_retries)
1907		return -EINVAL;
1908
1909	if (!chip->setup_read_retry)
1910		return -EOPNOTSUPP;
1911
1912	return chip->setup_read_retry(mtd, retry_mode);
1913}
1914
1915/**
1916 * nand_do_read_ops - [INTERN] Read data with ECC
1917 * @mtd: MTD device structure
1918 * @from: offset to read from
1919 * @ops: oob ops structure
1920 *
1921 * Internal function. Called with chip held.
1922 */
1923static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1924			    struct mtd_oob_ops *ops)
1925{
1926	int chipnr, page, realpage, col, bytes, aligned, oob_required;
1927	struct nand_chip *chip = mtd_to_nand(mtd);
1928	int ret = 0;
1929	uint32_t readlen = ops->len;
1930	uint32_t oobreadlen = ops->ooblen;
1931	uint32_t max_oobsize = mtd_oobavail(mtd, ops);
1932
1933	uint8_t *bufpoi, *oob, *buf;
1934	int use_bufpoi;
1935	unsigned int max_bitflips = 0;
1936	int retry_mode = 0;
1937	bool ecc_fail = false;
1938
1939	chipnr = (int)(from >> chip->chip_shift);
1940	chip->select_chip(mtd, chipnr);
1941
1942	realpage = (int)(from >> chip->page_shift);
1943	page = realpage & chip->pagemask;
1944
1945	col = (int)(from & (mtd->writesize - 1));
1946
1947	buf = ops->datbuf;
1948	oob = ops->oobbuf;
1949	oob_required = oob ? 1 : 0;
1950
1951	while (1) {
1952		unsigned int ecc_failures = mtd->ecc_stats.failed;
1953
1954		bytes = min(mtd->writesize - col, readlen);
1955		aligned = (bytes == mtd->writesize);
1956
1957		if (!aligned)
1958			use_bufpoi = 1;
1959		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
1960			use_bufpoi = !virt_addr_valid(buf);
1961		else
1962			use_bufpoi = 0;
1963
1964		/* Is the current page in the buffer? */
1965		if (realpage != chip->pagebuf || oob) {
1966			bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
1967
1968			if (use_bufpoi && aligned)
1969				pr_debug("%s: using read bounce buffer for buf@%p\n",
1970						 __func__, buf);
1971
1972read_retry:
1973			if (nand_standard_page_accessors(&chip->ecc))
1974				chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1975
1976			/*
1977			 * Now read the page into the buffer.  Absent an error,
1978			 * the read methods return max bitflips per ecc step.
1979			 */
1980			if (unlikely(ops->mode == MTD_OPS_RAW))
1981				ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
1982							      oob_required,
1983							      page);
1984			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
1985				 !oob)
1986				ret = chip->ecc.read_subpage(mtd, chip,
1987							col, bytes, bufpoi,
1988							page);
1989			else
1990				ret = chip->ecc.read_page(mtd, chip, bufpoi,
1991							  oob_required, page);
1992			if (ret < 0) {
1993				if (use_bufpoi)
1994					/* Invalidate page cache */
1995					chip->pagebuf = -1;
1996				break;
1997			}
1998
1999			max_bitflips = max_t(unsigned int, max_bitflips, ret);
2000
2001			/* Transfer unaligned data */
2002			if (use_bufpoi) {
2003				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
2004				    !(mtd->ecc_stats.failed - ecc_failures) &&
2005				    (ops->mode != MTD_OPS_RAW)) {
2006					chip->pagebuf = realpage;
2007					chip->pagebuf_bitflips = ret;
2008				} else {
2009					/* Invalidate page cache */
2010					chip->pagebuf = -1;
2011				}
2012				memcpy(buf, chip->buffers->databuf + col, bytes);
2013			}
2014
2015			if (unlikely(oob)) {
2016				int toread = min(oobreadlen, max_oobsize);
2017
2018				if (toread) {
2019					oob = nand_transfer_oob(mtd,
2020						oob, ops, toread);
2021					oobreadlen -= toread;
2022				}
2023			}
2024
2025			if (chip->options & NAND_NEED_READRDY) {
2026				/* Apply delay or wait for ready/busy pin */
2027				if (!chip->dev_ready)
2028					udelay(chip->chip_delay);
2029				else
2030					nand_wait_ready(mtd);
2031			}
2032
2033			if (mtd->ecc_stats.failed - ecc_failures) {
2034				if (retry_mode + 1 < chip->read_retries) {
2035					retry_mode++;
2036					ret = nand_setup_read_retry(mtd,
2037							retry_mode);
2038					if (ret < 0)
2039						break;
2040
2041					/* Reset failures; retry */
2042					mtd->ecc_stats.failed = ecc_failures;
2043					goto read_retry;
2044				} else {
2045					/* No more retry modes; real failure */
2046					ecc_fail = true;
2047				}
2048			}
2049
2050			buf += bytes;
2051		} else {
2052			memcpy(buf, chip->buffers->databuf + col, bytes);
2053			buf += bytes;
2054			max_bitflips = max_t(unsigned int, max_bitflips,
2055					     chip->pagebuf_bitflips);
2056		}
2057
2058		readlen -= bytes;
2059
2060		/* Reset to retry mode 0 */
2061		if (retry_mode) {
2062			ret = nand_setup_read_retry(mtd, 0);
2063			if (ret < 0)
2064				break;
2065			retry_mode = 0;
2066		}
2067
2068		if (!readlen)
2069			break;
2070
2071		/* For subsequent reads align to page boundary */
2072		col = 0;
2073		/* Increment page address */
2074		realpage++;
2075
2076		page = realpage & chip->pagemask;
2077		/* Check, if we cross a chip boundary */
2078		if (!page) {
2079			chipnr++;
2080			chip->select_chip(mtd, -1);
2081			chip->select_chip(mtd, chipnr);
2082		}
2083	}
2084	chip->select_chip(mtd, -1);
2085
2086	ops->retlen = ops->len - (size_t) readlen;
2087	if (oob)
2088		ops->oobretlen = ops->ooblen - oobreadlen;
2089
2090	if (ret < 0)
2091		return ret;
2092
2093	if (ecc_fail)
2094		return -EBADMSG;
2095
2096	return max_bitflips;
2097}
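/*
 * Editorial note (not from the original source): on success the value
 * returned above is the largest number of bitflips seen in any single ECC
 * step of the request, while unrecoverable steps yield -EBADMSG once the
 * read-retry modes are exhausted. The MTD core, not this driver, normally
 * converts the positive value into -EUCLEAN by comparing it against
 * mtd->bitflip_threshold, roughly as mtd_read() does in mtdcore.c:
 *
 *	ret_code = mtd->_read(mtd, from, len, retlen, buf);
 *	if (unlikely(ret_code < 0))
 *		return ret_code;
 *	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
 */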
2098
2099/**
2100 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ops
2101 * @mtd: MTD device structure
2102 * @from: offset to read from
2103 * @len: number of bytes to read
2104 * @retlen: pointer to variable to store the number of read bytes
2105 * @buf: the databuffer to put data
2106 *
2107 * Get hold of the chip and call nand_do_read_ops.
2108 */
2109static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2110		     size_t *retlen, uint8_t *buf)
2111{
2112	struct mtd_oob_ops ops;
2113	int ret;
2114
2115	nand_get_device(mtd, FL_READING);
2116	memset(&ops, 0, sizeof(ops));
2117	ops.len = len;
2118	ops.datbuf = buf;
2119	ops.mode = MTD_OPS_PLACE_OOB;
2120	ret = nand_do_read_ops(mtd, from, &ops);
2121	*retlen = ops.retlen;
2122	nand_release_device(mtd);
2123	return ret;
2124}
2125
2126/**
2127 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
2128 * @mtd: mtd info structure
2129 * @chip: nand chip info structure
2130 * @page: page number to read
2131 */
2132int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2133{
2134	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
2135	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2136	return 0;
2137}
2138EXPORT_SYMBOL(nand_read_oob_std);
2139
2140/**
2141 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
2142 *			    with syndromes
2143 * @mtd: mtd info structure
2144 * @chip: nand chip info structure
2145 * @page: page number to read
2146 */
2147int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2148			   int page)
2149{
2150	int length = mtd->oobsize;
2151	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2152	int eccsize = chip->ecc.size;
2153	uint8_t *bufpoi = chip->oob_poi;
2154	int i, toread, sndrnd = 0, pos;
2155
2156	chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
2157	for (i = 0; i < chip->ecc.steps; i++) {
2158		if (sndrnd) {
2159			pos = eccsize + i * (eccsize + chunk);
2160			if (mtd->writesize > 512)
2161				chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
2162			else
2163				chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
2164		} else
2165			sndrnd = 1;
2166		toread = min_t(int, length, chunk);
2167		chip->read_buf(mtd, bufpoi, toread);
2168		bufpoi += toread;
2169		length -= toread;
2170	}
2171	if (length > 0)
2172		chip->read_buf(mtd, bufpoi, length);
2173
2174	return 0;
2175}
2176EXPORT_SYMBOL(nand_read_oob_syndrome);
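/*
 * Worked example (editorial, assumed geometry): with chip->ecc.size == 512
 * and chunk == 16 (prepad + ECC bytes + postpad), step 0 above starts
 * reading at column 512, step 1 at 512 + 1 * (512 + 16) == 1040, step 2 at
 * 1568, and so on. The ECC chunks therefore sit "infix", interleaved with
 * the data of the following step, which is why the standard OOB read helper
 * cannot be used for this layout.
 */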
2177
2178/**
2179 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
2180 * @mtd: mtd info structure
2181 * @chip: nand chip info structure
2182 * @page: page number to write
2183 */
2184int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2185{
2186	int status = 0;
2187	const uint8_t *buf = chip->oob_poi;
2188	int length = mtd->oobsize;
2189
2190	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
2191	chip->write_buf(mtd, buf, length);
2192	/* Send command to program the OOB data */
2193	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2194
2195	status = chip->waitfunc(mtd, chip);
2196
2197	return status & NAND_STATUS_FAIL ? -EIO : 0;
2198}
2199EXPORT_SYMBOL(nand_write_oob_std);
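/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a controller driver that is content with these default OOB accessors can
 * either leave the hooks unset and let nand_scan_tail() pick them, or
 * assign them explicitly, e.g.:
 *
 *	chip->ecc.read_oob = nand_read_oob_std;
 *	chip->ecc.write_oob = nand_write_oob_std;
 */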
2200
2201/**
2202 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
2203 *			     with syndrome - only for large page flash
2204 * @mtd: mtd info structure
2205 * @chip: nand chip info structure
2206 * @page: page number to write
2207 */
2208int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2209			    int page)
2210{
2211	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2212	int eccsize = chip->ecc.size, length = mtd->oobsize;
2213	int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
2214	const uint8_t *bufpoi = chip->oob_poi;
2215
2216	/*
2217	 * data-ecc-data-ecc ... ecc-oob
2218	 * or
2219	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
2220	 */
2221	if (!chip->ecc.prepad && !chip->ecc.postpad) {
2222		pos = steps * (eccsize + chunk);
2223		steps = 0;
2224	} else
2225		pos = eccsize;
2226
2227	chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
2228	for (i = 0; i < steps; i++) {
2229		if (sndcmd) {
2230			if (mtd->writesize <= 512) {
2231				uint32_t fill = 0xFFFFFFFF;
2232
2233				len = eccsize;
2234				while (len > 0) {
2235					int num = min_t(int, len, 4);
2236					chip->write_buf(mtd, (uint8_t *)&fill,
2237							num);
2238					len -= num;
2239				}
2240			} else {
2241				pos = eccsize + i * (eccsize + chunk);
2242				chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
2243			}
2244		} else
2245			sndcmd = 1;
2246		len = min_t(int, length, chunk);
2247		chip->write_buf(mtd, bufpoi, len);
2248		bufpoi += len;
2249		length -= len;
2250	}
2251	if (length > 0)
2252		chip->write_buf(mtd, bufpoi, length);
2253
2254	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2255	status = chip->waitfunc(mtd, chip);
2256
2257	return status & NAND_STATUS_FAIL ? -EIO : 0;
2258}
2259EXPORT_SYMBOL(nand_write_oob_syndrome);
2260
2261/**
2262 * nand_do_read_oob - [INTERN] NAND read out-of-band
2263 * @mtd: MTD device structure
2264 * @from: offset to read from
2265 * @ops: oob operations description structure
2266 *
2267 * NAND read out-of-band data from the spare area.
2268 */
2269static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2270			    struct mtd_oob_ops *ops)
2271{
2272	int page, realpage, chipnr;
2273	struct nand_chip *chip = mtd_to_nand(mtd);
2274	struct mtd_ecc_stats stats;
2275	int readlen = ops->ooblen;
2276	int len;
2277	uint8_t *buf = ops->oobbuf;
2278	int ret = 0;
2279
2280	pr_debug("%s: from = 0x%08Lx, len = %i\n",
2281			__func__, (unsigned long long)from, readlen);
2282
2283	stats = mtd->ecc_stats;
2284
2285	len = mtd_oobavail(mtd, ops);
2286
2287	if (unlikely(ops->ooboffs >= len)) {
2288		pr_debug("%s: attempt to start read outside oob\n",
2289				__func__);
2290		return -EINVAL;
2291	}
2292
2293	/* Do not allow reads past end of device */
2294	if (unlikely(from >= mtd->size ||
2295		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
2296					(from >> chip->page_shift)) * len)) {
2297		pr_debug("%s: attempt to read beyond end of device\n",
2298				__func__);
2299		return -EINVAL;
2300	}
2301
2302	chipnr = (int)(from >> chip->chip_shift);
2303	chip->select_chip(mtd, chipnr);
2304
2305	/* Shift to get page */
2306	realpage = (int)(from >> chip->page_shift);
2307	page = realpage & chip->pagemask;
2308
2309	while (1) {
2310		if (ops->mode == MTD_OPS_RAW)
2311			ret = chip->ecc.read_oob_raw(mtd, chip, page);
2312		else
2313			ret = chip->ecc.read_oob(mtd, chip, page);
2314
2315		if (ret < 0)
2316			break;
2317
2318		len = min(len, readlen);
2319		buf = nand_transfer_oob(mtd, buf, ops, len);
2320
2321		if (chip->options & NAND_NEED_READRDY) {
2322			/* Apply delay or wait for ready/busy pin */
2323			if (!chip->dev_ready)
2324				udelay(chip->chip_delay);
2325			else
2326				nand_wait_ready(mtd);
2327		}
2328
2329		readlen -= len;
2330		if (!readlen)
2331			break;
2332
2333		/* Increment page address */
2334		realpage++;
2335
2336		page = realpage & chip->pagemask;
2337		/* Check, if we cross a chip boundary */
2338		if (!page) {
2339			chipnr++;
2340			chip->select_chip(mtd, -1);
2341			chip->select_chip(mtd, chipnr);
2342		}
2343	}
2344	chip->select_chip(mtd, -1);
2345
2346	ops->oobretlen = ops->ooblen - readlen;
2347
2348	if (ret < 0)
2349		return ret;
2350
2351	if (mtd->ecc_stats.failed - stats.failed)
2352		return -EBADMSG;
2353
2354	return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
2355}
2356
2357/**
2358 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
2359 * @mtd: MTD device structure
2360 * @from: offset to read from
2361 * @ops: oob operation description structure
2362 *
2363 * NAND read data and/or out-of-band data.
2364 */
2365static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2366			 struct mtd_oob_ops *ops)
2367{
2368	int ret;
2369
2370	ops->retlen = 0;
2371
2372	/* Do not allow reads past end of device */
2373	if (ops->datbuf && (from + ops->len) > mtd->size) {
2374		pr_debug("%s: attempt to read beyond end of device\n",
2375				__func__);
2376		return -EINVAL;
2377	}
2378
2379	if (ops->mode != MTD_OPS_PLACE_OOB &&
2380	    ops->mode != MTD_OPS_AUTO_OOB &&
2381	    ops->mode != MTD_OPS_RAW)
2382		return -ENOTSUPP;
2383
2384	nand_get_device(mtd, FL_READING);
2385
2386	if (!ops->datbuf)
2387		ret = nand_do_read_oob(mtd, from, ops);
2388	else
2389		ret = nand_do_read_ops(mtd, from, ops);
2390
2391	nand_release_device(mtd);
2392	return ret;
2393}
2394
2395
2396/**
2397 * nand_write_page_raw - [INTERN] raw page write function
2398 * @mtd: mtd info structure
2399 * @chip: nand chip info structure
2400 * @buf: data buffer
2401 * @oob_required: must write chip->oob_poi to OOB
2402 * @page: page number to write
2403 *
2404 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2405 */
2406static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2407			       const uint8_t *buf, int oob_required, int page)
2408{
2409	chip->write_buf(mtd, buf, mtd->writesize);
2410	if (oob_required)
2411		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2412
2413	return 0;
2414}
2415
2416/**
2417 * nand_write_page_raw_syndrome - [INTERN] raw page write function
2418 * @mtd: mtd info structure
2419 * @chip: nand chip info structure
2420 * @buf: data buffer
2421 * @oob_required: must write chip->oob_poi to OOB
2422 * @page: page number to write
2423 *
2424 * We need a special oob layout and handling even when ECC isn't checked.
2425 */
2426static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2427					struct nand_chip *chip,
2428					const uint8_t *buf, int oob_required,
2429					int page)
2430{
2431	int eccsize = chip->ecc.size;
2432	int eccbytes = chip->ecc.bytes;
2433	uint8_t *oob = chip->oob_poi;
2434	int steps, size;
2435
2436	for (steps = chip->ecc.steps; steps > 0; steps--) {
2437		chip->write_buf(mtd, buf, eccsize);
2438		buf += eccsize;
2439
2440		if (chip->ecc.prepad) {
2441			chip->write_buf(mtd, oob, chip->ecc.prepad);
2442			oob += chip->ecc.prepad;
2443		}
2444
2445		chip->write_buf(mtd, oob, eccbytes);
2446		oob += eccbytes;
2447
2448		if (chip->ecc.postpad) {
2449			chip->write_buf(mtd, oob, chip->ecc.postpad);
2450			oob += chip->ecc.postpad;
2451		}
2452	}
2453
2454	size = mtd->oobsize - (oob - chip->oob_poi);
2455	if (size)
2456		chip->write_buf(mtd, oob, size);
2457
2458	return 0;
2459}
2460/**
2461 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
2462 * @mtd: mtd info structure
2463 * @chip: nand chip info structure
2464 * @buf: data buffer
2465 * @oob_required: must write chip->oob_poi to OOB
2466 * @page: page number to write
2467 */
2468static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2469				 const uint8_t *buf, int oob_required,
2470				 int page)
2471{
2472	int i, eccsize = chip->ecc.size, ret;
2473	int eccbytes = chip->ecc.bytes;
2474	int eccsteps = chip->ecc.steps;
2475	uint8_t *ecc_calc = chip->buffers->ecccalc;
2476	const uint8_t *p = buf;
2477
2478	/* Software ECC calculation */
2479	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2480		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2481
2482	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2483					 chip->ecc.total);
2484	if (ret)
2485		return ret;
2486
2487	return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2488}
2489
2490/**
2491 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
2492 * @mtd: mtd info structure
2493 * @chip: nand chip info structure
2494 * @buf: data buffer
2495 * @oob_required: must write chip->oob_poi to OOB
2496 * @page: page number to write
2497 */
2498static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2499				  const uint8_t *buf, int oob_required,
2500				  int page)
2501{
2502	int i, eccsize = chip->ecc.size, ret;
2503	int eccbytes = chip->ecc.bytes;
2504	int eccsteps = chip->ecc.steps;
2505	uint8_t *ecc_calc = chip->buffers->ecccalc;
2506	const uint8_t *p = buf;
2507
2508	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2509		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2510		chip->write_buf(mtd, p, eccsize);
2511		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2512	}
2513
2514	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2515					 chip->ecc.total);
2516	if (ret)
2517		return ret;
2518
2519	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2520
2521	return 0;
2522}
2523
2524
2525/**
2526 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
2527 * @mtd:	mtd info structure
2528 * @chip:	nand chip info structure
2529 * @offset:	column address of subpage within the page
2530 * @data_len:	data length
2531 * @buf:	data buffer
2532 * @oob_required: must write chip->oob_poi to OOB
2533 * @page: page number to write
2534 */
2535static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2536				struct nand_chip *chip, uint32_t offset,
2537				uint32_t data_len, const uint8_t *buf,
2538				int oob_required, int page)
2539{
2540	uint8_t *oob_buf  = chip->oob_poi;
2541	uint8_t *ecc_calc = chip->buffers->ecccalc;
2542	int ecc_size      = chip->ecc.size;
2543	int ecc_bytes     = chip->ecc.bytes;
2544	int ecc_steps     = chip->ecc.steps;
2545	uint32_t start_step = offset / ecc_size;
2546	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
2547	int oob_bytes       = mtd->oobsize / ecc_steps;
2548	int step, ret;
2549
2550	for (step = 0; step < ecc_steps; step++) {
2551		/* configure controller for WRITE access */
2552		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2553
2554		/* write data (untouched subpages already masked by 0xFF) */
2555		chip->write_buf(mtd, buf, ecc_size);
2556
2557		/* mask ECC of un-touched subpages by padding 0xFF */
2558		if ((step < start_step) || (step > end_step))
2559			memset(ecc_calc, 0xff, ecc_bytes);
2560		else
2561			chip->ecc.calculate(mtd, buf, ecc_calc);
2562
2563		/* mask OOB of un-touched subpages by padding 0xFF */
2564		/* if oob_required, preserve OOB metadata of written subpage */
2565		if (!oob_required || (step < start_step) || (step > end_step))
2566			memset(oob_buf, 0xff, oob_bytes);
2567
2568		buf += ecc_size;
2569		ecc_calc += ecc_bytes;
2570		oob_buf  += oob_bytes;
2571	}
2572
2573	/* copy the calculated ECC for the whole page to chip->oob_poi */
2574	/* this includes the masked value (0xFF) for unwritten subpages */
2575	ecc_calc = chip->buffers->ecccalc;
2576	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2577					 chip->ecc.total);
2578	if (ret)
2579		return ret;
2580
2581	/* write OOB buffer to NAND device */
2582	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2583
2584	return 0;
2585}
2586
2587
2588/**
2589 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2590 * @mtd: mtd info structure
2591 * @chip: nand chip info structure
2592 * @buf: data buffer
2593 * @oob_required: must write chip->oob_poi to OOB
2594 * @page: page number to write
2595 *
2596 * The hw generator calculates the error syndrome automatically. Therefore we
2597 * need a special oob layout and handling.
2598 */
2599static int nand_write_page_syndrome(struct mtd_info *mtd,
2600				    struct nand_chip *chip,
2601				    const uint8_t *buf, int oob_required,
2602				    int page)
2603{
2604	int i, eccsize = chip->ecc.size;
2605	int eccbytes = chip->ecc.bytes;
2606	int eccsteps = chip->ecc.steps;
2607	const uint8_t *p = buf;
2608	uint8_t *oob = chip->oob_poi;
2609
2610	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2611
2612		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2613		chip->write_buf(mtd, p, eccsize);
2614
2615		if (chip->ecc.prepad) {
2616			chip->write_buf(mtd, oob, chip->ecc.prepad);
2617			oob += chip->ecc.prepad;
2618		}
2619
2620		chip->ecc.calculate(mtd, p, oob);
2621		chip->write_buf(mtd, oob, eccbytes);
2622		oob += eccbytes;
2623
2624		if (chip->ecc.postpad) {
2625			chip->write_buf(mtd, oob, chip->ecc.postpad);
2626			oob += chip->ecc.postpad;
2627		}
2628	}
2629
2630	/* Calculate remaining oob bytes */
2631	i = mtd->oobsize - (oob - chip->oob_poi);
2632	if (i)
2633		chip->write_buf(mtd, oob, i);
2634
2635	return 0;
2636}
2637
2638/**
2639 * nand_write_page - [REPLACEABLE] write one page
2640 * @mtd: MTD device structure
2641 * @chip: NAND chip descriptor
2642 * @offset: address offset within the page
2643 * @data_len: length of actual data to be written
2644 * @buf: the data to write
2645 * @oob_required: must write chip->oob_poi to OOB
2646 * @page: page number to write
2647 * @cached: cached programming
2648 * @raw: use _raw version of write_page
2649 */
2650static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2651		uint32_t offset, int data_len, const uint8_t *buf,
2652		int oob_required, int page, int cached, int raw)
2653{
2654	int status, subpage;
2655
2656	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
2657		chip->ecc.write_subpage)
2658		subpage = offset || (data_len < mtd->writesize);
2659	else
2660		subpage = 0;
2661
2662	if (nand_standard_page_accessors(&chip->ecc))
2663		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2664
2665	if (unlikely(raw))
2666		status = chip->ecc.write_page_raw(mtd, chip, buf,
2667						  oob_required, page);
2668	else if (subpage)
2669		status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
2670						 buf, oob_required, page);
2671	else
2672		status = chip->ecc.write_page(mtd, chip, buf, oob_required,
2673					      page);
2674
2675	if (status < 0)
2676		return status;
2677
2678	/*
2679	 * Cached programming is disabled for now. It is not clear whether it is
2680	 * worth the trouble; the speed gain is not very impressive (2.3 -> 2.6 MiB/s).
2681	 */
2682	cached = 0;
2683
2684	if (!cached || !NAND_HAS_CACHEPROG(chip)) {
2685
2686		if (nand_standard_page_accessors(&chip->ecc))
2687			chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2688		status = chip->waitfunc(mtd, chip);
2689		/*
2690		 * See if operation failed and additional status checks are
2691		 * available.
2692		 */
2693		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
2694			status = chip->errstat(mtd, chip, FL_WRITING, status,
2695					       page);
2696
2697		if (status & NAND_STATUS_FAIL)
2698			return -EIO;
2699	} else {
2700		chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
2701		status = chip->waitfunc(mtd, chip);
2702	}
2703
2704	return 0;
2705}
2706
2707/**
2708 * nand_fill_oob - [INTERN] Transfer client buffer to oob
2709 * @mtd: MTD device structure
2710 * @oob: oob data buffer
2711 * @len: oob data write length
2712 * @ops: oob ops structure
2713 */
2714static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2715			      struct mtd_oob_ops *ops)
2716{
2717	struct nand_chip *chip = mtd_to_nand(mtd);
2718	int ret;
2719
2720	/*
2721	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2722	 * data from a previous OOB read.
2723	 */
2724	memset(chip->oob_poi, 0xff, mtd->oobsize);
2725
2726	switch (ops->mode) {
2727
2728	case MTD_OPS_PLACE_OOB:
2729	case MTD_OPS_RAW:
2730		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2731		return oob + len;
2732
2733	case MTD_OPS_AUTO_OOB:
2734		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2735						  ops->ooboffs, len);
2736		BUG_ON(ret);
2737		return oob + len;
2738
2739	default:
2740		BUG();
2741	}
2742	return NULL;
2743}
2744
2745#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)
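/*
 * Worked example (editorial, illustrative only): with chip->subpagesize ==
 * 512 (0x200), NOTALIGNED(0x300) is (0x300 & 0x1ff) != 0, i.e. 0x100 != 0,
 * so a write at column 0x300 is rejected by nand_do_write_ops() below,
 * while an offset or length of 0x400 passes the check.
 */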
2746
2747/**
2748 * nand_do_write_ops - [INTERN] NAND write with ECC
2749 * @mtd: MTD device structure
2750 * @to: offset to write to
2751 * @ops: oob operations description structure
2752 *
2753 * NAND write with ECC.
2754 */
2755static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2756			     struct mtd_oob_ops *ops)
2757{
2758	int chipnr, realpage, page, blockmask, column;
2759	struct nand_chip *chip = mtd_to_nand(mtd);
2760	uint32_t writelen = ops->len;
2761
2762	uint32_t oobwritelen = ops->ooblen;
2763	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2764
2765	uint8_t *oob = ops->oobbuf;
2766	uint8_t *buf = ops->datbuf;
2767	int ret;
2768	int oob_required = oob ? 1 : 0;
2769
2770	ops->retlen = 0;
2771	if (!writelen)
2772		return 0;
2773
2774	/* Reject writes which are not page aligned */
2775	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2776		pr_notice("%s: attempt to write non page aligned data\n",
2777			   __func__);
2778		return -EINVAL;
2779	}
2780
2781	column = to & (mtd->writesize - 1);
2782
2783	chipnr = (int)(to >> chip->chip_shift);
2784	chip->select_chip(mtd, chipnr);
2785
2786	/* Check, if it is write protected */
2787	if (nand_check_wp(mtd)) {
2788		ret = -EIO;
2789		goto err_out;
2790	}
2791
2792	realpage = (int)(to >> chip->page_shift);
2793	page = realpage & chip->pagemask;
2794	blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
2795
2796	/* Invalidate the page cache, when we write to the cached page */
2797	if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
2798	    ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
2799		chip->pagebuf = -1;
2800
2801	/* Don't allow multipage oob writes with offset */
2802	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
2803		ret = -EINVAL;
2804		goto err_out;
2805	}
2806
2807	while (1) {
2808		int bytes = mtd->writesize;
2809		int cached = writelen > bytes && page != blockmask;
2810		uint8_t *wbuf = buf;
2811		int use_bufpoi;
2812		int part_pagewr = (column || writelen < mtd->writesize);
2813
2814		if (part_pagewr)
2815			use_bufpoi = 1;
2816		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2817			use_bufpoi = !virt_addr_valid(buf);
2818		else
2819			use_bufpoi = 0;
2820
2821		/* Partial page write, or need to use the bounce buffer? */
2822		if (use_bufpoi) {
2823			pr_debug("%s: using write bounce buffer for buf@%p\n",
2824					 __func__, buf);
2825			cached = 0;
2826			if (part_pagewr)
2827				bytes = min_t(int, bytes - column, writelen);
2828			chip->pagebuf = -1;
2829			memset(chip->buffers->databuf, 0xff, mtd->writesize);
2830			memcpy(&chip->buffers->databuf[column], buf, bytes);
2831			wbuf = chip->buffers->databuf;
2832		}
2833
2834		if (unlikely(oob)) {
2835			size_t len = min(oobwritelen, oobmaxlen);
2836			oob = nand_fill_oob(mtd, oob, len, ops);
2837			oobwritelen -= len;
2838		} else {
2839			/* We still need to erase leftover OOB data */
2840			memset(chip->oob_poi, 0xff, mtd->oobsize);
2841		}
2842		ret = chip->write_page(mtd, chip, column, bytes, wbuf,
2843					oob_required, page, cached,
2844					(ops->mode == MTD_OPS_RAW));
2845		if (ret)
2846			break;
2847
2848		writelen -= bytes;
2849		if (!writelen)
2850			break;
2851
2852		column = 0;
2853		buf += bytes;
2854		realpage++;
2855
2856		page = realpage & chip->pagemask;
2857		/* Check, if we cross a chip boundary */
2858		if (!page) {
2859			chipnr++;
2860			chip->select_chip(mtd, -1);
2861			chip->select_chip(mtd, chipnr);
2862		}
2863	}
2864
2865	ops->retlen = ops->len - writelen;
2866	if (unlikely(oob))
2867		ops->oobretlen = ops->ooblen;
2868
2869err_out:
2870	chip->select_chip(mtd, -1);
2871	return ret;
2872}
2873
2874/**
2875 * panic_nand_write - [MTD Interface] NAND write with ECC
2876 * @mtd: MTD device structure
2877 * @to: offset to write to
2878 * @len: number of bytes to write
2879 * @retlen: pointer to variable to store the number of written bytes
2880 * @buf: the data to write
2881 *
2882 * NAND write with ECC. Used when performing writes in interrupt context; this
2883 * may, for example, be called by mtdoops when writing an oops while in panic.
2884 */
2885static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2886			    size_t *retlen, const uint8_t *buf)
2887{
2888	struct nand_chip *chip = mtd_to_nand(mtd);
2889	struct mtd_oob_ops ops;
2890	int ret;
2891
2892	/* Wait for the device to get ready */
2893	panic_nand_wait(mtd, chip, 400);
2894
2895	/* Grab the device */
2896	panic_nand_get_device(chip, mtd, FL_WRITING);
2897
2898	memset(&ops, 0, sizeof(ops));
2899	ops.len = len;
2900	ops.datbuf = (uint8_t *)buf;
2901	ops.mode = MTD_OPS_PLACE_OOB;
2902
2903	ret = nand_do_write_ops(mtd, to, &ops);
2904
2905	*retlen = ops.retlen;
2906	return ret;
2907}
2908
2909/**
2910 * nand_write - [MTD Interface] NAND write with ECC
2911 * @mtd: MTD device structure
2912 * @to: offset to write to
2913 * @len: number of bytes to write
2914 * @retlen: pointer to variable to store the number of written bytes
2915 * @buf: the data to write
2916 *
2917 * NAND write with ECC.
2918 */
2919static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2920			  size_t *retlen, const uint8_t *buf)
2921{
2922	struct mtd_oob_ops ops;
2923	int ret;
2924
2925	nand_get_device(mtd, FL_WRITING);
2926	memset(&ops, 0, sizeof(ops));
2927	ops.len = len;
2928	ops.datbuf = (uint8_t *)buf;
2929	ops.mode = MTD_OPS_PLACE_OOB;
2930	ret = nand_do_write_ops(mtd, to, &ops);
2931	*retlen = ops.retlen;
2932	nand_release_device(mtd);
2933	return ret;
2934}
2935
2936/**
2937 * nand_do_write_oob - [INTERN] NAND write out-of-band
2938 * @mtd: MTD device structure
2939 * @to: offset to write to
2940 * @ops: oob operation description structure
2941 *
2942 * NAND write out-of-band.
2943 */
2944static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2945			     struct mtd_oob_ops *ops)
2946{
2947	int chipnr, page, status, len;
2948	struct nand_chip *chip = mtd_to_nand(mtd);
2949
2950	pr_debug("%s: to = 0x%08x, len = %i\n",
2951			 __func__, (unsigned int)to, (int)ops->ooblen);
2952
2953	len = mtd_oobavail(mtd, ops);
2954
2955	/* Do not allow write past end of page */
2956	if ((ops->ooboffs + ops->ooblen) > len) {
2957		pr_debug("%s: attempt to write past end of page\n",
2958				__func__);
2959		return -EINVAL;
2960	}
2961
2962	if (unlikely(ops->ooboffs >= len)) {
2963		pr_debug("%s: attempt to start write outside oob\n",
2964				__func__);
2965		return -EINVAL;
2966	}
2967
2968	/* Do not allow write past end of device */
2969	if (unlikely(to >= mtd->size ||
2970		     ops->ooboffs + ops->ooblen >
2971			((mtd->size >> chip->page_shift) -
2972			 (to >> chip->page_shift)) * len)) {
2973		pr_debug("%s: attempt to write beyond end of device\n",
2974				__func__);
2975		return -EINVAL;
2976	}
2977
2978	chipnr = (int)(to >> chip->chip_shift);
2979
2980	/*
2981	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
2982	 * of my DiskOnChip 2000 test units) will clear the whole data page too
2983	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
2984	 * it in the doc2000 driver in August 1999.  dwmw2.
2985	 */
2986	nand_reset(chip, chipnr);
2987
2988	chip->select_chip(mtd, chipnr);
2989
2990	/* Shift to get page */
2991	page = (int)(to >> chip->page_shift);
2992
2993	/* Check, if it is write protected */
2994	if (nand_check_wp(mtd)) {
2995		chip->select_chip(mtd, -1);
2996		return -EROFS;
2997	}
2998
2999	/* Invalidate the page cache, if we write to the cached page */
3000	if (page == chip->pagebuf)
3001		chip->pagebuf = -1;
3002
3003	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
3004
3005	if (ops->mode == MTD_OPS_RAW)
3006		status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
3007	else
3008		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
3009
3010	chip->select_chip(mtd, -1);
3011
3012	if (status)
3013		return status;
3014
3015	ops->oobretlen = ops->ooblen;
3016
3017	return 0;
3018}
3019
3020/**
3021 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
3022 * @mtd: MTD device structure
3023 * @to: offset to write to
3024 * @ops: oob operation description structure
3025 */
3026static int nand_write_oob(struct mtd_info *mtd, loff_t to,
3027			  struct mtd_oob_ops *ops)
3028{
3029	int ret = -ENOTSUPP;
3030
3031	ops->retlen = 0;
3032
3033	/* Do not allow writes past end of device */
3034	if (ops->datbuf && (to + ops->len) > mtd->size) {
3035		pr_debug("%s: attempt to write beyond end of device\n",
3036				__func__);
3037		return -EINVAL;
3038	}
3039
3040	nand_get_device(mtd, FL_WRITING);
3041
3042	switch (ops->mode) {
3043	case MTD_OPS_PLACE_OOB:
3044	case MTD_OPS_AUTO_OOB:
3045	case MTD_OPS_RAW:
3046		break;
3047
3048	default:
3049		goto out;
3050	}
3051
3052	if (!ops->datbuf)
3053		ret = nand_do_write_oob(mtd, to, ops);
3054	else
3055		ret = nand_do_write_ops(mtd, to, ops);
3056
3057out:
3058	nand_release_device(mtd);
3059	return ret;
3060}
3061
3062/**
3063 * single_erase - [GENERIC] NAND standard block erase command function
3064 * @mtd: MTD device structure
3065 * @page: the page address of the block which will be erased
3066 *
3067 * Standard erase command for NAND chips. Returns NAND status.
3068 */
3069static int single_erase(struct mtd_info *mtd, int page)
3070{
3071	struct nand_chip *chip = mtd_to_nand(mtd);
3072	/* Send commands to erase a block */
3073	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
3074	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
3075
3076	return chip->waitfunc(mtd, chip);
3077}
3078
3079/**
3080 * nand_erase - [MTD Interface] erase block(s)
3081 * @mtd: MTD device structure
3082 * @instr: erase instruction
3083 *
3084 * Erase one or more blocks.
3085 */
3086static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
3087{
3088	return nand_erase_nand(mtd, instr, 0);
3089}
3090
3091/**
3092 * nand_erase_nand - [INTERN] erase block(s)
3093 * @mtd: MTD device structure
3094 * @instr: erase instruction
3095 * @allowbbt: allow erasing the bbt area
3096 *
3097 * Erase one or more blocks.
3098 */
3099int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
3100		    int allowbbt)
3101{
3102	int page, status, pages_per_block, ret, chipnr;
3103	struct nand_chip *chip = mtd_to_nand(mtd);
3104	loff_t len;
3105
3106	pr_debug("%s: start = 0x%012llx, len = %llu\n",
3107			__func__, (unsigned long long)instr->addr,
3108			(unsigned long long)instr->len);
3109
3110	if (check_offs_len(mtd, instr->addr, instr->len))
3111		return -EINVAL;
3112
3113	/* Grab the lock and see if the device is available */
3114	nand_get_device(mtd, FL_ERASING);
3115
3116	/* Shift to get first page */
3117	page = (int)(instr->addr >> chip->page_shift);
3118	chipnr = (int)(instr->addr >> chip->chip_shift);
3119
3120	/* Calculate pages in each block */
3121	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3122
3123	/* Select the NAND device */
3124	chip->select_chip(mtd, chipnr);
3125
3126	/* Check, if it is write protected */
3127	if (nand_check_wp(mtd)) {
3128		pr_debug("%s: device is write protected!\n",
3129				__func__);
3130		instr->state = MTD_ERASE_FAILED;
3131		goto erase_exit;
3132	}
3133
3134	/* Loop through the pages */
3135	len = instr->len;
3136
3137	instr->state = MTD_ERASING;
3138
3139	while (len) {
3140		/* Check if we have a bad block; we do not erase bad blocks! */
3141		if (nand_block_checkbad(mtd, ((loff_t) page) <<
3142					chip->page_shift, allowbbt)) {
3143			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
3144				    __func__, page);
3145			instr->state = MTD_ERASE_FAILED;
3146			goto erase_exit;
3147		}
3148
3149		/*
3150		 * Invalidate the page cache, if we erase the block which
3151		 * contains the current cached page.
3152		 */
3153		if (page <= chip->pagebuf && chip->pagebuf <
3154		    (page + pages_per_block))
3155			chip->pagebuf = -1;
3156
3157		status = chip->erase(mtd, page & chip->pagemask);
3158
3159		/*
3160		 * See if operation failed and additional status checks are
3161		 * available
3162		 */
3163		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
3164			status = chip->errstat(mtd, chip, FL_ERASING,
3165					       status, page);
3166
3167		/* See if block erase succeeded */
3168		if (status & NAND_STATUS_FAIL) {
3169			pr_debug("%s: failed erase, page 0x%08x\n",
3170					__func__, page);
3171			instr->state = MTD_ERASE_FAILED;
3172			instr->fail_addr =
3173				((loff_t)page << chip->page_shift);
3174			goto erase_exit;
3175		}
3176
3177		/* Increment page address and decrement length */
3178		len -= (1ULL << chip->phys_erase_shift);
3179		page += pages_per_block;
3180
3181		/* Check, if we cross a chip boundary */
3182		if (len && !(page & chip->pagemask)) {
3183			chipnr++;
3184			chip->select_chip(mtd, -1);
3185			chip->select_chip(mtd, chipnr);
3186		}
3187	}
3188	instr->state = MTD_ERASE_DONE;
3189
3190erase_exit:
3191
3192	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
3193
3194	/* Deselect and wake up anyone waiting on the device */
3195	chip->select_chip(mtd, -1);
3196	nand_release_device(mtd);
3197
3198	/* Invoke the erase callback */
3199	if (!ret)
3200		mtd_erase_callback(instr);
3201
3202	/* Return more or less happy */
3203	return ret;
3204}
3205
3206/**
3207 * nand_sync - [MTD Interface] sync
3208 * @mtd: MTD device structure
3209 *
3210 * Sync is actually a wait for the chip to become ready.
3211 */
3212static void nand_sync(struct mtd_info *mtd)
3213{
3214	pr_debug("%s: called\n", __func__);
3215
3216	/* Grab the lock and see if the device is available */
3217	nand_get_device(mtd, FL_SYNCING);
3218	/* Release it and go back */
3219	nand_release_device(mtd);
3220}
3221
3222/**
3223 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
3224 * @mtd: MTD device structure
3225 * @offs: offset relative to mtd start
3226 */
3227static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3228{
3229	struct nand_chip *chip = mtd_to_nand(mtd);
3230	int chipnr = (int)(offs >> chip->chip_shift);
3231	int ret;
3232
3233	/* Select the NAND device */
3234	nand_get_device(mtd, FL_READING);
3235	chip->select_chip(mtd, chipnr);
3236
3237	ret = nand_block_checkbad(mtd, offs, 0);
3238
3239	chip->select_chip(mtd, -1);
3240	nand_release_device(mtd);
3241
3242	return ret;
3243}
3244
3245/**
3246 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
3247 * @mtd: MTD device structure
3248 * @ofs: offset relative to mtd start
3249 */
3250static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3251{
3252	int ret;
3253
3254	ret = nand_block_isbad(mtd, ofs);
3255	if (ret) {
3256		/* If it was bad already, return success and do nothing */
3257		if (ret > 0)
3258			return 0;
3259		return ret;
3260	}
3261
3262	return nand_block_markbad_lowlevel(mtd, ofs);
3263}
3264
3265/**
3266 * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
3267 * @mtd: MTD device structure
3268 * @chip: nand chip info structure
3269 * @addr: feature address.
3270 * @subfeature_param: the subfeature parameters, a four bytes array.
3271 */
3272static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3273			int addr, uint8_t *subfeature_param)
3274{
3275	int status;
3276	int i;
3277
3278	if (!chip->onfi_version ||
3279	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
3280	      & ONFI_OPT_CMD_SET_GET_FEATURES))
3281		return -EINVAL;
3282
3283	chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
3284	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3285		chip->write_byte(mtd, subfeature_param[i]);
3286
3287	status = chip->waitfunc(mtd, chip);
3288	if (status & NAND_STATUS_FAIL)
3289		return -EIO;
3290	return 0;
3291}
3292
3293/**
3294 * nand_onfi_get_features- [REPLACEABLE] get features for ONFI nand
3295 * @mtd: MTD device structure
3296 * @chip: nand chip info structure
3297 * @addr: feature address.
3298 * @subfeature_param: the subfeature parameters, a four bytes array.
3299 */
3300static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3301			int addr, uint8_t *subfeature_param)
3302{
3303	int i;
3304
3305	if (!chip->onfi_version ||
3306	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
3307	      & ONFI_OPT_CMD_SET_GET_FEATURES))
3308		return -EINVAL;
3309
3310	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
3311	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3312		*subfeature_param++ = chip->read_byte(mtd);
3313	return 0;
3314}
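/*
 * Usage sketch (editorial; the feature address is only an example): reading
 * back a four-byte ONFI feature, here the read-retry feature also used later
 * in this file, into a local buffer:
 *
 *	uint8_t val[ONFI_SUBFEATURE_PARAM_LEN] = {};
 *
 *	if (!chip->onfi_get_features(mtd, chip,
 *				     ONFI_FEATURE_ADDR_READ_RETRY, val))
 *		pr_debug("read-retry subfeature P1: %#x\n", val[0]);
 */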
3315
3316/**
3317 * nand_suspend - [MTD Interface] Suspend the NAND flash
3318 * @mtd: MTD device structure
3319 */
3320static int nand_suspend(struct mtd_info *mtd)
3321{
3322	return nand_get_device(mtd, FL_PM_SUSPENDED);
3323}
3324
3325/**
3326 * nand_resume - [MTD Interface] Resume the NAND flash
3327 * @mtd: MTD device structure
3328 */
3329static void nand_resume(struct mtd_info *mtd)
3330{
3331	struct nand_chip *chip = mtd_to_nand(mtd);
3332
3333	if (chip->state == FL_PM_SUSPENDED)
3334		nand_release_device(mtd);
3335	else
3336		pr_err("%s called for a chip which is not in suspended state\n",
3337			__func__);
3338}
3339
3340/**
3341 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
3342 *                 prevent further operations
3343 * @mtd: MTD device structure
3344 */
3345static void nand_shutdown(struct mtd_info *mtd)
3346{
3347	nand_get_device(mtd, FL_PM_SUSPENDED);
3348}
3349
3350/* Set default functions */
3351static void nand_set_defaults(struct nand_chip *chip, int busw)
3352{
3353	/* check for proper chip_delay setup, set 20us if not */
3354	if (!chip->chip_delay)
3355		chip->chip_delay = 20;
3356
3357	/* check if a user-supplied command function was given */
3358	if (chip->cmdfunc == NULL)
3359		chip->cmdfunc = nand_command;
3360
3361	/* check if a user-supplied wait function was given */
3362	if (chip->waitfunc == NULL)
3363		chip->waitfunc = nand_wait;
3364
3365	if (!chip->select_chip)
3366		chip->select_chip = nand_select_chip;
3367
3368	/* set for ONFI nand */
3369	if (!chip->onfi_set_features)
3370		chip->onfi_set_features = nand_onfi_set_features;
3371	if (!chip->onfi_get_features)
3372		chip->onfi_get_features = nand_onfi_get_features;
3373
3374	/* If called twice, pointers that depend on busw may need to be reset */
3375	if (!chip->read_byte || chip->read_byte == nand_read_byte)
3376		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
3377	if (!chip->read_word)
3378		chip->read_word = nand_read_word;
3379	if (!chip->block_bad)
3380		chip->block_bad = nand_block_bad;
3381	if (!chip->block_markbad)
3382		chip->block_markbad = nand_default_block_markbad;
3383	if (!chip->write_buf || chip->write_buf == nand_write_buf)
3384		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
3385	if (!chip->write_byte || chip->write_byte == nand_write_byte)
3386		chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
3387	if (!chip->read_buf || chip->read_buf == nand_read_buf)
3388		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
3389	if (!chip->scan_bbt)
3390		chip->scan_bbt = nand_default_bbt;
3391
3392	if (!chip->controller) {
3393		chip->controller = &chip->hwcontrol;
3394		nand_hw_control_init(chip->controller);
3395	}
3396
3397}
3398
3399/* Sanitize ONFI strings so we can safely print them */
3400static void sanitize_string(uint8_t *s, size_t len)
3401{
3402	ssize_t i;
3403
3404	/* Null terminate */
3405	s[len - 1] = 0;
3406
3407	/* Remove non printable chars */
3408	for (i = 0; i < len - 1; i++) {
3409		if (s[i] < ' ' || s[i] > 127)
3410			s[i] = '?';
3411	}
3412
3413	/* Remove trailing spaces */
3414	strim(s);
3415}
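/*
 * Example (editorial, hypothetical input): an 8-byte field containing
 * { 'K', '9', 0x01, 'X', ' ', ' ', ' ', ' ' } comes back as "K9?X": the
 * buffer is NUL-terminated at len - 1, the non-printable 0x01 becomes '?',
 * and strim() drops the trailing spaces.
 */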
3416
3417static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3418{
3419	int i;
3420	while (len--) {
3421		crc ^= *p++ << 8;
3422		for (i = 0; i < 8; i++)
3423			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
3424	}
3425
3426	return crc;
3427}
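/*
 * Editorial note: this is the bit-wise CRC-16 defined by the ONFI
 * specification for parameter pages, polynomial x^16 + x^15 + x^2 + 1
 * (0x8005), MSB first, seeded with ONFI_CRC_BASE. Validating one 256-byte
 * parameter-page copy, mirroring the callers below, looks like:
 *
 *	if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == le16_to_cpu(p->crc))
 *		break;
 *
 * where a match means that copy of the parameter page is intact.
 */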
3428
3429/* Parse the Extended Parameter Page. */
3430static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
3431		struct nand_chip *chip, struct nand_onfi_params *p)
3432{
3433	struct onfi_ext_param_page *ep;
3434	struct onfi_ext_section *s;
3435	struct onfi_ext_ecc_info *ecc;
3436	uint8_t *cursor;
3437	int ret = -EINVAL;
3438	int len;
3439	int i;
3440
3441	len = le16_to_cpu(p->ext_param_page_length) * 16;
3442	ep = kmalloc(len, GFP_KERNEL);
3443	if (!ep)
3444		return -ENOMEM;
3445
3446	/* Send our own NAND_CMD_PARAM. */
3447	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3448
3449	/* Use the Change Read Column command to skip the ONFI param pages. */
3450	chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
3451			sizeof(*p) * p->num_of_param_pages, -1);
3452
3453	/* Read out the Extended Parameter Page. */
3454	chip->read_buf(mtd, (uint8_t *)ep, len);
3455	if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
3456		!= le16_to_cpu(ep->crc))) {
3457		pr_debug("fail in the CRC.\n");
3458		goto ext_out;
3459	}
3460
3461	/*
3462	 * Check the signature.
3463	 * We do not strictly follow the ONFI spec here; this may change in the future.
3464	 */
3465	if (strncmp(ep->sig, "EPPS", 4)) {
3466		pr_debug("The signature is invalid.\n");
3467		goto ext_out;
3468	}
3469
3470	/* find the ECC section. */
3471	cursor = (uint8_t *)(ep + 1);
3472	for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
3473		s = ep->sections + i;
3474		if (s->type == ONFI_SECTION_TYPE_2)
3475			break;
3476		cursor += s->length * 16;
3477	}
3478	if (i == ONFI_EXT_SECTION_MAX) {
3479		pr_debug("We can not find the ECC section.\n");
3480		goto ext_out;
3481	}
3482
3483	/* get the info we want. */
3484	ecc = (struct onfi_ext_ecc_info *)cursor;
3485
3486	if (!ecc->codeword_size) {
3487		pr_debug("Invalid codeword size\n");
3488		goto ext_out;
3489	}
3490
3491	chip->ecc_strength_ds = ecc->ecc_bits;
3492	chip->ecc_step_ds = 1 << ecc->codeword_size;
3493	ret = 0;
3494
3495ext_out:
3496	kfree(ep);
3497	return ret;
3498}
3499
3500static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
3501{
3502	struct nand_chip *chip = mtd_to_nand(mtd);
3503	uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
3504
3505	return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
3506			feature);
3507}
3508
3509/*
3510 * Configure chip properties from Micron vendor-specific ONFI table
3511 */
3512static void nand_onfi_detect_micron(struct nand_chip *chip,
3513		struct nand_onfi_params *p)
3514{
3515	struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
3516
3517	if (le16_to_cpu(p->vendor_revision) < 1)
3518		return;
3519
3520	chip->read_retries = micron->read_retry_options;
3521	chip->setup_read_retry = nand_setup_read_retry_micron;
3522}
3523
3524/*
3525 * Check if the NAND chip is ONFI compliant; returns 1 if it is, 0 otherwise.
3526 */
3527static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
3528					int *busw)
3529{
3530	struct nand_onfi_params *p = &chip->onfi_params;
3531	int i, j;
3532	int val;
3533
3534	/* Try ONFI for unknown chip or LP */
3535	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
3536	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
3537		chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
3538		return 0;
3539
3540	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3541	for (i = 0; i < 3; i++) {
3542		for (j = 0; j < sizeof(*p); j++)
3543			((uint8_t *)p)[j] = chip->read_byte(mtd);
3544		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
3545				le16_to_cpu(p->crc)) {
3546			break;
3547		}
3548	}
3549
3550	if (i == 3) {
3551		pr_err("Could not find valid ONFI parameter page; aborting\n");
3552		return 0;
3553	}
3554
3555	/* Check version */
3556	val = le16_to_cpu(p->revision);
3557	if (val & (1 << 5))
3558		chip->onfi_version = 23;
3559	else if (val & (1 << 4))
3560		chip->onfi_version = 22;
3561	else if (val & (1 << 3))
3562		chip->onfi_version = 21;
3563	else if (val & (1 << 2))
3564		chip->onfi_version = 20;
3565	else if (val & (1 << 1))
3566		chip->onfi_version = 10;
3567
3568	if (!chip->onfi_version) {
3569		pr_info("unsupported ONFI version: %d\n", val);
3570		return 0;
3571	}
3572
3573	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3574	sanitize_string(p->model, sizeof(p->model));
3575	if (!mtd->name)
3576		mtd->name = p->model;
3577
3578	mtd->writesize = le32_to_cpu(p->byte_per_page);
3579
3580	/*
3581	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
3582	 * (don't ask me who thought of this...). MTD assumes that these
3583	 * dimensions will be power-of-2, so just truncate the remaining area.
3584	 */
3585	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3586	mtd->erasesize *= mtd->writesize;
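	/*
	 * Editorial example: a hypothetical part reporting 96 pages per block
	 * would be truncated here to 1 << (fls(96) - 1) == 64 pages, so with a
	 * 4KiB page the reported erasesize becomes 256KiB.
	 */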
3587
3588	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3589
3590	/* See erasesize comment */
3591	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3592	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3593	chip->bits_per_cell = p->bits_per_cell;
3594
3595	if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
3596		*busw = NAND_BUSWIDTH_16;
3597	else
3598		*busw = 0;
3599
3600	if (p->ecc_bits != 0xff) {
3601		chip->ecc_strength_ds = p->ecc_bits;
3602		chip->ecc_step_ds = 512;
3603	} else if (chip->onfi_version >= 21 &&
3604		(onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
3605
3606		/*
3607		 * nand_flash_detect_ext_param_page() uses the Change Read Column
3608		 * command, which may not be supported by chip->cmdfunc. So try to
3609		 * update chip->cmdfunc now. We do not replace a user-supplied
3610		 * command function.
3611		 */
3612		if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3613			chip->cmdfunc = nand_command_lp;
3614
3615		/* The Extended Parameter Page is supported since ONFI 2.1. */
3616		if (nand_flash_detect_ext_param_page(mtd, chip, p))
3617			pr_warn("Failed to detect ONFI extended param page\n");
3618	} else {
3619		pr_warn("Could not retrieve ONFI ECC requirements\n");
3620	}
3621
3622	if (p->jedec_id == NAND_MFR_MICRON)
3623		nand_onfi_detect_micron(chip, p);
3624
3625	return 1;
3626}
3627
3628/*
3629 * Check if the NAND chip is JEDEC compliant; returns 1 if it is, 0 otherwise.
3630 */
3631static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
3632					int *busw)
3633{
3634	struct nand_jedec_params *p = &chip->jedec_params;
3635	struct jedec_ecc_info *ecc;
3636	int val;
3637	int i, j;
3638
3639	/* Try JEDEC for unknown chip or LP */
3640	chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
3641	if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
3642		chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
3643		chip->read_byte(mtd) != 'C')
3644		return 0;
3645
3646	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
3647	for (i = 0; i < 3; i++) {
3648		for (j = 0; j < sizeof(*p); j++)
3649			((uint8_t *)p)[j] = chip->read_byte(mtd);
3650
3651		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
3652				le16_to_cpu(p->crc))
3653			break;
3654	}
3655
3656	if (i == 3) {
3657		pr_err("Could not find valid JEDEC parameter page; aborting\n");
3658		return 0;
3659	}
3660
3661	/* Check version */
3662	val = le16_to_cpu(p->revision);
3663	if (val & (1 << 2))
3664		chip->jedec_version = 10;
3665	else if (val & (1 << 1))
3666		chip->jedec_version = 1; /* vendor specific version */
3667
3668	if (!chip->jedec_version) {
3669		pr_info("unsupported JEDEC version: %d\n", val);
3670		return 0;
3671	}
3672
3673	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3674	sanitize_string(p->model, sizeof(p->model));
3675	if (!mtd->name)
3676		mtd->name = p->model;
3677
3678	mtd->writesize = le32_to_cpu(p->byte_per_page);
3679
3680	/* Please refer to the comment in nand_flash_detect_onfi(). */
3681	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3682	mtd->erasesize *= mtd->writesize;
3683
3684	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3685
3686	/* Please refer to the comment in nand_flash_detect_onfi(). */
3687	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3688	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3689	chip->bits_per_cell = p->bits_per_cell;
3690
3691	if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
3692		*busw = NAND_BUSWIDTH_16;
3693	else
3694		*busw = 0;
3695
3696	/* ECC info */
3697	ecc = &p->ecc_info[0];
3698
3699	if (ecc->codeword_size >= 9) {
3700		chip->ecc_strength_ds = ecc->ecc_bits;
3701		chip->ecc_step_ds = 1 << ecc->codeword_size;
3702	} else {
3703		pr_warn("Invalid codeword size\n");
3704	}
3705
3706	return 1;
3707}
3708
3709/*
3710 * nand_id_has_period - Check if an ID string has a given wraparound period
3711 * @id_data: the ID string
3712 * @arrlen: the length of the @id_data array
3713 * @period: the period of repetition
3714 *
3715 * Check if an ID string is repeated within a given sequence of bytes at
3716 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
3717 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
3718 * if the repetition has a period of @period; otherwise, returns zero.
3719 */
3720static int nand_id_has_period(u8 *id_data, int arrlen, int period)
3721{
3722	int i, j;
3723	for (i = 0; i < period; i++)
3724		for (j = i + period; j < arrlen; j += period)
3725			if (id_data[i] != id_data[j])
3726				return 0;
3727	return 1;
3728}
3729
3730/*
3731 * nand_id_len - Get the length of an ID string returned by CMD_READID
3732 * @id_data: the ID string
3733 * @arrlen: the length of the @id_data array
3734 *
3735 * Returns the length of the ID string, according to known wraparound/trailing
3736 * zero patterns. If no pattern exists, returns the length of the array.
3737 */
3738static int nand_id_len(u8 *id_data, int arrlen)
3739{
3740	int last_nonzero, period;
3741
3742	/* Find last non-zero byte */
3743	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
3744		if (id_data[last_nonzero])
3745			break;
3746
3747	/* All zeros */
3748	if (last_nonzero < 0)
3749		return 0;
3750
3751	/* Calculate wraparound period */
3752	for (period = 1; period < arrlen; period++)
3753		if (nand_id_has_period(id_data, arrlen, period))
3754			break;
3755
3756	/* There's a repeated pattern */
3757	if (period < arrlen)
3758		return period;
3759
3760	/* There are trailing zeros */
3761	if (last_nonzero < arrlen - 1)
3762		return last_nonzero + 1;
3763
3764	/* No pattern detected */
3765	return arrlen;
3766}
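
/*
 * Two illustrative (made-up) ID strings:
 *  - {0x98, 0xd1, 0x90, 0x15, 0x76, 0x98, 0xd1, 0x90} wraps around with a
 *    period of 5, so nand_id_len() returns 5;
 *  - {0xec, 0xd3, 0x51, 0x95, 0x58, 0x00, 0x00, 0x00} has no short period
 *    but three trailing zeros, so nand_id_len() also returns 5.
 */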
3767
3768/* Extract the number of bits per cell from the 3rd byte of the extended ID */
3769static int nand_get_bits_per_cell(u8 cellinfo)
3770{
3771	int bits;
3772
3773	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
3774	bits >>= NAND_CI_CELLTYPE_SHIFT;
3775	return bits + 1;
3776}
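
/*
 * Assuming the usual definitions NAND_CI_CELLTYPE_MSK == 0x0C and
 * NAND_CI_CELLTYPE_SHIFT == 2, a 3rd ID byte of 0x00 decodes to 1 bit per
 * cell (SLC), while 0x14 decodes to ((0x14 & 0x0C) >> 2) + 1 = 2 bits per
 * cell (MLC).
 */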
3777
3778/*
3779 * Many newer NAND chips share similar device ID codes, which represent the size of the
3780 * chip. The rest of the parameters must be decoded according to generic or
3781 * manufacturer-specific "extended ID" decoding patterns.
3782 */
3783static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
3784				u8 id_data[8], int *busw)
3785{
3786	int extid, id_len;
3787	/* The 3rd id byte holds MLC / multichip data */
3788	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3789	/* The 4th id byte is the important one */
3790	extid = id_data[3];
3791
3792	id_len = nand_id_len(id_data, 8);
3793
3794	/*
3795	 * Field definitions are in the following datasheets:
3796	 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
3797	 * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
3798	 * Hynix MLC   (6 byte ID): Hynix H27UBG8T2B (p.22)
3799	 *
3800	 * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
3801	 * ID to decide what to do.
3802	 */
3803	if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
3804			!nand_is_slc(chip) && id_data[5] != 0x00) {
3805		/* Calc pagesize */
3806		mtd->writesize = 2048 << (extid & 0x03);
3807		extid >>= 2;
3808		/* Calc oobsize */
3809		switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
3810		case 1:
3811			mtd->oobsize = 128;
3812			break;
3813		case 2:
3814			mtd->oobsize = 218;
3815			break;
3816		case 3:
3817			mtd->oobsize = 400;
3818			break;
3819		case 4:
3820			mtd->oobsize = 436;
3821			break;
3822		case 5:
3823			mtd->oobsize = 512;
3824			break;
3825		case 6:
3826			mtd->oobsize = 640;
3827			break;
3828		case 7:
3829		default: /* Other cases are "reserved" (unknown) */
3830			mtd->oobsize = 1024;
3831			break;
3832		}
3833		extid >>= 2;
3834		/* Calc blocksize */
3835		mtd->erasesize = (128 * 1024) <<
3836			(((extid >> 1) & 0x04) | (extid & 0x03));
3837		*busw = 0;
3838	} else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
3839			!nand_is_slc(chip)) {
3840		unsigned int tmp;
3841
3842		/* Calc pagesize */
3843		mtd->writesize = 2048 << (extid & 0x03);
3844		extid >>= 2;
3845		/* Calc oobsize */
3846		switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
3847		case 0:
3848			mtd->oobsize = 128;
3849			break;
3850		case 1:
3851			mtd->oobsize = 224;
3852			break;
3853		case 2:
3854			mtd->oobsize = 448;
3855			break;
3856		case 3:
3857			mtd->oobsize = 64;
3858			break;
3859		case 4:
3860			mtd->oobsize = 32;
3861			break;
3862		case 5:
3863			mtd->oobsize = 16;
3864			break;
3865		default:
3866			mtd->oobsize = 640;
3867			break;
3868		}
3869		extid >>= 2;
3870		/* Calc blocksize */
3871		tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
3872		if (tmp < 0x03)
3873			mtd->erasesize = (128 * 1024) << tmp;
3874		else if (tmp == 0x03)
3875			mtd->erasesize = 768 * 1024;
3876		else
3877			mtd->erasesize = (64 * 1024) << tmp;
3878		*busw = 0;
3879	} else {
3880		/* Calc pagesize */
3881		mtd->writesize = 1024 << (extid & 0x03);
3882		extid >>= 2;
3883		/* Calc oobsize */
3884		mtd->oobsize = (8 << (extid & 0x01)) *
3885			(mtd->writesize >> 9);
3886		extid >>= 2;
3887		/* Calc blocksize. Blocksize is multiples of 64KiB */
3888		mtd->erasesize = (64 * 1024) << (extid & 0x03);
3889		extid >>= 2;
3890		/* Get buswidth information */
3891		*busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
3892
3893		/*
3894		 * Toshiba 24nm raw SLC (i.e., not BENAND) chips have 32B OOB per
3895		 * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
3896		 * follows:
3897		 * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
3898		 *                         110b -> 24nm
3899		 * - ID byte 5, bit[7]:    1 -> BENAND, 0 -> raw SLC
3900		 */
3901		if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
3902				nand_is_slc(chip) &&
3903				(id_data[5] & 0x7) == 0x6 /* 24nm */ &&
3904				!(id_data[4] & 0x80) /* !BENAND */) {
3905			mtd->oobsize = 32 * mtd->writesize >> 9;
3906		}
3907
3908	}
3909}
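
/*
 * Worked example for the generic (non-Samsung/Hynix MLC) branch above, with
 * a hypothetical 4th ID byte of 0x95: page size = 1024 << 1 = 2048 bytes,
 * OOB size = (8 << 1) * (2048 >> 9) = 64 bytes, block size = 64 KiB << 1 =
 * 128 KiB, and an 8-bit bus (bit 6 of the extended ID is 0).
 */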
3910
3911/*
3912 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3913 * decodes a matching ID table entry and assigns the MTD size parameters for
3914 * the chip.
3915 */
3916static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
3917				struct nand_flash_dev *type, u8 id_data[8],
3918				int *busw)
3919{
3920	int maf_id = id_data[0];
3921
3922	mtd->erasesize = type->erasesize;
3923	mtd->writesize = type->pagesize;
3924	mtd->oobsize = mtd->writesize / 32;
3925	*busw = type->options & NAND_BUSWIDTH_16;
3926
3927	/* All legacy ID NAND are small-page, SLC */
3928	chip->bits_per_cell = 1;
3929
3930	/*
3931	 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
3932	 * some Spansion chips have an erasesize that conflicts with the size
3933	 * listed in the nand_ids table.
3934	 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
3935	 */
3936	if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
3937			&& id_data[6] == 0x00 && id_data[7] == 0x00
3938			&& mtd->writesize == 512) {
3939		mtd->erasesize = 128 * 1024;
3940		mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
3941	}
3942}
3943
3944/*
3945 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
3946 * heuristic patterns using various detected parameters (e.g., manufacturer,
3947 * page size, cell-type information).
3948 */
3949static void nand_decode_bbm_options(struct mtd_info *mtd,
3950				    struct nand_chip *chip, u8 id_data[8])
3951{
3952	int maf_id = id_data[0];
3953
3954	/* Set the bad block position */
3955	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3956		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3957	else
3958		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3959
3960	/*
3961	 * Bad block marker is stored in the last page of each block on Samsung
3962	 * and Hynix MLC devices; stored in first two pages of each block on
3963	 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
3964	 * AMD/Spansion, and Macronix.  All others scan only the first page.
3965	 */
3966	if (!nand_is_slc(chip) &&
3967			(maf_id == NAND_MFR_SAMSUNG ||
3968			 maf_id == NAND_MFR_HYNIX))
3969		chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
3970	else if ((nand_is_slc(chip) &&
3971				(maf_id == NAND_MFR_SAMSUNG ||
3972				 maf_id == NAND_MFR_HYNIX ||
3973				 maf_id == NAND_MFR_TOSHIBA ||
3974				 maf_id == NAND_MFR_AMD ||
3975				 maf_id == NAND_MFR_MACRONIX)) ||
3976			(mtd->writesize == 2048 &&
3977			 maf_id == NAND_MFR_MICRON))
3978		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
3979}
3980
3981static inline bool is_full_id_nand(struct nand_flash_dev *type)
3982{
3983	return type->id_len;
3984}
3985
3986static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
3987		   struct nand_flash_dev *type, u8 *id_data, int *busw)
3988{
3989	if (!strncmp(type->id, id_data, type->id_len)) {
3990		mtd->writesize = type->pagesize;
3991		mtd->erasesize = type->erasesize;
3992		mtd->oobsize = type->oobsize;
3993
3994		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3995		chip->chipsize = (uint64_t)type->chipsize << 20;
3996		chip->options |= type->options;
3997		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
3998		chip->ecc_step_ds = NAND_ECC_STEP(type);
3999		chip->onfi_timing_mode_default =
4000					type->onfi_timing_mode_default;
4001
4002		*busw = type->options & NAND_BUSWIDTH_16;
4003
4004		if (!mtd->name)
4005			mtd->name = type->name;
4006
4007		return true;
4008	}
4009	return false;
4010}
4011
4012/*
4013 * Get the flash and manufacturer ID and look up whether the type is supported.
4014 */
4015static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip,
4016			       int *maf_id, int *dev_id,
4017			       struct nand_flash_dev *type)
4018{
4019	int busw;
4020	int i, maf_idx;
4021	u8 id_data[8];
4022
4023	/*
4024	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4025	 * after power-up.
4026	 */
4027	nand_reset(chip, 0);
4028
4029	/* Select the device */
4030	chip->select_chip(mtd, 0);
4031
4032	/* Send the command for reading device ID */
4033	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4034
4035	/* Read manufacturer and device IDs */
4036	*maf_id = chip->read_byte(mtd);
4037	*dev_id = chip->read_byte(mtd);
4038
4039	/*
4040	 * Try again to make sure, as on some systems the bus-hold or other
4041	 * interface concerns can cause random data to appear that looks
4042	 * like a plausible NAND flash. If the two results do
4043	 * not match, ignore the device completely.
4044	 */
4045
4046	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4047
4048	/* Read entire ID string */
4049	for (i = 0; i < 8; i++)
4050		id_data[i] = chip->read_byte(mtd);
4051
4052	if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
4053		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4054			*maf_id, *dev_id, id_data[0], id_data[1]);
4055		return -ENODEV;
4056	}
4057
4058	if (!type)
4059		type = nand_flash_ids;
4060
4061	for (; type->name != NULL; type++) {
4062		if (is_full_id_nand(type)) {
4063			if (find_full_id_nand(mtd, chip, type, id_data, &busw))
4064				goto ident_done;
4065		} else if (*dev_id == type->dev_id) {
4066			break;
4067		}
4068	}
4069
4070	chip->onfi_version = 0;
4071	if (!type->name || !type->pagesize) {
4072		/* Check if the chip is ONFI compliant */
4073		if (nand_flash_detect_onfi(mtd, chip, &busw))
4074			goto ident_done;
4075
4076		/* Check if the chip is JEDEC compliant */
4077		if (nand_flash_detect_jedec(mtd, chip, &busw))
4078			goto ident_done;
4079	}
4080
4081	if (!type->name)
4082		return -ENODEV;
4083
4084	if (!mtd->name)
4085		mtd->name = type->name;
4086
4087	chip->chipsize = (uint64_t)type->chipsize << 20;
4088
4089	if (!type->pagesize) {
4090		/* Decode parameters from extended ID */
4091		nand_decode_ext_id(mtd, chip, id_data, &busw);
4092	} else {
4093		nand_decode_id(mtd, chip, type, id_data, &busw);
4094	}
4095	/* Get chip options */
4096	chip->options |= type->options;
4097
4098	/*
4099	 * Clear the Samsung large-page options if this is not a Samsung
4100	 * device, but only for chips decoded via the extended ID.
4101	 */
4102	if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
4103		chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
4104ident_done:
4105
4106	/* Try to identify manufacturer */
4107	for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
4108		if (nand_manuf_ids[maf_idx].id == *maf_id)
4109			break;
4110	}
4111
4112	if (chip->options & NAND_BUSWIDTH_AUTO) {
4113		WARN_ON(chip->options & NAND_BUSWIDTH_16);
4114		chip->options |= busw;
4115		nand_set_defaults(chip, busw);
4116	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4117		/*
4118		 * Check if the bus width is correct. Hardware drivers should
4119		 * set up the chip correctly!
4120		 */
4121		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4122			*maf_id, *dev_id);
4123		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
4124		pr_warn("bus width %d instead of %d bits\n",
4125			   (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
4126			   busw ? 16 : 8);
4127		return -EINVAL;
4128	}
4129
4130	nand_decode_bbm_options(mtd, chip, id_data);
4131
4132	/* Calculate the address shift from the page size */
4133	chip->page_shift = ffs(mtd->writesize) - 1;
4134	/* Convert chipsize to number of pages per chip -1 */
4135	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
4136
4137	chip->bbt_erase_shift = chip->phys_erase_shift =
4138		ffs(mtd->erasesize) - 1;
4139	if (chip->chipsize & 0xffffffff)
4140		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
4141	else {
4142		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4143		chip->chip_shift += 32 - 1;
4144	}
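
	/*
	 * For a hypothetical chip with 2048-byte pages, 128KiB blocks and a
	 * 256MiB capacity, the above yields page_shift = 11,
	 * pagemask = 0x1ffff, phys_erase_shift = 17 and chip_shift = 28.
	 */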
4145
4146	chip->badblockbits = 8;
4147	chip->erase = single_erase;
4148
4149	/* Do not replace user supplied command function! */
4150	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
4151		chip->cmdfunc = nand_command_lp;
4152
4153	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4154		*maf_id, *dev_id);
4155
4156	if (chip->onfi_version)
4157		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4158				chip->onfi_params.model);
4159	else if (chip->jedec_version)
4160		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4161				chip->jedec_params.model);
4162	else
4163		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4164				type->name);
4165
4166	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4167		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4168		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4169	return 0;
4170}
4171
4172static const char * const nand_ecc_modes[] = {
4173	[NAND_ECC_NONE]		= "none",
4174	[NAND_ECC_SOFT]		= "soft",
4175	[NAND_ECC_HW]		= "hw",
4176	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
4177	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
4178};
4179
4180static int of_get_nand_ecc_mode(struct device_node *np)
4181{
4182	const char *pm;
4183	int err, i;
4184
4185	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4186	if (err < 0)
4187		return err;
4188
4189	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4190		if (!strcasecmp(pm, nand_ecc_modes[i]))
4191			return i;
4192
4193	/*
4194	 * For backward compatibility we support a few obsolete values that no
4195	 * longer map to nand_ecc_modes_t entries (they were merged with
4196	 * other enums).
4197	 */
4198	if (!strcasecmp(pm, "soft_bch"))
4199		return NAND_ECC_SOFT;
4200
4201	return -ENODEV;
4202}
4203
4204static const char * const nand_ecc_algos[] = {
4205	[NAND_ECC_HAMMING]	= "hamming",
4206	[NAND_ECC_BCH]		= "bch",
4207};
4208
4209static int of_get_nand_ecc_algo(struct device_node *np)
4210{
4211	const char *pm;
4212	int err, i;
4213
4214	err = of_property_read_string(np, "nand-ecc-algo", &pm);
4215	if (!err) {
4216		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4217			if (!strcasecmp(pm, nand_ecc_algos[i]))
4218				return i;
4219		return -ENODEV;
4220	}
4221
4222	/*
4223	 * For backward compatibility we also read "nand-ecc-mode", checking
4224	 * for some obsolete values that used to specify the ECC algorithm.
4225	 */
4226	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4227	if (err < 0)
4228		return err;
4229
4230	if (!strcasecmp(pm, "soft"))
4231		return NAND_ECC_HAMMING;
4232	else if (!strcasecmp(pm, "soft_bch"))
4233		return NAND_ECC_BCH;
4234
4235	return -ENODEV;
4236}
4237
4238static int of_get_nand_ecc_step_size(struct device_node *np)
4239{
4240	int ret;
4241	u32 val;
4242
4243	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4244	return ret ? ret : val;
4245}
4246
4247static int of_get_nand_ecc_strength(struct device_node *np)
4248{
4249	int ret;
4250	u32 val;
4251
4252	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4253	return ret ? ret : val;
4254}
4255
4256static int of_get_nand_bus_width(struct device_node *np)
4257{
4258	u32 val;
4259
4260	if (of_property_read_u32(np, "nand-bus-width", &val))
4261		return 8;
4262
4263	switch (val) {
4264	case 8:
4265	case 16:
4266		return val;
4267	default:
4268		return -EIO;
4269	}
4270}
4271
4272static bool of_get_nand_on_flash_bbt(struct device_node *np)
4273{
4274	return of_property_read_bool(np, "nand-on-flash-bbt");
4275}
4276
4277static int nand_dt_init(struct nand_chip *chip)
4278{
4279	struct device_node *dn = nand_get_flash_node(chip);
4280	int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4281
4282	if (!dn)
4283		return 0;
4284
4285	if (of_get_nand_bus_width(dn) == 16)
4286		chip->options |= NAND_BUSWIDTH_16;
4287
4288	if (of_get_nand_on_flash_bbt(dn))
4289		chip->bbt_options |= NAND_BBT_USE_FLASH;
4290
4291	ecc_mode = of_get_nand_ecc_mode(dn);
4292	ecc_algo = of_get_nand_ecc_algo(dn);
4293	ecc_strength = of_get_nand_ecc_strength(dn);
4294	ecc_step = of_get_nand_ecc_step_size(dn);
4295
4296	if ((ecc_step >= 0 && !(ecc_strength >= 0)) ||
4297	    (!(ecc_step >= 0) && ecc_strength >= 0)) {
4298		pr_err("must set both strength and step size in DT\n");
4299		return -EINVAL;
4300	}
4301
4302	if (ecc_mode >= 0)
4303		chip->ecc.mode = ecc_mode;
4304
4305	if (ecc_algo >= 0)
4306		chip->ecc.algo = ecc_algo;
4307
4308	if (ecc_strength >= 0)
4309		chip->ecc.strength = ecc_strength;
4310
4311	if (ecc_step > 0)
4312		chip->ecc.size = ecc_step;
4313
4314	if (of_property_read_bool(dn, "nand-ecc-maximize"))
4315		chip->ecc.options |= NAND_ECC_MAXIMIZE;
4316
4317	return 0;
4318}
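
/*
 * An illustrative (made-up) device-tree fragment exercising the properties
 * parsed by nand_dt_init() and its helpers above; the node name is
 * hypothetical:
 *
 *	nand@0 {
 *		nand-bus-width = <16>;
 *		nand-on-flash-bbt;
 *		nand-ecc-mode = "hw";
 *		nand-ecc-algo = "bch";
 *		nand-ecc-strength = <8>;
 *		nand-ecc-step-size = <512>;
 *	};
 *
 * nand-ecc-strength and nand-ecc-step-size must be given together, as
 * enforced above; nand-ecc-maximize may also be set to let the core pick
 * the strongest configuration the OOB area allows.
 */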
4319
4320/**
4321 * nand_scan_ident - [NAND Interface] Scan for the NAND device
4322 * @mtd: MTD device structure
4323 * @maxchips: number of chips to scan for
4324 * @table: alternative NAND ID table
4325 *
4326 * This is the first phase of the normal nand_scan() function. It reads the
4327 * flash ID and sets up MTD fields accordingly.
4328 *
4329 */
4330int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4331		    struct nand_flash_dev *table)
4332{
4333	int i, nand_maf_id, nand_dev_id;
4334	struct nand_chip *chip = mtd_to_nand(mtd);
4335	int ret;
4336
4337	ret = nand_dt_init(chip);
4338	if (ret)
4339		return ret;
4340
4341	if (!mtd->name && mtd->dev.parent)
4342		mtd->name = dev_name(mtd->dev.parent);
4343
4344	if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
4345		/*
4346		 * The default ->select_chip() and ->cmdfunc() implementations
4347		 * both expect ->cmd_ctrl() to be populated, so we need to
4348		 * check that this is the case.
4349		 */
4350		pr_err("chip.cmd_ctrl() callback is not provided\n");
4351		return -EINVAL;
4352	}
4353	/* Set the default functions */
4354	nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
4355
4356	/* Read the flash type */
4357	ret = nand_get_flash_type(mtd, chip, &nand_maf_id, &nand_dev_id, table);
4358	if (ret) {
4359		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
4360			pr_warn("No NAND device found\n");
4361		chip->select_chip(mtd, -1);
4362		return ret;
4363	}
4364
4365	/* Initialize the ->data_interface field. */
4366	ret = nand_init_data_interface(chip);
4367	if (ret)
4368		return ret;
4369
4370	/*
4371	 * Setup the data interface correctly on the chip and controller side.
4372	 * This explicit call to nand_setup_data_interface() is only required
4373	 * for the first die, because nand_reset() has been called before
4374	 * ->data_interface and ->default_onfi_timing_mode were set.
4375	 * For the other dies, nand_reset() will automatically switch to the
4376	 * best mode for us.
4377	 */
4378	ret = nand_setup_data_interface(chip);
4379	if (ret)
4380		return ret;
4381
4382	chip->select_chip(mtd, -1);
4383
4384	/* Check for a chip array */
4385	for (i = 1; i < maxchips; i++) {
4386		/* See comment in nand_get_flash_type for reset */
4387		nand_reset(chip, i);
4388
4389		chip->select_chip(mtd, i);
4390		/* Send the command for reading device ID */
4391		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4392		/* Read manufacturer and device IDs */
4393		if (nand_maf_id != chip->read_byte(mtd) ||
4394		    nand_dev_id != chip->read_byte(mtd)) {
4395			chip->select_chip(mtd, -1);
4396			break;
4397		}
4398		chip->select_chip(mtd, -1);
4399	}
4400	if (i > 1)
4401		pr_info("%d chips detected\n", i);
4402
4403	/* Store the number of chips and calc total size for mtd */
4404	chip->numchips = i;
4405	mtd->size = i * chip->chipsize;
4406
4407	return 0;
4408}
4409EXPORT_SYMBOL(nand_scan_ident);
4410
4411static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
4412{
4413	struct nand_chip *chip = mtd_to_nand(mtd);
4414	struct nand_ecc_ctrl *ecc = &chip->ecc;
4415
4416	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
4417		return -EINVAL;
4418
4419	switch (ecc->algo) {
4420	case NAND_ECC_HAMMING:
4421		ecc->calculate = nand_calculate_ecc;
4422		ecc->correct = nand_correct_data;
4423		ecc->read_page = nand_read_page_swecc;
4424		ecc->read_subpage = nand_read_subpage;
4425		ecc->write_page = nand_write_page_swecc;
4426		ecc->read_page_raw = nand_read_page_raw;
4427		ecc->write_page_raw = nand_write_page_raw;
4428		ecc->read_oob = nand_read_oob_std;
4429		ecc->write_oob = nand_write_oob_std;
4430		if (!ecc->size)
4431			ecc->size = 256;
4432		ecc->bytes = 3;
4433		ecc->strength = 1;
4434		return 0;
4435	case NAND_ECC_BCH:
4436		if (!mtd_nand_has_bch()) {
4437			WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
4438			return -EINVAL;
4439		}
4440		ecc->calculate = nand_bch_calculate_ecc;
4441		ecc->correct = nand_bch_correct_data;
4442		ecc->read_page = nand_read_page_swecc;
4443		ecc->read_subpage = nand_read_subpage;
4444		ecc->write_page = nand_write_page_swecc;
4445		ecc->read_page_raw = nand_read_page_raw;
4446		ecc->write_page_raw = nand_write_page_raw;
4447		ecc->read_oob = nand_read_oob_std;
4448		ecc->write_oob = nand_write_oob_std;
4449
4450		/*
4451		 * Board driver should supply ecc.size and ecc.strength
4452		 * values to select how many bits are correctable.
4453		 * Otherwise, default to 4 bits for large page devices.
4454		 */
4455		if (!ecc->size && (mtd->oobsize >= 64)) {
4456			ecc->size = 512;
4457			ecc->strength = 4;
4458		}
4459
4460		/*
4461		 * If no ECC placement scheme was provided, pick the default
4462		 * large-page one.
4463		 */
4464		if (!mtd->ooblayout) {
4465			/* handle large page devices only */
4466			if (mtd->oobsize < 64) {
4467				WARN(1, "OOB layout is required when using software BCH on small pages\n");
4468				return -EINVAL;
4469			}
4470
4471			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4472
4473		}
4474
4475		/*
4476		 * We can only maximize ECC config when the default layout is
4477		 * used, otherwise we don't know how many bytes can really be
4478		 * used.
4479		 */
4480		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
4481		    ecc->options & NAND_ECC_MAXIMIZE) {
4482			int steps, bytes;
4483
4484			/* Always prefer 1k ECC steps over 512-byte ones */
4485			ecc->size = 1024;
4486			steps = mtd->writesize / ecc->size;
4487
4488			/* Reserve 2 bytes for the BBM */
4489			bytes = (mtd->oobsize - 2) / steps;
4490			ecc->strength = bytes * 8 / fls(8 * ecc->size);
4491		}
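
		/*
		 * For example (hypothetical geometry): a 4096-byte page with
		 * a 224-byte OOB gives steps = 4, bytes = (224 - 2) / 4 = 55
		 * and strength = 55 * 8 / fls(8192) = 440 / 14 = 31 bits per
		 * 1024-byte step.
		 */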
4492
4493		/* See nand_bch_init() for details. */
4494		ecc->bytes = 0;
4495		ecc->priv = nand_bch_init(mtd);
4496		if (!ecc->priv) {
4497			WARN(1, "BCH ECC initialization failed!\n");
4498			return -EINVAL;
4499		}
4500		return 0;
4501	default:
4502		WARN(1, "Unsupported ECC algorithm!\n");
4503		return -EINVAL;
4504	}
4505}
4506
4507/*
4508 * Check if the chip configuration meets the datasheet requirements.
4509 *
4510 * If our configuration corrects A bits per B bytes and the minimum
4511 * required correction level is X bits per Y bytes, then we must ensure
4512 * both of the following are true:
4513 *
4514 * (1) A / B >= X / Y
4515 * (2) A >= X
4516 *
4517 * Requirement (1) ensures we can correct for the required bitflip density.
4518 * Requirement (2) ensures we can correct even when all bitflips are clumped
4519 * in the same sector.
4520 */
4521static bool nand_ecc_strength_good(struct mtd_info *mtd)
4522{
4523	struct nand_chip *chip = mtd_to_nand(mtd);
4524	struct nand_ecc_ctrl *ecc = &chip->ecc;
4525	int corr, ds_corr;
4526
4527	if (ecc->size == 0 || chip->ecc_step_ds == 0)
4528		/* Not enough information */
4529		return true;
4530
4531	/*
4532	 * We get the number of corrected bits per page to compare
4533	 * the correction density.
4534	 */
4535	corr = (mtd->writesize * ecc->strength) / ecc->size;
4536	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
4537
4538	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
4539}
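
/*
 * For example (hypothetical numbers): a chip whose datasheet requires 8 bits
 * per 512 bytes (ecc_strength_ds = 8, ecc_step_ds = 512) is satisfied by a
 * configuration correcting 16 bits per 1024 bytes, since on a 4096-byte page
 * both sides of (1) evaluate to 64 corrected bits per page and 16 >= 8
 * satisfies (2). A 4-bit per 512-byte configuration would fail both checks.
 */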
4540
4541static bool invalid_ecc_page_accessors(struct nand_chip *chip)
4542{
4543	struct nand_ecc_ctrl *ecc = &chip->ecc;
4544
4545	if (nand_standard_page_accessors(ecc))
4546		return false;
4547
4548	/*
4549	 * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
4550	 * controller driver implements all the page accessors because
4551	 * default helpers are not suitable when the core does not
4552	 * send the READ0/PAGEPROG commands.
4553	 */
4554	return (!ecc->read_page || !ecc->write_page ||
4555		!ecc->read_page_raw || !ecc->write_page_raw ||
4556		(NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
4557		(NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
4558		 ecc->hwctl && ecc->calculate));
4559}
4560
4561/**
4562 * nand_scan_tail - [NAND Interface] Scan for the NAND device
4563 * @mtd: MTD device structure
4564 *
4565 * This is the second phase of the normal nand_scan() function. It fills out
4566 * all the uninitialized function pointers with the defaults and scans for a
4567 * bad block table if appropriate.
4568 */
4569int nand_scan_tail(struct mtd_info *mtd)
4570{
4571	struct nand_chip *chip = mtd_to_nand(mtd);
4572	struct nand_ecc_ctrl *ecc = &chip->ecc;
4573	struct nand_buffers *nbuf;
4574	int ret;
4575
4576	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
4577	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
4578		   !(chip->bbt_options & NAND_BBT_USE_FLASH)))
4579		return -EINVAL;
4580
4581	if (invalid_ecc_page_accessors(chip)) {
4582		pr_err("Invalid ECC page accessors setup\n");
4583		return -EINVAL;
4584	}
4585
4586	if (!(chip->options & NAND_OWN_BUFFERS)) {
4587		nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
4588				+ mtd->oobsize * 3, GFP_KERNEL);
4589		if (!nbuf)
4590			return -ENOMEM;
4591		nbuf->ecccalc = (uint8_t *)(nbuf + 1);
4592		nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
4593		nbuf->databuf = nbuf->ecccode + mtd->oobsize;
4594
4595		chip->buffers = nbuf;
4596	} else {
4597		if (!chip->buffers)
4598			return -ENOMEM;
4599	}
4600
4601	/* Set the internal oob buffer location, just after the page data */
4602	chip->oob_poi = chip->buffers->databuf + mtd->writesize;
4603
4604	/*
4605	 * If no default placement scheme is given, select an appropriate one.
4606	 */
4607	if (!mtd->ooblayout &&
4608	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
4609		switch (mtd->oobsize) {
4610		case 8:
4611		case 16:
4612			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
4613			break;
4614		case 64:
4615		case 128:
4616			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4617			break;
4618		default:
4619			WARN(1, "No oob scheme defined for oobsize %d\n",
4620				mtd->oobsize);
4621			ret = -EINVAL;
4622			goto err_free;
4623		}
4624	}
4625
4626	if (!chip->write_page)
4627		chip->write_page = nand_write_page;
4628
4629	/*
4630	 * Check the ECC mode; if 3-byte/512-byte hardware ECC is selected
4631	 * and we have a 256-byte page size, fall back to software ECC.
4632	 */
4633
4634	switch (ecc->mode) {
4635	case NAND_ECC_HW_OOB_FIRST:
4636		/* Similar to NAND_ECC_HW, but a separate read_page handle */
4637		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
4638			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4639			ret = -EINVAL;
4640			goto err_free;
4641		}
4642		if (!ecc->read_page)
4643			ecc->read_page = nand_read_page_hwecc_oob_first;
4644
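		/* fall through */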
4645	case NAND_ECC_HW:
4646		/* Use standard hwecc read page function? */
4647		if (!ecc->read_page)
4648			ecc->read_page = nand_read_page_hwecc;
4649		if (!ecc->write_page)
4650			ecc->write_page = nand_write_page_hwecc;
4651		if (!ecc->read_page_raw)
4652			ecc->read_page_raw = nand_read_page_raw;
4653		if (!ecc->write_page_raw)
4654			ecc->write_page_raw = nand_write_page_raw;
4655		if (!ecc->read_oob)
4656			ecc->read_oob = nand_read_oob_std;
4657		if (!ecc->write_oob)
4658			ecc->write_oob = nand_write_oob_std;
4659		if (!ecc->read_subpage)
4660			ecc->read_subpage = nand_read_subpage;
4661		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
4662			ecc->write_subpage = nand_write_subpage_hwecc;
4663
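		/* fall through */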
4664	case NAND_ECC_HW_SYNDROME:
4665		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
4666		    (!ecc->read_page ||
4667		     ecc->read_page == nand_read_page_hwecc ||
4668		     !ecc->write_page ||
4669		     ecc->write_page == nand_write_page_hwecc)) {
4670			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4671			ret = -EINVAL;
4672			goto err_free;
4673		}
4674		/* Use standard syndrome read/write page function? */
4675		if (!ecc->read_page)
4676			ecc->read_page = nand_read_page_syndrome;
4677		if (!ecc->write_page)
4678			ecc->write_page = nand_write_page_syndrome;
4679		if (!ecc->read_page_raw)
4680			ecc->read_page_raw = nand_read_page_raw_syndrome;
4681		if (!ecc->write_page_raw)
4682			ecc->write_page_raw = nand_write_page_raw_syndrome;
4683		if (!ecc->read_oob)
4684			ecc->read_oob = nand_read_oob_syndrome;
4685		if (!ecc->write_oob)
4686			ecc->write_oob = nand_write_oob_syndrome;
4687
4688		if (mtd->writesize >= ecc->size) {
4689			if (!ecc->strength) {
4690				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
4691				ret = -EINVAL;
4692				goto err_free;
4693			}
4694			break;
4695		}
4696		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
4697			ecc->size, mtd->writesize);
4698		ecc->mode = NAND_ECC_SOFT;
4699		ecc->algo = NAND_ECC_HAMMING;
4700
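		/* fall through */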
4701	case NAND_ECC_SOFT:
4702		ret = nand_set_ecc_soft_ops(mtd);
4703		if (ret) {
4704			ret = -EINVAL;
4705			goto err_free;
4706		}
4707		break;
4708
4709	case NAND_ECC_NONE:
4710		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
4711		ecc->read_page = nand_read_page_raw;
4712		ecc->write_page = nand_write_page_raw;
4713		ecc->read_oob = nand_read_oob_std;
4714		ecc->read_page_raw = nand_read_page_raw;
4715		ecc->write_page_raw = nand_write_page_raw;
4716		ecc->write_oob = nand_write_oob_std;
4717		ecc->size = mtd->writesize;
4718		ecc->bytes = 0;
4719		ecc->strength = 0;
4720		break;
4721
4722	default:
4723		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
4724		ret = -EINVAL;
4725		goto err_free;
4726	}
4727
4728	/* For many systems, the standard OOB write also works for raw */
4729	if (!ecc->read_oob_raw)
4730		ecc->read_oob_raw = ecc->read_oob;
4731	if (!ecc->write_oob_raw)
4732		ecc->write_oob_raw = ecc->write_oob;
4733
4734	/* propagate ecc info to mtd_info */
4735	mtd->ecc_strength = ecc->strength;
4736	mtd->ecc_step_size = ecc->size;
4737
4738	/*
4739	 * Set the number of read / write steps for one page depending on ECC
4740	 * mode.
4741	 */
4742	ecc->steps = mtd->writesize / ecc->size;
4743	if (ecc->steps * ecc->size != mtd->writesize) {
4744		WARN(1, "Invalid ECC parameters\n");
4745		ret = -EINVAL;
4746		goto err_free;
4747	}
4748	ecc->total = ecc->steps * ecc->bytes;
4749
4750	/*
4751	 * The number of bytes available for a client to place data into
4752	 * the out of band area.
4753	 */
4754	ret = mtd_ooblayout_count_freebytes(mtd);
4755	if (ret < 0)
4756		ret = 0;
4757
4758	mtd->oobavail = ret;
4759
4760	/* ECC sanity check: warn if it's too weak */
4761	if (!nand_ecc_strength_good(mtd))
4762		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
4763			mtd->name);
4764
4765	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
4766	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
4767		switch (ecc->steps) {
4768		case 2:
4769			mtd->subpage_sft = 1;
4770			break;
4771		case 4:
4772		case 8:
4773		case 16:
4774			mtd->subpage_sft = 2;
4775			break;
4776		}
4777	}
4778	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
4779
4780	/* Initialize state */
4781	chip->state = FL_READY;
4782
4783	/* Invalidate the pagebuffer reference */
4784	chip->pagebuf = -1;
4785
4786	/* Large page NAND with SOFT_ECC should support subpage reads */
4787	switch (ecc->mode) {
4788	case NAND_ECC_SOFT:
4789		if (chip->page_shift > 9)
4790			chip->options |= NAND_SUBPAGE_READ;
4791		break;
4792
4793	default:
4794		break;
4795	}
4796
4797	/* Fill in remaining MTD driver data */
4798	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
4799	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
4800						MTD_CAP_NANDFLASH;
4801	mtd->_erase = nand_erase;
4802	mtd->_point = NULL;
4803	mtd->_unpoint = NULL;
4804	mtd->_read = nand_read;
4805	mtd->_write = nand_write;
4806	mtd->_panic_write = panic_nand_write;
4807	mtd->_read_oob = nand_read_oob;
4808	mtd->_write_oob = nand_write_oob;
4809	mtd->_sync = nand_sync;
4810	mtd->_lock = NULL;
4811	mtd->_unlock = NULL;
4812	mtd->_suspend = nand_suspend;
4813	mtd->_resume = nand_resume;
4814	mtd->_reboot = nand_shutdown;
4815	mtd->_block_isreserved = nand_block_isreserved;
4816	mtd->_block_isbad = nand_block_isbad;
4817	mtd->_block_markbad = nand_block_markbad;
4818	mtd->writebufsize = mtd->writesize;
4819
4820	/*
4821	 * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
4822	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
4823	 * properly set.
4824	 */
4825	if (!mtd->bitflip_threshold)
4826		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
4827
4828	/* Check, if we should skip the bad block table scan */
4829	if (chip->options & NAND_SKIP_BBTSCAN)
4830		return 0;
4831
4832	/* Build bad block table */
4833	return chip->scan_bbt(mtd);
4834err_free:
4835	if (!(chip->options & NAND_OWN_BUFFERS))
4836		kfree(chip->buffers);
4837	return ret;
4838}
4839EXPORT_SYMBOL(nand_scan_tail);
4840
4841/*
4842 * is_module_text_address() isn't exported, and it's mostly a pointless
4843 * test if this is a module _anyway_ -- they'd have to try _really_ hard
4844 * to call us from in-kernel code if the core NAND support is modular.
4845 */
4846#ifdef MODULE
4847#define caller_is_module() (1)
4848#else
4849#define caller_is_module() \
4850	is_module_text_address((unsigned long)__builtin_return_address(0))
4851#endif
4852
4853/**
4854 * nand_scan - [NAND Interface] Scan for the NAND device
4855 * @mtd: MTD device structure
4856 * @maxchips: number of chips to scan for
4857 *
4858 * This fills out all the uninitialized function pointers with the defaults.
4859 * The flash ID is read and the mtd/chip structures are filled with the
4860 * appropriate values.
4861 */
4862int nand_scan(struct mtd_info *mtd, int maxchips)
4863{
4864	int ret;
4865
4866	ret = nand_scan_ident(mtd, maxchips, NULL);
4867	if (!ret)
4868		ret = nand_scan_tail(mtd);
4869	return ret;
4870}
4871EXPORT_SYMBOL(nand_scan);
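
#if 0
/*
 * A minimal sketch (not built, purely illustrative) of how a controller
 * driver might use the two-phase scan exported above: identify the chip,
 * adjust the ECC configuration based on the detected geometry, then finish
 * with nand_scan_tail(). All my_* names are hypothetical, and the chip's
 * controller callbacks (->cmd_ctrl(), ->read_byte(), ...) are assumed to
 * have been set up before this is called.
 */
static int my_nand_probe_sketch(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	/* First phase: read the ID and fill in mtd->writesize & friends. */
	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;

	/* Pick software BCH now that the detected geometry is known. */
	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_BCH;
	chip->ecc.size = 512;
	chip->ecc.strength = 8;

	/* Second phase: fill in the remaining ops and scan the BBT. */
	ret = nand_scan_tail(mtd);
	if (ret)
		return ret;

	return mtd_device_register(mtd, NULL, 0);
}
#endif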
4872
4873/**
4874 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
4875 * @chip: NAND chip object
4876 */
4877void nand_cleanup(struct nand_chip *chip)
4878{
4879	if (chip->ecc.mode == NAND_ECC_SOFT &&
4880	    chip->ecc.algo == NAND_ECC_BCH)
4881		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
4882
4883	nand_release_data_interface(chip);
4884
4885	/* Free bad block table memory */
4886	kfree(chip->bbt);
4887	if (!(chip->options & NAND_OWN_BUFFERS))
4888		kfree(chip->buffers);
4889
4890	/* Free bad block descriptor memory */
4891	if (chip->badblock_pattern && chip->badblock_pattern->options
4892			& NAND_BBT_DYNAMICSTRUCT)
4893		kfree(chip->badblock_pattern);
4894}
4895EXPORT_SYMBOL_GPL(nand_cleanup);
4896
4897/**
4898 * nand_release - [NAND Interface] Unregister the MTD device and free resources
4899 *		  held by the NAND device
4900 * @mtd: MTD device structure
4901 */
4902void nand_release(struct mtd_info *mtd)
4903{
4904	mtd_device_unregister(mtd);
4905	nand_cleanup(mtd_to_nand(mtd));
4906}
4907EXPORT_SYMBOL_GPL(nand_release);
4908
4909MODULE_LICENSE("GPL");
4910MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
4911MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
4912MODULE_DESCRIPTION("Generic NAND flash driver code");