// SPDX-License-Identifier: GPL-2.0
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
#define S29GL064N_MN12		0x0c01

/*
 * Status Register bit description. Used by flash devices that don't
 * support DQ polling (e.g. HyperFlash)
 */
#define CFI_SR_DRB		BIT(7)
#define CFI_SR_ESB		BIT(5)
#define CFI_SR_PSB		BIT(4)
#define CFI_SR_WBASB		BIT(3)
#define CFI_SR_SLSB		BIT(1)

enum cfi_quirks {
	CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
};

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#if !FORCE_WORD_WRITE
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#endif
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					  size_t *, const u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/*
 * Use status register to poll for Erase/write completion when DQ is not
 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
 * CFI Primary Vendor-Specific Extended Query table 1.5
 */
static int cfi_use_status_reg(struct cfi_private *cfi)
{
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;

	return extp && extp->MinorVersion >= '5' &&
		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}

static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;

	if (!cfi_use_status_reg(cfi))
		return 0;

	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	status = map_read(map, adr);

	/* The error bits are invalid while the chip's busy */
	if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
		return 0;

	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		if (chipstatus & CFI_SR_ESB)
			pr_err("%s erase operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_PSB)
			pr_err("%s program operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_WBASB)
			pr_err("%s buffer program command aborted, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_SLSB)
			pr_err("%s sector write protected, status %lx\n",
			       map->name, chipstatus);

		/* Erase/Program status bits are set on the operation failure */
		if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
			return 1;
	}
	return 0;
}
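
/*
 * Illustrative sketch (not part of the driver; the helper name is ours):
 * a condensed view of the checks cfi_check_err_status() performs once the
 * status has been merged down to a single chip's value.  DRB must be set
 * (device ready) before the error bits mean anything, and only the erase
 * and program status bits make the operation an outright failure.
 */
static bool __maybe_unused cfi_sr_op_failed(unsigned long chipstatus)
{
	if (!(chipstatus & CFI_SR_DRB))
		return false;	/* still busy: error bits are invalid */

	return chipstatus & (CFI_SR_ESB | CFI_SR_PSB);
}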

/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif
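
/*
 * Illustrative sketch (not part of the driver; the helper name is ours):
 * the version test above packs the two ASCII version digits into one
 * value, so "1.1" becomes ('1' << 8) | '1' = 0x3131 and anything older
 * (e.g. "1.0" = 0x3130) is treated as having an untrustworthy bootloc
 * field.
 */
static bool __maybe_unused cfi_pri_older_than_1_1(__u8 major, __u8 minor)
{
	return ((major << 8) | minor) < 0x3131;
}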

#if !FORCE_WORD_WRITE
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201)
		return;

	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}
#endif /* !FORCE_WORD_WRITE */

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		mtd->name);
}
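
/*
 * Illustrative sketch (not part of the driver; the helper name is ours):
 * EraseRegionInfo packs a region's geometry as
 * (erase_size / 256 << 16) | (num_sectors - 1), which is why the fixed-up
 * value 0x002003ff above decodes to 1024 sectors of 8KiB.  The same
 * decoding, scaled by the interleave, is done in cfi_amdstd_setup().
 */
static void __maybe_unused decode_eraseregion(u32 info, unsigned long *ersize,
					      unsigned long *ernum)
{
	*ersize = (info >> 8) & ~0xff;	/* i.e. (info >> 16) * 256 bytes */
	*ernum = (info & 0xffff) + 1;
}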

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
			mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
			mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8 bits to report the number of
	 * sectors, which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		mtd->name);
}

static void fixup_quirks(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (cfi->mfr == CFI_MFR_AMD && cfi->id == S29GL064N_MN12)
		cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, S29GL064N_MN12, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common.  It is likely that the device IDs are as well.
	 * This table picks out all the cases where we know that to
	 * be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};
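
/*
 * Illustrative sketch (not part of the driver; the helper name is ours):
 * a fixup entry in the tables above fires when both fields match, with
 * CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.  This mirrors the matching
 * that the generic cfi_fixup() helper applies to each table.
 */
static bool __maybe_unused fixup_matches(const struct cfi_fixup *f,
					 const struct cfi_private *cfi)
{
	return (f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
	       (f->id == CFI_ID_ANY || f->id == cfi->id);
}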


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay.  The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive.  As a result, it is recommended
 * that a patch be applied.  Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur.  The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}
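
/*
 * Illustrative sketch (not part of the driver; the helper name is ours):
 * the complete M29EW-safe resume sequence, combining both TN-13-07
 * workarounds above.  put_chip() below performs exactly these steps when
 * it resumes a suspended erase.
 */
static void __maybe_unused m29ew_safe_resume(struct map_info *map,
					     struct cfi_private *cfi,
					     unsigned long adr)
{
	cfi_fixup_m29ew_erase_suspend(map, adr);	/* dummy 0xF0 cycle */
	map_write(map, cfi->sector_erase_cmd, adr);	/* Erase Resume */
	cfi_fixup_m29ew_delay_after_resume(cfi);	/* settle for 500us */
}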

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_amdstd_erase_varsize;
	mtd->_write   = cfi_amdstd_write_words;
	mtd->_read    = cfi_amdstd_read;
	mtd->_sync    = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume  = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
			mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the timeout max according to the timeout
		 * field of struct cfi_ident that was probed from the chip's
		 * CFI area, if available. Specify a minimum of 2000us, in
		 * case the CFI data is wrong.
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
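
/*
 * Illustrative sketch (not part of the driver; the helper name is ours):
 * CFI stores both buffer-write timeout fields as log2 values, so the
 * maximum computed above is 2^Typ * 2^Max microseconds.  For example,
 * Typ = 9 (512us typical) and Max = 2 (x4) give 2048us, which survives
 * the 2000us floor applied afterwards.
 */
static unsigned int __maybe_unused buf_write_timeout_max_us(struct cfi_ident *cfiq)
{
	return 1U << (cfiq->BufWriteTimeoutTyp + cfiq->BufWriteTimeoutMax);
}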

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
			       unsigned long addr, map_word *expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word oldd, curd;
	int ret;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);
		/*
		 * For chips that support status register, check device
		 * ready bit
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		curd = map_read(map, addr);

		return map_word_andequal(map, curd, ready, ready);
	}

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	ret = map_word_equal(map, oldd, curd);

	if (!ret || !expected)
		return ret;

	return map_word_equal(map, curd, *expected);
}
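
/*
 * Illustrative sketch (not part of the driver; the helper name is ours):
 * classic DQ toggle polling.  While an embedded operation is running,
 * successive reads of the same address return toggling status bits, so
 * two consecutive equal reads mean the operation has finished.  This is
 * exactly what the non-status-register path of chip_ready() does above.
 */
static bool __maybe_unused dq_toggle_done(struct map_info *map,
					  unsigned long addr)
{
	map_word first = map_read(map, addr);
	map_word second = map_read(map, addr);

	return map_word_equal(map, first, second);
}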

static int __xipram chip_good(struct map_info *map, struct flchip *chip,
			      unsigned long addr, map_word *expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word *datum = expected;

	if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
		datum = NULL;

	return chip_ready(map, chip, addr, datum);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, chip, adr, NULL))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		return 0;

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* Do not allow suspend if the read/write targets the
		 * erase block that is being erased */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, chip, adr, NULL))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}
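
/*
 * Illustrative sketch (not part of the driver; the helper name is ours):
 * the raw bus sequence get_chip() uses above to suspend an in-progress
 * erase.  put_chip() undoes it later by writing the resume command
 * (cfi->sector_erase_cmd) back to the same block address.
 */
static void __maybe_unused amdstd_erase_suspend_example(struct map_info *map,
							struct flchip *chip)
{
	map_write(map, CMD(0xB0), chip->in_progress_block_addr); /* suspend */
	/* ...then poll chip_ready() until the toggle bits stop toggling */
}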


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also
 * recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif
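
/*
 * Illustrative sketch (not part of the driver; the helper name is ours):
 * UDELAY() drops the chip mutex around the busy-wait so that other
 * threads can interact with the chip in the meantime.  A typical polling
 * loop built on it (here without the timeout handling the real code
 * adds) looks like this:
 */
static void __maybe_unused poll_until_ready_example(struct map_info *map,
						    struct flchip *chip,
						    unsigned long adr)
{
	/* the caller holds chip->mutex, as all users of UDELAY() do */
	while (!chip_ready(map, chip, adr, NULL))
		UDELAY(map, chip, adr, 1);
}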

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
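
/*
 * Illustrative sketch (not part of the driver; the helper name is ours):
 * how cfi_amdstd_read() above splits a linear MTD offset into a chip
 * number and an offset within that chip, since each chip spans
 * 1 << chipshift bytes of the map.
 */
static void __maybe_unused split_mtd_offset(struct cfi_private *cfi,
					    loff_t from, int *chipnum,
					    unsigned long *ofs)
{
	*chipnum = from >> cfi->chipshift;
	*ofs = from - ((loff_t)*chipnum << cfi->chipshift);
}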

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			loff_t adr, size_t len, u_char *buf, size_t grouplen);

static inline void otp_enter(struct map_info *map, struct flchip *chip,
			     loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline void otp_exit(struct map_info *map, struct flchip *chip,
			    loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline int do_read_secsi_onechip(struct map_info *map,
					struct flchip *chip, loff_t adr,
					size_t len, u_char *buf,
					size_t grouplen)
{
	DECLARE_WAITQUEUE(wait, current);

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	otp_enter(map, chip, adr, len);
	map_copy_from(map, buf, adr, len);
	otp_exit(map, chip, adr, len);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
					    thislen, buf, 0);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode);

static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
			size_t len, u_char *buf, size_t grouplen)
{
	int ret;
	while (len) {
		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
		int gap = adr - bus_ofs;
		int n = min_t(int, len, map_bankwidth(map) - gap);
		map_word datum = map_word_ff(map);

		if (n != map_bankwidth(map)) {
			/* partial write of a word, load old contents */
			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
			datum = map_read(map, bus_ofs);
			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
		}

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		adr += n;
		buf += n;
		len -= n;
	}

	return 0;
}
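
/*
 * Illustrative sketch (not part of the driver; the helper name is ours):
 * why padding with 0xFF is safe in do_otp_write().  A NOR program cycle
 * can only clear bits, so the programmed result of each byte is
 * old & datum, and programming 0xFF leaves a byte unchanged.
 */
static u8 __maybe_unused nor_program_result(u8 old, u8 datum)
{
	return old & datum;
}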

static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
		       size_t len, u_char *buf, size_t grouplen)
{
	struct cfi_private *cfi = map->fldrv_priv;
	uint8_t lockreg;
	unsigned long timeo;
	int ret;

	/* make sure area matches group boundaries */
	if ((adr != 0) || (len != grouplen))
		return -EINVAL;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	chip->state = FL_LOCKING;

	/* Enter lock register command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* read lock register */
	lockreg = cfi_read_query(map, 0);

	/* clear bit 0 to protect the extended memory block */
	lockreg &= ~0x01;

	/* write lock register */
	map_write(map, CMD(0xA0), chip->start);
	map_write(map, CMD(lockreg), chip->start);

	/* wait for chip to become ready */
	timeo = jiffies + msecs_to_jiffies(2);
	for (;;) {
		if (chip_ready(map, chip, adr, NULL))
			break;

		if (time_after(jiffies, timeo)) {
			pr_err("Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}
		UDELAY(map, chip, 0, 1);
	}

	/* exit protection commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char *buf,
			       otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;
	unsigned long base;
	int chipnum;
	struct flchip *chip;
	uint8_t otp, lockreg;
	int ret;

	size_t user_size, factory_size, otpsize;
	loff_t user_offset, factory_offset, otpoffset;
	int user_locked = 0, otplocked;

	*retlen = 0;

	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
		chip = &cfi->chips[chipnum];
		factory_size = 0;
		user_size = 0;

		/* Micron M29EW family */
		if (is_m29ew(cfi)) {
			base = chip->start;

			/* check whether secsi area is factory locked
			   or user lockable */
			mutex_lock(&chip->mutex);
			ret = get_chip(map, chip, base, FL_CFI_QUERY);
			if (ret) {
				mutex_unlock(&chip->mutex);
				return ret;
			}
			cfi_qry_mode_on(base, map, cfi);
			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
			cfi_qry_mode_off(base, map, cfi);
			put_chip(map, chip, base);
			mutex_unlock(&chip->mutex);

			if (otp & 0x80) {
				/* factory locked */
				factory_offset = 0;
				factory_size = 0x100;
			} else {
				/* customer lockable */
				user_offset = 0;
				user_size = 0x100;

				mutex_lock(&chip->mutex);
				ret = get_chip(map, chip, base, FL_LOCKING);
				if (ret) {
					mutex_unlock(&chip->mutex);
					return ret;
				}

				/* Enter lock register command */
				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				/* read lock register */
				lockreg = cfi_read_query(map, 0);
				/* exit protection commands */
				map_write(map, CMD(0x90), chip->start);
				map_write(map, CMD(0x00), chip->start);
				put_chip(map, chip, chip->start);
				mutex_unlock(&chip->mutex);

				user_locked = ((lockreg & 0x01) == 0x00);
			}
		}

		otpsize = user_regs ? user_size : factory_size;
		if (!otpsize)
			continue;
		otpoffset = user_regs ? user_offset : factory_offset;
		otplocked = user_regs ? user_locked : 1;

		if (!action) {
			/* return otpinfo */
			struct otp_info *otpinfo;
			len -= sizeof(*otpinfo);
			if (len <= 0)
				return -ENOSPC;
			otpinfo = (struct otp_info *)buf;
			otpinfo->start = from;
			otpinfo->length = otpsize;
			otpinfo->locked = otplocked;
			buf += sizeof(*otpinfo);
			*retlen += sizeof(*otpinfo);
			from += otpsize;
		} else if ((from < otpsize) && (len > 0)) {
			size_t size;
			size = (len < otpsize - from) ? len : otpsize - from;
			ret = action(map, chip, otpoffset + from, size, buf,
				     otpsize);
			if (ret < 0)
				return ret;

			buf += size;
			len -= size;
			*retlen += size;
			from = 0;
		} else {
			from -= otpsize;
		}
	}
	return 0;
}

static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 0);
}

static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 1);
}

static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 0);
}

static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 1);
}

static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					  size_t len, size_t *retlen,
					  const u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen, (u_char *)buf,
				   do_otp_write, 1);
}

static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len)
{
	size_t retlen;
	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
				   do_otp_lock, 1);
}
1643
1644static int __xipram do_write_oneword_once(struct map_info *map,
1645					  struct flchip *chip,
1646					  unsigned long adr, map_word datum,
1647					  int mode, struct cfi_private *cfi)
1648{
1649	unsigned long timeo;
1650	/*
1651	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1652	 * have a max write time of a few hundred usecs). However, we should
1653	 * use the maximum timeout value given by the chip at probe time
1654	 * instead. Unfortunately, struct flchip does not have a field for
1655	 * the maximum timeout, only for the typical one, which can be far
1656	 * too short depending on the conditions. The ' + 1' avoids a
1657	 * timeout of 0 jiffies if HZ is smaller than 1000.
1658	 */
1659	unsigned long uWriteTimeout = (HZ / 1000) + 1;
1660	int ret = 0;
1661
1662	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1663	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1664	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1665	map_write(map, datum, adr);
1666	chip->state = mode;
1667
1668	INVALIDATE_CACHE_UDELAY(map, chip,
1669				adr, map_bankwidth(map),
1670				chip->word_write_time);
1671
1672	/* See comment above for timeout value. */
1673	timeo = jiffies + uWriteTimeout;
1674	for (;;) {
1675		if (chip->state != mode) {
1676			/* Someone's suspended the write. Sleep */
1677			DECLARE_WAITQUEUE(wait, current);
1678
1679			set_current_state(TASK_UNINTERRUPTIBLE);
1680			add_wait_queue(&chip->wq, &wait);
1681			mutex_unlock(&chip->mutex);
1682			schedule();
1683			remove_wait_queue(&chip->wq, &wait);
1684			timeo = jiffies + (HZ / 2); /* FIXME */
1685			mutex_lock(&chip->mutex);
1686			continue;
1687		}
1688
1689	/*
1690	 * Check the timeout together with !chip_good() first, so that a write
1691	 * completing while we were scheduled out is not misreported as a failure.
1692	 */
1693		if (time_after(jiffies, timeo) &&
1694		    !chip_good(map, chip, adr, &datum)) {
1695			xip_enable(map, chip, adr);
1696			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1697			xip_disable(map, chip, adr);
1698			ret = -EIO;
1699			break;
1700		}
1701
1702		if (chip_good(map, chip, adr, &datum)) {
1703			if (cfi_check_err_status(map, chip, adr))
1704				ret = -EIO;
1705			break;
1706		}
1707
1708		/* Latency issues. Drop the lock, wait a while and retry */
1709		UDELAY(map, chip, adr, 1);
1710	}
1711
1712	return ret;
1713}
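/*
 * Worked example for the "(HZ / 1000) + 1" timeout above: the integer
 * division truncates to 0 for any HZ below 1000, so the '+ 1' is what
 * guarantees a non-zero wait:
 *
 *	HZ=1000: (1000 / 1000) + 1 = 2 jiffies (~2 ms)
 *	HZ=250:  (250 / 1000) + 1  = 1 jiffy   (~4 ms)
 *	HZ=100:  (100 / 1000) + 1  = 1 jiffy   (~10 ms)
 */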
1714
1715static int __xipram do_write_oneword_start(struct map_info *map,
1716					   struct flchip *chip,
1717					   unsigned long adr, int mode)
1718{
1719	int ret;
1720
1721	mutex_lock(&chip->mutex);
1722
1723	ret = get_chip(map, chip, adr, mode);
1724	if (ret) {
1725		mutex_unlock(&chip->mutex);
1726		return ret;
1727	}
1728
1729	if (mode == FL_OTP_WRITE)
1730		otp_enter(map, chip, adr, map_bankwidth(map));
1731
1732	return ret;
1733}
1734
1735static void __xipram do_write_oneword_done(struct map_info *map,
1736					   struct flchip *chip,
1737					   unsigned long adr, int mode)
1738{
1739	if (mode == FL_OTP_WRITE)
1740		otp_exit(map, chip, adr, map_bankwidth(map));
1741
1742	chip->state = FL_READY;
1743	DISABLE_VPP(map);
1744	put_chip(map, chip, adr);
1745
1746	mutex_unlock(&chip->mutex);
1747}
1748
1749static int __xipram do_write_oneword_retry(struct map_info *map,
1750					   struct flchip *chip,
1751					   unsigned long adr, map_word datum,
1752					   int mode)
1753{
1754	struct cfi_private *cfi = map->fldrv_priv;
1755	int ret = 0;
1756	map_word oldd;
1757	int retry_cnt = 0;
1758
1759	/*
1760	 * Check for a NOP for the case when the datum to write is already
1761	 * present - it saves time and works around buggy chips that corrupt
1762	 * data at other locations when 0xff is written to a location that
1763	 * already contains 0xff.
1764	 */
1765	oldd = map_read(map, adr);
1766	if (map_word_equal(map, oldd, datum)) {
1767		pr_debug("MTD %s(): NOP\n", __func__);
1768		return ret;
1769	}
1770
1771	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1772	ENABLE_VPP(map);
1773	xip_disable(map, chip, adr);
1774
1775 retry:
1776	ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
1777	if (ret) {
1778		/* reset on all failures. */
1779		map_write(map, CMD(0xF0), chip->start);
1780		/* FIXME - should have reset delay before continuing */
1781
1782		if (++retry_cnt <= MAX_RETRIES) {
1783			ret = 0;
1784			goto retry;
1785		}
1786	}
1787	xip_enable(map, chip, adr);
1788
1789	return ret;
1790}
1791
1792static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1793				     unsigned long adr, map_word datum,
1794				     int mode)
1795{
1796	int ret;
1797
1798	adr += chip->start;
1799
1800	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
1801		 datum.x[0]);
1802
1803	ret = do_write_oneword_start(map, chip, adr, mode);
1804	if (ret)
1805		return ret;
1806
1807	ret = do_write_oneword_retry(map, chip, adr, datum, mode);
1808
1809	do_write_oneword_done(map, chip, adr, mode);
1810
1811	return ret;
1812}
1813
1814
1815static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1816				  size_t *retlen, const u_char *buf)
1817{
1818	struct map_info *map = mtd->priv;
1819	struct cfi_private *cfi = map->fldrv_priv;
1820	int ret;
1821	int chipnum;
1822	unsigned long ofs, chipstart;
1823	DECLARE_WAITQUEUE(wait, current);
1824
1825	chipnum = to >> cfi->chipshift;
1826	ofs = to  - (chipnum << cfi->chipshift);
1827	chipstart = cfi->chips[chipnum].start;
1828
1829	/* If it's not bus-aligned, do the first byte write */
1830	if (ofs & (map_bankwidth(map)-1)) {
1831		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1832		int i = ofs - bus_ofs;
1833		int n = 0;
1834		map_word tmp_buf;
1835
1836 retry:
1837		mutex_lock(&cfi->chips[chipnum].mutex);
1838
1839		if (cfi->chips[chipnum].state != FL_READY) {
1840			set_current_state(TASK_UNINTERRUPTIBLE);
1841			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1842
1843			mutex_unlock(&cfi->chips[chipnum].mutex);
1844
1845			schedule();
1846			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1847			goto retry;
1848		}
1849
1850		/* Load 'tmp_buf' with old contents of flash */
1851		tmp_buf = map_read(map, bus_ofs+chipstart);
1852
1853		mutex_unlock(&cfi->chips[chipnum].mutex);
1854
1855		/* Number of bytes to copy from buffer */
1856		n = min_t(int, len, map_bankwidth(map)-i);
1857
1858		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1859
1860		ret = do_write_oneword(map, &cfi->chips[chipnum],
1861				       bus_ofs, tmp_buf, FL_WRITING);
1862		if (ret)
1863			return ret;
1864
1865		ofs += n;
1866		buf += n;
1867		(*retlen) += n;
1868		len -= n;
1869
1870		if (ofs >> cfi->chipshift) {
1871			chipnum ++;
1872			ofs = 0;
1873			if (chipnum == cfi->numchips)
1874				return 0;
1875		}
1876	}
1877
1878	/* We are now aligned, write as much as possible */
1879	while(len >= map_bankwidth(map)) {
1880		map_word datum;
1881
1882		datum = map_word_load(map, buf);
1883
1884		ret = do_write_oneword(map, &cfi->chips[chipnum],
1885				       ofs, datum, FL_WRITING);
1886		if (ret)
1887			return ret;
1888
1889		ofs += map_bankwidth(map);
1890		buf += map_bankwidth(map);
1891		(*retlen) += map_bankwidth(map);
1892		len -= map_bankwidth(map);
1893
1894		if (ofs >> cfi->chipshift) {
1895			chipnum ++;
1896			ofs = 0;
1897			if (chipnum == cfi->numchips)
1898				return 0;
1899			chipstart = cfi->chips[chipnum].start;
1900		}
1901	}
1902
1903	/* Write the trailing bytes if any */
1904	if (len & (map_bankwidth(map)-1)) {
1905		map_word tmp_buf;
1906
1907 retry1:
1908		mutex_lock(&cfi->chips[chipnum].mutex);
1909
1910		if (cfi->chips[chipnum].state != FL_READY) {
1911			set_current_state(TASK_UNINTERRUPTIBLE);
1912			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1913
1914			mutex_unlock(&cfi->chips[chipnum].mutex);
1915
1916			schedule();
1917			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1918			goto retry1;
1919		}
1920
1921		tmp_buf = map_read(map, ofs + chipstart);
1922
1923		mutex_unlock(&cfi->chips[chipnum].mutex);
1924
1925		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1926
1927		ret = do_write_oneword(map, &cfi->chips[chipnum],
1928				       ofs, tmp_buf, FL_WRITING);
1929		if (ret)
1930			return ret;
1931
1932		(*retlen) += len;
1933	}
1934
1935	return 0;
1936}
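/*
 * Worked example of the alignment handling above, for a hypothetical
 * 4-byte bus (map_bankwidth(map) == 4) and a 9-byte write at ofs 6:
 *
 *	head: bus_ofs = 6 & ~3 = 4, i = 2, n = min(9, 4 - 2) = 2
 *	      -> read-modify-write of the word covering bytes 4..7
 *	body: one aligned word written at ofs 8
 *	tail: the 3 leftover bytes at ofs 12, again via read-modify-write
 */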
1937
1938#if !FORCE_WORD_WRITE
1939static int __xipram do_write_buffer_wait(struct map_info *map,
1940					 struct flchip *chip, unsigned long adr,
1941					 map_word datum)
1942{
1943	unsigned long timeo;
1944	unsigned long u_write_timeout;
1945	int ret = 0;
1946
1947	/*
1948	 * Timeout is calculated according to CFI data, if available.
1949	 * See more comments in cfi_cmdset_0002().
1950	 */
1951	u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
1952	timeo = jiffies + u_write_timeout;
1953
1954	for (;;) {
1955		if (chip->state != FL_WRITING) {
1956			/* Someone's suspended the write. Sleep */
1957			DECLARE_WAITQUEUE(wait, current);
1958
1959			set_current_state(TASK_UNINTERRUPTIBLE);
1960			add_wait_queue(&chip->wq, &wait);
1961			mutex_unlock(&chip->mutex);
1962			schedule();
1963			remove_wait_queue(&chip->wq, &wait);
1964			timeo = jiffies + (HZ / 2); /* FIXME */
1965			mutex_lock(&chip->mutex);
1966			continue;
1967		}
1968
1969	/*
1970	 * Check the timeout together with !chip_good() first, so that a write
1971	 * completing while we were scheduled out is not misreported as a failure.
1972	 */
1973		if (time_after(jiffies, timeo) &&
1974		    !chip_good(map, chip, adr, &datum)) {
1975			pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
1976			       __func__, adr);
1977			ret = -EIO;
1978			break;
1979		}
1980
1981		if (chip_good(map, chip, adr, &datum)) {
1982			if (cfi_check_err_status(map, chip, adr))
1983				ret = -EIO;
1984			break;
1985		}
1986
1987		/* Latency issues. Drop the lock, wait a while and retry */
1988		UDELAY(map, chip, adr, 1);
1989	}
1990
1991	return ret;
1992}
1993
1994static void __xipram do_write_buffer_reset(struct map_info *map,
1995					   struct flchip *chip,
1996					   struct cfi_private *cfi)
1997{
1998	/*
1999	 * Recovery from write-buffer programming failures requires
2000	 * the write-to-buffer-reset sequence.  Since the last part
2001	 * of the sequence also works as a normal reset, we can run
2002	 * the same commands regardless of why we are here.
2003	 * See e.g.
2004	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
2005	 */
2006	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2007			 cfi->device_type, NULL);
2008	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2009			 cfi->device_type, NULL);
2010	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
2011			 cfi->device_type, NULL);
2012
2013	/* FIXME - should have reset delay before continuing */
2014}
2015
2016/*
2017 * FIXME: interleaved mode not tested, and probably not supported!
2018 */
2019static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
2020				    unsigned long adr, const u_char *buf,
2021				    int len)
2022{
2023	struct cfi_private *cfi = map->fldrv_priv;
2024	int ret;
2025	unsigned long cmd_adr;
2026	int z, words;
2027	map_word datum;
2028
2029	adr += chip->start;
2030	cmd_adr = adr;
2031
2032	mutex_lock(&chip->mutex);
2033	ret = get_chip(map, chip, adr, FL_WRITING);
2034	if (ret) {
2035		mutex_unlock(&chip->mutex);
2036		return ret;
2037	}
2038
2039	datum = map_word_load(map, buf);
2040
2041	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
2042		 __func__, adr, datum.x[0]);
2043
2044	XIP_INVAL_CACHED_RANGE(map, adr, len);
2045	ENABLE_VPP(map);
2046	xip_disable(map, chip, cmd_adr);
2047
2048	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2049	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2050
2051	/* Write Buffer Load */
2052	map_write(map, CMD(0x25), cmd_adr);
2053
2054	chip->state = FL_WRITING_TO_BUFFER;
2055
2056	/* Write length of data to come */
2057	words = len / map_bankwidth(map);
2058	map_write(map, CMD(words - 1), cmd_adr);
2059	/* Write data */
2060	z = 0;
2061	while(z < words * map_bankwidth(map)) {
2062		datum = map_word_load(map, buf);
2063		map_write(map, datum, adr + z);
2064
2065		z += map_bankwidth(map);
2066		buf += map_bankwidth(map);
2067	}
2068	z -= map_bankwidth(map);
2069
2070	adr += z;
2071
2072	/* Write Buffer Program Confirm: GO GO GO */
2073	map_write(map, CMD(0x29), cmd_adr);
2074	chip->state = FL_WRITING;
2075
2076	INVALIDATE_CACHE_UDELAY(map, chip,
2077				adr, map_bankwidth(map),
2078				chip->word_write_time);
2079
2080	ret = do_write_buffer_wait(map, chip, adr, datum);
2081	if (ret)
2082		do_write_buffer_reset(map, chip, cfi);
2083
2084	xip_enable(map, chip, adr);
2085
2086	chip->state = FL_READY;
2087	DISABLE_VPP(map);
2088	put_chip(map, chip, adr);
2089	mutex_unlock(&chip->mutex);
2090
2091	return ret;
2092}
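/*
 * For reference, the buffer-program command cycles issued above:
 *
 *	1: addr_unlock1 <- 0xAA
 *	2: addr_unlock2 <- 0x55
 *	3: sector addr  <- 0x25			(write-to-buffer)
 *	4: sector addr  <- word count - 1
 *	5: data words written to their target addresses
 *	6: sector addr  <- 0x29			(program buffer confirm)
 */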
2093
2094
2095static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
2096				    size_t *retlen, const u_char *buf)
2097{
2098	struct map_info *map = mtd->priv;
2099	struct cfi_private *cfi = map->fldrv_priv;
2100	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
2101	int ret;
2102	int chipnum;
2103	unsigned long ofs;
2104
2105	chipnum = to >> cfi->chipshift;
2106	ofs = to  - (chipnum << cfi->chipshift);
2107
2108	/* If it's not bus-aligned, do the first word write */
2109	if (ofs & (map_bankwidth(map)-1)) {
2110		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
2111		if (local_len > len)
2112			local_len = len;
2113		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2114					     local_len, retlen, buf);
2115		if (ret)
2116			return ret;
2117		ofs += local_len;
2118		buf += local_len;
2119		len -= local_len;
2120
2121		if (ofs >> cfi->chipshift) {
2122			chipnum ++;
2123			ofs = 0;
2124			if (chipnum == cfi->numchips)
2125				return 0;
2126		}
2127	}
2128
2129	/* Write buffer is worth it only if more than one word to write... */
2130	while (len >= map_bankwidth(map) * 2) {
2131		/* We must not cross write block boundaries */
2132		int size = wbufsize - (ofs & (wbufsize-1));
2133
2134		if (size > len)
2135			size = len;
2136		if (size % map_bankwidth(map))
2137			size -= size % map_bankwidth(map);
2138
2139		ret = do_write_buffer(map, &cfi->chips[chipnum],
2140				      ofs, buf, size);
2141		if (ret)
2142			return ret;
2143
2144		ofs += size;
2145		buf += size;
2146		(*retlen) += size;
2147		len -= size;
2148
2149		if (ofs >> cfi->chipshift) {
2150			chipnum ++;
2151			ofs = 0;
2152			if (chipnum == cfi->numchips)
2153				return 0;
2154		}
2155	}
2156
2157	if (len) {
2158		size_t retlen_dregs = 0;
2159
2160		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2161					     len, &retlen_dregs, buf);
2162
2163		*retlen += retlen_dregs;
2164		return ret;
2165	}
2166
2167	return 0;
2168}
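/*
 * Worked example for the splitting above, assuming a single x16 chip
 * (interleave 1) whose CFI table reports MaxBufWriteSize = 5:
 *
 *	wbufsize = 1 << 5 = 32 bytes
 *
 * A 100-byte write at offset 20 is then issued as 12 + 32 + 32 + 24
 * bytes, so no single buffer program crosses a 32-byte write-block
 * boundary.
 */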
2169#endif /* !FORCE_WORD_WRITE */
2170
2171/*
2172 * Wait for the flash chip to become ready to write data
2173 *
2174 * This is only called during the panic_write() path. When panic_write()
2175 * is called, the kernel is in the process of a panic, and will soon be
2176 * dead. Therefore we don't take any locks, and attempt to get access
2177 * to the chip as soon as possible.
2178 */
2179static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
2180				 unsigned long adr)
2181{
2182	struct cfi_private *cfi = map->fldrv_priv;
2183	int retries = 10;
2184	int i;
2185
2186	/*
2187	 * If the driver thinks the chip is idle, and no toggle bits
2188	 * are changing, then the chip is actually idle for sure.
2189	 */
2190	if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
2191		return 0;
2192
2193	/*
2194	 * Try several times to reset the chip and then wait for it
2195	 * to become idle. The upper limit of a few milliseconds of
2196	 * delay isn't a big problem: the kernel is dying anyway. It
2197	 * is more important to save the messages.
2198	 */
2199	while (retries > 0) {
2200		const unsigned long timeo = (HZ / 1000) + 1;
2201
2202		/* send the reset command */
2203		map_write(map, CMD(0xF0), chip->start);
2204
2205		/* wait for the chip to become ready */
2206		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
2207			if (chip_ready(map, chip, adr, NULL))
2208				return 0;
2209
2210			udelay(1);
2211		}
2212
2213		retries--;
2214	}
2215
2216	/* the chip never became ready */
2217	return -EBUSY;
2218}
2219
2220/*
2221 * Write out one word of data to a single flash chip during a kernel panic
2222 *
2223 * This is only called during the panic_write() path. When panic_write()
2224 * is called, the kernel is in the process of a panic, and will soon be
2225 * dead. Therefore we don't take any locks, and attempt to get access
2226 * to the chip as soon as possible.
2227 *
2228 * The implementation of this routine is intentionally similar to
2229 * do_write_oneword(), in order to ease code maintenance.
2230 */
2231static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
2232				  unsigned long adr, map_word datum)
2233{
2234	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
2235	struct cfi_private *cfi = map->fldrv_priv;
2236	int retry_cnt = 0;
2237	map_word oldd;
2238	int ret;
2239	int i;
2240
2241	adr += chip->start;
2242
2243	ret = cfi_amdstd_panic_wait(map, chip, adr);
2244	if (ret)
2245		return ret;
2246
2247	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
2248			__func__, adr, datum.x[0]);
2249
2250	/*
2251	 * Check for a NOP for the case when the datum to write is already
2252	 * present - it saves time and works around buggy chips that corrupt
2253	 * data at other locations when 0xff is written to a location that
2254	 * already contains 0xff.
2255	 */
2256	oldd = map_read(map, adr);
2257	if (map_word_equal(map, oldd, datum)) {
2258		pr_debug("MTD %s(): NOP\n", __func__);
2259		goto op_done;
2260	}
2261
2262	ENABLE_VPP(map);
2263
2264retry:
2265	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2266	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2267	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2268	map_write(map, datum, adr);
2269
2270	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
2271		if (chip_ready(map, chip, adr, NULL))
2272			break;
2273
2274		udelay(1);
2275	}
2276
2277	if (!chip_ready(map, chip, adr, &datum) ||
2278	    cfi_check_err_status(map, chip, adr)) {
2279		/* reset on all failures. */
2280		map_write(map, CMD(0xF0), chip->start);
2281		/* FIXME - should have reset delay before continuing */
2282
2283		if (++retry_cnt <= MAX_RETRIES)
2284			goto retry;
2285
2286		ret = -EIO;
2287	}
2288
2289op_done:
2290	DISABLE_VPP(map);
2291	return ret;
2292}
2293
2294/*
2295 * Write out some data during a kernel panic
2296 *
2297 * This is used by the mtdoops driver to save the dying messages from a
2298 * kernel which has panic'd.
2299 *
2300 * This routine ignores all of the locking used throughout the rest of the
2301 * driver, in order to ensure that the data gets written out no matter what
2302 * state this driver (and the flash chip itself) was in when the kernel crashed.
2303 *
2304 * The implementation of this routine is intentionally similar to
2305 * cfi_amdstd_write_words(), in order to ease code maintenance.
2306 */
2307static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
2308				  size_t *retlen, const u_char *buf)
2309{
2310	struct map_info *map = mtd->priv;
2311	struct cfi_private *cfi = map->fldrv_priv;
2312	unsigned long ofs, chipstart;
2313	int ret;
2314	int chipnum;
2315
2316	chipnum = to >> cfi->chipshift;
2317	ofs = to - (chipnum << cfi->chipshift);
2318	chipstart = cfi->chips[chipnum].start;
2319
2320	/* If it's not bus aligned, do the first byte write */
2321	if (ofs & (map_bankwidth(map) - 1)) {
2322		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
2323		int i = ofs - bus_ofs;
2324		int n = 0;
2325		map_word tmp_buf;
2326
2327		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
2328		if (ret)
2329			return ret;
2330
2331		/* Load 'tmp_buf' with old contents of flash */
2332		tmp_buf = map_read(map, bus_ofs + chipstart);
2333
2334		/* Number of bytes to copy from buffer */
2335		n = min_t(int, len, map_bankwidth(map) - i);
2336
2337		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
2338
2339		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2340					     bus_ofs, tmp_buf);
2341		if (ret)
2342			return ret;
2343
2344		ofs += n;
2345		buf += n;
2346		(*retlen) += n;
2347		len -= n;
2348
2349		if (ofs >> cfi->chipshift) {
2350			chipnum++;
2351			ofs = 0;
2352			if (chipnum == cfi->numchips)
2353				return 0;
2354		}
2355	}
2356
2357	/* We are now aligned, write as much as possible */
2358	while (len >= map_bankwidth(map)) {
2359		map_word datum;
2360
2361		datum = map_word_load(map, buf);
2362
2363		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2364					     ofs, datum);
2365		if (ret)
2366			return ret;
2367
2368		ofs += map_bankwidth(map);
2369		buf += map_bankwidth(map);
2370		(*retlen) += map_bankwidth(map);
2371		len -= map_bankwidth(map);
2372
2373		if (ofs >> cfi->chipshift) {
2374			chipnum++;
2375			ofs = 0;
2376			if (chipnum == cfi->numchips)
2377				return 0;
2378
2379			chipstart = cfi->chips[chipnum].start;
2380		}
2381	}
2382
2383	/* Write the trailing bytes if any */
2384	if (len & (map_bankwidth(map) - 1)) {
2385		map_word tmp_buf;
2386
2387		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
2388		if (ret)
2389			return ret;
2390
2391		tmp_buf = map_read(map, ofs + chipstart);
2392
2393		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
2394
2395		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2396					     ofs, tmp_buf);
2397		if (ret)
2398			return ret;
2399
2400		(*retlen) += len;
2401	}
2402
2403	return 0;
2404}
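/*
 * Illustrative sketch, not part of the driver: mtdoops-style callers
 * reach cfi_amdstd_panic_write() through the generic mtd_panic_write()
 * wrapper.  No locks are taken on this path, so it is only safe from a
 * context where nothing else can run, i.e. a panic.  "mtd" and "ofs"
 * are assumed to come from the caller.
 */
static int __maybe_unused example_panic_write(struct mtd_info *mtd,
					      loff_t ofs, const u_char *msg,
					      size_t len)
{
	size_t retlen;

	return mtd_panic_write(mtd, ofs, len, &retlen, msg);
}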
2405
2406
2407/*
2408 * Handle devices with one erase region that only implement
2409 * the chip erase command.
2410 */
2411static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2412{
2413	struct cfi_private *cfi = map->fldrv_priv;
2414	unsigned long timeo = jiffies + HZ;
2415	unsigned long int adr;
2416	DECLARE_WAITQUEUE(wait, current);
2417	int ret;
2418	int retry_cnt = 0;
2419	map_word datum = map_word_ff(map);
2420
2421	adr = cfi->addr_unlock1;
2422
2423	mutex_lock(&chip->mutex);
2424	ret = get_chip(map, chip, adr, FL_ERASING);
2425	if (ret) {
2426		mutex_unlock(&chip->mutex);
2427		return ret;
2428	}
2429
2430	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2431	       __func__, chip->start);
2432
2433	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
2434	ENABLE_VPP(map);
2435	xip_disable(map, chip, adr);
2436
2437 retry:
2438	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2439	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2440	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2441	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2442	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2443	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2444
2445	chip->state = FL_ERASING;
2446	chip->erase_suspended = 0;
2447	chip->in_progress_block_addr = adr;
2448	chip->in_progress_block_mask = ~(map->size - 1);
2449
2450	INVALIDATE_CACHE_UDELAY(map, chip,
2451				adr, map->size,
2452				chip->erase_time*500);
2453
2454	timeo = jiffies + (HZ*20);
2455
2456	for (;;) {
2457		if (chip->state != FL_ERASING) {
2458			/* Someone's suspended the erase. Sleep */
2459			set_current_state(TASK_UNINTERRUPTIBLE);
2460			add_wait_queue(&chip->wq, &wait);
2461			mutex_unlock(&chip->mutex);
2462			schedule();
2463			remove_wait_queue(&chip->wq, &wait);
2464			mutex_lock(&chip->mutex);
2465			continue;
2466		}
2467		if (chip->erase_suspended) {
2468			/* This erase was suspended and resumed.
2469			   Adjust the timeout */
2470			timeo = jiffies + (HZ*20); /* FIXME */
2471			chip->erase_suspended = 0;
2472		}
2473
2474		if (chip_ready(map, chip, adr, &datum)) {
2475			if (cfi_check_err_status(map, chip, adr))
2476				ret = -EIO;
2477			break;
2478		}
2479
2480		if (time_after(jiffies, timeo)) {
2481			printk(KERN_WARNING "MTD %s(): software timeout\n",
2482			       __func__);
2483			ret = -EIO;
2484			break;
2485		}
2486
2487		/* Latency issues. Drop the lock, wait a while and retry */
2488		UDELAY(map, chip, adr, 1000000/HZ);
2489	}
2490	/* Did we succeed? */
2491	if (ret) {
2492		/* reset on all failures. */
2493		map_write(map, CMD(0xF0), chip->start);
2494		/* FIXME - should have reset delay before continuing */
2495
2496		if (++retry_cnt <= MAX_RETRIES) {
2497			ret = 0;
2498			goto retry;
2499		}
2500	}
2501
2502	chip->state = FL_READY;
2503	xip_enable(map, chip, adr);
2504	DISABLE_VPP(map);
2505	put_chip(map, chip, adr);
2506	mutex_unlock(&chip->mutex);
2507
2508	return ret;
2509}
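/*
 * For reference, the JEDEC command cycles issued above for chip erase
 * (the addresses are the device's unlock addresses, typically
 * 0x555/0x2AA on x16 parts):
 *
 *	1: addr_unlock1 <- 0xAA		4: addr_unlock1 <- 0xAA
 *	2: addr_unlock2 <- 0x55		5: addr_unlock2 <- 0x55
 *	3: addr_unlock1 <- 0x80		6: addr_unlock1 <- 0x10
 *
 * Sector erase (below) uses the same first five cycles but writes
 * cfi->sector_erase_cmd (usually 0x30) to the sector address instead.
 */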
2510
2511
2512static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
2513{
2514	struct cfi_private *cfi = map->fldrv_priv;
2515	unsigned long timeo = jiffies + HZ;
2516	DECLARE_WAITQUEUE(wait, current);
2517	int ret;
2518	int retry_cnt = 0;
2519	map_word datum = map_word_ff(map);
2520
2521	adr += chip->start;
2522
2523	mutex_lock(&chip->mutex);
2524	ret = get_chip(map, chip, adr, FL_ERASING);
2525	if (ret) {
2526		mutex_unlock(&chip->mutex);
2527		return ret;
2528	}
2529
2530	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2531		 __func__, adr);
2532
2533	XIP_INVAL_CACHED_RANGE(map, adr, len);
2534	ENABLE_VPP(map);
2535	xip_disable(map, chip, adr);
2536
2537 retry:
2538	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2539	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2540	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2541	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2542	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2543	map_write(map, cfi->sector_erase_cmd, adr);
2544
2545	chip->state = FL_ERASING;
2546	chip->erase_suspended = 0;
2547	chip->in_progress_block_addr = adr;
2548	chip->in_progress_block_mask = ~(len - 1);
2549
2550	INVALIDATE_CACHE_UDELAY(map, chip,
2551				adr, len,
2552				chip->erase_time*500);
2553
2554	timeo = jiffies + (HZ*20);
2555
2556	for (;;) {
2557		if (chip->state != FL_ERASING) {
2558			/* Someone's suspended the erase. Sleep */
2559			set_current_state(TASK_UNINTERRUPTIBLE);
2560			add_wait_queue(&chip->wq, &wait);
2561			mutex_unlock(&chip->mutex);
2562			schedule();
2563			remove_wait_queue(&chip->wq, &wait);
2564			mutex_lock(&chip->mutex);
2565			continue;
2566		}
2567		if (chip->erase_suspended) {
2568			/* This erase was suspended and resumed.
2569			   Adjust the timeout */
2570			timeo = jiffies + (HZ*20); /* FIXME */
2571			chip->erase_suspended = 0;
2572		}
2573
2574		if (chip_ready(map, chip, adr, &datum)) {
2575			if (cfi_check_err_status(map, chip, adr))
2576				ret = -EIO;
2577			break;
2578		}
2579
2580		if (time_after(jiffies, timeo)) {
2581			printk(KERN_WARNING "MTD %s(): software timeout\n",
2582			       __func__);
2583			ret = -EIO;
2584			break;
2585		}
2586
2587		/* Latency issues. Drop the lock, wait a while and retry */
2588		UDELAY(map, chip, adr, 1000000/HZ);
2589	}
2590	/* Did we succeed? */
2591	if (ret) {
2592		/* reset on all failures. */
2593		map_write(map, CMD(0xF0), chip->start);
2594		/* FIXME - should have reset delay before continuing */
2595
2596		if (++retry_cnt <= MAX_RETRIES) {
2597			ret = 0;
2598			goto retry;
2599		}
2600	}
2601
2602	chip->state = FL_READY;
2603	xip_enable(map, chip, adr);
2604	DISABLE_VPP(map);
2605	put_chip(map, chip, adr);
2606	mutex_unlock(&chip->mutex);
2607	return ret;
2608}
2609
2610
2611static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2612{
2613	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2614				instr->len, NULL);
2615}
2616
2617
2618static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2619{
2620	struct map_info *map = mtd->priv;
2621	struct cfi_private *cfi = map->fldrv_priv;
2622
2623	if (instr->addr != 0)
2624		return -EINVAL;
2625
2626	if (instr->len != mtd->size)
2627		return -EINVAL;
2628
2629	return do_erase_chip(map, &cfi->chips[0]);
2630}
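/*
 * Illustrative sketch: cfi_amdstd_erase_chip() rejects anything but a
 * request spanning the entire device, so a caller erases such a chip
 * through the generic API like this ("mtd" assumed obtained elsewhere):
 */
static int __maybe_unused example_erase_whole_chip(struct mtd_info *mtd)
{
	struct erase_info instr = {
		.addr = 0,
		.len  = mtd->size,
	};

	return mtd_erase(mtd, &instr);
}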
2631
2632static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2633			 unsigned long adr, int len, void *thunk)
2634{
2635	struct cfi_private *cfi = map->fldrv_priv;
2636	int ret;
2637
2638	mutex_lock(&chip->mutex);
2639	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2640	if (ret)
2641		goto out_unlock;
2642	chip->state = FL_LOCKING;
2643
2644	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2645
2646	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2647			 cfi->device_type, NULL);
2648	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2649			 cfi->device_type, NULL);
2650	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2651			 cfi->device_type, NULL);
2652	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2653			 cfi->device_type, NULL);
2654	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2655			 cfi->device_type, NULL);
2656	map_write(map, CMD(0x40), chip->start + adr);
2657
2658	chip->state = FL_READY;
2659	put_chip(map, chip, adr + chip->start);
2660	ret = 0;
2661
2662out_unlock:
2663	mutex_unlock(&chip->mutex);
2664	return ret;
2665}
2666
2667static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2668			   unsigned long adr, int len, void *thunk)
2669{
2670	struct cfi_private *cfi = map->fldrv_priv;
2671	int ret;
2672
2673	mutex_lock(&chip->mutex);
2674	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2675	if (ret)
2676		goto out_unlock;
2677	chip->state = FL_UNLOCKING;
2678
2679	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
2680
2681	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2682			 cfi->device_type, NULL);
2683	map_write(map, CMD(0x70), adr);
2684
2685	chip->state = FL_READY;
2686	put_chip(map, chip, adr + chip->start);
2687	ret = 0;
2688
2689out_unlock:
2690	mutex_unlock(&chip->mutex);
2691	return ret;
2692}
2693
2694static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2695{
2696	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2697}
2698
2699static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2700{
2701	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2702}
2703
2704/*
2705 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2706 */
2707
2708struct ppb_lock {
2709	struct flchip *chip;
2710	unsigned long adr;
2711	int locked;
2712};
2713
2714#define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
2715#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
2716#define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
2717
2718static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2719					struct flchip *chip,
2720					unsigned long adr, int len, void *thunk)
2721{
2722	struct cfi_private *cfi = map->fldrv_priv;
2723	unsigned long timeo;
2724	int ret;
2725
2726	adr += chip->start;
2727	mutex_lock(&chip->mutex);
2728	ret = get_chip(map, chip, adr, FL_LOCKING);
2729	if (ret) {
2730		mutex_unlock(&chip->mutex);
2731		return ret;
2732	}
2733
2734	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2735
2736	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2737			 cfi->device_type, NULL);
2738	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2739			 cfi->device_type, NULL);
2740	/* PPB entry command */
2741	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
2742			 cfi->device_type, NULL);
2743
2744	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2745		chip->state = FL_LOCKING;
2746		map_write(map, CMD(0xA0), adr);
2747		map_write(map, CMD(0x00), adr);
2748	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2749		/*
2750		 * Unlocking of one specific sector is not supported, so we
2751		 * have to unlock all sectors of this device instead
2752		 */
2753		chip->state = FL_UNLOCKING;
2754		map_write(map, CMD(0x80), chip->start);
2755		map_write(map, CMD(0x30), chip->start);
2756	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2757		chip->state = FL_JEDEC_QUERY;
2758		/* The device reads back 0 for a locked sector; invert so 1 means locked */
2759		ret = !cfi_read_query(map, adr);
2760	} else
2761		BUG();
2762
2763	/*
2764	 * Wait a while, as unlocking all sectors can take quite long
2765	 */
2766	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
2767	for (;;) {
2768		if (chip_ready(map, chip, adr, NULL))
2769			break;
2770
2771		if (time_after(jiffies, timeo)) {
2772			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2773			ret = -EIO;
2774			break;
2775		}
2776
2777		UDELAY(map, chip, adr, 1);
2778	}
2779
2780	/* Exit BC commands */
2781	map_write(map, CMD(0x90), chip->start);
2782	map_write(map, CMD(0x00), chip->start);
2783
2784	chip->state = FL_READY;
2785	put_chip(map, chip, adr);
2786	mutex_unlock(&chip->mutex);
2787
2788	return ret;
2789}
2790
2791static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2792				       uint64_t len)
2793{
2794	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2795				DO_XXLOCK_ONEBLOCK_LOCK);
2796}
2797
2798static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2799					 uint64_t len)
2800{
2801	struct mtd_erase_region_info *regions = mtd->eraseregions;
2802	struct map_info *map = mtd->priv;
2803	struct cfi_private *cfi = map->fldrv_priv;
2804	struct ppb_lock *sect;
2805	unsigned long adr;
2806	loff_t offset;
2807	uint64_t length;
2808	int chipnum;
2809	int i;
2810	int sectors;
2811	int ret;
2812	int max_sectors;
2813
2814	/*
2815	 * PPB unlocking always unlocks all sectors of the flash chip.
2816	 * We need to re-lock all previously locked sectors. So let's
2817	 * first check the locking status of all sectors and save
2818	 * it for future use.
2819	 */
2820	max_sectors = 0;
2821	for (i = 0; i < mtd->numeraseregions; i++)
2822		max_sectors += regions[i].numblocks;
2823
2824	sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
2825	if (!sect)
2826		return -ENOMEM;
2827
2828	/*
2829	 * This code to walk all sectors is a slightly modified version
2830	 * of the cfi_varsize_frob() code.
2831	 */
2832	i = 0;
2833	chipnum = 0;
2834	adr = 0;
2835	sectors = 0;
2836	offset = 0;
2837	length = mtd->size;
2838
2839	while (length) {
2840		int size = regions[i].erasesize;
2841
2842		/*
2843		 * Only test sectors that shall not be unlocked. The other
2844		 * sectors shall be unlocked, so let's keep their locking
2845		 * status at "unlocked" (locked=0) for the final re-locking.
2846		 */
2847		if ((offset < ofs) || (offset >= (ofs + len))) {
2848			sect[sectors].chip = &cfi->chips[chipnum];
2849			sect[sectors].adr = adr;
2850			sect[sectors].locked = do_ppb_xxlock(
2851				map, &cfi->chips[chipnum], adr, 0,
2852				DO_XXLOCK_ONEBLOCK_GETLOCK);
2853		}
2854
2855		adr += size;
2856		offset += size;
2857		length -= size;
2858
2859		if (offset == regions[i].offset + size * regions[i].numblocks)
2860			i++;
2861
2862		if (adr >> cfi->chipshift) {
2863			if (offset >= (ofs + len))
2864				break;
2865			adr = 0;
2866			chipnum++;
2867
2868			if (chipnum >= cfi->numchips)
2869				break;
2870		}
2871
2872		sectors++;
2873		if (sectors >= max_sectors) {
2874			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
2875			       max_sectors);
2876			kfree(sect);
2877			return -EINVAL;
2878		}
2879	}
2880
2881	/* Now unlock the whole chip */
2882	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2883			       DO_XXLOCK_ONEBLOCK_UNLOCK);
2884	if (ret) {
2885		kfree(sect);
2886		return ret;
2887	}
2888
2889	/*
2890	 * PPB unlocking always unlocks all sectors of the flash chip.
2891	 * We need to re-lock all previously locked sectors.
2892	 */
2893	for (i = 0; i < sectors; i++) {
2894		if (sect[i].locked)
2895			do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2896				      DO_XXLOCK_ONEBLOCK_LOCK);
2897	}
2898
2899	kfree(sect);
2900	return ret;
2901}
2902
2903static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2904					    uint64_t len)
2905{
2906	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2907				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2908}
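/*
 * Illustrative sketch: with the PPB handlers wired into mtd_info, a
 * client uses the generic lock calls.  Note the side effect described
 * above: unlocking any range transiently unlocks the whole chip and
 * then re-locks whatever was locked before.
 */
static int __maybe_unused example_ppb_usage(struct mtd_info *mtd)
{
	int ret;

	ret = mtd_lock(mtd, 0, mtd->erasesize);		/* lock sector 0 */
	if (ret)
		return ret;

	if (mtd_is_locked(mtd, 0, mtd->erasesize) == 1)	/* 1 -> locked */
		ret = mtd_unlock(mtd, 0, mtd->erasesize);

	return ret;
}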
2909
2910static void cfi_amdstd_sync (struct mtd_info *mtd)
2911{
2912	struct map_info *map = mtd->priv;
2913	struct cfi_private *cfi = map->fldrv_priv;
2914	int i;
2915	struct flchip *chip;
2916	int ret = 0;
2917	DECLARE_WAITQUEUE(wait, current);
2918
2919	for (i=0; !ret && i<cfi->numchips; i++) {
2920		chip = &cfi->chips[i];
2921
2922	retry:
2923		mutex_lock(&chip->mutex);
2924
2925		switch(chip->state) {
2926		case FL_READY:
2927		case FL_STATUS:
2928		case FL_CFI_QUERY:
2929		case FL_JEDEC_QUERY:
2930			chip->oldstate = chip->state;
2931			chip->state = FL_SYNCING;
2932			/* No need to wake_up() on this state change -
2933			 * as the whole point is that nobody can do anything
2934			 * with the chip now anyway.
2935			 */
2936			fallthrough;
2937		case FL_SYNCING:
2938			mutex_unlock(&chip->mutex);
2939			break;
2940
2941		default:
2942			/* Not an idle state */
2943			set_current_state(TASK_UNINTERRUPTIBLE);
2944			add_wait_queue(&chip->wq, &wait);
2945
2946			mutex_unlock(&chip->mutex);
2947
2948			schedule();
2949
2950			remove_wait_queue(&chip->wq, &wait);
2951
2952			goto retry;
2953		}
2954	}
2955
2956	/* Unlock the chips again */
2957
2958	for (i--; i >=0; i--) {
2959		chip = &cfi->chips[i];
2960
2961		mutex_lock(&chip->mutex);
2962
2963		if (chip->state == FL_SYNCING) {
2964			chip->state = chip->oldstate;
2965			wake_up(&chip->wq);
2966		}
2967		mutex_unlock(&chip->mutex);
2968	}
2969}
2970
2971
2972static int cfi_amdstd_suspend(struct mtd_info *mtd)
2973{
2974	struct map_info *map = mtd->priv;
2975	struct cfi_private *cfi = map->fldrv_priv;
2976	int i;
2977	struct flchip *chip;
2978	int ret = 0;
2979
2980	for (i=0; !ret && i<cfi->numchips; i++) {
2981		chip = &cfi->chips[i];
2982
2983		mutex_lock(&chip->mutex);
2984
2985		switch(chip->state) {
2986		case FL_READY:
2987		case FL_STATUS:
2988		case FL_CFI_QUERY:
2989		case FL_JEDEC_QUERY:
2990			chip->oldstate = chip->state;
2991			chip->state = FL_PM_SUSPENDED;
2992			/* No need to wake_up() on this state change -
2993			 * as the whole point is that nobody can do anything
2994			 * with the chip now anyway.
2995			 */
2996			break;
2997		case FL_PM_SUSPENDED:
2998			break;
2999
3000		default:
3001			ret = -EAGAIN;
3002			break;
3003		}
3004		mutex_unlock(&chip->mutex);
3005	}
3006
3007	/* Unlock the chips again */
3008
3009	if (ret) {
3010		for (i--; i >=0; i--) {
3011			chip = &cfi->chips[i];
3012
3013			mutex_lock(&chip->mutex);
3014
3015			if (chip->state == FL_PM_SUSPENDED) {
3016				chip->state = chip->oldstate;
3017				wake_up(&chip->wq);
3018			}
3019			mutex_unlock(&chip->mutex);
3020		}
3021	}
3022
3023	return ret;
3024}
3025
3026
3027static void cfi_amdstd_resume(struct mtd_info *mtd)
3028{
3029	struct map_info *map = mtd->priv;
3030	struct cfi_private *cfi = map->fldrv_priv;
3031	int i;
3032	struct flchip *chip;
3033
3034	for (i=0; i<cfi->numchips; i++) {
3035
3036		chip = &cfi->chips[i];
3037
3038		mutex_lock(&chip->mutex);
3039
3040		if (chip->state == FL_PM_SUSPENDED) {
3041			chip->state = FL_READY;
3042			map_write(map, CMD(0xF0), chip->start);
3043			wake_up(&chip->wq);
3044		}
3045		else
3046			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
3047
3048		mutex_unlock(&chip->mutex);
3049	}
3050}
3051
3052
3053/*
3054 * Ensure that the flash device is put back into read array mode before
3055 * unloading the driver or rebooting.  On some systems, rebooting while
3056 * the flash is in query/program/erase mode will prevent the CPU from
3057 * fetching the bootloader code, requiring a hard reset or power cycle.
3058 */
3059static int cfi_amdstd_reset(struct mtd_info *mtd)
3060{
3061	struct map_info *map = mtd->priv;
3062	struct cfi_private *cfi = map->fldrv_priv;
3063	int i, ret;
3064	struct flchip *chip;
3065
3066	for (i = 0; i < cfi->numchips; i++) {
3067
3068		chip = &cfi->chips[i];
3069
3070		mutex_lock(&chip->mutex);
3071
3072		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
3073		if (!ret) {
3074			map_write(map, CMD(0xF0), chip->start);
3075			chip->state = FL_SHUTDOWN;
3076			put_chip(map, chip, chip->start);
3077		}
3078
3079		mutex_unlock(&chip->mutex);
3080	}
3081
3082	return 0;
3083}
3084
3085
3086static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
3087			       void *v)
3088{
3089	struct mtd_info *mtd;
3090
3091	mtd = container_of(nb, struct mtd_info, reboot_notifier);
3092	cfi_amdstd_reset(mtd);
3093	return NOTIFY_DONE;
3094}
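/*
 * Illustrative sketch: the notifier above is registered during device
 * setup (in cfi_amdstd_setup(), outside this excerpt) along these
 * lines, and unregistered again in cfi_amdstd_destroy() below:
 */
static void __maybe_unused example_register_reboot(struct mtd_info *mtd)
{
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
	register_reboot_notifier(&mtd->reboot_notifier);
}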
3095
3096
3097static void cfi_amdstd_destroy(struct mtd_info *mtd)
3098{
3099	struct map_info *map = mtd->priv;
3100	struct cfi_private *cfi = map->fldrv_priv;
3101
3102	cfi_amdstd_reset(mtd);
3103	unregister_reboot_notifier(&mtd->reboot_notifier);
3104	kfree(cfi->cmdset_priv);
3105	kfree(cfi->cfiq);
3106	kfree(cfi);
3107	kfree(mtd->eraseregions);
3108}
3109
3110MODULE_LICENSE("GPL");
3111MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
3112MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
3113MODULE_ALIAS("cfi_cmdset_0006");
3114MODULE_ALIAS("cfi_cmdset_0701");
v5.4
 
   1/*
   2 * Common Flash Interface support:
   3 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
   4 *
   5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
   6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
   7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
   8 *
   9 * 2_by_8 routines added by Simon Munton
  10 *
  11 * 4_by_16 work by Carolyn J. Smith
  12 *
  13 * XIP support hooks by Vitaly Wool (based on code for Intel flash
  14 * by Nicolas Pitre)
  15 *
  16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
  17 *
  18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
  19 *
  20 * This code is GPL
  21 */
  22
  23#include <linux/module.h>
  24#include <linux/types.h>
  25#include <linux/kernel.h>
  26#include <linux/sched.h>
  27#include <asm/io.h>
  28#include <asm/byteorder.h>
  29
  30#include <linux/errno.h>
  31#include <linux/slab.h>
  32#include <linux/delay.h>
  33#include <linux/interrupt.h>
  34#include <linux/reboot.h>
  35#include <linux/of.h>
  36#include <linux/of_platform.h>
  37#include <linux/mtd/map.h>
  38#include <linux/mtd/mtd.h>
  39#include <linux/mtd/cfi.h>
  40#include <linux/mtd/xip.h>
  41
  42#define AMD_BOOTLOC_BUG
  43#define FORCE_WORD_WRITE 0
  44
  45#define MAX_RETRIES 3
  46
  47#define SST49LF004B		0x0060
  48#define SST49LF040B		0x0050
  49#define SST49LF008A		0x005a
  50#define AT49BV6416		0x00d6
 
  51
  52/*
  53 * Status Register bit description. Used by flash devices that don't
  54 * support DQ polling (e.g. HyperFlash)
  55 */
  56#define CFI_SR_DRB		BIT(7)
  57#define CFI_SR_ESB		BIT(5)
  58#define CFI_SR_PSB		BIT(4)
  59#define CFI_SR_WBASB		BIT(3)
  60#define CFI_SR_SLSB		BIT(1)
  61
 
 
 
 
  62static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
  63static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
  64#if !FORCE_WORD_WRITE
  65static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
  66#endif
  67static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
  68static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
  69static void cfi_amdstd_sync (struct mtd_info *);
  70static int cfi_amdstd_suspend (struct mtd_info *);
  71static void cfi_amdstd_resume (struct mtd_info *);
  72static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
  73static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
  74					 size_t *, struct otp_info *);
  75static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
  76					 size_t *, struct otp_info *);
  77static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
  78static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
  79					 size_t *, u_char *);
  80static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
  81					 size_t *, u_char *);
  82static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
  83					  size_t *, u_char *);
  84static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
  85
  86static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
  87				  size_t *retlen, const u_char *buf);
  88
  89static void cfi_amdstd_destroy(struct mtd_info *);
  90
  91struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
  92static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
  93
  94static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
  95static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
  96#include "fwh_lock.h"
  97
  98static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
  99static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 100
 101static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 102static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 103static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 104
 105static struct mtd_chip_driver cfi_amdstd_chipdrv = {
 106	.probe		= NULL, /* Not usable directly */
 107	.destroy	= cfi_amdstd_destroy,
 108	.name		= "cfi_cmdset_0002",
 109	.module		= THIS_MODULE
 110};
 111
 112/*
 113 * Use status register to poll for Erase/write completion when DQ is not
 114 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
 115 * CFI Primary Vendor-Specific Extended Query table 1.5
 116 */
 117static int cfi_use_status_reg(struct cfi_private *cfi)
 118{
 119	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
 120	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
 121
 122	return extp->MinorVersion >= '5' &&
 123		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
 124}
 125
 126static void cfi_check_err_status(struct map_info *map, struct flchip *chip,
 127				 unsigned long adr)
 128{
 129	struct cfi_private *cfi = map->fldrv_priv;
 130	map_word status;
 131
 132	if (!cfi_use_status_reg(cfi))
 133		return;
 134
 135	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
 136			 cfi->device_type, NULL);
 137	status = map_read(map, adr);
 138
 
 
 
 
 139	if (map_word_bitsset(map, status, CMD(0x3a))) {
 140		unsigned long chipstatus = MERGESTATUS(status);
 141
 142		if (chipstatus & CFI_SR_ESB)
 143			pr_err("%s erase operation failed, status %lx\n",
 144			       map->name, chipstatus);
 145		if (chipstatus & CFI_SR_PSB)
 146			pr_err("%s program operation failed, status %lx\n",
 147			       map->name, chipstatus);
 148		if (chipstatus & CFI_SR_WBASB)
 149			pr_err("%s buffer program command aborted, status %lx\n",
 150			       map->name, chipstatus);
 151		if (chipstatus & CFI_SR_SLSB)
 152			pr_err("%s sector write protected, status %lx\n",
 153			       map->name, chipstatus);
 
 
 
 
 154	}
 
 155}
 156
 157/* #define DEBUG_CFI_FEATURES */
 158
 159
 160#ifdef DEBUG_CFI_FEATURES
 161static void cfi_tell_features(struct cfi_pri_amdstd *extp)
 162{
 163	const char* erase_suspend[3] = {
 164		"Not supported", "Read only", "Read/write"
 165	};
 166	const char* top_bottom[6] = {
 167		"No WP", "8x8KiB sectors at top & bottom, no WP",
 168		"Bottom boot", "Top boot",
 169		"Uniform, Bottom WP", "Uniform, Top WP"
 170	};
 171
 172	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
 173	printk("  Address sensitive unlock: %s\n",
 174	       (extp->SiliconRevision & 1) ? "Not required" : "Required");
 175
 176	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
 177		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
 178	else
 179		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
 180
 181	if (extp->BlkProt == 0)
 182		printk("  Block protection: Not supported\n");
 183	else
 184		printk("  Block protection: %d sectors per group\n", extp->BlkProt);
 185
 186
 187	printk("  Temporary block unprotect: %s\n",
 188	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
 189	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
 190	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
 191	printk("  Burst mode: %s\n",
 192	       extp->BurstMode ? "Supported" : "Not supported");
 193	if (extp->PageMode == 0)
 194		printk("  Page mode: Not supported\n");
 195	else
 196		printk("  Page mode: %d word page\n", extp->PageMode << 2);
 197
 198	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
 199	       extp->VppMin >> 4, extp->VppMin & 0xf);
 200	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
 201	       extp->VppMax >> 4, extp->VppMax & 0xf);
 202
 203	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
 204		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
 205	else
 206		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
 207}
 208#endif
 209
 210#ifdef AMD_BOOTLOC_BUG
 211/* Wheee. Bring me the head of someone at AMD. */
 212static void fixup_amd_bootblock(struct mtd_info *mtd)
 213{
 214	struct map_info *map = mtd->priv;
 215	struct cfi_private *cfi = map->fldrv_priv;
 216	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
 217	__u8 major = extp->MajorVersion;
 218	__u8 minor = extp->MinorVersion;
 219
 220	if (((major << 8) | minor) < 0x3131) {
 221		/* CFI version 1.0 => don't trust bootloc */
 222
 223		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
 224			map->name, cfi->mfr, cfi->id);
 225
 226		/* AFAICS all 29LV400 with a bottom boot block have a device ID
 227		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
 228		 * These were badly detected as they have the 0x80 bit set
 229		 * so treat them as a special case.
 230		 */
 231		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
 232
 233			/* Macronix added CFI to their 2nd generation
 234			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
 235			 * Fujitsu, Spansion, EON, ESI and older Macronix)
 236			 * has CFI.
 237			 *
 238			 * Therefore also check the manufacturer.
 239			 * This reduces the risk of false detection due to
 240			 * the 8-bit device ID.
 241			 */
 242			(cfi->mfr == CFI_MFR_MACRONIX)) {
 243			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
 244				" detected\n", map->name);
 245			extp->TopBottom = 2;	/* bottom boot */
 246		} else
 247		if (cfi->id & 0x80) {
 248			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
 249			extp->TopBottom = 3;	/* top boot */
 250		} else {
 251			extp->TopBottom = 2;	/* bottom boot */
 252		}
 253
 254		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
 255			" deduced %s from Device ID\n", map->name, major, minor,
 256			extp->TopBottom == 2 ? "bottom" : "top");
 257	}
 258}
 259#endif
 260
 261#if !FORCE_WORD_WRITE
 262static void fixup_use_write_buffers(struct mtd_info *mtd)
 263{
 264	struct map_info *map = mtd->priv;
 265	struct cfi_private *cfi = map->fldrv_priv;
 
 
 
 
 266	if (cfi->cfiq->BufWriteTimeoutTyp) {
 267		pr_debug("Using buffer write method\n");
 268		mtd->_write = cfi_amdstd_write_buffers;
 269	}
 270}
 271#endif /* !FORCE_WORD_WRITE */
 272
 273/* Atmel chips don't use the same PRI format as AMD chips */
 274static void fixup_convert_atmel_pri(struct mtd_info *mtd)
 275{
 276	struct map_info *map = mtd->priv;
 277	struct cfi_private *cfi = map->fldrv_priv;
 278	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
 279	struct cfi_pri_atmel atmel_pri;
 280
 281	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
 282	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
 283
 284	if (atmel_pri.Features & 0x02)
 285		extp->EraseSuspend = 2;
 286
 287	/* Some chips got it backwards... */
 288	if (cfi->id == AT49BV6416) {
 289		if (atmel_pri.BottomBoot)
 290			extp->TopBottom = 3;
 291		else
 292			extp->TopBottom = 2;
 293	} else {
 294		if (atmel_pri.BottomBoot)
 295			extp->TopBottom = 2;
 296		else
 297			extp->TopBottom = 3;
 298	}
 299
 300	/* burst write mode not supported */
 301	cfi->cfiq->BufWriteTimeoutTyp = 0;
 302	cfi->cfiq->BufWriteTimeoutMax = 0;
 303}
 304
 305static void fixup_use_secsi(struct mtd_info *mtd)
 306{
 307	/* Setup for chips with a secsi area */
 308	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
 309	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
 310}
 311
 312static void fixup_use_erase_chip(struct mtd_info *mtd)
 313{
 314	struct map_info *map = mtd->priv;
 315	struct cfi_private *cfi = map->fldrv_priv;
 316	if ((cfi->cfiq->NumEraseRegions == 1) &&
 317		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
 318		mtd->_erase = cfi_amdstd_erase_chip;
 319	}
 320
 321}
 322
 323/*
 324 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 325 * locked by default.
 326 */
 327static void fixup_use_atmel_lock(struct mtd_info *mtd)
 328{
 329	mtd->_lock = cfi_atmel_lock;
 330	mtd->_unlock = cfi_atmel_unlock;
 331	mtd->flags |= MTD_POWERUP_LOCK;
 332}
 333
 334static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
 335{
 336	struct map_info *map = mtd->priv;
 337	struct cfi_private *cfi = map->fldrv_priv;
 338
 339	/*
 340	 * These flashes report two separate eraseblock regions based on the
 341	 * sector_erase-size and block_erase-size, although they both operate on the
 342	 * same memory. This is not allowed according to CFI, so we just pick the
 343	 * sector_erase-size.
 344	 */
 345	cfi->cfiq->NumEraseRegions = 1;
 346}
 347
 348static void fixup_sst39vf(struct mtd_info *mtd)
 349{
 350	struct map_info *map = mtd->priv;
 351	struct cfi_private *cfi = map->fldrv_priv;
 352
 353	fixup_old_sst_eraseregion(mtd);
 354
 355	cfi->addr_unlock1 = 0x5555;
 356	cfi->addr_unlock2 = 0x2AAA;
 357}
 358
 359static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
 360{
 361	struct map_info *map = mtd->priv;
 362	struct cfi_private *cfi = map->fldrv_priv;
 363
 364	fixup_old_sst_eraseregion(mtd);
 365
 366	cfi->addr_unlock1 = 0x555;
 367	cfi->addr_unlock2 = 0x2AA;
 368
 369	cfi->sector_erase_cmd = CMD(0x50);
 370}
 371
 372static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
 373{
 374	struct map_info *map = mtd->priv;
 375	struct cfi_private *cfi = map->fldrv_priv;
 376
 377	fixup_sst39vf_rev_b(mtd);
 378
 379	/*
 380	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
 381	 * it should report a size of 8KBytes (0x0020*256).
 382	 */
 383	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
 384	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
 385		mtd->name);
 386}
 387
 388static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
 389{
 390	struct map_info *map = mtd->priv;
 391	struct cfi_private *cfi = map->fldrv_priv;
 392
 393	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
 394		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
 395		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
 396			mtd->name);
 397	}
 398}
 399
 400static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
 401{
 402	struct map_info *map = mtd->priv;
 403	struct cfi_private *cfi = map->fldrv_priv;
 404
 405	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
 406		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
 407		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
 408			mtd->name);
 409	}
 410}
 411
 412static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
 413{
 414	struct map_info *map = mtd->priv;
 415	struct cfi_private *cfi = map->fldrv_priv;
 416
 417	/*
 418	 *  S29NS512P flash uses more than 8bits to report number of sectors,
 419	 * which is not permitted by CFI.
 420	 */
 421	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
 422	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
 423		mtd->name);
 424}
 425
 
 
 
 
 
 
 
 
 
 426/* Used to fix CFI-Tables of chips without Extended Query Tables */
 427static struct cfi_fixup cfi_nopri_fixup_table[] = {
 428	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
 429	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
 430	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
 431	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
 432	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
 433	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
 434	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
 435	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
 436	{ 0, 0, NULL }
 437};
 438
 439static struct cfi_fixup cfi_fixup_table[] = {
 440	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
 441#ifdef AMD_BOOTLOC_BUG
 442	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
 443	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
 444	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
 445#endif
 446	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
 447	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
 448	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
 449	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
 450	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
 451	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
 452	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
 453	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
 454	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
 455	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
 456	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
 457	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
 458	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
 459	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
 460	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
 461#if !FORCE_WORD_WRITE
 462	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
 463#endif
 464	{ 0, 0, NULL }
 465};
 466static struct cfi_fixup jedec_fixup_table[] = {
 467	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
 468	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
 469	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
 470	{ 0, 0, NULL }
 471};
 472
 473static struct cfi_fixup fixup_table[] = {
 474	/* The CFI vendor IDs and the JEDEC vendor IDs appear
 475	 * to be common.  It looks like the device IDs are as
 476	 * well.  This table picks out all cases where
 477	 * we know that to be the case.
 478	 */
 479	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
 480	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
 481	{ 0, 0, NULL }
 482};
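
/*
 * For reference (hedged sketch, not code from this file): cfi_fixup(),
 * implemented in cfi_util.c, walks tables like the ones above and calls
 * every entry whose IDs match the probed chip, with CFI_MFR_ANY and
 * CFI_ID_ANY acting as wildcards.  Conceptually:
 *
 *	for (f = table; f->fixup; f++)
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id == CFI_ID_ANY || f->id == cfi->id))
 *			f->fixup(mtd);
 */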
 483
 484
 485static void cfi_fixup_major_minor(struct cfi_private *cfi,
 486				  struct cfi_pri_amdstd *extp)
 487{
 488	if (cfi->mfr == CFI_MFR_SAMSUNG) {
 489		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
 490		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
 491			/*
 492			 * Samsung K8P2815UQB and K8D6x16UxM chips
 493			 * report major=0 / minor=0.
 494			 * K8D3x16UxC chips report major=3 / minor=3.
 495			 */
 496			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
 497			       " Extended Query version to 1.%c\n",
 498			       extp->MinorVersion);
 499			extp->MajorVersion = '1';
 500		}
 501	}
 502
 503	/*
 504	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
 505	 */
 506	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
 507		extp->MajorVersion = '1';
 508		extp->MinorVersion = '0';
 509	}
 510}
 511
 512static int is_m29ew(struct cfi_private *cfi)
 513{
 514	if (cfi->mfr == CFI_MFR_INTEL &&
 515	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
 516	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
 517		return 1;
 518	return 0;
 519}
 520
 521/*
 522 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 523 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 524 * particular, it can occur when the sequence
 525 * Erase Confirm -> Suspend -> Program -> Resume
 526 * causes a lockup due to internal timing issues. The consequence is that the
 527 * erase cannot be resumed without inserting a dummy command after programming
 528 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 529 * that writes an F0 command code before the RESUME command.
 530 */
 531static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
 532					  unsigned long adr)
 533{
 534	struct cfi_private *cfi = map->fldrv_priv;
 535	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
 536	if (is_m29ew(cfi))
 537		map_write(map, CMD(0xF0), adr);
 538}
 539
 540/*
 541 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 542 *
 543 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 544 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 545 * command is issued after an ERASE RESUME operation without waiting for a
 546 * minimum delay.  The result is that once the ERASE seems to be completed
 547 * (no bits are toggling), the contents of the Flash memory block on which
 548 * the erase was ongoing could be inconsistent with the expected values
 549 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 550 * values), causing a consequent failure of the ERASE operation.
 551 * The occurrence of this issue could be high, especially when file system
 552 * operations on the Flash are intensive.  As a result, it is recommended
 553 * that a patch be applied.  Intensive file system operations can cause many
 554 * calls to the garbage routine to free Flash space (also by erasing physical
 555 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 556 * commands can occur.  The problem disappears when a delay is inserted after
 557 * the RESUME command by using the udelay() function available in Linux.
 558 * The DELAY value must be tuned based on the customer's platform.
 559 * The maximum value that fixes the problem in all cases is 500us.
 560 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 561 * in most cases.
 562 * We have chosen 500µs because this latency is acceptable.
 563 */
 564static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
 565{
 566	/*
 567	 * Resolving the Delay After Resume issue, see Micron TN-13-07.
 568	 * Worst-case delay must be 500µs, but 30-50µs should be OK as well.
 569	 */
 570	if (is_m29ew(cfi))
 571		cfi_udelay(500);
 572}
 573
 574struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 575{
 576	struct cfi_private *cfi = map->fldrv_priv;
 577	struct device_node __maybe_unused *np = map->device_node;
 578	struct mtd_info *mtd;
 579	int i;
 580
 581	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
 582	if (!mtd)
 583		return NULL;
 584	mtd->priv = map;
 585	mtd->type = MTD_NORFLASH;
 586
 587	/* Fill in the default mtd operations */
 588	mtd->_erase   = cfi_amdstd_erase_varsize;
 589	mtd->_write   = cfi_amdstd_write_words;
 590	mtd->_read    = cfi_amdstd_read;
 591	mtd->_sync    = cfi_amdstd_sync;
 592	mtd->_suspend = cfi_amdstd_suspend;
 593	mtd->_resume  = cfi_amdstd_resume;
 594	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
 595	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
 596	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
 597	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
 598	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
 599	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
 600	mtd->flags   = MTD_CAP_NORFLASH;
 601	mtd->name    = map->name;
 602	mtd->writesize = 1;
 603	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
 604
 605	pr_debug("MTD %s(): write buffer size %d\n", __func__,
 606			mtd->writebufsize);
 607
 608	mtd->_panic_write = cfi_amdstd_panic_write;
 609	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
 610
 611	if (cfi->cfi_mode == CFI_MODE_CFI) {
 612		unsigned char bootloc;
 613		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
 614		struct cfi_pri_amdstd *extp;
 615
 616		extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
 617		if (extp) {
 618			/*
 619			 * It's a real CFI chip, not one for which the probe
 620			 * routine faked a CFI structure.
 621			 */
 622			cfi_fixup_major_minor(cfi, extp);
 623
 624			/*
 625			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
 626			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
 627			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
 628			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
 629			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
 630			 */
 631			if (extp->MajorVersion != '1' ||
 632			    extp->MinorVersion < '0' || extp->MinorVersion > '5') {
 633				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
 634				       "version %c.%c (%#02x/%#02x).\n",
 635				       extp->MajorVersion, extp->MinorVersion,
 636				       extp->MajorVersion, extp->MinorVersion);
 637				kfree(extp);
 638				kfree(mtd);
 639				return NULL;
 640			}
 641
 642			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
 643			       extp->MajorVersion, extp->MinorVersion);
 644
 645			/* Install our own private info structure */
 646			cfi->cmdset_priv = extp;
 647
 648			/* Apply cfi device specific fixups */
 649			cfi_fixup(mtd, cfi_fixup_table);
 650
 651#ifdef DEBUG_CFI_FEATURES
 652			/* Tell the user about it in lots of lovely detail */
 653			cfi_tell_features(extp);
 654#endif
 655
 656#ifdef CONFIG_OF
 657			if (np && of_property_read_bool(
 658				    np, "use-advanced-sector-protection")
 659			    && extp->BlkProtUnprot == 8) {
 660				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
 661				mtd->_lock = cfi_ppb_lock;
 662				mtd->_unlock = cfi_ppb_unlock;
 663				mtd->_is_locked = cfi_ppb_is_locked;
 664			}
 665#endif
 666
 667			bootloc = extp->TopBottom;
 668			if ((bootloc < 2) || (bootloc > 5)) {
 669				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
 670				       "bank location (%d). Assuming bottom.\n",
 671				       map->name, bootloc);
 672				bootloc = 2;
 673			}
 674
 675			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
 676				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
 677
 678				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
 679					int j = (cfi->cfiq->NumEraseRegions-1)-i;
 680
 681					swap(cfi->cfiq->EraseRegionInfo[i],
 682					     cfi->cfiq->EraseRegionInfo[j]);
 683				}
 684			}
 685			/* Set the default CFI lock/unlock addresses */
 686			cfi->addr_unlock1 = 0x555;
 687			cfi->addr_unlock2 = 0x2aa;
 688		}
 689		cfi_fixup(mtd, cfi_nopri_fixup_table);
 690
 691		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
 692			kfree(mtd);
 693			return NULL;
 694		}
 695
 696	} /* CFI mode */
 697	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
 698		/* Apply jedec specific fixups */
 699		cfi_fixup(mtd, jedec_fixup_table);
 700	}
 701	/* Apply generic fixups */
 702	cfi_fixup(mtd, fixup_table);
 703
 704	for (i = 0; i < cfi->numchips; i++) {
 705		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
 706		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
 707		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
 708		/*
 709		 * First calculate the maximum timeout from the timeout fields
 710		 * of struct cfi_ident probed from the chip's CFI area, if
 711		 * available.  Enforce a minimum of 2000us in case the CFI data
 712		 * is wrong.
 713		 */
 714		if (cfi->cfiq->BufWriteTimeoutTyp &&
 715		    cfi->cfiq->BufWriteTimeoutMax)
 716			cfi->chips[i].buffer_write_time_max =
 717				1 << (cfi->cfiq->BufWriteTimeoutTyp +
 718				      cfi->cfiq->BufWriteTimeoutMax);
 719		else
 720			cfi->chips[i].buffer_write_time_max = 0;
 721
 722		cfi->chips[i].buffer_write_time_max =
 723			max(cfi->chips[i].buffer_write_time_max, 2000);
 724
 725		cfi->chips[i].ref_point_counter = 0;
 726		init_waitqueue_head(&(cfi->chips[i].wq));
 727	}
 728
 729	map->fldrv = &cfi_amdstd_chipdrv;
 730
 731	return cfi_amdstd_setup(mtd);
 732}
 733struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
 734struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
 735EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
 736EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
 737EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
 738
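/*
 * Usage sketch (illustrative, with a hypothetical map): map drivers
 * normally reach cfi_cmdset_0002() indirectly, by probing the chip and
 * letting the probe code dispatch on the primary vendor command set:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		mtd_device_register(mtd, NULL, 0);
 */
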
 739static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
 740{
 741	struct map_info *map = mtd->priv;
 742	struct cfi_private *cfi = map->fldrv_priv;
 743	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
 744	unsigned long offset = 0;
 745	int i, j;
 746
 747	printk(KERN_NOTICE "number of %s chips: %d\n",
 748	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
 749	/* Select the correct geometry setup */
 750	mtd->size = devsize * cfi->numchips;
 751
 752	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
 753	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
 754					  sizeof(struct mtd_erase_region_info),
 755					  GFP_KERNEL);
 756	if (!mtd->eraseregions)
 757		goto setup_err;
 758
 759	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
 760		unsigned long ernum, ersize;
 761		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
 762		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
 763
 764		if (mtd->erasesize < ersize) {
 765			mtd->erasesize = ersize;
 766		}
 767		for (j=0; j<cfi->numchips; j++) {
 768			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
 769			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
 770			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
 771		}
 772		offset += (ersize * ernum);
 773	}
 774	if (offset != devsize) {
 775		/* Argh */
 776		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
 777		goto setup_err;
 778	}
 779
 780	__module_get(THIS_MODULE);
 781	register_reboot_notifier(&mtd->reboot_notifier);
 782	return mtd;
 783
 784 setup_err:
 785	kfree(mtd->eraseregions);
 786	kfree(mtd);
 787	kfree(cfi->cmdset_priv);
 788	kfree(cfi->cfiq);
 789	return NULL;
 790}
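
/*
 * Worked example (illustrative): with cfi->numchips = 2 and
 * NumEraseRegions = 2, mtd->numeraseregions is 4 and chip j's copy of
 * region i lands at index (j * 2) + i, shifted by j * devsize, so the
 * second chip's regions simply repeat the first chip's layout at the
 * next chip boundary.
 */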
 791
 792/*
 793 * Return true if the chip is ready.
 794 *
 795 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 796 * non-suspended sector) and is indicated by no toggle bits toggling.
 797 *
 798 * Note that anything more complicated than checking if no bits are toggling
 799 * (including checking DQ5 for an error status) is tricky to get working
 800 * correctly and is therefore not done (particularly with interleaved chips
 801 * as each chip must be checked independently of the others).
 802 */
 803static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
 804			       unsigned long addr)
 805{
 806	struct cfi_private *cfi = map->fldrv_priv;
 807	map_word d, t;
 808
 809	if (cfi_use_status_reg(cfi)) {
 810		map_word ready = CMD(CFI_SR_DRB);
 811		/*
 812		 * For chips that support status register, check device
 813		 * ready bit
 814		 */
 815		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
 816				 cfi->device_type, NULL);
 817		d = map_read(map, addr);
 818
 819		return map_word_andequal(map, d, ready, ready);
 820	}
 821
 822	d = map_read(map, addr);
 823	t = map_read(map, addr);
 824
 825	return map_word_equal(map, d, t);
 826}
 827
 828/*
 829 * Return true if the chip is ready and has the correct value.
 830 *
 831 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 832 * non-suspended sector) and it is indicated by no bits toggling.
 833 *
 834 * Errors are indicated by toggling bits, or by bits held at the
 835 * wrong value.
 836 *
 837 * Note that anything more complicated than checking if no bits are toggling
 838 * (including checking DQ5 for an error status) is tricky to get working
 839 * correctly and is therefore not done (particularly with interleaved chips
 840 * as each chip must be checked independently of the others).
 841 *
 842 */
 843static int __xipram chip_good(struct map_info *map, struct flchip *chip,
 844			      unsigned long addr, map_word expected)
 845{
 846	struct cfi_private *cfi = map->fldrv_priv;
 847	map_word oldd, curd;
 848
 849	if (cfi_use_status_reg(cfi)) {
 850		map_word ready = CMD(CFI_SR_DRB);
 851		map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB);
 852		/*
 853		 * For chips that support status register, check device
 854		 * ready bit and Erase/Program status bit to know if
 855		 * operation succeeded.
 856		 */
 857		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
 858				 cfi->device_type, NULL);
 859		curd = map_read(map, addr);
 860
 861		if (map_word_andequal(map, curd, ready, ready))
 862			return !map_word_bitsset(map, curd, err);
 863
 864		return 0;
 865	}
 866
 867	oldd = map_read(map, addr);
 868	curd = map_read(map, addr);
 869
 870	return	map_word_equal(map, oldd, curd) &&
 871		map_word_equal(map, curd, expected);
 872}
 873
 874static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
 875{
 876	DECLARE_WAITQUEUE(wait, current);
 877	struct cfi_private *cfi = map->fldrv_priv;
 878	unsigned long timeo;
 879	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
 880
 881 resettime:
 882	timeo = jiffies + HZ;
 883 retry:
 884	switch (chip->state) {
 885
 886	case FL_STATUS:
 887		for (;;) {
 888			if (chip_ready(map, chip, adr))
 889				break;
 890
 891			if (time_after(jiffies, timeo)) {
 892				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
 893				return -EIO;
 894			}
 895			mutex_unlock(&chip->mutex);
 896			cfi_udelay(1);
 897			mutex_lock(&chip->mutex);
 898			/* Someone else might have been playing with it. */
 899			goto retry;
 900		}
 901		/* fall through */
 902	case FL_READY:
 903	case FL_CFI_QUERY:
 904	case FL_JEDEC_QUERY:
 905		return 0;
 906
 907	case FL_ERASING:
 908		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
 909		    !(mode == FL_READY || mode == FL_POINT ||
 910		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
 911			goto sleep;
 912
 913		/* Do not allow suspend if the read/write targets the erasing block */
 914		if ((adr & chip->in_progress_block_mask) ==
 915		    chip->in_progress_block_addr)
 916			goto sleep;
 917
 918		/* Erase suspend */
 919		/* It's harmless to issue the Erase-Suspend and Erase-Resume
 920		 * commands when the erase algorithm isn't in progress. */
 921		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
 922		chip->oldstate = FL_ERASING;
 923		chip->state = FL_ERASE_SUSPENDING;
 924		chip->erase_suspended = 1;
 925		for (;;) {
 926			if (chip_ready(map, chip, adr))
 927				break;
 928
 929			if (time_after(jiffies, timeo)) {
 930				/* Should have suspended the erase by now.
 931				 * Send an Erase-Resume command as either
 932				 * there was an error (so leave the erase
 933				 * routine to recover from it) or we were trying to
 934				 * use the erase-in-progress sector. */
 935				put_chip(map, chip, adr);
 936				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
 937				return -EIO;
 938			}
 939
 940			mutex_unlock(&chip->mutex);
 941			cfi_udelay(1);
 942			mutex_lock(&chip->mutex);
 943			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
 944			   So we can just loop here. */
 945		}
 946		chip->state = FL_READY;
 947		return 0;
 948
 949	case FL_XIP_WHILE_ERASING:
 950		if (mode != FL_READY && mode != FL_POINT &&
 951		    (!cfip || !(cfip->EraseSuspend&2)))
 952			goto sleep;
 953		chip->oldstate = chip->state;
 954		chip->state = FL_READY;
 955		return 0;
 956
 957	case FL_SHUTDOWN:
 958		/* The machine is rebooting */
 959		return -EIO;
 960
 961	case FL_POINT:
 962		/* Only if there's no operation suspended... */
 963		if (mode == FL_READY && chip->oldstate == FL_READY)
 964			return 0;
 965		/* fall through */
 966
 967	default:
 968	sleep:
 969		set_current_state(TASK_UNINTERRUPTIBLE);
 970		add_wait_queue(&chip->wq, &wait);
 971		mutex_unlock(&chip->mutex);
 972		schedule();
 973		remove_wait_queue(&chip->wq, &wait);
 974		mutex_lock(&chip->mutex);
 975		goto resettime;
 976	}
 977}
 978
 979
 980static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
 981{
 982	struct cfi_private *cfi = map->fldrv_priv;
 983
 984	switch(chip->oldstate) {
 985	case FL_ERASING:
 986		cfi_fixup_m29ew_erase_suspend(map,
 987			chip->in_progress_block_addr);
 988		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
 989		cfi_fixup_m29ew_delay_after_resume(cfi);
 990		chip->oldstate = FL_READY;
 991		chip->state = FL_ERASING;
 992		break;
 993
 994	case FL_XIP_WHILE_ERASING:
 995		chip->state = chip->oldstate;
 996		chip->oldstate = FL_READY;
 997		break;
 998
 999	case FL_READY:
1000	case FL_STATUS:
1001		break;
1002	default:
1003		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
1004	}
1005	wake_up(&chip->wq);
1006}
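
/*
 * Typical calling pattern for the two helpers above (sketch; compare
 * do_read_onechip() below):
 *
 *	mutex_lock(&chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_WRITING);
 *	if (!ret) {
 *		... issue commands, poll for completion ...
 *		put_chip(map, chip, adr);
 *	}
 *	mutex_unlock(&chip->mutex);
 */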
1007
1008#ifdef CONFIG_MTD_XIP
1009
1010/*
1011 * No interrupt whatsoever can be serviced while the flash isn't in array
1012 * mode.  This is ensured by the xip_disable() and xip_enable() functions
1013 * enclosing any code path where the flash is known not to be in array mode.
1014 * And within a XIP disabled code path, only functions marked with __xipram
1015 * may be called and nothing else (it's a good thing to inspect generated
1016 * assembly to make sure inline functions were actually inlined and that gcc
1017 * didn't emit calls to its own support functions). Configuring the MTD CFI
1018 * support to a single buswidth and a single interleave is also recommended.
1019 */
1020
1021static void xip_disable(struct map_info *map, struct flchip *chip,
1022			unsigned long adr)
1023{
1024	/* TODO: chips with no XIP use should ignore and return */
1025	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
1026	local_irq_disable();
1027}
1028
1029static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1030				unsigned long adr)
1031{
1032	struct cfi_private *cfi = map->fldrv_priv;
1033
1034	if (chip->state != FL_POINT && chip->state != FL_READY) {
1035		map_write(map, CMD(0xf0), adr);
1036		chip->state = FL_READY;
1037	}
1038	(void) map_read(map, adr);
1039	xip_iprefetch();
1040	local_irq_enable();
1041}
1042
1043/*
1044 * When a delay is required for the flash operation to complete, the
1045 * xip_udelay() function is polling for both the given timeout and pending
1046 * (but still masked) hardware interrupts.  Whenever there is an interrupt
1047 * pending then the flash erase operation is suspended, array mode restored
1048 * and interrupts unmasked.  Task scheduling might also happen at that
1049 * point.  The CPU eventually returns from the interrupt or the call to
1050 * schedule() and the suspended flash operation is resumed for the
1051 * remainder of the delay period.
1052 *
1053 * Warning: this function _will_ fool interrupt latency tracing tools.
1054 */
1055
1056static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
1057				unsigned long adr, int usec)
1058{
1059	struct cfi_private *cfi = map->fldrv_priv;
1060	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
1061	map_word status, OK = CMD(0x80);
1062	unsigned long suspended, start = xip_currtime();
1063	flstate_t oldstate;
1064
1065	do {
1066		cpu_relax();
1067		if (xip_irqpending() && extp &&
1068		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
1069		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1070			/*
1071			 * Let's suspend the erase operation when supported.
1072			 * Note that we currently don't try to suspend
1073			 * interleaved chips if there is already another
1074			 * operation suspended (imagine what happens
1075			 * when one chip was already done with the current
1076			 * operation while another chip suspended it, then
1077			 * we resume the whole thing at once).  Yes, it
1078			 * can happen!
1079			 */
1080			map_write(map, CMD(0xb0), adr);
1081			usec -= xip_elapsed_since(start);
1082			suspended = xip_currtime();
1083			do {
1084				if (xip_elapsed_since(suspended) > 100000) {
1085					/*
1086					 * The chip doesn't want to suspend
1087					 * after waiting for 100 msecs.
1088					 * This is a critical error but there
1089					 * is not much we can do here.
1090					 */
1091					return;
1092				}
1093				status = map_read(map, adr);
1094			} while (!map_word_andequal(map, status, OK, OK));
1095
1096			/* Suspend succeeded */
1097			oldstate = chip->state;
1098			if (!map_word_bitsset(map, status, CMD(0x40)))
1099				break;
1100			chip->state = FL_XIP_WHILE_ERASING;
1101			chip->erase_suspended = 1;
1102			map_write(map, CMD(0xf0), adr);
1103			(void) map_read(map, adr);
1104			xip_iprefetch();
1105			local_irq_enable();
1106			mutex_unlock(&chip->mutex);
1107			xip_iprefetch();
1108			cond_resched();
1109
1110			/*
1111			 * We're back.  However someone else might have
1112			 * decided to go write to the chip if we are in
1113			 * a suspended erase state.  If so let's wait
1114			 * until it's done.
1115			 */
1116			mutex_lock(&chip->mutex);
1117			while (chip->state != FL_XIP_WHILE_ERASING) {
1118				DECLARE_WAITQUEUE(wait, current);
1119				set_current_state(TASK_UNINTERRUPTIBLE);
1120				add_wait_queue(&chip->wq, &wait);
1121				mutex_unlock(&chip->mutex);
1122				schedule();
1123				remove_wait_queue(&chip->wq, &wait);
1124				mutex_lock(&chip->mutex);
1125			}
1126			/* Disallow XIP again */
1127			local_irq_disable();
1128
1129			/* Correct Erase Suspend Hangups for M29EW */
1130			cfi_fixup_m29ew_erase_suspend(map, adr);
1131			/* Resume the write or erase operation */
1132			map_write(map, cfi->sector_erase_cmd, adr);
1133			chip->state = oldstate;
1134			start = xip_currtime();
1135		} else if (usec >= 1000000/HZ) {
1136			/*
1137			 * Try to save on CPU power when waiting delay
1138			 * is at least a system timer tick period.
1139			 * No need to be extremely accurate here.
1140			 */
1141			xip_cpu_idle();
1142		}
1143		status = map_read(map, adr);
1144	} while (!map_word_andequal(map, status, OK, OK)
1145		 && xip_elapsed_since(start) < usec);
1146}
1147
1148#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
1149
1150/*
1151 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1152 * the flash is actively programming or erasing since we have to poll for
1153 * the operation to complete anyway.  We can't do that in a generic way with
1154 * a XIP setup so do it before the actual flash operation in this case
1155 * and stub it out from INVALIDATE_CACHE_UDELAY.
1156 */
1157#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1158	INVALIDATE_CACHED_RANGE(map, from, size)
1159
1160#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
1161	UDELAY(map, chip, adr, usec)
1162
1163/*
1164 * Extra notes:
1165 *
1166 * Activating this XIP support changes the way the code works a bit.  For
1167 * example the code to suspend the current process when concurrent access
1168 * happens is never executed because xip_udelay() will always return with the
1169 * same chip state as it was entered with.  This is why no care is taken
1170 * over the presence of add_wait_queue() or schedule() calls from within a
1171 * couple of xip_disable()'d areas of code, like in do_erase_oneblock for example.
1172 * The queueing and scheduling are always happening within xip_udelay().
1173 *
1174 * Similarly, get_chip() and put_chip() just happen to always be executed
1175 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) while the flash
1176 * is in array mode, so many of the cases therein are never executed and
1177 * cause no problem with XIP.
1178 */
1179
1180#else
1181
1182#define xip_disable(map, chip, adr)
1183#define xip_enable(map, chip, adr)
1184#define XIP_INVAL_CACHED_RANGE(x...)
1185
1186#define UDELAY(map, chip, adr, usec)  \
1187do {  \
1188	mutex_unlock(&chip->mutex);  \
1189	cfi_udelay(usec);  \
1190	mutex_lock(&chip->mutex);  \
1191} while (0)
1192
1193#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
1194do {  \
1195	mutex_unlock(&chip->mutex);  \
1196	INVALIDATE_CACHED_RANGE(map, adr, len);  \
1197	cfi_udelay(usec);  \
1198	mutex_lock(&chip->mutex);  \
1199} while (0)
1200
1201#endif
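
/*
 * Note on the contract above (sketch, not extra locking code): the
 * non-XIP UDELAY()/INVALIDATE_CACHE_UDELAY() drop chip->mutex around the
 * delay, and the XIP variant may drop it while servicing an interrupt,
 * so any chip state examined afterwards may have changed.  This is why
 * the polling loops below re-check chip->state on every iteration.
 */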
1202
1203static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1204{
1205	unsigned long cmd_addr;
1206	struct cfi_private *cfi = map->fldrv_priv;
1207	int ret;
1208
1209	adr += chip->start;
1210
1211	/* Ensure cmd read/writes are aligned. */
1212	cmd_addr = adr & ~(map_bankwidth(map)-1);
1213
1214	mutex_lock(&chip->mutex);
1215	ret = get_chip(map, chip, cmd_addr, FL_READY);
1216	if (ret) {
1217		mutex_unlock(&chip->mutex);
1218		return ret;
1219	}
1220
1221	if (chip->state != FL_POINT && chip->state != FL_READY) {
1222		map_write(map, CMD(0xf0), cmd_addr);
1223		chip->state = FL_READY;
1224	}
1225
1226	map_copy_from(map, buf, adr, len);
1227
1228	put_chip(map, chip, cmd_addr);
1229
1230	mutex_unlock(&chip->mutex);
1231	return 0;
1232}
1233
1234
1235static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1236{
1237	struct map_info *map = mtd->priv;
1238	struct cfi_private *cfi = map->fldrv_priv;
1239	unsigned long ofs;
1240	int chipnum;
1241	int ret = 0;
1242
1243	/* ofs: offset within the first chip at which the first read should start */
1244	chipnum = (from >> cfi->chipshift);
1245	ofs = from - (chipnum <<  cfi->chipshift);
1246
1247	while (len) {
1248		unsigned long thislen;
1249
1250		if (chipnum >= cfi->numchips)
1251			break;
1252
1253		if ((len + ofs -1) >> cfi->chipshift)
1254			thislen = (1<<cfi->chipshift) - ofs;
1255		else
1256			thislen = len;
1257
1258		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1259		if (ret)
1260			break;
1261
1262		*retlen += thislen;
1263		len -= thislen;
1264		buf += thislen;
1265
1266		ofs = 0;
1267		chipnum++;
1268	}
1269	return ret;
1270}
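
/*
 * Worked example (illustrative): with 8MiB chips, cfi->chipshift is 23,
 * so a read at from = 0x900000 starts on chip 1 (0x900000 >> 23) at
 * ofs = 0x100000, and thislen clips each pass at the chip boundary,
 * (1 << 23) - ofs, before continuing with ofs = 0 on the next chip.
 */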
1271
1272typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1273			loff_t adr, size_t len, u_char *buf, size_t grouplen);
1274
1275static inline void otp_enter(struct map_info *map, struct flchip *chip,
1276			     loff_t adr, size_t len)
1277{
1278	struct cfi_private *cfi = map->fldrv_priv;
1279
1280	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1281			 cfi->device_type, NULL);
1282	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1283			 cfi->device_type, NULL);
1284	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
1285			 cfi->device_type, NULL);
1286
1287	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
1288}
1289
1290static inline void otp_exit(struct map_info *map, struct flchip *chip,
1291			    loff_t adr, size_t len)
1292{
1293	struct cfi_private *cfi = map->fldrv_priv;
1294
1295	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1296			 cfi->device_type, NULL);
1297	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1298			 cfi->device_type, NULL);
1299	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
1300			 cfi->device_type, NULL);
1301	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
1302			 cfi->device_type, NULL);
1303
1304	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
1305}
1306
1307static inline int do_read_secsi_onechip(struct map_info *map,
1308					struct flchip *chip, loff_t adr,
1309					size_t len, u_char *buf,
1310					size_t grouplen)
1311{
1312	DECLARE_WAITQUEUE(wait, current);
1313
1314 retry:
1315	mutex_lock(&chip->mutex);
1316
1317	if (chip->state != FL_READY){
1318		set_current_state(TASK_UNINTERRUPTIBLE);
1319		add_wait_queue(&chip->wq, &wait);
1320
1321		mutex_unlock(&chip->mutex);
1322
1323		schedule();
1324		remove_wait_queue(&chip->wq, &wait);
1325
1326		goto retry;
1327	}
1328
1329	adr += chip->start;
1330
1331	chip->state = FL_READY;
1332
1333	otp_enter(map, chip, adr, len);
1334	map_copy_from(map, buf, adr, len);
1335	otp_exit(map, chip, adr, len);
1336
1337	wake_up(&chip->wq);
1338	mutex_unlock(&chip->mutex);
1339
1340	return 0;
1341}
1342
1343static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1344{
1345	struct map_info *map = mtd->priv;
1346	struct cfi_private *cfi = map->fldrv_priv;
1347	unsigned long ofs;
1348	int chipnum;
1349	int ret = 0;
1350
1351	/* ofs: offset within the first chip at which the first read should start */
1352	/* 8 secsi bytes per chip */
1353	chipnum = from >> 3;
1354	ofs = from & 7;
1355
1356	while (len) {
1357		unsigned long thislen;
1358
1359		if (chipnum >= cfi->numchips)
1360			break;
1361
1362		if ((len + ofs -1) >> 3)
1363			thislen = (1<<3) - ofs;
1364		else
1365			thislen = len;
1366
1367		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
1368					    thislen, buf, 0);
1369		if (ret)
1370			break;
1371
1372		*retlen += thislen;
1373		len -= thislen;
1374		buf += thislen;
1375
1376		ofs = 0;
1377		chipnum++;
1378	}
1379	return ret;
1380}
1381
1382static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1383				     unsigned long adr, map_word datum,
1384				     int mode);
1385
1386static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
1387			size_t len, u_char *buf, size_t grouplen)
1388{
1389	int ret;
1390	while (len) {
1391		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
1392		int gap = adr - bus_ofs;
1393		int n = min_t(int, len, map_bankwidth(map) - gap);
1394		map_word datum = map_word_ff(map);
1395
1396		if (n != map_bankwidth(map)) {
1397			/* partial write of a word, load old contents */
1398			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
1399			datum = map_read(map, bus_ofs);
1400			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
1401		}
1402
1403		datum = map_word_load_partial(map, datum, buf, gap, n);
1404		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1405		if (ret)
1406			return ret;
1407
1408		adr += n;
1409		buf += n;
1410		len -= n;
1411	}
1412
1413	return 0;
1414}
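
/*
 * Worked example (illustrative): on a 16-bit map, writing one byte at
 * adr = 3 gives bus_ofs = 2, gap = 1 and n = 1, so the old word at
 * offset 2 is read back, the single byte is merged in at byte 1 via
 * map_word_load_partial(), and the merged word is programmed.
 */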
1415
1416static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
1417		       size_t len, u_char *buf, size_t grouplen)
1418{
1419	struct cfi_private *cfi = map->fldrv_priv;
1420	uint8_t lockreg;
1421	unsigned long timeo;
1422	int ret;
1423
1424	/* make sure area matches group boundaries */
1425	if ((adr != 0) || (len != grouplen))
1426		return -EINVAL;
1427
1428	mutex_lock(&chip->mutex);
1429	ret = get_chip(map, chip, chip->start, FL_LOCKING);
1430	if (ret) {
1431		mutex_unlock(&chip->mutex);
1432		return ret;
1433	}
1434	chip->state = FL_LOCKING;
1435
1436	/* Enter lock register command */
1437	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1438			 cfi->device_type, NULL);
1439	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1440			 cfi->device_type, NULL);
1441	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
1442			 cfi->device_type, NULL);
1443
1444	/* read lock register */
1445	lockreg = cfi_read_query(map, 0);
1446
1447	/* clear bit 0 to protect the extended memory block */
1448	lockreg &= ~0x01;
1449
1450	/* write lock register */
1452	map_write(map, CMD(0xA0), chip->start);
1453	map_write(map, CMD(lockreg), chip->start);
1454
1455	/* wait for chip to become ready */
1456	timeo = jiffies + msecs_to_jiffies(2);
1457	for (;;) {
1458		if (chip_ready(map, chip, adr))
1459			break;
1460
1461		if (time_after(jiffies, timeo)) {
1462			pr_err("Waiting for chip to be ready timed out.\n");
1463			ret = -EIO;
1464			break;
1465		}
1466		UDELAY(map, chip, 0, 1);
1467	}
1468
1469	/* exit protection commands */
1470	map_write(map, CMD(0x90), chip->start);
1471	map_write(map, CMD(0x00), chip->start);
1472
1473	chip->state = FL_READY;
1474	put_chip(map, chip, chip->start);
1475	mutex_unlock(&chip->mutex);
1476
1477	return ret;
1478}
1479
1480static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
1481			       size_t *retlen, u_char *buf,
1482			       otp_op_t action, int user_regs)
1483{
1484	struct map_info *map = mtd->priv;
1485	struct cfi_private *cfi = map->fldrv_priv;
1486	int ofs_factor = cfi->interleave * cfi->device_type;
1487	unsigned long base;
1488	int chipnum;
1489	struct flchip *chip;
1490	uint8_t otp, lockreg;
1491	int ret;
1492
1493	size_t user_size, factory_size, otpsize;
1494	loff_t user_offset, factory_offset, otpoffset;
1495	int user_locked = 0, otplocked;
1496
1497	*retlen = 0;
1498
1499	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
1500		chip = &cfi->chips[chipnum];
1501		factory_size = 0;
1502		user_size = 0;
1503
1504		/* Micron M29EW family */
1505		if (is_m29ew(cfi)) {
1506			base = chip->start;
1507
1508			/* check whether secsi area is factory locked
1509			   or user lockable */
1510			mutex_lock(&chip->mutex);
1511			ret = get_chip(map, chip, base, FL_CFI_QUERY);
1512			if (ret) {
1513				mutex_unlock(&chip->mutex);
1514				return ret;
1515			}
1516			cfi_qry_mode_on(base, map, cfi);
1517			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
1518			cfi_qry_mode_off(base, map, cfi);
1519			put_chip(map, chip, base);
1520			mutex_unlock(&chip->mutex);
1521
1522			if (otp & 0x80) {
1523				/* factory locked */
1524				factory_offset = 0;
1525				factory_size = 0x100;
1526			} else {
1527				/* customer lockable */
1528				user_offset = 0;
1529				user_size = 0x100;
1530
1531				mutex_lock(&chip->mutex);
1532				ret = get_chip(map, chip, base, FL_LOCKING);
1533				if (ret) {
1534					mutex_unlock(&chip->mutex);
1535					return ret;
1536				}
1537
1538				/* Enter lock register command */
1539				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
1540						 chip->start, map, cfi,
1541						 cfi->device_type, NULL);
1542				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
1543						 chip->start, map, cfi,
1544						 cfi->device_type, NULL);
1545				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
1546						 chip->start, map, cfi,
1547						 cfi->device_type, NULL);
1548				/* read lock register */
1549				lockreg = cfi_read_query(map, 0);
1550				/* exit protection commands */
1551				map_write(map, CMD(0x90), chip->start);
1552				map_write(map, CMD(0x00), chip->start);
1553				put_chip(map, chip, chip->start);
1554				mutex_unlock(&chip->mutex);
1555
1556				user_locked = ((lockreg & 0x01) == 0x00);
1557			}
1558		}
1559
1560		otpsize = user_regs ? user_size : factory_size;
1561		if (!otpsize)
1562			continue;
1563		otpoffset = user_regs ? user_offset : factory_offset;
1564		otplocked = user_regs ? user_locked : 1;
1565
1566		if (!action) {
1567			/* return otpinfo */
1568			struct otp_info *otpinfo;
1569			if (len <= sizeof(*otpinfo))
1570				return -ENOSPC;
1571			len -= sizeof(*otpinfo);
1572			otpinfo = (struct otp_info *)buf;
1573			otpinfo->start = from;
1574			otpinfo->length = otpsize;
1575			otpinfo->locked = otplocked;
1576			buf += sizeof(*otpinfo);
1577			*retlen += sizeof(*otpinfo);
1578			from += otpsize;
1579		} else if ((from < otpsize) && (len > 0)) {
1580			size_t size;
1581			size = (len < otpsize - from) ? len : otpsize - from;
1582			ret = action(map, chip, otpoffset + from, size, buf,
1583				     otpsize);
1584			if (ret < 0)
1585				return ret;
1586
1587			buf += size;
1588			len -= size;
1589			*retlen += size;
1590			from = 0;
1591		} else {
1592			from -= otpsize;
1593		}
1594	}
1595	return 0;
1596}
1597
1598static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
1599					 size_t *retlen, struct otp_info *buf)
1600{
1601	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1602				   NULL, 0);
1603}
1604
1605static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
1606					 size_t *retlen, struct otp_info *buf)
1607{
1608	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1609				   NULL, 1);
1610}
1611
1612static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
1613					 size_t len, size_t *retlen,
1614					 u_char *buf)
1615{
1616	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1617				   buf, do_read_secsi_onechip, 0);
1618}
1619
1620static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
1621					 size_t len, size_t *retlen,
1622					 u_char *buf)
1623{
1624	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1625				   buf, do_read_secsi_onechip, 1);
1626}
1627
1628static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1629					  size_t len, size_t *retlen,
1630					  u_char *buf)
1631{
1632	return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
1633				   do_otp_write, 1);
1634}
1635
1636static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1637					 size_t len)
1638{
1639	size_t retlen;
1640	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
1641				   do_otp_lock, 1);
1642}
1643
1644static int __xipram do_write_oneword_once(struct map_info *map,
1645					  struct flchip *chip,
1646					  unsigned long adr, map_word datum,
1647					  int mode, struct cfi_private *cfi)
1648{
1649	unsigned long timeo = jiffies + HZ;
1650	/*
1651	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1652	 * have a max write time of a few hundred usecs). However, we should
1653	 * use the maximum timeout value given by the chip at probe time
1654	 * instead.  Unfortunately, struct flchip does not have a field for
1655	 * the maximum timeout, only for the typical one, which can be far too
1656	 * short depending on the conditions.  The ' + 1' is to avoid having a
1657	 * timeout of 0 jiffies if HZ is smaller than 1000.
1658	 */
1659	unsigned long uWriteTimeout = (HZ / 1000) + 1;
1660	int ret = 0;
1661
1662	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1663	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1664	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1665	map_write(map, datum, adr);
1666	chip->state = mode;
1667
1668	INVALIDATE_CACHE_UDELAY(map, chip,
1669				adr, map_bankwidth(map),
1670				chip->word_write_time);
1671
1672	/* See comment above for timeout value. */
1673	timeo = jiffies + uWriteTimeout;
1674	for (;;) {
1675		if (chip->state != mode) {
1676			/* Someone's suspended the write. Sleep */
1677			DECLARE_WAITQUEUE(wait, current);
1678
1679			set_current_state(TASK_UNINTERRUPTIBLE);
1680			add_wait_queue(&chip->wq, &wait);
1681			mutex_unlock(&chip->mutex);
1682			schedule();
1683			remove_wait_queue(&chip->wq, &wait);
1684			timeo = jiffies + (HZ / 2); /* FIXME */
1685			mutex_lock(&chip->mutex);
1686			continue;
1687		}
1688
1689		/*
1690		 * We check "time_after" and "!chip_good" before checking "chip_good"
1691		 * to avoid reporting a spurious timeout when scheduling delayed the poll.
1692		 */
1693		if (time_after(jiffies, timeo) &&
1694		    !chip_good(map, chip, adr, datum)) {
1695			xip_enable(map, chip, adr);
1696			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1697			xip_disable(map, chip, adr);
1698			ret = -EIO;
1699			break;
1700		}
1701
1702		if (chip_good(map, chip, adr, datum))
1703			break;
1704
1705		/* Latency issues. Drop the lock, wait a while and retry */
1706		UDELAY(map, chip, adr, 1);
1707	}
1708
1709	return ret;
1710}
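
/*
 * Bus-level sketch (illustrative) of the sequence issued above on a
 * single x16 chip in word mode: 0xAA -> 0x555, 0x55 -> 0x2AA,
 * 0xA0 -> 0x555, then the datum to its target address; completion is
 * then decided by DQ polling through chip_good().
 */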
1711
1712static int __xipram do_write_oneword_start(struct map_info *map,
1713					   struct flchip *chip,
1714					   unsigned long adr, int mode)
1715{
1716	int ret = 0;
1717
1718	mutex_lock(&chip->mutex);
1719
1720	ret = get_chip(map, chip, adr, mode);
1721	if (ret) {
1722		mutex_unlock(&chip->mutex);
1723		return ret;
1724	}
1725
1726	if (mode == FL_OTP_WRITE)
1727		otp_enter(map, chip, adr, map_bankwidth(map));
1728
1729	return ret;
1730}
1731
1732static void __xipram do_write_oneword_done(struct map_info *map,
1733					   struct flchip *chip,
1734					   unsigned long adr, int mode)
1735{
1736	if (mode == FL_OTP_WRITE)
1737		otp_exit(map, chip, adr, map_bankwidth(map));
1738
1739	chip->state = FL_READY;
1740	DISABLE_VPP(map);
1741	put_chip(map, chip, adr);
1742
1743	mutex_unlock(&chip->mutex);
1744}
1745
1746static int __xipram do_write_oneword_retry(struct map_info *map,
1747					   struct flchip *chip,
1748					   unsigned long adr, map_word datum,
1749					   int mode)
1750{
1751	struct cfi_private *cfi = map->fldrv_priv;
1752	int ret = 0;
1753	map_word oldd;
1754	int retry_cnt = 0;
1755
1756	/*
1757	 * Check for a NOP for the case when the datum to write is already
1758	 * present - it saves time and works around buggy chips that corrupt
1759	 * data at other locations when 0xff is written to a location that
1760	 * already contains 0xff.
1761	 */
1762	oldd = map_read(map, adr);
1763	if (map_word_equal(map, oldd, datum)) {
1764		pr_debug("MTD %s(): NOP\n", __func__);
1765		return ret;
1766	}
1767
1768	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1769	ENABLE_VPP(map);
1770	xip_disable(map, chip, adr);
1771
1772 retry:
1773	ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
1774	if (ret) {
1775		/* reset on all failures. */
1776		cfi_check_err_status(map, chip, adr);
1777		map_write(map, CMD(0xF0), chip->start);
1778		/* FIXME - should have reset delay before continuing */
1779
1780		if (++retry_cnt <= MAX_RETRIES) {
1781			ret = 0;
1782			goto retry;
1783		}
1784	}
1785	xip_enable(map, chip, adr);
1786
1787	return ret;
1788}
1789
1790static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1791				     unsigned long adr, map_word datum,
1792				     int mode)
1793{
1794	int ret = 0;
1795
1796	adr += chip->start;
1797
1798	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
1799		 datum.x[0]);
1800
1801	ret = do_write_oneword_start(map, chip, adr, mode);
1802	if (ret)
1803		return ret;
1804
1805	ret = do_write_oneword_retry(map, chip, adr, datum, mode);
1806
1807	do_write_oneword_done(map, chip, adr, mode);
1808
1809	return ret;
1810}
1811
1812
1813static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1814				  size_t *retlen, const u_char *buf)
1815{
1816	struct map_info *map = mtd->priv;
1817	struct cfi_private *cfi = map->fldrv_priv;
1818	int ret = 0;
1819	int chipnum;
1820	unsigned long ofs, chipstart;
1821	DECLARE_WAITQUEUE(wait, current);
1822
1823	chipnum = to >> cfi->chipshift;
1824	ofs = to  - (chipnum << cfi->chipshift);
1825	chipstart = cfi->chips[chipnum].start;
1826
1827	/* If it's not bus-aligned, do the first byte write */
1828	if (ofs & (map_bankwidth(map)-1)) {
1829		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1830		int i = ofs - bus_ofs;
1831		int n = 0;
1832		map_word tmp_buf;
1833
1834 retry:
1835		mutex_lock(&cfi->chips[chipnum].mutex);
1836
1837		if (cfi->chips[chipnum].state != FL_READY) {
1838			set_current_state(TASK_UNINTERRUPTIBLE);
1839			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1840
1841			mutex_unlock(&cfi->chips[chipnum].mutex);
1842
1843			schedule();
1844			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1845			goto retry;
1846		}
1847
1848		/* Load 'tmp_buf' with old contents of flash */
1849		tmp_buf = map_read(map, bus_ofs+chipstart);
1850
1851		mutex_unlock(&cfi->chips[chipnum].mutex);
1852
1853		/* Number of bytes to copy from buffer */
1854		n = min_t(int, len, map_bankwidth(map)-i);
1855
1856		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1857
1858		ret = do_write_oneword(map, &cfi->chips[chipnum],
1859				       bus_ofs, tmp_buf, FL_WRITING);
1860		if (ret)
1861			return ret;
1862
1863		ofs += n;
1864		buf += n;
1865		(*retlen) += n;
1866		len -= n;
1867
1868		if (ofs >> cfi->chipshift) {
1869			chipnum ++;
1870			ofs = 0;
1871			if (chipnum == cfi->numchips)
1872				return 0;
1873		}
1874	}
1875
1876	/* We are now aligned, write as much as possible */
1877	while (len >= map_bankwidth(map)) {
1878		map_word datum;
1879
1880		datum = map_word_load(map, buf);
1881
1882		ret = do_write_oneword(map, &cfi->chips[chipnum],
1883				       ofs, datum, FL_WRITING);
1884		if (ret)
1885			return ret;
1886
1887		ofs += map_bankwidth(map);
1888		buf += map_bankwidth(map);
1889		(*retlen) += map_bankwidth(map);
1890		len -= map_bankwidth(map);
1891
1892		if (ofs >> cfi->chipshift) {
1893			chipnum ++;
1894			ofs = 0;
1895			if (chipnum == cfi->numchips)
1896				return 0;
1897			chipstart = cfi->chips[chipnum].start;
1898		}
1899	}
1900
1901	/* Write the trailing bytes if any */
1902	if (len & (map_bankwidth(map)-1)) {
1903		map_word tmp_buf;
1904
1905 retry1:
1906		mutex_lock(&cfi->chips[chipnum].mutex);
1907
1908		if (cfi->chips[chipnum].state != FL_READY) {
1909			set_current_state(TASK_UNINTERRUPTIBLE);
1910			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1911
1912			mutex_unlock(&cfi->chips[chipnum].mutex);
1913
1914			schedule();
1915			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1916			goto retry1;
1917		}
1918
1919		tmp_buf = map_read(map, ofs + chipstart);
1920
1921		mutex_unlock(&cfi->chips[chipnum].mutex);
1922
1923		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1924
1925		ret = do_write_oneword(map, &cfi->chips[chipnum],
1926				       ofs, tmp_buf, FL_WRITING);
1927		if (ret)
1928			return ret;
1929
1930		(*retlen) += len;
1931	}
1932
1933	return 0;
1934}
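
/*
 * Worked example (illustrative): on a 16-bit map, writing 3 bytes to
 * to = 0x1001 performs a read-modify-write of the word at 0x1000 for
 * the leading byte, one aligned word write at 0x1002 for the next two
 * bytes, and nothing in the trailing-bytes step since len is then 0.
 */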
1935
1936#if !FORCE_WORD_WRITE
1937static int __xipram do_write_buffer_wait(struct map_info *map,
1938					 struct flchip *chip, unsigned long adr,
1939					 map_word datum)
1940{
1941	unsigned long timeo;
1942	unsigned long u_write_timeout;
1943	int ret = 0;
1944
1945	/*
1946	 * Timeout is calculated according to CFI data, if available.
1947	 * See more comments in cfi_cmdset_0002().
1948	 */
1949	u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
1950	timeo = jiffies + u_write_timeout;
1951
1952	for (;;) {
1953		if (chip->state != FL_WRITING) {
1954			/* Someone's suspended the write. Sleep */
1955			DECLARE_WAITQUEUE(wait, current);
1956
1957			set_current_state(TASK_UNINTERRUPTIBLE);
1958			add_wait_queue(&chip->wq, &wait);
1959			mutex_unlock(&chip->mutex);
1960			schedule();
1961			remove_wait_queue(&chip->wq, &wait);
1962			timeo = jiffies + (HZ / 2); /* FIXME */
1963			mutex_lock(&chip->mutex);
1964			continue;
1965		}
1966
1967		/*
1968		 * We check "time_after" and "!chip_good" before checking "chip_good"
1969		 * to avoid reporting a spurious timeout when scheduling delayed the poll.
1970		 */
1971		if (time_after(jiffies, timeo) &&
1972		    !chip_good(map, chip, adr, datum)) {
1973			ret = -EIO;
1974			break;
1975		}
1976
1977		if (chip_good(map, chip, adr, datum))
1978			break;
1979
1980		/* Latency issues. Drop the lock, wait a while and retry */
1981		UDELAY(map, chip, adr, 1);
1982	}
1983
1984	return ret;
1985}
1986
1987static void __xipram do_write_buffer_reset(struct map_info *map,
1988					   struct flchip *chip,
1989					   struct cfi_private *cfi)
1990{
1991	/*
1992	 * Recovery from write-buffer programming failures requires
1993	 * the write-to-buffer-reset sequence.  Since the last part
1994	 * of the sequence also works as a normal reset, we can run
1995	 * the same commands regardless of why we are here.
1996	 * See e.g.
1997	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
1998	 */
1999	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2000			 cfi->device_type, NULL);
2001	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2002			 cfi->device_type, NULL);
2003	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
2004			 cfi->device_type, NULL);
2005
2006	/* FIXME - should have reset delay before continuing */
2007}
2008
2009/*
2010 * FIXME: interleaved mode not tested, and probably not supported!
2011 */
2012static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
2013				    unsigned long adr, const u_char *buf,
2014				    int len)
2015{
2016	struct cfi_private *cfi = map->fldrv_priv;
2017	int ret = -EIO;
2018	unsigned long cmd_adr;
2019	int z, words;
2020	map_word datum;
2021
2022	adr += chip->start;
2023	cmd_adr = adr;
2024
2025	mutex_lock(&chip->mutex);
2026	ret = get_chip(map, chip, adr, FL_WRITING);
2027	if (ret) {
2028		mutex_unlock(&chip->mutex);
2029		return ret;
2030	}
2031
2032	datum = map_word_load(map, buf);
2033
2034	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
2035		 __func__, adr, datum.x[0]);
2036
2037	XIP_INVAL_CACHED_RANGE(map, adr, len);
2038	ENABLE_VPP(map);
2039	xip_disable(map, chip, cmd_adr);
2040
2041	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2042	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2043
2044	/* Write Buffer Load */
2045	map_write(map, CMD(0x25), cmd_adr);
2046
2047	chip->state = FL_WRITING_TO_BUFFER;
2048
2049	/* Write length of data to come */
2050	words = len / map_bankwidth(map);
2051	map_write(map, CMD(words - 1), cmd_adr);
2052	/* Write data */
2053	z = 0;
2054	while (z < words * map_bankwidth(map)) {
2055		datum = map_word_load(map, buf);
2056		map_write(map, datum, adr + z);
2057
2058		z += map_bankwidth(map);
2059		buf += map_bankwidth(map);
2060	}
2061	z -= map_bankwidth(map);
2062
2063	adr += z;
2064
2065	/* Write Buffer Program Confirm: GO GO GO */
2066	map_write(map, CMD(0x29), cmd_adr);
2067	chip->state = FL_WRITING;
2068
2069	INVALIDATE_CACHE_UDELAY(map, chip,
2070				adr, map_bankwidth(map),
2071				chip->word_write_time);
2072
2073	ret = do_write_buffer_wait(map, chip, adr, datum);
2074	if (ret) {
2075		cfi_check_err_status(map, chip, adr);
2076		do_write_buffer_reset(map, chip, cfi);
2077		pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
2078		       __func__, adr);
2079	}
2080
2081	xip_enable(map, chip, adr);
2082
2083	chip->state = FL_READY;
2084	DISABLE_VPP(map);
2085	put_chip(map, chip, adr);
2086	mutex_unlock(&chip->mutex);
2087
2088	return ret;
2089}
2090
2091
2092static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
2093				    size_t *retlen, const u_char *buf)
2094{
2095	struct map_info *map = mtd->priv;
2096	struct cfi_private *cfi = map->fldrv_priv;
2097	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
2098	int ret = 0;
2099	int chipnum;
2100	unsigned long ofs;
2101
2102	chipnum = to >> cfi->chipshift;
2103	ofs = to  - (chipnum << cfi->chipshift);
2104
2105	/* If it's not bus-aligned, do the first word write */
2106	if (ofs & (map_bankwidth(map)-1)) {
2107		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
2108		if (local_len > len)
2109			local_len = len;
2110		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2111					     local_len, retlen, buf);
2112		if (ret)
2113			return ret;
2114		ofs += local_len;
2115		buf += local_len;
2116		len -= local_len;
2117
2118		if (ofs >> cfi->chipshift) {
2119			chipnum ++;
2120			ofs = 0;
2121			if (chipnum == cfi->numchips)
2122				return 0;
2123		}
2124	}
2125
2126	/* Write buffer is worth it only if more than one word to write... */
2127	while (len >= map_bankwidth(map) * 2) {
2128		/* We must not cross write block boundaries */
2129		int size = wbufsize - (ofs & (wbufsize-1));
2130
2131		if (size > len)
2132			size = len;
2133		if (size % map_bankwidth(map))
2134			size -= size % map_bankwidth(map);
2135
2136		ret = do_write_buffer(map, &cfi->chips[chipnum],
2137				      ofs, buf, size);
2138		if (ret)
2139			return ret;
2140
2141		ofs += size;
2142		buf += size;
2143		(*retlen) += size;
2144		len -= size;
2145
2146		if (ofs >> cfi->chipshift) {
2147			chipnum ++;
2148			ofs = 0;
2149			if (chipnum == cfi->numchips)
2150				return 0;
2151		}
2152	}
2153
2154	if (len) {
2155		size_t retlen_dregs = 0;
2156
2157		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2158					     len, &retlen_dregs, buf);
2159
2160		*retlen += retlen_dregs;
2161		return ret;
2162	}
2163
2164	return 0;
2165}
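
/*
 * Worked example (illustrative): with MaxBufWriteSize = 5 and no
 * interleave, wbufsize is 1 << 5 = 32 bytes, and the expression
 * wbufsize - (ofs & (wbufsize - 1)) clips every chunk so that no
 * single buffered program crosses a 32-byte write-buffer boundary.
 */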
2166#endif /* !FORCE_WORD_WRITE */
2167
2168/*
2169 * Wait for the flash chip to become ready to write data
2170 *
2171 * This is only called during the panic_write() path. When panic_write()
2172 * is called, the kernel is in the process of a panic, and will soon be
2173 * dead. Therefore we don't take any locks, and attempt to get access
2174 * to the chip as soon as possible.
2175 */
2176static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
2177				 unsigned long adr)
2178{
2179	struct cfi_private *cfi = map->fldrv_priv;
2180	int retries = 10;
2181	int i;
2182
2183	/*
2184	 * If the driver thinks the chip is idle, and no toggle bits
2185	 * are changing, then the chip is actually idle for sure.
2186	 */
2187	if (chip->state == FL_READY && chip_ready(map, chip, adr))
2188		return 0;
2189
2190	/*
2191	 * Try several times to reset the chip and then wait for it
2192	 * to become idle. The upper limit of a few milliseconds of
2193	 * delay isn't a big problem: the kernel is dying anyway. It
2194	 * is more important to save the messages.
2195	 */
2196	while (retries > 0) {
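		/*
		 * We cannot sleep on the panic path, so busy-wait in 1 us
		 * steps for about a millisecond (rounded up to at least one
		 * jiffy) before retrying the reset.
		 */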
2197		const unsigned long timeo = (HZ / 1000) + 1;
2198
2199		/* send the reset command */
2200		map_write(map, CMD(0xF0), chip->start);
2201
2202		/* wait for the chip to become ready */
2203		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
2204			if (chip_ready(map, chip, adr))
2205				return 0;
2206
2207			udelay(1);
2208		}
2209
2210		retries--;
2211	}
2212
2213	/* the chip never became ready */
2214	return -EBUSY;
2215}
2216
2217/*
2218 * Write out one word of data to a single flash chip during a kernel panic
2219 *
2220 * This is only called during the panic_write() path. When panic_write()
2221 * is called, the kernel is in the process of a panic, and will soon be
2222 * dead. Therefore we don't take any locks, and attempt to get access
2223 * to the chip as soon as possible.
2224 *
2225 * The implementation of this routine is intentionally similar to
2226 * do_write_oneword(), in order to ease code maintenance.
2227 */
2228static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
2229				  unsigned long adr, map_word datum)
2230{
2231	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
2232	struct cfi_private *cfi = map->fldrv_priv;
2233	int retry_cnt = 0;
2234	map_word oldd;
2235	int ret = 0;
2236	int i;
2237
2238	adr += chip->start;
2239
2240	ret = cfi_amdstd_panic_wait(map, chip, adr);
2241	if (ret)
2242		return ret;
2243
2244	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
2245			__func__, adr, datum.x[0]);
2246
2247	/*
2248	 * Check for a NOP for the case when the datum to write is already
2249	 * present - it saves time and works around buggy chips that corrupt
2250	 * data at other locations when 0xff is written to a location that
2251	 * already contains 0xff.
2252	 */
2253	oldd = map_read(map, adr);
2254	if (map_word_equal(map, oldd, datum)) {
2255		pr_debug("MTD %s(): NOP\n", __func__);
2256		goto op_done;
2257	}
2258
2259	ENABLE_VPP(map);
2260
2261retry:
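	/*
	 * Standard AMD three-cycle program sequence: the two unlock writes
	 * (0xAA, 0x55) followed by the 0xA0 program command, then the datum
	 * itself at the target address.
	 */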
2262	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2263	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2264	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2265	map_write(map, datum, adr);
2266
2267	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
2268		if (chip_ready(map, chip, adr))
2269			break;
2270
2271		udelay(1);
2272	}
2273
2274	if (!chip_good(map, chip, adr, datum)) {
2275		/* reset on all failures. */
2276		cfi_check_err_status(map, chip, adr);
2277		map_write(map, CMD(0xF0), chip->start);
2278		/* FIXME - should have reset delay before continuing */
2279
2280		if (++retry_cnt <= MAX_RETRIES)
2281			goto retry;
2282
2283		ret = -EIO;
2284	}
2285
2286op_done:
2287	DISABLE_VPP(map);
2288	return ret;
2289}
2290
2291/*
2292 * Write out some data during a kernel panic
2293 *
2294 * This is used by the mtdoops driver to save the dying messages from a
2295 * kernel which has panic'd.
2296 *
2297 * This routine ignores all of the locking used throughout the rest of the
2298 * driver, in order to ensure that the data gets written out no matter what
2299 * state this driver (and the flash chip itself) was in when the kernel crashed.
2300 *
2301 * The implementation of this routine is intentionally similar to
2302 * cfi_amdstd_write_words(), in order to ease code maintenance.
2303 */
2304static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
2305				  size_t *retlen, const u_char *buf)
2306{
2307	struct map_info *map = mtd->priv;
2308	struct cfi_private *cfi = map->fldrv_priv;
2309	unsigned long ofs, chipstart;
2310	int ret = 0;
2311	int chipnum;
2312
2313	chipnum = to >> cfi->chipshift;
2314	ofs = to - (chipnum << cfi->chipshift);
2315	chipstart = cfi->chips[chipnum].start;
2316
2317	/* If it's not bus-aligned, read-modify-write the first partial word */
2318	if (ofs & (map_bankwidth(map) - 1)) {
2319		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
2320		int i = ofs - bus_ofs;
2321		int n = 0;
2322		map_word tmp_buf;
2323
2324		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
2325		if (ret)
2326			return ret;
2327
2328		/* Load 'tmp_buf' with old contents of flash */
2329		tmp_buf = map_read(map, bus_ofs + chipstart);
2330
2331		/* Number of bytes to copy from buffer */
2332		n = min_t(int, len, map_bankwidth(map) - i);
2333
2334		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
2335
2336		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2337					     bus_ofs, tmp_buf);
2338		if (ret)
2339			return ret;
2340
2341		ofs += n;
2342		buf += n;
2343		(*retlen) += n;
2344		len -= n;
2345
2346		if (ofs >> cfi->chipshift) {
2347			chipnum++;
2348			ofs = 0;
2349			if (chipnum == cfi->numchips)
2350				return 0;
2351		}
2352	}
2353
2354	/* We are now aligned, write as much as possible */
2355	while (len >= map_bankwidth(map)) {
2356		map_word datum;
2357
2358		datum = map_word_load(map, buf);
2359
2360		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2361					     ofs, datum);
2362		if (ret)
2363			return ret;
2364
2365		ofs += map_bankwidth(map);
2366		buf += map_bankwidth(map);
2367		(*retlen) += map_bankwidth(map);
2368		len -= map_bankwidth(map);
2369
2370		if (ofs >> cfi->chipshift) {
2371			chipnum++;
2372			ofs = 0;
2373			if (chipnum == cfi->numchips)
2374				return 0;
2375
2376			chipstart = cfi->chips[chipnum].start;
2377		}
2378	}
2379
2380	/* Write the trailing bytes if any */
2381	if (len & (map_bankwidth(map) - 1)) {
2382		map_word tmp_buf;
2383
2384		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
2385		if (ret)
2386			return ret;
2387
2388		tmp_buf = map_read(map, ofs + chipstart);
2389
2390		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
2391
2392		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2393					     ofs, tmp_buf);
2394		if (ret)
2395			return ret;
2396
2397		(*retlen) += len;
2398	}
2399
2400	return 0;
2401}
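/*
 * Illustrative only (not part of this driver): a client such as mtdoops
 * reaches the function above through the generic mtd_panic_write()
 * wrapper, roughly:
 *
 *	size_t retlen;
 *	int err = mtd_panic_write(mtd, ofs, len, &retlen, buf);
 *
 * which dispatches to cfi_amdstd_panic_write() for chips driven by this
 * command set.
 */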
2402
2403
2404/*
2405 * Handle devices with one erase region, that only implement
2406 * the chip erase command.
2407 */
2408static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2409{
2410	struct cfi_private *cfi = map->fldrv_priv;
2411	unsigned long timeo = jiffies + HZ;
2412	unsigned long adr;
2413	DECLARE_WAITQUEUE(wait, current);
2414	int ret = 0;
2415	int retry_cnt = 0;
2416
2417	adr = cfi->addr_unlock1;
2418
2419	mutex_lock(&chip->mutex);
2420	ret = get_chip(map, chip, adr, FL_ERASING);
2421	if (ret) {
2422		mutex_unlock(&chip->mutex);
2423		return ret;
2424	}
2425
2426	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2427	       __func__, chip->start);
2428
2429	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
2430	ENABLE_VPP(map);
2431	xip_disable(map, chip, adr);
2432
2433 retry:
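	/*
	 * Six-cycle chip-erase sequence: unlock (0xAA/0x55), erase setup
	 * (0x80), a second unlock, then 0x10 for "erase entire chip".
	 */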
2434	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2435	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2436	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2437	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2438	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2439	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2440
2441	chip->state = FL_ERASING;
2442	chip->erase_suspended = 0;
2443	chip->in_progress_block_addr = adr;
2444	chip->in_progress_block_mask = ~(map->size - 1);
2445
2446	INVALIDATE_CACHE_UDELAY(map, chip,
2447				adr, map->size,
2448				chip->erase_time*500);
2449
2450	timeo = jiffies + (HZ*20);
2451
2452	for (;;) {
2453		if (chip->state != FL_ERASING) {
2454			/* Someone's suspended the erase. Sleep */
2455			set_current_state(TASK_UNINTERRUPTIBLE);
2456			add_wait_queue(&chip->wq, &wait);
2457			mutex_unlock(&chip->mutex);
2458			schedule();
2459			remove_wait_queue(&chip->wq, &wait);
2460			mutex_lock(&chip->mutex);
2461			continue;
2462		}
2463		if (chip->erase_suspended) {
2464			/* This erase was suspended and resumed.
2465			   Adjust the timeout */
2466			timeo = jiffies + (HZ*20); /* FIXME */
2467			chip->erase_suspended = 0;
2468		}
2469
2470		if (chip_good(map, chip, adr, map_word_ff(map)))
2471			break;
2472
2473		if (time_after(jiffies, timeo)) {
2474			printk(KERN_WARNING "MTD %s(): software timeout\n",
2475			       __func__);
2476			ret = -EIO;
2477			break;
2478		}
2479
2480		/* Latency issues. Drop the lock, wait a while and retry */
2481		UDELAY(map, chip, adr, 1000000/HZ);
2482	}
2483	/* Did we succeed? */
2484	if (ret) {
2485		/* reset on all failures. */
2486		cfi_check_err_status(map, chip, adr);
2487		map_write(map, CMD(0xF0), chip->start);
2488		/* FIXME - should have reset delay before continuing */
2489
2490		if (++retry_cnt <= MAX_RETRIES) {
2491			ret = 0;
2492			goto retry;
2493		}
2494	}
2495
2496	chip->state = FL_READY;
2497	xip_enable(map, chip, adr);
2498	DISABLE_VPP(map);
2499	put_chip(map, chip, adr);
2500	mutex_unlock(&chip->mutex);
2501
2502	return ret;
2503}
2504
2505
2506static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
2507{
2508	struct cfi_private *cfi = map->fldrv_priv;
2509	unsigned long timeo = jiffies + HZ;
2510	DECLARE_WAITQUEUE(wait, current);
2511	int ret = 0;
2512	int retry_cnt = 0;
2513
2514	adr += chip->start;
2515
2516	mutex_lock(&chip->mutex);
2517	ret = get_chip(map, chip, adr, FL_ERASING);
2518	if (ret) {
2519		mutex_unlock(&chip->mutex);
2520		return ret;
2521	}
2522
2523	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2524		 __func__, adr);
2525
2526	XIP_INVAL_CACHED_RANGE(map, adr, len);
2527	ENABLE_VPP(map);
2528	xip_disable(map, chip, adr);
2529
2530 retry:
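	/*
	 * Same six-cycle pattern as the chip erase above, except that the
	 * final cycle writes the sector-erase command (typically 0x30) to
	 * the address of the sector itself.
	 */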
2531	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2532	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2533	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2534	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2535	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2536	map_write(map, cfi->sector_erase_cmd, adr);
2537
2538	chip->state = FL_ERASING;
2539	chip->erase_suspended = 0;
2540	chip->in_progress_block_addr = adr;
2541	chip->in_progress_block_mask = ~(len - 1);
2542
2543	INVALIDATE_CACHE_UDELAY(map, chip,
2544				adr, len,
2545				chip->erase_time*500);
2546
2547	timeo = jiffies + (HZ*20);
2548
2549	for (;;) {
2550		if (chip->state != FL_ERASING) {
2551			/* Someone's suspended the erase. Sleep */
2552			set_current_state(TASK_UNINTERRUPTIBLE);
2553			add_wait_queue(&chip->wq, &wait);
2554			mutex_unlock(&chip->mutex);
2555			schedule();
2556			remove_wait_queue(&chip->wq, &wait);
2557			mutex_lock(&chip->mutex);
2558			continue;
2559		}
2560		if (chip->erase_suspended) {
2561			/* This erase was suspended and resumed.
2562			   Adjust the timeout */
2563			timeo = jiffies + (HZ*20); /* FIXME */
2564			chip->erase_suspended = 0;
2565		}
2566
2567		if (chip_good(map, chip, adr, map_word_ff(map)))
2568			break;
2569
2570		if (time_after(jiffies, timeo)) {
2571			printk(KERN_WARNING "MTD %s(): software timeout\n",
2572			       __func__);
2573			ret = -EIO;
2574			break;
2575		}
2576
2577		/* Latency issues. Drop the lock, wait a while and retry */
2578		UDELAY(map, chip, adr, 1000000/HZ);
2579	}
2580	/* Did we succeed? */
2581	if (ret) {
2582		/* reset on all failures. */
2583		cfi_check_err_status(map, chip, adr);
2584		map_write(map, CMD(0xF0), chip->start);
2585		/* FIXME - should have reset delay before continuing */
2586
2587		if (++retry_cnt <= MAX_RETRIES) {
2588			ret = 0;
2589			goto retry;
2590		}
2591	}
2592
2593	chip->state = FL_READY;
2594	xip_enable(map, chip, adr);
2595	DISABLE_VPP(map);
2596	put_chip(map, chip, adr);
2597	mutex_unlock(&chip->mutex);
2598	return ret;
2599}
2600
2601
2602static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2603{
2604	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2605				instr->len, NULL);
2606}
2607
2608
2609static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2610{
2611	struct map_info *map = mtd->priv;
2612	struct cfi_private *cfi = map->fldrv_priv;
2613
2614	if (instr->addr != 0)
2615		return -EINVAL;
2616
2617	if (instr->len != mtd->size)
2618		return -EINVAL;
2619
2620	return do_erase_chip(map, &cfi->chips[0]);
2621}
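/*
 * Illustrative only: a whole-chip erase arrives here via the generic
 * mtd_erase() entry point with a request spanning the device, e.g.
 *
 *	struct erase_info ei = { .addr = 0, .len = mtd->size };
 *	int err = mtd_erase(mtd, &ei);
 */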
2622
2623static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2624			 unsigned long adr, int len, void *thunk)
2625{
2626	struct cfi_private *cfi = map->fldrv_priv;
2627	int ret;
2628
2629	mutex_lock(&chip->mutex);
2630	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2631	if (ret)
2632		goto out_unlock;
2633	chip->state = FL_LOCKING;
2634
2635	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2636
2637	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2638			 cfi->device_type, NULL);
2639	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2640			 cfi->device_type, NULL);
2641	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2642			 cfi->device_type, NULL);
2643	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2644			 cfi->device_type, NULL);
2645	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2646			 cfi->device_type, NULL);
2647	map_write(map, CMD(0x40), chip->start + adr);
2648
2649	chip->state = FL_READY;
2650	put_chip(map, chip, adr + chip->start);
2651	ret = 0;
2652
2653out_unlock:
2654	mutex_unlock(&chip->mutex);
2655	return ret;
2656}
2657
2658static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2659			   unsigned long adr, int len, void *thunk)
2660{
2661	struct cfi_private *cfi = map->fldrv_priv;
2662	int ret;
2663
2664	mutex_lock(&chip->mutex);
2665	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2666	if (ret)
2667		goto out_unlock;
2668	chip->state = FL_UNLOCKING;
2669
2670	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
2671
2672	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2673			 cfi->device_type, NULL);
2674	map_write(map, CMD(0x70), adr);
2675
2676	chip->state = FL_READY;
2677	put_chip(map, chip, adr + chip->start);
2678	ret = 0;
2679
2680out_unlock:
2681	mutex_unlock(&chip->mutex);
2682	return ret;
2683}
2684
2685static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2686{
2687	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2688}
2689
2690static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2691{
2692	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2693}
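/*
 * These two helpers are installed as mtd->_lock and mtd->_unlock for
 * Atmel parts elsewhere in this file (presumably via a chip fixup), so
 * user space reaches them through the usual mtd_lock()/mtd_unlock()
 * calls.
 */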
2694
2695/*
2696 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2697 */
2698
2699struct ppb_lock {
2700	struct flchip *chip;
2701	unsigned long adr;
2702	int locked;
2703};
2704
2705#define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
2706#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
2707#define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
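/*
 * do_ppb_xxlock() is invoked through cfi_varsize_frob() with one of the
 * thunk values above selecting its mode: lock one sector, unlock (which
 * the hardware only supports chip-wide), or query a sector's lock state.
 */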
2708
2709static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2710					struct flchip *chip,
2711					unsigned long adr, int len, void *thunk)
2712{
2713	struct cfi_private *cfi = map->fldrv_priv;
2714	unsigned long timeo;
2715	int ret;
2716
2717	adr += chip->start;
2718	mutex_lock(&chip->mutex);
2719	ret = get_chip(map, chip, adr, FL_LOCKING);
2720	if (ret) {
2721		mutex_unlock(&chip->mutex);
2722		return ret;
2723	}
2724
2725	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2726
2727	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2728			 cfi->device_type, NULL);
2729	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2730			 cfi->device_type, NULL);
2731	/* PPB entry command */
2732	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
2733			 cfi->device_type, NULL);
2734
2735	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2736		chip->state = FL_LOCKING;
2737		map_write(map, CMD(0xA0), adr);
2738		map_write(map, CMD(0x00), adr);
2739	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2740		/*
2741		 * Unlocking of one specific sector is not supported, so we
2742		 * have to unlock all sectors of this device instead
2743		 */
2744		chip->state = FL_UNLOCKING;
2745		map_write(map, CMD(0x80), chip->start);
2746		map_write(map, CMD(0x30), chip->start);
2747	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2748		chip->state = FL_JEDEC_QUERY;
2749		/* Return locked status: 0->locked, 1->unlocked */
2750		ret = !cfi_read_query(map, adr);
2751	} else
2752		BUG();
2753
2754	/*
2755	 * Wait for the operation to complete: unlocking all sectors takes quite long
2756	 */
2757	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
2758	for (;;) {
2759		if (chip_ready(map, chip, adr))
2760			break;
2761
2762		if (time_after(jiffies, timeo)) {
2763			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2764			ret = -EIO;
2765			break;
2766		}
2767
2768		UDELAY(map, chip, adr, 1);
2769	}
2770
2771	/* Exit the PPB command set (0x90/0x00 exit sequence) */
2772	map_write(map, CMD(0x90), chip->start);
2773	map_write(map, CMD(0x00), chip->start);
2774
2775	chip->state = FL_READY;
2776	put_chip(map, chip, adr);
2777	mutex_unlock(&chip->mutex);
2778
2779	return ret;
2780}
2781
2782static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2783				       uint64_t len)
2784{
2785	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2786				DO_XXLOCK_ONEBLOCK_LOCK);
2787}
2788
2789static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2790					 uint64_t len)
2791{
2792	struct mtd_erase_region_info *regions = mtd->eraseregions;
2793	struct map_info *map = mtd->priv;
2794	struct cfi_private *cfi = map->fldrv_priv;
2795	struct ppb_lock *sect;
2796	unsigned long adr;
2797	loff_t offset;
2798	uint64_t length;
2799	int chipnum;
2800	int i;
2801	int sectors;
2802	int ret;
2803	int max_sectors;
2804
2805	/*
2806	 * PPB unlocking always unlocks all sectors of the flash chip.
2807	 * We need to re-lock all previously locked sectors. So let's
2808	 * first check the locking status of all sectors and save
2809	 * it for future use.
2810	 */
2811	max_sectors = 0;
2812	for (i = 0; i < mtd->numeraseregions; i++)
2813		max_sectors += regions[i].numblocks;
2814
2815	sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
2816	if (!sect)
2817		return -ENOMEM;
2818
2819	/*
2820	 * This code to walk all sectors is a slightly modified version
2821	 * of the cfi_varsize_frob() code.
2822	 */
2823	i = 0;
2824	chipnum = 0;
2825	adr = 0;
2826	sectors = 0;
2827	offset = 0;
2828	length = mtd->size;
2829
2830	while (length) {
2831		int size = regions[i].erasesize;
2832
2833		/*
2834		 * Only query sectors that are not being unlocked. The other
2835		 * sectors will be unlocked anyway, so let's keep their saved
2836		 * locking status at "unlocked" (locked=0) for the final re-locking.
2837		 */
2838		if ((offset < ofs) || (offset >= (ofs + len))) {
2839			sect[sectors].chip = &cfi->chips[chipnum];
2840			sect[sectors].adr = adr;
2841			sect[sectors].locked = do_ppb_xxlock(
2842				map, &cfi->chips[chipnum], adr, 0,
2843				DO_XXLOCK_ONEBLOCK_GETLOCK);
2844		}
2845
2846		adr += size;
2847		offset += size;
2848		length -= size;
2849
2850		if (offset == regions[i].offset + size * regions[i].numblocks)
2851			i++;
2852
2853		if (adr >> cfi->chipshift) {
2854			if (offset >= (ofs + len))
2855				break;
2856			adr = 0;
2857			chipnum++;
2858
2859			if (chipnum >= cfi->numchips)
2860				break;
2861		}
2862
2863		sectors++;
2864		if (sectors >= max_sectors) {
2865		printk(KERN_ERR "PPB locking supports only %d sectors!\n",
2866			       max_sectors);
2867			kfree(sect);
2868			return -EINVAL;
2869		}
2870	}
2871
2872	/* Now unlock the whole chip */
2873	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2874			       DO_XXLOCK_ONEBLOCK_UNLOCK);
2875	if (ret) {
2876		kfree(sect);
2877		return ret;
2878	}
2879
2880	/*
2881	 * PPB unlocking always unlocks all sectors of the flash chip.
2882	 * We need to re-lock all previously locked sectors.
2883	 */
2884	for (i = 0; i < sectors; i++) {
2885		if (sect[i].locked)
2886			do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2887				      DO_XXLOCK_ONEBLOCK_LOCK);
2888	}
2889
2890	kfree(sect);
2891	return ret;
2892}
2893
2894static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2895					    uint64_t len)
2896{
2897	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2898				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2899}
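/*
 * Illustrative only: with the PPB helpers installed as mtd->_lock,
 * mtd->_unlock and mtd->_is_locked, a caller would check and clear
 * protection with, e.g.:
 *
 *	if (mtd_is_locked(mtd, ofs, len) > 0)
 *		err = mtd_unlock(mtd, ofs, len);
 */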
2900
2901static void cfi_amdstd_sync (struct mtd_info *mtd)
2902{
2903	struct map_info *map = mtd->priv;
2904	struct cfi_private *cfi = map->fldrv_priv;
2905	int i;
2906	struct flchip *chip;
2907	int ret = 0;
2908	DECLARE_WAITQUEUE(wait, current);
2909
2910	for (i = 0; !ret && i < cfi->numchips; i++) {
2911		chip = &cfi->chips[i];
2912
2913	retry:
2914		mutex_lock(&chip->mutex);
2915
2916		switch (chip->state) {
2917		case FL_READY:
2918		case FL_STATUS:
2919		case FL_CFI_QUERY:
2920		case FL_JEDEC_QUERY:
2921			chip->oldstate = chip->state;
2922			chip->state = FL_SYNCING;
2923			/* No need to wake_up() on this state change -
2924			 * as the whole point is that nobody can do anything
2925			 * with the chip now anyway.
2926			 */
2927			/* fall through */
2928		case FL_SYNCING:
2929			mutex_unlock(&chip->mutex);
2930			break;
2931
2932		default:
2933			/* Not an idle state */
2934			set_current_state(TASK_UNINTERRUPTIBLE);
2935			add_wait_queue(&chip->wq, &wait);
2936
2937			mutex_unlock(&chip->mutex);
2938
2939			schedule();
2940
2941			remove_wait_queue(&chip->wq, &wait);
2942
2943			goto retry;
2944		}
2945	}
2946
2947	/* Unlock the chips again */
2948
2949	for (i--; i >= 0; i--) {
2950		chip = &cfi->chips[i];
2951
2952		mutex_lock(&chip->mutex);
2953
2954		if (chip->state == FL_SYNCING) {
2955			chip->state = chip->oldstate;
2956			wake_up(&chip->wq);
2957		}
2958		mutex_unlock(&chip->mutex);
2959	}
2960}
2961
2962
2963static int cfi_amdstd_suspend(struct mtd_info *mtd)
2964{
2965	struct map_info *map = mtd->priv;
2966	struct cfi_private *cfi = map->fldrv_priv;
2967	int i;
2968	struct flchip *chip;
2969	int ret = 0;
2970
2971	for (i = 0; !ret && i < cfi->numchips; i++) {
2972		chip = &cfi->chips[i];
2973
2974		mutex_lock(&chip->mutex);
2975
2976		switch (chip->state) {
2977		case FL_READY:
2978		case FL_STATUS:
2979		case FL_CFI_QUERY:
2980		case FL_JEDEC_QUERY:
2981			chip->oldstate = chip->state;
2982			chip->state = FL_PM_SUSPENDED;
2983			/* No need to wake_up() on this state change -
2984			 * as the whole point is that nobody can do anything
2985			 * with the chip now anyway.
2986			 */
			/* fall through */
2987		case FL_PM_SUSPENDED:
2988			break;
2989
2990		default:
2991			ret = -EAGAIN;
2992			break;
2993		}
2994		mutex_unlock(&chip->mutex);
2995	}
2996
2997	/* Unlock the chips again */
2998
2999	if (ret) {
3000		for (i--; i >= 0; i--) {
3001			chip = &cfi->chips[i];
3002
3003			mutex_lock(&chip->mutex);
3004
3005			if (chip->state == FL_PM_SUSPENDED) {
3006				chip->state = chip->oldstate;
3007				wake_up(&chip->wq);
3008			}
3009			mutex_unlock(&chip->mutex);
3010		}
3011	}
3012
3013	return ret;
3014}
3015
3016
3017static void cfi_amdstd_resume(struct mtd_info *mtd)
3018{
3019	struct map_info *map = mtd->priv;
3020	struct cfi_private *cfi = map->fldrv_priv;
3021	int i;
3022	struct flchip *chip;
3023
3024	for (i = 0; i < cfi->numchips; i++) {
3025
3026		chip = &cfi->chips[i];
3027
3028		mutex_lock(&chip->mutex);
3029
3030		if (chip->state == FL_PM_SUSPENDED) {
3031			chip->state = FL_READY;
3032			map_write(map, CMD(0xF0), chip->start);
3033			wake_up(&chip->wq);
3034		}
3035		else
3036			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
3037
3038		mutex_unlock(&chip->mutex);
3039	}
3040}
3041
3042
3043/*
3044 * Ensure that the flash device is put back into read array mode before
3045 * unloading the driver or rebooting.  On some systems, rebooting while
3046 * the flash is in query/program/erase mode will prevent the CPU from
3047 * fetching the bootloader code, requiring a hard reset or power cycle.
3048 */
3049static int cfi_amdstd_reset(struct mtd_info *mtd)
3050{
3051	struct map_info *map = mtd->priv;
3052	struct cfi_private *cfi = map->fldrv_priv;
3053	int i, ret;
3054	struct flchip *chip;
3055
3056	for (i = 0; i < cfi->numchips; i++) {
3057
3058		chip = &cfi->chips[i];
3059
3060		mutex_lock(&chip->mutex);
3061
3062		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
3063		if (!ret) {
3064			map_write(map, CMD(0xF0), chip->start);
3065			chip->state = FL_SHUTDOWN;
3066			put_chip(map, chip, chip->start);
3067		}
3068
3069		mutex_unlock(&chip->mutex);
3070	}
3071
3072	return 0;
3073}
3074
3075
3076static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
3077			       void *v)
3078{
3079	struct mtd_info *mtd;
3080
3081	mtd = container_of(nb, struct mtd_info, reboot_notifier);
3082	cfi_amdstd_reset(mtd);
3083	return NOTIFY_DONE;
3084}
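/*
 * The notifier above is hooked to mtd->reboot_notifier during setup
 * elsewhere in this file, so the reset runs on shutdown as well as on
 * module unload (see cfi_amdstd_destroy() below).
 */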
3085
3086
3087static void cfi_amdstd_destroy(struct mtd_info *mtd)
3088{
3089	struct map_info *map = mtd->priv;
3090	struct cfi_private *cfi = map->fldrv_priv;
3091
3092	cfi_amdstd_reset(mtd);
3093	unregister_reboot_notifier(&mtd->reboot_notifier);
3094	kfree(cfi->cmdset_priv);
3095	kfree(cfi->cfiq);
3096	kfree(cfi);
3097	kfree(mtd->eraseregions);
3098}
3099
3100MODULE_LICENSE("GPL");
3101MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
3102MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
3103MODULE_ALIAS("cfi_cmdset_0006");
3104MODULE_ALIAS("cfi_cmdset_0701");