// SPDX-License-Identifier: GPL-2.0
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat.
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 * 	- auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define PF38F4476	0x881c
#define M28F00AP30	0x8963
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A	0x0080
#define M50FLW080B	0x0081
/* Atmel chips */
#define AT49BV640D	0x02de
#define AT49BV640DT	0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90	0x00b0
#define LH28F640BFHE_PBTL90	0x00b1
#define LH28F640BFHE_PTTL70A	0x00b2
#define LH28F640BFHE_PBTL70A	0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					    size_t *, const u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
					   size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
					   size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Reverse byteswapping */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

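/*
 * The AT49BV640D/DT power up with all blocks locked. Advertise the
 * "instant individual block locking" feature (FeatureSupport bit 5,
 * cf. cfi_tell_features() above) and flag the device so that blocks
 * are automatically unlocked on power-up/resume.
 */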
static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	cfip->FeatureSupport |= (1 << 5);
	mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	                    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static int is_LH28F640BF(struct cfi_private *cfi)
{
	/* Sharp LH28F640BF Family */
	if (cfi->mfr == CFI_MFR_SHARP && (
	    cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
	    cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
		return 1;
	return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/* Reset the Partition Configuration Register on LH28F640BF
	 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
	if (is_LH28F640BF(cfi)) {
		printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
		map_write(map, CMD(0x60), 0);
		map_write(map, CMD(0x04), 0);

		/* We have set up a single partition, thus
		 * Simultaneous Operations are not allowed */
		printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
		extp->FeatureSupport &= ~512;
	}
}

static void fixup_use_point(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	if (!mtd->_point && map_is_linear(map)) {
		mtd->_point   = cfi_intelext_point;
		mtd->_unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->_write = cfi_intelext_write_buffers;
		mtd->_writev = cfi_intelext_writev;
	}
}

/*
 * Some chips power up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip->FeatureSupport&32) {
		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
		mtd->flags |= MTD_POWERUP_LOCK;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
	{ 0, 0, NULL }
};
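/*
 * How a fixup table is consumed, for reference: cfi_fixup() invokes
 * the hook of every entry whose manufacturer and device ID match the
 * probed chip, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards and a
 * NULL entry terminating the table.  A sketch of an entry for some
 * hypothetical chip (the ID below is illustrative, not a real part):
 *
 *	{ CFI_MFR_INTEL, 0x1234, fixup_my_quirk },
 */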

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device IDs are
	 * as well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
	{ 0, 0, NULL }
};

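/*
 * The PF38F4476 seems to report Extended Query minor version '3'
 * without carrying the version-3 extension fields, so it is demoted
 * to '1' before parsing (our reading of the fixup below; the chip
 * errata were not consulted).
 */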
static void cfi_fixup_major_minor(struct cfi_private *cfi,
						struct cfi_pri_intelext *extp)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
			cfi->id == PF38F4476 && extp->MinorVersion == '3')
		extp->MinorVersion = '1';
}

static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
	/*
	 * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy w.r.t.
	 * Erase Suspend for their small erase blocks (0x8000)
	 */
	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
		return 1;
	return 0;
}

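/*
 * Read the Intel/Sharp Extended Query table.  How much data follows
 * the fixed structure depends on the minor version, so the table is
 * re-read with a progressively larger buffer (the "again" loop) until
 * all the version-dependent fields fit, giving up at 4096 bytes.
 */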
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp;
	unsigned int extra_size = 0;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	cfi_fixup_major_minor(cfi, extp);

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MinorVersion >= '0') {
		extra_size = 0;

		/* Protection Register info */
		if (extp->NumProtectionFields) {
			struct cfi_intelext_otpinfo *otp =
				(struct cfi_intelext_otpinfo *)&extp->extra[0];

			extra_size += (extp->NumProtectionFields - 1) *
				sizeof(struct cfi_intelext_otpinfo);

			if (extp_size >= sizeof(*extp) + extra_size) {
				int i;

				/* Do some byteswapping if necessary */
				for (i = 0; i < extp->NumProtectionFields - 1; i++) {
					otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
					otp->FactGroups = le16_to_cpu(otp->FactGroups);
					otp->UserGroups = le16_to_cpu(otp->UserGroups);
					otp++;
				}
			}
		}
	}

	if (extp->MinorVersion >= '1') {
		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size - 1];
	}

	if (extp->MinorVersion >= '3') {
		int nb_parts, i;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__func__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_intelext_erase_varsize;
	mtd->_read    = cfi_intelext_read;
	mtd->_write   = cfi_intelext_write_words;
	mtd->_sync    = cfi_intelext_sync;
	mtd->_lock    = cfi_intelext_lock;
	mtd->_unlock  = cfi_intelext_unlock;
	mtd->_is_locked = cfi_intelext_is_locked;
	mtd->_suspend = cfi_intelext_suspend;
	mtd->_resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
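	/*
	 * Worked example with illustrative values: two interleaved
	 * chips reporting MaxBufWriteSize == 5 (32-byte internal
	 * buffers) give a writebufsize of 2 << 5 == 64 bytes.
	 */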

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

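	/*
	 * CFI encodes these timeouts as powers of two: typical word
	 * and buffer write times are 2^n microseconds, typical erase
	 * times are 2^n milliseconds (hence the 1000<< below), and the
	 * "Max" fields scale the typical value by a further power of
	 * two.  E.g. WordWriteTimeoutTyp == 7 with WordWriteTimeoutMax
	 * == 3 gives 128 us typical, 1024 us worst case (illustrative
	 * numbers).
	 */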
	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kcalloc(mtd->numeraseregions,
				    sizeof(struct mtd_erase_region_info),
				    GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
				goto setup_err;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i,(unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd->eraseregions)
		for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
			for (j=0; j<cfi->numchips; j++)
				kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		int offs = 0;
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		if (extp->NumProtectionFields)
			offs = (extp->NumProtectionFields - 1) *
			       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__func__, numparts);
			return -EINVAL;
		}

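		/*
		 * Example with hypothetical numbers: a 32 MiB chip
		 * (chipshift == 25) divided into 4 hardware partitions
		 * gives partshift == 23, i.e. four 8 MiB virtual chips.
		 */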
		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
				 GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc_array(cfi->numchips,
				       sizeof(struct flchip_shared),
				       GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			mutex_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				mutex_init(&chip->mutex);
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
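/*
 * chip_ready() tries to bring the chip into a state where the given
 * mode of operation can proceed: it polls a pending status, suspends
 * an in-progress erase when the chip advertises that capability, or
 * puts the caller to sleep on the chip's wait queue and returns
 * -EAGAIN so the state is re-evaluated.  Called, and returns, with
 * chip->mutex held.
 */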
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		fallthrough;
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Do not allow suspend if the read/write falls within
		   the erase block currently in progress */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* do not suspend small erase blocks: buggy Micron chips */
		if (cfi_is_micron_28F00AP30(cfi, chip) &&
		    (chip->in_progress_block_mask == ~(0x8000-1)))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, chip->in_progress_block_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.
				 * Make sure we're in 'read status' mode if it had finished */
				put_chip(map, chip, adr);
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}

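/*
 * get_chip() wraps chip_ready() with the inter-partition arbitration
 * described in the comment below: write/erase engines are global to
 * the physical chip, so ownership is negotiated through the shared
 * structure before the per-partition state machine runs.  Called with
 * chip->mutex held.
 */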
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own the chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner, loaner->start);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
	xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

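/*
 * Non-XIP implementation of the wait: invalidate the cached range up
 * front, then poll the status register, sleeping for roughly half the
 * typical operation time before falling back to timer-tick sized
 * naps.  The timeout is rearmed whenever the operation was suspended
 * and resumed behind our back.
 */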
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	mutex_unlock(&chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	mutex_lock(&chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		if (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (chip->erase_suspended && chip_state == FL_ERASING) {
			/* Erase suspend occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
		}
		if (chip->write_suspended && chip_state == FL_WRITING) {
			/* Write suspend occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->write_suspended = 0;
		}
		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

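/* Wait for an operation to complete, with nothing to invalidate. */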
#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);


static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	mutex_unlock(&chip->mutex);

	return ret;
}

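/*
 * mtd _point() support: hand out a direct pointer into the linear
 * mapping so callers can read (or execute) straight from flash.  Each
 * affected chip is parked in FL_POINT and reference counted; pointing
 * cannot cross chips that are not virtually contiguous.
 */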
static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret;

	if (!map->virt)
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}

static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum, err = 0;

	/* Now unlock the chip(s) from POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len && !err) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip, chip->start);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

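/*
 * Word-write path.  Unaligned head and tail fragments are padded out
 * to a full bus word with 0xff via map_word_ff(); this is safe since
 * programming a NOR flash bit to 1 leaves the cell unchanged.
 */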
1644static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1645{
1646	struct map_info *map = mtd->priv;
1647	struct cfi_private *cfi = map->fldrv_priv;
1648	int ret;
1649	int chipnum;
1650	unsigned long ofs;
1651
1652	chipnum = to >> cfi->chipshift;
1653	ofs = to  - (chipnum << cfi->chipshift);
1654
1655	/* If it's not bus-aligned, do the first byte write */
1656	if (ofs & (map_bankwidth(map)-1)) {
1657		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1658		int gap = ofs - bus_ofs;
1659		int n;
1660		map_word datum;
1661
1662		n = min_t(int, len, map_bankwidth(map)-gap);
1663		datum = map_word_ff(map);
1664		datum = map_word_load_partial(map, datum, buf, gap, n);
1665
1666		ret = do_write_oneword(map, &cfi->chips[chipnum],
1667					       bus_ofs, datum, FL_WRITING);
1668		if (ret)
1669			return ret;
1670
1671		len -= n;
1672		ofs += n;
1673		buf += n;
1674		(*retlen) += n;
1675
1676		if (ofs >> cfi->chipshift) {
1677			chipnum ++;
1678			ofs = 0;
1679			if (chipnum == cfi->numchips)
1680				return 0;
1681		}
1682	}
1683
1684	while(len >= map_bankwidth(map)) {
1685		map_word datum = map_word_load(map, buf);
1686
1687		ret = do_write_oneword(map, &cfi->chips[chipnum],
1688				       ofs, datum, FL_WRITING);
1689		if (ret)
1690			return ret;
1691
1692		ofs += map_bankwidth(map);
1693		buf += map_bankwidth(map);
1694		(*retlen) += map_bankwidth(map);
1695		len -= map_bankwidth(map);
1696
1697		if (ofs >> cfi->chipshift) {
1698			chipnum ++;
1699			ofs = 0;
1700			if (chipnum == cfi->numchips)
1701				return 0;
1702		}
1703	}
1704
1705	if (len & (map_bankwidth(map)-1)) {
1706		map_word datum;
1707
1708		datum = map_word_ff(map);
1709		datum = map_word_load_partial(map, datum, buf, 0, len);
1710
1711		ret = do_write_oneword(map, &cfi->chips[chipnum],
1712				       ofs, datum, FL_WRITING);
1713		if (ret)
1714			return ret;
1715
1716		(*retlen) += len;
1717	}
1718
1719	return 0;
1720}
1721
1722
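/*
 * Write up to one write buffer's worth of data (the caller guarantees
 * we never cross a buffer boundary): issue Write to Buffer (0xE8, or
 * 0xE9 on Performance-family chips), check the chip accepted it, feed
 * it the word count and data, then confirm with 0xD0 and wait.
 */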
1723static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1724				    unsigned long adr, const struct kvec **pvec,
1725				    unsigned long *pvec_seek, int len)
1726{
1727	struct cfi_private *cfi = map->fldrv_priv;
1728	map_word status, write_cmd, datum;
1729	unsigned long cmd_adr;
1730	int ret, wbufsize, word_gap, words;
1731	const struct kvec *vec;
1732	unsigned long vec_seek;
1733	unsigned long initial_adr;
1734	int initial_len = len;
1735
1736	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1737	adr += chip->start;
1738	initial_adr = adr;
1739	cmd_adr = adr & ~(wbufsize-1);
1740
1741	/* Sharp LH28F640BF chips need the first address for the
1742	 * Page Buffer Program command. See Table 5 of
1743	 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1744	if (is_LH28F640BF(cfi))
1745		cmd_adr = adr;
1746
1747	/* Let's determine this according to the interleave only once */
1748	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1749
1750	mutex_lock(&chip->mutex);
1751	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1752	if (ret) {
1753		mutex_unlock(&chip->mutex);
1754		return ret;
1755	}
1756
1757	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1758	ENABLE_VPP(map);
1759	xip_disable(map, chip, cmd_adr);
1760
1761	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1762	   [...], the device will not accept any more Write to Buffer commands".
1763	   So we must check here and reset those bits if they're set. Otherwise
1764	   we're just pissing in the wind */
1765	if (chip->state != FL_STATUS) {
1766		map_write(map, CMD(0x70), cmd_adr);
1767		chip->state = FL_STATUS;
1768	}
1769	status = map_read(map, cmd_adr);
1770	if (map_word_bitsset(map, status, CMD(0x30))) {
1771		xip_enable(map, chip, cmd_adr);
1772		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1773		xip_disable(map, chip, cmd_adr);
1774		map_write(map, CMD(0x50), cmd_adr);
1775		map_write(map, CMD(0x70), cmd_adr);
1776	}
1777
1778	chip->state = FL_WRITING_TO_BUFFER;
1779	map_write(map, write_cmd, cmd_adr);
1780	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1781	if (ret) {
1782		/* Argh. Not ready for write to buffer */
1783		map_word Xstatus = map_read(map, cmd_adr);
1784		map_write(map, CMD(0x70), cmd_adr);
1785		chip->state = FL_STATUS;
1786		status = map_read(map, cmd_adr);
1787		map_write(map, CMD(0x50), cmd_adr);
1788		map_write(map, CMD(0x70), cmd_adr);
1789		xip_enable(map, chip, cmd_adr);
1790		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1791				map->name, Xstatus.x[0], status.x[0]);
1792		goto out;
1793	}
1794
1795	/* Figure out the number of words to write */
1796	word_gap = (-adr & (map_bankwidth(map)-1));
1797	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1798	if (!word_gap) {
1799		words--;
1800	} else {
1801		word_gap = map_bankwidth(map) - word_gap;
1802		adr -= word_gap;
1803		datum = map_word_ff(map);
1804	}
1805
1806	/* Write length of data to come (the chip expects the word count minus one) */
1807	map_write(map, CMD(words), cmd_adr);
1808
1809	/* Write data */
1810	vec = *pvec;
1811	vec_seek = *pvec_seek;
1812	do {
1813		int n = map_bankwidth(map) - word_gap;
1814		if (n > vec->iov_len - vec_seek)
1815			n = vec->iov_len - vec_seek;
1816		if (n > len)
1817			n = len;
1818
1819		if (!word_gap && len < map_bankwidth(map))
1820			datum = map_word_ff(map);
1821
1822		datum = map_word_load_partial(map, datum,
1823					      vec->iov_base + vec_seek,
1824					      word_gap, n);
1825
1826		len -= n;
1827		word_gap += n;
1828		if (!len || word_gap == map_bankwidth(map)) {
1829			map_write(map, datum, adr);
1830			adr += map_bankwidth(map);
1831			word_gap = 0;
1832		}
1833
1834		vec_seek += n;
1835		if (vec_seek == vec->iov_len) {
1836			vec++;
1837			vec_seek = 0;
1838		}
1839	} while (len);
1840	*pvec = vec;
1841	*pvec_seek = vec_seek;
1842
1843	/* GO GO GO */
1844	map_write(map, CMD(0xd0), cmd_adr);
1845	chip->state = FL_WRITING;
1846
1847	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1848				   initial_adr, initial_len,
1849				   chip->buffer_write_time,
1850				   chip->buffer_write_time_max);
1851	if (ret) {
1852		map_write(map, CMD(0x70), cmd_adr);
1853		chip->state = FL_STATUS;
1854		xip_enable(map, chip, cmd_adr);
1855		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1856		goto out;
1857	}
1858
1859	/* check for errors */
1860	status = map_read(map, cmd_adr);
1861	if (map_word_bitsset(map, status, CMD(0x1a))) {
1862		unsigned long chipstatus = MERGESTATUS(status);
1863
1864		/* reset status */
1865		map_write(map, CMD(0x50), cmd_adr);
1866		map_write(map, CMD(0x70), cmd_adr);
1867		xip_enable(map, chip, cmd_adr);
1868
1869		if (chipstatus & 0x02) {
1870			ret = -EROFS;
1871		} else if (chipstatus & 0x08) {
1872			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1873			ret = -EIO;
1874		} else {
1875			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1876			ret = -EINVAL;
1877		}
1878
1879		goto out;
1880	}
1881
1882	xip_enable(map, chip, cmd_adr);
1883 out:	DISABLE_VPP(map);
1884	put_chip(map, chip, cmd_adr);
1885	mutex_unlock(&chip->mutex);
1886	return ret;
1887}
1888
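/*
 * Scatter-gather write: walk the kvec array and hand do_write_buffer()
 * one chunk at a time, each clipped so it never crosses a write-buffer
 * boundary, advancing to the next chip when needed.
 */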
1889static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1890				unsigned long count, loff_t to, size_t *retlen)
1891{
1892	struct map_info *map = mtd->priv;
1893	struct cfi_private *cfi = map->fldrv_priv;
1894	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1895	int ret;
1896	int chipnum;
1897	unsigned long ofs, vec_seek, i;
1898	size_t len = 0;
1899
1900	for (i = 0; i < count; i++)
1901		len += vecs[i].iov_len;
1902
1903	if (!len)
1904		return 0;
1905
1906	chipnum = to >> cfi->chipshift;
1907	ofs = to - (chipnum << cfi->chipshift);
1908	vec_seek = 0;
1909
1910	do {
1911		/* We must not cross write block boundaries */
1912		int size = wbufsize - (ofs & (wbufsize-1));
1913
1914		if (size > len)
1915			size = len;
1916		ret = do_write_buffer(map, &cfi->chips[chipnum],
1917				      ofs, &vecs, &vec_seek, size);
1918		if (ret)
1919			return ret;
1920
1921		ofs += size;
1922		(*retlen) += size;
1923		len -= size;
1924
1925		if (ofs >> cfi->chipshift) {
1926			chipnum ++;
1927			ofs = 0;
1928			if (chipnum == cfi->numchips)
1929				return 0;
1930		}
1931
1932		/* Be nice and reschedule with the chip in a usable state for other
1933		   processes. */
1934		cond_resched();
1935
1936	} while (len);
1937
1938	return 0;
1939}
1940
1941static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1942				       size_t len, size_t *retlen, const u_char *buf)
1943{
1944	struct kvec vec;
1945
1946	vec.iov_base = (void *) buf;
1947	vec.iov_len = len;
1948
1949	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1950}
1951
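/*
 * Erase one block: clear the status register (0x50), issue Block Erase
 * (0x20) plus confirm (0xD0) and wait. On failure, a command sequence
 * error (SR.4 and SR.5 both set) or a locked block is fatal, while a
 * plain erase failure (SR.5) is retried up to three times.
 */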
1952static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1953				      unsigned long adr, int len, void *thunk)
1954{
1955	struct cfi_private *cfi = map->fldrv_priv;
1956	map_word status;
1957	int retries = 3;
1958	int ret;
1959
1960	adr += chip->start;
1961
1962 retry:
1963	mutex_lock(&chip->mutex);
1964	ret = get_chip(map, chip, adr, FL_ERASING);
1965	if (ret) {
1966		mutex_unlock(&chip->mutex);
1967		return ret;
1968	}
1969
1970	XIP_INVAL_CACHED_RANGE(map, adr, len);
1971	ENABLE_VPP(map);
1972	xip_disable(map, chip, adr);
1973
1974	/* Clear the status register first */
1975	map_write(map, CMD(0x50), adr);
1976
1977	/* Now erase */
1978	map_write(map, CMD(0x20), adr);
1979	map_write(map, CMD(0xD0), adr);
1980	chip->state = FL_ERASING;
1981	chip->erase_suspended = 0;
1982	chip->in_progress_block_addr = adr;
1983	chip->in_progress_block_mask = ~(len - 1);
1984
1985	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1986				   adr, len,
1987				   chip->erase_time,
1988				   chip->erase_time_max);
1989	if (ret) {
1990		map_write(map, CMD(0x70), adr);
1991		chip->state = FL_STATUS;
1992		xip_enable(map, chip, adr);
1993		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1994		goto out;
1995	}
1996
1997	/* We've broken this before. It doesn't hurt to be safe */
1998	map_write(map, CMD(0x70), adr);
1999	chip->state = FL_STATUS;
2000	status = map_read(map, adr);
2001
2002	/* check for errors */
2003	if (map_word_bitsset(map, status, CMD(0x3a))) {
2004		unsigned long chipstatus = MERGESTATUS(status);
2005
2006		/* Reset the error bits */
2007		map_write(map, CMD(0x50), adr);
2008		map_write(map, CMD(0x70), adr);
2009		xip_enable(map, chip, adr);
2010
2011		if ((chipstatus & 0x30) == 0x30) {
2012			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
2013			ret = -EINVAL;
2014		} else if (chipstatus & 0x02) {
2015			/* Protection bit set */
2016			ret = -EROFS;
2017		} else if (chipstatus & 0x8) {
2018			/* Voltage */
2019			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
2020			ret = -EIO;
2021		} else if (chipstatus & 0x20 && retries--) {
2022			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2023			DISABLE_VPP(map);
2024			put_chip(map, chip, adr);
2025			mutex_unlock(&chip->mutex);
2026			goto retry;
2027		} else {
2028			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2029			ret = -EIO;
2030		}
2031
2032		goto out;
2033	}
2034
2035	xip_enable(map, chip, adr);
2036 out:	DISABLE_VPP(map);
2037	put_chip(map, chip, adr);
2038	mutex_unlock(&chip->mutex);
2039	return ret;
2040}
2041
2042static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2043{
2044	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2045				instr->len, NULL);
2046}
2047
2048static void cfi_intelext_sync (struct mtd_info *mtd)
2049{
2050	struct map_info *map = mtd->priv;
2051	struct cfi_private *cfi = map->fldrv_priv;
2052	int i;
2053	struct flchip *chip;
2054	int ret = 0;
2055
2056	for (i=0; !ret && i<cfi->numchips; i++) {
2057		chip = &cfi->chips[i];
2058
2059		mutex_lock(&chip->mutex);
2060		ret = get_chip(map, chip, chip->start, FL_SYNCING);
2061
2062		if (!ret) {
2063			chip->oldstate = chip->state;
2064			chip->state = FL_SYNCING;
2065			/* No need to wake_up() on this state change -
2066			 * as the whole point is that nobody can do anything
2067			 * with the chip now anyway.
2068			 */
2069		}
2070		mutex_unlock(&chip->mutex);
2071	}
2072
2073	/* Unlock the chips again */
2074
2075	for (i--; i >=0; i--) {
2076		chip = &cfi->chips[i];
2077
2078		mutex_lock(&chip->mutex);
2079
2080		if (chip->state == FL_SYNCING) {
2081			chip->state = chip->oldstate;
2082			chip->oldstate = FL_READY;
2083			wake_up(&chip->wq);
2084		}
2085		mutex_unlock(&chip->mutex);
2086	}
2087}
2088
2089static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2090						struct flchip *chip,
2091						unsigned long adr,
2092						int len, void *thunk)
2093{
2094	struct cfi_private *cfi = map->fldrv_priv;
2095	int status, ofs_factor = cfi->interleave * cfi->device_type;
2096
2097	adr += chip->start;
2098	xip_disable(map, chip, adr+(2*ofs_factor));
2099	map_write(map, CMD(0x90), adr+(2*ofs_factor));
2100	chip->state = FL_JEDEC_QUERY;
2101	status = cfi_read_query(map, adr+(2*ofs_factor));
2102	xip_enable(map, chip, 0);
2103	return status;
2104}
2105
2106#ifdef DEBUG_LOCK_BITS
2107static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2108						struct flchip *chip,
2109						unsigned long adr,
2110						int len, void *thunk)
2111{
2112	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2113	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2114	return 0;
2115}
2116#endif
2117
2118#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
2119#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
2120
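/*
 * Lock or unlock one block: Block Lock Setup (0x60) followed by 0x01
 * to set or 0xD0 to clear the lock bit, as selected by 'thunk'.
 */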
2121static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2122				       unsigned long adr, int len, void *thunk)
2123{
2124	struct cfi_private *cfi = map->fldrv_priv;
2125	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2126	int mdelay;
2127	int ret;
2128
2129	adr += chip->start;
2130
2131	mutex_lock(&chip->mutex);
2132	ret = get_chip(map, chip, adr, FL_LOCKING);
2133	if (ret) {
2134		mutex_unlock(&chip->mutex);
2135		return ret;
2136	}
2137
2138	ENABLE_VPP(map);
2139	xip_disable(map, chip, adr);
2140
2141	map_write(map, CMD(0x60), adr);
2142	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2143		map_write(map, CMD(0x01), adr);
2144		chip->state = FL_LOCKING;
2145	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2146		map_write(map, CMD(0xD0), adr);
2147		chip->state = FL_UNLOCKING;
2148	} else
2149		BUG();
2150
2151	/*
2152	 * If Instant Individual Block Locking is supported, there is
2153	 * no need to delay.
2154	 */
2155	/*
2156	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2157	 * let's use a max of 1.5 seconds (1500ms) as timeout.
2158	 *
2159	 * See "Clear Block Lock-Bits Time" on page 40 in
2160	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2161	 * from February 2003
2162	 */
2163	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2164
2165	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2166	if (ret) {
2167		map_write(map, CMD(0x70), adr);
2168		chip->state = FL_STATUS;
2169		xip_enable(map, chip, adr);
2170		printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2171		goto out;
2172	}
2173
2174	xip_enable(map, chip, adr);
2175 out:	DISABLE_VPP(map);
2176	put_chip(map, chip, adr);
2177	mutex_unlock(&chip->mutex);
2178	return ret;
2179}
2180
2181static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2182{
2183	int ret;
2184
2185#ifdef DEBUG_LOCK_BITS
2186	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2187	       __func__, ofs, len);
2188	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2189		ofs, len, NULL);
2190#endif
2191
2192	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2193		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2194
2195#ifdef DEBUG_LOCK_BITS
2196	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2197	       __func__, ret);
2198	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2199		ofs, len, NULL);
2200#endif
2201
2202	return ret;
2203}
2204
2205static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2206{
2207	int ret;
2208
2209#ifdef DEBUG_LOCK_BITS
2210	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2211	       __func__, ofs, len);
2212	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2213		ofs, len, NULL);
2214#endif
2215
2216	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2217					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2218
2219#ifdef DEBUG_LOCK_BITS
2220	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2221	       __func__, ret);
2222	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2223		ofs, len, NULL);
2224#endif
2225
2226	return ret;
2227}
2228
2229static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2230				  uint64_t len)
2231{
2232	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2233				ofs, len, NULL) ? 1 : 0;
2234}
2235
2236#ifdef CONFIG_MTD_OTP
2237
2238typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2239			u_long data_offset, u_char *buf, u_int size,
2240			u_long prot_offset, u_int groupno, u_int groupsize);
2241
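/*
 * OTP/protection registers live in the Read Identifier (0x90) address
 * space, so switch modes, copy the data out, and invalidate any cached
 * array-mode contents on both sides of the access.
 */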
2242static int __xipram
2243do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2244	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2245{
2246	struct cfi_private *cfi = map->fldrv_priv;
2247	int ret;
2248
2249	mutex_lock(&chip->mutex);
2250	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2251	if (ret) {
2252		mutex_unlock(&chip->mutex);
2253		return ret;
2254	}
2255
2256	/* let's ensure we're not reading back cached data from array mode */
2257	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2258
2259	xip_disable(map, chip, chip->start);
2260	if (chip->state != FL_JEDEC_QUERY) {
2261		map_write(map, CMD(0x90), chip->start);
2262		chip->state = FL_JEDEC_QUERY;
2263	}
2264	map_copy_from(map, buf, chip->start + offset, size);
2265	xip_enable(map, chip, chip->start);
2266
2267	/* then ensure we don't keep OTP data in the cache */
2268	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2269
2270	put_chip(map, chip, chip->start);
2271	mutex_unlock(&chip->mutex);
2272	return 0;
2273}
2274
2275static int
2276do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2277	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2278{
2279	int ret;
2280
2281	while (size) {
2282		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2283		int gap = offset - bus_ofs;
2284		int n = min_t(int, size, map_bankwidth(map)-gap);
2285		map_word datum = map_word_ff(map);
2286
2287		datum = map_word_load_partial(map, datum, buf, gap, n);
2288		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2289		if (ret)
2290			return ret;
2291
2292		offset += n;
2293		buf += n;
2294		size -= n;
2295	}
2296
2297	return 0;
2298}
2299
2300static int
2301do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2302	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2303{
2304	struct cfi_private *cfi = map->fldrv_priv;
2305	map_word datum;
2306
2307	/* make sure area matches group boundaries */
2308	if (size != grpsz)
2309		return -EXDEV;
2310
2311	datum = map_word_ff(map);
2312	datum = map_word_clr(map, datum, CMD(1 << grpno));
2313	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2314}
2315
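/*
 * Walk the OTP protection regions described in the extended query
 * table, applying 'action' (read, write or lock) to each group that
 * intersects the request. A NULL action instead fills 'buf' with
 * struct otp_info records describing the regions.
 */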
2316static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2317				 size_t *retlen, u_char *buf,
2318				 otp_op_t action, int user_regs)
2319{
2320	struct map_info *map = mtd->priv;
2321	struct cfi_private *cfi = map->fldrv_priv;
2322	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2323	struct flchip *chip;
2324	struct cfi_intelext_otpinfo *otp;
2325	u_long devsize, reg_prot_offset, data_offset;
2326	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2327	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2328	int ret;
2329
2330	*retlen = 0;
2331
2332	/* Check that we actually have some OTP registers */
2333	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2334		return -ENODATA;
2335
2336	/* we need real chips here, not virtual ones */
2337	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2338	chip_step = devsize >> cfi->chipshift;
2339	chip_num = 0;
2340
2341	/* Some chips have OTP located in the _top_ partition only.
2342	   For example: Intel 28F256L18T (T means top-parameter device) */
2343	if (cfi->mfr == CFI_MFR_INTEL) {
2344		switch (cfi->id) {
2345		case 0x880b:
2346		case 0x880c:
2347		case 0x880d:
2348			chip_num = chip_step - 1;
2349		}
2350	}
2351
2352	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2353		chip = &cfi->chips[chip_num];
2354		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2355
2356		/* first OTP region */
2357		field = 0;
2358		reg_prot_offset = extp->ProtRegAddr;
2359		reg_fact_groups = 1;
2360		reg_fact_size = 1 << extp->FactProtRegSize;
2361		reg_user_groups = 1;
2362		reg_user_size = 1 << extp->UserProtRegSize;
2363
2364		while (len > 0) {
2365			/* flash geometry fixup */
2366			data_offset = reg_prot_offset + 1;
2367			data_offset *= cfi->interleave * cfi->device_type;
2368			reg_prot_offset *= cfi->interleave * cfi->device_type;
2369			reg_fact_size *= cfi->interleave;
2370			reg_user_size *= cfi->interleave;
2371
2372			if (user_regs) {
2373				groups = reg_user_groups;
2374				groupsize = reg_user_size;
2375				/* skip over factory reg area */
2376				groupno = reg_fact_groups;
2377				data_offset += reg_fact_groups * reg_fact_size;
2378			} else {
2379				groups = reg_fact_groups;
2380				groupsize = reg_fact_size;
2381				groupno = 0;
2382			}
2383
2384			while (len > 0 && groups > 0) {
2385				if (!action) {
2386					/*
2387					 * Special case: if action is NULL
2388					 * we fill buf with otp_info records.
2389					 */
2390					struct otp_info *otpinfo;
2391					map_word lockword;
2392					len -= sizeof(struct otp_info);
2393					if (len <= 0)
2394						return -ENOSPC;
2395					ret = do_otp_read(map, chip,
2396							  reg_prot_offset,
2397							  (u_char *)&lockword,
2398							  map_bankwidth(map),
2399							  0, 0,  0);
2400					if (ret)
2401						return ret;
2402					otpinfo = (struct otp_info *)buf;
2403					otpinfo->start = from;
2404					otpinfo->length = groupsize;
2405					otpinfo->locked =
2406					   !map_word_bitsset(map, lockword,
2407							     CMD(1 << groupno));
2408					from += groupsize;
2409					buf += sizeof(*otpinfo);
2410					*retlen += sizeof(*otpinfo);
2411				} else if (from >= groupsize) {
2412					from -= groupsize;
2413					data_offset += groupsize;
2414				} else {
2415					int size = groupsize;
2416					data_offset += from;
2417					size -= from;
2418					from = 0;
2419					if (size > len)
2420						size = len;
2421					ret = action(map, chip, data_offset,
2422						     buf, size, reg_prot_offset,
2423						     groupno, groupsize);
2424					if (ret < 0)
2425						return ret;
2426					buf += size;
2427					len -= size;
2428					*retlen += size;
2429					data_offset += size;
2430				}
2431				groupno++;
2432				groups--;
2433			}
2434
2435			/* next OTP region */
2436			if (++field == extp->NumProtectionFields)
2437				break;
2438			reg_prot_offset = otp->ProtRegAddr;
2439			reg_fact_groups = otp->FactGroups;
2440			reg_fact_size = 1 << otp->FactProtRegSize;
2441			reg_user_groups = otp->UserGroups;
2442			reg_user_size = 1 << otp->UserProtRegSize;
2443			otp++;
2444		}
2445	}
2446
2447	return 0;
2448}
2449
2450static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2451					   size_t len, size_t *retlen,
2452					    u_char *buf)
2453{
2454	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2455				     buf, do_otp_read, 0);
2456}
2457
2458static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2459					   size_t len, size_t *retlen,
2460					    u_char *buf)
2461{
2462	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2463				     buf, do_otp_read, 1);
2464}
2465
2466static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2467					    size_t len, size_t *retlen,
2468					    const u_char *buf)
2469{
2470	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2471				     (u_char *)buf, do_otp_write, 1);
2472}
2473
2474static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2475					   loff_t from, size_t len)
2476{
2477	size_t retlen;
2478	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2479				     NULL, do_otp_lock, 1);
2480}
2481
2482static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2483					   size_t *retlen, struct otp_info *buf)
2485{
2486	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2487				     NULL, 0);
2488}
2489
2490static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2491					   size_t *retlen, struct otp_info *buf)
2492{
2493	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2494				     NULL, 1);
2495}
2496
2497#endif
2498
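/*
 * Snapshot the lock state of every block into the per-region lockmaps,
 * so that blocks which were deliberately unlocked can be unlocked again
 * on resume (power-up-locking parts relock everything at power loss).
 */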
2499static void cfi_intelext_save_locks(struct mtd_info *mtd)
2500{
2501	struct mtd_erase_region_info *region;
2502	int block, status, i;
2503	unsigned long adr;
2504	size_t len;
2505
2506	for (i = 0; i < mtd->numeraseregions; i++) {
2507		region = &mtd->eraseregions[i];
2508		if (!region->lockmap)
2509			continue;
2510
2511		for (block = 0; block < region->numblocks; block++){
2512			len = region->erasesize;
2513			adr = region->offset + block * len;
2514
2515			status = cfi_varsize_frob(mtd,
2516					do_getlockstatus_oneblock, adr, len, NULL);
2517			if (status)
2518				set_bit(block, region->lockmap);
2519			else
2520				clear_bit(block, region->lockmap);
2521		}
2522	}
2523}
2524
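/*
 * Suspend: put every idle chip into array mode (0xFF) and mark it
 * FL_PM_SUSPENDED. If any chip is busy, back out the ones already
 * suspended and return -EAGAIN.
 */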
2525static int cfi_intelext_suspend(struct mtd_info *mtd)
2526{
2527	struct map_info *map = mtd->priv;
2528	struct cfi_private *cfi = map->fldrv_priv;
2529	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2530	int i;
2531	struct flchip *chip;
2532	int ret = 0;
2533
2534	if ((mtd->flags & MTD_POWERUP_LOCK)
2535	    && extp && (extp->FeatureSupport & (1 << 5)))
2536		cfi_intelext_save_locks(mtd);
2537
2538	for (i=0; !ret && i<cfi->numchips; i++) {
2539		chip = &cfi->chips[i];
2540
2541		mutex_lock(&chip->mutex);
2542
2543		switch (chip->state) {
2544		case FL_READY:
2545		case FL_STATUS:
2546		case FL_CFI_QUERY:
2547		case FL_JEDEC_QUERY:
2548			if (chip->oldstate == FL_READY) {
2549				/* place the chip in a known state before suspend */
2550				map_write(map, CMD(0xFF), cfi->chips[i].start);
2551				chip->oldstate = chip->state;
2552				chip->state = FL_PM_SUSPENDED;
2553				/* No need to wake_up() on this state change -
2554				 * as the whole point is that nobody can do anything
2555				 * with the chip now anyway.
2556				 */
2557			} else {
2558				/* There seems to be an operation pending. We must wait for it. */
2559				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2560				ret = -EAGAIN;
2561			}
2562			break;
2563		default:
2564			/* Should we actually wait? Once upon a time these routines weren't
2565			   allowed to. Or should we return -EAGAIN, because the upper layers
2566			   ought to have already shut down anything which was using the device
2567			   anyway? The latter for now. */
2568			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2569			ret = -EAGAIN;
2570			break;
2571		case FL_PM_SUSPENDED:
2572			break;
2573		}
2574		mutex_unlock(&chip->mutex);
2575	}
2576
2577	/* Unlock the chips again */
2578
2579	if (ret) {
2580		for (i--; i >=0; i--) {
2581			chip = &cfi->chips[i];
2582
2583			mutex_lock(&chip->mutex);
2584
2585			if (chip->state == FL_PM_SUSPENDED) {
2586				/* No need to force it into a known state here,
2587				   because we're returning failure, and it didn't
2588				   get power cycled */
2589				chip->state = chip->oldstate;
2590				chip->oldstate = FL_READY;
2591				wake_up(&chip->wq);
2592			}
2593			mutex_unlock(&chip->mutex);
2594		}
2595	}
2596
2597	return ret;
2598}
2599
2600static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2601{
2602	struct mtd_erase_region_info *region;
2603	int block, i;
2604	unsigned long adr;
2605	size_t len;
2606
2607	for (i = 0; i < mtd->numeraseregions; i++) {
2608		region = &mtd->eraseregions[i];
2609		if (!region->lockmap)
2610			continue;
2611
2612		for_each_clear_bit(block, region->lockmap, region->numblocks) {
2613			len = region->erasesize;
2614			adr = region->offset + block * len;
2615			cfi_intelext_unlock(mtd, adr, len);
2616		}
2617	}
2618}
2619
2620static void cfi_intelext_resume(struct mtd_info *mtd)
2621{
2622	struct map_info *map = mtd->priv;
2623	struct cfi_private *cfi = map->fldrv_priv;
2624	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2625	int i;
2626	struct flchip *chip;
2627
2628	for (i=0; i<cfi->numchips; i++) {
2629
2630		chip = &cfi->chips[i];
2631
2632		mutex_lock(&chip->mutex);
2633
2634		/* Go to known state. Chip may have been power cycled */
2635		if (chip->state == FL_PM_SUSPENDED) {
2636			/* Refresh LH28F640BF Partition Config. Register */
2637			fixup_LH28F640BF(mtd);
2638			map_write(map, CMD(0xFF), cfi->chips[i].start);
2639			chip->oldstate = chip->state = FL_READY;
2640			wake_up(&chip->wq);
2641		}
2642
2643		mutex_unlock(&chip->mutex);
2644	}
2645
2646	if ((mtd->flags & MTD_POWERUP_LOCK)
2647	    && extp && (extp->FeatureSupport & (1 << 5)))
2648		cfi_intelext_restore_locks(mtd);
2649}
2650
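/*
 * Force every chip back into array (read) mode, typically just before
 * reboot, so that a bootloader stored in flash is readable again.
 */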
2651static int cfi_intelext_reset(struct mtd_info *mtd)
2652{
2653	struct map_info *map = mtd->priv;
2654	struct cfi_private *cfi = map->fldrv_priv;
2655	int i, ret;
2656
2657	for (i=0; i < cfi->numchips; i++) {
2658		struct flchip *chip = &cfi->chips[i];
2659
2660		/* force the completion of any ongoing operation
2661		   and switch to array mode so any bootloader in
2662		   flash is accessible for soft reboot. */
2663		mutex_lock(&chip->mutex);
2664		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2665		if (!ret) {
2666			map_write(map, CMD(0xff), chip->start);
2667			chip->state = FL_SHUTDOWN;
2668			put_chip(map, chip, chip->start);
2669		}
2670		mutex_unlock(&chip->mutex);
2671	}
2672
2673	return 0;
2674}
2675
2676static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2677			       void *v)
2678{
2679	struct mtd_info *mtd;
2680
2681	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2682	cfi_intelext_reset(mtd);
2683	return NOTIFY_DONE;
2684}
2685
2686static void cfi_intelext_destroy(struct mtd_info *mtd)
2687{
2688	struct map_info *map = mtd->priv;
2689	struct cfi_private *cfi = map->fldrv_priv;
2690	struct mtd_erase_region_info *region;
2691	int i;
2692	cfi_intelext_reset(mtd);
2693	unregister_reboot_notifier(&mtd->reboot_notifier);
2694	kfree(cfi->cmdset_priv);
2695	kfree(cfi->cfiq);
2696	kfree(cfi->chips[0].priv);
2697	kfree(cfi);
2698	for (i = 0; i < mtd->numeraseregions; i++) {
2699		region = &mtd->eraseregions[i];
2700		kfree(region->lockmap);
2701	}
2702	kfree(mtd->eraseregions);
2703}
2704
2705MODULE_LICENSE("GPL");
2706MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2707MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2708MODULE_ALIAS("cfi_cmdset_0003");
2709MODULE_ALIAS("cfi_cmdset_0200");
v5.9
 
   1/*
   2 * Common Flash Interface support:
   3 *   Intel Extended Vendor Command Set (ID 0x0001)
   4 *
   5 * (C) 2000 Red Hat. GPL'd
   6 *
   7 *
   8 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
   9 * 	- completely revamped method functions so they are aware and
  10 * 	  independent of the flash geometry (buswidth, interleave, etc.)
  11 * 	- scalability vs code size is completely set at compile-time
  12 * 	  (see include/linux/mtd/cfi.h for selection)
  13 *	- optimized write buffer method
  14 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
  15 *	- reworked lock/unlock/erase support for var size flash
  16 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
  17 * 	- auto unlock sectors on resume for auto locking flash on power up
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/types.h>
  22#include <linux/kernel.h>
  23#include <linux/sched.h>
  24#include <asm/io.h>
  25#include <asm/byteorder.h>
  26
  27#include <linux/errno.h>
  28#include <linux/slab.h>
  29#include <linux/delay.h>
  30#include <linux/interrupt.h>
  31#include <linux/reboot.h>
  32#include <linux/bitmap.h>
  33#include <linux/mtd/xip.h>
  34#include <linux/mtd/map.h>
  35#include <linux/mtd/mtd.h>
  36#include <linux/mtd/cfi.h>
  37
  38/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
  39/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
  40
  41// debugging, turns off buffer write mode if set to 1
  42#define FORCE_WORD_WRITE 0
  43
  44/* Intel chips */
  45#define I82802AB	0x00ad
  46#define I82802AC	0x00ac
  47#define PF38F4476	0x881c
  48#define M28F00AP30	0x8963
  49/* STMicroelectronics chips */
  50#define M50LPW080       0x002F
  51#define M50FLW080A	0x0080
  52#define M50FLW080B	0x0081
  53/* Atmel chips */
  54#define AT49BV640D	0x02de
  55#define AT49BV640DT	0x02db
  56/* Sharp chips */
  57#define LH28F640BFHE_PTTL90	0x00b0
  58#define LH28F640BFHE_PBTL90	0x00b1
  59#define LH28F640BFHE_PTTL70A	0x00b2
  60#define LH28F640BFHE_PBTL70A	0x00b3
  61
  62static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
  63static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
  64static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
  65static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
  66static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
  67static void cfi_intelext_sync (struct mtd_info *);
  68static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
  69static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
  70static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
  71				  uint64_t len);
  72#ifdef CONFIG_MTD_OTP
  73static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
  74static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
  75static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 
  76static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
  77static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
  78					   size_t *, struct otp_info *);
  79static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
  80					   size_t *, struct otp_info *);
  81#endif
  82static int cfi_intelext_suspend (struct mtd_info *);
  83static void cfi_intelext_resume (struct mtd_info *);
  84static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
  85
  86static void cfi_intelext_destroy(struct mtd_info *);
  87
  88struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
  89
  90static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
  91static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
  92
  93static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
  94		     size_t *retlen, void **virt, resource_size_t *phys);
  95static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
  96
  97static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
  98static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
  99static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
 100#include "fwh_lock.h"
 101
 102
 103
 104/*
 105 *  *********** SETUP AND PROBE BITS  ***********
 106 */
 107
 108static struct mtd_chip_driver cfi_intelext_chipdrv = {
 109	.probe		= NULL, /* Not usable directly */
 110	.destroy	= cfi_intelext_destroy,
 111	.name		= "cfi_cmdset_0001",
 112	.module		= THIS_MODULE
 113};
 114
 115/* #define DEBUG_LOCK_BITS */
 116/* #define DEBUG_CFI_FEATURES */
 117
 118#ifdef DEBUG_CFI_FEATURES
 119static void cfi_tell_features(struct cfi_pri_intelext *extp)
 120{
 121	int i;
 122	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
 123	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
 124	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
 125	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
 126	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
 127	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
 128	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
 129	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
 130	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
 131	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
 132	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
 133	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
 134	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
 135	for (i=11; i<32; i++) {
 136		if (extp->FeatureSupport & (1<<i))
 137			printk("     - Unknown Bit %X:      supported\n", i);
 138	}
 139
 140	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
 141	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
 142	for (i=1; i<8; i++) {
 143		if (extp->SuspendCmdSupport & (1<<i))
 144			printk("     - Unknown Bit %X:               supported\n", i);
 145	}
 146
 147	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
 148	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
 149	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
 150	for (i=2; i<3; i++) {
 151		if (extp->BlkStatusRegMask & (1<<i))
 152			printk("     - Unknown Bit %X Active: yes\n",i);
 153	}
 154	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
 155	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
 156	for (i=6; i<16; i++) {
 157		if (extp->BlkStatusRegMask & (1<<i))
 158			printk("     - Unknown Bit %X Active: yes\n",i);
 159	}
 160
 161	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
 162	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
 163	if (extp->VppOptimal)
 164		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
 165		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
 166}
 167#endif
 168
 169/* Atmel chips don't use the same PRI format as Intel chips */
 170static void fixup_convert_atmel_pri(struct mtd_info *mtd)
 171{
 172	struct map_info *map = mtd->priv;
 173	struct cfi_private *cfi = map->fldrv_priv;
 174	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
 175	struct cfi_pri_atmel atmel_pri;
 176	uint32_t features = 0;
 177
 178	/* Reverse byteswapping */
 179	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
 180	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
 181	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
 182
 183	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
 184	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
 185
 186	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
 187
 188	if (atmel_pri.Features & 0x01) /* chip erase supported */
 189		features |= (1<<0);
 190	if (atmel_pri.Features & 0x02) /* erase suspend supported */
 191		features |= (1<<1);
 192	if (atmel_pri.Features & 0x04) /* program suspend supported */
 193		features |= (1<<2);
 194	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
 195		features |= (1<<9);
 196	if (atmel_pri.Features & 0x20) /* page mode read supported */
 197		features |= (1<<7);
 198	if (atmel_pri.Features & 0x40) /* queued erase supported */
 199		features |= (1<<4);
 200	if (atmel_pri.Features & 0x80) /* Protection bits supported */
 201		features |= (1<<6);
 202
 203	extp->FeatureSupport = features;
 204
 205	/* burst write mode not supported */
 206	cfi->cfiq->BufWriteTimeoutTyp = 0;
 207	cfi->cfiq->BufWriteTimeoutMax = 0;
 208}
 209
 210static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
 211{
 212	struct map_info *map = mtd->priv;
 213	struct cfi_private *cfi = map->fldrv_priv;
 214	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
 215
 216	cfip->FeatureSupport |= (1 << 5);
 217	mtd->flags |= MTD_POWERUP_LOCK;
 218}
 219
 220#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 221/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
 222static void fixup_intel_strataflash(struct mtd_info *mtd)
 223{
 224	struct map_info *map = mtd->priv;
 225	struct cfi_private *cfi = map->fldrv_priv;
 226	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
 227
 228	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
 229	                    "erase on write disabled.\n");
 230	extp->SuspendCmdSupport &= ~1;
 231}
 232#endif
 233
 234#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
 235static void fixup_no_write_suspend(struct mtd_info *mtd)
 236{
 237	struct map_info *map = mtd->priv;
 238	struct cfi_private *cfi = map->fldrv_priv;
 239	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
 240
 241	if (cfip && (cfip->FeatureSupport&4)) {
 242		cfip->FeatureSupport &= ~4;
 243		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
 244	}
 245}
 246#endif
 247
 248static void fixup_st_m28w320ct(struct mtd_info *mtd)
 249{
 250	struct map_info *map = mtd->priv;
 251	struct cfi_private *cfi = map->fldrv_priv;
 252
 253	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
 254	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
 255}
 256
 257static void fixup_st_m28w320cb(struct mtd_info *mtd)
 258{
 259	struct map_info *map = mtd->priv;
 260	struct cfi_private *cfi = map->fldrv_priv;
 261
 262	/* Note this is done after the region info is endian swapped */
 263	cfi->cfiq->EraseRegionInfo[1] =
 264		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
 265};
 266
 267static int is_LH28F640BF(struct cfi_private *cfi)
 268{
 269	/* Sharp LH28F640BF Family */
 270	if (cfi->mfr == CFI_MFR_SHARP && (
 271	    cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
 272	    cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
 273		return 1;
 274	return 0;
 275}
 276
 277static void fixup_LH28F640BF(struct mtd_info *mtd)
 278{
 279	struct map_info *map = mtd->priv;
 280	struct cfi_private *cfi = map->fldrv_priv;
 281	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
 282
 283	/* Reset the Partition Configuration Register on LH28F640BF
 284	 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
 285	if (is_LH28F640BF(cfi)) {
 286		printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
 287		map_write(map, CMD(0x60), 0);
 288		map_write(map, CMD(0x04), 0);
 289
 290		/* We have set one single partition thus
 291		 * Simultaneous Operations are not allowed */
 292		printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
 293		extp->FeatureSupport &= ~512;
 294	}
 295}
 296
 297static void fixup_use_point(struct mtd_info *mtd)
 298{
 299	struct map_info *map = mtd->priv;
 300	if (!mtd->_point && map_is_linear(map)) {
 301		mtd->_point   = cfi_intelext_point;
 302		mtd->_unpoint = cfi_intelext_unpoint;
 303	}
 304}
 305
 306static void fixup_use_write_buffers(struct mtd_info *mtd)
 307{
 308	struct map_info *map = mtd->priv;
 309	struct cfi_private *cfi = map->fldrv_priv;
 310	if (cfi->cfiq->BufWriteTimeoutTyp) {
 311		printk(KERN_INFO "Using buffer write method\n" );
 312		mtd->_write = cfi_intelext_write_buffers;
 313		mtd->_writev = cfi_intelext_writev;
 314	}
 315}
 316
 317/*
 318 * Some chips power-up with all sectors locked by default.
 319 */
 320static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
 321{
 322	struct map_info *map = mtd->priv;
 323	struct cfi_private *cfi = map->fldrv_priv;
 324	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
 325
 326	if (cfip->FeatureSupport&32) {
 327		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
 328		mtd->flags |= MTD_POWERUP_LOCK;
 329	}
 330}
 331
 332static struct cfi_fixup cfi_fixup_table[] = {
 333	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
 334	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
 335	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
 336#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 337	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
 338#endif
 339#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
 340	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
 341#endif
 342#if !FORCE_WORD_WRITE
 343	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
 344#endif
 345	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
 346	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
 347	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
 348	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
 349	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
 350	{ 0, 0, NULL }
 351};
 352
 353static struct cfi_fixup jedec_fixup_table[] = {
 354	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
 355	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
 356	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
 357	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
 358	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
 359	{ 0, 0, NULL }
 360};
 361static struct cfi_fixup fixup_table[] = {
 362	/* The CFI vendor ids and the JEDEC vendor IDs appear
 363	 * to be common.  It is like the devices id's are as
 364	 * well.  This table is to pick all cases where
 365	 * we know that is the case.
 366	 */
 367	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
 368	{ 0, 0, NULL }
 369};
 370
 371static void cfi_fixup_major_minor(struct cfi_private *cfi,
 372						struct cfi_pri_intelext *extp)
 373{
 374	if (cfi->mfr == CFI_MFR_INTEL &&
 375			cfi->id == PF38F4476 && extp->MinorVersion == '3')
 376		extp->MinorVersion = '1';
 377}
 378
 379static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
 380{
 381	/*
 382	 * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t
 383	 * Erase Supend for their small Erase Blocks(0x8000)
 384	 */
 385	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
 386		return 1;
 387	return 0;
 388}
 389
 390static inline struct cfi_pri_intelext *
 391read_pri_intelext(struct map_info *map, __u16 adr)
 392{
 393	struct cfi_private *cfi = map->fldrv_priv;
 394	struct cfi_pri_intelext *extp;
 395	unsigned int extra_size = 0;
 396	unsigned int extp_size = sizeof(*extp);
 397
 398 again:
 399	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
 400	if (!extp)
 401		return NULL;
 402
 403	cfi_fixup_major_minor(cfi, extp);
 404
 405	if (extp->MajorVersion != '1' ||
 406	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
 407		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
 408		       "version %c.%c.\n",  extp->MajorVersion,
 409		       extp->MinorVersion);
 410		kfree(extp);
 411		return NULL;
 412	}
 413
 414	/* Do some byteswapping if necessary */
 415	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
 416	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
 417	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
 418
 419	if (extp->MinorVersion >= '0') {
 420		extra_size = 0;
 421
 422		/* Protection Register info */
 423		if (extp->NumProtectionFields)
 
 
 
 424			extra_size += (extp->NumProtectionFields - 1) *
 425				      sizeof(struct cfi_intelext_otpinfo);
 
 
 
 
 
 
 
 
 
 
 
 
 
 426	}
 427
 428	if (extp->MinorVersion >= '1') {
 429		/* Burst Read info */
 430		extra_size += 2;
 431		if (extp_size < sizeof(*extp) + extra_size)
 432			goto need_more;
 433		extra_size += extp->extra[extra_size - 1];
 434	}
 435
 436	if (extp->MinorVersion >= '3') {
 437		int nb_parts, i;
 438
 439		/* Number of hardware-partitions */
 440		extra_size += 1;
 441		if (extp_size < sizeof(*extp) + extra_size)
 442			goto need_more;
 443		nb_parts = extp->extra[extra_size - 1];
 444
 445		/* skip the sizeof(partregion) field in CFI 1.4 */
 446		if (extp->MinorVersion >= '4')
 447			extra_size += 2;
 448
 449		for (i = 0; i < nb_parts; i++) {
 450			struct cfi_intelext_regioninfo *rinfo;
 451			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
 452			extra_size += sizeof(*rinfo);
 453			if (extp_size < sizeof(*extp) + extra_size)
 454				goto need_more;
 455			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
 456			extra_size += (rinfo->NumBlockTypes - 1)
 457				      * sizeof(struct cfi_intelext_blockinfo);
 458		}
 459
 460		if (extp->MinorVersion >= '4')
 461			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
 462
 463		if (extp_size < sizeof(*extp) + extra_size) {
 464			need_more:
 465			extp_size = sizeof(*extp) + extra_size;
 466			kfree(extp);
 467			if (extp_size > 4096) {
 468				printk(KERN_ERR
 469					"%s: cfi_pri_intelext is too fat\n",
 470					__func__);
 471				return NULL;
 472			}
 473			goto again;
 474		}
 475	}
 476
 477	return extp;
 478}
 479
 480struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
 481{
 482	struct cfi_private *cfi = map->fldrv_priv;
 483	struct mtd_info *mtd;
 484	int i;
 485
 486	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
 487	if (!mtd)
 488		return NULL;
 489	mtd->priv = map;
 490	mtd->type = MTD_NORFLASH;
 491
 492	/* Fill in the default mtd operations */
 493	mtd->_erase   = cfi_intelext_erase_varsize;
 494	mtd->_read    = cfi_intelext_read;
 495	mtd->_write   = cfi_intelext_write_words;
 496	mtd->_sync    = cfi_intelext_sync;
 497	mtd->_lock    = cfi_intelext_lock;
 498	mtd->_unlock  = cfi_intelext_unlock;
 499	mtd->_is_locked = cfi_intelext_is_locked;
 500	mtd->_suspend = cfi_intelext_suspend;
 501	mtd->_resume  = cfi_intelext_resume;
 502	mtd->flags   = MTD_CAP_NORFLASH;
 503	mtd->name    = map->name;
 504	mtd->writesize = 1;
 505	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
 506
 507	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
 508
 509	if (cfi->cfi_mode == CFI_MODE_CFI) {
 510		/*
 511		 * It's a real CFI chip, not one for which the probe
 512		 * routine faked a CFI structure. So we read the feature
 513		 * table from it.
 514		 */
 515		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
 516		struct cfi_pri_intelext *extp;
 517
 518		extp = read_pri_intelext(map, adr);
 519		if (!extp) {
 520			kfree(mtd);
 521			return NULL;
 522		}
 523
 524		/* Install our own private info structure */
 525		cfi->cmdset_priv = extp;
 526
 527		cfi_fixup(mtd, cfi_fixup_table);
 528
 529#ifdef DEBUG_CFI_FEATURES
 530		/* Tell the user about it in lots of lovely detail */
 531		cfi_tell_features(extp);
 532#endif
 533
 534		if(extp->SuspendCmdSupport & 1) {
 535			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
 536		}
 537	}
 538	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
 539		/* Apply jedec specific fixups */
 540		cfi_fixup(mtd, jedec_fixup_table);
 541	}
 542	/* Apply generic fixups */
 543	cfi_fixup(mtd, fixup_table);
 544
 545	for (i=0; i< cfi->numchips; i++) {
 546		if (cfi->cfiq->WordWriteTimeoutTyp)
 547			cfi->chips[i].word_write_time =
 548				1<<cfi->cfiq->WordWriteTimeoutTyp;
 549		else
 550			cfi->chips[i].word_write_time = 50000;
 551
 552		if (cfi->cfiq->BufWriteTimeoutTyp)
 553			cfi->chips[i].buffer_write_time =
 554				1<<cfi->cfiq->BufWriteTimeoutTyp;
 555		/* No default; if it isn't specified, we won't use it */
 556
 557		if (cfi->cfiq->BlockEraseTimeoutTyp)
 558			cfi->chips[i].erase_time =
 559				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
 560		else
 561			cfi->chips[i].erase_time = 2000000;
 562
 563		if (cfi->cfiq->WordWriteTimeoutTyp &&
 564		    cfi->cfiq->WordWriteTimeoutMax)
 565			cfi->chips[i].word_write_time_max =
 566				1<<(cfi->cfiq->WordWriteTimeoutTyp +
 567				    cfi->cfiq->WordWriteTimeoutMax);
 568		else
 569			cfi->chips[i].word_write_time_max = 50000 * 8;
 570
 571		if (cfi->cfiq->BufWriteTimeoutTyp &&
 572		    cfi->cfiq->BufWriteTimeoutMax)
 573			cfi->chips[i].buffer_write_time_max =
 574				1<<(cfi->cfiq->BufWriteTimeoutTyp +
 575				    cfi->cfiq->BufWriteTimeoutMax);
 576
 577		if (cfi->cfiq->BlockEraseTimeoutTyp &&
 578		    cfi->cfiq->BlockEraseTimeoutMax)
 579			cfi->chips[i].erase_time_max =
 580				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
 581				       cfi->cfiq->BlockEraseTimeoutMax);
 582		else
 583			cfi->chips[i].erase_time_max = 2000000 * 8;
 584
 585		cfi->chips[i].ref_point_counter = 0;
 586		init_waitqueue_head(&(cfi->chips[i].wq));
 587	}
 588
 589	map->fldrv = &cfi_intelext_chipdrv;
 590
 591	return cfi_intelext_setup(mtd);
 592}
 593struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
 594struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
 595EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
 596EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
 597EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
 598
 599static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
 600{
 601	struct map_info *map = mtd->priv;
 602	struct cfi_private *cfi = map->fldrv_priv;
 603	unsigned long offset = 0;
 604	int i,j;
 605	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
 606
 607	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
 608
 609	mtd->size = devsize * cfi->numchips;
 610
 611	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
 612	mtd->eraseregions = kcalloc(mtd->numeraseregions,
 613				    sizeof(struct mtd_erase_region_info),
 614				    GFP_KERNEL);
 615	if (!mtd->eraseregions)
 616		goto setup_err;
 617
 618	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
 619		unsigned long ernum, ersize;
 620		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
 621		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
 622
 623		if (mtd->erasesize < ersize) {
 624			mtd->erasesize = ersize;
 625		}
 626		for (j=0; j<cfi->numchips; j++) {
 627			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
 628			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
 629			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
 630			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
 631			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
 632				goto setup_err;
 633		}
 634		offset += (ersize * ernum);
 635	}
 636
 637	if (offset != devsize) {
 638		/* Argh */
 639		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
 640		goto setup_err;
 641	}
 642
 643	for (i=0; i<mtd->numeraseregions;i++){
 644		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
 645		       i,(unsigned long long)mtd->eraseregions[i].offset,
 646		       mtd->eraseregions[i].erasesize,
 647		       mtd->eraseregions[i].numblocks);
 648	}
 649
 650#ifdef CONFIG_MTD_OTP
 651	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
 652	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
 653	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
 654	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
 655	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
 656	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
 657#endif
 658
 659	/* This function has the potential to distort the reality
 660	   a bit and therefore should be called last. */
 661	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
 662		goto setup_err;
 663
 664	__module_get(THIS_MODULE);
 665	register_reboot_notifier(&mtd->reboot_notifier);
 666	return mtd;
 667
 668 setup_err:
 669	if (mtd->eraseregions)
 670		for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
 671			for (j=0; j<cfi->numchips; j++)
 672				kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
 673	kfree(mtd->eraseregions);
 674	kfree(mtd);
 675	kfree(cfi->cmdset_priv);
 676	return NULL;
 677}
 678
 679static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
 680					struct cfi_private **pcfi)
 681{
 682	struct map_info *map = mtd->priv;
 683	struct cfi_private *cfi = *pcfi;
 684	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
 685
 686	/*
 687	 * Probing of multi-partition flash chips.
 688	 *
 689	 * To support multiple partitions when available, we simply arrange
 690	 * for each of them to have their own flchip structure even if they
 691	 * are on the same physical chip.  This means completely recreating
 692	 * a new cfi_private structure right here, which is a blatant code
 693	 * layering violation, but this is still the least intrusive
 694	 * arrangement at this point. This can be rearranged in the future
 695	 * if someone feels motivated enough.  --nico
 696	 */
 697	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
 698	    && extp->FeatureSupport & (1 << 9)) {
 699		int offs = 0;
 700		struct cfi_private *newcfi;
 701		struct flchip *chip;
 702		struct flchip_shared *shared;
 703		int numregions, numparts, partshift, numvirtchips, i, j;
 704
 705		/* Protection Register info */
 706		if (extp->NumProtectionFields)
 707			offs = (extp->NumProtectionFields - 1) *
 708			       sizeof(struct cfi_intelext_otpinfo);
 709
 710		/* Burst Read info */
 711		offs += extp->extra[offs+1]+2;
 712
 713		/* Number of partition regions */
 714		numregions = extp->extra[offs];
 715		offs += 1;
 716
 717		/* skip the sizeof(partregion) field in CFI 1.4 */
 718		if (extp->MinorVersion >= '4')
 719			offs += 2;
 720
 721		/* Number of hardware partitions */
 722		numparts = 0;
 723		for (i = 0; i < numregions; i++) {
 724			struct cfi_intelext_regioninfo *rinfo;
 725			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
 726			numparts += rinfo->NumIdentPartitions;
 727			offs += sizeof(*rinfo)
 728				+ (rinfo->NumBlockTypes - 1) *
 729				  sizeof(struct cfi_intelext_blockinfo);
 730		}
 731
 732		if (!numparts)
 733			numparts = 1;
 734
 735		/* Programming Region info */
 736		if (extp->MinorVersion >= '4') {
 737			struct cfi_intelext_programming_regioninfo *prinfo;
 738			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
 739			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
 740			mtd->flags &= ~MTD_BIT_WRITEABLE;
 741			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
 742			       map->name, mtd->writesize,
 743			       cfi->interleave * prinfo->ControlValid,
 744			       cfi->interleave * prinfo->ControlInvalid);
 745		}
 746
 747		/*
 748		 * All functions below currently rely on all chips having
 749		 * the same geometry so we'll just assume that all hardware
 750		 * partitions are of the same size too.
 751		 */
 752		partshift = cfi->chipshift - __ffs(numparts);
 753
 754		if ((1 << partshift) < mtd->erasesize) {
 755			printk( KERN_ERR
 756				"%s: bad number of hw partitions (%d)\n",
 757				__func__, numparts);
 758			return -EINVAL;
 759		}
 760
 761		numvirtchips = cfi->numchips * numparts;
 762		newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
 763				 GFP_KERNEL);
 764		if (!newcfi)
 765			return -ENOMEM;
 766		shared = kmalloc_array(cfi->numchips,
 767				       sizeof(struct flchip_shared),
 768				       GFP_KERNEL);
 769		if (!shared) {
 770			kfree(newcfi);
 771			return -ENOMEM;
 772		}
 773		memcpy(newcfi, cfi, sizeof(struct cfi_private));
 774		newcfi->numchips = numvirtchips;
 775		newcfi->chipshift = partshift;
 776
 777		chip = &newcfi->chips[0];
 778		for (i = 0; i < cfi->numchips; i++) {
 779			shared[i].writing = shared[i].erasing = NULL;
 780			mutex_init(&shared[i].lock);
 781			for (j = 0; j < numparts; j++) {
 782				*chip = cfi->chips[i];
 783				chip->start += j << partshift;
 784				chip->priv = &shared[i];
 785				/* these should be reset too, since they
 786				   embed memory references. */
 787				init_waitqueue_head(&chip->wq);
 788				mutex_init(&chip->mutex);
 789				chip++;
 790			}
 791		}
 792
 793		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
 794				  "--> %d partitions of %d KiB\n",
 795				  map->name, cfi->numchips, cfi->interleave,
 796				  newcfi->numchips, 1<<(newcfi->chipshift-10));
 797
 798		map->fldrv_priv = newcfi;
 799		*pcfi = newcfi;
 800		kfree(cfi);
 801	}
 802
 803	return 0;
 804}
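
/*
 * Numeric sketch of the partition arithmetic above (all values assumed):
 * one 64 MiB chip (chipshift = 26) advertising numparts = 4 identical
 * hardware partitions yields
 *
 *	partshift    = 26 - __ffs(4) = 24	(16 MiB per partition)
 *	numvirtchips = 1 * 4 = 4
 *
 * so the rest of the driver sees four independent 16 MiB "chips", each with
 * its own flchip state machine, all sharing one flchip_shared to arbitrate
 * the real chip's single program/erase engine.
 */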
 805
 806/*
 807 *  *********** CHIP ACCESS FUNCTIONS ***********
 808 */
 809static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
 810{
 811	DECLARE_WAITQUEUE(wait, current);
 812	struct cfi_private *cfi = map->fldrv_priv;
 813	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
 814	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
 815	unsigned long timeo = jiffies + HZ;
 816
 817	/* Prevent setting state FL_SYNCING for chip in suspended state. */
 818	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
 819		goto sleep;
 820
 821	switch (chip->state) {
 822
 823	case FL_STATUS:
 824		for (;;) {
 825			status = map_read(map, adr);
 826			if (map_word_andequal(map, status, status_OK, status_OK))
 827				break;
 828
 829			/* At this point we're fine with write operations
 830			   in other partitions as they don't conflict. */
 831			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
 832				break;
 833
 834			mutex_unlock(&chip->mutex);
 835			cfi_udelay(1);
 836			mutex_lock(&chip->mutex);
 837			/* Someone else might have been playing with it. */
 838			return -EAGAIN;
 839		}
 840		fallthrough;
 841	case FL_READY:
 842	case FL_CFI_QUERY:
 843	case FL_JEDEC_QUERY:
 844		return 0;
 845
 846	case FL_ERASING:
 847		if (!cfip ||
 848		    !(cfip->FeatureSupport & 2) ||
 849		    !(mode == FL_READY || mode == FL_POINT ||
 850		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
 851			goto sleep;
 852
 853		/* Do not allow suspend if the read/write targets the erase block (EB) being erased */
 854		if ((adr & chip->in_progress_block_mask) ==
 855		    chip->in_progress_block_addr)
 856			goto sleep;
 857
 858		/* Do not suspend small erase blocks: workaround for buggy Micron chips */
 859		if (cfi_is_micron_28F00AP30(cfi, chip) &&
 860		    (chip->in_progress_block_mask == ~(0x8000-1)))
 861			goto sleep;
 862
 863		/* Erase suspend */
 864		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
 865
 866		/* If the flash has finished erasing, then 'erase suspend'
 867		 * appears to make some (28F320) flash devices switch to
 868		 * 'read' mode.  Make sure that we switch to 'read status'
 869		 * mode so we get the right data. --rmk
 870		 */
 871		map_write(map, CMD(0x70), chip->in_progress_block_addr);
 872		chip->oldstate = FL_ERASING;
 873		chip->state = FL_ERASE_SUSPENDING;
 874		chip->erase_suspended = 1;
 875		for (;;) {
 876			status = map_read(map, chip->in_progress_block_addr);
 877			if (map_word_andequal(map, status, status_OK, status_OK))
 878				break;
 879
 880			if (time_after(jiffies, timeo)) {
 881				/* Urgh. Resume and pretend we weren't here.
 882				 * Make sure we're in 'read status' mode if it had finished */
 883				put_chip(map, chip, adr);
 884				printk(KERN_ERR "%s: Chip not ready after erase "
 885				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
 886				return -EIO;
 887			}
 888
 889			mutex_unlock(&chip->mutex);
 890			cfi_udelay(1);
 891			mutex_lock(&chip->mutex);
 892			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
 893			   So we can just loop here. */
 894		}
 895		chip->state = FL_STATUS;
 896		return 0;
 897
 898	case FL_XIP_WHILE_ERASING:
 899		if (mode != FL_READY && mode != FL_POINT &&
 900		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
 901			goto sleep;
 902		chip->oldstate = chip->state;
 903		chip->state = FL_READY;
 904		return 0;
 905
 906	case FL_SHUTDOWN:
 907		/* The machine is rebooting now, so no one can access the chip anymore */
 908		return -EIO;
 909	case FL_POINT:
 910		/* Only if there's no operation suspended... */
 911		if (mode == FL_READY && chip->oldstate == FL_READY)
 912			return 0;
 913		fallthrough;
 914	default:
 915	sleep:
 916		set_current_state(TASK_UNINTERRUPTIBLE);
 917		add_wait_queue(&chip->wq, &wait);
 918		mutex_unlock(&chip->mutex);
 919		schedule();
 920		remove_wait_queue(&chip->wq, &wait);
 921		mutex_lock(&chip->mutex);
 922		return -EAGAIN;
 923	}
 924}
 925
 926static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
 927{
 928	int ret;
 929	DECLARE_WAITQUEUE(wait, current);
 930
 931 retry:
 932	if (chip->priv &&
 933	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
 934	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
 935		/*
 936		 * OK. We have possibility for contention on the write/erase
 937		 * operations which are global to the real chip and not per
 938		 * partition.  So let's fight it over in the partition which
 939		 * currently has authority on the operation.
 940		 *
 941		 * The rules are as follows:
 942		 *
 943		 * - any write operation must own shared->writing.
 944		 *
 945		 * - any erase operation must own _both_ shared->writing and
 946		 *   shared->erasing.
 947		 *
 948		 * - contention arbitration is handled in the owner's context.
 949		 *
 950		 * The 'shared' struct can be read and/or written only when
 951		 * its lock is taken.
 952		 */
 953		struct flchip_shared *shared = chip->priv;
 954		struct flchip *contender;
 955		mutex_lock(&shared->lock);
 956		contender = shared->writing;
 957		if (contender && contender != chip) {
 958			/*
 959			 * The engine to perform desired operation on this
 960			 * partition is already in use by someone else.
 961			 * Let's fight over it in the context of the chip
 962			 * currently using it.  If it is possible to suspend,
 963			 * that other partition will do just that, otherwise
 964			 * it'll happily send us to sleep.  In any case, when
 965			 * get_chip returns success we're clear to go ahead.
 966			 */
 967			ret = mutex_trylock(&contender->mutex);
 968			mutex_unlock(&shared->lock);
 969			if (!ret)
 970				goto retry;
 971			mutex_unlock(&chip->mutex);
 972			ret = chip_ready(map, contender, contender->start, mode);
 973			mutex_lock(&chip->mutex);
 974
 975			if (ret == -EAGAIN) {
 976				mutex_unlock(&contender->mutex);
 977				goto retry;
 978			}
 979			if (ret) {
 980				mutex_unlock(&contender->mutex);
 981				return ret;
 982			}
 983			mutex_lock(&shared->lock);
 984
 985			/* We should not own chip if it is already
 986			 * in FL_SYNCING state. Put contender and retry. */
 987			if (chip->state == FL_SYNCING) {
 988				put_chip(map, contender, contender->start);
 989				mutex_unlock(&contender->mutex);
 990				goto retry;
 991			}
 992			mutex_unlock(&contender->mutex);
 993		}
 994
 995		/* Check if we already have suspended erase
 996		 * on this chip. Sleep. */
 997		if (mode == FL_ERASING && shared->erasing
 998		    && shared->erasing->oldstate == FL_ERASING) {
 999			mutex_unlock(&shared->lock);
1000			set_current_state(TASK_UNINTERRUPTIBLE);
1001			add_wait_queue(&chip->wq, &wait);
1002			mutex_unlock(&chip->mutex);
1003			schedule();
1004			remove_wait_queue(&chip->wq, &wait);
1005			mutex_lock(&chip->mutex);
1006			goto retry;
1007		}
1008
1009		/* We now own it */
1010		shared->writing = chip;
1011		if (mode == FL_ERASING)
1012			shared->erasing = chip;
1013		mutex_unlock(&shared->lock);
1014	}
1015	ret = chip_ready(map, chip, adr, mode);
1016	if (ret == -EAGAIN)
1017		goto retry;
1018
1019	return ret;
1020}
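
/*
 * Minimal sketch (mirroring do_write_oneword() further below) of how every
 * flash operation in this file brackets its hardware access with the
 * get_chip()/put_chip() pair while holding chip->mutex:
 */
#if 0
static int example_op(struct map_info *map, struct flchip *chip,
		      unsigned long adr)
{
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* ... issue commands and poll the status register here ... */

	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return 0;
}
#endif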
1021
1022static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
1023{
1024	struct cfi_private *cfi = map->fldrv_priv;
1025
1026	if (chip->priv) {
1027		struct flchip_shared *shared = chip->priv;
1028		mutex_lock(&shared->lock);
1029		if (shared->writing == chip && chip->oldstate == FL_READY) {
1030			/* We own the ability to write, but we're done */
1031			shared->writing = shared->erasing;
1032			if (shared->writing && shared->writing != chip) {
1033				/* give back ownership to the chip we borrowed it from */
1034				struct flchip *loaner = shared->writing;
1035				mutex_lock(&loaner->mutex);
1036				mutex_unlock(&shared->lock);
1037				mutex_unlock(&chip->mutex);
1038				put_chip(map, loaner, loaner->start);
1039				mutex_lock(&chip->mutex);
1040				mutex_unlock(&loaner->mutex);
1041				wake_up(&chip->wq);
1042				return;
1043			}
1044			shared->erasing = NULL;
1045			shared->writing = NULL;
1046		} else if (shared->erasing == chip && shared->writing != chip) {
1047			/*
1048			 * We own the ability to erase without the ability
1049			 * to write, which means the erase was suspended
1050			 * and some other partition is currently writing.
1051			 * Don't let the switch below mess things up since
1052			 * we don't have ownership to resume anything.
1053			 */
1054			mutex_unlock(&shared->lock);
1055			wake_up(&chip->wq);
1056			return;
1057		}
1058		mutex_unlock(&shared->lock);
1059	}
1060
1061	switch(chip->oldstate) {
1062	case FL_ERASING:
1063		/* What if one interleaved chip has finished and the
1064		   other hasn't? The old code would leave the finished
1065		   one in READY mode. That's bad, and caused -EROFS
1066		   errors to be returned from do_erase_oneblock because
1067		   that's the only bit it checked for at the time.
1068		   As the state machine appears to explicitly allow
1069		   sending the 0x70 (Read Status) command to an erasing
1070		   chip and expecting it to be ignored, that's what we
1071		   do. */
1072		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1073		map_write(map, CMD(0x70), chip->in_progress_block_addr);
1074		chip->oldstate = FL_READY;
1075		chip->state = FL_ERASING;
1076		break;
1077
1078	case FL_XIP_WHILE_ERASING:
1079		chip->state = chip->oldstate;
1080		chip->oldstate = FL_READY;
1081		break;
1082
1083	case FL_READY:
1084	case FL_STATUS:
1085	case FL_JEDEC_QUERY:
1086		break;
1087	default:
1088		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
1089	}
1090	wake_up(&chip->wq);
1091}
1092
1093#ifdef CONFIG_MTD_XIP
1094
1095/*
1096 * No interrupt whatsoever can be serviced while the flash isn't in array
1097 * mode.  This is ensured by the xip_disable() and xip_enable() functions
1098 * enclosing any code path where the flash is known not to be in array mode.
1099 * Within an XIP-disabled code path, only functions marked with __xipram
1100 * may be called and nothing else (it's a good idea to inspect the generated
1101 * assembly to make sure inline functions were actually inlined and that gcc
1102 * didn't emit calls to its own support functions).  Configuring MTD CFI
1103 * support for a single buswidth and a single interleave is also recommended.
1104 */
1105
1106static void xip_disable(struct map_info *map, struct flchip *chip,
1107			unsigned long adr)
1108{
1109	/* TODO: chips with no XIP use should ignore and return */
1110	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
1111	local_irq_disable();
1112}
1113
1114static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1115				unsigned long adr)
1116{
1117	struct cfi_private *cfi = map->fldrv_priv;
1118	if (chip->state != FL_POINT && chip->state != FL_READY) {
1119		map_write(map, CMD(0xff), adr);
1120		chip->state = FL_READY;
1121	}
1122	(void) map_read(map, adr);
1123	xip_iprefetch();
1124	local_irq_enable();
1125}
1126
1127/*
1128 * When a delay is required for a flash operation to complete, the
1129 * xip_wait_for_operation() function polls for both expiry of the given
1130 * timeout and pending (but still masked) hardware interrupts.  Whenever
1131 * an interrupt is pending, the flash erase or write operation is
1132 * suspended, array mode is restored and interrupts are unmasked.  Task
1133 * scheduling might also happen at that point.  The CPU eventually returns
1134 * from the interrupt or from the call to schedule(), and the suspended
1135 * flash operation is resumed for the remainder of the delay period.
1136 *
1137 * Warning: this function _will_ fool interrupt latency tracing tools.
1138 */
1139
1140static int __xipram xip_wait_for_operation(
1141		struct map_info *map, struct flchip *chip,
1142		unsigned long adr, unsigned int chip_op_time_max)
1143{
1144	struct cfi_private *cfi = map->fldrv_priv;
1145	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1146	map_word status, OK = CMD(0x80);
1147	unsigned long usec, suspended, start, done;
1148	flstate_t oldstate, newstate;
1149
1150	start = xip_currtime();
1151	usec = chip_op_time_max;
1152	if (usec == 0)
1153		usec = 500000;
1154	done = 0;
1155
1156	do {
1157		cpu_relax();
1158		if (xip_irqpending() && cfip &&
1159		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1160		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1161		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1162			/*
1163			 * Let's suspend the erase or write operation when
1164			 * supported.  Note that we currently don't try to
1165			 * suspend interleaved chips if there is already
1166			 * another operation suspended (imagine what happens
1167			 * when one chip was already done with the current
1168			 * operation while another chip suspended it, then
1169			 * we resume the whole thing at once).  Yes, it
1170			 * can happen!
1171			 */
1172			usec -= done;
1173			map_write(map, CMD(0xb0), adr);
1174			map_write(map, CMD(0x70), adr);
1175			suspended = xip_currtime();
1176			do {
1177				if (xip_elapsed_since(suspended) > 100000) {
1178					/*
1179					 * The chip doesn't want to suspend
1180					 * after waiting for 100 msecs.
1181					 * This is a critical error but there
1182					 * is not much we can do here.
1183					 */
1184					return -EIO;
1185				}
1186				status = map_read(map, adr);
1187			} while (!map_word_andequal(map, status, OK, OK));
1188
1189			/* Suspend succeeded */
1190			oldstate = chip->state;
1191			if (oldstate == FL_ERASING) {
1192				if (!map_word_bitsset(map, status, CMD(0x40)))
1193					break;
1194				newstate = FL_XIP_WHILE_ERASING;
1195				chip->erase_suspended = 1;
1196			} else {
1197				if (!map_word_bitsset(map, status, CMD(0x04)))
1198					break;
1199				newstate = FL_XIP_WHILE_WRITING;
1200				chip->write_suspended = 1;
1201			}
1202			chip->state = newstate;
1203			map_write(map, CMD(0xff), adr);
1204			(void) map_read(map, adr);
1205			xip_iprefetch();
1206			local_irq_enable();
1207			mutex_unlock(&chip->mutex);
1208			xip_iprefetch();
1209			cond_resched();
1210
1211			/*
1212			 * We're back.  However someone else might have
1213			 * decided to go write to the chip if we are in
1214			 * a suspended erase state.  If so let's wait
1215			 * until it's done.
1216			 */
1217			mutex_lock(&chip->mutex);
1218			while (chip->state != newstate) {
1219				DECLARE_WAITQUEUE(wait, current);
1220				set_current_state(TASK_UNINTERRUPTIBLE);
1221				add_wait_queue(&chip->wq, &wait);
1222				mutex_unlock(&chip->mutex);
1223				schedule();
1224				remove_wait_queue(&chip->wq, &wait);
1225				mutex_lock(&chip->mutex);
1226			}
1227			/* Disallow XIP again */
1228			local_irq_disable();
1229
1230			/* Resume the write or erase operation */
1231			map_write(map, CMD(0xd0), adr);
1232			map_write(map, CMD(0x70), adr);
1233			chip->state = oldstate;
1234			start = xip_currtime();
1235		} else if (usec >= 1000000/HZ) {
1236			/*
1237			 * Try to save on CPU power when waiting delay
1238			 * is at least a system timer tick period.
1239			 * No need to be extremely accurate here.
1240			 */
1241			xip_cpu_idle();
1242		}
1243		status = map_read(map, adr);
1244		done = xip_elapsed_since(start);
1245	} while (!map_word_andequal(map, status, OK, OK)
1246		 && done < usec);
1247
1248	return (done >= usec) ? -ETIME : 0;
1249}
1250
1251/*
1252 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1253 * the flash is actively programming or erasing since we have to poll for
1254 * the operation to complete anyway.  We can't do that in a generic way with
1255 * a XIP setup so do it before the actual flash operation in this case
1256 * and stub it out from INVAL_CACHE_AND_WAIT.
1257 */
1258#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1259	INVALIDATE_CACHED_RANGE(map, from, size)
1260
1261#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1262	xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1263
1264#else
1265
1266#define xip_disable(map, chip, adr)
1267#define xip_enable(map, chip, adr)
1268#define XIP_INVAL_CACHED_RANGE(x...)
1269#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1270
1271static int inval_cache_and_wait_for_operation(
1272		struct map_info *map, struct flchip *chip,
1273		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1274		unsigned int chip_op_time, unsigned int chip_op_time_max)
1275{
1276	struct cfi_private *cfi = map->fldrv_priv;
1277	map_word status, status_OK = CMD(0x80);
1278	int chip_state = chip->state;
1279	unsigned int timeo, sleep_time, reset_timeo;
1280
1281	mutex_unlock(&chip->mutex);
1282	if (inval_len)
1283		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1284	mutex_lock(&chip->mutex);
1285
1286	timeo = chip_op_time_max;
1287	if (!timeo)
1288		timeo = 500000;
1289	reset_timeo = timeo;
1290	sleep_time = chip_op_time / 2;
1291
1292	for (;;) {
1293		if (chip->state != chip_state) {
1294			/* Someone's suspended the operation: sleep */
1295			DECLARE_WAITQUEUE(wait, current);
1296			set_current_state(TASK_UNINTERRUPTIBLE);
1297			add_wait_queue(&chip->wq, &wait);
1298			mutex_unlock(&chip->mutex);
1299			schedule();
1300			remove_wait_queue(&chip->wq, &wait);
1301			mutex_lock(&chip->mutex);
1302			continue;
1303		}
1304
1305		status = map_read(map, cmd_adr);
1306		if (map_word_andequal(map, status, status_OK, status_OK))
1307			break;
1308
1309		if (chip->erase_suspended && chip_state == FL_ERASING)  {
1310			/* Erase suspend occurred while sleep: reset timeout */
1311			timeo = reset_timeo;
1312			chip->erase_suspended = 0;
1313		}
1314		if (chip->write_suspended && chip_state == FL_WRITING)  {
1315			/* Write suspend occurred while sleep: reset timeout */
1316			timeo = reset_timeo;
1317			chip->write_suspended = 0;
1318		}
1319		if (!timeo) {
1320			map_write(map, CMD(0x70), cmd_adr);
1321			chip->state = FL_STATUS;
1322			return -ETIME;
1323		}
1324
1325		/* OK Still waiting. Drop the lock, wait a while and retry. */
1326		mutex_unlock(&chip->mutex);
1327		if (sleep_time >= 1000000/HZ) {
1328			/*
1329			 * Half of the normal delay still remaining
1330			 * can be performed with a sleeping delay instead
1331			 * of busy waiting.
1332			 */
1333			msleep(sleep_time/1000);
1334			timeo -= sleep_time;
1335			sleep_time = 1000000/HZ;
1336		} else {
1337			udelay(1);
1338			cond_resched();
1339			timeo--;
1340		}
1341		mutex_lock(&chip->mutex);
1342	}
1343
1344	/* Done and happy. */
1345	chip->state = FL_STATUS;
1346	return 0;
1347}
1348
1349#endif
1350
1351#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1352	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1353
1354
1355static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1356{
1357	unsigned long cmd_addr;
1358	struct cfi_private *cfi = map->fldrv_priv;
1359	int ret;
1360
1361	adr += chip->start;
1362
1363	/* Ensure cmd read/writes are aligned. */
1364	cmd_addr = adr & ~(map_bankwidth(map)-1);
1365
1366	mutex_lock(&chip->mutex);
1367
1368	ret = get_chip(map, chip, cmd_addr, FL_POINT);
1369
1370	if (!ret) {
1371		if (chip->state != FL_POINT && chip->state != FL_READY)
1372			map_write(map, CMD(0xff), cmd_addr);
1373
1374		chip->state = FL_POINT;
1375		chip->ref_point_counter++;
1376	}
1377	mutex_unlock(&chip->mutex);
1378
1379	return ret;
1380}
1381
1382static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1383		size_t *retlen, void **virt, resource_size_t *phys)
1384{
1385	struct map_info *map = mtd->priv;
1386	struct cfi_private *cfi = map->fldrv_priv;
1387	unsigned long ofs, last_end = 0;
1388	int chipnum;
1389	int ret;
1390
1391	if (!map->virt)
1392		return -EINVAL;
1393
1394	/* Now lock the chip(s) to POINT state */
1395
1396	/* ofs: offset within the first chip that the first read should start */
1397	chipnum = (from >> cfi->chipshift);
1398	ofs = from - (chipnum << cfi->chipshift);
1399
1400	*virt = map->virt + cfi->chips[chipnum].start + ofs;
1401	if (phys)
1402		*phys = map->phys + cfi->chips[chipnum].start + ofs;
1403
1404	while (len) {
1405		unsigned long thislen;
1406
1407		if (chipnum >= cfi->numchips)
1408			break;
1409
1410		/* We cannot point across chips that are virtually disjoint */
1411		if (!last_end)
1412			last_end = cfi->chips[chipnum].start;
1413		else if (cfi->chips[chipnum].start != last_end)
1414			break;
1415
1416		if ((len + ofs -1) >> cfi->chipshift)
1417			thislen = (1<<cfi->chipshift) - ofs;
1418		else
1419			thislen = len;
1420
1421		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1422		if (ret)
1423			break;
1424
1425		*retlen += thislen;
1426		len -= thislen;
1427
1428		ofs = 0;
1429		last_end += 1 << cfi->chipshift;
1430		chipnum++;
1431	}
1432	return 0;
1433}
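
/*
 * Caller-side sketch of the point/unpoint pair via the generic MTD API
 * ('mtd' assumed to come from get_mtd_device(); error handling trimmed):
 */
#if 0
	u_char dest[64];
	size_t retlen;
	void *virt;

	if (!mtd_point(mtd, 0, sizeof(dest), &retlen, &virt, NULL)) {
		/* 'virt' now directly maps the first retlen bytes of flash */
		memcpy(dest, virt, retlen);
		mtd_unpoint(mtd, 0, retlen);
	}
#endif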
1434
1435static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1436{
1437	struct map_info *map = mtd->priv;
1438	struct cfi_private *cfi = map->fldrv_priv;
1439	unsigned long ofs;
1440	int chipnum, err = 0;
1441
1442	/* Now unlock the chip(s) POINT state */
1443
1444	/* ofs: offset within the first chip that the first read should start */
1445	chipnum = (from >> cfi->chipshift);
1446	ofs = from - (chipnum <<  cfi->chipshift);
1447
1448	while (len && !err) {
1449		unsigned long thislen;
1450		struct flchip *chip;
1451
1452		if (chipnum >= cfi->numchips)
1453			break;
1454		chip = &cfi->chips[chipnum];
1455
1456		if ((len + ofs -1) >> cfi->chipshift)
1457			thislen = (1<<cfi->chipshift) - ofs;
1458		else
1459			thislen = len;
1460
1461		mutex_lock(&chip->mutex);
1462		if (chip->state == FL_POINT) {
1463			chip->ref_point_counter--;
1464			if (chip->ref_point_counter == 0)
1465				chip->state = FL_READY;
1466		} else {
1467			printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
1468			err = -EINVAL;
1469		}
1470
1471		put_chip(map, chip, chip->start);
1472		mutex_unlock(&chip->mutex);
1473
1474		len -= thislen;
1475		ofs = 0;
1476		chipnum++;
1477	}
1478
1479	return err;
1480}
1481
1482static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1483{
1484	unsigned long cmd_addr;
1485	struct cfi_private *cfi = map->fldrv_priv;
1486	int ret;
1487
1488	adr += chip->start;
1489
1490	/* Ensure cmd read/writes are aligned. */
1491	cmd_addr = adr & ~(map_bankwidth(map)-1);
1492
1493	mutex_lock(&chip->mutex);
1494	ret = get_chip(map, chip, cmd_addr, FL_READY);
1495	if (ret) {
1496		mutex_unlock(&chip->mutex);
1497		return ret;
1498	}
1499
1500	if (chip->state != FL_POINT && chip->state != FL_READY) {
1501		map_write(map, CMD(0xff), cmd_addr);
1502
1503		chip->state = FL_READY;
1504	}
1505
1506	map_copy_from(map, buf, adr, len);
1507
1508	put_chip(map, chip, cmd_addr);
1509
1510	mutex_unlock(&chip->mutex);
1511	return 0;
1512}
1513
1514static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1515{
1516	struct map_info *map = mtd->priv;
1517	struct cfi_private *cfi = map->fldrv_priv;
1518	unsigned long ofs;
1519	int chipnum;
1520	int ret = 0;
1521
1522	/* ofs: offset within the first chip that the first read should start */
1523	chipnum = (from >> cfi->chipshift);
1524	ofs = from - (chipnum <<  cfi->chipshift);
1525
1526	while (len) {
1527		unsigned long thislen;
1528
1529		if (chipnum >= cfi->numchips)
1530			break;
1531
1532		if ((len + ofs -1) >> cfi->chipshift)
1533			thislen = (1<<cfi->chipshift) - ofs;
1534		else
1535			thislen = len;
1536
1537		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1538		if (ret)
1539			break;
1540
1541		*retlen += thislen;
1542		len -= thislen;
1543		buf += thislen;
1544
1545		ofs = 0;
1546		chipnum++;
1547	}
1548	return ret;
1549}
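
/*
 * The chip-walking loop above is what ultimately services mtd_read().
 * A minimal caller looks like this (sketch; 'mtd' assumed to come from
 * get_mtd_device()):
 */
#if 0
	u_char buf[256];
	size_t retlen;

	if (!mtd_read(mtd, 0x1000, sizeof(buf), &retlen, buf) &&
	    retlen == sizeof(buf)) {
		/* buf now holds 256 bytes from flash offset 0x1000 */
	}
#endif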
1550
1551static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1552				     unsigned long adr, map_word datum, int mode)
1553{
1554	struct cfi_private *cfi = map->fldrv_priv;
1555	map_word status, write_cmd;
1556	int ret;
1557
1558	adr += chip->start;
1559
1560	switch (mode) {
1561	case FL_WRITING:
1562		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1563		break;
1564	case FL_OTP_WRITE:
1565		write_cmd = CMD(0xc0);
1566		break;
1567	default:
1568		return -EINVAL;
1569	}
1570
1571	mutex_lock(&chip->mutex);
1572	ret = get_chip(map, chip, adr, mode);
1573	if (ret) {
1574		mutex_unlock(&chip->mutex);
1575		return ret;
1576	}
1577
1578	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1579	ENABLE_VPP(map);
1580	xip_disable(map, chip, adr);
1581	map_write(map, write_cmd, adr);
1582	map_write(map, datum, adr);
1583	chip->state = mode;
1584
1585	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1586				   adr, map_bankwidth(map),
1587				   chip->word_write_time,
1588				   chip->word_write_time_max);
1589	if (ret) {
1590		xip_enable(map, chip, adr);
1591		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1592		goto out;
1593	}
1594
1595	/* check for errors */
1596	status = map_read(map, adr);
1597	if (map_word_bitsset(map, status, CMD(0x1a))) {
1598		unsigned long chipstatus = MERGESTATUS(status);
1599
1600		/* reset status */
1601		map_write(map, CMD(0x50), adr);
1602		map_write(map, CMD(0x70), adr);
1603		xip_enable(map, chip, adr);
1604
1605		if (chipstatus & 0x02) {
1606			ret = -EROFS;
1607		} else if (chipstatus & 0x08) {
1608			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1609			ret = -EIO;
1610		} else {
1611			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1612			ret = -EINVAL;
1613		}
1614
1615		goto out;
1616	}
1617
1618	xip_enable(map, chip, adr);
1619 out:	DISABLE_VPP(map);
1620	put_chip(map, chip, adr);
1621	mutex_unlock(&chip->mutex);
1622	return ret;
1623}
1624
1625
1626static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1627{
1628	struct map_info *map = mtd->priv;
1629	struct cfi_private *cfi = map->fldrv_priv;
1630	int ret;
1631	int chipnum;
1632	unsigned long ofs;
1633
1634	chipnum = to >> cfi->chipshift;
1635	ofs = to  - (chipnum << cfi->chipshift);
1636
1637	/* If it's not bus-aligned, do the first byte write */
1638	if (ofs & (map_bankwidth(map)-1)) {
1639		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1640		int gap = ofs - bus_ofs;
1641		int n;
1642		map_word datum;
1643
1644		n = min_t(int, len, map_bankwidth(map)-gap);
1645		datum = map_word_ff(map);
1646		datum = map_word_load_partial(map, datum, buf, gap, n);
1647
1648		ret = do_write_oneword(map, &cfi->chips[chipnum],
1649					       bus_ofs, datum, FL_WRITING);
1650		if (ret)
1651			return ret;
1652
1653		len -= n;
1654		ofs += n;
1655		buf += n;
1656		(*retlen) += n;
1657
1658		if (ofs >> cfi->chipshift) {
1659			chipnum ++;
1660			ofs = 0;
1661			if (chipnum == cfi->numchips)
1662				return 0;
1663		}
1664	}
1665
1666	while(len >= map_bankwidth(map)) {
1667		map_word datum = map_word_load(map, buf);
1668
1669		ret = do_write_oneword(map, &cfi->chips[chipnum],
1670				       ofs, datum, FL_WRITING);
1671		if (ret)
1672			return ret;
1673
1674		ofs += map_bankwidth(map);
1675		buf += map_bankwidth(map);
1676		(*retlen) += map_bankwidth(map);
1677		len -= map_bankwidth(map);
1678
1679		if (ofs >> cfi->chipshift) {
1680			chipnum ++;
1681			ofs = 0;
1682			if (chipnum == cfi->numchips)
1683				return 0;
1684		}
1685	}
1686
1687	if (len & (map_bankwidth(map)-1)) {
1688		map_word datum;
1689
1690		datum = map_word_ff(map);
1691		datum = map_word_load_partial(map, datum, buf, 0, len);
1692
1693		ret = do_write_oneword(map, &cfi->chips[chipnum],
1694				       ofs, datum, FL_WRITING);
1695		if (ret)
1696			return ret;
1697
1698		(*retlen) += len;
1699	}
1700
1701	return 0;
1702}
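
/*
 * Worked example of the alignment handling above, assuming a bankwidth of 4:
 * an 11-byte write to offset 0x102 is issued as
 *
 *	one partial word at 0x100 (gap = 2, n = 2),
 *	two full words at 0x104 and 0x108,
 *	one trailing partial word at 0x10c (1 byte).
 *
 * The partial words are padded with 0xff via map_word_ff(), which leaves the
 * untouched (erased) bytes unchanged since programming can only clear bits.
 */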
1703
1704
1705static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1706				    unsigned long adr, const struct kvec **pvec,
1707				    unsigned long *pvec_seek, int len)
1708{
1709	struct cfi_private *cfi = map->fldrv_priv;
1710	map_word status, write_cmd, datum;
1711	unsigned long cmd_adr;
1712	int ret, wbufsize, word_gap, words;
1713	const struct kvec *vec;
1714	unsigned long vec_seek;
1715	unsigned long initial_adr;
1716	int initial_len = len;
1717
1718	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1719	adr += chip->start;
1720	initial_adr = adr;
1721	cmd_adr = adr & ~(wbufsize-1);
1722
1723	/* Sharp LH28F640BF chips need the first address for the
1724	 * Page Buffer Program command. See Table 5 of
1725	 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1726	if (is_LH28F640BF(cfi))
1727		cmd_adr = adr;
1728
1729	/* Let's determine this according to the interleave only once */
1730	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1731
1732	mutex_lock(&chip->mutex);
1733	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1734	if (ret) {
1735		mutex_unlock(&chip->mutex);
1736		return ret;
1737	}
1738
1739	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1740	ENABLE_VPP(map);
1741	xip_disable(map, chip, cmd_adr);
1742
1743	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1744	   [...], the device will not accept any more Write to Buffer commands".
1745	   So we must check here and reset those bits if they're set. Otherwise
1746	   we're just pissing in the wind */
1747	if (chip->state != FL_STATUS) {
1748		map_write(map, CMD(0x70), cmd_adr);
1749		chip->state = FL_STATUS;
1750	}
1751	status = map_read(map, cmd_adr);
1752	if (map_word_bitsset(map, status, CMD(0x30))) {
1753		xip_enable(map, chip, cmd_adr);
1754		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1755		xip_disable(map, chip, cmd_adr);
1756		map_write(map, CMD(0x50), cmd_adr);
1757		map_write(map, CMD(0x70), cmd_adr);
1758	}
1759
1760	chip->state = FL_WRITING_TO_BUFFER;
1761	map_write(map, write_cmd, cmd_adr);
1762	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1763	if (ret) {
1764		/* Argh. Not ready for write to buffer */
1765		map_word Xstatus = map_read(map, cmd_adr);
1766		map_write(map, CMD(0x70), cmd_adr);
1767		chip->state = FL_STATUS;
1768		status = map_read(map, cmd_adr);
1769		map_write(map, CMD(0x50), cmd_adr);
1770		map_write(map, CMD(0x70), cmd_adr);
1771		xip_enable(map, chip, cmd_adr);
1772		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1773				map->name, Xstatus.x[0], status.x[0]);
1774		goto out;
1775	}
1776
1777	/* Figure out the number of words to write */
1778	word_gap = (-adr & (map_bankwidth(map)-1));
1779	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1780	if (!word_gap) {
1781		words--;
1782	} else {
1783		word_gap = map_bankwidth(map) - word_gap;
1784		adr -= word_gap;
1785		datum = map_word_ff(map);
1786	}
1787
1788	/* Write length of data to come */
1789	map_write(map, CMD(words), cmd_adr);
1790
1791	/* Write data */
1792	vec = *pvec;
1793	vec_seek = *pvec_seek;
1794	do {
1795		int n = map_bankwidth(map) - word_gap;
1796		if (n > vec->iov_len - vec_seek)
1797			n = vec->iov_len - vec_seek;
1798		if (n > len)
1799			n = len;
1800
1801		if (!word_gap && len < map_bankwidth(map))
1802			datum = map_word_ff(map);
1803
1804		datum = map_word_load_partial(map, datum,
1805					      vec->iov_base + vec_seek,
1806					      word_gap, n);
1807
1808		len -= n;
1809		word_gap += n;
1810		if (!len || word_gap == map_bankwidth(map)) {
1811			map_write(map, datum, adr);
1812			adr += map_bankwidth(map);
1813			word_gap = 0;
1814		}
1815
1816		vec_seek += n;
1817		if (vec_seek == vec->iov_len) {
1818			vec++;
1819			vec_seek = 0;
1820		}
1821	} while (len);
1822	*pvec = vec;
1823	*pvec_seek = vec_seek;
1824
1825	/* GO GO GO */
1826	map_write(map, CMD(0xd0), cmd_adr);
1827	chip->state = FL_WRITING;
1828
1829	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1830				   initial_adr, initial_len,
1831				   chip->buffer_write_time,
1832				   chip->buffer_write_time_max);
1833	if (ret) {
1834		map_write(map, CMD(0x70), cmd_adr);
1835		chip->state = FL_STATUS;
1836		xip_enable(map, chip, cmd_adr);
1837		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1838		goto out;
1839	}
1840
1841	/* check for errors */
1842	status = map_read(map, cmd_adr);
1843	if (map_word_bitsset(map, status, CMD(0x1a))) {
1844		unsigned long chipstatus = MERGESTATUS(status);
1845
1846		/* reset status */
1847		map_write(map, CMD(0x50), cmd_adr);
1848		map_write(map, CMD(0x70), cmd_adr);
1849		xip_enable(map, chip, cmd_adr);
1850
1851		if (chipstatus & 0x02) {
1852			ret = -EROFS;
1853		} else if (chipstatus & 0x08) {
1854			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1855			ret = -EIO;
1856		} else {
1857			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1858			ret = -EINVAL;
1859		}
1860
1861		goto out;
1862	}
1863
1864	xip_enable(map, chip, cmd_adr);
1865 out:	DISABLE_VPP(map);
1866	put_chip(map, chip, cmd_adr);
1867	mutex_unlock(&chip->mutex);
1868	return ret;
1869}
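
/*
 * Condensed view of the buffered-program sequence implemented above
 * (interleave/bankwidth scaling omitted for clarity):
 *
 *	0xe8 (or 0xe9)	Write to Buffer; poll SR.7 for buffer availability
 *	N - 1		count of bus words about to be written
 *	data words	loaded into the block's write buffer
 *	0xd0		confirm; the chip programs the whole buffer
 *	poll SR.7	then check SR.1/SR.3/SR.4 (lock/VPP/program error)
 */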
1870
1871static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1872				unsigned long count, loff_t to, size_t *retlen)
1873{
1874	struct map_info *map = mtd->priv;
1875	struct cfi_private *cfi = map->fldrv_priv;
1876	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1877	int ret;
1878	int chipnum;
1879	unsigned long ofs, vec_seek, i;
1880	size_t len = 0;
1881
1882	for (i = 0; i < count; i++)
1883		len += vecs[i].iov_len;
1884
1885	if (!len)
1886		return 0;
1887
1888	chipnum = to >> cfi->chipshift;
1889	ofs = to - (chipnum << cfi->chipshift);
1890	vec_seek = 0;
1891
1892	do {
1893		/* We must not cross write block boundaries */
1894		int size = wbufsize - (ofs & (wbufsize-1));
1895
1896		if (size > len)
1897			size = len;
1898		ret = do_write_buffer(map, &cfi->chips[chipnum],
1899				      ofs, &vecs, &vec_seek, size);
1900		if (ret)
1901			return ret;
1902
1903		ofs += size;
1904		(*retlen) += size;
1905		len -= size;
1906
1907		if (ofs >> cfi->chipshift) {
1908			chipnum ++;
1909			ofs = 0;
1910			if (chipnum == cfi->numchips)
1911				return 0;
1912		}
1913
1914		/* Be nice and reschedule with the chip in a usable state for other
1915		   processes. */
1916		cond_resched();
1917
1918	} while (len);
1919
1920	return 0;
1921}
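
/*
 * Gather-write sketch for the kvec interface above ('mtd' assumed from
 * get_mtd_device()); fragments that fit inside one write block are merged
 * into a single buffered program cycle:
 */
#if 0
	static const char hdr[]  = "hdr";
	static const char body[] = "payload";
	struct kvec vecs[2] = {
		{ .iov_base = (void *)hdr,  .iov_len = sizeof(hdr)  },
		{ .iov_base = (void *)body, .iov_len = sizeof(body) },
	};
	size_t retlen;
	int err = mtd_writev(mtd, vecs, 2, 0x20000, &retlen);
#endif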
1922
1923static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1924				       size_t len, size_t *retlen, const u_char *buf)
1925{
1926	struct kvec vec;
1927
1928	vec.iov_base = (void *) buf;
1929	vec.iov_len = len;
1930
1931	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1932}
1933
1934static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1935				      unsigned long adr, int len, void *thunk)
1936{
1937	struct cfi_private *cfi = map->fldrv_priv;
1938	map_word status;
1939	int retries = 3;
1940	int ret;
1941
1942	adr += chip->start;
1943
1944 retry:
1945	mutex_lock(&chip->mutex);
1946	ret = get_chip(map, chip, adr, FL_ERASING);
1947	if (ret) {
1948		mutex_unlock(&chip->mutex);
1949		return ret;
1950	}
1951
1952	XIP_INVAL_CACHED_RANGE(map, adr, len);
1953	ENABLE_VPP(map);
1954	xip_disable(map, chip, adr);
1955
1956	/* Clear the status register first */
1957	map_write(map, CMD(0x50), adr);
1958
1959	/* Now erase */
1960	map_write(map, CMD(0x20), adr);
1961	map_write(map, CMD(0xD0), adr);
1962	chip->state = FL_ERASING;
1963	chip->erase_suspended = 0;
1964	chip->in_progress_block_addr = adr;
1965	chip->in_progress_block_mask = ~(len - 1);
1966
1967	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1968				   adr, len,
1969				   chip->erase_time,
1970				   chip->erase_time_max);
1971	if (ret) {
1972		map_write(map, CMD(0x70), adr);
1973		chip->state = FL_STATUS;
1974		xip_enable(map, chip, adr);
1975		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1976		goto out;
1977	}
1978
1979	/* We've broken this before. It doesn't hurt to be safe */
1980	map_write(map, CMD(0x70), adr);
1981	chip->state = FL_STATUS;
1982	status = map_read(map, adr);
1983
1984	/* check for errors */
1985	if (map_word_bitsset(map, status, CMD(0x3a))) {
1986		unsigned long chipstatus = MERGESTATUS(status);
1987
1988		/* Reset the error bits */
1989		map_write(map, CMD(0x50), adr);
1990		map_write(map, CMD(0x70), adr);
1991		xip_enable(map, chip, adr);
1992
1993		if ((chipstatus & 0x30) == 0x30) {
1994			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1995			ret = -EINVAL;
1996		} else if (chipstatus & 0x02) {
1997			/* Protection bit set */
1998			ret = -EROFS;
1999		} else if (chipstatus & 0x8) {
2000			/* Voltage */
2001			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
2002			ret = -EIO;
2003		} else if (chipstatus & 0x20 && retries--) {
2004			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2005			DISABLE_VPP(map);
2006			put_chip(map, chip, adr);
2007			mutex_unlock(&chip->mutex);
2008			goto retry;
2009		} else {
2010			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2011			ret = -EIO;
2012		}
2013
2014		goto out;
2015	}
2016
2017	xip_enable(map, chip, adr);
2018 out:	DISABLE_VPP(map);
2019	put_chip(map, chip, adr);
2020	mutex_unlock(&chip->mutex);
2021	return ret;
2022}
2023
2024static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2025{
2026	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2027				instr->len, NULL);
2028}
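
/*
 * Erase entry point sketch: the MTD core passes a struct erase_info and
 * cfi_varsize_frob() invokes do_erase_oneblock() for every block the range
 * covers.  Caller side (one 128 KiB block assumed):
 */
#if 0
	struct erase_info ei = {
		.addr = 0x40000,
		.len  = 0x20000,
	};

	if (mtd_erase(mtd, &ei))
		pr_err("erase failed near 0x%llx\n",
		       (unsigned long long)ei.fail_addr);
#endif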
2029
2030static void cfi_intelext_sync (struct mtd_info *mtd)
2031{
2032	struct map_info *map = mtd->priv;
2033	struct cfi_private *cfi = map->fldrv_priv;
2034	int i;
2035	struct flchip *chip;
2036	int ret = 0;
2037
2038	for (i=0; !ret && i<cfi->numchips; i++) {
2039		chip = &cfi->chips[i];
2040
2041		mutex_lock(&chip->mutex);
2042		ret = get_chip(map, chip, chip->start, FL_SYNCING);
2043
2044		if (!ret) {
2045			chip->oldstate = chip->state;
2046			chip->state = FL_SYNCING;
2047			/* No need to wake_up() on this state change -
2048			 * as the whole point is that nobody can do anything
2049			 * with the chip now anyway.
2050			 */
2051		}
2052		mutex_unlock(&chip->mutex);
2053	}
2054
2055	/* Unlock the chips again */
2056
2057	for (i--; i >=0; i--) {
2058		chip = &cfi->chips[i];
2059
2060		mutex_lock(&chip->mutex);
2061
2062		if (chip->state == FL_SYNCING) {
2063			chip->state = chip->oldstate;
2064			chip->oldstate = FL_READY;
2065			wake_up(&chip->wq);
2066		}
2067		mutex_unlock(&chip->mutex);
2068	}
2069}
2070
2071static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2072						struct flchip *chip,
2073						unsigned long adr,
2074						int len, void *thunk)
2075{
2076	struct cfi_private *cfi = map->fldrv_priv;
2077	int status, ofs_factor = cfi->interleave * cfi->device_type;
2078
2079	adr += chip->start;
2080	xip_disable(map, chip, adr+(2*ofs_factor));
2081	map_write(map, CMD(0x90), adr+(2*ofs_factor));
2082	chip->state = FL_JEDEC_QUERY;
2083	status = cfi_read_query(map, adr+(2*ofs_factor));
2084	xip_enable(map, chip, 0);
2085	return status;
2086}
2087
2088#ifdef DEBUG_LOCK_BITS
2089static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2090						struct flchip *chip,
2091						unsigned long adr,
2092						int len, void *thunk)
2093{
2094	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2095	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2096	return 0;
2097}
2098#endif
2099
2100#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
2101#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
2102
2103static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2104				       unsigned long adr, int len, void *thunk)
2105{
2106	struct cfi_private *cfi = map->fldrv_priv;
2107	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2108	int mdelay;
2109	int ret;
2110
2111	adr += chip->start;
2112
2113	mutex_lock(&chip->mutex);
2114	ret = get_chip(map, chip, adr, FL_LOCKING);
2115	if (ret) {
2116		mutex_unlock(&chip->mutex);
2117		return ret;
2118	}
2119
2120	ENABLE_VPP(map);
2121	xip_disable(map, chip, adr);
2122
2123	map_write(map, CMD(0x60), adr);
2124	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2125		map_write(map, CMD(0x01), adr);
2126		chip->state = FL_LOCKING;
2127	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2128		map_write(map, CMD(0xD0), adr);
2129		chip->state = FL_UNLOCKING;
2130	} else
2131		BUG();
2132
2133	/*
2134	 * If Instant Individual Block Locking supported then no need
2135	 * to delay.
2136	 */
2137	/*
2138	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2139	 * lets use a max of 1.5 seconds (1500ms) as timeout.
2140	 *
2141	 * See "Clear Block Lock-Bits Time" on page 40 in
2142	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2143	 * from February 2003
2144	 */
2145	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2146
2147	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2148	if (ret) {
2149		map_write(map, CMD(0x70), adr);
2150		chip->state = FL_STATUS;
2151		xip_enable(map, chip, adr);
2152		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2153		goto out;
2154	}
2155
2156	xip_enable(map, chip, adr);
2157 out:	DISABLE_VPP(map);
2158	put_chip(map, chip, adr);
2159	mutex_unlock(&chip->mutex);
2160	return ret;
2161}
2162
2163static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2164{
2165	int ret;
2166
2167#ifdef DEBUG_LOCK_BITS
2168	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2169	       __func__, ofs, len);
2170	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2171		ofs, len, NULL);
2172#endif
2173
2174	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2175		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2176
2177#ifdef DEBUG_LOCK_BITS
2178	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2179	       __func__, ret);
2180	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2181		ofs, len, NULL);
2182#endif
2183
2184	return ret;
2185}
2186
2187static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2188{
2189	int ret;
2190
2191#ifdef DEBUG_LOCK_BITS
2192	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2193	       __func__, ofs, len);
2194	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2195		ofs, len, NULL);
2196#endif
2197
2198	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2199					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2200
2201#ifdef DEBUG_LOCK_BITS
2202	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2203	       __func__, ret);
2204	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2205		ofs, len, NULL);
2206#endif
2207
2208	return ret;
2209}
2210
2211static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2212				  uint64_t len)
2213{
2214	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2215				ofs, len, NULL) ? 1 : 0;
2216}
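
/*
 * Usage sketch for the three lock operations above (the range is assumed to
 * be erase-block aligned, as cfi_varsize_frob() requires):
 */
#if 0
	if (mtd_is_locked(mtd, 0x40000, 0x20000) == 1)
		mtd_unlock(mtd, 0x40000, 0x20000);	/* clear lock bits */
	/* ... program or erase the range ... */
	mtd_lock(mtd, 0x40000, 0x20000);		/* re-protect */
#endif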
2217
2218#ifdef CONFIG_MTD_OTP
2219
2220typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2221			u_long data_offset, u_char *buf, u_int size,
2222			u_long prot_offset, u_int groupno, u_int groupsize);
2223
2224static int __xipram
2225do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2226	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2227{
2228	struct cfi_private *cfi = map->fldrv_priv;
2229	int ret;
2230
2231	mutex_lock(&chip->mutex);
2232	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2233	if (ret) {
2234		mutex_unlock(&chip->mutex);
2235		return ret;
2236	}
2237
2238	/* let's ensure we're not reading back cached data from array mode */
2239	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2240
2241	xip_disable(map, chip, chip->start);
2242	if (chip->state != FL_JEDEC_QUERY) {
2243		map_write(map, CMD(0x90), chip->start);
2244		chip->state = FL_JEDEC_QUERY;
2245	}
2246	map_copy_from(map, buf, chip->start + offset, size);
2247	xip_enable(map, chip, chip->start);
2248
2249	/* then ensure we don't keep OTP data in the cache */
2250	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2251
2252	put_chip(map, chip, chip->start);
2253	mutex_unlock(&chip->mutex);
2254	return 0;
2255}
2256
2257static int
2258do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2259	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2260{
2261	int ret;
2262
2263	while (size) {
2264		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2265		int gap = offset - bus_ofs;
2266		int n = min_t(int, size, map_bankwidth(map)-gap);
2267		map_word datum = map_word_ff(map);
2268
2269		datum = map_word_load_partial(map, datum, buf, gap, n);
2270		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2271		if (ret)
2272			return ret;
2273
2274		offset += n;
2275		buf += n;
2276		size -= n;
2277	}
2278
2279	return 0;
2280}
2281
2282static int
2283do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2284	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2285{
2286	struct cfi_private *cfi = map->fldrv_priv;
2287	map_word datum;
2288
2289	/* make sure area matches group boundaries */
2290	if (size != grpsz)
2291		return -EXDEV;
2292
2293	datum = map_word_ff(map);
2294	datum = map_word_clr(map, datum, CMD(1 << grpno));
2295	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2296}
2297
2298static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2299				 size_t *retlen, u_char *buf,
2300				 otp_op_t action, int user_regs)
2301{
2302	struct map_info *map = mtd->priv;
2303	struct cfi_private *cfi = map->fldrv_priv;
2304	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2305	struct flchip *chip;
2306	struct cfi_intelext_otpinfo *otp;
2307	u_long devsize, reg_prot_offset, data_offset;
2308	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2309	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2310	int ret;
2311
2312	*retlen = 0;
2313
2314	/* Check that we actually have some OTP registers */
2315	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2316		return -ENODATA;
2317
2318	/* we need real chips here not virtual ones */
2319	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2320	chip_step = devsize >> cfi->chipshift;
2321	chip_num = 0;
2322
2323	/* Some chips have OTP located in the _top_ partition only.
2324	   For example: Intel 28F256L18T (T means top-parameter device) */
2325	if (cfi->mfr == CFI_MFR_INTEL) {
2326		switch (cfi->id) {
2327		case 0x880b:
2328		case 0x880c:
2329		case 0x880d:
2330			chip_num = chip_step - 1;
2331		}
2332	}
2333
2334	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2335		chip = &cfi->chips[chip_num];
2336		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2337
2338		/* first OTP region */
2339		field = 0;
2340		reg_prot_offset = extp->ProtRegAddr;
2341		reg_fact_groups = 1;
2342		reg_fact_size = 1 << extp->FactProtRegSize;
2343		reg_user_groups = 1;
2344		reg_user_size = 1 << extp->UserProtRegSize;
2345
2346		while (len > 0) {
2347			/* flash geometry fixup */
2348			data_offset = reg_prot_offset + 1;
2349			data_offset *= cfi->interleave * cfi->device_type;
2350			reg_prot_offset *= cfi->interleave * cfi->device_type;
2351			reg_fact_size *= cfi->interleave;
2352			reg_user_size *= cfi->interleave;
2353
2354			if (user_regs) {
2355				groups = reg_user_groups;
2356				groupsize = reg_user_size;
2357				/* skip over factory reg area */
2358				groupno = reg_fact_groups;
2359				data_offset += reg_fact_groups * reg_fact_size;
2360			} else {
2361				groups = reg_fact_groups;
2362				groupsize = reg_fact_size;
2363				groupno = 0;
2364			}
2365
2366			while (len > 0 && groups > 0) {
2367				if (!action) {
2368					/*
2369					 * Special case: if action is NULL
2370					 * we fill buf with otp_info records.
2371					 */
2372					struct otp_info *otpinfo;
2373					map_word lockword;
2374					if (len <= sizeof(struct otp_info))
2375						return -ENOSPC;
2376					len -= sizeof(struct otp_info);
2377					ret = do_otp_read(map, chip,
2378							  reg_prot_offset,
2379							  (u_char *)&lockword,
2380							  map_bankwidth(map),
2381							  0, 0,  0);
2382					if (ret)
2383						return ret;
2384					otpinfo = (struct otp_info *)buf;
2385					otpinfo->start = from;
2386					otpinfo->length = groupsize;
2387					otpinfo->locked =
2388					   !map_word_bitsset(map, lockword,
2389							     CMD(1 << groupno));
2390					from += groupsize;
2391					buf += sizeof(*otpinfo);
2392					*retlen += sizeof(*otpinfo);
2393				} else if (from >= groupsize) {
2394					from -= groupsize;
2395					data_offset += groupsize;
2396				} else {
2397					int size = groupsize;
2398					data_offset += from;
2399					size -= from;
2400					from = 0;
2401					if (size > len)
2402						size = len;
2403					ret = action(map, chip, data_offset,
2404						     buf, size, reg_prot_offset,
2405						     groupno, groupsize);
2406					if (ret < 0)
2407						return ret;
2408					buf += size;
2409					len -= size;
2410					*retlen += size;
2411					data_offset += size;
2412				}
2413				groupno++;
2414				groups--;
2415			}
2416
2417			/* next OTP region */
2418			if (++field == extp->NumProtectionFields)
2419				break;
2420			reg_prot_offset = otp->ProtRegAddr;
2421			reg_fact_groups = otp->FactGroups;
2422			reg_fact_size = 1 << otp->FactProtRegSize;
2423			reg_user_groups = otp->UserGroups;
2424			reg_user_size = 1 << otp->UserProtRegSize;
2425			otp++;
2426		}
2427	}
2428
2429	return 0;
2430}
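
/*
 * The walker above backs the OTP entry points that follow.  User-side
 * sketch for dumping the user protection registers (buffer sizes are
 * assumptions):
 */
#if 0
	struct otp_info info[4];
	u_char data[64];
	size_t retlen;

	/* enumerate the OTP groups (start/length/locked per group)... */
	mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);

	/* ...then read the raw register contents */
	mtd_read_user_prot_reg(mtd, 0, sizeof(data), &retlen, data);
#endif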
2431
2432static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2433					   size_t len, size_t *retlen,
2434					    u_char *buf)
2435{
2436	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2437				     buf, do_otp_read, 0);
2438}
2439
2440static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2441					   size_t len, size_t *retlen,
2442					    u_char *buf)
2443{
2444	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2445				     buf, do_otp_read, 1);
2446}
2447
2448static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2449					    size_t len, size_t *retlen,
2450					     u_char *buf)
2451{
2452	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2453				     buf, do_otp_write, 1);
2454}
2455
2456static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2457					   loff_t from, size_t len)
2458{
2459	size_t retlen;
2460	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2461				     NULL, do_otp_lock, 1);
2462}
2463
2464static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2465					   size_t *retlen, struct otp_info *buf)
2467{
2468	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2469				     NULL, 0);
2470}
2471
2472static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2473					   size_t *retlen, struct otp_info *buf)
2474{
2475	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2476				     NULL, 1);
2477}
2478
2479#endif
2480
2481static void cfi_intelext_save_locks(struct mtd_info *mtd)
2482{
2483	struct mtd_erase_region_info *region;
2484	int block, status, i;
2485	unsigned long adr;
2486	size_t len;
2487
2488	for (i = 0; i < mtd->numeraseregions; i++) {
2489		region = &mtd->eraseregions[i];
2490		if (!region->lockmap)
2491			continue;
2492
2493		for (block = 0; block < region->numblocks; block++){
2494			len = region->erasesize;
2495			adr = region->offset + block * len;
2496
2497			status = cfi_varsize_frob(mtd,
2498					do_getlockstatus_oneblock, adr, len, NULL);
2499			if (status)
2500				set_bit(block, region->lockmap);
2501			else
2502				clear_bit(block, region->lockmap);
2503		}
2504	}
2505}
2506
2507static int cfi_intelext_suspend(struct mtd_info *mtd)
2508{
2509	struct map_info *map = mtd->priv;
2510	struct cfi_private *cfi = map->fldrv_priv;
2511	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2512	int i;
2513	struct flchip *chip;
2514	int ret = 0;
2515
2516	if ((mtd->flags & MTD_POWERUP_LOCK)
2517	    && extp && (extp->FeatureSupport & (1 << 5)))
2518		cfi_intelext_save_locks(mtd);
2519
2520	for (i=0; !ret && i<cfi->numchips; i++) {
2521		chip = &cfi->chips[i];
2522
2523		mutex_lock(&chip->mutex);
2524
		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), chip->start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			break;
		case FL_PM_SUSPENDED:
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Suspend was refused: put the chips we already suspended back */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

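/*
 * Undo the power-up locking for every block whose saved lockmap bit is
 * clear; blocks recorded as locked are left alone, since power up has
 * already re-locked them.
 */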
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for_each_clear_bit(block, region->lockmap, region->numblocks) {
			len = region->erasesize;
			adr = region->offset + block * len;
			cfi_intelext_unlock(mtd, adr, len);
		}
	}
}

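/*
 * Resume handler: the chip may have been power cycled while suspended,
 * so refresh the LH28F640BF partition configuration register, put each
 * suspended chip back into read array mode, wake up any waiters, and
 * finally restore the block locks saved at suspend time.
 */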
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			/* Refresh LH28F640BF Partition Config. Register */
			fixup_LH28F640BF(mtd);
			map_write(map, CMD(0xFF), chip->start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}

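/*
 * Put every chip into read array mode for a soft reboot, so that any
 * bootloader stored in flash remains readable by the next kernel or
 * firmware stage.
 */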
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i = 0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		mutex_unlock(&chip->mutex);
	}

	return 0;
}

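/*
 * Reboot notifier: called on shutdown or restart so the flash is left
 * in read array mode for the next boot.
 */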
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

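/*
 * Teardown: reset the chips, unregister the reboot notifier, and free
 * everything allocated at setup time, including the per-region lock
 * bitmaps used across suspend/resume.
 */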
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");