Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.8.
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
   4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
   5 *
   6 * Copyright (C) 2005, Intec Automation Inc.
   7 * Copyright (C) 2014, Freescale Semiconductor, Inc.
   8 */
   9
  10#include <linux/err.h>
  11#include <linux/errno.h>
  12#include <linux/module.h>
  13#include <linux/device.h>
  14#include <linux/mutex.h>
  15#include <linux/math64.h>
  16#include <linux/sizes.h>
  17#include <linux/slab.h>
  18#include <linux/sort.h>
  19
  20#include <linux/mtd/mtd.h>
  21#include <linux/of_platform.h>
  22#include <linux/sched/task_stack.h>
  23#include <linux/spi/flash.h>
  24#include <linux/mtd/spi-nor.h>
  25
/* Define max times to check status register before we give up. */

/*
 * For everything but full-chip erase; probably could be much smaller, but kept
 * around for safety for now
 */
#define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)

/*
 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
 * for larger flash
 */
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40UL * HZ)

/* Maximum number of JEDEC ID bytes matched against the flash_info table. */
#define SPI_NOR_MAX_ID_LEN	6
/* Largest supported address width in bytes (4-byte addressing). */
#define SPI_NOR_MAX_ADDR_WIDTH	4
  42
/*
 * Layout of one SFDP parameter header, as read from the flash (JESD216).
 * The 16-bit table ID is split across id_lsb and id_msb.
 */
struct sfdp_parameter_header {
	u8		id_lsb;
	u8		minor;
	u8		major;
	u8		length; /* in double words */
	u8		parameter_table_pointer[3]; /* byte address */
	u8		id_msb;
};

#define SFDP_PARAM_HEADER_ID(p)	(((p)->id_msb << 8) | (p)->id_lsb)
#define SFDP_PARAM_HEADER_PTP(p) \
	(((p)->parameter_table_pointer[2] << 16) | \
	 ((p)->parameter_table_pointer[1] <<  8) | \
	 ((p)->parameter_table_pointer[0] <<  0))

#define SFDP_BFPT_ID		0xff00	/* Basic Flash Parameter Table */
#define SFDP_SECTOR_MAP_ID	0xff81	/* Sector Map Table */
#define SFDP_4BAIT_ID		0xff84  /* 4-byte Address Instruction Table */

#define SFDP_SIGNATURE		0x50444653U
#define SFDP_JESD216_MAJOR	1
#define SFDP_JESD216_MINOR	0
#define SFDP_JESD216A_MINOR	5
#define SFDP_JESD216B_MINOR	6

/* Header at the very start of the SFDP area (little-endian on the wire). */
struct sfdp_header {
	u32		signature; /* 0x50444653U <=> "SFDP" */
	u8		minor;
	u8		major;
	u8		nph; /* 0-base number of parameter headers */
	u8		unused;

	/* Basic Flash Parameter Table. */
	struct sfdp_parameter_header	bfpt_header;
};
  78
/* Basic Flash Parameter Table */

/*
 * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
 * They are indexed from 1 but C arrays are indexed from 0.
 */
#define BFPT_DWORD(i)		((i) - 1)
#define BFPT_DWORD_MAX		16

/* The first version of JESD216 defined only 9 DWORDs. */
#define BFPT_DWORD_MAX_JESD216			9

/* 1st DWORD. */
#define BFPT_DWORD1_FAST_READ_1_1_2		BIT(16)
#define BFPT_DWORD1_ADDRESS_BYTES_MASK		GENMASK(18, 17)
#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY	(0x0UL << 17)
#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4	(0x1UL << 17)
#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY	(0x2UL << 17)
#define BFPT_DWORD1_DTR				BIT(19)
#define BFPT_DWORD1_FAST_READ_1_2_2		BIT(20)
#define BFPT_DWORD1_FAST_READ_1_4_4		BIT(21)
#define BFPT_DWORD1_FAST_READ_1_1_4		BIT(22)

/* 5th DWORD. */
#define BFPT_DWORD5_FAST_READ_2_2_2		BIT(0)
#define BFPT_DWORD5_FAST_READ_4_4_4		BIT(4)

/* 11th DWORD. */
#define BFPT_DWORD11_PAGE_SIZE_SHIFT		4
#define BFPT_DWORD11_PAGE_SIZE_MASK		GENMASK(7, 4)

/* 15th DWORD. */

/*
 * (from JESD216 rev B)
 * Quad Enable Requirements (QER):
 * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
 *         reads based on instruction. DQ3/HOLD# functions are hold during
 *         instruction phase.
 * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
 *         two data bytes where bit 1 of the second byte is one.
 *         [...]
 *         Writing only one byte to the status register has the side-effect of
 *         clearing status register 2, including the QE bit. The 100b code is
 *         used if writing one byte to the status register does not modify
 *         status register 2.
 * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
 *         one data byte where bit 6 is one.
 *         [...]
 * - 011b: QE is bit 7 of status register 2. It is set via Write status
 *         register 2 instruction 3Eh with one data byte where bit 7 is one.
 *         [...]
 *         The status register 2 is read using instruction 3Fh.
 * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
 *         two data bytes where bit 1 of the second byte is one.
 *         [...]
 *         In contrast to the 001b code, writing one byte to the status
 *         register does not modify status register 2.
 * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
 *         Read Status instruction 05h. Status register2 is read using
 *         instruction 35h. QE is set via Write Status instruction 01h with
 *         two data bytes where bit 1 of the second byte is one.
 *         [...]
 */
#define BFPT_DWORD15_QER_MASK			GENMASK(22, 20)
#define BFPT_DWORD15_QER_NONE			(0x0UL << 20) /* Micron */
#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY		(0x1UL << 20)
#define BFPT_DWORD15_QER_SR1_BIT6		(0x2UL << 20) /* Macronix */
#define BFPT_DWORD15_QER_SR2_BIT7		(0x3UL << 20)
#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD		(0x4UL << 20)
#define BFPT_DWORD15_QER_SR2_BIT1		(0x5UL << 20) /* Spansion */

/* In-memory copy of the BFPT, already converted to CPU byte order. */
struct sfdp_bfpt {
	u32	dwords[BFPT_DWORD_MAX];
};
 154
/**
 * struct spi_nor_fixups - SPI NOR fixup hooks
 * @default_init: called after default flash parameters init. Used to tweak
 *                flash parameters when information provided by the flash_info
 *                table is incomplete or wrong.
 * @post_bfpt: called after the BFPT table has been parsed. Returns 0 on
 *             success, -errno otherwise.
 * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
 *             that do not support RDSFDP). Typically used to tweak various
 *             parameters that could not be extracted by other means (i.e.
 *             when information provided by the SFDP/flash_info tables are
 *             incomplete or wrong).
 *
 * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
 * table is broken or not available.
 */
struct spi_nor_fixups {
	void (*default_init)(struct spi_nor *nor);
	int (*post_bfpt)(struct spi_nor *nor,
			 const struct sfdp_parameter_header *bfpt_header,
			 const struct sfdp_bfpt *bfpt,
			 struct spi_nor_flash_parameter *params);
	void (*post_sfdp)(struct spi_nor *nor);
};
 178
/*
 * Static description of one supported flash part: ID bytes used for probing,
 * geometry, and capability flags consumed during spi_nor_scan().
 */
struct flash_info {
	char		*name;

	/*
	 * This array stores the ID bytes.
	 * The first three bytes are the JEDEC ID.
	 * JEDEC ID zero means "no ID" (mostly older chips).
	 */
	u8		id[SPI_NOR_MAX_ID_LEN];
	u8		id_len;

	/* The size listed here is what works with SPINOR_OP_SE, which isn't
	 * necessarily called a "sector" by the vendor.
	 */
	unsigned	sector_size;
	u16		n_sectors;

	u16		page_size;
	u16		addr_width;

	u16		flags;
#define SECT_4K			BIT(0)	/* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE	BIT(1)	/* No erase command needed */
#define SST_WRITE		BIT(2)	/* use SST byte programming */
#define SPI_NOR_NO_FR		BIT(3)	/* Can't do fastread */
#define SECT_4K_PMC		BIT(4)	/* SPINOR_OP_BE_4K_PMC works uniformly */
#define SPI_NOR_DUAL_READ	BIT(5)	/* Flash supports Dual Read */
#define SPI_NOR_QUAD_READ	BIT(6)	/* Flash supports Quad Read */
#define USE_FSR			BIT(7)	/* use flag status register */
#define SPI_NOR_HAS_LOCK	BIT(8)	/* Flash supports lock/unlock via SR */
#define SPI_NOR_HAS_TB		BIT(9)	/*
					 * Flash SR has Top/Bottom (TB) protect
					 * bit. Must be used with
					 * SPI_NOR_HAS_LOCK.
					 */
#define SPI_NOR_XSR_RDY		BIT(10)	/*
					 * S3AN flashes have specific opcode to
					 * read the status register.
					 * Flags SPI_NOR_XSR_RDY and SPI_S3AN
					 * use the same bit as one implies the
					 * other, but we will get rid of
					 * SPI_S3AN soon.
					 */
#define	SPI_S3AN		BIT(10)	/*
					 * Xilinx Spartan 3AN In-System Flash
					 * (MFR cannot be used for probing
					 * because it has the same value as
					 * ATMEL flashes)
					 */
#define SPI_NOR_4B_OPCODES	BIT(11)	/*
					 * Use dedicated 4byte address op codes
					 * to support memory size above 128Mib.
					 */
#define NO_CHIP_ERASE		BIT(12) /* Chip does not support chip erase */
#define SPI_NOR_SKIP_SFDP	BIT(13)	/* Skip parsing of SFDP tables */
#define USE_CLSR		BIT(14)	/* use CLSR command */
#define SPI_NOR_OCTAL_READ	BIT(15)	/* Flash supports Octal Read */

	/* Part specific fixup hooks. */
	const struct spi_nor_fixups *fixups;
};

/* First ID byte is the JEDEC manufacturer ID. */
#define JEDEC_MFR(info)	((info)->id[0])
 242
 243/**
 244 * spi_nor_spimem_xfer_data() - helper function to read/write data to
 245 *                              flash's memory region
 246 * @nor:        pointer to 'struct spi_nor'
 247 * @op:         pointer to 'struct spi_mem_op' template for transfer
 248 *
 249 * Return: number of bytes transferred on success, -errno otherwise
 250 */
static ssize_t spi_nor_spimem_xfer_data(struct spi_nor *nor,
					struct spi_mem_op *op)
{
	bool usebouncebuf = false;
	void *rdbuf = NULL;
	const void *buf;
	int ret;

	if (op->data.dir == SPI_MEM_DATA_IN)
		buf = op->data.buf.in;
	else
		buf = op->data.buf.out;

	/*
	 * Buffers on the stack or without a valid linear mapping cannot be
	 * handed to the spi-mem layer directly; go through nor->bouncebuf.
	 */
	if (object_is_on_stack(buf) || !virt_addr_valid(buf))
		usebouncebuf = true;

	if (usebouncebuf) {
		/* The bounce buffer caps how much we can transfer at once. */
		if (op->data.nbytes > nor->bouncebuf_size)
			op->data.nbytes = nor->bouncebuf_size;

		if (op->data.dir == SPI_MEM_DATA_IN) {
			/* Remember the caller's buffer for the copy-back. */
			rdbuf = op->data.buf.in;
			op->data.buf.in = nor->bouncebuf;
		} else {
			op->data.buf.out = nor->bouncebuf;
			memcpy(nor->bouncebuf, buf,
			       op->data.nbytes);
		}
	}

	/* Let the controller shrink op->data.nbytes to what it can do. */
	ret = spi_mem_adjust_op_size(nor->spimem, op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(nor->spimem, op);
	if (ret)
		return ret;

	if (usebouncebuf && op->data.dir == SPI_MEM_DATA_IN)
		memcpy(rdbuf, nor->bouncebuf, op->data.nbytes);

	/* May be less than requested: callers must handle short transfers. */
	return op->data.nbytes;
}
 294
 295/**
 296 * spi_nor_spimem_read_data() - read data from flash's memory region via
 297 *                              spi-mem
 298 * @nor:        pointer to 'struct spi_nor'
 299 * @from:       offset to read from
 300 * @len:        number of bytes to read
 301 * @buf:        pointer to dst buffer
 302 *
 303 * Return: number of bytes read successfully, -errno otherwise
 304 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
			   SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 1));

	/* get transfer protocols. */
	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
	/* Dummy cycles are clocked out at the address-phase bus width. */
	op.dummy.buswidth = op.addr.buswidth;
	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;

	return spi_nor_spimem_xfer_data(nor, &op);
}
 325
 326/**
 327 * spi_nor_read_data() - read data from flash memory
 328 * @nor:        pointer to 'struct spi_nor'
 329 * @from:       offset to read from
 330 * @len:        number of bytes to read
 331 * @buf:        pointer to dst buffer
 332 *
 333 * Return: number of bytes read successfully, -errno otherwise
 334 */
 335static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
 336				 u8 *buf)
 337{
 338	if (nor->spimem)
 339		return spi_nor_spimem_read_data(nor, from, len, buf);
 340
 341	return nor->read(nor, from, len, buf);
 342}
 343
 344/**
 345 * spi_nor_spimem_write_data() - write data to flash memory via
 346 *                               spi-mem
 347 * @nor:        pointer to 'struct spi_nor'
 348 * @to:         offset to write to
 349 * @len:        number of bytes to write
 350 * @buf:        pointer to src buffer
 351 *
 352 * Return: number of bytes written successfully, -errno otherwise
 353 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
			   SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 1));

	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	/*
	 * SST AAI word program auto-increments the address: only the first
	 * AAI command of a sequence carries an address cycle.
	 */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	return spi_nor_spimem_xfer_data(nor, &op);
}
 372
 373/**
 374 * spi_nor_write_data() - write data to flash memory
 375 * @nor:        pointer to 'struct spi_nor'
 376 * @to:         offset to write to
 377 * @len:        number of bytes to write
 378 * @buf:        pointer to src buffer
 379 *
 380 * Return: number of bytes written successfully, -errno otherwise
 381 */
 382static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
 383				  const u8 *buf)
 384{
 385	if (nor->spimem)
 386		return spi_nor_spimem_write_data(nor, to, len, buf);
 387
 388	return nor->write(nor, to, len, buf);
 389}
 390
 391/*
 392 * Read the status register, returning its value in the location
 393 * Return the status register value.
 394 * Returns negative if error occurred.
 395 */
 396static int read_sr(struct spi_nor *nor)
 397{
 398	int ret;
 399
 400	if (nor->spimem) {
 401		struct spi_mem_op op =
 402			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
 403				   SPI_MEM_OP_NO_ADDR,
 404				   SPI_MEM_OP_NO_DUMMY,
 405				   SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
 406
 407		ret = spi_mem_exec_op(nor->spimem, &op);
 408	} else {
 409		ret = nor->read_reg(nor, SPINOR_OP_RDSR, nor->bouncebuf, 1);
 410	}
 411
 412	if (ret < 0) {
 413		pr_err("error %d reading SR\n", (int) ret);
 414		return ret;
 415	}
 416
 417	return nor->bouncebuf[0];
 418}
 419
 420/*
 421 * Read the flag status register, returning its value in the location
 422 * Return the status register value.
 423 * Returns negative if error occurred.
 424 */
 425static int read_fsr(struct spi_nor *nor)
 426{
 427	int ret;
 428
 429	if (nor->spimem) {
 430		struct spi_mem_op op =
 431			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
 432				   SPI_MEM_OP_NO_ADDR,
 433				   SPI_MEM_OP_NO_DUMMY,
 434				   SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
 435
 436		ret = spi_mem_exec_op(nor->spimem, &op);
 437	} else {
 438		ret = nor->read_reg(nor, SPINOR_OP_RDFSR, nor->bouncebuf, 1);
 439	}
 440
 441	if (ret < 0) {
 442		pr_err("error %d reading FSR\n", ret);
 443		return ret;
 444	}
 445
 446	return nor->bouncebuf[0];
 447}
 448
 449/*
 450 * Read configuration register, returning its value in the
 451 * location. Return the configuration register value.
 452 * Returns negative if error occurred.
 453 */
static int read_cr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/* Fall back to the controller driver's register hook. */
		ret = nor->read_reg(nor, SPINOR_OP_RDCR, nor->bouncebuf, 1);
	}

	if (ret < 0) {
		dev_err(nor->dev, "error %d reading CR\n", ret);
		return ret;
	}

	/* Single register byte lands in the bounce buffer. */
	return nor->bouncebuf[0];
}
 477
 478/*
 479 * Write status register 1 byte
 480 * Returns negative if error occurred.
 481 */
static int write_sr(struct spi_nor *nor, u8 val)
{
	/* Stage the value in the DMA-safe bounce buffer. */
	nor->bouncebuf[0] = val;
	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));

		return spi_mem_exec_op(nor->spimem, &op);
	}

	return nor->write_reg(nor, SPINOR_OP_WRSR, nor->bouncebuf, 1);
}
 497
 498/*
 499 * Set write enable latch with Write Enable command.
 500 * Returns negative if error occurred.
 501 */
 502static int write_enable(struct spi_nor *nor)
 503{
 504	if (nor->spimem) {
 505		struct spi_mem_op op =
 506			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
 507				   SPI_MEM_OP_NO_ADDR,
 508				   SPI_MEM_OP_NO_DUMMY,
 509				   SPI_MEM_OP_NO_DATA);
 510
 511		return spi_mem_exec_op(nor->spimem, &op);
 512	}
 513
 514	return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
 515}
 516
 517/*
 518 * Send write disable instruction to the chip.
 519 */
 520static int write_disable(struct spi_nor *nor)
 521{
 522	if (nor->spimem) {
 523		struct spi_mem_op op =
 524			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
 525				   SPI_MEM_OP_NO_ADDR,
 526				   SPI_MEM_OP_NO_DUMMY,
 527				   SPI_MEM_OP_NO_DATA);
 528
 529		return spi_mem_exec_op(nor->spimem, &op);
 530	}
 531
 532	return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
 533}
 534
/* Retrieve the spi_nor stashed in mtd->priv at registration time. */
static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
{
	return mtd->priv;
}
 539
 540
 541static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
 542{
 543	size_t i;
 544
 545	for (i = 0; i < size; i++)
 546		if (table[i][0] == opcode)
 547			return table[i][1];
 548
 549	/* No conversion found, keep input op code. */
 550	return opcode;
 551}
 552
/* Map a 3-byte-address read opcode to its dedicated 4-byte-address variant. */
static u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
 573
/* Map a 3-byte-address program opcode to its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
 587
/* Map a 3-byte-address erase opcode to its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
 599
/* Switch the read/program/erase opcodes to their 4-byte-address variants. */
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	/* Non-uniform erase maps carry per-type opcodes: convert them too. */
	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->params.erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}
 618
 619static int macronix_set_4byte(struct spi_nor *nor, bool enable)
 620{
 621	if (nor->spimem) {
 622		struct spi_mem_op op =
 623			SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
 624						  SPINOR_OP_EN4B :
 625						  SPINOR_OP_EX4B,
 626						  1),
 627				  SPI_MEM_OP_NO_ADDR,
 628				  SPI_MEM_OP_NO_DUMMY,
 629				  SPI_MEM_OP_NO_DATA);
 630
 631		return spi_mem_exec_op(nor->spimem, &op);
 632	}
 633
 634	return nor->write_reg(nor, enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B,
 635			      NULL, 0);
 636}
 637
 638static int st_micron_set_4byte(struct spi_nor *nor, bool enable)
 639{
 640	int ret;
 641
 642	write_enable(nor);
 643	ret = macronix_set_4byte(nor, enable);
 644	write_disable(nor);
 645
 646	return ret;
 647}
 648
/* Spansion: 4-byte mode is bit 7 (EXTADD) of the Bank Register (BRWR). */
static int spansion_set_4byte(struct spi_nor *nor, bool enable)
{
	nor->bouncebuf[0] = enable << 7;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));

		return spi_mem_exec_op(nor->spimem, &op);
	}

	return nor->write_reg(nor, SPINOR_OP_BRWR, nor->bouncebuf, 1);
}
 665
/* Write the Extended Address Register (selects the active 16M bank). */
static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
{
	nor->bouncebuf[0] = ear;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));

		return spi_mem_exec_op(nor->spimem, &op);
	}

	return nor->write_reg(nor, SPINOR_OP_WREAR, nor->bouncebuf, 1);
}
 682
/* Winbond: EN4B/EX4B like Macronix, plus an EAR fixup when leaving 4B mode. */
static int winbond_set_4byte(struct spi_nor *nor, bool enable)
{
	int ret;

	ret = macronix_set_4byte(nor, enable);
	if (ret || enable)
		return ret;

	/*
	 * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address
	 * Register to be set to 1, so all 3-byte-address reads come from the
	 * second 16M. We must clear the register to enable normal behavior.
	 */
	write_enable(nor);
	ret = spi_nor_write_ear(nor, 0);
	write_disable(nor);

	return ret;
}
 702
/* Read the S3AN-specific status register (XRDSR) into *sr. */
static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
{
	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 1));

		return spi_mem_exec_op(nor->spimem, &op);
	}

	return nor->read_reg(nor, SPINOR_OP_XRDSR, sr, 1);
}
 717
 718static int s3an_sr_ready(struct spi_nor *nor)
 719{
 720	int ret;
 721
 722	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
 723	if (ret < 0) {
 724		dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
 725		return ret;
 726	}
 727
 728	return !!(nor->bouncebuf[0] & XSR_RDY);
 729}
 730
 731static int spi_nor_clear_sr(struct spi_nor *nor)
 732{
 733	if (nor->spimem) {
 734		struct spi_mem_op op =
 735			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
 736				   SPI_MEM_OP_NO_ADDR,
 737				   SPI_MEM_OP_NO_DUMMY,
 738				   SPI_MEM_OP_NO_DATA);
 739
 740		return spi_mem_exec_op(nor->spimem, &op);
 741	}
 742
 743	return nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
 744}
 745
/* Return 1 if the flash is idle, 0 if busy, -errno on error or E/P failure. */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
	int sr = read_sr(nor);
	if (sr < 0)
		return sr;

	/* Parts with CLSR latch erase/program errors in the SR. */
	if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
		if (sr & SR_E_ERR)
			dev_err(nor->dev, "Erase Error occurred\n");
		else
			dev_err(nor->dev, "Programming Error occurred\n");

		/* Error bits are sticky: clear before reporting failure. */
		spi_nor_clear_sr(nor);
		return -EIO;
	}

	return !(sr & SR_WIP);
}
 764
 765static int spi_nor_clear_fsr(struct spi_nor *nor)
 766{
 767	if (nor->spimem) {
 768		struct spi_mem_op op =
 769			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
 770				   SPI_MEM_OP_NO_ADDR,
 771				   SPI_MEM_OP_NO_DUMMY,
 772				   SPI_MEM_OP_NO_DATA);
 773
 774		return spi_mem_exec_op(nor->spimem, &op);
 775	}
 776
 777	return nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
 778}
 779
/* Return non-zero if FSR reports ready, 0 if busy, -errno on error/failure. */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
	int fsr = read_fsr(nor);
	if (fsr < 0)
		return fsr;

	if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
		if (fsr & FSR_E_ERR)
			dev_err(nor->dev, "Erase operation failed.\n");
		else
			dev_err(nor->dev, "Program operation failed.\n");

		if (fsr & FSR_PT_ERR)
			dev_err(nor->dev,
			"Attempted to modify a protected sector.\n");

		/* Error bits are sticky: clear them before reporting. */
		spi_nor_clear_fsr(nor);
		return -EIO;
	}

	return fsr & FSR_READY;
}
 802
/* Combine SR (or XRDSR) and optional FSR polls: 1 ready, 0 busy, -errno. */
static int spi_nor_ready(struct spi_nor *nor)
{
	int sr, fsr;

	/* S3AN parts use their dedicated XRDSR opcode instead of RDSR. */
	if (nor->flags & SNOR_F_READY_XSR_RDY)
		sr = s3an_sr_ready(nor);
	else
		sr = spi_nor_sr_ready(nor);
	if (sr < 0)
		return sr;
	/* Parts without FSR are treated as FSR-ready. */
	fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
	if (fsr < 0)
		return fsr;
	return sr && fsr;
}
 818
/*
 * Service routine to read status register until ready, or timeout occurs.
 * Returns non-zero if error.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		/*
		 * Flag the deadline first but still poll once more below, so
		 * a long scheduling delay cannot fail an operation that has
		 * actually completed.
		 */
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_err(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}
 848
/* Wait for readiness with the default (non-chip-erase) timeout. */
static int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}
 854
 855/*
 856 * Erase the whole flash memory
 857 *
 858 * Returns 0 if successful, non-zero otherwise.
 859 */
static int erase_chip(struct spi_nor *nor)
{
	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		return spi_mem_exec_op(nor->spimem, &op);
	}

	/* Chip erase is a bare opcode, so the register-write hook fits. */
	return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
}
 876
 877static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
 878{
 879	int ret = 0;
 880
 881	mutex_lock(&nor->lock);
 882
 883	if (nor->prepare) {
 884		ret = nor->prepare(nor, ops);
 885		if (ret) {
 886			dev_err(nor->dev, "failed in the preparation.\n");
 887			mutex_unlock(&nor->lock);
 888			return ret;
 889		}
 890	}
 891	return ret;
 892}
 893
/* Undo spi_nor_lock_and_prep(): run the unprepare hook, then unlock. */
static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
	if (nor->unprepare)
		nor->unprepare(nor, ops);
	mutex_unlock(&nor->lock);
}
 900
 901/*
 902 * This code converts an address to the Default Address Mode, that has non
 903 * power of two page sizes. We must support this mode because it is the default
 904 * mode supported by Xilinx tools, it can access the whole flash area and
 905 * changing over to the Power-of-two mode is irreversible and corrupts the
 906 * original data.
 907 * Addr can safely be unsigned int, the biggest S3AN device is smaller than
 908 * 4 MiB.
 909 */
 910static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
 911{
 912	u32 offset, page;
 913
 914	offset = addr % nor->page_size;
 915	page = addr / nor->page_size;
 916	page <<= (nor->page_size > 512) ? 10 : 9;
 917
 918	return page | offset;
 919}
 920
 921static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
 922{
 923	if (!nor->params.convert_addr)
 924		return addr;
 925
 926	return nor->params.convert_addr(nor, addr);
 927}
 928
 929/*
 930 * Initiate the erasure of a single sector
 931 */
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	addr = spi_nor_convert_addr(nor, addr);

	/* A driver-provided erase hook takes precedence. */
	if (nor->erase)
		return nor->erase(nor, addr);

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
				   SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		return spi_mem_exec_op(nor->spimem, &op);
	}

	/*
	 * Default implementation, if driver doesn't have a specialized HW
	 * control
	 */
	/* Serialize the address big-endian into the bounce buffer. */
	for (i = nor->addr_width - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	return nor->write_reg(nor, nor->erase_opcode, nor->bouncebuf,
			      nor->addr_width);
}
 963
 964/**
 965 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
 966 * @erase:	pointer to a structure that describes a SPI NOR erase type
 967 * @dividend:	dividend value
 968 * @remainder:	pointer to u32 remainder (will be updated)
 969 *
 970 * Return: the result of the division
 971 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	/* So divide/mod reduce to shift/mask using the precomputed fields. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}
 979
 980/**
 981 * spi_nor_find_best_erase_type() - find the best erase type for the given
 982 *				    offset in the serial flash memory and the
 983 *				    number of bytes to erase. The region in
 984 *				    which the address fits is expected to be
 985 *				    provided.
 986 * @map:	the erase map of the SPI NOR
 987 * @region:	pointer to a structure that describes a SPI NOR erase region
 988 * @addr:	offset in the serial flash memory
 989 * @len:	number of bytes to erase
 990 *
 991 * Return: a pointer to the best fitted erase type, NULL otherwise.
 992 */
 993static const struct spi_nor_erase_type *
 994spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
 995			     const struct spi_nor_erase_region *region,
 996			     u64 addr, u32 len)
 997{
 998	const struct spi_nor_erase_type *erase;
 999	u32 rem;
1000	int i;
1001	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
1002
1003	/*
1004	 * Erase types are ordered by size, with the smallest erase type at
1005	 * index 0.
1006	 */
1007	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
1008		/* Does the erase region support the tested erase type? */
1009		if (!(erase_mask & BIT(i)))
1010			continue;
1011
1012		erase = &map->erase_type[i];
1013
1014		/* Don't erase more than what the user has asked for. */
1015		if (erase->size > len)
1016			continue;
1017
1018		/* Alignment is not mandatory for overlaid regions */
1019		if (region->offset & SNOR_OVERLAID_REGION)
1020			return erase;
1021
1022		spi_nor_div_by_erase_size(erase, addr, &rem);
1023		if (rem)
1024			continue;
1025		else
1026			return erase;
1027	}
1028
1029	return NULL;
1030}
1031
1032/**
1033 * spi_nor_region_next() - get the next spi nor region
1034 * @region:	pointer to a structure that describes a SPI NOR erase region
1035 *
1036 * Return: the next spi nor region or NULL if last region.
1037 */
1038static struct spi_nor_erase_region *
1039spi_nor_region_next(struct spi_nor_erase_region *region)
1040{
1041	if (spi_nor_region_is_last(region))
1042		return NULL;
1043	region++;
1044	return region;
1045}
1046
1047/**
1048 * spi_nor_find_erase_region() - find the region of the serial flash memory in
1049 *				 which the offset fits
1050 * @map:	the erase map of the SPI NOR
1051 * @addr:	offset in the serial flash memory
1052 *
1053 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
1054 *	   otherwise.
1055 */
1056static struct spi_nor_erase_region *
1057spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
1058{
1059	struct spi_nor_erase_region *region = map->regions;
1060	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1061	u64 region_end = region_start + region->size;
1062
1063	while (addr < region_start || addr >= region_end) {
1064		region = spi_nor_region_next(region);
1065		if (!region)
1066			return ERR_PTR(-EINVAL);
1067
1068		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1069		region_end = region_start + region->size;
1070	}
1071
1072	return region;
1073}
1074
1075/**
1076 * spi_nor_init_erase_cmd() - initialize an erase command
1077 * @region:	pointer to a structure that describes a SPI NOR erase region
1078 * @erase:	pointer to a structure that describes a SPI NOR erase type
1079 *
1080 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
1081 *	   otherwise.
1082 */
1083static struct spi_nor_erase_command *
1084spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1085		       const struct spi_nor_erase_type *erase)
1086{
1087	struct spi_nor_erase_command *cmd;
1088
1089	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1090	if (!cmd)
1091		return ERR_PTR(-ENOMEM);
1092
1093	INIT_LIST_HEAD(&cmd->list);
1094	cmd->opcode = erase->opcode;
1095	cmd->count = 1;
1096
1097	if (region->offset & SNOR_OVERLAID_REGION)
1098		cmd->size = region->size;
1099	else
1100		cmd->size = erase->size;
1101
1102	return cmd;
1103}
1104
1105/**
1106 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
1107 * @erase_list:	list of erase commands
1108 */
1109static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1110{
1111	struct spi_nor_erase_command *cmd, *next;
1112
1113	list_for_each_entry_safe(cmd, next, erase_list, list) {
1114		list_del(&cmd->list);
1115		kfree(cmd);
1116	}
1117}
1118
1119/**
1120 * spi_nor_init_erase_cmd_list() - initialize erase command list
1121 * @nor:	pointer to a 'struct spi_nor'
1122 * @erase_list:	list of erase commands to be executed once we validate that the
1123 *		erase can be performed
1124 * @addr:	offset in the serial flash memory
1125 * @len:	number of bytes to erase
1126 *
1127 * Builds the list of best fitted erase commands and verifies if the erase can
1128 * be performed.
1129 *
1130 * Return: 0 on success, -errno otherwise.
1131 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params.erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	/* Locate the region containing the first address to erase. */
	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		/* No fitting erase type means the range cannot be erased. */
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			goto destroy_erase_cmd_list;

		/*
		 * Start a new command when the erase type changes; repeats
		 * of the same type are coalesced by bumping cmd->count.
		 * Overlaid regions always get their own command because
		 * their cmd->size is the region size, not the erase size.
		 */
		if (prev_erase != erase ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		/* Crossed into the next region? Running out is an error. */
		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	/* On failure no commands must leak, partial list included. */
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}
1186
1187/**
1188 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
1189 * @nor:	pointer to a 'struct spi_nor'
1190 * @addr:	offset in the serial flash memory
1191 * @len:	number of bytes to erase
1192 *
1193 * Build a list of best fitted erase commands and execute it once we validate
1194 * that the erase can be performed.
1195 *
1196 * Return: 0 on success, -errno otherwise.
1197 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	/* Validate the range and build the best-fit command list first. */
	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		/* Issue cmd->count contiguous erases of cmd->size bytes. */
		while (cmd->count) {
			write_enable(nor);

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;
		}
		/* Command fully executed; release it. */
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	/* Frees all remaining commands, including the current *cmd. */
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}
1234
1235/*
1236 * Erase an address range on the nor chip.  The address range may extend
1237 * one or more erase sectors.  Return an error is there is a problem erasing.
1238 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
			(long long)instr->len);

	/* Uniform-erase layouts require an erase-block aligned length. */
	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
		unsigned long timeout;

		write_enable(nor);

		if (erase_chip(nor)) {
			ret = -EIO;
			goto erase_err;
		}

		/*
		 * Scale the timeout linearly with the size of the flash, with
		 * a minimum calibrated to an old 2MB flash. We could try to
		 * pull these from CFI/SFDP, but these values should be good
		 * enough for now.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			write_enable(nor);

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	/* NOTE(review): write_disable() result is ignored — confirm OK. */
	write_disable(nor);

erase_err:
	/* Success also flows through here, with ret == 0. */
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);

	return ret;
}
1322
1323/* Write status register and ensure bits in mask match written values */
1324static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
1325{
1326	int ret;
1327
1328	write_enable(nor);
1329	ret = write_sr(nor, status_new);
1330	if (ret)
1331		return ret;
1332
1333	ret = spi_nor_wait_till_ready(nor);
1334	if (ret)
1335		return ret;
1336
1337	ret = read_sr(nor);
1338	if (ret < 0)
1339		return ret;
1340
1341	return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
1342}
1343
1344static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
1345				 uint64_t *len)
1346{
1347	struct mtd_info *mtd = &nor->mtd;
1348	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1349	int shift = ffs(mask) - 1;
1350	int pow;
1351
1352	if (!(sr & mask)) {
1353		/* No protection */
1354		*ofs = 0;
1355		*len = 0;
1356	} else {
1357		pow = ((sr & mask) ^ mask) >> shift;
1358		*len = mtd->size >> pow;
1359		if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
1360			*ofs = 0;
1361		else
1362			*ofs = mtd->size - *len;
1363	}
1364}
1365
1366/*
1367 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
1368 * @locked is false); 0 otherwise
1369 */
1370static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1371				    u8 sr, bool locked)
1372{
1373	loff_t lock_offs;
1374	uint64_t lock_len;
1375
1376	if (!len)
1377		return 1;
1378
1379	stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
1380
1381	if (locked)
1382		/* Requested range is a sub-range of locked range */
1383		return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
1384	else
1385		/* Requested range does not overlap with locked range */
1386		return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
1387}
1388
1389static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1390			    u8 sr)
1391{
1392	return stm_check_lock_status_sr(nor, ofs, len, sr, true);
1393}
1394
1395static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1396			      u8 sr)
1397{
1398	return stm_check_lock_status_sr(nor, ofs, len, sr, false);
1399}
1400
1401/*
1402 * Lock a region of the flash. Compatible with ST Micro and similar flash.
1403 * Supports the block protection bits BP{0,1,2} in the status register
1404 * (SR). Does not support these features found in newer SR bitfields:
1405 *   - SEC: sector/block protect - only handle SEC=0 (block protect)
1406 *   - CMP: complement protect - only support CMP=0 (range is not complemented)
1407 *
1408 * Support for the following is provided conditionally for some flash:
1409 *   - TB: top/bottom protect
1410 *
1411 * Sample table portion for 8MB flash (Winbond w25q64fw):
1412 *
1413 *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
1414 *  --------------------------------------------------------------------------
1415 *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
1416 *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
1417 *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
1418 *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
1419 *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
1420 *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
1421 *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
1422 *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
1423 *  ------|-------|-------|-------|-------|---------------|-------------------
1424 *    0   |   1   |   0   |   0   |   1   |  128 KB       | Lower 1/64
1425 *    0   |   1   |   0   |   1   |   0   |  256 KB       | Lower 1/32
1426 *    0   |   1   |   0   |   1   |   1   |  512 KB       | Lower 1/16
1427 *    0   |   1   |   1   |   0   |   0   |  1 MB         | Lower 1/8
1428 *    0   |   1   |   1   |   0   |   1   |  2 MB         | Lower 1/4
1429 *    0   |   1   |   1   |   1   |   0   |  4 MB         | Lower 1/2
1430 *
1431 * Returns negative on errors, 0 on success.
1432 */
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	int status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	status_old = read_sr(nor);
	if (status_old < 0)
		return status_old;

	/* If nothing in our range is unlocked, we don't need to do anything */
	if (stm_is_locked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is unlocked, we can't use 'bottom' protection */
	if (!stm_is_locked_sr(nor, 0, ofs, status_old))
		can_be_bottom = false;

	/* If anything above us is unlocked, we can't use 'top' protection */
	if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
				status_old))
		can_be_top = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should end up locked */
	if (use_top)
		lock_len = mtd->size - ofs;
	else
		lock_len = ofs + len;

	/*
	 * Need smallest pow such that:
	 *
	 *   1 / (2^pow) <= (len / size)
	 *
	 * so (assuming power-of-2 size) we do:
	 *
	 *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
	 */
	pow = ilog2(mtd->size) - ilog2(lock_len);
	/* Larger BP field encodes a larger protected fraction (see table). */
	val = mask - (pow << shift);
	if (val & ~mask)
		return -EINVAL;
	/* Don't "lock" with no region! */
	if (!(val & mask))
		return -EINVAL;

	/* Rebuild SR: clear old BP/TB bits, then set the new BP value. */
	status_new = (status_old & ~mask & ~SR_TB) | val;

	/* Disallow further writes if WP pin is asserted */
	status_new |= SR_SRWD;

	if (!use_top)
		status_new |= SR_TB;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not unlock other areas */
	if ((status_new & mask) < (status_old & mask))
		return -EINVAL;

	return write_sr_and_check(nor, status_new, mask);
}
1507
1508/*
1509 * Unlock a region of the flash. See stm_lock() for more info
1510 *
1511 * Returns negative on errors, 0 on success.
1512 */
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	int status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	status_old = read_sr(nor);
	if (status_old < 0)
		return status_old;

	/* If nothing in our range is locked, we don't need to do anything */
	if (stm_is_unlocked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is locked, we can't use 'top' protection */
	if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
		can_be_top = false;

	/* If anything above us is locked, we can't use 'bottom' protection */
	if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
				status_old))
		can_be_bottom = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should remain locked */
	if (use_top)
		lock_len = mtd->size - (ofs + len);
	else
		lock_len = ofs;

	/*
	 * Need largest pow such that:
	 *
	 *   1 / (2^pow) >= (len / size)
	 *
	 * so (assuming power-of-2 size) we do:
	 *
	 *   pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
	 */
	/*
	 * NOTE(review): pow is computed even when lock_len == 0, in which
	 * case order_base_2(0) feeds it; the result is discarded below, but
	 * confirm order_base_2(0) is well-defined on all supported configs.
	 */
	pow = ilog2(mtd->size) - order_base_2(lock_len);
	if (lock_len == 0) {
		val = 0; /* fully unlocked */
	} else {
		val = mask - (pow << shift);
		/* Some power-of-two sizes are not supported */
		if (val & ~mask)
			return -EINVAL;
	}

	/* Rebuild SR: clear old BP/TB bits, then set the new BP value. */
	status_new = (status_old & ~mask & ~SR_TB) | val;

	/* Don't protect status register if we're fully unlocked */
	if (lock_len == 0)
		status_new &= ~SR_SRWD;

	if (!use_top)
		status_new |= SR_TB;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not lock other areas */
	if ((status_new & mask) > (status_old & mask))
		return -EINVAL;

	return write_sr_and_check(nor, status_new, mask);
}
1590
1591/*
1592 * Check if a region of the flash is (completely) locked. See stm_lock() for
1593 * more info.
1594 *
1595 * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
1596 * negative on errors.
1597 */
1598static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1599{
1600	int status;
1601
1602	status = read_sr(nor);
1603	if (status < 0)
1604		return status;
1605
1606	return stm_is_locked_sr(nor, ofs, len, status);
1607}
1608
/* SR BP-bit based locking operations (ST Micro style, see stm_lock()). */
static const struct spi_nor_locking_ops stm_locking_ops = {
	.lock = stm_lock,
	.unlock = stm_unlock,
	.is_locked = stm_is_locked,
};
1614
1615static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1616{
1617	struct spi_nor *nor = mtd_to_spi_nor(mtd);
1618	int ret;
1619
1620	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
1621	if (ret)
1622		return ret;
1623
1624	ret = nor->params.locking_ops->lock(nor, ofs, len);
1625
1626	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
1627	return ret;
1628}
1629
1630static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1631{
1632	struct spi_nor *nor = mtd_to_spi_nor(mtd);
1633	int ret;
1634
1635	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1636	if (ret)
1637		return ret;
1638
1639	ret = nor->params.locking_ops->unlock(nor, ofs, len);
1640
1641	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
1642	return ret;
1643}
1644
1645static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1646{
1647	struct spi_nor *nor = mtd_to_spi_nor(mtd);
1648	int ret;
1649
1650	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1651	if (ret)
1652		return ret;
1653
1654	ret = nor->params.locking_ops->is_locked(nor, ofs, len);
1655
1656	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
1657	return ret;
1658}
1659
1660/*
1661 * Write status Register and configuration register with 2 bytes
1662 * The first byte will be written to the status register, while the
1663 * second byte will be written to the configuration register.
1664 * Return negative if error occurred.
1665 */
static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
{
	int ret;

	write_enable(nor);

	if (nor->spimem) {
		/* WRSR with two data bytes: SR first, then CR. */
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(2, sr_cr, 1));

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/* Legacy controller path via the raw register-write hook. */
		ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
	}

	if (ret < 0) {
		/* NOTE(review): the original error code is masked here. */
		dev_err(nor->dev,
			"error while writing configuration register\n");
		return -EINVAL;
	}

	ret = spi_nor_wait_till_ready(nor);
	if (ret) {
		dev_err(nor->dev,
			"timeout while writing configuration register\n");
		return ret;
	}

	return 0;
}
1699
1700/**
1701 * macronix_quad_enable() - set QE bit in Status Register.
1702 * @nor:	pointer to a 'struct spi_nor'
1703 *
1704 * Set the Quad Enable (QE) bit in the Status Register.
1705 *
1706 * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
1707 *
1708 * Return: 0 on success, -errno otherwise.
1709 */
1710static int macronix_quad_enable(struct spi_nor *nor)
1711{
1712	int ret, val;
1713
1714	val = read_sr(nor);
1715	if (val < 0)
1716		return val;
1717	if (val & SR_QUAD_EN_MX)
1718		return 0;
1719
1720	write_enable(nor);
1721
1722	write_sr(nor, val | SR_QUAD_EN_MX);
1723
1724	ret = spi_nor_wait_till_ready(nor);
1725	if (ret)
1726		return ret;
1727
1728	ret = read_sr(nor);
1729	if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
1730		dev_err(nor->dev, "Macronix Quad bit not set\n");
1731		return -EINVAL;
1732	}
1733
1734	return 0;
1735}
1736
1737/**
1738 * spansion_quad_enable() - set QE bit in Configuraiton Register.
1739 * @nor:	pointer to a 'struct spi_nor'
1740 *
1741 * Set the Quad Enable (QE) bit in the Configuration Register.
1742 * This function is kept for legacy purpose because it has been used for a
1743 * long time without anybody complaining but it should be considered as
1744 * deprecated and maybe buggy.
1745 * First, this function doesn't care about the previous values of the Status
1746 * and Configuration Registers when it sets the QE bit (bit 1) in the
1747 * Configuration Register: all other bits are cleared, which may have unwanted
1748 * side effects like removing some block protections.
1749 * Secondly, it uses the Read Configuration Register (35h) instruction though
1750 * some very old and few memories don't support this instruction. If a pull-up
1751 * resistor is present on the MISO/IO1 line, we might still be able to pass the
1752 * "read back" test because the QSPI memory doesn't recognize the command,
1753 * so leaves the MISO/IO1 line state unchanged, hence read_cr() returns 0xFF.
1754 *
1755 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1756 * memories.
1757 *
1758 * Return: 0 on success, -errno otherwise.
1759 */
static int spansion_quad_enable(struct spi_nor *nor)
{
	u8 *sr_cr = nor->bouncebuf;
	int ret;

	/*
	 * Deliberately clobbers SR and all CR bits except QE; this legacy
	 * behavior is kept on purpose (see the kernel-doc above).
	 */
	sr_cr[0] = 0;
	sr_cr[1] = CR_QUAD_EN_SPAN;
	ret = write_sr_cr(nor, sr_cr);
	if (ret)
		return ret;

	/* read back and check it */
	ret = read_cr(nor);
	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
		dev_err(nor->dev, "Spansion Quad bit not set\n");
		return -EINVAL;
	}

	return 0;
}
1780
1781/**
1782 * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
1783 * @nor:	pointer to a 'struct spi_nor'
1784 *
1785 * Set the Quad Enable (QE) bit in the Configuration Register.
1786 * This function should be used with QSPI memories not supporting the Read
1787 * Configuration Register (35h) instruction.
1788 *
1789 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1790 * memories.
1791 *
1792 * Return: 0 on success, -errno otherwise.
1793 */
1794static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
1795{
1796	u8 *sr_cr = nor->bouncebuf;
1797	int ret;
1798
1799	/* Keep the current value of the Status Register. */
1800	ret = read_sr(nor);
1801	if (ret < 0) {
1802		dev_err(nor->dev, "error while reading status register\n");
1803		return -EINVAL;
1804	}
1805	sr_cr[0] = ret;
1806	sr_cr[1] = CR_QUAD_EN_SPAN;
1807
1808	return write_sr_cr(nor, sr_cr);
1809}
1810
1811/**
1812 * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
1813 * @nor:	pointer to a 'struct spi_nor'
1814 *
1815 * Set the Quad Enable (QE) bit in the Configuration Register.
1816 * This function should be used with QSPI memories supporting the Read
1817 * Configuration Register (35h) instruction.
1818 *
1819 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1820 * memories.
1821 *
1822 * Return: 0 on success, -errno otherwise.
1823 */
static int spansion_read_cr_quad_enable(struct spi_nor *nor)
{
	struct device *dev = nor->dev;
	u8 *sr_cr = nor->bouncebuf;
	int ret;

	/* Check current Quad Enable bit value. */
	ret = read_cr(nor);
	if (ret < 0) {
		dev_err(dev, "error while reading configuration register\n");
		return -EINVAL;
	}

	/* Already enabled: nothing to write. */
	if (ret & CR_QUAD_EN_SPAN)
		return 0;

	sr_cr[1] = ret | CR_QUAD_EN_SPAN;

	/* Keep the current value of the Status Register. */
	ret = read_sr(nor);
	if (ret < 0) {
		dev_err(dev, "error while reading status register\n");
		return -EINVAL;
	}
	sr_cr[0] = ret;

	ret = write_sr_cr(nor, sr_cr);
	if (ret)
		return ret;

	/* Read back and check it; read_cr() returns the CR value on success. */
	ret = read_cr(nor);
	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
		dev_err(nor->dev, "Spansion Quad bit not set\n");
		return -EINVAL;
	}

	return 0;
}
1863
/* Write one byte to Status Register 2 (WRSR2); returns 0 on success. */
static int spi_nor_write_sr2(struct spi_nor *nor, u8 *sr2)
{
	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, sr2, 1));

		return spi_mem_exec_op(nor->spimem, &op);
	}

	/* Fall back to the controller-specific register write hook. */
	return nor->write_reg(nor, SPINOR_OP_WRSR2, sr2, 1);
}
1878
/* Read one byte from Status Register 2 (RDSR2); returns 0 on success. */
static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
{
	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr2, 1));

		return spi_mem_exec_op(nor->spimem, &op);
	}

	/* Fall back to the controller-specific register read hook. */
	return nor->read_reg(nor, SPINOR_OP_RDSR2, sr2, 1);
}
1893
1894/**
1895 * sr2_bit7_quad_enable() - set QE bit in Status Register 2.
1896 * @nor:	pointer to a 'struct spi_nor'
1897 *
1898 * Set the Quad Enable (QE) bit in the Status Register 2.
1899 *
1900 * This is one of the procedures to set the QE bit described in the SFDP
1901 * (JESD216 rev B) specification but no manufacturer using this procedure has
1902 * been identified yet, hence the name of the function.
1903 *
1904 * Return: 0 on success, -errno otherwise.
1905 */
1906static int sr2_bit7_quad_enable(struct spi_nor *nor)
1907{
1908	u8 *sr2 = nor->bouncebuf;
1909	int ret;
1910
1911	/* Check current Quad Enable bit value. */
1912	ret = spi_nor_read_sr2(nor, sr2);
1913	if (ret)
1914		return ret;
1915	if (*sr2 & SR2_QUAD_EN_BIT7)
1916		return 0;
1917
1918	/* Update the Quad Enable bit. */
1919	*sr2 |= SR2_QUAD_EN_BIT7;
1920
1921	write_enable(nor);
1922
1923	ret = spi_nor_write_sr2(nor, sr2);
1924	if (ret < 0) {
1925		dev_err(nor->dev, "error while writing status register 2\n");
1926		return -EINVAL;
1927	}
1928
1929	ret = spi_nor_wait_till_ready(nor);
1930	if (ret < 0) {
1931		dev_err(nor->dev, "timeout while writing status register 2\n");
1932		return ret;
1933	}
1934
1935	/* Read back and check it. */
1936	ret = spi_nor_read_sr2(nor, sr2);
1937	if (!(ret > 0 && (*sr2 & SR2_QUAD_EN_BIT7))) {
1938		dev_err(nor->dev, "SR2 Quad bit not set\n");
1939		return -EINVAL;
1940	}
1941
1942	return 0;
1943}
1944
1945/**
1946 * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits.
1947 * @nor:        pointer to a 'struct spi_nor'
1948 *
1949 * Read-modify-write function that clears the Block Protection bits from the
1950 * Status Register without affecting other bits.
1951 *
1952 * Return: 0 on success, -errno otherwise.
1953 */
static int spi_nor_clear_sr_bp(struct spi_nor *nor)
{
	int ret;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;

	ret = read_sr(nor);
	if (ret < 0) {
		dev_err(nor->dev, "error while reading status register\n");
		return ret;
	}

	write_enable(nor);

	/* Write back the current SR value with only the BP bits cleared. */
	ret = write_sr(nor, ret & ~mask);
	if (ret) {
		dev_err(nor->dev, "write to status register failed\n");
		return ret;
	}

	ret = spi_nor_wait_till_ready(nor);
	if (ret)
		dev_err(nor->dev, "timeout while writing status register\n");
	return ret;
}
1978
1979/**
1980 * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection
1981 * bits on spansion flashes.
1982 * @nor:        pointer to a 'struct spi_nor'
1983 *
1984 * Read-modify-write function that clears the Block Protection bits from the
1985 * Status Register without affecting other bits. The function is tightly
1986 * coupled with the spansion_quad_enable() function. Both assume that the Write
1987 * Register with 16 bits, together with the Read Configuration Register (35h)
1988 * instructions are supported.
1989 *
1990 * Return: 0 on success, -errno otherwise.
1991 */
static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
{
	int ret;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 *sr_cr =  nor->bouncebuf;

	/* Check current Quad Enable bit value. */
	ret = read_cr(nor);
	if (ret < 0) {
		dev_err(nor->dev,
			"error while reading configuration register\n");
		return ret;
	}

	/*
	 * When the configuration register Quad Enable bit is one, only the
	 * Write Status (01h) command with two data bytes may be used.
	 */
	if (ret & CR_QUAD_EN_SPAN) {
		/* Preserve CR (including QE) in the second byte. */
		sr_cr[1] = ret;

		ret = read_sr(nor);
		if (ret < 0) {
			dev_err(nor->dev,
				"error while reading status register\n");
			return ret;
		}
		/* SR goes in the first byte, with the BP bits cleared. */
		sr_cr[0] = ret & ~mask;

		ret = write_sr_cr(nor, sr_cr);
		if (ret)
			dev_err(nor->dev, "16-bit write register failed\n");
		return ret;
	}

	/*
	 * If the Quad Enable bit is zero, use the Write Status (01h) command
	 * with one data byte.
	 */
	return spi_nor_clear_sr_bp(nor);
}
2033
2034/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
		.id = {							\
			((_jedec_id) >> 16) & 0xff,			\
			((_jedec_id) >> 8) & 0xff,			\
			(_jedec_id) & 0xff,				\
			((_ext_id) >> 8) & 0xff,			\
			(_ext_id) & 0xff,				\
			},						\
		.id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),	\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = 256,					\
		.flags = (_flags),

/* Used when the "_ext_id" is three bytes; id_len is always 6. */
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
		.id = {							\
			((_jedec_id) >> 16) & 0xff,			\
			((_jedec_id) >> 8) & 0xff,			\
			(_jedec_id) & 0xff,				\
			((_ext_id) >> 16) & 0xff,			\
			((_ext_id) >> 8) & 0xff,			\
			(_ext_id) & 0xff,				\
			},						\
		.id_len = 6,						\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = 256,					\
		.flags = (_flags),

/* Catalyst CAT25xxx serial EEPROMs: no JEDEC ID, explicit geometry. */
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)	\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = (_page_size),				\
		.addr_width = (_addr_width),				\
		.flags = (_flags),

/* Xilinx S3AN in-system flash: 3-byte ID, sector = 8 pages. */
#define S3AN_INFO(_jedec_id, _n_sectors, _page_size)			\
		.id = {							\
			((_jedec_id) >> 16) & 0xff,			\
			((_jedec_id) >> 8) & 0xff,			\
			(_jedec_id) & 0xff				\
			},						\
		.id_len = 3,						\
		.sector_size = (8*_page_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = _page_size,				\
		.addr_width = 3,					\
		.flags = SPI_NOR_NO_FR | SPI_S3AN,
2083
2084static int
2085is25lp256_post_bfpt_fixups(struct spi_nor *nor,
2086			   const struct sfdp_parameter_header *bfpt_header,
2087			   const struct sfdp_bfpt *bfpt,
2088			   struct spi_nor_flash_parameter *params)
2089{
2090	/*
2091	 * IS25LP256 supports 4B opcodes, but the BFPT advertises a
2092	 * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
2093	 * Overwrite the address width advertised by the BFPT.
2094	 */
2095	if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
2096		BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
2097		nor->addr_width = 4;
2098
2099	return 0;
2100}
2101
/* Hook the IS25LP256 address-width quirk into the SFDP parsing flow. */
static struct spi_nor_fixups is25lp256_fixups = {
	.post_bfpt = is25lp256_post_bfpt_fixups,
};
2105
2106static int
2107mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
2108			    const struct sfdp_parameter_header *bfpt_header,
2109			    const struct sfdp_bfpt *bfpt,
2110			    struct spi_nor_flash_parameter *params)
2111{
2112	/*
2113	 * MX25L25635F supports 4B opcodes but MX25L25635E does not.
2114	 * Unfortunately, Macronix has re-used the same JEDEC ID for both
2115	 * variants which prevents us from defining a new entry in the parts
2116	 * table.
2117	 * We need a way to differentiate MX25L25635E and MX25L25635F, and it
2118	 * seems that the F version advertises support for Fast Read 4-4-4 in
2119	 * its BFPT table.
2120	 */
2121	if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
2122		nor->flags |= SNOR_F_4B_OPCODES;
2123
2124	return 0;
2125}
2126
/* Hook the MX25L25635E/F discrimination quirk into the SFDP parsing flow. */
static struct spi_nor_fixups mx25l25635_fixups = {
	.post_bfpt = mx25l25635_post_bfpt_fixups,
};
2130
static void gd25q256_default_init(struct spi_nor *nor)
{
	/*
	 * Some manufacturers, like GigaDevice, may use a different bit to
	 * set QE on different memories, so the MFR alone cannot indicate
	 * the quad_enable method for this case; set it explicitly in the
	 * default_init() fixup hook instead.
	 */
	nor->params.quad_enable = macronix_quad_enable;
}
2141
/* Hook the GD25Q256 quad-enable override into the init flow. */
static struct spi_nor_fixups gd25q256_fixups = {
	.default_init = gd25q256_default_init,
};
2145
/* NOTE: double check command sets and memory organization when you add
 * more nor chips.  This current list focuses on newer chips, which
 * have been converging on command sets that include the JEDEC ID.
 *
 * All newly added entries should describe *hardware* and should use SECT_4K
 * (or SECT_4K_PMC) if the hardware supports erasing 4 KiB sectors. For usage
 * scenarios excluding small sectors there is a config option that can be
 * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
 * For historical (and compatibility) reasons (before we got the above config
 * option) some old entries may be missing the 4K flag.
 */
static const struct flash_info spi_nor_ids[] = {
	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
	{ "at25fs010",  INFO(0x1f6601, 0, 32 * 1024,   4, SECT_4K) },
	{ "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K) },

	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8, SECT_4K) },
	{ "at25df321",  INFO(0x1f4700, 0, 64 * 1024,  64, SECT_4K) },
	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K) },
	{ "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },

	{ "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) },
	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },

	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },

	/* EON -- en25xxx */
	{ "en25f32",    INFO(0x1c3116, 0, 64 * 1024,   64, SECT_4K) },
	{ "en25p32",    INFO(0x1c2016, 0, 64 * 1024,   64, 0) },
	{ "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64, 0) },
	{ "en25p64",    INFO(0x1c2017, 0, 64 * 1024,  128, 0) },
	{ "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128, SECT_4K) },
	{ "en25q80a",   INFO(0x1c3014, 0, 64 * 1024,   16,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "en25qh32",   INFO(0x1c7016, 0, 64 * 1024,   64, 0) },
	{ "en25qh64",   INFO(0x1c7017, 0, 64 * 1024,  128,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "en25qh128",  INFO(0x1c7018, 0, 64 * 1024,  256, 0) },
	{ "en25qh256",  INFO(0x1c7019, 0, 64 * 1024,  512, 0) },
	{ "en25s64",	INFO(0x1c3817, 0, 64 * 1024,  128, SECT_4K) },

	/* ESMT */
	{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
	{ "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
	{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },

	/* Everspin */
	{ "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h40",  CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },

	/* Fujitsu */
	{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },

	/* GigaDevice */
	{
		"gd25q16", INFO(0xc84015, 0, 64 * 1024,  32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q32", INFO(0xc84016, 0, 64 * 1024,  64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
			.fixups = &gd25q256_fixups,
	},

	/* Intel/Numonyx -- xxxs33b */
	{ "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
	{ "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
	{ "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },

	/* ISSI */
	{ "is25cd512",  INFO(0x7f9d20, 0, 32 * 1024,   2, SECT_4K) },
	{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024,   8,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp016d", INFO(0x9d6015, 0, 64 * 1024,  32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp080d", INFO(0x9d6014, 0, 64 * 1024,  16,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp032",  INFO(0x9d6016, 0, 64 * 1024,  64,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp064",  INFO(0x9d6017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp128",  INFO(0x9d6018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp256",  INFO(0x9d6019, 0, 64 * 1024, 512,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_4B_OPCODES)
			.fixups = &is25lp256_fixups },
	{ "is25wp032",  INFO(0x9d7016, 0, 64 * 1024,  64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25wp064",  INFO(0x9d7017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25wp128",  INFO(0x9d7018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },

	/* Macronix */
	{ "mx25l512e",   INFO(0xc22010, 0, 64 * 1024,   1, SECT_4K) },
	{ "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
	{ "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) },
	{ "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
	{ "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
	{ "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, SECT_4K) },
	{ "mx25l3255e",  INFO(0xc29e16, 0, 64 * 1024,  64, SECT_4K) },
	{ "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25u2033e",  INFO(0xc22532, 0, 64 * 1024,   4, SECT_4K) },
	{ "mx25u3235f",	 INFO(0xc22536, 0, 64 * 1024,  64,
			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx25u4035",   INFO(0xc22533, 0, 64 * 1024,   8, SECT_4K) },
	{ "mx25u8035",   INFO(0xc22534, 0, 64 * 1024,  16, SECT_4K) },
	{ "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
	{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
			 SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
			 .fixups = &mx25l25635_fixups },
	{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
	{ "mx25v8035f",  INFO(0xc22314, 0, 64 * 1024,  16,
			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "mx66l1g45g",  INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },

	/* Micron <--> ST Micro */
	{ "n25q016a",	 INFO(0x20bb15, 0, 64 * 1024,   32, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q032",	 INFO(0x20ba16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
	{ "n25q032a",	 INFO(0x20bb16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
	{ "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q064a",    INFO(0x20bb17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "n25q256ax1",  INFO(0x20bb19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
	{ "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
	{ "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
	{ "mt25ql02g",   INFO(0x20ba22, 0, 64 * 1024, 4096,
			      SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
			      NO_CHIP_ERASE) },
	{ "mt25qu512a (n25q512a)", INFO(0x20bb20, 0, 64 * 1024, 1024,
					SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
					SPI_NOR_QUAD_READ |
					SPI_NOR_4B_OPCODES) },
	{ "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },

	/* Micron */
	{
		"mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
			SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
			SPI_NOR_4B_OPCODES)
	},
	{ "mt35xu02g",  INFO(0x2c5b1c, 0, 128 * 1024, 2048,
			     SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
			     SPI_NOR_4B_OPCODES) },

	/* PMC */
	{ "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) },
	{ "pm25lv010",   INFO(0,        0, 32 * 1024,    4, SECT_4K_PMC) },
	{ "pm25lq032",   INFO(0x7f9d46, 0, 64 * 1024,   64, SECT_4K) },

	/* Spansion/Cypress -- single (large) sector size only, at least
	 * for the chips listed here (without boot sectors).
	 */
	{ "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
	{ "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl512s",  INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | USE_CLSR) },
	{ "s25fs512s",  INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
	{ "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) },
	{ "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) },
	{ "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32, 0) },
	{ "s25sl032a",  INFO(0x010215,      0,  64 * 1024,  64, 0) },
	{ "s25sl064a",  INFO(0x010216,      0,  64 * 1024, 128, 0) },
	{ "s25fl004k",  INFO(0xef4013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl008k",  INFO(0xef4014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, SECT_4K) },
	{ "s25fl116k",  INFO(0x014015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl132k",  INFO(0x014016,      0,  64 * 1024,  64, SECT_4K) },
	{ "s25fl164k",  INFO(0x014017,      0,  64 * 1024, 128, SECT_4K) },
	{ "s25fl204k",  INFO(0x014013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ) },
	{ "s25fl208k",  INFO(0x014014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ) },
	{ "s25fl064l",  INFO(0x016017,      0,  64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "s25fl128l",  INFO(0x016018,      0,  64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "s25fl256l",  INFO(0x016019,      0,  64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },

	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
	{ "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1, SECT_4K | SST_WRITE) },
	{ "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2, SECT_4K | SST_WRITE) },
	{ "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4, SECT_4K | SST_WRITE) },
	{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024,  4, SECT_4K) },
	{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024,  8, SECT_4K) },
	{ "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
	{ "sst25wf080",  INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
	{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },

	/* ST Microelectronics -- newer production may have feature updates */
	{ "m25p05",  INFO(0x202010,  0,  32 * 1024,   2, 0) },
	{ "m25p10",  INFO(0x202011,  0,  32 * 1024,   4, 0) },
	{ "m25p20",  INFO(0x202012,  0,  64 * 1024,   4, 0) },
	{ "m25p40",  INFO(0x202013,  0,  64 * 1024,   8, 0) },
	{ "m25p80",  INFO(0x202014,  0,  64 * 1024,  16, 0) },
	{ "m25p16",  INFO(0x202015,  0,  64 * 1024,  32, 0) },
	{ "m25p32",  INFO(0x202016,  0,  64 * 1024,  64, 0) },
	{ "m25p64",  INFO(0x202017,  0,  64 * 1024, 128, 0) },
	{ "m25p128", INFO(0x202018,  0, 256 * 1024,  64, 0) },

	{ "m25p05-nonjedec",  INFO(0, 0,  32 * 1024,   2, 0) },
	{ "m25p10-nonjedec",  INFO(0, 0,  32 * 1024,   4, 0) },
	{ "m25p20-nonjedec",  INFO(0, 0,  64 * 1024,   4, 0) },
	{ "m25p40-nonjedec",  INFO(0, 0,  64 * 1024,   8, 0) },
	{ "m25p80-nonjedec",  INFO(0, 0,  64 * 1024,  16, 0) },
	{ "m25p16-nonjedec",  INFO(0, 0,  64 * 1024,  32, 0) },
	{ "m25p32-nonjedec",  INFO(0, 0,  64 * 1024,  64, 0) },
	{ "m25p64-nonjedec",  INFO(0, 0,  64 * 1024, 128, 0) },
	{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024,  64, 0) },

	{ "m45pe10", INFO(0x204011,  0, 64 * 1024,    2, 0) },
	{ "m45pe80", INFO(0x204014,  0, 64 * 1024,   16, 0) },
	{ "m45pe16", INFO(0x204015,  0, 64 * 1024,   32, 0) },

	{ "m25pe20", INFO(0x208012,  0, 64 * 1024,  4,       0) },
	{ "m25pe80", INFO(0x208014,  0, 64 * 1024, 16,       0) },
	{ "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) },

	{ "m25px16",    INFO(0x207115,  0, 64 * 1024, 32, SECT_4K) },
	{ "m25px32",    INFO(0x207116,  0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s0", INFO(0x207316,  0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s1", INFO(0x206316,  0, 64 * 1024, 64, SECT_4K) },
	{ "m25px64",    INFO(0x207117,  0, 64 * 1024, 128, 0) },
	{ "m25px80",    INFO(0x207114,  0, 64 * 1024, 16, 0) },

	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
	{ "w25x05", INFO(0xef3010, 0, 64 * 1024,  1,  SECT_4K) },
	{ "w25x10", INFO(0xef3011, 0, 64 * 1024,  2,  SECT_4K) },
	{ "w25x20", INFO(0xef3012, 0, 64 * 1024,  4,  SECT_4K) },
	{ "w25x40", INFO(0xef3013, 0, 64 * 1024,  8,  SECT_4K) },
	{ "w25x80", INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) },
	{ "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
	{
		"w25q16dw", INFO(0xef6015, 0, 64 * 1024,  32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
	{
		"w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024,  32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024,  4, SECT_4K) },
	{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024,  4, SECT_4K) },
	{ "w25q20ew", INFO(0xef6012, 0, 64 * 1024,  4, SECT_4K) },
	{ "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
	{
		"w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q32jv", INFO(0xef7016, 0, 64 * 1024,  64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
	{
		"w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
			SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },

	/* Catalyst / On Semiconductor -- non-JEDEC */
	{ "cat25c11", CAT25_INFO(  16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c03", CAT25_INFO(  32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },

	/* Xilinx S3AN Internal Flash */
	{ "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
	{ "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
	/*
	 * NOTE(review): 3S400AN reuses 3S200AN's JEDEC ID 0x1f2400, so
	 * ID-based probing cannot tell them apart — confirm this is
	 * intentional (e.g. name-based binding only).
	 */
	{ "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
	{ "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
	{ "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },

	/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
	{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	/*
	 * Keep last: zero-filled sentinel. spi_nor_read_id() stops before
	 * it (ARRAY_SIZE - 1); presumably it also terminates name-based
	 * walks elsewhere — verify against the name-lookup helper.
	 */
	{ },
};
2507
2508static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
2509{
2510	int			tmp;
2511	u8			*id = nor->bouncebuf;
2512	const struct flash_info	*info;
2513
2514	if (nor->spimem) {
2515		struct spi_mem_op op =
2516			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
2517				   SPI_MEM_OP_NO_ADDR,
2518				   SPI_MEM_OP_NO_DUMMY,
2519				   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
2520
2521		tmp = spi_mem_exec_op(nor->spimem, &op);
2522	} else {
2523		tmp = nor->read_reg(nor, SPINOR_OP_RDID, id,
2524				    SPI_NOR_MAX_ID_LEN);
2525	}
2526	if (tmp < 0) {
2527		dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
2528		return ERR_PTR(tmp);
2529	}
2530
2531	for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
2532		info = &spi_nor_ids[tmp];
2533		if (info->id_len) {
2534			if (!memcmp(info->id, id, info->id_len))
2535				return &spi_nor_ids[tmp];
2536		}
2537	}
2538	dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
2539		SPI_NOR_MAX_ID_LEN, id);
2540	return ERR_PTR(-ENODEV);
2541}
2542
2543static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
2544			size_t *retlen, u_char *buf)
2545{
2546	struct spi_nor *nor = mtd_to_spi_nor(mtd);
2547	int ret;
2548
2549	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
2550
2551	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
2552	if (ret)
2553		return ret;
2554
2555	while (len) {
2556		loff_t addr = from;
2557
2558		addr = spi_nor_convert_addr(nor, addr);
2559
2560		ret = spi_nor_read_data(nor, addr, len, buf);
2561		if (ret == 0) {
2562			/* We shouldn't see 0-length reads */
2563			ret = -EIO;
2564			goto read_err;
2565		}
2566		if (ret < 0)
2567			goto read_err;
2568
2569		WARN_ON(ret > len);
2570		*retlen += ret;
2571		buf += ret;
2572		from += ret;
2573		len -= ret;
2574	}
2575	ret = 0;
2576
2577read_err:
2578	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
2579	return ret;
2580}
2581
2582static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
2583		size_t *retlen, const u_char *buf)
2584{
2585	struct spi_nor *nor = mtd_to_spi_nor(mtd);
2586	size_t actual;
2587	int ret;
2588
2589	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2590
2591	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
2592	if (ret)
2593		return ret;
2594
2595	write_enable(nor);
2596
2597	nor->sst_write_second = false;
2598
2599	actual = to % 2;
2600	/* Start write from odd address. */
2601	if (actual) {
2602		nor->program_opcode = SPINOR_OP_BP;
2603
2604		/* write one byte. */
2605		ret = spi_nor_write_data(nor, to, 1, buf);
2606		if (ret < 0)
2607			goto sst_write_err;
2608		WARN(ret != 1, "While writing 1 byte written %i bytes\n",
2609		     (int)ret);
2610		ret = spi_nor_wait_till_ready(nor);
2611		if (ret)
2612			goto sst_write_err;
2613	}
2614	to += actual;
2615
2616	/* Write out most of the data here. */
2617	for (; actual < len - 1; actual += 2) {
2618		nor->program_opcode = SPINOR_OP_AAI_WP;
2619
2620		/* write two bytes. */
2621		ret = spi_nor_write_data(nor, to, 2, buf + actual);
2622		if (ret < 0)
2623			goto sst_write_err;
2624		WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
2625		     (int)ret);
2626		ret = spi_nor_wait_till_ready(nor);
2627		if (ret)
2628			goto sst_write_err;
2629		to += 2;
2630		nor->sst_write_second = true;
2631	}
2632	nor->sst_write_second = false;
2633
2634	write_disable(nor);
2635	ret = spi_nor_wait_till_ready(nor);
2636	if (ret)
2637		goto sst_write_err;
2638
2639	/* Write out trailing byte if it exists. */
2640	if (actual != len) {
2641		write_enable(nor);
2642
2643		nor->program_opcode = SPINOR_OP_BP;
2644		ret = spi_nor_write_data(nor, to, 1, buf + actual);
2645		if (ret < 0)
2646			goto sst_write_err;
2647		WARN(ret != 1, "While writing 1 byte written %i bytes\n",
2648		     (int)ret);
2649		ret = spi_nor_wait_till_ready(nor);
2650		if (ret)
2651			goto sst_write_err;
2652		write_disable(nor);
2653		actual += 1;
2654	}
2655sst_write_err:
2656	*retlen += actual;
2657	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
2658	return ret;
2659}
2660
/*
 * Write an address range to the nor chip.  Data must be written in
 * FLASH_PAGESIZE chunks.  The address range may be any size provided
 * it is within the physical boundaries.
 *
 * Returns 0 on success (with *retlen advanced by the bytes written),
 * or a negative errno if a page program or the ready-wait fails.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	/* Split the range into chunks that never cross a page boundary. */
	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation. On the other cases we
		 * need to do a modulus operation (more expensive).
		 * Power of two numbers have only one bit set and we can use
		 * the instruction hweight32 to detect if we need to do a
		 * modulus (do_div()) or not.
		 */
		if (hweight32(nor->page_size) == 1) {
			page_offset = addr & (nor->page_size - 1);
		} else {
			uint64_t aux = addr;

			/* do_div() returns the remainder (the in-page offset). */
			page_offset = do_div(aux, nor->page_size);
		}
		/* the size of data remaining on the first page */
		page_remain = min_t(size_t,
				    nor->page_size - page_offset, len - i);

		/* Translate to the device's native addressing if needed. */
		addr = spi_nor_convert_addr(nor, addr);

		write_enable(nor);
		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}
2721
2722static int spi_nor_check(struct spi_nor *nor)
2723{
2724	if (!nor->dev ||
2725	    (!nor->spimem &&
2726	    (!nor->read || !nor->write || !nor->read_reg ||
2727	      !nor->write_reg))) {
2728		pr_err("spi-nor: please fill all the necessary fields!\n");
2729		return -EINVAL;
2730	}
2731
2732	return 0;
2733}
2734
static int s3an_nor_setup(struct spi_nor *nor,
			  const struct spi_nor_hwcaps *hwcaps)
{
	int ret;

	/* Read the Xilinx-specific status register (XRDSR) into the bounce buffer. */
	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
	if (ret < 0) {
		dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
		return ret;
	}

	/* Switch to the Xilinx in-system flash command set. */
	nor->erase_opcode = SPINOR_OP_XSE;
	nor->program_opcode = SPINOR_OP_XPP;
	nor->read_opcode = SPINOR_OP_READ;
	nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;

	/*
	 * These flashes have a page size of 264 or 528 bytes (known as
	 * Default addressing mode). It can be changed to a more standard
	 * Power of two mode where the page size is 256/512. This comes
	 * with a price: there is 3% less of space, the data is corrupted
	 * and the page size cannot be changed back to default addressing
	 * mode.
	 *
	 * The current addressing mode can be read from the XRDSR register
	 * and should not be changed, because that is a destructive operation.
	 */
	if (nor->bouncebuf[0] & XSR_PAGESIZE) {
		/* Flash in Power of 2 mode */
		nor->page_size = (nor->page_size == 264) ? 256 : 512;
		nor->mtd.writebufsize = nor->page_size;
		nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
		nor->mtd.erasesize = 8 * nor->page_size;
	} else {
		/* Flash in Default addressing mode */
		nor->params.convert_addr = s3an_convert_addr;
		nor->mtd.erasesize = nor->info->sector_size;
	}

	return 0;
}
2776
2777static void
2778spi_nor_set_read_settings(struct spi_nor_read_command *read,
2779			  u8 num_mode_clocks,
2780			  u8 num_wait_states,
2781			  u8 opcode,
2782			  enum spi_nor_protocol proto)
2783{
2784	read->num_mode_clocks = num_mode_clocks;
2785	read->num_wait_states = num_wait_states;
2786	read->opcode = opcode;
2787	read->proto = proto;
2788}
2789
2790static void
2791spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
2792			u8 opcode,
2793			enum spi_nor_protocol proto)
2794{
2795	pp->opcode = opcode;
2796	pp->proto = proto;
2797}
2798
2799static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2800{
2801	size_t i;
2802
2803	for (i = 0; i < size; i++)
2804		if (table[i][0] == (int)hwcaps)
2805			return table[i][1];
2806
2807	return -EINVAL;
2808}
2809
static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	/* Map each SNOR_HWCAPS_READ* capability bit to its SNOR_CMD_READ* index. */
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}
2833
static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	/* Map each SNOR_HWCAPS_PP* capability bit to its SNOR_CMD_PP* index. */
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
2849
2850/*
2851 * Serial Flash Discoverable Parameters (SFDP) parsing.
2852 */
2853
2854/**
2855 * spi_nor_read_raw() - raw read of serial flash memory. read_opcode,
2856 *			addr_width and read_dummy members of the struct spi_nor
2857 *			should be previously
2858 * set.
2859 * @nor:	pointer to a 'struct spi_nor'
2860 * @addr:	offset in the serial flash memory
2861 * @len:	number of bytes to read
2862 * @buf:	buffer where the data is copied into (dma-safe memory)
2863 *
2864 * Return: 0 on success, -errno otherwise.
2865 */
2866static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
2867{
2868	int ret;
2869
2870	while (len) {
2871		ret = spi_nor_read_data(nor, addr, len, buf);
2872		if (ret < 0)
2873			return ret;
2874		if (!ret || ret > len)
2875			return -EIO;
2876
2877		buf += ret;
2878		addr += ret;
2879		len -= ret;
2880	}
2881	return 0;
2882}
2883
2884/**
2885 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
2886 * @nor:	pointer to a 'struct spi_nor'
2887 * @addr:	offset in the SFDP area to start reading data from
2888 * @len:	number of bytes to read
2889 * @buf:	buffer where the SFDP data are copied into (dma-safe memory)
2890 *
2891 * Whatever the actual numbers of bytes for address and dummy cycles are
2892 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
2893 * followed by a 3-byte address and 8 dummy clock cycles.
2894 *
2895 * Return: 0 on success, -errno otherwise.
2896 */
2897static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
2898			     size_t len, void *buf)
2899{
2900	u8 addr_width, read_opcode, read_dummy;
2901	int ret;
2902
2903	read_opcode = nor->read_opcode;
2904	addr_width = nor->addr_width;
2905	read_dummy = nor->read_dummy;
2906
2907	nor->read_opcode = SPINOR_OP_RDSFDP;
2908	nor->addr_width = 3;
2909	nor->read_dummy = 8;
2910
2911	ret = spi_nor_read_raw(nor, addr, len, buf);
2912
2913	nor->read_opcode = read_opcode;
2914	nor->addr_width = addr_width;
2915	nor->read_dummy = read_dummy;
2916
2917	return ret;
2918}
2919
2920/**
2921 * spi_nor_spimem_check_op - check if the operation is supported
2922 *                           by controller
2923 *@nor:        pointer to a 'struct spi_nor'
2924 *@op:         pointer to op template to be checked
2925 *
2926 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2927 */
2928static int spi_nor_spimem_check_op(struct spi_nor *nor,
2929				   struct spi_mem_op *op)
2930{
2931	/*
2932	 * First test with 4 address bytes. The opcode itself might
2933	 * be a 3B addressing opcode but we don't care, because
2934	 * SPI controller implementation should not check the opcode,
2935	 * but just the sequence.
2936	 */
2937	op->addr.nbytes = 4;
2938	if (!spi_mem_supports_op(nor->spimem, op)) {
2939		if (nor->mtd.size > SZ_16M)
2940			return -ENOTSUPP;
2941
2942		/* If flash size <= 16MB, 3 address bytes are sufficient */
2943		op->addr.nbytes = 3;
2944		if (!spi_mem_supports_op(nor->spimem, op))
2945			return -ENOTSUPP;
2946	}
2947
2948	return 0;
2949}
2950
2951/**
2952 * spi_nor_spimem_check_readop - check if the read op is supported
2953 *                               by controller
2954 *@nor:         pointer to a 'struct spi_nor'
2955 *@read:        pointer to op template to be checked
2956 *
2957 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2958 */
2959static int spi_nor_spimem_check_readop(struct spi_nor *nor,
2960				       const struct spi_nor_read_command *read)
2961{
2962	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
2963					  SPI_MEM_OP_ADDR(3, 0, 1),
2964					  SPI_MEM_OP_DUMMY(0, 1),
2965					  SPI_MEM_OP_DATA_IN(0, NULL, 1));
2966
2967	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
2968	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
2969	op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
2970	op.dummy.buswidth = op.addr.buswidth;
2971	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
2972			  op.dummy.buswidth / 8;
2973
2974	return spi_nor_spimem_check_op(nor, &op);
2975}
2976
2977/**
2978 * spi_nor_spimem_check_pp - check if the page program op is supported
2979 *                           by controller
2980 *@nor:         pointer to a 'struct spi_nor'
2981 *@pp:          pointer to op template to be checked
2982 *
2983 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2984 */
2985static int spi_nor_spimem_check_pp(struct spi_nor *nor,
2986				   const struct spi_nor_pp_command *pp)
2987{
2988	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
2989					  SPI_MEM_OP_ADDR(3, 0, 1),
2990					  SPI_MEM_OP_NO_DUMMY,
2991					  SPI_MEM_OP_DATA_OUT(0, NULL, 1));
2992
2993	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
2994	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
2995	op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
2996
2997	return spi_nor_spimem_check_op(nor, &op);
2998}
2999
3000/**
3001 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
3002 *                                based on SPI controller capabilities
3003 * @nor:        pointer to a 'struct spi_nor'
3004 * @hwcaps:     pointer to resulting capabilities after adjusting
3005 *              according to controller and flash's capability
3006 */
3007static void
3008spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
3009{
3010	struct spi_nor_flash_parameter *params =  &nor->params;
3011	unsigned int cap;
3012
3013	/* DTR modes are not supported yet, mask them all. */
3014	*hwcaps &= ~SNOR_HWCAPS_DTR;
3015
3016	/* X-X-X modes are not supported yet, mask them all. */
3017	*hwcaps &= ~SNOR_HWCAPS_X_X_X;
3018
3019	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
3020		int rdidx, ppidx;
3021
3022		if (!(*hwcaps & BIT(cap)))
3023			continue;
3024
3025		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
3026		if (rdidx >= 0 &&
3027		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
3028			*hwcaps &= ~BIT(cap);
3029
3030		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
3031		if (ppidx < 0)
3032			continue;
3033
3034		if (spi_nor_spimem_check_pp(nor,
3035					    &params->page_programs[ppidx]))
3036			*hwcaps &= ~BIT(cap);
3037	}
3038}
3039
3040/**
3041 * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
3042 * @nor:	pointer to a 'struct spi_nor'
3043 * @addr:	offset in the SFDP area to start reading data from
3044 * @len:	number of bytes to read
3045 * @buf:	buffer where the SFDP data are copied into
3046 *
3047 * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
3048 * guaranteed to be dma-safe.
3049 *
3050 * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
3051 *          otherwise.
3052 */
3053static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
3054					size_t len, void *buf)
3055{
3056	void *dma_safe_buf;
3057	int ret;
3058
3059	dma_safe_buf = kmalloc(len, GFP_KERNEL);
3060	if (!dma_safe_buf)
3061		return -ENOMEM;
3062
3063	ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
3064	memcpy(buf, dma_safe_buf, len);
3065	kfree(dma_safe_buf);
3066
3067	return ret;
3068}
3069
3070/* Fast Read settings. */
3071
3072static void
3073spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
3074				    u16 half,
3075				    enum spi_nor_protocol proto)
3076{
3077	read->num_mode_clocks = (half >> 5) & 0x07;
3078	read->num_wait_states = (half >> 0) & 0x1f;
3079	read->opcode = (half >> 8) & 0xff;
3080	read->proto = proto;
3081}
3082
/* Describes where a Fast Read x-y-z command is advertised in the BFPT. */
struct sfdp_bfpt_read {
	/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
	u32			hwcaps;

	/*
	 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
	 * whether the Fast Read x-y-z command is supported.
	 */
	u32			supported_dword;
	u32			supported_bit;

	/*
	 * The half-word at offset <settings_shift> in <settings_dword> BFPT
	 * DWORD encodes the op code, the number of mode clocks and the number
	 * of wait states to be used by Fast Read x-y-z command.
	 */
	u32			settings_dword;
	u32			settings_shift;

	/* The SPI protocol for this Fast Read x-y-z command. */
	enum spi_nor_protocol	proto;
};
3105
/*
 * Fast Read command descriptors, as laid out in the JESD216 Basic Flash
 * Parameter Table. DWORD/bit positions come from the JESD216 specification.
 */
static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
	/* Fast Read 1-1-2 */
	{
		SNOR_HWCAPS_READ_1_1_2,
		BFPT_DWORD(1), BIT(16),	/* Supported bit */
		BFPT_DWORD(4), 0,	/* Settings */
		SNOR_PROTO_1_1_2,
	},

	/* Fast Read 1-2-2 */
	{
		SNOR_HWCAPS_READ_1_2_2,
		BFPT_DWORD(1), BIT(20),	/* Supported bit */
		BFPT_DWORD(4), 16,	/* Settings */
		SNOR_PROTO_1_2_2,
	},

	/* Fast Read 2-2-2 */
	{
		SNOR_HWCAPS_READ_2_2_2,
		BFPT_DWORD(5),  BIT(0),	/* Supported bit */
		BFPT_DWORD(6), 16,	/* Settings */
		SNOR_PROTO_2_2_2,
	},

	/* Fast Read 1-1-4 */
	{
		SNOR_HWCAPS_READ_1_1_4,
		BFPT_DWORD(1), BIT(22),	/* Supported bit */
		BFPT_DWORD(3), 16,	/* Settings */
		SNOR_PROTO_1_1_4,
	},

	/* Fast Read 1-4-4 */
	{
		SNOR_HWCAPS_READ_1_4_4,
		BFPT_DWORD(1), BIT(21),	/* Supported bit */
		BFPT_DWORD(3), 0,	/* Settings */
		SNOR_PROTO_1_4_4,
	},

	/* Fast Read 4-4-4 */
	{
		SNOR_HWCAPS_READ_4_4_4,
		BFPT_DWORD(5), BIT(4),	/* Supported bit */
		BFPT_DWORD(7), 16,	/* Settings */
		SNOR_PROTO_4_4_4,
	},
};
3155
/* Describes where an Erase Type half-word lives in the BFPT. */
struct sfdp_bfpt_erase {
	/*
	 * The half-word at offset <shift> in DWORD <dword> encodes the
	 * op code and erase sector size to be used by Sector Erase commands.
	 */
	u32			dword;
	u32			shift;
};
3164
/* The four BFPT Erase Type slots defined by JESD216 (DWORDs 8 and 9). */
static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
	/* Erase Type 1 in DWORD8 bits[15:0] */
	{BFPT_DWORD(8), 0},

	/* Erase Type 2 in DWORD8 bits[31:16] */
	{BFPT_DWORD(8), 16},

	/* Erase Type 3 in DWORD9 bits[15:0] */
	{BFPT_DWORD(9), 0},

	/* Erase Type 4 in DWORD9 bits[31:16] */
	{BFPT_DWORD(9), 16},
};
3178
3179/**
3180 * spi_nor_set_erase_type() - set a SPI NOR erase type
3181 * @erase:	pointer to a structure that describes a SPI NOR erase type
3182 * @size:	the size of the sector/block erased by the erase type
3183 * @opcode:	the SPI command op code to erase the sector/block
3184 */
3185static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
3186				   u32 size, u8 opcode)
3187{
3188	erase->size = size;
3189	erase->opcode = opcode;
3190	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
3191	erase->size_shift = ffs(erase->size) - 1;
3192	erase->size_mask = (1 << erase->size_shift) - 1;
3193}
3194
3195/**
3196 * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
3197 * @erase:	pointer to a structure that describes a SPI NOR erase type
3198 * @size:	the size of the sector/block erased by the erase type
3199 * @opcode:	the SPI command op code to erase the sector/block
3200 * @i:		erase type index as sorted in the Basic Flash Parameter Table
3201 *
3202 * The supported Erase Types will be sorted at init in ascending order, with
3203 * the smallest Erase Type size being the first member in the erase_type array
3204 * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
3205 * the Basic Flash Parameter Table since it will be used later on to
3206 * synchronize with the supported Erase Types defined in SFDP optional tables.
3207 */
3208static void
3209spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
3210				     u32 size, u8 opcode, u8 i)
3211{
3212	erase->idx = i;
3213	spi_nor_set_erase_type(erase, size, opcode);
3214}
3215
3216/**
3217 * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
3218 * @l:	member in the left half of the map's erase_type array
3219 * @r:	member in the right half of the map's erase_type array
3220 *
3221 * Comparison function used in the sort() call to sort in ascending order the
3222 * map's erase types, the smallest erase type size being the first member in the
3223 * sorted erase_type array.
3224 *
3225 * Return: the result of @l->size - @r->size
3226 */
3227static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
3228{
3229	const struct spi_nor_erase_type *left = l, *right = r;
3230
3231	return left->size - right->size;
3232}
3233
3234/**
3235 * spi_nor_sort_erase_mask() - sort erase mask
3236 * @map:	the erase map of the SPI NOR
3237 * @erase_mask:	the erase type mask to be sorted
3238 *
3239 * Replicate the sort done for the map's erase types in BFPT: sort the erase
3240 * mask in ascending order with the smallest erase type size starting from
3241 * BIT(0) in the sorted erase mask.
3242 *
3243 * Return: sorted erase mask.
3244 */
3245static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
3246{
3247	struct spi_nor_erase_type *erase_type = map->erase_type;
3248	int i;
3249	u8 sorted_erase_mask = 0;
3250
3251	if (!erase_mask)
3252		return 0;
3253
3254	/* Replicate the sort done for the map's erase types. */
3255	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
3256		if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
3257			sorted_erase_mask |= BIT(i);
3258
3259	return sorted_erase_mask;
3260}
3261
3262/**
3263 * spi_nor_regions_sort_erase_types() - sort erase types in each region
3264 * @map:	the erase map of the SPI NOR
3265 *
3266 * Function assumes that the erase types defined in the erase map are already
3267 * sorted in ascending order, with the smallest erase type size being the first
3268 * member in the erase_type array. It replicates the sort done for the map's
3269 * erase types. Each region's erase bitmask will indicate which erase types are
3270 * supported from the sorted erase types defined in the erase map.
3271 * Sort the all region's erase type at init in order to speed up the process of
3272 * finding the best erase command at runtime.
3273 */
3274static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
3275{
3276	struct spi_nor_erase_region *region = map->regions;
3277	u8 region_erase_mask, sorted_erase_mask;
3278
3279	while (region) {
3280		region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
3281
3282		sorted_erase_mask = spi_nor_sort_erase_mask(map,
3283							    region_erase_mask);
3284
3285		/* Overwrite erase mask. */
3286		region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
3287				 sorted_erase_mask;
3288
3289		region = spi_nor_region_next(region);
3290	}
3291}
3292
3293/**
3294 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
3295 * @map:		the erase map of the SPI NOR
3296 * @erase_mask:		bitmask encoding erase types that can erase the entire
3297 *			flash memory
3298 * @flash_size:		the spi nor flash memory size
3299 */
3300static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
3301					   u8 erase_mask, u64 flash_size)
3302{
3303	/* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
3304	map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
3305				     SNOR_LAST_REGION;
3306	map->uniform_region.size = flash_size;
3307	map->regions = &map->uniform_region;
3308	map->uniform_erase_type = erase_mask;
3309}
3310
3311static int
3312spi_nor_post_bfpt_fixups(struct spi_nor *nor,
3313			 const struct sfdp_parameter_header *bfpt_header,
3314			 const struct sfdp_bfpt *bfpt,
3315			 struct spi_nor_flash_parameter *params)
3316{
3317	if (nor->info->fixups && nor->info->fixups->post_bfpt)
3318		return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
3319						    params);
3320
3321	return 0;
3322}
3323
/**
 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
 * @nor:		pointer to a 'struct spi_nor'
 * @bfpt_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the Basic Flash Parameter Table length and version
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled
 *
 * The Basic Flash Parameter Table is the main and only mandatory table as
 * defined by the SFDP (JESD216) specification.
 * It provides us with the total size (memory density) of the data array and
 * the number of address bytes for Fast Read, Page Program and Sector Erase
 * commands.
 * For Fast READ commands, it also gives the number of mode clock cycles and
 * wait states (regrouped in the number of dummy clock cycles) for each
 * supported instruction op code.
 * For Page Program, the page size is now available since JESD216 rev A, however
 * the supported instruction op codes are still not provided.
 * For Sector Erase commands, this table stores the supported instruction op
 * codes and the associated sector sizes.
 * Finally, the Quad Enable Requirements (QER) are also available since JESD216
 * rev A. The QER bits encode the manufacturer dependent procedure to be
 * executed to set the Quad Enable (QE) bit in some internal register of the
 * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
 * sending any Quad SPI command to the memory. Actually, setting the QE bit
 * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
 * and IO3 hence enabling 4 (Quad) I/O lines.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_bfpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *bfpt_header,
			      struct spi_nor_flash_parameter *params)
{
	struct spi_nor_erase_map *map = &params->erase_map;
	struct spi_nor_erase_type *erase_type = map->erase_type;
	struct sfdp_bfpt bfpt;
	size_t len;
	int i, cmd, err;
	u32 addr;
	u16 half;
	u8 erase_mask;

	/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
	if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
		return -EINVAL;

	/*
	 * Read the Basic Flash Parameter Table. Tables longer than we know
	 * how to parse are truncated to sizeof(bfpt); shorter (rev JESD216)
	 * tables leave the trailing DWORDs zeroed by the memset below.
	 */
	len = min_t(size_t, sizeof(bfpt),
		    bfpt_header->length * sizeof(u32));
	addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
	memset(&bfpt, 0, sizeof(bfpt));
	err = spi_nor_read_sfdp_dma_unsafe(nor,  addr, len, &bfpt);
	if (err < 0)
		return err;

	/* Fix endianness of the BFPT DWORDs. */
	for (i = 0; i < BFPT_DWORD_MAX; i++)
		bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);

	/* Number of address bytes. */
	switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
	case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
		nor->addr_width = 3;
		break;

	case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
		nor->addr_width = 4;
		break;

	default:
		/* "3- or 4-byte" mode: keep the current addr_width. */
		break;
	}

	/* Flash Memory Density (in bits). */
	params->size = bfpt.dwords[BFPT_DWORD(2)];
	if (params->size & BIT(31)) {
		/* BIT(31) set: density is encoded as 2^N bits, N in [30:0]. */
		params->size &= ~BIT(31);

		/*
		 * Prevent overflows on params->size. Anyway, a NOR of 2^64
		 * bits is unlikely to exist so this error probably means
		 * the BFPT we are reading is corrupted/wrong.
		 */
		if (params->size > 63)
			return -EINVAL;

		params->size = 1ULL << params->size;
	} else {
		/* BIT(31) clear: density is the highest bit address, so +1. */
		params->size++;
	}
	params->size >>= 3; /* Convert to bytes. */

	/* Fast Read settings. */
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
		const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
		struct spi_nor_read_command *read;

		if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
			params->hwcaps.mask &= ~rd->hwcaps;
			continue;
		}

		params->hwcaps.mask |= rd->hwcaps;
		cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
		read = &params->reads[cmd];
		half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
		spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
	}

	/*
	 * Sector Erase settings. Reinitialize the uniform erase map using the
	 * Erase Types defined in the bfpt table.
	 */
	erase_mask = 0;
	memset(&params->erase_map, 0, sizeof(params->erase_map));
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
		u32 erasesize;
		u8 opcode;

		half = bfpt.dwords[er->dword] >> er->shift;
		erasesize = half & 0xff;

		/* erasesize == 0 means this Erase Type is not supported. */
		if (!erasesize)
			continue;

		/*
		 * The field is the log2 of the erase size.
		 * NOTE(review): a corrupted table could report erasesize >= 32
		 * here, making this shift undefined — TODO confirm whether an
		 * upper bound check is warranted.
		 */
		erasesize = 1U << erasesize;
		opcode = (half >> 8) & 0xff;
		erase_mask |= BIT(i);
		spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
						     opcode, i);
	}
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
	/*
	 * Sort all the map's Erase Types in ascending order with the smallest
	 * erase size being the first member in the erase_type array.
	 */
	sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
	     spi_nor_map_cmp_erase_type, NULL);
	/*
	 * Sort the erase types in the uniform region in order to update the
	 * uniform_erase_type bitmask. The bitmask will be used later on when
	 * selecting the uniform erase.
	 */
	spi_nor_regions_sort_erase_types(map);
	map->uniform_erase_type = map->uniform_region.offset &
				  SNOR_ERASE_TYPE_MASK;

	/* Stop here if not JESD216 rev A or later. */
	if (bfpt_header->length < BFPT_DWORD_MAX)
		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
						params);

	/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
	params->page_size = bfpt.dwords[BFPT_DWORD(11)];
	params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
	params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
	params->page_size = 1U << params->page_size;

	/* Quad Enable Requirements. */
	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
	case BFPT_DWORD15_QER_NONE:
		params->quad_enable = NULL;
		break;

	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
		params->quad_enable = spansion_no_read_cr_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR1_BIT6:
		params->quad_enable = macronix_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR2_BIT7:
		params->quad_enable = sr2_bit7_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR2_BIT1:
		params->quad_enable = spansion_read_cr_quad_enable;
		break;

	default:
		/* Reserved QER encodings are treated as a corrupted table. */
		return -EINVAL;
	}

	return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
}
3514
/*
 * Sector Map Parameter Table (SMPT) field extraction macros.
 * The SMPT_CMD_* macros decode configuration-detection command descriptors;
 * the SMPT_MAP_* macros decode configuration map descriptors.
 */
#define SMPT_CMD_ADDRESS_LEN_MASK		GENMASK(23, 22)
#define SMPT_CMD_ADDRESS_LEN_0			(0x0UL << 22)
#define SMPT_CMD_ADDRESS_LEN_3			(0x1UL << 22)
#define SMPT_CMD_ADDRESS_LEN_4			(0x2UL << 22)
#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT	(0x3UL << 22)

#define SMPT_CMD_READ_DUMMY_MASK		GENMASK(19, 16)
#define SMPT_CMD_READ_DUMMY_SHIFT		16
#define SMPT_CMD_READ_DUMMY(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
#define SMPT_CMD_READ_DUMMY_IS_VARIABLE		0xfUL

#define SMPT_CMD_READ_DATA_MASK			GENMASK(31, 24)
#define SMPT_CMD_READ_DATA_SHIFT		24
#define SMPT_CMD_READ_DATA(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)

#define SMPT_CMD_OPCODE_MASK			GENMASK(15, 8)
#define SMPT_CMD_OPCODE_SHIFT			8
#define SMPT_CMD_OPCODE(_cmd) \
	(((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)

#define SMPT_MAP_REGION_COUNT_MASK		GENMASK(23, 16)
#define SMPT_MAP_REGION_COUNT_SHIFT		16
#define SMPT_MAP_REGION_COUNT(_header) \
	((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
	  SMPT_MAP_REGION_COUNT_SHIFT) + 1)

#define SMPT_MAP_ID_MASK			GENMASK(15, 8)
#define SMPT_MAP_ID_SHIFT			8
#define SMPT_MAP_ID(_header) \
	(((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)

/* Region size is stored in units of 256 bytes, minus one. */
#define SMPT_MAP_REGION_SIZE_MASK		GENMASK(31, 8)
#define SMPT_MAP_REGION_SIZE_SHIFT		8
#define SMPT_MAP_REGION_SIZE(_region) \
	(((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
	   SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)

#define SMPT_MAP_REGION_ERASE_TYPE_MASK		GENMASK(3, 0)
#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
	((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)

#define SMPT_DESC_TYPE_MAP			BIT(1)
#define SMPT_DESC_END				BIT(0)
3560
3561/**
3562 * spi_nor_smpt_addr_width() - return the address width used in the
3563 *			       configuration detection command.
3564 * @nor:	pointer to a 'struct spi_nor'
3565 * @settings:	configuration detection command descriptor, dword1
3566 */
3567static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
3568{
3569	switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
3570	case SMPT_CMD_ADDRESS_LEN_0:
3571		return 0;
3572	case SMPT_CMD_ADDRESS_LEN_3:
3573		return 3;
3574	case SMPT_CMD_ADDRESS_LEN_4:
3575		return 4;
3576	case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
3577		/* fall through */
3578	default:
3579		return nor->addr_width;
3580	}
3581}
3582
3583/**
3584 * spi_nor_smpt_read_dummy() - return the configuration detection command read
3585 *			       latency, in clock cycles.
3586 * @nor:	pointer to a 'struct spi_nor'
3587 * @settings:	configuration detection command descriptor, dword1
3588 *
3589 * Return: the number of dummy cycles for an SMPT read
3590 */
3591static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
3592{
3593	u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
3594
3595	if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
3596		return nor->read_dummy;
3597	return read_dummy;
3598}
3599
/**
 * spi_nor_get_map_in_use() - get the configuration map in use
 * @nor:	pointer to a 'struct spi_nor'
 * @smpt:	pointer to the sector map parameter table
 * @smpt_len:	sector map parameter table length
 *
 * Runs the optional configuration-detection commands found at the start of
 * the SMPT to build a map identifier, then scans the remaining map
 * descriptors for the one matching that identifier. The flash's read
 * settings are temporarily overridden for the detection reads and restored
 * before returning.
 *
 * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
 */
static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
					 u8 smpt_len)
{
	const u32 *ret;
	u8 *buf;
	u32 addr;
	int err;
	u8 i;
	u8 addr_width, read_opcode, read_dummy;
	u8 read_data_mask, map_id;

	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* Save the current read settings; restored at 'out'. */
	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;
	read_opcode = nor->read_opcode;

	map_id = 0;
	/* Determine if there are any optional Detection Command Descriptors */
	for (i = 0; i < smpt_len; i += 2) {
		/* A map descriptor ends the command-descriptor section. */
		if (smpt[i] & SMPT_DESC_TYPE_MAP)
			break;

		/* Each command descriptor is two DWORDs: settings + address. */
		read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
		nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
		nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
		nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
		addr = smpt[i + 1];

		err = spi_nor_read_raw(nor, addr, 1, buf);
		if (err) {
			ret = ERR_PTR(err);
			goto out;
		}

		/*
		 * Build an index value that is used to select the Sector Map
		 * Configuration that is currently in use.
		 */
		map_id = map_id << 1 | !!(*buf & read_data_mask);
	}

	/*
	 * If command descriptors are provided, they always precede map
	 * descriptors in the table. There is no need to start the iteration
	 * over smpt array all over again.
	 *
	 * Find the matching configuration map.
	 */
	ret = ERR_PTR(-EINVAL);
	while (i < smpt_len) {
		if (SMPT_MAP_ID(smpt[i]) == map_id) {
			ret = smpt + i;
			break;
		}

		/*
		 * If there are no more configuration map descriptors and no
		 * configuration ID matched the configuration identifier, the
		 * sector address map is unknown.
		 */
		if (smpt[i] & SMPT_DESC_END)
			break;

		/* increment the table index to the next map */
		i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
	}

	/* fall through */
out:
	kfree(buf);
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;
	nor->read_opcode = read_opcode;
	return ret;
}
3687
3688/**
3689 * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
3690 * @region:	pointer to a structure that describes a SPI NOR erase region
3691 * @erase:	pointer to a structure that describes a SPI NOR erase type
3692 * @erase_type:	erase type bitmask
3693 */
3694static void
3695spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
3696			     const struct spi_nor_erase_type *erase,
3697			     const u8 erase_type)
3698{
3699	int i;
3700
3701	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3702		if (!(erase_type & BIT(i)))
3703			continue;
3704		if (region->size & erase[i].size_mask) {
3705			spi_nor_region_mark_overlay(region);
3706			return;
3707		}
3708	}
3709}
3710
/**
 * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
 * @nor:	pointer to a 'struct spi_nor'
 * @params:     pointer to a duplicate 'struct spi_nor_flash_parameter' that is
 *              used for storing SFDP parsed data
 * @smpt:	pointer to the sector map parameter table
 *
 * Allocates and populates one erase region per SMPT map-descriptor region,
 * recomputes the uniform erase mask from the erase types common to all
 * regions, and invalidates the BFPT erase types that this configuration map
 * does not support.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int
spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
				   struct spi_nor_flash_parameter *params,
				   const u32 *smpt)
{
	struct spi_nor_erase_map *map = &params->erase_map;
	struct spi_nor_erase_type *erase = map->erase_type;
	struct spi_nor_erase_region *region;
	u64 offset;
	u32 region_count;
	int i, j;
	u8 uniform_erase_type, save_uniform_erase_type;
	u8 erase_type, regions_erase_type;

	region_count = SMPT_MAP_REGION_COUNT(*smpt);
	/*
	 * The regions will be freed when the driver detaches from the
	 * device.
	 */
	region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
			      GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	map->regions = region;

	/* Start from "all erase types" and intersect with each region below. */
	uniform_erase_type = 0xff;
	regions_erase_type = 0;
	offset = 0;
	/* Populate regions. */
	for (i = 0; i < region_count; i++) {
		j = i + 1; /* index for the region dword */
		region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
		erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
		/* Low bits of the offset carry the region's erase-type mask. */
		region[i].offset = offset | erase_type;

		spi_nor_region_check_overlay(&region[i], erase, erase_type);

		/*
		 * Save the erase types that are supported in all regions and
		 * can erase the entire flash memory.
		 */
		uniform_erase_type &= erase_type;

		/*
		 * regions_erase_type mask will indicate all the erase types
		 * supported in this configuration map.
		 */
		regions_erase_type |= erase_type;

		/* Next region starts right after this one (flags stripped). */
		offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
			 region[i].size;
	}

	save_uniform_erase_type = map->uniform_erase_type;
	map->uniform_erase_type = spi_nor_sort_erase_mask(map,
							  uniform_erase_type);

	if (!regions_erase_type) {
		/*
		 * Roll back to the previous uniform_erase_type mask, SMPT is
		 * broken.
		 */
		map->uniform_erase_type = save_uniform_erase_type;
		return -EINVAL;
	}

	/*
	 * BFPT advertises all the erase types supported by all the possible
	 * map configurations. Mask out the erase types that are not supported
	 * by the current map configuration.
	 */
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
		if (!(regions_erase_type & BIT(erase[i].idx)))
			spi_nor_set_erase_type(&erase[i], 0, 0xFF);

	spi_nor_region_mark_end(&region[i - 1]);

	return 0;
}
3799
3800/**
3801 * spi_nor_parse_smpt() - parse Sector Map Parameter Table
3802 * @nor:		pointer to a 'struct spi_nor'
3803 * @smpt_header:	sector map parameter table header
3804 * @params:		pointer to a duplicate 'struct spi_nor_flash_parameter'
3805 *                      that is used for storing SFDP parsed data
3806 *
3807 * This table is optional, but when available, we parse it to identify the
3808 * location and size of sectors within the main data array of the flash memory
3809 * device and to identify which Erase Types are supported by each sector.
3810 *
3811 * Return: 0 on success, -errno otherwise.
3812 */
3813static int spi_nor_parse_smpt(struct spi_nor *nor,
3814			      const struct sfdp_parameter_header *smpt_header,
3815			      struct spi_nor_flash_parameter *params)
3816{
3817	const u32 *sector_map;
3818	u32 *smpt;
3819	size_t len;
3820	u32 addr;
3821	int i, ret;
3822
3823	/* Read the Sector Map Parameter Table. */
3824	len = smpt_header->length * sizeof(*smpt);
3825	smpt = kmalloc(len, GFP_KERNEL);
3826	if (!smpt)
3827		return -ENOMEM;
3828
3829	addr = SFDP_PARAM_HEADER_PTP(smpt_header);
3830	ret = spi_nor_read_sfdp(nor, addr, len, smpt);
3831	if (ret)
3832		goto out;
3833
3834	/* Fix endianness of the SMPT DWORDs. */
3835	for (i = 0; i < smpt_header->length; i++)
3836		smpt[i] = le32_to_cpu(smpt[i]);
3837
3838	sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
3839	if (IS_ERR(sector_map)) {
3840		ret = PTR_ERR(sector_map);
3841		goto out;
3842	}
3843
3844	ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
3845	if (ret)
3846		goto out;
3847
3848	spi_nor_regions_sort_erase_types(&params->erase_map);
3849	/* fall through */
3850out:
3851	kfree(smpt);
3852	return ret;
3853}
3854
#define SFDP_4BAIT_DWORD_MAX	2

/*
 * Pairs a SPI NOR hardware capability with the bit in DWORD1 of the 4-Byte
 * Address Instruction Table (4BAIT) that advertises 4-byte-address support
 * for the corresponding command.
 */
struct sfdp_4bait {
	/* The hardware capability. */
	u32		hwcaps;

	/*
	 * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
	 * the associated 4-byte address op code is supported.
	 */
	u32		supported_bit;
};
3867
3868/**
3869 * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
3870 * @nor:		pointer to a 'struct spi_nor'.
3871 * @param_header:	pointer to the 'struct sfdp_parameter_header' describing
3872 *			the 4-Byte Address Instruction Table length and version.
3873 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be updated.
3874 *
3875 * Return: 0 on success, -errno otherwise.
3876 */
static int spi_nor_parse_4bait(struct spi_nor *nor,
			       const struct sfdp_parameter_header *param_header,
			       struct spi_nor_flash_parameter *params)
{
	static const struct sfdp_4bait reads[] = {
		{ SNOR_HWCAPS_READ,		BIT(0) },
		{ SNOR_HWCAPS_READ_FAST,	BIT(1) },
		{ SNOR_HWCAPS_READ_1_1_2,	BIT(2) },
		{ SNOR_HWCAPS_READ_1_2_2,	BIT(3) },
		{ SNOR_HWCAPS_READ_1_1_4,	BIT(4) },
		{ SNOR_HWCAPS_READ_1_4_4,	BIT(5) },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	BIT(13) },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	BIT(14) },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	BIT(15) },
	};
	static const struct sfdp_4bait programs[] = {
		{ SNOR_HWCAPS_PP,		BIT(6) },
		{ SNOR_HWCAPS_PP_1_1_4,		BIT(7) },
		{ SNOR_HWCAPS_PP_1_4_4,		BIT(8) },
	};
	/* Erase Types are matched by array index below; hwcaps is unused. */
	static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
		{ 0u /* not used */,		BIT(9) },
		{ 0u /* not used */,		BIT(10) },
		{ 0u /* not used */,		BIT(11) },
		{ 0u /* not used */,		BIT(12) },
	};
	struct spi_nor_pp_command *params_pp = params->page_programs;
	struct spi_nor_erase_map *map = &params->erase_map;
	struct spi_nor_erase_type *erase_type = map->erase_type;
	u32 *dwords;
	size_t len;
	u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
	int i, ret;

	/* Sanity-check the table revision and minimum length (2 DWORDs). */
	if (param_header->major != SFDP_JESD216_MAJOR ||
	    param_header->length < SFDP_4BAIT_DWORD_MAX)
		return -EINVAL;

	/* Read the 4-byte Address Instruction Table. */
	len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;

	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
	dwords = kmalloc(len, GFP_KERNEL);
	if (!dwords)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(param_header);
	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
	if (ret)
		goto out;

	/* Fix endianness of the 4BAIT DWORDs. */
	for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
		dwords[i] = le32_to_cpu(dwords[i]);

	/*
	 * Compute the subset of (Fast) Read commands for which the 4-byte
	 * version is supported.
	 */
	discard_hwcaps = 0;
	read_hwcaps = 0;
	for (i = 0; i < ARRAY_SIZE(reads); i++) {
		const struct sfdp_4bait *read = &reads[i];

		discard_hwcaps |= read->hwcaps;
		if ((params->hwcaps.mask & read->hwcaps) &&
		    (dwords[0] & read->supported_bit))
			read_hwcaps |= read->hwcaps;
	}

	/*
	 * Compute the subset of Page Program commands for which the 4-byte
	 * version is supported.
	 */
	pp_hwcaps = 0;
	for (i = 0; i < ARRAY_SIZE(programs); i++) {
		const struct sfdp_4bait *program = &programs[i];

		/*
		 * The 4 Byte Address Instruction (Optional) Table is the only
		 * SFDP table that indicates support for Page Program Commands.
		 * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
		 * authority for specifying Page Program support.
		 */
		discard_hwcaps |= program->hwcaps;
		if (dwords[0] & program->supported_bit)
			pp_hwcaps |= program->hwcaps;
	}

	/*
	 * Compute the subset of Sector Erase commands for which the 4-byte
	 * version is supported.
	 */
	erase_mask = 0;
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
		const struct sfdp_4bait *erase = &erases[i];

		if (dwords[0] & erase->supported_bit)
			erase_mask |= BIT(i);
	}

	/* Replicate the sort done for the map's erase types in BFPT. */
	erase_mask = spi_nor_sort_erase_mask(map, erase_mask);

	/*
	 * We need at least one 4-byte op code per read, program and erase
	 * operation; the .read(), .write() and .erase() hooks share the
	 * nor->addr_width value.
	 * Note: ret is 0 here, so an incomplete 4-byte instruction set is
	 * not treated as an error - we simply keep the 3-byte opcodes.
	 */
	if (!read_hwcaps || !pp_hwcaps || !erase_mask)
		goto out;

	/*
	 * Discard all operations from the 4-byte instruction set which are
	 * not supported by this memory.
	 */
	params->hwcaps.mask &= ~discard_hwcaps;
	params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);

	/* Use the 4-byte address instruction set. */
	for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
		struct spi_nor_read_command *read_cmd = &params->reads[i];

		read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
	}

	/* 4BAIT is the only SFDP table that indicates page program support. */
	if (pp_hwcaps & SNOR_HWCAPS_PP)
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
					SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
	if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
					SPINOR_OP_PP_1_1_4_4B,
					SNOR_PROTO_1_1_4);
	if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
					SPINOR_OP_PP_1_4_4_4B,
					SNOR_PROTO_1_4_4);

	/*
	 * DWORD2 packs the 4-byte erase opcodes, one byte per erase type,
	 * indexed by each type's original BFPT position (erase_type[i].idx).
	 */
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
		if (erase_mask & BIT(i))
			erase_type[i].opcode = (dwords[1] >>
						erase_type[i].idx * 8) & 0xFF;
		else
			spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
	}

	/*
	 * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
	 * later because we already did the conversion to 4byte opcodes. Also,
	 * this latest function implements a legacy quirk for the erase size of
	 * Spansion memory. However this quirk is no longer needed with new
	 * SFDP compliant memories.
	 */
	nor->addr_width = 4;
	nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;

	/* fall through */
out:
	kfree(dwords);
	return ret;
}
4039
4040/**
4041 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
4042 * @nor:		pointer to a 'struct spi_nor'
4043 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
4044 *			filled
4045 *
4046 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
4047 * specification. This is a standard which tends to be supported by almost all
4048 * (Q)SPI memory manufacturers. Those hard-coded tables allow us to learn at
4049 * runtime the main parameters needed to perform basic SPI flash operations such
4050 * as Fast Read, Page Program or Sector Erase commands.
4051 *
4052 * Return: 0 on success, -errno otherwise.
4053 */
static int spi_nor_parse_sfdp(struct spi_nor *nor,
			      struct spi_nor_flash_parameter *params)
{
	const struct sfdp_parameter_header *param_header, *bfpt_header;
	struct sfdp_parameter_header *param_headers = NULL;
	struct sfdp_header header;
	struct device *dev = nor->dev;
	size_t psize;
	int i, err;

	/* Get the SFDP header. */
	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
	if (err < 0)
		return err;

	/* Check the SFDP header version. */
	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
	    header.major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Verify that the first and only mandatory parameter header is a
	 * Basic Flash Parameter Table header as specified in JESD216.
	 */
	bfpt_header = &header.bfpt_header;
	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
	    bfpt_header->major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Allocate memory then read all parameter headers with a single
	 * Read SFDP command. These parameter headers will actually be parsed
	 * twice: a first time to get the latest revision of the basic flash
	 * parameter table, then a second time to handle the supported optional
	 * tables.
	 * Hence we read the parameter headers once for all to reduce the
	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
	 * because we don't need to keep these parameter headers: the allocated
	 * memory is always released with kfree() before exiting this function.
	 */
	if (header.nph) {
		psize = header.nph * sizeof(*param_headers);

		param_headers = kmalloc(psize, GFP_KERNEL);
		if (!param_headers)
			return -ENOMEM;

		err = spi_nor_read_sfdp(nor, sizeof(header),
					psize, param_headers);
		if (err < 0) {
			dev_err(dev, "failed to read SFDP parameter headers\n");
			goto exit;
		}
	}

	/*
	 * Check other parameter headers to get the latest revision of
	 * the basic flash parameter table.
	 * After this loop, bfpt_header points either into the on-stack
	 * 'header' copy or into the param_headers[] array.
	 */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
		    param_header->major == SFDP_JESD216_MAJOR &&
		    (param_header->minor > bfpt_header->minor ||
		     (param_header->minor == bfpt_header->minor &&
		      param_header->length > bfpt_header->length)))
			bfpt_header = param_header;
	}

	err = spi_nor_parse_bfpt(nor, bfpt_header, params);
	if (err)
		goto exit;

	/* Parse optional parameter tables. */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		switch (SFDP_PARAM_HEADER_ID(param_header)) {
		case SFDP_SECTOR_MAP_ID:
			err = spi_nor_parse_smpt(nor, param_header, params);
			break;

		case SFDP_4BAIT_ID:
			err = spi_nor_parse_4bait(nor, param_header, params);
			break;

		default:
			/* Unknown or unsupported table: silently skip it. */
			break;
		}

		if (err) {
			dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
				 SFDP_PARAM_HEADER_ID(param_header));
			/*
			 * Let's not drop all information we extracted so far
			 * if optional table parsers fail. In case of failing,
			 * each optional parser is responsible to roll back to
			 * the previously known spi_nor data.
			 */
			err = 0;
		}
	}

exit:
	kfree(param_headers);
	return err;
}
4162
4163static int spi_nor_select_read(struct spi_nor *nor,
4164			       u32 shared_hwcaps)
4165{
4166	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
4167	const struct spi_nor_read_command *read;
4168
4169	if (best_match < 0)
4170		return -EINVAL;
4171
4172	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
4173	if (cmd < 0)
4174		return -EINVAL;
4175
4176	read = &nor->params.reads[cmd];
4177	nor->read_opcode = read->opcode;
4178	nor->read_proto = read->proto;
4179
4180	/*
4181	 * In the spi-nor framework, we don't need to make the difference
4182	 * between mode clock cycles and wait state clock cycles.
4183	 * Indeed, the value of the mode clock cycles is used by a QSPI
4184	 * flash memory to know whether it should enter or leave its 0-4-4
4185	 * (Continuous Read / XIP) mode.
4186	 * eXecution In Place is out of the scope of the mtd sub-system.
4187	 * Hence we choose to merge both mode and wait state clock cycles
4188	 * into the so called dummy clock cycles.
4189	 */
4190	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
4191	return 0;
4192}
4193
4194static int spi_nor_select_pp(struct spi_nor *nor,
4195			     u32 shared_hwcaps)
4196{
4197	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
4198	const struct spi_nor_pp_command *pp;
4199
4200	if (best_match < 0)
4201		return -EINVAL;
4202
4203	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
4204	if (cmd < 0)
4205		return -EINVAL;
4206
4207	pp = &nor->params.page_programs[cmd];
4208	nor->program_opcode = pp->opcode;
4209	nor->write_proto = pp->proto;
4210	return 0;
4211}
4212
4213/**
4214 * spi_nor_select_uniform_erase() - select optimum uniform erase type
4215 * @map:		the erase map of the SPI NOR
4216 * @wanted_size:	the erase type size to search for. Contains the value of
4217 *			info->sector_size or of the "small sector" size in case
4218 *			CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
4219 *
4220 * Once the optimum uniform sector erase command is found, disable all the
4221 * other.
4222 *
4223 * Return: pointer to erase type on success, NULL otherwise.
4224 */
4225static const struct spi_nor_erase_type *
4226spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
4227			     const u32 wanted_size)
4228{
4229	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
4230	int i;
4231	u8 uniform_erase_type = map->uniform_erase_type;
4232
4233	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
4234		if (!(uniform_erase_type & BIT(i)))
4235			continue;
4236
4237		tested_erase = &map->erase_type[i];
4238
4239		/*
4240		 * If the current erase size is the one, stop here:
4241		 * we have found the right uniform Sector Erase command.
4242		 */
4243		if (tested_erase->size == wanted_size) {
4244			erase = tested_erase;
4245			break;
4246		}
4247
4248		/*
4249		 * Otherwise, the current erase size is still a valid canditate.
4250		 * Select the biggest valid candidate.
4251		 */
4252		if (!erase && tested_erase->size)
4253			erase = tested_erase;
4254			/* keep iterating to find the wanted_size */
4255	}
4256
4257	if (!erase)
4258		return NULL;
4259
4260	/* Disable all other Sector Erase commands. */
4261	map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
4262	map->uniform_erase_type |= BIT(erase - map->erase_type);
4263	return erase;
4264}
4265
4266static int spi_nor_select_erase(struct spi_nor *nor)
4267{
4268	struct spi_nor_erase_map *map = &nor->params.erase_map;
4269	const struct spi_nor_erase_type *erase = NULL;
4270	struct mtd_info *mtd = &nor->mtd;
4271	u32 wanted_size = nor->info->sector_size;
4272	int i;
4273
4274	/*
4275	 * The previous implementation handling Sector Erase commands assumed
4276	 * that the SPI flash memory has an uniform layout then used only one
4277	 * of the supported erase sizes for all Sector Erase commands.
4278	 * So to be backward compatible, the new implementation also tries to
4279	 * manage the SPI flash memory as uniform with a single erase sector
4280	 * size, when possible.
4281	 */
4282#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
4283	/* prefer "small sector" erase if possible */
4284	wanted_size = 4096u;
4285#endif
4286
4287	if (spi_nor_has_uniform_erase(nor)) {
4288		erase = spi_nor_select_uniform_erase(map, wanted_size);
4289		if (!erase)
4290			return -EINVAL;
4291		nor->erase_opcode = erase->opcode;
4292		mtd->erasesize = erase->size;
4293		return 0;
4294	}
4295
4296	/*
4297	 * For non-uniform SPI flash memory, set mtd->erasesize to the
4298	 * maximum erase sector size. No need to set nor->erase_opcode.
4299	 */
4300	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
4301		if (map->erase_type[i].size) {
4302			erase = &map->erase_type[i];
4303			break;
4304		}
4305	}
4306
4307	if (!erase)
4308		return -EINVAL;
4309
4310	mtd->erasesize = erase->size;
4311	return 0;
4312}
4313
4314static int spi_nor_default_setup(struct spi_nor *nor,
4315				 const struct spi_nor_hwcaps *hwcaps)
4316{
4317	struct spi_nor_flash_parameter *params = &nor->params;
4318	u32 ignored_mask, shared_mask;
4319	int err;
4320
4321	/*
4322	 * Keep only the hardware capabilities supported by both the SPI
4323	 * controller and the SPI flash memory.
4324	 */
4325	shared_mask = hwcaps->mask & params->hwcaps.mask;
4326
4327	if (nor->spimem) {
4328		/*
4329		 * When called from spi_nor_probe(), all caps are set and we
4330		 * need to discard some of them based on what the SPI
4331		 * controller actually supports (using spi_mem_supports_op()).
4332		 */
4333		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
4334	} else {
4335		/*
4336		 * SPI n-n-n protocols are not supported when the SPI
4337		 * controller directly implements the spi_nor interface.
4338		 * Yet another reason to switch to spi-mem.
4339		 */
4340		ignored_mask = SNOR_HWCAPS_X_X_X;
4341		if (shared_mask & ignored_mask) {
4342			dev_dbg(nor->dev,
4343				"SPI n-n-n protocols are not supported.\n");
4344			shared_mask &= ~ignored_mask;
4345		}
4346	}
4347
4348	/* Select the (Fast) Read command. */
4349	err = spi_nor_select_read(nor, shared_mask);
4350	if (err) {
4351		dev_err(nor->dev,
4352			"can't select read settings supported by both the SPI controller and memory.\n");
4353		return err;
4354	}
4355
4356	/* Select the Page Program command. */
4357	err = spi_nor_select_pp(nor, shared_mask);
4358	if (err) {
4359		dev_err(nor->dev,
4360			"can't select write settings supported by both the SPI controller and memory.\n");
4361		return err;
4362	}
4363
4364	/* Select the Sector Erase command. */
4365	err = spi_nor_select_erase(nor);
4366	if (err) {
4367		dev_err(nor->dev,
4368			"can't select erase settings supported by both the SPI controller and memory.\n");
4369		return err;
4370	}
4371
4372	return 0;
4373}
4374
4375static int spi_nor_setup(struct spi_nor *nor,
4376			 const struct spi_nor_hwcaps *hwcaps)
4377{
4378	if (!nor->params.setup)
4379		return 0;
4380
4381	return nor->params.setup(nor, hwcaps);
4382}
4383
/* Install the Macronix-specific Quad Enable and 4-byte mode handlers. */
static void macronix_set_default_init(struct spi_nor *nor)
{
	nor->params.quad_enable = macronix_quad_enable;
	nor->params.set_4byte = macronix_set_4byte;
}
4389
/*
 * ST/Micron defaults: flag lock support, use the ST/Micron 4-byte mode
 * handler, and clear quad_enable (presumably no QE bit is needed on these
 * parts - the spansion default set earlier would otherwise apply).
 */
static void st_micron_set_default_init(struct spi_nor *nor)
{
	nor->flags |= SNOR_F_HAS_LOCK;
	nor->params.quad_enable = NULL;
	nor->params.set_4byte = st_micron_set_4byte;
}
4396
/* Install the Winbond-specific 4-byte address mode handler. */
static void winbond_set_default_init(struct spi_nor *nor)
{
	nor->params.set_4byte = winbond_set_4byte;
}
4401
4402/**
4403 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
4404 * settings based on MFR register and ->default_init() hook.
4405 * @nor:	pointer to a 'struct spi-nor'.
4406 */
4407static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
4408{
4409	/* Init flash parameters based on MFR */
4410	switch (JEDEC_MFR(nor->info)) {
4411	case SNOR_MFR_MACRONIX:
4412		macronix_set_default_init(nor);
4413		break;
4414
4415	case SNOR_MFR_ST:
4416	case SNOR_MFR_MICRON:
4417		st_micron_set_default_init(nor);
4418		break;
4419
4420	case SNOR_MFR_WINBOND:
4421		winbond_set_default_init(nor);
4422		break;
4423
4424	default:
4425		break;
4426	}
4427
4428	if (nor->info->fixups && nor->info->fixups->default_init)
4429		nor->info->fixups->default_init(nor);
4430}
4431
4432/**
4433 * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
4434 * based on JESD216 SFDP standard.
4435 * @nor:	pointer to a 'struct spi-nor'.
4436 *
4437 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
4438 * legacy flash parameters and settings will be restored.
4439 */
4440static void spi_nor_sfdp_init_params(struct spi_nor *nor)
4441{
4442	struct spi_nor_flash_parameter sfdp_params;
4443
4444	memcpy(&sfdp_params, &nor->params, sizeof(sfdp_params));
4445
4446	if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
4447		nor->addr_width = 0;
4448		nor->flags &= ~SNOR_F_4B_OPCODES;
4449	} else {
4450		memcpy(&nor->params, &sfdp_params, sizeof(nor->params));
4451	}
4452}
4453
4454/**
4455 * spi_nor_info_init_params() - Initialize the flash's parameters and settings
4456 * based on nor->info data.
4457 * @nor:	pointer to a 'struct spi-nor'.
4458 */
static void spi_nor_info_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = &nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);
	u8 i, erase_mask;

	/* Initialize legacy flash parameters and settings. */
	params->quad_enable = spansion_quad_enable;
	params->set_4byte = spansion_set_4byte;
	params->setup = spi_nor_default_setup;

	/* Set SPI NOR sizes. */
	params->size = (u64)info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Default to Fast Read for DT and non-DT platform devices. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

		/* Mask out Fast Read if not requested at DT instantiation. */
		if (np && !of_property_read_bool(np, "m25p,fast-read"))
			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
	}

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	/*
	 * Sector Erase settings. Sort Erase Types in ascending order, with the
	 * smallest erase size starting at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
	/* SECT_4K_PMC takes precedence over SECT_4K when both flags are set. */
	if (info->flags & SECT_4K_PMC) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K_PMC);
		i++;
	} else if (info->flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	/* The full sector erase command is always supported. */
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
4544
4545static void spansion_post_sfdp_fixups(struct spi_nor *nor)
4546{
4547	struct mtd_info *mtd = &nor->mtd;
4548
4549	if (mtd->size <= SZ_16M)
4550		return;
4551
4552	nor->flags |= SNOR_F_4B_OPCODES;
4553	/* No small sector erase for 4-byte command set */
4554	nor->erase_opcode = SPINOR_OP_SE;
4555	nor->mtd.erasesize = nor->info->sector_size;
4556}
4557
/* Xilinx S3AN flashes use a dedicated setup hook (see SPI_S3AN usage). */
static void s3an_post_sfdp_fixups(struct spi_nor *nor)
{
	nor->params.setup = s3an_nor_setup;
}
4562
4563/**
4564 * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
4565 * after SFDP has been parsed (is also called for SPI NORs that do not
4566 * support RDSFDP).
4567 * @nor:	pointer to a 'struct spi_nor'
4568 *
4569 * Typically used to tweak various parameters that could not be extracted by
4570 * other means (i.e. when information provided by the SFDP/flash_info tables
4571 * are incomplete or wrong).
4572 */
4573static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
4574{
4575	switch (JEDEC_MFR(nor->info)) {
4576	case SNOR_MFR_SPANSION:
4577		spansion_post_sfdp_fixups(nor);
4578		break;
4579
4580	default:
4581		break;
4582	}
4583
4584	if (nor->info->flags & SPI_S3AN)
4585		s3an_post_sfdp_fixups(nor);
4586
4587	if (nor->info->fixups && nor->info->fixups->post_sfdp)
4588		nor->info->fixups->post_sfdp(nor);
4589}
4590
4591/**
4592 * spi_nor_late_init_params() - Late initialization of default flash parameters.
4593 * @nor:	pointer to a 'struct spi_nor'
4594 *
4595 * Used to set default flash parameters and settings when the ->default_init()
4596 * hook or the SFDP parser let voids.
4597 */
4598static void spi_nor_late_init_params(struct spi_nor *nor)
4599{
4600	/*
4601	 * NOR protection support. When locking_ops are not provided, we pick
4602	 * the default ones.
4603	 */
4604	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params.locking_ops)
4605		nor->params.locking_ops = &stm_locking_ops;
4606}
4607
4608/**
4609 * spi_nor_init_params() - Initialize the flash's parameters and settings.
4610 * @nor:	pointer to a 'struct spi-nor'.
4611 *
4612 * The flash parameters and settings are initialized based on a sequence of
4613 * calls that are ordered by priority:
4614 *
4615 * 1/ Default flash parameters initialization. The initializations are done
4616 *    based on nor->info data:
4617 *		spi_nor_info_init_params()
4618 *
4619 * which can be overwritten by:
4620 * 2/ Manufacturer flash parameters initialization. The initializations are
4621 *    done based on MFR register, or when the decisions can not be done solely
4622 *    based on MFR, by using specific flash_info tweeks, ->default_init():
4623 *		spi_nor_manufacturer_init_params()
4624 *
4625 * which can be overwritten by:
4626 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
4627 *    should be more accurate than the above.
4628 *		spi_nor_sfdp_init_params()
4629 *
4630 *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
4631 *    the flash parameters and settings immediately after parsing the Basic
4632 *    Flash Parameter Table.
4633 *
4634 * which can be overwritten by:
4635 * 4/ Post SFDP flash parameters initialization. Used to tweak various
4636 *    parameters that could not be extracted by other means (i.e. when
4637 *    information provided by the SFDP/flash_info tables are incomplete or
4638 *    wrong).
4639 *		spi_nor_post_sfdp_fixups()
4640 *
4641 * 5/ Late default flash parameters initialization, used when the
4642 * ->default_init() hook or the SFDP parser do not set specific params.
4643 *		spi_nor_late_init_params()
4644 */
4645static void spi_nor_init_params(struct spi_nor *nor)
4646{
4647	spi_nor_info_init_params(nor);
4648
4649	spi_nor_manufacturer_init_params(nor);
4650
4651	if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
4652	    !(nor->info->flags & SPI_NOR_SKIP_SFDP))
4653		spi_nor_sfdp_init_params(nor);
4654
4655	spi_nor_post_sfdp_fixups(nor);
4656
4657	spi_nor_late_init_params(nor);
4658}
4659
4660/**
4661 * spi_nor_quad_enable() - enable Quad I/O if needed.
4662 * @nor:                pointer to a 'struct spi_nor'
4663 *
4664 * Return: 0 on success, -errno otherwise.
4665 */
4666static int spi_nor_quad_enable(struct spi_nor *nor)
4667{
4668	if (!nor->params.quad_enable)
4669		return 0;
4670
4671	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
4672	      spi_nor_get_protocol_width(nor->write_proto) == 4))
4673		return 0;
4674
4675	return nor->params.quad_enable(nor);
4676}
4677
4678static int spi_nor_init(struct spi_nor *nor)
4679{
4680	int err;
4681
4682	if (nor->clear_sr_bp) {
4683		if (nor->params.quad_enable == spansion_quad_enable)
4684			nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
4685
4686		err = nor->clear_sr_bp(nor);
4687		if (err) {
4688			dev_err(nor->dev,
4689				"fail to clear block protection bits\n");
4690			return err;
4691		}
4692	}
4693
4694	err = spi_nor_quad_enable(nor);
4695	if (err) {
4696		dev_err(nor->dev, "quad mode not supported\n");
4697		return err;
4698	}
4699
4700	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
4701		/*
4702		 * If the RESET# pin isn't hooked up properly, or the system
4703		 * otherwise doesn't perform a reset command in the boot
4704		 * sequence, it's impossible to 100% protect against unexpected
4705		 * reboots (e.g., crashes). Warn the user (or hopefully, system
4706		 * designer) that this is bad.
4707		 */
4708		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
4709			  "enabling reset hack; may not recover from unexpected reboots\n");
4710		nor->params.set_4byte(nor, true);
4711	}
4712
4713	return 0;
4714}
4715
4716/* mtd resume handler */
4717static void spi_nor_resume(struct mtd_info *mtd)
4718{
4719	struct spi_nor *nor = mtd_to_spi_nor(mtd);
4720	struct device *dev = nor->dev;
4721	int ret;
4722
4723	/* re-initialize the nor chip */
4724	ret = spi_nor_init(nor);
4725	if (ret)
4726		dev_err(dev, "resume() failed\n");
4727}
4728
4729void spi_nor_restore(struct spi_nor *nor)
4730{
4731	/* restore the addressing mode */
4732	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
4733	    nor->flags & SNOR_F_BROKEN_RESET)
4734		nor->params.set_4byte(nor, false);
4735}
4736EXPORT_SYMBOL_GPL(spi_nor_restore);
4737
4738static const struct flash_info *spi_nor_match_id(const char *name)
4739{
4740	const struct flash_info *id = spi_nor_ids;
4741
4742	while (id->name) {
4743		if (!strcmp(name, id->name))
4744			return id;
4745		id++;
4746	}
4747	return NULL;
4748}
4749
4750static int spi_nor_set_addr_width(struct spi_nor *nor)
4751{
4752	if (nor->addr_width) {
4753		/* already configured from SFDP */
4754	} else if (nor->info->addr_width) {
4755		nor->addr_width = nor->info->addr_width;
4756	} else if (nor->mtd.size > 0x1000000) {
4757		/* enable 4-byte addressing if the device exceeds 16MiB */
4758		nor->addr_width = 4;
4759	} else {
4760		nor->addr_width = 3;
4761	}
4762
4763	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
4764		dev_err(nor->dev, "address width is too large: %u\n",
4765			nor->addr_width);
4766		return -EINVAL;
4767	}
4768
4769	/* Set 4byte opcodes when possible. */
4770	if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
4771	    !(nor->flags & SNOR_F_HAS_4BAIT))
4772		spi_nor_set_4byte_opcodes(nor);
4773
4774	return 0;
4775}
4776
4777static void spi_nor_debugfs_init(struct spi_nor *nor,
4778				 const struct flash_info *info)
4779{
4780	struct mtd_info *mtd = &nor->mtd;
4781
4782	mtd->dbg.partname = info->name;
4783	mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
4784					 info->id_len, info->id);
4785}
4786
4787static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
4788						       const char *name)
4789{
4790	const struct flash_info *info = NULL;
4791
4792	if (name)
4793		info = spi_nor_match_id(name);
4794	/* Try to auto-detect if chip name wasn't specified or not found */
4795	if (!info)
4796		info = spi_nor_read_id(nor);
4797	if (IS_ERR_OR_NULL(info))
4798		return ERR_PTR(-ENOENT);
4799
4800	/*
4801	 * If caller has specified name of flash model that can normally be
4802	 * detected using JEDEC, let's verify it.
4803	 */
4804	if (name && info->id_len) {
4805		const struct flash_info *jinfo;
4806
4807		jinfo = spi_nor_read_id(nor);
4808		if (IS_ERR(jinfo)) {
4809			return jinfo;
4810		} else if (jinfo != info) {
4811			/*
4812			 * JEDEC knows better, so overwrite platform ID. We
4813			 * can't trust partitions any longer, but we'll let
4814			 * mtd apply them anyway, since some partitions may be
4815			 * marked read-only, and we don't want to lose that
4816			 * information, even if it's not 100% accurate.
4817			 */
4818			dev_warn(nor->dev, "found %s, expected %s\n",
4819				 jinfo->name, info->name);
4820			info = jinfo;
4821		}
4822	}
4823
4824	return info;
4825}
4826
/**
 * spi_nor_scan() - identify and initialize a SPI NOR flash
 * @nor: caller-allocated spi_nor structure; nor->dev (and nor->spimem or
 *       the legacy hooks) must already be set up
 * @name: chip name from platform data/modalias, or NULL to rely purely on
 *        JEDEC auto-detection
 * @hwcaps: hardware (controller) capabilities to intersect with the
 *          flash capabilities when selecting protocols
 *
 * Detects the flash, initializes nor->params (from flash_info and SFDP),
 * fills in the embedded mtd_info and its callbacks, selects opcodes,
 * protocols and address width, and sends the init commands to the device.
 * On success the caller may register nor->mtd.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	struct spi_nor_flash_parameter *params = &nor->params;
	int ret;
	int i;

	/* Sanity-check the hooks/ops provided by the caller. */
	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	/* Match by name and/or JEDEC READ ID (may issue SPI transfers). */
	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	spi_nor_debugfs_init(nor, info);

	mutex_init(&nor->lock);

	/*
	 * Make sure the XSR_RDY flag is set before calling
	 * spi_nor_wait_till_ready(). Xilinx S3AN share MFR
	 * with Atmel spi-nor
	 */
	if (info->flags & SPI_NOR_XSR_RDY)
		nor->flags |=  SNOR_F_READY_XSR_RDY;

	if (info->flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	/*
	 * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
	 * with the software protection bits set.
	 */
	if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
	    JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
	    JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
	    nor->info->flags & SPI_NOR_HAS_LOCK)
		nor->clear_sr_bp = spi_nor_clear_sr_bp;

	/* Init flash parameters based on flash_info struct and SFDP */
	spi_nor_init_params(nor);

	/* Populate the mtd_info fields and operation callbacks. */
	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = params->size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_resume = spi_nor_resume;

	/* Only hook the locking ops when the flash actually supports them. */
	if (nor->params.locking_ops) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	/* sst nor chips use AAI word program */
	if (info->flags & SST_WRITE)
		mtd->_write = sst_write;
	else
		mtd->_write = spi_nor_write;

	/* Translate flash_info flags into runtime spi_nor flags. */
	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB)
		nor->flags |= SNOR_F_HAS_SR_TB;
	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = params->page_size;
	mtd->writebufsize = nor->page_size;

	/* DT can flag boards whose reset line can't reset the flash. */
	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	if (info->flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	/* Must run after spi_nor_setup() so the 4B flags are final. */
	ret = spi_nor_set_addr_width(nor);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
			(long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
4981
4982static int spi_nor_probe(struct spi_mem *spimem)
4983{
4984	struct spi_device *spi = spimem->spi;
4985	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
4986	struct spi_nor *nor;
4987	/*
4988	 * Enable all caps by default. The core will mask them after
4989	 * checking what's really supported using spi_mem_supports_op().
4990	 */
4991	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
4992	char *flash_name;
4993	int ret;
4994
4995	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
4996	if (!nor)
4997		return -ENOMEM;
4998
4999	nor->spimem = spimem;
5000	nor->dev = &spi->dev;
5001	spi_nor_set_flash_node(nor, spi->dev.of_node);
5002
5003	spi_mem_set_drvdata(spimem, nor);
5004
5005	if (data && data->name)
5006		nor->mtd.name = data->name;
5007
5008	if (!nor->mtd.name)
5009		nor->mtd.name = spi_mem_get_name(spimem);
5010
5011	/*
5012	 * For some (historical?) reason many platforms provide two different
5013	 * names in flash_platform_data: "name" and "type". Quite often name is
5014	 * set to "m25p80" and then "type" provides a real chip name.
5015	 * If that's the case, respect "type" and ignore a "name".
5016	 */
5017	if (data && data->type)
5018		flash_name = data->type;
5019	else if (!strcmp(spi->modalias, "spi-nor"))
5020		flash_name = NULL; /* auto-detect */
5021	else
5022		flash_name = spi->modalias;
5023
5024	ret = spi_nor_scan(nor, flash_name, &hwcaps);
5025	if (ret)
5026		return ret;
5027
5028	/*
5029	 * None of the existing parts have > 512B pages, but let's play safe
5030	 * and add this logic so that if anyone ever adds support for such
5031	 * a NOR we don't end up with buffer overflows.
5032	 */
5033	if (nor->page_size > PAGE_SIZE) {
5034		nor->bouncebuf_size = nor->page_size;
5035		devm_kfree(nor->dev, nor->bouncebuf);
5036		nor->bouncebuf = devm_kmalloc(nor->dev,
5037					      nor->bouncebuf_size,
5038					      GFP_KERNEL);
5039		if (!nor->bouncebuf)
5040			return -ENOMEM;
5041	}
5042
5043	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
5044				   data ? data->nr_parts : 0);
5045}
5046
5047static int spi_nor_remove(struct spi_mem *spimem)
5048{
5049	struct spi_nor *nor = spi_mem_get_drvdata(spimem);
5050
5051	spi_nor_restore(nor);
5052
5053	/* Clean up MTD stuff. */
5054	return mtd_device_unregister(&nor->mtd);
5055}
5056
/*
 * spi-mem shutdown: put the flash back into its reset-safe addressing
 * mode before the system goes down.
 */
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	spi_nor_restore(spi_mem_get_drvdata(spimem));
}
5063
5064/*
5065 * Do NOT add to this array without reading the following:
5066 *
5067 * Historically, many flash devices are bound to this driver by their name. But
5068 * since most of these flash are compatible to some extent, and their
5069 * differences can often be differentiated by the JEDEC read-ID command, we
5070 * encourage new users to add support to the spi-nor library, and simply bind
5071 * against a generic string here (e.g., "jedec,spi-nor").
5072 *
5073 * Many flash names are kept here in this list (as well as in spi-nor.c) to
5074 * keep them available as module aliases for existing platforms.
5075 */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
	 * hack around the fact that the SPI core does not provide uevent
	 * matching for .of_match_table
	 */
	{"spi-nor"},

	/*
	 * Entries not used in DTs that should be safe to drop after replacing
	 * them with "spi-nor" in platform data.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/*
	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
	 * should be kept for backward compatibility.
	 */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Flashes that can't be detected using JEDEC */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128" }, /* 128 Kib, 40 MHz */
	{ "mr25h256" }, /* 256 Kib, 40 MHz */
	{ "mr25h10" },  /*   1 Mib, 40 MHz */
	{ "mr25h40" },  /*   4 Mib, 40 MHz */

	/* sentinel */
	{ },
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
5120
/* Device-tree match table for the spi-nor framework driver. */
static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatibility for SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);
5130
5131/*
5132 * REVISIT: many of these chips have deep power-down modes, which
5133 * should clearly be entered on suspend() to minimize power use.
5134 * And also when they're otherwise idle...
5135 */
/* spi-mem driver glue binding probe/remove/shutdown to the core above. */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
		},
		/* legacy (non-DT) string matching, see spi_nor_dev_ids */
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	.shutdown = spi_nor_shutdown,
};
module_spi_mem_driver(spi_nor_driver);
5149
5150MODULE_LICENSE("GPL v2");
5151MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
5152MODULE_AUTHOR("Mike Lavender");
5153MODULE_DESCRIPTION("framework for SPI NOR");