1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2005, Intec Automation Inc.
4 * Copyright (C) 2014, Freescale Semiconductor, Inc.
5 */
6
7#include <linux/bitfield.h>
8#include <linux/slab.h>
9#include <linux/sort.h>
10#include <linux/mtd/spi-nor.h>
11
12#include "core.h"
13
14#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
15#define SFDP_PARAM_HEADER_PTP(p) \
16 (((p)->parameter_table_pointer[2] << 16) | \
17 ((p)->parameter_table_pointer[1] << 8) | \
18 ((p)->parameter_table_pointer[0] << 0))
19#define SFDP_PARAM_HEADER_PARAM_LEN(p) ((p)->length * 4)
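
/*
 * Worked example (illustrative values, not from any specific flash): a
 * parameter header with parameter_table_pointer = {0x30, 0x01, 0x00} gives
 * SFDP_PARAM_HEADER_PTP() = 0x000130, and a length field of 9 gives
 * SFDP_PARAM_HEADER_PARAM_LEN() = 36 bytes (9 DWORDs).
 */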
20
21#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
22#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
23#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
24#define SFDP_PROFILE1_ID 0xff05 /* xSPI Profile 1.0 table. */
25#define SFDP_SCCR_MAP_ID 0xff87 /*
26 * Status, Control and Configuration
27 * Register Map.
28 */
29
30#define SFDP_SIGNATURE 0x50444653U
31
32struct sfdp_header {
33 u32 signature; /* 0x50444653U <=> "SFDP" */
34 u8 minor;
35 u8 major;
36 u8 nph; /* 0-based number of parameter headers */
37 u8 unused;
38
39 /* Basic Flash Parameter Table. */
40 struct sfdp_parameter_header bfpt_header;
41};
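
/*
 * Note on nph: the field is 0-based, so nph = 2 means three parameter headers
 * in total: the mandatory BFPT header embedded above plus two more that
 * spi_nor_parse_sfdp() reads separately.
 */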
42
43/* Fast Read settings. */
44struct sfdp_bfpt_read {
45 /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
46 u32 hwcaps;
47
48 /*
49 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
50 * whether the Fast Read x-y-z command is supported.
51 */
52 u32 supported_dword;
53 u32 supported_bit;
54
55 /*
56 * The half-word at offset <settings_shift> in <settings_dword> BFPT DWORD
57 * encodes the op code, the number of mode clocks and the number of wait
58 * states to be used by Fast Read x-y-z command.
59 */
60 u32 settings_dword;
61 u32 settings_shift;
62
63 /* The SPI protocol for this Fast Read x-y-z command. */
64 enum spi_nor_protocol proto;
65};
66
67struct sfdp_bfpt_erase {
68 /*
69 * The half-word at offset <shift> in DWORD <dword> encodes the
70 * op code and erase sector size to be used by Sector Erase commands.
71 */
72 u32 dword;
73 u32 shift;
74};
75
76#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
77#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
78#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
79#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
80#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)
81
82#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
83#define SMPT_CMD_READ_DUMMY_SHIFT 16
84#define SMPT_CMD_READ_DUMMY(_cmd) \
85 (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
86#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL
87
88#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
89#define SMPT_CMD_READ_DATA_SHIFT 24
90#define SMPT_CMD_READ_DATA(_cmd) \
91 (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
92
93#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
94#define SMPT_CMD_OPCODE_SHIFT 8
95#define SMPT_CMD_OPCODE(_cmd) \
96 (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
97
98#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
99#define SMPT_MAP_REGION_COUNT_SHIFT 16
100#define SMPT_MAP_REGION_COUNT(_header) \
101 ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
102 SMPT_MAP_REGION_COUNT_SHIFT) + 1)
103
104#define SMPT_MAP_ID_MASK GENMASK(15, 8)
105#define SMPT_MAP_ID_SHIFT 8
106#define SMPT_MAP_ID(_header) \
107 (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
108
109#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
110#define SMPT_MAP_REGION_SIZE_SHIFT 8
111#define SMPT_MAP_REGION_SIZE(_region) \
112 (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
113 SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
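
/*
 * Example (illustrative): a region DWORD whose size field (bits 31:8) holds
 * 0xff describes a (0xff + 1) * 256 = 64 KiB region.
 */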
114
115#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
116#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
117 ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
118
119#define SMPT_DESC_TYPE_MAP BIT(1)
120#define SMPT_DESC_END BIT(0)
121
122#define SFDP_4BAIT_DWORD_MAX 2
123
124struct sfdp_4bait {
125 /* The hardware capability. */
126 u32 hwcaps;
127
128 /*
129 * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
130 * the associated 4-byte address op code is supported.
131 */
132 u32 supported_bit;
133};
134
135/**
136 * spi_nor_read_raw() - raw read of serial flash memory. The read_opcode,
137 * addr_width and read_dummy members of the struct spi_nor should be
138 * previously set.
140 * @nor: pointer to a 'struct spi_nor'
141 * @addr: offset in the serial flash memory
142 * @len: number of bytes to read
143 * @buf: buffer where the data is copied into (dma-safe memory)
144 *
145 * Return: 0 on success, -errno otherwise.
146 */
147static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
148{
149 ssize_t ret;
150
151 while (len) {
152 ret = spi_nor_read_data(nor, addr, len, buf);
153 if (ret < 0)
154 return ret;
155 if (!ret || ret > len)
156 return -EIO;
157
158 buf += ret;
159 addr += ret;
160 len -= ret;
161 }
162 return 0;
163}
164
165/**
166 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
167 * @nor: pointer to a 'struct spi_nor'
168 * @addr: offset in the SFDP area to start reading data from
169 * @len: number of bytes to read
170 * @buf: buffer where the SFDP data are copied into (dma-safe memory)
171 *
172 * Whatever the actual numbers of bytes for address and dummy cycles are
173 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
174 * followed by a 3-byte address and 8 dummy clock cycles.
175 *
176 * Return: 0 on success, -errno otherwise.
177 */
178static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
179 size_t len, void *buf)
180{
181 u8 addr_width, read_opcode, read_dummy;
182 int ret;
183
184 read_opcode = nor->read_opcode;
185 addr_width = nor->addr_width;
186 read_dummy = nor->read_dummy;
187
188 nor->read_opcode = SPINOR_OP_RDSFDP;
189 nor->addr_width = 3;
190 nor->read_dummy = 8;
191
192 ret = spi_nor_read_raw(nor, addr, len, buf);
193
194 nor->read_opcode = read_opcode;
195 nor->addr_width = addr_width;
196 nor->read_dummy = read_dummy;
197
198 return ret;
199}
200
201/**
202 * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
203 * @nor: pointer to a 'struct spi_nor'
204 * @addr: offset in the SFDP area to start reading data from
205 * @len: number of bytes to read
206 * @buf: buffer where the SFDP data are copied into
207 *
208 * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
209 * guaranteed to be dma-safe.
210 *
211 * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
212 * otherwise.
213 */
214static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
215 size_t len, void *buf)
216{
217 void *dma_safe_buf;
218 int ret;
219
220 dma_safe_buf = kmalloc(len, GFP_KERNEL);
221 if (!dma_safe_buf)
222 return -ENOMEM;
223
224 ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
225 memcpy(buf, dma_safe_buf, len);
226 kfree(dma_safe_buf);
227
228 return ret;
229}
230
231static void
232spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
233 u16 half,
234 enum spi_nor_protocol proto)
235{
236 read->num_mode_clocks = (half >> 5) & 0x07;
237 read->num_wait_states = (half >> 0) & 0x1f;
238 read->opcode = (half >> 8) & 0xff;
239 read->proto = proto;
240}
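
/*
 * Worked example (values assumed for illustration): a BFPT settings half-word
 * of 0xeb44 decodes to opcode 0xeb (bits 15:8), 2 mode clocks (bits 7:5) and
 * 4 wait states (bits 4:0).
 */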
241
242static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
243 /* Fast Read 1-1-2 */
244 {
245 SNOR_HWCAPS_READ_1_1_2,
246 BFPT_DWORD(1), BIT(16), /* Supported bit */
247 BFPT_DWORD(4), 0, /* Settings */
248 SNOR_PROTO_1_1_2,
249 },
250
251 /* Fast Read 1-2-2 */
252 {
253 SNOR_HWCAPS_READ_1_2_2,
254 BFPT_DWORD(1), BIT(20), /* Supported bit */
255 BFPT_DWORD(4), 16, /* Settings */
256 SNOR_PROTO_1_2_2,
257 },
258
259 /* Fast Read 2-2-2 */
260 {
261 SNOR_HWCAPS_READ_2_2_2,
262 BFPT_DWORD(5), BIT(0), /* Supported bit */
263 BFPT_DWORD(6), 16, /* Settings */
264 SNOR_PROTO_2_2_2,
265 },
266
267 /* Fast Read 1-1-4 */
268 {
269 SNOR_HWCAPS_READ_1_1_4,
270 BFPT_DWORD(1), BIT(22), /* Supported bit */
271 BFPT_DWORD(3), 16, /* Settings */
272 SNOR_PROTO_1_1_4,
273 },
274
275 /* Fast Read 1-4-4 */
276 {
277 SNOR_HWCAPS_READ_1_4_4,
278 BFPT_DWORD(1), BIT(21), /* Supported bit */
279 BFPT_DWORD(3), 0, /* Settings */
280 SNOR_PROTO_1_4_4,
281 },
282
283 /* Fast Read 4-4-4 */
284 {
285 SNOR_HWCAPS_READ_4_4_4,
286 BFPT_DWORD(5), BIT(4), /* Supported bit */
287 BFPT_DWORD(7), 16, /* Settings */
288 SNOR_PROTO_4_4_4,
289 },
290};
291
292static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
293 /* Erase Type 1 in DWORD8 bits[15:0] */
294 {BFPT_DWORD(8), 0},
295
296 /* Erase Type 2 in DWORD8 bits[31:16] */
297 {BFPT_DWORD(8), 16},
298
299 /* Erase Type 3 in DWORD9 bits[15:0] */
300 {BFPT_DWORD(9), 0},
301
302 /* Erase Type 4 in DWORD9 bits[31:16] */
303 {BFPT_DWORD(9), 16},
304};
305
306/**
307 * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
308 * @erase: pointer to a structure that describes a SPI NOR erase type
309 * @size: the size of the sector/block erased by the erase type
310 * @opcode: the SPI command op code to erase the sector/block
311 * @i: erase type index as sorted in the Basic Flash Parameter Table
312 *
313 * The supported Erase Types will be sorted at init in ascending order, with
314 * the smallest Erase Type size being the first member in the erase_type array
315 * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
316 * the Basic Flash Parameter Table since it will be used later on to
317 * synchronize with the supported Erase Types defined in SFDP optional tables.
318 */
319static void
320spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
321 u32 size, u8 opcode, u8 i)
322{
323 erase->idx = i;
324 spi_nor_set_erase_type(erase, size, opcode);
325}
326
327/**
328 * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
329 * @l: member in the left half of the map's erase_type array
330 * @r: member in the right half of the map's erase_type array
331 *
332 * Comparison function used in the sort() call to sort the map's erase types
333 * in ascending order, with the smallest erase type size being the first
334 * member in the sorted erase_type array.
335 *
336 * Return: the result of @l->size - @r->size
337 */
338static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
339{
340 const struct spi_nor_erase_type *left = l, *right = r;
341
342 return left->size - right->size;
343}
344
345/**
346 * spi_nor_sort_erase_mask() - sort erase mask
347 * @map: the erase map of the SPI NOR
348 * @erase_mask: the erase type mask to be sorted
349 *
350 * Replicate the sort done for the map's erase types in BFPT: sort the erase
351 * mask in ascending order with the smallest erase type size starting from
352 * BIT(0) in the sorted erase mask.
353 *
354 * Return: sorted erase mask.
355 */
356static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
357{
358 struct spi_nor_erase_type *erase_type = map->erase_type;
359 int i;
360 u8 sorted_erase_mask = 0;
361
362 if (!erase_mask)
363 return 0;
364
365 /* Replicate the sort done for the map's erase types. */
366 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
367 if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
368 sorted_erase_mask |= BIT(i);
369
370 return sorted_erase_mask;
371}
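
/*
 * Example (hypothetical BFPT ordering): if the BFPT lists a 64 KiB erase as
 * Erase Type 1 (idx 0) and a 4 KiB erase as Erase Type 2 (idx 1), the sorted
 * erase_type array places the 4 KiB type first, so an input mask of BIT(1)
 * is returned as BIT(0).
 */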
372
373/**
374 * spi_nor_regions_sort_erase_types() - sort erase types in each region
375 * @map: the erase map of the SPI NOR
376 *
377 * Function assumes that the erase types defined in the erase map are already
378 * sorted in ascending order, with the smallest erase type size being the first
379 * member in the erase_type array. It replicates the sort done for the map's
380 * erase types. Each region's erase bitmask will indicate which erase types are
381 * supported from the sorted erase types defined in the erase map.
382 * Sort all the regions' erase types at init in order to speed up the process of
383 * finding the best erase command at runtime.
384 */
385static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
386{
387 struct spi_nor_erase_region *region = map->regions;
388 u8 region_erase_mask, sorted_erase_mask;
389
390 while (region) {
391 region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
392
393 sorted_erase_mask = spi_nor_sort_erase_mask(map,
394 region_erase_mask);
395
396 /* Overwrite erase mask. */
397 region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
398 sorted_erase_mask;
399
400 region = spi_nor_region_next(region);
401 }
402}
403
404/**
405 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
406 * @nor: pointer to a 'struct spi_nor'
407 * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
408 * the Basic Flash Parameter Table length and version
409 *
410 * The Basic Flash Parameter Table is the main and only mandatory table as
411 * defined by the SFDP (JESD216) specification.
412 * It provides us with the total size (memory density) of the data array and
413 * the number of address bytes for Fast Read, Page Program and Sector Erase
414 * commands.
415 * For Fast READ commands, it also gives the number of mode clock cycles and
416 * wait states (regrouped in the number of dummy clock cycles) for each
417 * supported instruction op code.
418 * For Page Program, the page size is available since JESD216 rev A; however,
419 * the supported instruction op codes are still not provided.
420 * For Sector Erase commands, this table stores the supported instruction op
421 * codes and the associated sector sizes.
422 * Finally, the Quad Enable Requirements (QER) are also available since JESD216
423 * rev A. The QER bits encode the manufacturer dependent procedure to be
424 * executed to set the Quad Enable (QE) bit in some internal register of the
425 * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
426 * sending any Quad SPI command to the memory. Actually, setting the QE bit
427 * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
428 * and IO3 hence enabling 4 (Quad) I/O lines.
429 *
430 * Return: 0 on success, -errno otherwise.
431 */
432static int spi_nor_parse_bfpt(struct spi_nor *nor,
433 const struct sfdp_parameter_header *bfpt_header)
434{
435 struct spi_nor_flash_parameter *params = nor->params;
436 struct spi_nor_erase_map *map = &params->erase_map;
437 struct spi_nor_erase_type *erase_type = map->erase_type;
438 struct sfdp_bfpt bfpt;
439 size_t len;
440 int i, cmd, err;
441 u32 addr, val;
442 u16 half;
443 u8 erase_mask;
444
445 /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
446 if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
447 return -EINVAL;
448
449 /* Read the Basic Flash Parameter Table. */
450 len = min_t(size_t, sizeof(bfpt),
451 bfpt_header->length * sizeof(u32));
452 addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
453 memset(&bfpt, 0, sizeof(bfpt));
454 err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
455 if (err < 0)
456 return err;
457
458 /* Fix endianness of the BFPT DWORDs. */
459 le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX);
460
461 /* Number of address bytes. */
462 switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
463 case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
464 case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
465 nor->addr_width = 3;
466 break;
467
468 case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
469 nor->addr_width = 4;
470 break;
471
472 default:
473 break;
474 }
475
476 /* Flash Memory Density (in bits). */
477 val = bfpt.dwords[BFPT_DWORD(2)];
478 if (val & BIT(31)) {
479 val &= ~BIT(31);
480
481 /*
482 * Prevent overflows on params->size. Anyway, a NOR of 2^64
483 * bits is unlikely to exist so this error probably means
484 * the BFPT we are reading is corrupted/wrong.
485 */
486 if (val > 63)
487 return -EINVAL;
488
489 params->size = 1ULL << val;
490 } else {
491 params->size = val + 1;
492 }
493 params->size >>= 3; /* Convert to bytes. */
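	/*
	 * Illustrative example: DWORD2 = 0x03ffffff (bit 31 clear) encodes a
	 * density of 0x03ffffff + 1 = 2^26 bits, i.e. 8 MiB; bit 31 set with
	 * val = 26 encodes the same 2^26-bit density.
	 */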
494
495 /* Fast Read settings. */
496 for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
497 const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
498 struct spi_nor_read_command *read;
499
500 if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
501 params->hwcaps.mask &= ~rd->hwcaps;
502 continue;
503 }
504
505 params->hwcaps.mask |= rd->hwcaps;
506 cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
507 read = &params->reads[cmd];
508 half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
509 spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
510 }
511
512 /*
513 * Sector Erase settings. Reinitialize the uniform erase map using the
514 * Erase Types defined in the bfpt table.
515 */
516 erase_mask = 0;
517 memset(&params->erase_map, 0, sizeof(params->erase_map));
518 for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
519 const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
520 u32 erasesize;
521 u8 opcode;
522
523 half = bfpt.dwords[er->dword] >> er->shift;
524 erasesize = half & 0xff;
525
526 /* erasesize == 0 means this Erase Type is not supported. */
527 if (!erasesize)
528 continue;
529
530 erasesize = 1U << erasesize;
531 opcode = (half >> 8) & 0xff;
532 erase_mask |= BIT(i);
533 spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
534 opcode, i);
535 }
536 spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
537 /*
538 * Sort all the map's Erase Types in ascending order with the smallest
539 * erase size being the first member in the erase_type array.
540 */
541 sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
542 spi_nor_map_cmp_erase_type, NULL);
543 /*
544 * Sort the erase types in the uniform region in order to update the
545 * uniform_erase_type bitmask. The bitmask will be used later on when
546 * selecting the uniform erase.
547 */
548 spi_nor_regions_sort_erase_types(map);
549 map->uniform_erase_type = map->uniform_region.offset &
550 SNOR_ERASE_TYPE_MASK;
551
552 /* Stop here if not JESD216 rev A or later. */
553 if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
554 return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
555
556 /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
557 val = bfpt.dwords[BFPT_DWORD(11)];
558 val &= BFPT_DWORD11_PAGE_SIZE_MASK;
559 val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
560 params->page_size = 1U << val;
561
562 /* Quad Enable Requirements. */
563 switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
564 case BFPT_DWORD15_QER_NONE:
565 params->quad_enable = NULL;
566 break;
567
568 case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
569 /*
570 * Writing only one byte to the Status Register has the
571 * side-effect of clearing Status Register 2.
572 */
573 case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
574 /*
575 * Read Configuration Register (35h) instruction is not
576 * supported.
577 */
578 nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
579 params->quad_enable = spi_nor_sr2_bit1_quad_enable;
580 break;
581
582 case BFPT_DWORD15_QER_SR1_BIT6:
583 nor->flags &= ~SNOR_F_HAS_16BIT_SR;
584 params->quad_enable = spi_nor_sr1_bit6_quad_enable;
585 break;
586
587 case BFPT_DWORD15_QER_SR2_BIT7:
588 nor->flags &= ~SNOR_F_HAS_16BIT_SR;
589 params->quad_enable = spi_nor_sr2_bit7_quad_enable;
590 break;
591
592 case BFPT_DWORD15_QER_SR2_BIT1:
593 /*
594 * JESD216 rev B or later does not specify if writing only one
595 * byte to the Status Register clears or not the Status
596 * Register 2, so let's be cautious and keep the default
597 * assumption of a 16-bit Write Status (01h) command.
598 */
599 nor->flags |= SNOR_F_HAS_16BIT_SR;
600
601 params->quad_enable = spi_nor_sr2_bit1_quad_enable;
602 break;
603
604 default:
605 dev_dbg(nor->dev, "BFPT QER reserved value used\n");
606 break;
607 }
608
609 /* Soft Reset support. */
610 if (bfpt.dwords[BFPT_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST)
611 nor->flags |= SNOR_F_SOFT_RESET;
612
613 /* Stop here if not JESD216 rev C or later. */
614 if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
615 return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
616
617 /* 8D-8D-8D command extension. */
618 switch (bfpt.dwords[BFPT_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
619 case BFPT_DWORD18_CMD_EXT_REP:
620 nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
621 break;
622
623 case BFPT_DWORD18_CMD_EXT_INV:
624 nor->cmd_ext_type = SPI_NOR_EXT_INVERT;
625 break;
626
627 case BFPT_DWORD18_CMD_EXT_RES:
628 dev_dbg(nor->dev, "Reserved command extension used\n");
629 break;
630
631 case BFPT_DWORD18_CMD_EXT_16B:
632 dev_dbg(nor->dev, "16-bit opcodes not supported\n");
633 return -EOPNOTSUPP;
634 }
635
636 return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
637}
638
639/**
640 * spi_nor_smpt_addr_width() - return the address width used in the
641 * configuration detection command.
642 * @nor: pointer to a 'struct spi_nor'
643 * @settings: configuration detection command descriptor, dword1
644 */
645static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
646{
647 switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
648 case SMPT_CMD_ADDRESS_LEN_0:
649 return 0;
650 case SMPT_CMD_ADDRESS_LEN_3:
651 return 3;
652 case SMPT_CMD_ADDRESS_LEN_4:
653 return 4;
654 case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
655 default:
656 return nor->addr_width;
657 }
658}
659
660/**
661 * spi_nor_smpt_read_dummy() - return the configuration detection command read
662 * latency, in clock cycles.
663 * @nor: pointer to a 'struct spi_nor'
664 * @settings: configuration detection command descriptor, dword1
665 *
666 * Return: the number of dummy cycles for an SMPT read
667 */
668static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
669{
670 u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
671
672 if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
673 return nor->read_dummy;
674 return read_dummy;
675}
676
677/**
678 * spi_nor_get_map_in_use() - get the configuration map in use
679 * @nor: pointer to a 'struct spi_nor'
680 * @smpt: pointer to the sector map parameter table
681 * @smpt_len: sector map parameter table length
682 *
683 * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
684 */
685static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
686 u8 smpt_len)
687{
688 const u32 *ret;
689 u8 *buf;
690 u32 addr;
691 int err;
692 u8 i;
693 u8 addr_width, read_opcode, read_dummy;
694 u8 read_data_mask, map_id;
695
696 /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
697 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
698 if (!buf)
699 return ERR_PTR(-ENOMEM);
700
701 addr_width = nor->addr_width;
702 read_dummy = nor->read_dummy;
703 read_opcode = nor->read_opcode;
704
705 map_id = 0;
706 /* Determine if there are any optional Detection Command Descriptors */
707 for (i = 0; i < smpt_len; i += 2) {
708 if (smpt[i] & SMPT_DESC_TYPE_MAP)
709 break;
710
711 read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
712 nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
713 nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
714 nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
715 addr = smpt[i + 1];
716
717 err = spi_nor_read_raw(nor, addr, 1, buf);
718 if (err) {
719 ret = ERR_PTR(err);
720 goto out;
721 }
722
723 /*
724 * Build an index value that is used to select the Sector Map
725 * Configuration that is currently in use.
726 */
727 map_id = map_id << 1 | !!(*buf & read_data_mask);
728 }
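	/*
	 * Illustrative example: with two detection command descriptors whose
	 * reads return a set bit and then a cleared bit, map_id ends up as
	 * 0b10 = 2 and the map descriptor with SMPT_MAP_ID() == 2 is selected
	 * below.
	 */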
729
730 /*
731 * If command descriptors are provided, they always precede map
732 * descriptors in the table. There is no need to start the iteration
733 * over smpt array all over again.
734 *
735 * Find the matching configuration map.
736 */
737 ret = ERR_PTR(-EINVAL);
738 while (i < smpt_len) {
739 if (SMPT_MAP_ID(smpt[i]) == map_id) {
740 ret = smpt + i;
741 break;
742 }
743
744 /*
745 * If there are no more configuration map descriptors and no
746 * configuration ID matched the configuration identifier, the
747 * sector address map is unknown.
748 */
749 if (smpt[i] & SMPT_DESC_END)
750 break;
751
752 /* increment the table index to the next map */
753 i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
754 }
755
756 /* fall through */
757out:
758 kfree(buf);
759 nor->addr_width = addr_width;
760 nor->read_dummy = read_dummy;
761 nor->read_opcode = read_opcode;
762 return ret;
763}
764
765static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
766{
767 region->offset |= SNOR_LAST_REGION;
768}
769
770static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
771{
772 region->offset |= SNOR_OVERLAID_REGION;
773}
774
775/**
776 * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
777 * @region: pointer to a structure that describes a SPI NOR erase region
778 * @erase: pointer to a structure that describes a SPI NOR erase type
779 * @erase_type: erase type bitmask
780 */
781static void
782spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
783 const struct spi_nor_erase_type *erase,
784 const u8 erase_type)
785{
786 int i;
787
788 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
789 if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
790 continue;
791 if (region->size & erase[i].size_mask) {
792 spi_nor_region_mark_overlay(region);
793 return;
794 }
795 }
796}
797
798/**
799 * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
800 * @nor: pointer to a 'struct spi_nor'
801 * @smpt: pointer to the sector map parameter table
802 *
803 * Return: 0 on success, -errno otherwise.
804 */
805static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
806 const u32 *smpt)
807{
808 struct spi_nor_erase_map *map = &nor->params->erase_map;
809 struct spi_nor_erase_type *erase = map->erase_type;
810 struct spi_nor_erase_region *region;
811 u64 offset;
812 u32 region_count;
813 int i, j;
814 u8 uniform_erase_type, save_uniform_erase_type;
815 u8 erase_type, regions_erase_type;
816
817 region_count = SMPT_MAP_REGION_COUNT(*smpt);
818 /*
819 * The regions will be freed when the driver detaches from the
820 * device.
821 */
822 region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
823 GFP_KERNEL);
824 if (!region)
825 return -ENOMEM;
826 map->regions = region;
827
828 uniform_erase_type = 0xff;
829 regions_erase_type = 0;
830 offset = 0;
831 /* Populate regions. */
832 for (i = 0; i < region_count; i++) {
833 j = i + 1; /* index for the region dword */
834 region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
835 erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
836 region[i].offset = offset | erase_type;
837
838 spi_nor_region_check_overlay(&region[i], erase, erase_type);
839
840 /*
841 * Save the erase types that are supported in all regions and
842 * can erase the entire flash memory.
843 */
844 uniform_erase_type &= erase_type;
845
846 /*
847 * regions_erase_type mask will indicate all the erase types
848 * supported in this configuration map.
849 */
850 regions_erase_type |= erase_type;
851
852 offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
853 region[i].size;
854 }
855 spi_nor_region_mark_end(&region[i - 1]);
856
857 save_uniform_erase_type = map->uniform_erase_type;
858 map->uniform_erase_type = spi_nor_sort_erase_mask(map,
859 uniform_erase_type);
860
861 if (!regions_erase_type) {
862 /*
863 * Roll back to the previous uniform_erase_type mask, SMPT is
864 * broken.
865 */
866 map->uniform_erase_type = save_uniform_erase_type;
867 return -EINVAL;
868 }
869
870 /*
871 * BFPT advertises all the erase types supported by all the possible
872 * map configurations. Mask out the erase types that are not supported
873 * by the current map configuration.
874 */
875 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
876 if (!(regions_erase_type & BIT(erase[i].idx)))
877 spi_nor_set_erase_type(&erase[i], 0, 0xFF);
878
879 return 0;
880}
881
882/**
883 * spi_nor_parse_smpt() - parse Sector Map Parameter Table
884 * @nor: pointer to a 'struct spi_nor'
885 * @smpt_header: sector map parameter table header
886 *
887 * This table is optional, but when available, we parse it to identify the
888 * location and size of sectors within the main data array of the flash memory
889 * device and to identify which Erase Types are supported by each sector.
890 *
891 * Return: 0 on success, -errno otherwise.
892 */
893static int spi_nor_parse_smpt(struct spi_nor *nor,
894 const struct sfdp_parameter_header *smpt_header)
895{
896 const u32 *sector_map;
897 u32 *smpt;
898 size_t len;
899 u32 addr;
900 int ret;
901
902 /* Read the Sector Map Parameter Table. */
903 len = smpt_header->length * sizeof(*smpt);
904 smpt = kmalloc(len, GFP_KERNEL);
905 if (!smpt)
906 return -ENOMEM;
907
908 addr = SFDP_PARAM_HEADER_PTP(smpt_header);
909 ret = spi_nor_read_sfdp(nor, addr, len, smpt);
910 if (ret)
911 goto out;
912
913 /* Fix endianness of the SMPT DWORDs. */
914 le32_to_cpu_array(smpt, smpt_header->length);
915
916 sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
917 if (IS_ERR(sector_map)) {
918 ret = PTR_ERR(sector_map);
919 goto out;
920 }
921
922 ret = spi_nor_init_non_uniform_erase_map(nor, sector_map);
923 if (ret)
924 goto out;
925
926 spi_nor_regions_sort_erase_types(&nor->params->erase_map);
927 /* fall through */
928out:
929 kfree(smpt);
930 return ret;
931}
932
933/**
934 * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
935 * @nor: pointer to a 'struct spi_nor'.
936 * @param_header: pointer to the 'struct sfdp_parameter_header' describing
937 * the 4-Byte Address Instruction Table length and version.
938 *
939 * Return: 0 on success, -errno otherwise.
940 */
941static int spi_nor_parse_4bait(struct spi_nor *nor,
942 const struct sfdp_parameter_header *param_header)
943{
944 static const struct sfdp_4bait reads[] = {
945 { SNOR_HWCAPS_READ, BIT(0) },
946 { SNOR_HWCAPS_READ_FAST, BIT(1) },
947 { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
948 { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
949 { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
950 { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
951 { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
952 { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
953 { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
954 };
955 static const struct sfdp_4bait programs[] = {
956 { SNOR_HWCAPS_PP, BIT(6) },
957 { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
958 { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
959 };
960 static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
961 { 0u /* not used */, BIT(9) },
962 { 0u /* not used */, BIT(10) },
963 { 0u /* not used */, BIT(11) },
964 { 0u /* not used */, BIT(12) },
965 };
966 struct spi_nor_flash_parameter *params = nor->params;
967 struct spi_nor_pp_command *params_pp = params->page_programs;
968 struct spi_nor_erase_map *map = &params->erase_map;
969 struct spi_nor_erase_type *erase_type = map->erase_type;
970 u32 *dwords;
971 size_t len;
972 u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
973 int i, ret;
974
975 if (param_header->major != SFDP_JESD216_MAJOR ||
976 param_header->length < SFDP_4BAIT_DWORD_MAX)
977 return -EINVAL;
978
979 /* Read the 4-byte Address Instruction Table. */
980 len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
981
982 /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
983 dwords = kmalloc(len, GFP_KERNEL);
984 if (!dwords)
985 return -ENOMEM;
986
987 addr = SFDP_PARAM_HEADER_PTP(param_header);
988 ret = spi_nor_read_sfdp(nor, addr, len, dwords);
989 if (ret)
990 goto out;
991
992 /* Fix endianness of the 4BAIT DWORDs. */
993 le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX);
994
995 /*
996 * Compute the subset of (Fast) Read commands for which the 4-byte
997 * version is supported.
998 */
999 discard_hwcaps = 0;
1000 read_hwcaps = 0;
1001 for (i = 0; i < ARRAY_SIZE(reads); i++) {
1002 const struct sfdp_4bait *read = &reads[i];
1003
1004 discard_hwcaps |= read->hwcaps;
1005 if ((params->hwcaps.mask & read->hwcaps) &&
1006 (dwords[0] & read->supported_bit))
1007 read_hwcaps |= read->hwcaps;
1008 }
1009
1010 /*
1011 * Compute the subset of Page Program commands for which the 4-byte
1012 * version is supported.
1013 */
1014 pp_hwcaps = 0;
1015 for (i = 0; i < ARRAY_SIZE(programs); i++) {
1016 const struct sfdp_4bait *program = &programs[i];
1017
1018 /*
1019 * The 4 Byte Address Instruction (Optional) Table is the only
1020 * SFDP table that indicates support for Page Program Commands.
1021 * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
1022 * authority for specifying Page Program support.
1023 */
1024 discard_hwcaps |= program->hwcaps;
1025 if (dwords[0] & program->supported_bit)
1026 pp_hwcaps |= program->hwcaps;
1027 }
1028
1029 /*
1030 * Compute the subset of Sector Erase commands for which the 4-byte
1031 * version is supported.
1032 */
1033 erase_mask = 0;
1034 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
1035 const struct sfdp_4bait *erase = &erases[i];
1036
1037 if (dwords[0] & erase->supported_bit)
1038 erase_mask |= BIT(i);
1039 }
1040
1041 /* Replicate the sort done for the map's erase types in BFPT. */
1042 erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
1043
1044 /*
1045 * We need at least one 4-byte op code per read, program and erase
1046 * operation; the .read(), .write() and .erase() hooks share the
1047 * nor->addr_width value.
1048 */
1049 if (!read_hwcaps || !pp_hwcaps || !erase_mask)
1050 goto out;
1051
1052 /*
1053 * Discard all operations from the 4-byte instruction set which are
1054 * not supported by this memory.
1055 */
1056 params->hwcaps.mask &= ~discard_hwcaps;
1057 params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
1058
1059 /* Use the 4-byte address instruction set. */
1060 for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
1061 struct spi_nor_read_command *read_cmd = &params->reads[i];
1062
1063 read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
1064 }
1065
1066 /* 4BAIT is the only SFDP table that indicates page program support. */
1067 if (pp_hwcaps & SNOR_HWCAPS_PP) {
1068 spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
1069 SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
1070 /*
1071 * Since xSPI Page Program opcode is backward compatible with
1072 * Legacy SPI, use Legacy SPI opcode there as well.
1073 */
1074 spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_8_8_8_DTR],
1075 SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
1076 }
1077 if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
1078 spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
1079 SPINOR_OP_PP_1_1_4_4B,
1080 SNOR_PROTO_1_1_4);
1081 if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
1082 spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
1083 SPINOR_OP_PP_1_4_4_4B,
1084 SNOR_PROTO_1_4_4);
1085
1086 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
1087 if (erase_mask & BIT(i))
1088 erase_type[i].opcode = (dwords[1] >>
1089 erase_type[i].idx * 8) & 0xFF;
1090 else
1091 spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
1092 }
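	/*
	 * Example (illustrative): DWORD2 of the 4BAIT packs one 4-byte-address
	 * erase opcode per Erase Type, eight bits each, so an Erase Type with
	 * idx 2 takes its opcode from bits 23:16 of dwords[1].
	 */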
1093
1094 /*
1095 * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
1096 * later because we already did the conversion to 4-byte opcodes. Also,
1097 * that function implements a legacy quirk for the erase size of
1098 * Spansion memories. However, this quirk is no longer needed with new
1099 * SFDP-compliant memories.
1100 */
1101 nor->addr_width = 4;
1102 nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
1103
1104 /* fall through */
1105out:
1106 kfree(dwords);
1107 return ret;
1108}
1109
1110#define PROFILE1_DWORD1_RDSR_ADDR_BYTES BIT(29)
1111#define PROFILE1_DWORD1_RDSR_DUMMY BIT(28)
1112#define PROFILE1_DWORD1_RD_FAST_CMD GENMASK(15, 8)
1113#define PROFILE1_DWORD4_DUMMY_200MHZ GENMASK(11, 7)
1114#define PROFILE1_DWORD5_DUMMY_166MHZ GENMASK(31, 27)
1115#define PROFILE1_DWORD5_DUMMY_133MHZ GENMASK(21, 17)
1116#define PROFILE1_DWORD5_DUMMY_100MHZ GENMASK(11, 7)
1117
1118/**
1119 * spi_nor_parse_profile1() - parse the xSPI Profile 1.0 table
1120 * @nor: pointer to a 'struct spi_nor'
1121 * @profile1_header: pointer to the 'struct sfdp_parameter_header' describing
1122 * the Profile 1.0 Table length and version.
1123 *
1124 * Return: 0 on success, -errno otherwise.
1125 */
1126static int spi_nor_parse_profile1(struct spi_nor *nor,
1127 const struct sfdp_parameter_header *profile1_header)
1128{
1129 u32 *dwords, addr;
1130 size_t len;
1131 int ret;
1132 u8 dummy, opcode;
1133
1134 len = profile1_header->length * sizeof(*dwords);
1135 dwords = kmalloc(len, GFP_KERNEL);
1136 if (!dwords)
1137 return -ENOMEM;
1138
1139 addr = SFDP_PARAM_HEADER_PTP(profile1_header);
1140 ret = spi_nor_read_sfdp(nor, addr, len, dwords);
1141 if (ret)
1142 goto out;
1143
1144 le32_to_cpu_array(dwords, profile1_header->length);
1145
1146 /* Get 8D-8D-8D fast read opcode and dummy cycles. */
1147 opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[0]);
1148
1149 /* Set the Read Status Register dummy cycles and dummy address bytes. */
1150 if (dwords[0] & PROFILE1_DWORD1_RDSR_DUMMY)
1151 nor->params->rdsr_dummy = 8;
1152 else
1153 nor->params->rdsr_dummy = 4;
1154
1155 if (dwords[0] & PROFILE1_DWORD1_RDSR_ADDR_BYTES)
1156 nor->params->rdsr_addr_nbytes = 4;
1157 else
1158 nor->params->rdsr_addr_nbytes = 0;
1159
1160 /*
1161 * We don't know what speed the controller is running at. Find the
1162 * dummy cycles for the fastest frequency the flash can run at to be
1163 * sure we are never short of dummy cycles. A value of 0 means the
1164 * frequency is not supported.
1165 *
1166 * Default to PROFILE1_DUMMY_DEFAULT if we don't find anything, and let
1167 * flashes set the correct value if needed in their fixup hooks.
1168 */
1169 dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[3]);
1170 if (!dummy)
1171 dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ, dwords[4]);
1172 if (!dummy)
1173 dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ, dwords[4]);
1174 if (!dummy)
1175 dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ, dwords[4]);
1176 if (!dummy)
1177 dev_dbg(nor->dev,
1178 "Can't find dummy cycles from Profile 1.0 table\n");
1179
1180 /* Round up to an even value to avoid tripping controllers up. */
1181 dummy = round_up(dummy, 2);
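	/*
	 * For instance (illustrative values): a table advertising 7 dummy
	 * cycles at 166 MHz and none at 200 MHz yields dummy = 7, rounded up
	 * to 8 here.
	 */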
1182
1183 /* Update the fast read settings. */
1184 spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR],
1185 0, dummy, opcode,
1186 SNOR_PROTO_8_8_8_DTR);
1187
1188out:
1189 kfree(dwords);
1190 return ret;
1191}
1192
1193#define SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE BIT(31)
1194
1195/**
1196 * spi_nor_parse_sccr() - Parse the Status, Control and Configuration Register
1197 * Map.
1198 * @nor: pointer to a 'struct spi_nor'
1199 * @sccr_header: pointer to the 'struct sfdp_parameter_header' describing
1200 * the SCCR Map table length and version.
1201 *
1202 * Return: 0 on success, -errno otherwise.
1203 */
1204static int spi_nor_parse_sccr(struct spi_nor *nor,
1205 const struct sfdp_parameter_header *sccr_header)
1206{
1207 u32 *dwords, addr;
1208 size_t len;
1209 int ret;
1210
1211 len = sccr_header->length * sizeof(*dwords);
1212 dwords = kmalloc(len, GFP_KERNEL);
1213 if (!dwords)
1214 return -ENOMEM;
1215
1216 addr = SFDP_PARAM_HEADER_PTP(sccr_header);
1217 ret = spi_nor_read_sfdp(nor, addr, len, dwords);
1218 if (ret)
1219 goto out;
1220
1221 le32_to_cpu_array(dwords, sccr_header->length);
1222
	/* SFDP DWORDs are 1-based, so SCCR DWORD 22 is at array index 21. */
1223 if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[21]))
1224 nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
1225
1226out:
1227 kfree(dwords);
1228 return ret;
1229}
1230
1231/**
1232 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
1233 * @nor: pointer to a 'struct spi_nor'
1234 *
1235 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
1236 * specification. This is a standard which tends to be supported by almost all
1237 * (Q)SPI memory manufacturers. These tables, stored in the flash memory itself,
1238 * allow us to learn at runtime the main parameters needed to perform basic SPI
1239 * flash operations such as Fast Read, Page Program or Sector Erase commands.
1240 *
1241 * Return: 0 on success, -errno otherwise.
1242 */
1243int spi_nor_parse_sfdp(struct spi_nor *nor)
1244{
1245 const struct sfdp_parameter_header *param_header, *bfpt_header;
1246 struct sfdp_parameter_header *param_headers = NULL;
1247 struct sfdp_header header;
1248 struct device *dev = nor->dev;
1249 struct sfdp *sfdp;
1250 size_t sfdp_size;
1251 size_t psize;
1252 int i, err;
1253
1254 /* Get the SFDP header. */
1255 err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
1256 if (err < 0)
1257 return err;
1258
1259 /* Check the SFDP header version. */
1260 if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
1261 header.major != SFDP_JESD216_MAJOR)
1262 return -EINVAL;
1263
1264 /*
1265 * Verify that the first and only mandatory parameter header is a
1266 * Basic Flash Parameter Table header as specified in JESD216.
1267 */
1268 bfpt_header = &header.bfpt_header;
1269 if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
1270 bfpt_header->major != SFDP_JESD216_MAJOR)
1271 return -EINVAL;
1272
1273 sfdp_size = SFDP_PARAM_HEADER_PTP(bfpt_header) +
1274 SFDP_PARAM_HEADER_PARAM_LEN(bfpt_header);
1275
1276 /*
1277 * Allocate memory then read all parameter headers with a single
1278 * Read SFDP command. These parameter headers will actually be parsed
1279 * twice: a first time to get the latest revision of the basic flash
1280 * parameter table, then a second time to handle the supported optional
1281 * tables.
1282 * Hence we read the parameter headers once for all to reduce the
1283 * processing time. Also we use kmalloc() instead of devm_kmalloc()
1284 * because we don't need to keep these parameter headers: the allocated
1285 * memory is always released with kfree() before exiting this function.
1286 */
1287 if (header.nph) {
1288 psize = header.nph * sizeof(*param_headers);
1289
1290 param_headers = kmalloc(psize, GFP_KERNEL);
1291 if (!param_headers)
1292 return -ENOMEM;
1293
1294 err = spi_nor_read_sfdp(nor, sizeof(header),
1295 psize, param_headers);
1296 if (err < 0) {
1297 dev_dbg(dev, "failed to read SFDP parameter headers\n");
1298 goto exit;
1299 }
1300 }
1301
1302 /*
1303 * Cache the complete SFDP data. It is not (easily) possible to fetch
1304 * SFDP after probe time and we need it for the sysfs access.
1305 */
1306 for (i = 0; i < header.nph; i++) {
1307 param_header = &param_headers[i];
1308 sfdp_size = max_t(size_t, sfdp_size,
1309 SFDP_PARAM_HEADER_PTP(param_header) +
1310 SFDP_PARAM_HEADER_PARAM_LEN(param_header));
1311 }
1312
1313 /*
1314 * Limit the total size to a reasonable value to avoid allocating too
1315 * much memory just because the flash returned some insane values.
1316 */
1317 if (sfdp_size > PAGE_SIZE) {
1318 dev_dbg(dev, "SFDP data (%zu) too big, truncating\n",
1319 sfdp_size);
1320 sfdp_size = PAGE_SIZE;
1321 }
1322
1323 sfdp = devm_kzalloc(dev, sizeof(*sfdp), GFP_KERNEL);
1324 if (!sfdp) {
1325 err = -ENOMEM;
1326 goto exit;
1327 }
1328
1329 /*
1330 * The SFDP is organized in chunks of DWORDs. Thus, in theory, the
1331 * sfdp_size should be a multiple of DWORDs. But in case a flash
1332 * is not spec compliant, make sure that we have enough space to store
1333 * the complete SFDP data.
1334 */
1335 sfdp->num_dwords = DIV_ROUND_UP(sfdp_size, sizeof(*sfdp->dwords));
1336 sfdp->dwords = devm_kcalloc(dev, sfdp->num_dwords,
1337 sizeof(*sfdp->dwords), GFP_KERNEL);
1338 if (!sfdp->dwords) {
1339 err = -ENOMEM;
1340 devm_kfree(dev, sfdp);
1341 goto exit;
1342 }
1343
1344 err = spi_nor_read_sfdp(nor, 0, sfdp_size, sfdp->dwords);
1345 if (err < 0) {
1346 dev_dbg(dev, "failed to read SFDP data\n");
1347 devm_kfree(dev, sfdp->dwords);
1348 devm_kfree(dev, sfdp);
1349 goto exit;
1350 }
1351
1352 nor->sfdp = sfdp;
1353
1354 /*
1355 * Check other parameter headers to get the latest revision of
1356 * the basic flash parameter table.
1357 */
1358 for (i = 0; i < header.nph; i++) {
1359 param_header = &param_headers[i];
1360
1361 if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
1362 param_header->major == SFDP_JESD216_MAJOR &&
1363 (param_header->minor > bfpt_header->minor ||
1364 (param_header->minor == bfpt_header->minor &&
1365 param_header->length > bfpt_header->length)))
1366 bfpt_header = param_header;
1367 }
1368
1369 err = spi_nor_parse_bfpt(nor, bfpt_header);
1370 if (err)
1371 goto exit;
1372
1373 /* Parse optional parameter tables. */
1374 for (i = 0; i < header.nph; i++) {
1375 param_header = &param_headers[i];
1376
1377 switch (SFDP_PARAM_HEADER_ID(param_header)) {
1378 case SFDP_SECTOR_MAP_ID:
1379 err = spi_nor_parse_smpt(nor, param_header);
1380 break;
1381
1382 case SFDP_4BAIT_ID:
1383 err = spi_nor_parse_4bait(nor, param_header);
1384 break;
1385
1386 case SFDP_PROFILE1_ID:
1387 err = spi_nor_parse_profile1(nor, param_header);
1388 break;
1389
1390 case SFDP_SCCR_MAP_ID:
1391 err = spi_nor_parse_sccr(nor, param_header);
1392 break;
1393
1394 default:
1395 break;
1396 }
1397
1398 if (err) {
1399 dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
1400 SFDP_PARAM_HEADER_ID(param_header));
1401 /*
1402 * Let's not drop all information we extracted so far
1403 * if optional table parsers fail. In case of failing,
1404 * each optional parser is responsible to roll back to
1405 * the previously known spi_nor data.
1406 */
1407 err = 0;
1408 }
1409 }
1410
1411exit:
1412 kfree(param_headers);
1413 return err;
1414}