Linux Audio

Check our new training course

Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Common Flash Interface support:
  4 *   Generic utility functions not dependent on command set
  5 *
  6 * Copyright (C) 2002 Red Hat
  7 * Copyright (C) 2003 STMicroelectronics Limited
 
 
  8 */
  9
 10#include <linux/module.h>
 11#include <linux/types.h>
 12#include <linux/kernel.h>
 13#include <asm/io.h>
 14#include <asm/byteorder.h>
 15
 16#include <linux/errno.h>
 17#include <linux/slab.h>
 18#include <linux/delay.h>
 19#include <linux/interrupt.h>
 20#include <linux/mtd/xip.h>
 21#include <linux/mtd/mtd.h>
 22#include <linux/mtd/map.h>
 23#include <linux/mtd/cfi.h>
 24
 25void cfi_udelay(int us)
 26{
 27	if (us >= 1000) {
 28		msleep(DIV_ROUND_UP(us, 1000));
 29	} else {
 30		udelay(us);
 31		cond_resched();
 32	}
 33}
 34EXPORT_SYMBOL(cfi_udelay);
 35
 36/*
 37 * Returns the command address according to the given geometry.
 38 */
 39uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
 40				struct map_info *map, struct cfi_private *cfi)
 41{
 42	unsigned bankwidth = map_bankwidth(map);
 43	unsigned interleave = cfi_interleave(cfi);
 44	unsigned type = cfi->device_type;
 45	uint32_t addr;
 46
 47	addr = (cmd_ofs * type) * interleave;
 48
 49	/* Modify the unlock address if we are in compatibility mode.
 50	 * For 16bit devices on 8 bit busses
 51	 * and 32bit devices on 16 bit busses
 52	 * set the low bit of the alternating bit sequence of the address.
 53	 */
 54	if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
 55		addr |= (type >> 1)*interleave;
 56
 57	return  addr;
 58}
 59EXPORT_SYMBOL(cfi_build_cmd_addr);
 60
 61/*
 62 * Transforms the CFI command for the given geometry (bus width & interleave).
 63 * It looks too long to be inline, but in the common case it should almost all
 64 * get optimised away.
 65 */
 66map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
 67{
 68	map_word val = { {0} };
 69	int wordwidth, words_per_bus, chip_mode, chips_per_word;
 70	unsigned long onecmd;
 71	int i;
 72
 73	/* We do it this way to give the compiler a fighting chance
 74	   of optimising away all the crap for 'bankwidth' larger than
 75	   an unsigned long, in the common case where that support is
 76	   disabled */
 77	if (map_bankwidth_is_large(map)) {
 78		wordwidth = sizeof(unsigned long);
 79		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
 80	} else {
 81		wordwidth = map_bankwidth(map);
 82		words_per_bus = 1;
 83	}
 84
 85	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
 86	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
 87
 88	/* First, determine what the bit-pattern should be for a single
 89	   device, according to chip mode and endianness... */
 90	switch (chip_mode) {
 91	default: BUG();
 92	case 1:
 93		onecmd = cmd;
 94		break;
 95	case 2:
 96		onecmd = cpu_to_cfi16(map, cmd);
 97		break;
 98	case 4:
 99		onecmd = cpu_to_cfi32(map, cmd);
100		break;
101	}
102
103	/* Now replicate it across the size of an unsigned long, or
104	   just to the bus width as appropriate */
105	switch (chips_per_word) {
106	default: BUG();
107#if BITS_PER_LONG >= 64
108	case 8:
109		onecmd |= (onecmd << (chip_mode * 32));
110		fallthrough;
111#endif
112	case 4:
113		onecmd |= (onecmd << (chip_mode * 16));
114		fallthrough;
115	case 2:
116		onecmd |= (onecmd << (chip_mode * 8));
117		fallthrough;
118	case 1:
119		;
120	}
121
122	/* And finally, for the multi-word case, replicate it
123	   in all words in the structure */
124	for (i=0; i < words_per_bus; i++) {
125		val.x[i] = onecmd;
126	}
127
128	return val;
129}
130EXPORT_SYMBOL(cfi_build_cmd);
131
/*
 * Collapse a bus-wide word read from (possibly interleaved) chips into a
 * single chip-wide status value: the per-chip copies are OR'd together,
 * then the result is converted from CFI to CPU byte order.
 */
unsigned long cfi_merge_status(map_word val, struct map_info *map,
					   struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	/* chip_mode: bytes each chip contributes per bus cycle.
	   chips_per_word: per-chip copies packed in one unsigned long. */
	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* Or all status words together */
	for (i=1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	/* Fold the interleaved per-chip copies down onto the low bits;
	   the cases deliberately fall through, halving each time. */
	res = onestat;
	switch(chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
		fallthrough;
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
		fallthrough;
	case 2:
		res |= (onestat >> (chip_mode * 8));
		fallthrough;
	case 1:
		;
	}

	/* Last, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(map, res);
		break;
	case 4:
		res = cfi32_to_cpu(map, res);
		break;
	default: BUG();
	}
	return res;
}
EXPORT_SYMBOL(cfi_merge_status);
194
195/*
196 * Sends a CFI command to a bank of flash for the given geometry.
197 *
198 * Returns the offset in flash where the command was written.
199 * If prev_val is non-null, it will be set to the value at the command address,
200 * before the command was written.
201 */
202uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
203				struct map_info *map, struct cfi_private *cfi,
204				int type, map_word *prev_val)
205{
206	map_word val;
207	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
208	val = cfi_build_cmd(cmd, map, cfi);
209
210	if (prev_val)
211		*prev_val = map_read(map, addr);
212
213	map_write(map, val, addr);
214
215	return addr - base;
216}
217EXPORT_SYMBOL(cfi_send_gen_cmd);
218
219int __xipram cfi_qry_present(struct map_info *map, __u32 base,
220			     struct cfi_private *cfi)
221{
222	int osf = cfi->interleave * cfi->device_type;	/* scale factor */
223	map_word val[3];
224	map_word qry[3];
225
226	qry[0] = cfi_build_cmd('Q', map, cfi);
227	qry[1] = cfi_build_cmd('R', map, cfi);
228	qry[2] = cfi_build_cmd('Y', map, cfi);
229
230	val[0] = map_read(map, base + osf*0x10);
231	val[1] = map_read(map, base + osf*0x11);
232	val[2] = map_read(map, base + osf*0x12);
233
234	if (!map_word_equal(map, qry[0], val[0]))
235		return 0;
236
237	if (!map_word_equal(map, qry[1], val[1]))
238		return 0;
239
240	if (!map_word_equal(map, qry[2], val[2]))
241		return 0;
242
243	return 1; 	/* "QRY" found */
244}
245EXPORT_SYMBOL_GPL(cfi_qry_present);
246
/*
 * Put the flash chip(s) at 'base' into CFI Query mode.
 *
 * The standard CFI entry command (0x98 at offset 0x55) is tried first,
 * each attempt preceded by a reset; if the "QRY" signature does not
 * appear, several vendor-specific entry sequences are tried in turn.
 * Returns 1 once cfi_qry_present() sees "QRY", 0 if no sequence worked.
 */
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi)
{
	/* Reset, then standard CFI query entry */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found probably we deal with some odd CFI chips */
	/* Some revisions of some old Intel chips? */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* ST M29DW chips */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* some old SST chips, e.g. 39VF160x/39VF320x */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* SST 39VF640xB */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found */
	return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);
284
/*
 * Return the flash chip(s) at 'base' from Query mode to normal read
 * mode by issuing both reset opcodes (0xF0 and 0xFF).
 */
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	/* M29W128G flashes require an additional reset command
	   when exit qry mode */
	if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
		cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
296
/*
 * Read the vendor's extended (primary) query table of 'size' bytes at
 * query offset 'adr' into a freshly allocated buffer.
 *
 * Returns NULL when adr is 0 or the allocation fails; otherwise the
 * caller owns the returned buffer and must kfree() it.
 */
struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 base = 0; // cfi->chips[0].start;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	struct cfi_extquery *extp = NULL;

	if (!adr)
		goto out;

	printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);

	extp = kmalloc(size, GFP_KERNEL);
	if (!extp)
		goto out;

#ifdef CONFIG_MTD_XIP
	/* NOTE(review): presumably the kernel may be executing in place
	   from this flash, so interrupts stay off for the whole window
	   the chip is out of read mode — confirm against other XIP users */
	local_irq_disable();
#endif

	/* Switch it into Query Mode */
	cfi_qry_mode_on(base, map, cfi);
	/* Read in the Extended Query Table */
	for (i=0; i<size; i++) {
		((unsigned char *)extp)[i] =
			cfi_read_query(map, base+((adr+i)*ofs_factor));
	}

	/* Make sure it returns to read mode */
	cfi_qry_mode_off(base, map, cfi);

#ifdef CONFIG_MTD_XIP
	/* Dummy read plus instruction prefetch before re-enabling irqs */
	(void) map_read(map, base);
	xip_iprefetch();
	local_irq_enable();
#endif

 out:	return extp;
}

EXPORT_SYMBOL(cfi_read_pri);
340
341void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
342{
343	struct map_info *map = mtd->priv;
344	struct cfi_private *cfi = map->fldrv_priv;
345	struct cfi_fixup *f;
346
347	for (f=fixups; f->fixup; f++) {
348		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
349		    ((f->id  == CFI_ID_ANY)  || (f->id  == cfi->id))) {
350			f->fixup(mtd);
351		}
352	}
353}
354
355EXPORT_SYMBOL(cfi_fixup);
356
/*
 * Walk the range [ofs, ofs+len) of a flash with variable-size erase
 * regions and invoke 'frob' once per erase block, passing 'thunk'
 * through. Both ends of the range must be aligned to the erasesize in
 * effect at that address (-EINVAL otherwise). Returns the first
 * non-zero value 'frob' returns, else 0.
 */
int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
				     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	/* Translate the absolute offset into (chip number, offset
	   within that chip) */
	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i=first;

	/* Apply 'frob' to one erase block per iteration */
	while(len) {
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);

		if (ret)
			return ret;

		adr += size;
		ofs += size;
		len -= size;

		/* Crossed into the next erase region? */
		if (ofs == regions[i].offset + size * regions[i].numblocks)
			i++;

		/* Crossed a chip boundary: restart at the next chip */
		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

EXPORT_SYMBOL(cfi_varsize_frob);
443
444MODULE_LICENSE("GPL");
v5.14.15
 
  1/*
  2 * Common Flash Interface support:
  3 *   Generic utility functions not dependent on command set
  4 *
  5 * Copyright (C) 2002 Red Hat
  6 * Copyright (C) 2003 STMicroelectronics Limited
  7 *
  8 * This code is covered by the GPL.
  9 */
 10
 11#include <linux/module.h>
 12#include <linux/types.h>
 13#include <linux/kernel.h>
 14#include <asm/io.h>
 15#include <asm/byteorder.h>
 16
 17#include <linux/errno.h>
 18#include <linux/slab.h>
 19#include <linux/delay.h>
 20#include <linux/interrupt.h>
 21#include <linux/mtd/xip.h>
 22#include <linux/mtd/mtd.h>
 23#include <linux/mtd/map.h>
 24#include <linux/mtd/cfi.h>
 25
/*
 * Delay for 'us' microseconds. Millisecond-scale delays sleep
 * (rounded up to whole milliseconds via msleep); shorter delays
 * busy-wait with udelay() and then offer the CPU back to the
 * scheduler.
 */
void cfi_udelay(int us)
{
	if (us >= 1000) {
		msleep(DIV_ROUND_UP(us, 1000));
	} else {
		udelay(us);
		cond_resched();
	}
}
EXPORT_SYMBOL(cfi_udelay);
 36
/*
 * Returns the command address according to the given geometry.
 *
 * The nominal address is the command offset scaled by device type
 * (bytes per chip word) and interleave (chips per bus).
 */
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
				struct map_info *map, struct cfi_private *cfi)
{
	unsigned bankwidth = map_bankwidth(map);
	unsigned interleave = cfi_interleave(cfi);
	unsigned type = cfi->device_type;
	uint32_t addr;

	addr = (cmd_ofs * type) * interleave;

	/* Modify the unlock address if we are in compatibility mode.
	 * For 16bit devices on 8 bit busses
	 * and 32bit devices on 16 bit busses
	 * set the low bit of the alternating bit sequence of the address.
	 */
	if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
		addr |= (type >> 1)*interleave;

	return  addr;
}
EXPORT_SYMBOL(cfi_build_cmd_addr);
 61
/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	/* chip_mode: bytes each individual chip contributes per bus cycle.
	   chips_per_word: how many per-chip command copies fit into one
	   'unsigned long' of the map_word. */
	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(map, cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(map, cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate. Each case doubles the
	   number of copies, so the cases deliberately fall through. */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
		fallthrough;
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		fallthrough;
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		fallthrough;
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
EXPORT_SYMBOL(cfi_build_cmd);
132
/*
 * Collapse a bus-wide word read from (possibly interleaved) chips into a
 * single chip-wide status value: the per-chip copies are OR'd together,
 * then the result is converted from CFI to CPU byte order.
 */
unsigned long cfi_merge_status(map_word val, struct map_info *map,
					   struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	/* chip_mode: bytes each chip contributes per bus cycle.
	   chips_per_word: per-chip copies packed in one unsigned long. */
	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* Or all status words together */
	for (i=1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	/* Fold the interleaved per-chip copies down onto the low bits;
	   the cases deliberately fall through, halving each time. */
	res = onestat;
	switch(chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
		fallthrough;
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
		fallthrough;
	case 2:
		res |= (onestat >> (chip_mode * 8));
		fallthrough;
	case 1:
		;
	}

	/* Last, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(map, res);
		break;
	case 4:
		res = cfi32_to_cpu(map, res);
		break;
	default: BUG();
	}
	return res;
}
EXPORT_SYMBOL(cfi_merge_status);
195
/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value at the command address,
 * before the command was written.
 */
uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
				struct map_info *map, struct cfi_private *cfi,
				int type, map_word *prev_val)
{
	map_word val;
	/* Scale the command offset for this geometry and replicate the
	   command byte across the full bus width */
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
	val = cfi_build_cmd(cmd, map, cfi);

	/* Optionally snapshot the old contents before overwriting */
	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
EXPORT_SYMBOL(cfi_send_gen_cmd);
219
/*
 * Probe for the CFI "QRY" identification string at the standard query
 * offsets 0x10..0x12 (scaled by geometry). Returns 1 when the chip(s)
 * respond with "QRY", 0 otherwise.
 */
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi)
{
	int osf = cfi->interleave * cfi->device_type;	/* scale factor */
	map_word val[3];
	map_word qry[3];

	/* The bus-wide patterns 'Q', 'R' and 'Y' should produce */
	qry[0] = cfi_build_cmd('Q', map, cfi);
	qry[1] = cfi_build_cmd('R', map, cfi);
	qry[2] = cfi_build_cmd('Y', map, cfi);

	val[0] = map_read(map, base + osf*0x10);
	val[1] = map_read(map, base + osf*0x11);
	val[2] = map_read(map, base + osf*0x12);

	if (!map_word_equal(map, qry[0], val[0]))
		return 0;

	if (!map_word_equal(map, qry[1], val[1]))
		return 0;

	if (!map_word_equal(map, qry[2], val[2]))
		return 0;

	return 1; 	/* "QRY" found */
}
EXPORT_SYMBOL_GPL(cfi_qry_present);
247
/*
 * Put the flash chip(s) at 'base' into CFI Query mode.
 *
 * The standard CFI entry command (0x98 at offset 0x55) is tried first,
 * each attempt preceded by a reset; if the "QRY" signature does not
 * appear, several vendor-specific entry sequences are tried in turn.
 * Returns 1 once cfi_qry_present() sees "QRY", 0 if no sequence worked.
 */
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi)
{
	/* Reset, then standard CFI query entry */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found probably we deal with some odd CFI chips */
	/* Some revisions of some old Intel chips? */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* ST M29DW chips */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* some old SST chips, e.g. 39VF160x/39VF320x */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* SST 39VF640xB */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found */
	return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);
285
/*
 * Return the flash chip(s) at 'base' from Query mode to normal read
 * mode by issuing both reset opcodes (0xF0 and 0xFF).
 */
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	/* M29W128G flashes require an additional reset command
	   when exit qry mode */
	if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
		cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
297
/*
 * Read the vendor's extended (primary) query table of 'size' bytes at
 * query offset 'adr' into a freshly allocated buffer.
 *
 * Returns NULL when adr is 0 or the allocation fails; otherwise the
 * caller owns the returned buffer and must kfree() it.
 */
struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 base = 0; // cfi->chips[0].start;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	struct cfi_extquery *extp = NULL;

	if (!adr)
		goto out;

	printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);

	extp = kmalloc(size, GFP_KERNEL);
	if (!extp)
		goto out;

#ifdef CONFIG_MTD_XIP
	/* NOTE(review): presumably the kernel may be executing in place
	   from this flash, so interrupts stay off for the whole window
	   the chip is out of read mode — confirm against other XIP users */
	local_irq_disable();
#endif

	/* Switch it into Query Mode */
	cfi_qry_mode_on(base, map, cfi);
	/* Read in the Extended Query Table */
	for (i=0; i<size; i++) {
		((unsigned char *)extp)[i] =
			cfi_read_query(map, base+((adr+i)*ofs_factor));
	}

	/* Make sure it returns to read mode */
	cfi_qry_mode_off(base, map, cfi);

#ifdef CONFIG_MTD_XIP
	/* Dummy read plus instruction prefetch before re-enabling irqs */
	(void) map_read(map, base);
	xip_iprefetch();
	local_irq_enable();
#endif

 out:	return extp;
}

EXPORT_SYMBOL(cfi_read_pri);
341
/*
 * Run every fixup in the NULL-terminated 'fixups' table whose
 * manufacturer and device id match this chip (CFI_MFR_ANY/CFI_ID_ANY
 * act as wildcards).
 */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f=fixups; f->fixup; f++) {
		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
		    ((f->id  == CFI_ID_ANY)  || (f->id  == cfi->id))) {
			f->fixup(mtd);
		}
	}
}

EXPORT_SYMBOL(cfi_fixup);
357
/*
 * Walk the range [ofs, ofs+len) of a flash with variable-size erase
 * regions and invoke 'frob' once per erase block, passing 'thunk'
 * through. Both ends of the range must be aligned to the erasesize in
 * effect at that address (-EINVAL otherwise). Returns the first
 * non-zero value 'frob' returns, else 0.
 */
int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
				     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	/* Translate the absolute offset into (chip number, offset
	   within that chip) */
	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i=first;

	/* Apply 'frob' to one erase block per iteration */
	while(len) {
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);

		if (ret)
			return ret;

		adr += size;
		ofs += size;
		len -= size;

		/* Crossed into the next erase region? */
		if (ofs == regions[i].offset + size * regions[i].numblocks)
			i++;

		/* Crossed a chip boundary: restart at the next chip */
		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

EXPORT_SYMBOL(cfi_varsize_frob);
444
445MODULE_LICENSE("GPL");