// SPDX-License-Identifier: GPL-2.0-only
/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright © 2005  Sean Young <sean@mess.org>
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 *
 * based on ftl.c
 */

#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/module.h>

#include <asm/types.h>

static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");

#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR		256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS		4

/* An erase unit should start with this value */
#define RFD_MAGIC		0x9193

/* the second value is 0xffff or 0xffc8; function unknown */

/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET	3
#define SECTOR_DELETED		0x0000
#define SECTOR_ZERO		0xfffe
#define SECTOR_FREE		0xffff

#define SECTOR_SIZE		512

#define SECTORS_PER_TRACK	63

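/*
 * Per-erase-unit state: lifecycle state, free/used data sector counts,
 * erase count, and the unit's byte offset within the MTD device.
 */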
struct block {
	enum {
		BLOCK_OK,
		BLOCK_ERASING,
		BLOCK_ERASED,
		BLOCK_UNUSED,
		BLOCK_FAILED
	} state;
	int free_sectors;
	int used_sectors;
	int erases;
	u_long offset;
};

struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;
	int cylinders;
	int errors;
	u_long *sector_map;
	struct block *blocks;
};

static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);

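/*
 * Parse the cached header of erase unit 'block_no': check the RFD magic,
 * count free and used data sectors, and record each mapped sector's flash
 * address in part->sector_map. Returns -ENOENT if the unit carries no
 * RFD magic.
 */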
static int build_block_map(struct partition *part, int block_no)
{
	struct block *block = &part->blocks[block_no];
	int i;

	block->offset = part->block_size * block_no;

	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
		block->state = BLOCK_UNUSED;
		return -ENOENT;
	}

	block->state = BLOCK_OK;

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry;

		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

		if (entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_FREE) {
			block->free_sectors++;
			continue;
		}

		if (entry == SECTOR_ZERO)
			entry = 0;

		if (entry >= part->sector_count) {
			printk(KERN_WARNING PREFIX
				"'%s': unit #%d: entry %d corrupt, "
				"sector %d out of range\n",
				part->mbd.mtd->name, block_no, i, entry);
			continue;
		}

		if (part->sector_map[entry] != -1) {
			printk(KERN_WARNING PREFIX
				"'%s': more than one entry for sector %d\n",
				part->mbd.mtd->name, entry);
			part->errors = 1;
			continue;
		}

		part->sector_map[entry] = block->offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		block->used_sectors++;
	}

	if (block->free_sectors == part->data_sectors_per_block)
		part->reserved_block = block_no;

	return 0;
}

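/*
 * Derive the partition geometry from the erase unit size, allocate the
 * header cache, block array and sector map, then read every erase unit
 * header and build the sector map. Fails with -ENOENT if no unit carries
 * the RFD magic.
 */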
static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;

	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase unit starts with three 16-bit header words, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(array_size(sizeof(u_long),
					      part->sector_count));
	if (!part->sector_map) {
		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
			"sector map", part->mbd.mtd->name);
		goto err;
	}

	for (i=0; i<part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
		rc = mtd_read(part->mbd.mtd, i * part->block_size,
			      part->header_size, &retlen,
			      (u_char *)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
				part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	if (part->reserved_block == -1) {
		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
				part->mbd.mtd->name);

		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}

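/*
 * Read one 512-byte sector of the translated disk. Unmapped sectors read
 * back as zeroes.
 */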
static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long addr;
	size_t retlen;
	int rc;

	if (sector >= part->sector_count)
		return -EIO;

	addr = part->sector_map[sector];
	if (addr != -1) {
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      (u_char *)buf);
		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_WARNING PREFIX "error reading '%s' at "
				"0x%lx\n", part->mbd.mtd->name, addr);
			return rc;
		}
	} else
		memset(buf, 0, SECTOR_SIZE);

	return 0;
}

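/*
 * Erase one erase unit and rewrite the RFD magic at its start, updating
 * the block state and sector counters accordingly.
 */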
static int erase_block(struct partition *part, int block)
{
	struct erase_info *erase;
	int rc;

	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	erase->addr = part->blocks[block].offset;
	erase->len = part->block_size;

	part->blocks[block].state = BLOCK_ERASING;
	part->blocks[block].free_sectors = 0;

	rc = mtd_erase(part->mbd.mtd, erase);
	if (rc) {
		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
				"failed\n", (unsigned long long)erase->addr,
				(unsigned long long)erase->len, part->mbd.mtd->name);
		part->blocks[block].state = BLOCK_FAILED;
		part->blocks[block].free_sectors = 0;
		part->blocks[block].used_sectors = 0;
	} else {
		u16 magic = cpu_to_le16(RFD_MAGIC);
		size_t retlen;

		part->blocks[block].state = BLOCK_ERASED;
		part->blocks[block].free_sectors = part->data_sectors_per_block;
		part->blocks[block].used_sectors = 0;
		part->blocks[block].erases++;

		rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
			       sizeof(magic), &retlen, (u_char *)&magic);
		if (!rc && retlen != sizeof(magic))
			rc = -EIO;

		if (rc) {
			pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
			       part->mbd.mtd->name, part->blocks[block].offset);
			part->blocks[block].state = BLOCK_FAILED;
		} else {
			part->blocks[block].state = BLOCK_OK;
		}
	}

	kfree(erase);

	return rc;
}

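/*
 * Relocate the live sectors of erase unit 'block_no': re-read its
 * on-flash header and rewrite every mapped sector through
 * rfd_ftl_writesect(). The sector being replaced (*old_sector) is
 * skipped rather than copied.
 */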
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
		      part->header_size, &retlen, (u_char *)map);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error reading '%s' at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block_no].offset);

		goto err;
	}

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			*old_sector = -1;
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      sector_data);

		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_ERR PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);

			goto err;
		}

		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
				entry, sector_data);

		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}

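/*
 * Pick the cheapest erase unit to reclaim (fewest used sectors, with the
 * erase count added to the score so often-erased units are chosen less
 * eagerly), make it the new reserved block, and either move its live
 * contents or erase it outright.
 */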
static int reclaim_block(struct partition *part, u_long *old_sector)
{
	int block, best_block, score, old_sector_block;
	int rc;

	/* we have a race if sync doesn't exist */
	mtd_sync(part->mbd.mtd);

	score = 0x7fffffff; /* MAX_INT */
	best_block = -1;
	if (*old_sector != -1)
		old_sector_block = *old_sector / part->block_size;
	else
		old_sector_block = -1;

	for (block=0; block<part->total_blocks; block++) {
		int this_score;

		if (block == part->reserved_block)
			continue;

		/*
		 * Postpone reclaiming while any block still has free
		 * sectors: the more sectors get deleted before a
		 * reclaim, the fewer live sectors have to be moved.
		 */
		if (part->blocks[block].free_sectors)
			return 0;

		this_score = part->blocks[block].used_sectors;

		if (block == old_sector_block)
			this_score--;
		else {
			/* no point in moving a full block */
			if (part->blocks[block].used_sectors ==
					part->data_sectors_per_block)
				continue;
		}

		this_score += part->blocks[block].erases;

		if (this_score < score) {
			best_block = block;
			score = this_score;
		}
	}

	if (best_block == -1)
		return -ENOSPC;

	part->current_block = -1;
	part->reserved_block = best_block;

	pr_debug("reclaim_block: reclaiming block #%d with %d used "
		 "%d free sectors\n", best_block,
		 part->blocks[best_block].used_sectors,
		 part->blocks[best_block].free_sectors);

	if (part->blocks[best_block].used_sectors)
		rc = move_block_contents(part, best_block, old_sector);
	else
		rc = erase_block(part, best_block);

	return rc;
}

/*
 * IMPROVE: It would be best to choose the block with the most deleted sectors,
 * because if we fill that one up first it'll have the most chance of having
 * the least live sectors at reclaim.
 */
static int find_free_block(struct partition *part)
{
	int block, stop;

	block = part->current_block == -1 ?
			jiffies % part->total_blocks : part->current_block;
	stop = block;

	do {
		if (part->blocks[block].free_sectors &&
				block != part->reserved_block)
			return block;

		if (part->blocks[block].state == BLOCK_UNUSED)
			erase_block(part, block);

		if (++block >= part->total_blocks)
			block = 0;

	} while (block != stop);

	return -1;
}

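/*
 * Find an erase unit with free sectors (reclaiming one if necessary),
 * read its header into the header cache and make it the current block.
 */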
static int find_writable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
		      part->header_size, &retlen,
		      (u_char *)part->header_cache);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to read header at "
				"0x%lx\n", part->mbd.mtd->name,
				part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;

err:
	return rc;
}

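/*
 * Mark the map entry for the sector previously stored at 'old_addr' as
 * deleted on flash (and in the header cache if it belongs to the current
 * block). A block whose data sectors are all deleted is erased right away.
 */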
static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
	int block, offset, rc;
	u_long addr;
	size_t retlen;
	u16 del = cpu_to_le16(SECTOR_DELETED);

	block = old_addr / part->block_size;
	offset = (old_addr % part->block_size) / SECTOR_SIZE -
		part->header_sectors_per_block;

	addr = part->blocks[block].offset +
			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
		       (u_char *)&del);

	if (!rc && retlen != sizeof(del))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at "
			"0x%lx\n", part->mbd.mtd->name, addr);
		goto err;
	}
	if (block == part->current_block)
		part->header_cache[offset + HEADER_MAP_OFFSET] = del;

	part->blocks[block].used_sectors--;

	if (!part->blocks[block].used_sectors &&
	    !part->blocks[block].free_sectors)
		rc = erase_block(part, block);

err:
	return rc;
}

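/*
 * Return the index of a free map entry in the block's cached header, or
 * -1 if none is left.
 */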
static int find_free_sector(const struct partition *part, const struct block *block)
{
	int i, stop;

	i = stop = part->data_sectors_per_block - block->free_sectors;

	do {
		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
				== SECTOR_FREE)
			return i;

		if (++i == part->data_sectors_per_block)
			i = 0;
	}
	while(i != stop);

	return -1;
}

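/*
 * Write one sector's data into a free slot of the current block (picking
 * a new writable block first if needed), then update the on-flash map
 * entry and the in-memory sector map.
 */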
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = (struct partition*)dev;
	struct block *block;
	u_long addr;
	int i;
	int rc;
	size_t retlen;
	u16 entry;

	if (part->current_block == -1 ||
		!part->blocks[part->current_block].free_sectors) {

		rc = find_writable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);

	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
		block->offset;
	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
		       (u_char *)buf);

	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	part->sector_map[sector] = addr;

	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
		       (u_char *)&entry);

	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}
	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}

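/*
 * Block-layer write entry point. An all-zero sector is stored by simply
 * dropping its mapping; anything else is written to a new location before
 * the old copy is marked deleted.
 */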
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

	for (i=0; i<SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}

static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct partition *part = (struct partition*)dev;

	geo->heads = 1;
	geo->sectors = SECTORS_PER_TRACK;
	geo->cylinders = part->cylinders;

	return 0;
}

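/*
 * blktrans add_mtd callback: attach the RFD FTL to NOR flash devices
 * whose size fits in 32 bits, using the block_size module parameter or,
 * failing that, the device's erase size as the erase unit size.
 */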
static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct partition *part;

	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
		return;

	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
	if (!part)
		return;

	part->mbd.mtd = mtd;

	if (block_size)
		part->block_size = block_size;
	else {
		if (!mtd->erasesize) {
			printk(KERN_WARNING PREFIX "please provide block_size");
			goto out;
		} else
			part->block_size = mtd->erasesize;
	}

	if (scan_header(part) == 0) {
		part->mbd.size = part->sector_count;
		part->mbd.tr = tr;
		part->mbd.devnum = -1;
		if (!(mtd->flags & MTD_WRITEABLE))
			part->mbd.readonly = 1;
		else if (part->errors) {
			printk(KERN_WARNING PREFIX "'%s': errors found, "
					"setting read-only\n", mtd->name);
			part->mbd.readonly = 1;
		}

		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
				mtd->name, mtd->type, mtd->flags);

		if (!add_mtd_blktrans_dev((void*)part))
			return;
	}
out:
	kfree(part);
}

static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = (struct partition*)dev;
	int i;

	for (i=0; i<part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	del_mtd_blktrans_dev(dev);
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
}

static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.blksize	= SECTOR_SIZE,

	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init init_rfd_ftl(void)
{
	return register_mtd_blktrans(&rfd_ftl_tr);
}

static void __exit cleanup_rfd_ftl(void)
{
	deregister_mtd_blktrans(&rfd_ftl_tr);
}

module_init(init_rfd_ftl);
module_exit(cleanup_rfd_ftl);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");