v3.1
  1/*
  2 * rfd_ftl.c -- resident flash disk (flash translation layer)
  3 *
  4 * Copyright © 2005  Sean Young <sean@mess.org>
  5 *
  6 * This type of flash translation layer (FTL) is used by the Embedded BIOS
  7 * by General Software. It is known as the Resident Flash Disk (RFD), see:
  8 *
  9 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 10 *
 11 * based on ftl.c
 12 */
 13
 14#include <linux/hdreg.h>
 15#include <linux/init.h>
 16#include <linux/mtd/blktrans.h>
 17#include <linux/mtd/mtd.h>
 18#include <linux/vmalloc.h>
 19#include <linux/slab.h>
 20#include <linux/jiffies.h>
 21
 22#include <asm/types.h>
 23
 24static int block_size = 0;
 25module_param(block_size, int, 0);
 26MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
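/*
 * Note: with a permission argument of 0 the parameter above is settable only
 * when the module is loaded (it is not exported under
 * /sys/module/rfd_ftl/parameters), e.g. "modprobe rfd_ftl block_size=65536".
 * When it is left at 0, rfd_ftl_add_mtd() below falls back to the erase unit
 * size reported by the MTD device.
 */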
 27
 28#define PREFIX "rfd_ftl: "
 29
 30/* This major has been assigned by device@lanana.org */
 31#ifndef RFD_FTL_MAJOR
 32#define RFD_FTL_MAJOR		256
 33#endif
 34
 35/* Maximum number of partitions in an FTL region */
 36#define PART_BITS		4
 37
 38/* An erase unit should start with this value */
 39#define RFD_MAGIC		0x9193
 40
 41/* the second value is 0xffff or 0xffc8; function unknown */
 42
 43/* the third value is always 0xffff, ignored */
 44
 45/* next is an array of mapping for each corresponding sector */
 46#define HEADER_MAP_OFFSET	3
 47#define SECTOR_DELETED		0x0000
 48#define SECTOR_ZERO		0xfffe
 49#define SECTOR_FREE		0xffff
 50
 51#define SECTOR_SIZE		512
 52
 53#define SECTORS_PER_TRACK	63
 54
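/*
 * A rough sketch of the erase-unit layout implied by the constants above
 * (reconstructed from the code below, not from a format specification):
 *
 *   u16[0]    RFD_MAGIC (0x9193)
 *   u16[1]    0xffff or 0xffc8, purpose unknown
 *   u16[2]    always 0xffff, ignored
 *   u16[3..]  one map entry per data sector: SECTOR_FREE, SECTOR_DELETED,
 *             SECTOR_ZERO (stands for logical sector 0) or the logical
 *             sector number itself
 *   ...       the data sectors, SECTOR_SIZE bytes each, starting at the
 *             first sector boundary after the header
 */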
 55struct block {
 56	enum {
 57		BLOCK_OK,
 58		BLOCK_ERASING,
 59		BLOCK_ERASED,
 60		BLOCK_UNUSED,
 61		BLOCK_FAILED
 62	} state;
 63	int free_sectors;
 64	int used_sectors;
 65	int erases;
 66	u_long offset;
 67};
 68
 69struct partition {
 70	struct mtd_blktrans_dev mbd;
 71
 72	u_int block_size;		/* size of erase unit */
 73	u_int total_blocks;		/* number of erase units */
 74	u_int header_sectors_per_block;	/* header sectors in erase unit */
 75	u_int data_sectors_per_block;	/* data sectors in erase unit */
 76	u_int sector_count;		/* sectors in translated disk */
 77	u_int header_size;		/* bytes in header sector */
 78	int reserved_block;		/* block next up for reclaim */
 79	int current_block;		/* block to write to */
 80	u16 *header_cache;		/* cached header */
 81
 82	int is_reclaiming;
 83	int cylinders;
 84	int errors;
 85	u_long *sector_map;
 86	struct block *blocks;
 87};
 88
 89static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
 90
 91static int build_block_map(struct partition *part, int block_no)
 92{
 93	struct block *block = &part->blocks[block_no];
 94	int i;
 95
 96	block->offset = part->block_size * block_no;
 97
 98	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
 99		block->state = BLOCK_UNUSED;
100		return -ENOENT;
101	}
102
103	block->state = BLOCK_OK;
104
105	for (i=0; i<part->data_sectors_per_block; i++) {
106		u16 entry;
107
108		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
109
110		if (entry == SECTOR_DELETED)
111			continue;
112
113		if (entry == SECTOR_FREE) {
114			block->free_sectors++;
115			continue;
116		}
117
118		if (entry == SECTOR_ZERO)
119			entry = 0;
120
121		if (entry >= part->sector_count) {
122			printk(KERN_WARNING PREFIX
123				"'%s': unit #%d: entry %d corrupt, "
124				"sector %d out of range\n",
125				part->mbd.mtd->name, block_no, i, entry);
126			continue;
127		}
128
129		if (part->sector_map[entry] != -1) {
130			printk(KERN_WARNING PREFIX
131				"'%s': more than one entry for sector %d\n",
132				part->mbd.mtd->name, entry);
133			part->errors = 1;
134			continue;
135		}
136
137		part->sector_map[entry] = block->offset +
138			(i + part->header_sectors_per_block) * SECTOR_SIZE;
139
140		block->used_sectors++;
141	}
142
143	if (block->free_sectors == part->data_sectors_per_block)
144		part->reserved_block = block_no;
145
146	return 0;
147}
148
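/*
 * Worked example of the geometry computed below, assuming (purely for
 * illustration) a 64 KiB erase unit: 65536 / 512 = 128 sectors per unit;
 * the header needs (3 + 128) * 2 = 262 bytes, i.e. one 512-byte header
 * sector, leaving 127 data sectors per unit and header_size = (3 + 127) * 2
 * = 260 bytes.  With N erase units the disk then advertises roughly
 * (127 * (N - 1) - 1) / 63 cylinders of 63 sectors, so about one unit's
 * worth of capacity is held back for reclaim.
 */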
149static int scan_header(struct partition *part)
150{
151	int sectors_per_block;
152	int i, rc = -ENOMEM;
153	int blocks_found;
154	size_t retlen;
155
156	sectors_per_block = part->block_size / SECTOR_SIZE;
157	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
158
159	if (part->total_blocks < 2)
160		return -ENOENT;
161
 162	/* each erase block has a header of three 16-bit words, followed by the map */
163	part->header_sectors_per_block =
164			((HEADER_MAP_OFFSET + sectors_per_block) *
165			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
166
167	part->data_sectors_per_block = sectors_per_block -
168			part->header_sectors_per_block;
169
170	part->header_size = (HEADER_MAP_OFFSET +
171			part->data_sectors_per_block) * sizeof(u16);
172
173	part->cylinders = (part->data_sectors_per_block *
174			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
175
176	part->sector_count = part->cylinders * SECTORS_PER_TRACK;
177
178	part->current_block = -1;
179	part->reserved_block = -1;
180	part->is_reclaiming = 0;
181
182	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
183	if (!part->header_cache)
184		goto err;
185
186	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
187			GFP_KERNEL);
188	if (!part->blocks)
189		goto err;
190
191	part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
192	if (!part->sector_map) {
193		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
 194			"sector map\n", part->mbd.mtd->name);
195		goto err;
196	}
197
198	for (i=0; i<part->sector_count; i++)
199		part->sector_map[i] = -1;
200
201	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
202		rc = part->mbd.mtd->read(part->mbd.mtd,
203				i * part->block_size, part->header_size,
204				&retlen, (u_char*)part->header_cache);
205
206		if (!rc && retlen != part->header_size)
207			rc = -EIO;
208
209		if (rc)
210			goto err;
211
212		if (!build_block_map(part, i))
213			blocks_found++;
214	}
215
216	if (blocks_found == 0) {
217		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
218				part->mbd.mtd->name);
219		rc = -ENOENT;
220		goto err;
221	}
222
223	if (part->reserved_block == -1) {
224		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
225				part->mbd.mtd->name);
226
227		part->errors = 1;
228	}
229
230	return 0;
231
232err:
233	vfree(part->sector_map);
234	kfree(part->header_cache);
235	kfree(part->blocks);
236
237	return rc;
238}
239
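/*
 * sector_map[] holds, for each logical 512-byte sector, the absolute flash
 * byte offset of its current copy, or -1 if the sector has never been
 * written (or was last written as all zeroes); reads of an unmapped sector
 * simply return a zero-filled buffer.
 */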
240static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
241{
242	struct partition *part = (struct partition*)dev;
243	u_long addr;
244	size_t retlen;
245	int rc;
246
247	if (sector >= part->sector_count)
248		return -EIO;
249
250	addr = part->sector_map[sector];
251	if (addr != -1) {
252		rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
253						&retlen, (u_char*)buf);
254		if (!rc && retlen != SECTOR_SIZE)
255			rc = -EIO;
256
257		if (rc) {
258			printk(KERN_WARNING PREFIX "error reading '%s' at "
259				"0x%lx\n", part->mbd.mtd->name, addr);
260			return rc;
261		}
262	} else
263		memset(buf, 0, SECTOR_SIZE);
264
265	return 0;
266}
267
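/*
 * Erases go through the asynchronous erase_info interface of this kernel
 * version: erase_block() submits the request with this callback attached,
 * and on completion the callback rewrites the RFD_MAGIC signature at the
 * start of the unit and marks the block BLOCK_OK, or BLOCK_FAILED if the
 * erase or the signature write went wrong.
 */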
268static void erase_callback(struct erase_info *erase)
269{
270	struct partition *part;
271	u16 magic;
272	int i, rc;
273	size_t retlen;
274
275	part = (struct partition*)erase->priv;
276
277	i = (u32)erase->addr / part->block_size;
278	if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
279	    erase->addr > UINT_MAX) {
280		printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
281				"on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
282		return;
283	}
284
285	if (erase->state != MTD_ERASE_DONE) {
286		printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
287				"state %d\n", (unsigned long long)erase->addr,
288				part->mbd.mtd->name, erase->state);
289
290		part->blocks[i].state = BLOCK_FAILED;
291		part->blocks[i].free_sectors = 0;
292		part->blocks[i].used_sectors = 0;
293
294		kfree(erase);
295
296		return;
297	}
298
299	magic = cpu_to_le16(RFD_MAGIC);
300
301	part->blocks[i].state = BLOCK_ERASED;
302	part->blocks[i].free_sectors = part->data_sectors_per_block;
303	part->blocks[i].used_sectors = 0;
304	part->blocks[i].erases++;
305
306	rc = part->mbd.mtd->write(part->mbd.mtd,
307		part->blocks[i].offset, sizeof(magic), &retlen,
308		(u_char*)&magic);
309
310	if (!rc && retlen != sizeof(magic))
311		rc = -EIO;
312
313	if (rc) {
314		printk(KERN_ERR PREFIX "'%s': unable to write RFD "
315				"header at 0x%lx\n",
316				part->mbd.mtd->name,
317				part->blocks[i].offset);
318		part->blocks[i].state = BLOCK_FAILED;
319	}
320	else
321		part->blocks[i].state = BLOCK_OK;
322
323	kfree(erase);
324}
325
326static int erase_block(struct partition *part, int block)
327{
328	struct erase_info *erase;
329	int rc = -ENOMEM;
330
331	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
332	if (!erase)
333		goto err;
334
335	erase->mtd = part->mbd.mtd;
336	erase->callback = erase_callback;
337	erase->addr = part->blocks[block].offset;
338	erase->len = part->block_size;
339	erase->priv = (u_long)part;
340
341	part->blocks[block].state = BLOCK_ERASING;
342	part->blocks[block].free_sectors = 0;
343
344	rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
345
346	if (rc) {
347		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
348				"failed\n", (unsigned long long)erase->addr,
349				(unsigned long long)erase->len, part->mbd.mtd->name);
350		kfree(erase);
351	}
352
353err:
354	return rc;
355}
356
357static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
358{
359	void *sector_data;
360	u16 *map;
361	size_t retlen;
362	int i, rc = -ENOMEM;
363
364	part->is_reclaiming = 1;
365
366	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
367	if (!sector_data)
368		goto err3;
369
370	map = kmalloc(part->header_size, GFP_KERNEL);
371	if (!map)
372		goto err2;
373
374	rc = part->mbd.mtd->read(part->mbd.mtd,
375		part->blocks[block_no].offset, part->header_size,
376		&retlen, (u_char*)map);
377
378	if (!rc && retlen != part->header_size)
379		rc = -EIO;
380
381	if (rc) {
382		printk(KERN_ERR PREFIX "error reading '%s' at "
383			"0x%lx\n", part->mbd.mtd->name,
384			part->blocks[block_no].offset);
385
386		goto err;
387	}
388
389	for (i=0; i<part->data_sectors_per_block; i++) {
390		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
391		u_long addr;
392
393
394		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
395			continue;
396
397		if (entry == SECTOR_ZERO)
398			entry = 0;
399
400		/* already warned about and ignored in build_block_map() */
401		if (entry >= part->sector_count)
402			continue;
403
404		addr = part->blocks[block_no].offset +
405			(i + part->header_sectors_per_block) * SECTOR_SIZE;
406
407		if (*old_sector == addr) {
408			*old_sector = -1;
409			if (!part->blocks[block_no].used_sectors--) {
410				rc = erase_block(part, block_no);
411				break;
412			}
413			continue;
414		}
415		rc = part->mbd.mtd->read(part->mbd.mtd, addr,
416			SECTOR_SIZE, &retlen, sector_data);
417
418		if (!rc && retlen != SECTOR_SIZE)
419			rc = -EIO;
420
421		if (rc) {
422			printk(KERN_ERR PREFIX "'%s': Unable to "
423				"read sector for relocation\n",
424				part->mbd.mtd->name);
425
426			goto err;
427		}
428
429		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
430				entry, sector_data);
431
432		if (rc)
433			goto err;
434	}
435
436err:
437	kfree(map);
438err2:
439	kfree(sector_data);
440err3:
441	part->is_reclaiming = 0;
442
443	return rc;
444}
445
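/*
 * Reclaim policy, as implemented below: if any non-reserved unit still has
 * a free sector, reclaiming is postponed altogether; otherwise each unit is
 * scored as used_sectors + erases (minus one for the unit holding the old
 * copy of the sector being rewritten; other completely full units are
 * skipped), and the lowest-scoring unit is either erased outright or has
 * its live sectors copied away by move_block_contents().
 */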
446static int reclaim_block(struct partition *part, u_long *old_sector)
447{
448	int block, best_block, score, old_sector_block;
449	int rc;
450
451	/* we have a race if sync doesn't exist */
452	if (part->mbd.mtd->sync)
453		part->mbd.mtd->sync(part->mbd.mtd);
454
455	score = 0x7fffffff; /* MAX_INT */
456	best_block = -1;
457	if (*old_sector != -1)
458		old_sector_block = *old_sector / part->block_size;
459	else
460		old_sector_block = -1;
461
462	for (block=0; block<part->total_blocks; block++) {
463		int this_score;
464
465		if (block == part->reserved_block)
466			continue;
467
468		/*
469		 * Postpone reclaiming if there is a free sector as
470		 * more removed sectors is more efficient (have to move
471		 * less).
472		 */
473		if (part->blocks[block].free_sectors)
474			return 0;
475
476		this_score = part->blocks[block].used_sectors;
477
478		if (block == old_sector_block)
479			this_score--;
480		else {
481			/* no point in moving a full block */
482			if (part->blocks[block].used_sectors ==
483					part->data_sectors_per_block)
484				continue;
485		}
486
487		this_score += part->blocks[block].erases;
488
489		if (this_score < score) {
490			best_block = block;
491			score = this_score;
492		}
493	}
494
495	if (best_block == -1)
496		return -ENOSPC;
497
498	part->current_block = -1;
499	part->reserved_block = best_block;
500
501	pr_debug("reclaim_block: reclaiming block #%d with %d used "
502		 "%d free sectors\n", best_block,
503		 part->blocks[best_block].used_sectors,
504		 part->blocks[best_block].free_sectors);
505
506	if (part->blocks[best_block].used_sectors)
507		rc = move_block_contents(part, best_block, old_sector);
508	else
509		rc = erase_block(part, best_block);
510
511	return rc;
512}
513
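/*
 * When there is no current block, the search in find_free_block() starts
 * from a jiffies-based pseudo-random unit rather than always from unit 0,
 * which helps spread fresh writes (and hence erases) across the flash.
 */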
514/*
515 * IMPROVE: It would be best to choose the block with the most deleted sectors,
516 * because if we fill that one up first it'll have the most chance of having
517 * the least live sectors at reclaim.
518 */
519static int find_free_block(struct partition *part)
520{
521	int block, stop;
522
523	block = part->current_block == -1 ?
524			jiffies % part->total_blocks : part->current_block;
525	stop = block;
526
527	do {
528		if (part->blocks[block].free_sectors &&
529				block != part->reserved_block)
530			return block;
531
532		if (part->blocks[block].state == BLOCK_UNUSED)
533			erase_block(part, block);
534
535		if (++block >= part->total_blocks)
536			block = 0;
537
538	} while (block != stop);
539
540	return -1;
541}
542
543static int find_writable_block(struct partition *part, u_long *old_sector)
544{
545	int rc, block;
546	size_t retlen;
547
548	block = find_free_block(part);
549
550	if (block == -1) {
551		if (!part->is_reclaiming) {
552			rc = reclaim_block(part, old_sector);
553			if (rc)
554				goto err;
555
556			block = find_free_block(part);
557		}
558
559		if (block == -1) {
560			rc = -ENOSPC;
561			goto err;
562		}
563	}
564
565	rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
566		part->header_size, &retlen, (u_char*)part->header_cache);
567
568	if (!rc && retlen != part->header_size)
569		rc = -EIO;
570
571	if (rc) {
572		printk(KERN_ERR PREFIX "'%s': unable to read header at "
573				"0x%lx\n", part->mbd.mtd->name,
574				part->blocks[block].offset);
575		goto err;
576	}
577
578	part->current_block = block;
579
580err:
581	return rc;
582}
583
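/*
 * Invalidating a sector only ever clears bits: the 16-bit map entry of the
 * old copy is overwritten in place with SECTOR_DELETED (0x0000), which
 * relies on NOR-style writes (the driver only binds to MTD_NORFLASH
 * devices, see rfd_ftl_add_mtd()).  A unit with no used and no free
 * sectors left is erased and becomes writable again.
 */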
584static int mark_sector_deleted(struct partition *part, u_long old_addr)
585{
586	int block, offset, rc;
587	u_long addr;
588	size_t retlen;
589	u16 del = cpu_to_le16(SECTOR_DELETED);
590
591	block = old_addr / part->block_size;
592	offset = (old_addr % part->block_size) / SECTOR_SIZE -
593		part->header_sectors_per_block;
594
595	addr = part->blocks[block].offset +
596			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
597	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
598		sizeof(del), &retlen, (u_char*)&del);
599
600	if (!rc && retlen != sizeof(del))
601		rc = -EIO;
602
603	if (rc) {
604		printk(KERN_ERR PREFIX "error writing '%s' at "
605			"0x%lx\n", part->mbd.mtd->name, addr);
606		if (rc)
607			goto err;
608	}
609	if (block == part->current_block)
610		part->header_cache[offset + HEADER_MAP_OFFSET] = del;
611
612	part->blocks[block].used_sectors--;
613
614	if (!part->blocks[block].used_sectors &&
615	    !part->blocks[block].free_sectors)
616		rc = erase_block(part, block);
617
618err:
619	return rc;
620}
621
622static int find_free_sector(const struct partition *part, const struct block *block)
623{
624	int i, stop;
625
626	i = stop = part->data_sectors_per_block - block->free_sectors;
627
628	do {
629		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
630				== SECTOR_FREE)
631			return i;
632
633		if (++i == part->data_sectors_per_block)
634			i = 0;
635	}
636	while(i != stop);
637
638	return -1;
639}
640
641static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
642{
643	struct partition *part = (struct partition*)dev;
644	struct block *block;
645	u_long addr;
646	int i;
647	int rc;
648	size_t retlen;
649	u16 entry;
650
651	if (part->current_block == -1 ||
652		!part->blocks[part->current_block].free_sectors) {
653
654		rc = find_writable_block(part, old_addr);
655		if (rc)
656			goto err;
657	}
658
659	block = &part->blocks[part->current_block];
660
661	i = find_free_sector(part, block);
662
663	if (i < 0) {
664		rc = -ENOSPC;
665		goto err;
666	}
667
668	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
669		block->offset;
670	rc = part->mbd.mtd->write(part->mbd.mtd,
671		addr, SECTOR_SIZE, &retlen, (u_char*)buf);
672
673	if (!rc && retlen != SECTOR_SIZE)
674		rc = -EIO;
675
676	if (rc) {
677		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
678				part->mbd.mtd->name, addr);
679		if (rc)
680			goto err;
681	}
682
683	part->sector_map[sector] = addr;
684
685	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
686
687	part->header_cache[i + HEADER_MAP_OFFSET] = entry;
688
689	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
690	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
691			sizeof(entry), &retlen, (u_char*)&entry);
692
693	if (!rc && retlen != sizeof(entry))
694		rc = -EIO;
695
696	if (rc) {
697		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
698				part->mbd.mtd->name, addr);
699		if (rc)
700			goto err;
701	}
702	block->used_sectors++;
703	block->free_sectors--;
704
705err:
706	return rc;
707}
708
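/*
 * Note the all-zero shortcut in rfd_ftl_writesect(): the buffer is scanned
 * and, if every byte is zero, nothing is written to flash at all; the
 * logical sector is just unmapped so that later reads return zeroes.  In
 * either case any previous copy of the sector is marked deleted afterwards.
 */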
709static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
710{
711	struct partition *part = (struct partition*)dev;
712	u_long old_addr;
713	int i;
714	int rc = 0;
715
716	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
717
718	if (part->reserved_block == -1) {
719		rc = -EACCES;
720		goto err;
721	}
722
723	if (sector >= part->sector_count) {
724		rc = -EIO;
725		goto err;
726	}
727
728	old_addr = part->sector_map[sector];
729
730	for (i=0; i<SECTOR_SIZE; i++) {
731		if (!buf[i])
732			continue;
733
734		rc = do_writesect(dev, sector, buf, &old_addr);
735		if (rc)
736			goto err;
737		break;
738	}
739
740	if (i == SECTOR_SIZE)
741		part->sector_map[sector] = -1;
742
743	if (old_addr != -1)
744		rc = mark_sector_deleted(part, old_addr);
745
746err:
747	return rc;
748}
749
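/*
 * The geometry reported below is synthetic: one head, 63 sectors per track
 * and the cylinder count computed in scan_header(), giving a usable
 * capacity of cylinders * 63 * 512 bytes.
 */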
750static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
751{
752	struct partition *part = (struct partition*)dev;
753
754	geo->heads = 1;
755	geo->sectors = SECTORS_PER_TRACK;
756	geo->cylinders = part->cylinders;
757
758	return 0;
759}
760
761static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
762{
763	struct partition *part;
764
765	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
766		return;
767
768	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
769	if (!part)
770		return;
771
772	part->mbd.mtd = mtd;
773
774	if (block_size)
775		part->block_size = block_size;
776	else {
777		if (!mtd->erasesize) {
 778			printk(KERN_WARNING PREFIX "please provide block_size\n");
779			goto out;
780		} else
781			part->block_size = mtd->erasesize;
782	}
783
784	if (scan_header(part) == 0) {
785		part->mbd.size = part->sector_count;
786		part->mbd.tr = tr;
787		part->mbd.devnum = -1;
788		if (!(mtd->flags & MTD_WRITEABLE))
789			part->mbd.readonly = 1;
790		else if (part->errors) {
791			printk(KERN_WARNING PREFIX "'%s': errors found, "
792					"setting read-only\n", mtd->name);
793			part->mbd.readonly = 1;
794		}
795
796		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
797				mtd->name, mtd->type, mtd->flags);
798
799		if (!add_mtd_blktrans_dev((void*)part))
800			return;
801	}
802out:
803	kfree(part);
804}
805
806static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
807{
808	struct partition *part = (struct partition*)dev;
809	int i;
810
811	for (i=0; i<part->total_blocks; i++) {
812		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
813			part->mbd.mtd->name, i, part->blocks[i].erases);
814	}
815
816	del_mtd_blktrans_dev(dev);
817	vfree(part->sector_map);
818	kfree(part->header_cache);
819	kfree(part->blocks);
820}
821
822static struct mtd_blktrans_ops rfd_ftl_tr = {
823	.name		= "rfd",
824	.major		= RFD_FTL_MAJOR,
825	.part_bits	= PART_BITS,
826	.blksize 	= SECTOR_SIZE,
827
828	.readsect	= rfd_ftl_readsect,
829	.writesect	= rfd_ftl_writesect,
830	.getgeo		= rfd_ftl_getgeo,
831	.add_mtd	= rfd_ftl_add_mtd,
832	.remove_dev	= rfd_ftl_remove_dev,
833	.owner		= THIS_MODULE,
834};
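/*
 * Registering this mtd_blktrans_ops creates a block device (major 256) for
 * every RFD-formatted NOR MTD that is added; with part_bits = 4 each disk
 * gets 16 minors, i.e. the whole device plus up to 15 partitions.  The
 * disks are typically named rfda, rfdb, ... after the "rfd" name above.
 */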
835
836static int __init init_rfd_ftl(void)
837{
838	return register_mtd_blktrans(&rfd_ftl_tr);
839}
840
841static void __exit cleanup_rfd_ftl(void)
842{
843	deregister_mtd_blktrans(&rfd_ftl_tr);
844}
845
846module_init(init_rfd_ftl);
847module_exit(cleanup_rfd_ftl);
848
849MODULE_LICENSE("GPL");
850MODULE_AUTHOR("Sean Young <sean@mess.org>");
851MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
852		"used by General Software's Embedded BIOS");
853
v3.5.6
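(The v3.5.6 copy of the same file follows; apart from explicitly including <linux/module.h>, it differs mainly in calling the mtd_read()/mtd_write()/mtd_erase()/mtd_sync() wrappers instead of the mtd_info method pointers used above.)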
  1/*
  2 * rfd_ftl.c -- resident flash disk (flash translation layer)
  3 *
  4 * Copyright © 2005  Sean Young <sean@mess.org>
  5 *
  6 * This type of flash translation layer (FTL) is used by the Embedded BIOS
  7 * by General Software. It is known as the Resident Flash Disk (RFD), see:
  8 *
  9 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 10 *
 11 * based on ftl.c
 12 */
 13
 14#include <linux/hdreg.h>
 15#include <linux/init.h>
 16#include <linux/mtd/blktrans.h>
 17#include <linux/mtd/mtd.h>
 18#include <linux/vmalloc.h>
 19#include <linux/slab.h>
 20#include <linux/jiffies.h>
 21#include <linux/module.h>
 22
 23#include <asm/types.h>
 24
 25static int block_size = 0;
 26module_param(block_size, int, 0);
 27MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
 28
 29#define PREFIX "rfd_ftl: "
 30
 31/* This major has been assigned by device@lanana.org */
 32#ifndef RFD_FTL_MAJOR
 33#define RFD_FTL_MAJOR		256
 34#endif
 35
 36/* Maximum number of partitions in an FTL region */
 37#define PART_BITS		4
 38
 39/* An erase unit should start with this value */
 40#define RFD_MAGIC		0x9193
 41
 42/* the second value is 0xffff or 0xffc8; function unknown */
 43
 44/* the third value is always 0xffff, ignored */
 45
 46/* next is an array of mapping for each corresponding sector */
 47#define HEADER_MAP_OFFSET	3
 48#define SECTOR_DELETED		0x0000
 49#define SECTOR_ZERO		0xfffe
 50#define SECTOR_FREE		0xffff
 51
 52#define SECTOR_SIZE		512
 53
 54#define SECTORS_PER_TRACK	63
 55
 56struct block {
 57	enum {
 58		BLOCK_OK,
 59		BLOCK_ERASING,
 60		BLOCK_ERASED,
 61		BLOCK_UNUSED,
 62		BLOCK_FAILED
 63	} state;
 64	int free_sectors;
 65	int used_sectors;
 66	int erases;
 67	u_long offset;
 68};
 69
 70struct partition {
 71	struct mtd_blktrans_dev mbd;
 72
 73	u_int block_size;		/* size of erase unit */
 74	u_int total_blocks;		/* number of erase units */
 75	u_int header_sectors_per_block;	/* header sectors in erase unit */
 76	u_int data_sectors_per_block;	/* data sectors in erase unit */
 77	u_int sector_count;		/* sectors in translated disk */
 78	u_int header_size;		/* bytes in header sector */
 79	int reserved_block;		/* block next up for reclaim */
 80	int current_block;		/* block to write to */
 81	u16 *header_cache;		/* cached header */
 82
 83	int is_reclaiming;
 84	int cylinders;
 85	int errors;
 86	u_long *sector_map;
 87	struct block *blocks;
 88};
 89
 90static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
 91
 92static int build_block_map(struct partition *part, int block_no)
 93{
 94	struct block *block = &part->blocks[block_no];
 95	int i;
 96
 97	block->offset = part->block_size * block_no;
 98
 99	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
100		block->state = BLOCK_UNUSED;
101		return -ENOENT;
102	}
103
104	block->state = BLOCK_OK;
105
106	for (i=0; i<part->data_sectors_per_block; i++) {
107		u16 entry;
108
109		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
110
111		if (entry == SECTOR_DELETED)
112			continue;
113
114		if (entry == SECTOR_FREE) {
115			block->free_sectors++;
116			continue;
117		}
118
119		if (entry == SECTOR_ZERO)
120			entry = 0;
121
122		if (entry >= part->sector_count) {
123			printk(KERN_WARNING PREFIX
124				"'%s': unit #%d: entry %d corrupt, "
125				"sector %d out of range\n",
126				part->mbd.mtd->name, block_no, i, entry);
127			continue;
128		}
129
130		if (part->sector_map[entry] != -1) {
131			printk(KERN_WARNING PREFIX
132				"'%s': more than one entry for sector %d\n",
133				part->mbd.mtd->name, entry);
134			part->errors = 1;
135			continue;
136		}
137
138		part->sector_map[entry] = block->offset +
139			(i + part->header_sectors_per_block) * SECTOR_SIZE;
140
141		block->used_sectors++;
142	}
143
144	if (block->free_sectors == part->data_sectors_per_block)
145		part->reserved_block = block_no;
146
147	return 0;
148}
149
150static int scan_header(struct partition *part)
151{
152	int sectors_per_block;
153	int i, rc = -ENOMEM;
154	int blocks_found;
155	size_t retlen;
156
157	sectors_per_block = part->block_size / SECTOR_SIZE;
158	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
159
160	if (part->total_blocks < 2)
161		return -ENOENT;
162
 163	/* each erase block has a header of three 16-bit words, followed by the map */
164	part->header_sectors_per_block =
165			((HEADER_MAP_OFFSET + sectors_per_block) *
166			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
167
168	part->data_sectors_per_block = sectors_per_block -
169			part->header_sectors_per_block;
170
171	part->header_size = (HEADER_MAP_OFFSET +
172			part->data_sectors_per_block) * sizeof(u16);
173
174	part->cylinders = (part->data_sectors_per_block *
175			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
176
177	part->sector_count = part->cylinders * SECTORS_PER_TRACK;
178
179	part->current_block = -1;
180	part->reserved_block = -1;
181	part->is_reclaiming = 0;
182
183	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
184	if (!part->header_cache)
185		goto err;
186
187	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
188			GFP_KERNEL);
189	if (!part->blocks)
190		goto err;
191
192	part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
193	if (!part->sector_map) {
194		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
 195			"sector map\n", part->mbd.mtd->name);
196		goto err;
197	}
198
199	for (i=0; i<part->sector_count; i++)
200		part->sector_map[i] = -1;
201
202	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
203		rc = mtd_read(part->mbd.mtd, i * part->block_size,
204			      part->header_size, &retlen,
205			      (u_char *)part->header_cache);
206
207		if (!rc && retlen != part->header_size)
208			rc = -EIO;
209
210		if (rc)
211			goto err;
212
213		if (!build_block_map(part, i))
214			blocks_found++;
215	}
216
217	if (blocks_found == 0) {
218		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
219				part->mbd.mtd->name);
220		rc = -ENOENT;
221		goto err;
222	}
223
224	if (part->reserved_block == -1) {
225		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
226				part->mbd.mtd->name);
227
228		part->errors = 1;
229	}
230
231	return 0;
232
233err:
234	vfree(part->sector_map);
235	kfree(part->header_cache);
236	kfree(part->blocks);
237
238	return rc;
239}
240
241static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
242{
243	struct partition *part = (struct partition*)dev;
244	u_long addr;
245	size_t retlen;
246	int rc;
247
248	if (sector >= part->sector_count)
249		return -EIO;
250
251	addr = part->sector_map[sector];
252	if (addr != -1) {
253		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
254			      (u_char *)buf);
255		if (!rc && retlen != SECTOR_SIZE)
256			rc = -EIO;
257
258		if (rc) {
259			printk(KERN_WARNING PREFIX "error reading '%s' at "
260				"0x%lx\n", part->mbd.mtd->name, addr);
261			return rc;
262		}
263	} else
264		memset(buf, 0, SECTOR_SIZE);
265
266	return 0;
267}
268
269static void erase_callback(struct erase_info *erase)
270{
271	struct partition *part;
272	u16 magic;
273	int i, rc;
274	size_t retlen;
275
276	part = (struct partition*)erase->priv;
277
278	i = (u32)erase->addr / part->block_size;
279	if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
280	    erase->addr > UINT_MAX) {
281		printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
282				"on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
283		return;
284	}
285
286	if (erase->state != MTD_ERASE_DONE) {
287		printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
288				"state %d\n", (unsigned long long)erase->addr,
289				part->mbd.mtd->name, erase->state);
290
291		part->blocks[i].state = BLOCK_FAILED;
292		part->blocks[i].free_sectors = 0;
293		part->blocks[i].used_sectors = 0;
294
295		kfree(erase);
296
297		return;
298	}
299
300	magic = cpu_to_le16(RFD_MAGIC);
301
302	part->blocks[i].state = BLOCK_ERASED;
303	part->blocks[i].free_sectors = part->data_sectors_per_block;
304	part->blocks[i].used_sectors = 0;
305	part->blocks[i].erases++;
306
307	rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic),
308		       &retlen, (u_char *)&magic);
309
310	if (!rc && retlen != sizeof(magic))
311		rc = -EIO;
312
313	if (rc) {
314		printk(KERN_ERR PREFIX "'%s': unable to write RFD "
315				"header at 0x%lx\n",
316				part->mbd.mtd->name,
317				part->blocks[i].offset);
318		part->blocks[i].state = BLOCK_FAILED;
319	}
320	else
321		part->blocks[i].state = BLOCK_OK;
322
323	kfree(erase);
324}
325
326static int erase_block(struct partition *part, int block)
327{
328	struct erase_info *erase;
329	int rc = -ENOMEM;
330
331	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
332	if (!erase)
333		goto err;
334
335	erase->mtd = part->mbd.mtd;
336	erase->callback = erase_callback;
337	erase->addr = part->blocks[block].offset;
338	erase->len = part->block_size;
339	erase->priv = (u_long)part;
340
341	part->blocks[block].state = BLOCK_ERASING;
342	part->blocks[block].free_sectors = 0;
343
344	rc = mtd_erase(part->mbd.mtd, erase);
345
346	if (rc) {
347		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
348				"failed\n", (unsigned long long)erase->addr,
349				(unsigned long long)erase->len, part->mbd.mtd->name);
350		kfree(erase);
351	}
352
353err:
354	return rc;
355}
356
357static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
358{
359	void *sector_data;
360	u16 *map;
361	size_t retlen;
362	int i, rc = -ENOMEM;
363
364	part->is_reclaiming = 1;
365
366	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
367	if (!sector_data)
368		goto err3;
369
370	map = kmalloc(part->header_size, GFP_KERNEL);
371	if (!map)
372		goto err2;
373
374	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
375		      part->header_size, &retlen, (u_char *)map);
376
377	if (!rc && retlen != part->header_size)
378		rc = -EIO;
379
380	if (rc) {
381		printk(KERN_ERR PREFIX "error reading '%s' at "
382			"0x%lx\n", part->mbd.mtd->name,
383			part->blocks[block_no].offset);
384
385		goto err;
386	}
387
388	for (i=0; i<part->data_sectors_per_block; i++) {
389		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
390		u_long addr;
391
392
393		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
394			continue;
395
396		if (entry == SECTOR_ZERO)
397			entry = 0;
398
399		/* already warned about and ignored in build_block_map() */
400		if (entry >= part->sector_count)
401			continue;
402
403		addr = part->blocks[block_no].offset +
404			(i + part->header_sectors_per_block) * SECTOR_SIZE;
405
406		if (*old_sector == addr) {
407			*old_sector = -1;
408			if (!part->blocks[block_no].used_sectors--) {
409				rc = erase_block(part, block_no);
410				break;
411			}
412			continue;
413		}
414		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
415			      sector_data);
416
417		if (!rc && retlen != SECTOR_SIZE)
418			rc = -EIO;
419
420		if (rc) {
421			printk(KERN_ERR PREFIX "'%s': Unable to "
422				"read sector for relocation\n",
423				part->mbd.mtd->name);
424
425			goto err;
426		}
427
428		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
429				entry, sector_data);
430
431		if (rc)
432			goto err;
433	}
434
435err:
436	kfree(map);
437err2:
438	kfree(sector_data);
439err3:
440	part->is_reclaiming = 0;
441
442	return rc;
443}
444
445static int reclaim_block(struct partition *part, u_long *old_sector)
446{
447	int block, best_block, score, old_sector_block;
448	int rc;
449
450	/* we have a race if sync doesn't exist */
451	mtd_sync(part->mbd.mtd);
452
453	score = 0x7fffffff; /* MAX_INT */
454	best_block = -1;
455	if (*old_sector != -1)
456		old_sector_block = *old_sector / part->block_size;
457	else
458		old_sector_block = -1;
459
460	for (block=0; block<part->total_blocks; block++) {
461		int this_score;
462
463		if (block == part->reserved_block)
464			continue;
465
466		/*
467		 * Postpone reclaiming if there is a free sector as
468		 * more removed sectors is more efficient (have to move
469		 * less).
470		 */
471		if (part->blocks[block].free_sectors)
472			return 0;
473
474		this_score = part->blocks[block].used_sectors;
475
476		if (block == old_sector_block)
477			this_score--;
478		else {
479			/* no point in moving a full block */
480			if (part->blocks[block].used_sectors ==
481					part->data_sectors_per_block)
482				continue;
483		}
484
485		this_score += part->blocks[block].erases;
486
487		if (this_score < score) {
488			best_block = block;
489			score = this_score;
490		}
491	}
492
493	if (best_block == -1)
494		return -ENOSPC;
495
496	part->current_block = -1;
497	part->reserved_block = best_block;
498
499	pr_debug("reclaim_block: reclaiming block #%d with %d used "
500		 "%d free sectors\n", best_block,
501		 part->blocks[best_block].used_sectors,
502		 part->blocks[best_block].free_sectors);
503
504	if (part->blocks[best_block].used_sectors)
505		rc = move_block_contents(part, best_block, old_sector);
506	else
507		rc = erase_block(part, best_block);
508
509	return rc;
510}
511
512/*
513 * IMPROVE: It would be best to choose the block with the most deleted sectors,
514 * because if we fill that one up first it'll have the most chance of having
515 * the least live sectors at reclaim.
516 */
517static int find_free_block(struct partition *part)
518{
519	int block, stop;
520
521	block = part->current_block == -1 ?
522			jiffies % part->total_blocks : part->current_block;
523	stop = block;
524
525	do {
526		if (part->blocks[block].free_sectors &&
527				block != part->reserved_block)
528			return block;
529
530		if (part->blocks[block].state == BLOCK_UNUSED)
531			erase_block(part, block);
532
533		if (++block >= part->total_blocks)
534			block = 0;
535
536	} while (block != stop);
537
538	return -1;
539}
540
541static int find_writable_block(struct partition *part, u_long *old_sector)
542{
543	int rc, block;
544	size_t retlen;
545
546	block = find_free_block(part);
547
548	if (block == -1) {
549		if (!part->is_reclaiming) {
550			rc = reclaim_block(part, old_sector);
551			if (rc)
552				goto err;
553
554			block = find_free_block(part);
555		}
556
557		if (block == -1) {
558			rc = -ENOSPC;
559			goto err;
560		}
561	}
562
563	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
564		      part->header_size, &retlen,
565		      (u_char *)part->header_cache);
566
567	if (!rc && retlen != part->header_size)
568		rc = -EIO;
569
570	if (rc) {
571		printk(KERN_ERR PREFIX "'%s': unable to read header at "
572				"0x%lx\n", part->mbd.mtd->name,
573				part->blocks[block].offset);
574		goto err;
575	}
576
577	part->current_block = block;
578
579err:
580	return rc;
581}
582
583static int mark_sector_deleted(struct partition *part, u_long old_addr)
584{
585	int block, offset, rc;
586	u_long addr;
587	size_t retlen;
588	u16 del = cpu_to_le16(SECTOR_DELETED);
589
590	block = old_addr / part->block_size;
591	offset = (old_addr % part->block_size) / SECTOR_SIZE -
592		part->header_sectors_per_block;
593
594	addr = part->blocks[block].offset +
595			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
596	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
597		       (u_char *)&del);
598
599	if (!rc && retlen != sizeof(del))
600		rc = -EIO;
601
602	if (rc) {
603		printk(KERN_ERR PREFIX "error writing '%s' at "
604			"0x%lx\n", part->mbd.mtd->name, addr);
605		if (rc)
606			goto err;
607	}
608	if (block == part->current_block)
609		part->header_cache[offset + HEADER_MAP_OFFSET] = del;
610
611	part->blocks[block].used_sectors--;
612
613	if (!part->blocks[block].used_sectors &&
614	    !part->blocks[block].free_sectors)
615		rc = erase_block(part, block);
616
617err:
618	return rc;
619}
620
621static int find_free_sector(const struct partition *part, const struct block *block)
622{
623	int i, stop;
624
625	i = stop = part->data_sectors_per_block - block->free_sectors;
626
627	do {
628		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
629				== SECTOR_FREE)
630			return i;
631
632		if (++i == part->data_sectors_per_block)
633			i = 0;
634	}
635	while(i != stop);
636
637	return -1;
638}
639
640static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
641{
642	struct partition *part = (struct partition*)dev;
643	struct block *block;
644	u_long addr;
645	int i;
646	int rc;
647	size_t retlen;
648	u16 entry;
649
650	if (part->current_block == -1 ||
651		!part->blocks[part->current_block].free_sectors) {
652
653		rc = find_writable_block(part, old_addr);
654		if (rc)
655			goto err;
656	}
657
658	block = &part->blocks[part->current_block];
659
660	i = find_free_sector(part, block);
661
662	if (i < 0) {
663		rc = -ENOSPC;
664		goto err;
665	}
666
667	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
668		block->offset;
669	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
670		       (u_char *)buf);
671
672	if (!rc && retlen != SECTOR_SIZE)
673		rc = -EIO;
674
675	if (rc) {
676		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
677				part->mbd.mtd->name, addr);
678		if (rc)
679			goto err;
680	}
681
682	part->sector_map[sector] = addr;
683
684	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
685
686	part->header_cache[i + HEADER_MAP_OFFSET] = entry;
687
688	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
689	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
690		       (u_char *)&entry);
691
692	if (!rc && retlen != sizeof(entry))
693		rc = -EIO;
694
695	if (rc) {
696		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
697				part->mbd.mtd->name, addr);
698		if (rc)
699			goto err;
700	}
701	block->used_sectors++;
702	block->free_sectors--;
703
704err:
705	return rc;
706}
707
708static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
709{
710	struct partition *part = (struct partition*)dev;
711	u_long old_addr;
712	int i;
713	int rc = 0;
714
715	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
716
717	if (part->reserved_block == -1) {
718		rc = -EACCES;
719		goto err;
720	}
721
722	if (sector >= part->sector_count) {
723		rc = -EIO;
724		goto err;
725	}
726
727	old_addr = part->sector_map[sector];
728
729	for (i=0; i<SECTOR_SIZE; i++) {
730		if (!buf[i])
731			continue;
732
733		rc = do_writesect(dev, sector, buf, &old_addr);
734		if (rc)
735			goto err;
736		break;
737	}
738
739	if (i == SECTOR_SIZE)
740		part->sector_map[sector] = -1;
741
742	if (old_addr != -1)
743		rc = mark_sector_deleted(part, old_addr);
744
745err:
746	return rc;
747}
748
749static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
750{
751	struct partition *part = (struct partition*)dev;
752
753	geo->heads = 1;
754	geo->sectors = SECTORS_PER_TRACK;
755	geo->cylinders = part->cylinders;
756
757	return 0;
758}
759
760static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
761{
762	struct partition *part;
763
764	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
765		return;
766
767	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
768	if (!part)
769		return;
770
771	part->mbd.mtd = mtd;
772
773	if (block_size)
774		part->block_size = block_size;
775	else {
776		if (!mtd->erasesize) {
 777			printk(KERN_WARNING PREFIX "please provide block_size\n");
778			goto out;
779		} else
780			part->block_size = mtd->erasesize;
781	}
782
783	if (scan_header(part) == 0) {
784		part->mbd.size = part->sector_count;
785		part->mbd.tr = tr;
786		part->mbd.devnum = -1;
787		if (!(mtd->flags & MTD_WRITEABLE))
788			part->mbd.readonly = 1;
789		else if (part->errors) {
790			printk(KERN_WARNING PREFIX "'%s': errors found, "
791					"setting read-only\n", mtd->name);
792			part->mbd.readonly = 1;
793		}
794
795		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
796				mtd->name, mtd->type, mtd->flags);
797
798		if (!add_mtd_blktrans_dev((void*)part))
799			return;
800	}
801out:
802	kfree(part);
803}
804
805static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
806{
807	struct partition *part = (struct partition*)dev;
808	int i;
809
810	for (i=0; i<part->total_blocks; i++) {
811		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
812			part->mbd.mtd->name, i, part->blocks[i].erases);
813	}
814
815	del_mtd_blktrans_dev(dev);
816	vfree(part->sector_map);
817	kfree(part->header_cache);
818	kfree(part->blocks);
819}
820
821static struct mtd_blktrans_ops rfd_ftl_tr = {
822	.name		= "rfd",
823	.major		= RFD_FTL_MAJOR,
824	.part_bits	= PART_BITS,
825	.blksize 	= SECTOR_SIZE,
826
827	.readsect	= rfd_ftl_readsect,
828	.writesect	= rfd_ftl_writesect,
829	.getgeo		= rfd_ftl_getgeo,
830	.add_mtd	= rfd_ftl_add_mtd,
831	.remove_dev	= rfd_ftl_remove_dev,
832	.owner		= THIS_MODULE,
833};
834
835static int __init init_rfd_ftl(void)
836{
837	return register_mtd_blktrans(&rfd_ftl_tr);
838}
839
840static void __exit cleanup_rfd_ftl(void)
841{
842	deregister_mtd_blktrans(&rfd_ftl_tr);
843}
844
845module_init(init_rfd_ftl);
846module_exit(cleanup_rfd_ftl);
847
848MODULE_LICENSE("GPL");
849MODULE_AUTHOR("Sean Young <sean@mess.org>");
850MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
851		"used by General Software's Embedded BIOS");
852