v3.1
 
  1/*
  2 * rfd_ftl.c -- resident flash disk (flash translation layer)
  3 *
  4 * Copyright © 2005  Sean Young <sean@mess.org>
  5 *
  6 * This type of flash translation layer (FTL) is used by the Embedded BIOS
  7 * by General Software. It is known as the Resident Flash Disk (RFD), see:
  8 *
  9 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 10 *
 11 * based on ftl.c
 12 */
 13
 14#include <linux/hdreg.h>
 15#include <linux/init.h>
 16#include <linux/mtd/blktrans.h>
 17#include <linux/mtd/mtd.h>
 18#include <linux/vmalloc.h>
 19#include <linux/slab.h>
 20#include <linux/jiffies.h>
 21
 22#include <asm/types.h>
 23
 24static int block_size = 0;
 25module_param(block_size, int, 0);
 26MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
 27
 28#define PREFIX "rfd_ftl: "
 29
 30/* This major has been assigned by device@lanana.org */
 31#ifndef RFD_FTL_MAJOR
 32#define RFD_FTL_MAJOR		256
 33#endif
 34
 35/* Maximum number of partitions in an FTL region */
 36#define PART_BITS		4
 37
 38/* An erase unit should start with this value */
 39#define RFD_MAGIC		0x9193
 40
 41/* the second value is 0xffff or 0xffc8; function unknown */
 42
 43/* the third value is always 0xffff, ignored */
 44
 45/* next is an array of mapping for each corresponding sector */
 46#define HEADER_MAP_OFFSET	3
 47#define SECTOR_DELETED		0x0000
 48#define SECTOR_ZERO		0xfffe
 49#define SECTOR_FREE		0xffff
 50
 51#define SECTOR_SIZE		512
 52
 53#define SECTORS_PER_TRACK	63
 54
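For orientation, the constants above describe the little-endian header at the start of every erase unit. The struct below is purely an illustration of that layout (the driver itself treats the header as a flat u16 array indexed via HEADER_MAP_OFFSET); the struct name and field names are invented here and assume __le16 from <linux/types.h>:

/*
 * Illustrative layout of an RFD erase-unit header (all fields little-endian).
 * Not part of the driver; shown only to visualise the format described above.
 */
struct rfd_unit_header_example {
	__le16 magic;		/* RFD_MAGIC (0x9193) */
	__le16 unknown;		/* 0xffff or 0xffc8, function unknown */
	__le16 reserved;	/* always 0xffff, ignored */
	__le16 map[];		/* one entry per data sector: SECTOR_FREE,
				 * SECTOR_DELETED, SECTOR_ZERO (logical
				 * sector 0) or the logical sector number */
};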
 55struct block {
 56	enum {
 57		BLOCK_OK,
 58		BLOCK_ERASING,
 59		BLOCK_ERASED,
 60		BLOCK_UNUSED,
 61		BLOCK_FAILED
 62	} state;
 63	int free_sectors;
 64	int used_sectors;
 65	int erases;
 66	u_long offset;
 67};
 68
 69struct partition {
 70	struct mtd_blktrans_dev mbd;
 71
 72	u_int block_size;		/* size of erase unit */
 73	u_int total_blocks;		/* number of erase units */
 74	u_int header_sectors_per_block;	/* header sectors in erase unit */
 75	u_int data_sectors_per_block;	/* data sectors in erase unit */
 76	u_int sector_count;		/* sectors in translated disk */
 77	u_int header_size;		/* bytes in header sector */
 78	int reserved_block;		/* block next up for reclaim */
 79	int current_block;		/* block to write to */
 80	u16 *header_cache;		/* cached header */
 81
 82	int is_reclaiming;
 83	int cylinders;
 84	int errors;
 85	u_long *sector_map;
 86	struct block *blocks;
 87};
 88
 89static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
 90
 91static int build_block_map(struct partition *part, int block_no)
 92{
 93	struct block *block = &part->blocks[block_no];
 94	int i;
 95
 96	block->offset = part->block_size * block_no;
 97
 98	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
 99		block->state = BLOCK_UNUSED;
100		return -ENOENT;
101	}
102
103	block->state = BLOCK_OK;
104
105	for (i=0; i<part->data_sectors_per_block; i++) {
106		u16 entry;
107
108		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
109
110		if (entry == SECTOR_DELETED)
111			continue;
112
113		if (entry == SECTOR_FREE) {
114			block->free_sectors++;
115			continue;
116		}
117
118		if (entry == SECTOR_ZERO)
119			entry = 0;
120
121		if (entry >= part->sector_count) {
122			printk(KERN_WARNING PREFIX
123				"'%s': unit #%d: entry %d corrupt, "
124				"sector %d out of range\n",
125				part->mbd.mtd->name, block_no, i, entry);
126			continue;
127		}
128
129		if (part->sector_map[entry] != -1) {
130			printk(KERN_WARNING PREFIX
131				"'%s': more than one entry for sector %d\n",
132				part->mbd.mtd->name, entry);
133			part->errors = 1;
134			continue;
135		}
136
137		part->sector_map[entry] = block->offset +
138			(i + part->header_sectors_per_block) * SECTOR_SIZE;
139
140		block->used_sectors++;
141	}
142
143	if (block->free_sectors == part->data_sectors_per_block)
144		part->reserved_block = block_no;
145
146	return 0;
147}
148
149static int scan_header(struct partition *part)
150{
151	int sectors_per_block;
152	int i, rc = -ENOMEM;
153	int blocks_found;
154	size_t retlen;
155
156	sectors_per_block = part->block_size / SECTOR_SIZE;
157	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
158
159	if (part->total_blocks < 2)
160		return -ENOENT;
161
162	/* each erase block has three bytes header, followed by the map */
163	part->header_sectors_per_block =
164			((HEADER_MAP_OFFSET + sectors_per_block) *
165			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
166
167	part->data_sectors_per_block = sectors_per_block -
168			part->header_sectors_per_block;
169
170	part->header_size = (HEADER_MAP_OFFSET +
171			part->data_sectors_per_block) * sizeof(u16);
172
173	part->cylinders = (part->data_sectors_per_block *
174			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
175
176	part->sector_count = part->cylinders * SECTORS_PER_TRACK;
177
178	part->current_block = -1;
179	part->reserved_block = -1;
180	part->is_reclaiming = 0;
181
182	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
183	if (!part->header_cache)
184		goto err;
185
186	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
187			GFP_KERNEL);
188	if (!part->blocks)
189		goto err;
190
191	part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
192	if (!part->sector_map) {
193		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
194			"sector map", part->mbd.mtd->name);
195		goto err;
196	}
197
198	for (i=0; i<part->sector_count; i++)
199		part->sector_map[i] = -1;
200
201	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
202		rc = part->mbd.mtd->read(part->mbd.mtd,
203				i * part->block_size, part->header_size,
204				&retlen, (u_char*)part->header_cache);
205
206		if (!rc && retlen != part->header_size)
207			rc = -EIO;
208
209		if (rc)
210			goto err;
211
212		if (!build_block_map(part, i))
213			blocks_found++;
214	}
215
216	if (blocks_found == 0) {
217		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
218				part->mbd.mtd->name);
219		rc = -ENOENT;
220		goto err;
221	}
222
223	if (part->reserved_block == -1) {
224		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
225				part->mbd.mtd->name);
226
227		part->errors = 1;
228	}
229
230	return 0;
231
232err:
233	vfree(part->sector_map);
234	kfree(part->header_cache);
235	kfree(part->blocks);
236
237	return rc;
238}
239
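As a worked example of the sizing arithmetic in scan_header() above, assume a hypothetical 2 MiB NOR part with 64 KiB erase units (numbers chosen only for illustration):

/*
 * block_size               = 65536, so sectors_per_block = 65536 / 512 = 128
 * header_sectors_per_block = ((3 + 128) * 2 + 511) / 512            = 1
 * data_sectors_per_block   = 128 - 1                                = 127
 * header_size              = (3 + 127) * 2                          = 260 bytes
 * total_blocks             = 2 MiB / 64 KiB                         = 32
 * cylinders                = (127 * (32 - 1) - 1) / 63              = 62
 * sector_count             = 62 * 63                                = 3906
 *
 * One erase unit's worth of data sectors is excluded from the geometry,
 * so that a spare unit is always available for reclaim.
 */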
240static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
241{
242	struct partition *part = (struct partition*)dev;
243	u_long addr;
244	size_t retlen;
245	int rc;
246
247	if (sector >= part->sector_count)
248		return -EIO;
249
250	addr = part->sector_map[sector];
251	if (addr != -1) {
252		rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
253						&retlen, (u_char*)buf);
254		if (!rc && retlen != SECTOR_SIZE)
255			rc = -EIO;
256
257		if (rc) {
258			printk(KERN_WARNING PREFIX "error reading '%s' at "
259				"0x%lx\n", part->mbd.mtd->name, addr);
260			return rc;
261		}
262	} else
263		memset(buf, 0, SECTOR_SIZE);
264
265	return 0;
266}
267
268static void erase_callback(struct erase_info *erase)
269{
270	struct partition *part;
271	u16 magic;
272	int i, rc;
273	size_t retlen;
274
275	part = (struct partition*)erase->priv;
276
277	i = (u32)erase->addr / part->block_size;
278	if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
279	    erase->addr > UINT_MAX) {
280		printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
281				"on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
282		return;
283	}
284
285	if (erase->state != MTD_ERASE_DONE) {
286		printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
287				"state %d\n", (unsigned long long)erase->addr,
288				part->mbd.mtd->name, erase->state);
289
290		part->blocks[i].state = BLOCK_FAILED;
291		part->blocks[i].free_sectors = 0;
292		part->blocks[i].used_sectors = 0;
293
294		kfree(erase);
295
296		return;
297	}
298
299	magic = cpu_to_le16(RFD_MAGIC);
300
301	part->blocks[i].state = BLOCK_ERASED;
302	part->blocks[i].free_sectors = part->data_sectors_per_block;
303	part->blocks[i].used_sectors = 0;
304	part->blocks[i].erases++;
305
306	rc = part->mbd.mtd->write(part->mbd.mtd,
307		part->blocks[i].offset, sizeof(magic), &retlen,
308		(u_char*)&magic);
309
310	if (!rc && retlen != sizeof(magic))
311		rc = -EIO;
312
313	if (rc) {
314		printk(KERN_ERR PREFIX "'%s': unable to write RFD "
315				"header at 0x%lx\n",
316				part->mbd.mtd->name,
317				part->blocks[i].offset);
318		part->blocks[i].state = BLOCK_FAILED;
319	}
320	else
321		part->blocks[i].state = BLOCK_OK;
322
323	kfree(erase);
324}
325
326static int erase_block(struct partition *part, int block)
327{
328	struct erase_info *erase;
329	int rc = -ENOMEM;
330
331	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
332	if (!erase)
333		goto err;
334
335	erase->mtd = part->mbd.mtd;
336	erase->callback = erase_callback;
337	erase->addr = part->blocks[block].offset;
338	erase->len = part->block_size;
339	erase->priv = (u_long)part;
340
341	part->blocks[block].state = BLOCK_ERASING;
342	part->blocks[block].free_sectors = 0;
343
344	rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
345
346	if (rc) {
347		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
348				"failed\n", (unsigned long long)erase->addr,
349				(unsigned long long)erase->len, part->mbd.mtd->name);
350		kfree(erase);
351	}
352
353err:
354	return rc;
355}
356
357static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
358{
359	void *sector_data;
360	u16 *map;
361	size_t retlen;
362	int i, rc = -ENOMEM;
363
364	part->is_reclaiming = 1;
365
366	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
367	if (!sector_data)
368		goto err3;
369
370	map = kmalloc(part->header_size, GFP_KERNEL);
371	if (!map)
372		goto err2;
373
374	rc = part->mbd.mtd->read(part->mbd.mtd,
375		part->blocks[block_no].offset, part->header_size,
376		&retlen, (u_char*)map);
377
378	if (!rc && retlen != part->header_size)
379		rc = -EIO;
380
381	if (rc) {
382		printk(KERN_ERR PREFIX "error reading '%s' at "
383			"0x%lx\n", part->mbd.mtd->name,
384			part->blocks[block_no].offset);
385
386		goto err;
387	}
388
389	for (i=0; i<part->data_sectors_per_block; i++) {
390		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
391		u_long addr;
392
393
394		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
395			continue;
396
397		if (entry == SECTOR_ZERO)
398			entry = 0;
399
400		/* already warned about and ignored in build_block_map() */
401		if (entry >= part->sector_count)
402			continue;
403
404		addr = part->blocks[block_no].offset +
405			(i + part->header_sectors_per_block) * SECTOR_SIZE;
406
407		if (*old_sector == addr) {
408			*old_sector = -1;
409			if (!part->blocks[block_no].used_sectors--) {
410				rc = erase_block(part, block_no);
411				break;
412			}
413			continue;
414		}
415		rc = part->mbd.mtd->read(part->mbd.mtd, addr,
416			SECTOR_SIZE, &retlen, sector_data);
417
418		if (!rc && retlen != SECTOR_SIZE)
419			rc = -EIO;
420
421		if (rc) {
422			printk(KERN_ERR PREFIX "'%s': Unable to "
423				"read sector for relocation\n",
424				part->mbd.mtd->name);
425
426			goto err;
427		}
428
429		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
430				entry, sector_data);
431
432		if (rc)
433			goto err;
434	}
435
436err:
437	kfree(map);
438err2:
439	kfree(sector_data);
440err3:
441	part->is_reclaiming = 0;
442
443	return rc;
444}
445
446static int reclaim_block(struct partition *part, u_long *old_sector)
447{
448	int block, best_block, score, old_sector_block;
449	int rc;
450
451	/* we have a race if sync doesn't exist */
452	if (part->mbd.mtd->sync)
453		part->mbd.mtd->sync(part->mbd.mtd);
454
455	score = 0x7fffffff; /* MAX_INT */
456	best_block = -1;
457	if (*old_sector != -1)
458		old_sector_block = *old_sector / part->block_size;
459	else
460		old_sector_block = -1;
461
462	for (block=0; block<part->total_blocks; block++) {
463		int this_score;
464
465		if (block == part->reserved_block)
466			continue;
467
468		/*
469		 * Postpone reclaiming if there is a free sector as
470		 * more removed sectors is more efficient (have to move
471		 * less).
472		 */
473		if (part->blocks[block].free_sectors)
474			return 0;
475
476		this_score = part->blocks[block].used_sectors;
477
478		if (block == old_sector_block)
479			this_score--;
480		else {
481			/* no point in moving a full block */
482			if (part->blocks[block].used_sectors ==
483					part->data_sectors_per_block)
484				continue;
485		}
486
487		this_score += part->blocks[block].erases;
488
489		if (this_score < score) {
490			best_block = block;
491			score = this_score;
492		}
493	}
494
495	if (best_block == -1)
496		return -ENOSPC;
497
498	part->current_block = -1;
499	part->reserved_block = best_block;
500
501	pr_debug("reclaim_block: reclaiming block #%d with %d used "
502		 "%d free sectors\n", best_block,
503		 part->blocks[best_block].used_sectors,
504		 part->blocks[best_block].free_sectors);
505
506	if (part->blocks[best_block].used_sectors)
507		rc = move_block_contents(part, best_block, old_sector);
508	else
509		rc = erase_block(part, best_block);
510
511	return rc;
512}
513
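/*
 * Note on the scoring above: a block's score is its number of live
 * sectors plus its erase count, so reclaim prefers blocks that are
 * cheap to move and, as a tie-breaker, blocks that have been erased
 * least often -- a simple form of wear levelling.
 */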
514/*
515 * IMPROVE: It would be best to choose the block with the most deleted sectors,
516 * because if we fill that one up first it'll have the most chance of having
517 * the least live sectors at reclaim.
518 */
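/*
 * Note: when there is no current block, the search below starts at an
 * arbitrary unit (jiffies % total_blocks) rather than at unit 0, which
 * spreads fresh writes across the flash instead of always filling the
 * first free unit.
 */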
519static int find_free_block(struct partition *part)
520{
521	int block, stop;
522
523	block = part->current_block == -1 ?
524			jiffies % part->total_blocks : part->current_block;
525	stop = block;
526
527	do {
528		if (part->blocks[block].free_sectors &&
529				block != part->reserved_block)
530			return block;
531
532		if (part->blocks[block].state == BLOCK_UNUSED)
533			erase_block(part, block);
534
535		if (++block >= part->total_blocks)
536			block = 0;
537
538	} while (block != stop);
539
540	return -1;
541}
542
543static int find_writable_block(struct partition *part, u_long *old_sector)
544{
545	int rc, block;
546	size_t retlen;
547
548	block = find_free_block(part);
549
550	if (block == -1) {
551		if (!part->is_reclaiming) {
552			rc = reclaim_block(part, old_sector);
553			if (rc)
554				goto err;
555
556			block = find_free_block(part);
557		}
558
559		if (block == -1) {
560			rc = -ENOSPC;
561			goto err;
562		}
563	}
564
565	rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
566		part->header_size, &retlen, (u_char*)part->header_cache);
567
568	if (!rc && retlen != part->header_size)
569		rc = -EIO;
570
571	if (rc) {
572		printk(KERN_ERR PREFIX "'%s': unable to read header at "
573				"0x%lx\n", part->mbd.mtd->name,
574				part->blocks[block].offset);
575		goto err;
576	}
577
578	part->current_block = block;
579
580err:
581	return rc;
582}
583
584static int mark_sector_deleted(struct partition *part, u_long old_addr)
585{
586	int block, offset, rc;
587	u_long addr;
588	size_t retlen;
589	u16 del = cpu_to_le16(SECTOR_DELETED);
590
591	block = old_addr / part->block_size;
592	offset = (old_addr % part->block_size) / SECTOR_SIZE -
593		part->header_sectors_per_block;
594
595	addr = part->blocks[block].offset +
596			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
597	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
598		sizeof(del), &retlen, (u_char*)&del);
599
600	if (!rc && retlen != sizeof(del))
601		rc = -EIO;
602
603	if (rc) {
604		printk(KERN_ERR PREFIX "error writing '%s' at "
605			"0x%lx\n", part->mbd.mtd->name, addr);
606		if (rc)
607			goto err;
608	}
609	if (block == part->current_block)
610		part->header_cache[offset + HEADER_MAP_OFFSET] = del;
611
612	part->blocks[block].used_sectors--;
613
614	if (!part->blocks[block].used_sectors &&
615	    !part->blocks[block].free_sectors)
616		rc = erase_block(part, block);
617
618err:
619	return rc;
620}
621
622static int find_free_sector(const struct partition *part, const struct block *block)
623{
624	int i, stop;
625
626	i = stop = part->data_sectors_per_block - block->free_sectors;
627
628	do {
629		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
630				== SECTOR_FREE)
631			return i;
632
633		if (++i == part->data_sectors_per_block)
634			i = 0;
635	}
636	while(i != stop);
637
638	return -1;
639}
640
641static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
642{
643	struct partition *part = (struct partition*)dev;
644	struct block *block;
645	u_long addr;
646	int i;
647	int rc;
648	size_t retlen;
649	u16 entry;
650
651	if (part->current_block == -1 ||
652		!part->blocks[part->current_block].free_sectors) {
653
654		rc = find_writable_block(part, old_addr);
655		if (rc)
656			goto err;
657	}
658
659	block = &part->blocks[part->current_block];
660
661	i = find_free_sector(part, block);
662
663	if (i < 0) {
664		rc = -ENOSPC;
665		goto err;
666	}
667
668	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
669		block->offset;
670	rc = part->mbd.mtd->write(part->mbd.mtd,
671		addr, SECTOR_SIZE, &retlen, (u_char*)buf);
672
673	if (!rc && retlen != SECTOR_SIZE)
674		rc = -EIO;
675
676	if (rc) {
677		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
678				part->mbd.mtd->name, addr);
679		if (rc)
680			goto err;
681	}
682
683	part->sector_map[sector] = addr;
684
685	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
686
687	part->header_cache[i + HEADER_MAP_OFFSET] = entry;
688
689	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
690	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
691			sizeof(entry), &retlen, (u_char*)&entry);
692
693	if (!rc && retlen != sizeof(entry))
694		rc = -EIO;
695
696	if (rc) {
697		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
698				part->mbd.mtd->name, addr);
699		if (rc)
700			goto err;
701	}
702	block->used_sectors++;
703	block->free_sectors--;
704
705err:
706	return rc;
707}
708
709static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
710{
711	struct partition *part = (struct partition*)dev;
712	u_long old_addr;
713	int i;
714	int rc = 0;
715
716	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
717
718	if (part->reserved_block == -1) {
719		rc = -EACCES;
720		goto err;
721	}
722
723	if (sector >= part->sector_count) {
724		rc = -EIO;
725		goto err;
726	}
727
728	old_addr = part->sector_map[sector];
729
730	for (i=0; i<SECTOR_SIZE; i++) {
731		if (!buf[i])
732			continue;
733
734		rc = do_writesect(dev, sector, buf, &old_addr);
735		if (rc)
736			goto err;
737		break;
738	}
739
740	if (i == SECTOR_SIZE)
741		part->sector_map[sector] = -1;
742
743	if (old_addr != -1)
744		rc = mark_sector_deleted(part, old_addr);
745
746err:
747	return rc;
748}
749
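/*
 * Note on the write path above: a sector consisting entirely of zero
 * bytes is never stored on flash.  The loop over buf[] finds no
 * non-zero byte, the sector is simply dropped from sector_map, and
 * rfd_ftl_readsect() later returns zeroes for any unmapped sector.
 */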
750static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
751{
752	struct partition *part = (struct partition*)dev;
753
754	geo->heads = 1;
755	geo->sectors = SECTORS_PER_TRACK;
756	geo->cylinders = part->cylinders;
757
758	return 0;
759}
760
761static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
762{
763	struct partition *part;
764
765	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
766		return;
767
768	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
769	if (!part)
770		return;
771
772	part->mbd.mtd = mtd;
773
774	if (block_size)
775		part->block_size = block_size;
776	else {
777		if (!mtd->erasesize) {
778			printk(KERN_WARNING PREFIX "please provide block_size");
779			goto out;
780		} else
781			part->block_size = mtd->erasesize;
782	}
783
784	if (scan_header(part) == 0) {
785		part->mbd.size = part->sector_count;
786		part->mbd.tr = tr;
787		part->mbd.devnum = -1;
788		if (!(mtd->flags & MTD_WRITEABLE))
789			part->mbd.readonly = 1;
790		else if (part->errors) {
791			printk(KERN_WARNING PREFIX "'%s': errors found, "
792					"setting read-only\n", mtd->name);
793			part->mbd.readonly = 1;
794		}
795
796		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
797				mtd->name, mtd->type, mtd->flags);
798
799		if (!add_mtd_blktrans_dev((void*)part))
800			return;
801	}
802out:
803	kfree(part);
804}
805
806static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
807{
808	struct partition *part = (struct partition*)dev;
809	int i;
810
811	for (i=0; i<part->total_blocks; i++) {
812		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
813			part->mbd.mtd->name, i, part->blocks[i].erases);
814	}
815
816	del_mtd_blktrans_dev(dev);
817	vfree(part->sector_map);
818	kfree(part->header_cache);
819	kfree(part->blocks);
820}
821
822static struct mtd_blktrans_ops rfd_ftl_tr = {
823	.name		= "rfd",
824	.major		= RFD_FTL_MAJOR,
825	.part_bits	= PART_BITS,
826	.blksize 	= SECTOR_SIZE,
827
828	.readsect	= rfd_ftl_readsect,
829	.writesect	= rfd_ftl_writesect,
830	.getgeo		= rfd_ftl_getgeo,
831	.add_mtd	= rfd_ftl_add_mtd,
832	.remove_dev	= rfd_ftl_remove_dev,
833	.owner		= THIS_MODULE,
834};
835
836static int __init init_rfd_ftl(void)
837{
838	return register_mtd_blktrans(&rfd_ftl_tr);
839}
840
841static void __exit cleanup_rfd_ftl(void)
842{
843	deregister_mtd_blktrans(&rfd_ftl_tr);
844}
845
846module_init(init_rfd_ftl);
847module_exit(cleanup_rfd_ftl);
848
849MODULE_LICENSE("GPL");
850MODULE_AUTHOR("Sean Young <sean@mess.org>");
851MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
852		"used by General Software's Embedded BIOS");
853
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * rfd_ftl.c -- resident flash disk (flash translation layer)
  4 *
  5 * Copyright © 2005  Sean Young <sean@mess.org>
  6 *
  7 * This type of flash translation layer (FTL) is used by the Embedded BIOS
  8 * by General Software. It is known as the Resident Flash Disk (RFD), see:
  9 *
 10 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 11 *
 12 * based on ftl.c
 13 */
 14
 15#include <linux/hdreg.h>
 16#include <linux/init.h>
 17#include <linux/mtd/blktrans.h>
 18#include <linux/mtd/mtd.h>
 19#include <linux/vmalloc.h>
 20#include <linux/slab.h>
 21#include <linux/jiffies.h>
 22#include <linux/module.h>
 23
 24#include <asm/types.h>
 25
 26static int block_size = 0;
 27module_param(block_size, int, 0);
 28MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
 29
 30#define PREFIX "rfd_ftl: "
 31
 32/* This major has been assigned by device@lanana.org */
 33#ifndef RFD_FTL_MAJOR
 34#define RFD_FTL_MAJOR		256
 35#endif
 36
 37/* Maximum number of partitions in an FTL region */
 38#define PART_BITS		4
 39
 40/* An erase unit should start with this value */
 41#define RFD_MAGIC		0x9193
 42
 43/* the second value is 0xffff or 0xffc8; function unknown */
 44
 45/* the third value is always 0xffff, ignored */
 46
 47/* next is an array of mapping for each corresponding sector */
 48#define HEADER_MAP_OFFSET	3
 49#define SECTOR_DELETED		0x0000
 50#define SECTOR_ZERO		0xfffe
 51#define SECTOR_FREE		0xffff
 52
 53#define SECTOR_SIZE		512
 54
 55#define SECTORS_PER_TRACK	63
 56
 57struct block {
 58	enum {
 59		BLOCK_OK,
 60		BLOCK_ERASING,
 61		BLOCK_ERASED,
 62		BLOCK_UNUSED,
 63		BLOCK_FAILED
 64	} state;
 65	int free_sectors;
 66	int used_sectors;
 67	int erases;
 68	u_long offset;
 69};
 70
 71struct partition {
 72	struct mtd_blktrans_dev mbd;
 73
 74	u_int block_size;		/* size of erase unit */
 75	u_int total_blocks;		/* number of erase units */
 76	u_int header_sectors_per_block;	/* header sectors in erase unit */
 77	u_int data_sectors_per_block;	/* data sectors in erase unit */
 78	u_int sector_count;		/* sectors in translated disk */
 79	u_int header_size;		/* bytes in header sector */
 80	int reserved_block;		/* block next up for reclaim */
 81	int current_block;		/* block to write to */
 82	u16 *header_cache;		/* cached header */
 83
 84	int is_reclaiming;
 85	int cylinders;
 86	int errors;
 87	u_long *sector_map;
 88	struct block *blocks;
 89};
 90
 91static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
 92
 93static int build_block_map(struct partition *part, int block_no)
 94{
 95	struct block *block = &part->blocks[block_no];
 96	int i;
 97
 98	block->offset = part->block_size * block_no;
 99
100	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
101		block->state = BLOCK_UNUSED;
102		return -ENOENT;
103	}
104
105	block->state = BLOCK_OK;
106
107	for (i=0; i<part->data_sectors_per_block; i++) {
108		u16 entry;
109
110		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
111
112		if (entry == SECTOR_DELETED)
113			continue;
114
115		if (entry == SECTOR_FREE) {
116			block->free_sectors++;
117			continue;
118		}
119
120		if (entry == SECTOR_ZERO)
121			entry = 0;
122
123		if (entry >= part->sector_count) {
124			printk(KERN_WARNING PREFIX
125				"'%s': unit #%d: entry %d corrupt, "
126				"sector %d out of range\n",
127				part->mbd.mtd->name, block_no, i, entry);
128			continue;
129		}
130
131		if (part->sector_map[entry] != -1) {
132			printk(KERN_WARNING PREFIX
133				"'%s': more than one entry for sector %d\n",
134				part->mbd.mtd->name, entry);
135			part->errors = 1;
136			continue;
137		}
138
139		part->sector_map[entry] = block->offset +
140			(i + part->header_sectors_per_block) * SECTOR_SIZE;
141
142		block->used_sectors++;
143	}
144
145	if (block->free_sectors == part->data_sectors_per_block)
146		part->reserved_block = block_no;
147
148	return 0;
149}
150
151static int scan_header(struct partition *part)
152{
153	int sectors_per_block;
154	int i, rc = -ENOMEM;
155	int blocks_found;
156	size_t retlen;
157
158	sectors_per_block = part->block_size / SECTOR_SIZE;
159	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
160
161	if (part->total_blocks < 2)
162		return -ENOENT;
163
164	/* each erase block has three bytes header, followed by the map */
165	part->header_sectors_per_block =
166			((HEADER_MAP_OFFSET + sectors_per_block) *
167			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
168
169	part->data_sectors_per_block = sectors_per_block -
170			part->header_sectors_per_block;
171
172	part->header_size = (HEADER_MAP_OFFSET +
173			part->data_sectors_per_block) * sizeof(u16);
174
175	part->cylinders = (part->data_sectors_per_block *
176			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
177
178	part->sector_count = part->cylinders * SECTORS_PER_TRACK;
179
180	part->current_block = -1;
181	part->reserved_block = -1;
182	part->is_reclaiming = 0;
183
184	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
185	if (!part->header_cache)
186		goto err;
187
188	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
189			GFP_KERNEL);
190	if (!part->blocks)
191		goto err;
192
193	part->sector_map = vmalloc(array_size(sizeof(u_long),
194					      part->sector_count));
195	if (!part->sector_map) {
196		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
197			"sector map", part->mbd.mtd->name);
198		goto err;
199	}
200
201	for (i=0; i<part->sector_count; i++)
202		part->sector_map[i] = -1;
203
204	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
205		rc = mtd_read(part->mbd.mtd, i * part->block_size,
206			      part->header_size, &retlen,
207			      (u_char *)part->header_cache);
208
209		if (!rc && retlen != part->header_size)
210			rc = -EIO;
211
212		if (rc)
213			goto err;
214
215		if (!build_block_map(part, i))
216			blocks_found++;
217	}
218
219	if (blocks_found == 0) {
220		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
221				part->mbd.mtd->name);
222		rc = -ENOENT;
223		goto err;
224	}
225
226	if (part->reserved_block == -1) {
227		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
228				part->mbd.mtd->name);
229
230		part->errors = 1;
231	}
232
233	return 0;
234
235err:
236	vfree(part->sector_map);
237	kfree(part->header_cache);
238	kfree(part->blocks);
239
240	return rc;
241}
242
243static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
244{
245	struct partition *part = (struct partition*)dev;
246	u_long addr;
247	size_t retlen;
248	int rc;
249
250	if (sector >= part->sector_count)
251		return -EIO;
252
253	addr = part->sector_map[sector];
254	if (addr != -1) {
255		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
256			      (u_char *)buf);
257		if (!rc && retlen != SECTOR_SIZE)
258			rc = -EIO;
259
260		if (rc) {
261			printk(KERN_WARNING PREFIX "error reading '%s' at "
262				"0x%lx\n", part->mbd.mtd->name, addr);
263			return rc;
264		}
265	} else
266		memset(buf, 0, SECTOR_SIZE);
267
268	return 0;
269}
270
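/*
 * Unlike the v3.1 code above, which rewrote the RFD magic from an
 * asynchronous erase callback, this version relies on mtd_erase()
 * completing synchronously and handles the erase result and the
 * header rewrite inline.
 */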
271static int erase_block(struct partition *part, int block)
272{
273	struct erase_info *erase;
274	int rc;
275
276	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
277	if (!erase)
278		return -ENOMEM;
279
280	erase->addr = part->blocks[block].offset;
281	erase->len = part->block_size;
282
283	part->blocks[block].state = BLOCK_ERASING;
284	part->blocks[block].free_sectors = 0;
285
286	rc = mtd_erase(part->mbd.mtd, erase);
287	if (rc) {
288		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
289				"failed\n", (unsigned long long)erase->addr,
290				(unsigned long long)erase->len, part->mbd.mtd->name);
291		part->blocks[block].state = BLOCK_FAILED;
292		part->blocks[block].free_sectors = 0;
293		part->blocks[block].used_sectors = 0;
294	} else {
295		u16 magic = cpu_to_le16(RFD_MAGIC);
296		size_t retlen;
297
298		part->blocks[block].state = BLOCK_ERASED;
299		part->blocks[block].free_sectors = part->data_sectors_per_block;
300		part->blocks[block].used_sectors = 0;
301		part->blocks[block].erases++;
302
303		rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
304			       sizeof(magic), &retlen, (u_char *)&magic);
305		if (!rc && retlen != sizeof(magic))
306			rc = -EIO;
307
308		if (rc) {
309			pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
310			       part->mbd.mtd->name, part->blocks[block].offset);
311			part->blocks[block].state = BLOCK_FAILED;
312		} else {
313			part->blocks[block].state = BLOCK_OK;
314		}
315	}
316
317	kfree(erase);
318
319	return rc;
320}
321
322static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
323{
324	void *sector_data;
325	u16 *map;
326	size_t retlen;
327	int i, rc = -ENOMEM;
328
329	part->is_reclaiming = 1;
330
331	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
332	if (!sector_data)
333		goto err3;
334
335	map = kmalloc(part->header_size, GFP_KERNEL);
336	if (!map)
337		goto err2;
338
339	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
340		      part->header_size, &retlen, (u_char *)map);
341
342	if (!rc && retlen != part->header_size)
343		rc = -EIO;
344
345	if (rc) {
346		printk(KERN_ERR PREFIX "error reading '%s' at "
347			"0x%lx\n", part->mbd.mtd->name,
348			part->blocks[block_no].offset);
349
350		goto err;
351	}
352
353	for (i=0; i<part->data_sectors_per_block; i++) {
354		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
355		u_long addr;
356
357
358		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
359			continue;
360
361		if (entry == SECTOR_ZERO)
362			entry = 0;
363
364		/* already warned about and ignored in build_block_map() */
365		if (entry >= part->sector_count)
366			continue;
367
368		addr = part->blocks[block_no].offset +
369			(i + part->header_sectors_per_block) * SECTOR_SIZE;
370
371		if (*old_sector == addr) {
372			*old_sector = -1;
373			if (!part->blocks[block_no].used_sectors--) {
374				rc = erase_block(part, block_no);
375				break;
376			}
377			continue;
378		}
379		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
380			      sector_data);
381
382		if (!rc && retlen != SECTOR_SIZE)
383			rc = -EIO;
384
385		if (rc) {
386			printk(KERN_ERR PREFIX "'%s': Unable to "
387				"read sector for relocation\n",
388				part->mbd.mtd->name);
389
390			goto err;
391		}
392
393		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
394				entry, sector_data);
395
396		if (rc)
397			goto err;
398	}
399
400err:
401	kfree(map);
402err2:
403	kfree(sector_data);
404err3:
405	part->is_reclaiming = 0;
406
407	return rc;
408}
409
410static int reclaim_block(struct partition *part, u_long *old_sector)
411{
412	int block, best_block, score, old_sector_block;
413	int rc;
414
415	/* we have a race if sync doesn't exist */
416	mtd_sync(part->mbd.mtd);
417
418	score = 0x7fffffff; /* MAX_INT */
419	best_block = -1;
420	if (*old_sector != -1)
421		old_sector_block = *old_sector / part->block_size;
422	else
423		old_sector_block = -1;
424
425	for (block=0; block<part->total_blocks; block++) {
426		int this_score;
427
428		if (block == part->reserved_block)
429			continue;
430
431		/*
432		 * Postpone reclaiming if there is a free sector as
433		 * more removed sectors is more efficient (have to move
434		 * less).
435		 */
436		if (part->blocks[block].free_sectors)
437			return 0;
438
439		this_score = part->blocks[block].used_sectors;
440
441		if (block == old_sector_block)
442			this_score--;
443		else {
444			/* no point in moving a full block */
445			if (part->blocks[block].used_sectors ==
446					part->data_sectors_per_block)
447				continue;
448		}
449
450		this_score += part->blocks[block].erases;
451
452		if (this_score < score) {
453			best_block = block;
454			score = this_score;
455		}
456	}
457
458	if (best_block == -1)
459		return -ENOSPC;
460
461	part->current_block = -1;
462	part->reserved_block = best_block;
463
464	pr_debug("reclaim_block: reclaiming block #%d with %d used "
465		 "%d free sectors\n", best_block,
466		 part->blocks[best_block].used_sectors,
467		 part->blocks[best_block].free_sectors);
468
469	if (part->blocks[best_block].used_sectors)
470		rc = move_block_contents(part, best_block, old_sector);
471	else
472		rc = erase_block(part, best_block);
473
474	return rc;
475}
476
477/*
478 * IMPROVE: It would be best to choose the block with the most deleted sectors,
479 * because if we fill that one up first it'll have the most chance of having
480 * the least live sectors at reclaim.
481 */
482static int find_free_block(struct partition *part)
483{
484	int block, stop;
485
486	block = part->current_block == -1 ?
487			jiffies % part->total_blocks : part->current_block;
488	stop = block;
489
490	do {
491		if (part->blocks[block].free_sectors &&
492				block != part->reserved_block)
493			return block;
494
495		if (part->blocks[block].state == BLOCK_UNUSED)
496			erase_block(part, block);
497
498		if (++block >= part->total_blocks)
499			block = 0;
500
501	} while (block != stop);
502
503	return -1;
504}
505
506static int find_writable_block(struct partition *part, u_long *old_sector)
507{
508	int rc, block;
509	size_t retlen;
510
511	block = find_free_block(part);
512
513	if (block == -1) {
514		if (!part->is_reclaiming) {
515			rc = reclaim_block(part, old_sector);
516			if (rc)
517				goto err;
518
519			block = find_free_block(part);
520		}
521
522		if (block == -1) {
523			rc = -ENOSPC;
524			goto err;
525		}
526	}
527
528	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
529		      part->header_size, &retlen,
530		      (u_char *)part->header_cache);
531
532	if (!rc && retlen != part->header_size)
533		rc = -EIO;
534
535	if (rc) {
536		printk(KERN_ERR PREFIX "'%s': unable to read header at "
537				"0x%lx\n", part->mbd.mtd->name,
538				part->blocks[block].offset);
539		goto err;
540	}
541
542	part->current_block = block;
543
544err:
545	return rc;
546}
547
548static int mark_sector_deleted(struct partition *part, u_long old_addr)
549{
550	int block, offset, rc;
551	u_long addr;
552	size_t retlen;
553	u16 del = cpu_to_le16(SECTOR_DELETED);
554
555	block = old_addr / part->block_size;
556	offset = (old_addr % part->block_size) / SECTOR_SIZE -
557		part->header_sectors_per_block;
558
559	addr = part->blocks[block].offset +
560			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
561	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
562		       (u_char *)&del);
563
564	if (!rc && retlen != sizeof(del))
565		rc = -EIO;
566
567	if (rc) {
568		printk(KERN_ERR PREFIX "error writing '%s' at "
569			"0x%lx\n", part->mbd.mtd->name, addr);
570		goto err;
571	}
572	if (block == part->current_block)
573		part->header_cache[offset + HEADER_MAP_OFFSET] = del;
574
575	part->blocks[block].used_sectors--;
576
577	if (!part->blocks[block].used_sectors &&
578	    !part->blocks[block].free_sectors)
579		rc = erase_block(part, block);
580
581err:
582	return rc;
583}
584
585static int find_free_sector(const struct partition *part, const struct block *block)
586{
587	int i, stop;
588
589	i = stop = part->data_sectors_per_block - block->free_sectors;
590
591	do {
592		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
593				== SECTOR_FREE)
594			return i;
595
596		if (++i == part->data_sectors_per_block)
597			i = 0;
598	}
599	while(i != stop);
600
601	return -1;
602}
603
604static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
605{
606	struct partition *part = (struct partition*)dev;
607	struct block *block;
608	u_long addr;
609	int i;
610	int rc;
611	size_t retlen;
612	u16 entry;
613
614	if (part->current_block == -1 ||
615		!part->blocks[part->current_block].free_sectors) {
616
617		rc = find_writable_block(part, old_addr);
618		if (rc)
619			goto err;
620	}
621
622	block = &part->blocks[part->current_block];
623
624	i = find_free_sector(part, block);
625
626	if (i < 0) {
627		rc = -ENOSPC;
628		goto err;
629	}
630
631	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
632		block->offset;
633	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
634		       (u_char *)buf);
635
636	if (!rc && retlen != SECTOR_SIZE)
637		rc = -EIO;
638
639	if (rc) {
640		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
641				part->mbd.mtd->name, addr);
642		goto err;
643	}
644
645	part->sector_map[sector] = addr;
646
647	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
648
649	part->header_cache[i + HEADER_MAP_OFFSET] = entry;
650
651	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
652	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
653		       (u_char *)&entry);
654
655	if (!rc && retlen != sizeof(entry))
656		rc = -EIO;
657
658	if (rc) {
659		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
660				part->mbd.mtd->name, addr);
661		goto err;
662	}
663	block->used_sectors++;
664	block->free_sectors--;
665
666err:
667	return rc;
668}
669
670static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
671{
672	struct partition *part = (struct partition*)dev;
673	u_long old_addr;
674	int i;
675	int rc = 0;
676
677	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
678
679	if (part->reserved_block == -1) {
680		rc = -EACCES;
681		goto err;
682	}
683
684	if (sector >= part->sector_count) {
685		rc = -EIO;
686		goto err;
687	}
688
689	old_addr = part->sector_map[sector];
690
691	for (i=0; i<SECTOR_SIZE; i++) {
692		if (!buf[i])
693			continue;
694
695		rc = do_writesect(dev, sector, buf, &old_addr);
696		if (rc)
697			goto err;
698		break;
699	}
700
701	if (i == SECTOR_SIZE)
702		part->sector_map[sector] = -1;
703
704	if (old_addr != -1)
705		rc = mark_sector_deleted(part, old_addr);
706
707err:
708	return rc;
709}
710
711static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
712{
713	struct partition *part = (struct partition*)dev;
714
715	geo->heads = 1;
716	geo->sectors = SECTORS_PER_TRACK;
717	geo->cylinders = part->cylinders;
718
719	return 0;
720}
721
722static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
723{
724	struct partition *part;
725
726	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
727		return;
728
729	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
730	if (!part)
731		return;
732
733	part->mbd.mtd = mtd;
734
735	if (block_size)
736		part->block_size = block_size;
737	else {
738		if (!mtd->erasesize) {
739			printk(KERN_WARNING PREFIX "please provide block_size");
740			goto out;
741		} else
742			part->block_size = mtd->erasesize;
743	}
744
745	if (scan_header(part) == 0) {
746		part->mbd.size = part->sector_count;
747		part->mbd.tr = tr;
748		part->mbd.devnum = -1;
749		if (!(mtd->flags & MTD_WRITEABLE))
750			part->mbd.readonly = 1;
751		else if (part->errors) {
752			printk(KERN_WARNING PREFIX "'%s': errors found, "
753					"setting read-only\n", mtd->name);
754			part->mbd.readonly = 1;
755		}
756
757		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
758				mtd->name, mtd->type, mtd->flags);
759
760		if (!add_mtd_blktrans_dev((void*)part))
761			return;
762	}
763out:
764	kfree(part);
765}
766
767static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
768{
769	struct partition *part = (struct partition*)dev;
770	int i;
771
772	for (i=0; i<part->total_blocks; i++) {
773		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
774			part->mbd.mtd->name, i, part->blocks[i].erases);
775	}
776
777	del_mtd_blktrans_dev(dev);
778	vfree(part->sector_map);
779	kfree(part->header_cache);
780	kfree(part->blocks);
781}
782
783static struct mtd_blktrans_ops rfd_ftl_tr = {
784	.name		= "rfd",
785	.major		= RFD_FTL_MAJOR,
786	.part_bits	= PART_BITS,
787	.blksize 	= SECTOR_SIZE,
788
789	.readsect	= rfd_ftl_readsect,
790	.writesect	= rfd_ftl_writesect,
791	.getgeo		= rfd_ftl_getgeo,
792	.add_mtd	= rfd_ftl_add_mtd,
793	.remove_dev	= rfd_ftl_remove_dev,
794	.owner		= THIS_MODULE,
795};
796
797static int __init init_rfd_ftl(void)
798{
799	return register_mtd_blktrans(&rfd_ftl_tr);
800}
801
802static void __exit cleanup_rfd_ftl(void)
803{
804	deregister_mtd_blktrans(&rfd_ftl_tr);
805}
806
807module_init(init_rfd_ftl);
808module_exit(cleanup_rfd_ftl);
809
810MODULE_LICENSE("GPL");
811MODULE_AUTHOR("Sean Young <sean@mess.org>");
812MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
813		"used by General Software's Embedded BIOS");
814