Linux v3.1
 
  1/*
  2 * rfd_ftl.c -- resident flash disk (flash translation layer)
  3 *
  4 * Copyright © 2005  Sean Young <sean@mess.org>
  5 *
  6 * This type of flash translation layer (FTL) is used by the Embedded BIOS
  7 * by General Software. It is known as the Resident Flash Disk (RFD), see:
  8 *
  9 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 10 *
 11 * based on ftl.c
 12 */
 13
 14#include <linux/hdreg.h>
 15#include <linux/init.h>
 16#include <linux/mtd/blktrans.h>
 17#include <linux/mtd/mtd.h>
 18#include <linux/vmalloc.h>
 19#include <linux/slab.h>
 20#include <linux/jiffies.h>
 21
 22#include <asm/types.h>
 23
 24static int block_size = 0;
 25module_param(block_size, int, 0);
 26MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
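/*
 * The block_size parameter can be given at load time to override the
 * erase-unit-sized default, e.g. (assuming the driver is built as the
 * rfd_ftl module):
 *
 *	modprobe rfd_ftl block_size=65536
 */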
 27
 28#define PREFIX "rfd_ftl: "
 29
 30/* This major has been assigned by device@lanana.org */
 31#ifndef RFD_FTL_MAJOR
 32#define RFD_FTL_MAJOR		256
 33#endif
 34
 35/* Maximum number of partitions in an FTL region */
 36#define PART_BITS		4
 37
 38/* An erase unit should start with this value */
 39#define RFD_MAGIC		0x9193
 40
 41/* the second value is 0xffff or 0xffc8; function unknown */
 42
 43/* the third value is always 0xffff, ignored */
 44
 45/* next is an array of mapping for each corresponding sector */
 46#define HEADER_MAP_OFFSET	3
 47#define SECTOR_DELETED		0x0000
 48#define SECTOR_ZERO		0xfffe
 49#define SECTOR_FREE		0xffff
 50
 51#define SECTOR_SIZE		512
 52
 53#define SECTORS_PER_TRACK	63
 54
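/*
 * Putting the constants above together, each erase unit is expected to look
 * roughly like this (all header words are little-endian u16):
 *
 *	word 0:        RFD_MAGIC
 *	word 1:        0xffff or 0xffc8, purpose unknown
 *	word 2:        always 0xffff, ignored
 *	words 3..N+2:  one map entry per data sector: SECTOR_FREE,
 *	               SECTOR_DELETED, SECTOR_ZERO (holds logical sector 0)
 *	               or the logical sector number
 *	then:          the data sectors themselves, SECTOR_SIZE bytes each,
 *	               starting after the header sector(s)
 */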
 55struct block {
 56	enum {
 57		BLOCK_OK,
 58		BLOCK_ERASING,
 59		BLOCK_ERASED,
 60		BLOCK_UNUSED,
 61		BLOCK_FAILED
 62	} state;
 63	int free_sectors;
 64	int used_sectors;
 65	int erases;
 66	u_long offset;
 67};
 68
 69struct partition {
 70	struct mtd_blktrans_dev mbd;
 71
 72	u_int block_size;		/* size of erase unit */
 73	u_int total_blocks;		/* number of erase units */
 74	u_int header_sectors_per_block;	/* header sectors in erase unit */
 75	u_int data_sectors_per_block;	/* data sectors in erase unit */
 76	u_int sector_count;		/* sectors in translated disk */
 77	u_int header_size;		/* bytes in header sector */
 78	int reserved_block;		/* block next up for reclaim */
 79	int current_block;		/* block to write to */
 80	u16 *header_cache;		/* cached header */
 81
 82	int is_reclaiming;
 83	int cylinders;
 84	int errors;
 85	u_long *sector_map;
 86	struct block *blocks;
 87};
 88
 89static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
 90
 91static int build_block_map(struct partition *part, int block_no)
 92{
 93	struct block *block = &part->blocks[block_no];
 94	int i;
 95
 96	block->offset = part->block_size * block_no;
 97
 98	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
 99		block->state = BLOCK_UNUSED;
100		return -ENOENT;
101	}
102
103	block->state = BLOCK_OK;
104
105	for (i=0; i<part->data_sectors_per_block; i++) {
106		u16 entry;
107
108		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
109
110		if (entry == SECTOR_DELETED)
111			continue;
112
113		if (entry == SECTOR_FREE) {
114			block->free_sectors++;
115			continue;
116		}
117
118		if (entry == SECTOR_ZERO)
119			entry = 0;
120
121		if (entry >= part->sector_count) {
122			printk(KERN_WARNING PREFIX
123				"'%s': unit #%d: entry %d corrupt, "
124				"sector %d out of range\n",
125				part->mbd.mtd->name, block_no, i, entry);
126			continue;
127		}
128
129		if (part->sector_map[entry] != -1) {
130			printk(KERN_WARNING PREFIX
131				"'%s': more than one entry for sector %d\n",
132				part->mbd.mtd->name, entry);
133			part->errors = 1;
134			continue;
135		}
136
137		part->sector_map[entry] = block->offset +
138			(i + part->header_sectors_per_block) * SECTOR_SIZE;
139
140		block->used_sectors++;
141	}
142
143	if (block->free_sectors == part->data_sectors_per_block)
144		part->reserved_block = block_no;
145
146	return 0;
147}
148
149static int scan_header(struct partition *part)
150{
151	int sectors_per_block;
152	int i, rc = -ENOMEM;
153	int blocks_found;
154	size_t retlen;
155
156	sectors_per_block = part->block_size / SECTOR_SIZE;
157	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
158
159	if (part->total_blocks < 2)
160		return -ENOENT;
161
162	/* each erase block has three bytes header, followed by the map */
163	part->header_sectors_per_block =
164			((HEADER_MAP_OFFSET + sectors_per_block) *
165			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
166
167	part->data_sectors_per_block = sectors_per_block -
168			part->header_sectors_per_block;
169
170	part->header_size = (HEADER_MAP_OFFSET +
171			part->data_sectors_per_block) * sizeof(u16);
172
173	part->cylinders = (part->data_sectors_per_block *
174			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
175
176	part->sector_count = part->cylinders * SECTORS_PER_TRACK;
177
178	part->current_block = -1;
179	part->reserved_block = -1;
180	part->is_reclaiming = 0;
181
182	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
183	if (!part->header_cache)
184		goto err;
185
186	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
187			GFP_KERNEL);
188	if (!part->blocks)
189		goto err;
190
191	part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
192	if (!part->sector_map) {
193		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
194			"sector map", part->mbd.mtd->name);
195		goto err;
196	}
197
198	for (i=0; i<part->sector_count; i++)
199		part->sector_map[i] = -1;
200
201	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
202		rc = part->mbd.mtd->read(part->mbd.mtd,
203				i * part->block_size, part->header_size,
204				&retlen, (u_char*)part->header_cache);
205
206		if (!rc && retlen != part->header_size)
207			rc = -EIO;
208
209		if (rc)
210			goto err;
211
212		if (!build_block_map(part, i))
213			blocks_found++;
214	}
215
216	if (blocks_found == 0) {
217		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
218				part->mbd.mtd->name);
219		rc = -ENOENT;
220		goto err;
221	}
222
223	if (part->reserved_block == -1) {
224		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
225				part->mbd.mtd->name);
226
227		part->errors = 1;
228	}
229
230	return 0;
231
232err:
233	vfree(part->sector_map);
234	kfree(part->header_cache);
235	kfree(part->blocks);
236
237	return rc;
238}
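/*
 * Worked example for scan_header() above (figures assumed for illustration):
 * with 64 KiB erase units and 512-byte sectors, sectors_per_block = 128, the
 * header map of (3 + 128) u16 values fits in one 512-byte sector, so
 * header_sectors_per_block = 1, data_sectors_per_block = 127 and
 * header_size = (3 + 127) * 2 = 260 bytes.  On a 2 MiB device that gives
 * total_blocks = 32, cylinders = (127 * 31 - 1) / 63 = 62 and
 * sector_count = 62 * 63 = 3906.
 */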
239
240static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
241{
242	struct partition *part = (struct partition*)dev;
243	u_long addr;
244	size_t retlen;
245	int rc;
246
247	if (sector >= part->sector_count)
248		return -EIO;
249
250	addr = part->sector_map[sector];
251	if (addr != -1) {
252		rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
253						&retlen, (u_char*)buf);
254		if (!rc && retlen != SECTOR_SIZE)
255			rc = -EIO;
256
257		if (rc) {
258			printk(KERN_WARNING PREFIX "error reading '%s' at "
259				"0x%lx\n", part->mbd.mtd->name, addr);
260			return rc;
261		}
262	} else
263		memset(buf, 0, SECTOR_SIZE);
264
265	return 0;
266}
267
268static void erase_callback(struct erase_info *erase)
269{
270	struct partition *part;
271	u16 magic;
272	int i, rc;
273	size_t retlen;
274
275	part = (struct partition*)erase->priv;
276
277	i = (u32)erase->addr / part->block_size;
278	if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
279	    erase->addr > UINT_MAX) {
280		printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
281				"on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
282		return;
283	}
284
285	if (erase->state != MTD_ERASE_DONE) {
286		printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
287				"state %d\n", (unsigned long long)erase->addr,
288				part->mbd.mtd->name, erase->state);
289
290		part->blocks[i].state = BLOCK_FAILED;
291		part->blocks[i].free_sectors = 0;
292		part->blocks[i].used_sectors = 0;
293
294		kfree(erase);
295
296		return;
297	}
298
299	magic = cpu_to_le16(RFD_MAGIC);
300
301	part->blocks[i].state = BLOCK_ERASED;
302	part->blocks[i].free_sectors = part->data_sectors_per_block;
303	part->blocks[i].used_sectors = 0;
304	part->blocks[i].erases++;
305
306	rc = part->mbd.mtd->write(part->mbd.mtd,
307		part->blocks[i].offset, sizeof(magic), &retlen,
308		(u_char*)&magic);
309
310	if (!rc && retlen != sizeof(magic))
311		rc = -EIO;
312
313	if (rc) {
314		printk(KERN_ERR PREFIX "'%s': unable to write RFD "
315				"header at 0x%lx\n",
316				part->mbd.mtd->name,
317				part->blocks[i].offset);
318		part->blocks[i].state = BLOCK_FAILED;
319	}
320	else
321		part->blocks[i].state = BLOCK_OK;
322
323	kfree(erase);
324}
325
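/*
 * Start erasing an erase unit.  The erase itself is asynchronous: the MTD
 * layer reports completion through erase_callback() above, which either
 * re-stamps the unit with RFD_MAGIC and marks it usable again, or marks it
 * BLOCK_FAILED.
 */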
326static int erase_block(struct partition *part, int block)
327{
328	struct erase_info *erase;
329	int rc = -ENOMEM;
330
331	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
332	if (!erase)
333		goto err;
334
335	erase->mtd = part->mbd.mtd;
336	erase->callback = erase_callback;
337	erase->addr = part->blocks[block].offset;
338	erase->len = part->block_size;
339	erase->priv = (u_long)part;
340
341	part->blocks[block].state = BLOCK_ERASING;
342	part->blocks[block].free_sectors = 0;
343
344	rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
345
346	if (rc) {
347		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
348				"failed\n", (unsigned long long)erase->addr,
349				(unsigned long long)erase->len, part->mbd.mtd->name);
350		kfree(erase);
351	}
352
353err:
354	return rc;
355}
356
357static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
358{
359	void *sector_data;
360	u16 *map;
361	size_t retlen;
362	int i, rc = -ENOMEM;
363
364	part->is_reclaiming = 1;
365
366	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
367	if (!sector_data)
368		goto err3;
369
370	map = kmalloc(part->header_size, GFP_KERNEL);
371	if (!map)
372		goto err2;
373
374	rc = part->mbd.mtd->read(part->mbd.mtd,
375		part->blocks[block_no].offset, part->header_size,
376		&retlen, (u_char*)map);
377
378	if (!rc && retlen != part->header_size)
379		rc = -EIO;
380
381	if (rc) {
382		printk(KERN_ERR PREFIX "error reading '%s' at "
383			"0x%lx\n", part->mbd.mtd->name,
384			part->blocks[block_no].offset);
385
386		goto err;
387	}
388
389	for (i=0; i<part->data_sectors_per_block; i++) {
390		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
391		u_long addr;
392
393
394		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
395			continue;
396
397		if (entry == SECTOR_ZERO)
398			entry = 0;
399
400		/* already warned about and ignored in build_block_map() */
401		if (entry >= part->sector_count)
402			continue;
403
404		addr = part->blocks[block_no].offset +
405			(i + part->header_sectors_per_block) * SECTOR_SIZE;
406
407		if (*old_sector == addr) {
408			*old_sector = -1;
409			if (!part->blocks[block_no].used_sectors--) {
410				rc = erase_block(part, block_no);
411				break;
412			}
413			continue;
414		}
415		rc = part->mbd.mtd->read(part->mbd.mtd, addr,
416			SECTOR_SIZE, &retlen, sector_data);
417
418		if (!rc && retlen != SECTOR_SIZE)
419			rc = -EIO;
420
421		if (rc) {
422			printk(KERN_ERR PREFIX "'%s': Unable to "
423				"read sector for relocation\n",
424				part->mbd.mtd->name);
425
426			goto err;
427		}
428
429		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
430				entry, sector_data);
431
432		if (rc)
433			goto err;
434	}
435
436err:
437	kfree(map);
438err2:
439	kfree(sector_data);
440err3:
441	part->is_reclaiming = 0;
442
443	return rc;
444}
445
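/*
 * Pick an erase unit to reclaim.  Reclaiming is postponed as long as any unit
 * still has free sectors.  Otherwise the unit with the lowest score (live
 * sectors plus erase count, slightly biased towards the unit holding the
 * sector being replaced) becomes the new reserved block: its live sectors are
 * copied elsewhere so that the unit can be erased and reused.
 */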
446static int reclaim_block(struct partition *part, u_long *old_sector)
447{
448	int block, best_block, score, old_sector_block;
449	int rc;
450
451	/* we have a race if sync doesn't exist */
452	if (part->mbd.mtd->sync)
453		part->mbd.mtd->sync(part->mbd.mtd);
454
455	score = 0x7fffffff; /* MAX_INT */
456	best_block = -1;
457	if (*old_sector != -1)
458		old_sector_block = *old_sector / part->block_size;
459	else
460		old_sector_block = -1;
461
462	for (block=0; block<part->total_blocks; block++) {
463		int this_score;
464
465		if (block == part->reserved_block)
466			continue;
467
468		/*
469		 * Postpone reclaiming if there is a free sector as
470		 * more removed sectors is more efficient (have to move
471		 * less).
472		 */
473		if (part->blocks[block].free_sectors)
474			return 0;
475
476		this_score = part->blocks[block].used_sectors;
477
478		if (block == old_sector_block)
479			this_score--;
480		else {
481			/* no point in moving a full block */
482			if (part->blocks[block].used_sectors ==
483					part->data_sectors_per_block)
484				continue;
485		}
486
487		this_score += part->blocks[block].erases;
488
489		if (this_score < score) {
490			best_block = block;
491			score = this_score;
492		}
493	}
494
495	if (best_block == -1)
496		return -ENOSPC;
497
498	part->current_block = -1;
499	part->reserved_block = best_block;
500
501	pr_debug("reclaim_block: reclaiming block #%d with %d used "
502		 "%d free sectors\n", best_block,
503		 part->blocks[best_block].used_sectors,
504		 part->blocks[best_block].free_sectors);
505
506	if (part->blocks[best_block].used_sectors)
507		rc = move_block_contents(part, best_block, old_sector);
508	else
509		rc = erase_block(part, best_block);
510
511	return rc;
512}
513
514/*
515 * IMPROVE: It would be best to choose the block with the most deleted sectors,
516 * because if we fill that one up first it'll have the most chance of having
517 * the least live sectors at reclaim.
518 */
519static int find_free_block(struct partition *part)
520{
521	int block, stop;
522
523	block = part->current_block == -1 ?
524			jiffies % part->total_blocks : part->current_block;
525	stop = block;
526
527	do {
528		if (part->blocks[block].free_sectors &&
529				block != part->reserved_block)
530			return block;
531
532		if (part->blocks[block].state == BLOCK_UNUSED)
533			erase_block(part, block);
534
535		if (++block >= part->total_blocks)
536			block = 0;
537
538	} while (block != stop);
539
540	return -1;
541}
542
543static int find_writable_block(struct partition *part, u_long *old_sector)
544{
545	int rc, block;
546	size_t retlen;
547
548	block = find_free_block(part);
549
550	if (block == -1) {
551		if (!part->is_reclaiming) {
552			rc = reclaim_block(part, old_sector);
553			if (rc)
554				goto err;
555
556			block = find_free_block(part);
557		}
558
559		if (block == -1) {
560			rc = -ENOSPC;
561			goto err;
562		}
563	}
564
565	rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
566		part->header_size, &retlen, (u_char*)part->header_cache);
567
568	if (!rc && retlen != part->header_size)
569		rc = -EIO;
570
571	if (rc) {
572		printk(KERN_ERR PREFIX "'%s': unable to read header at "
573				"0x%lx\n", part->mbd.mtd->name,
574				part->blocks[block].offset);
575		goto err;
576	}
577
578	part->current_block = block;
579
580err:
581	return rc;
582}
583
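/*
 * Invalidate the old copy of a sector by writing SECTOR_DELETED over its map
 * entry in the erase unit header.  A unit that ends up with neither used nor
 * free sectors is erased right away.
 */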
584static int mark_sector_deleted(struct partition *part, u_long old_addr)
585{
586	int block, offset, rc;
587	u_long addr;
588	size_t retlen;
589	u16 del = cpu_to_le16(SECTOR_DELETED);
590
591	block = old_addr / part->block_size;
592	offset = (old_addr % part->block_size) / SECTOR_SIZE -
593		part->header_sectors_per_block;
594
595	addr = part->blocks[block].offset +
596			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
597	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
598		sizeof(del), &retlen, (u_char*)&del);
599
600	if (!rc && retlen != sizeof(del))
601		rc = -EIO;
602
603	if (rc) {
604		printk(KERN_ERR PREFIX "error writing '%s' at "
605			"0x%lx\n", part->mbd.mtd->name, addr);
606		if (rc)
607			goto err;
608	}
609	if (block == part->current_block)
610		part->header_cache[offset + HEADER_MAP_OFFSET] = del;
611
612	part->blocks[block].used_sectors--;
613
614	if (!part->blocks[block].used_sectors &&
615	    !part->blocks[block].free_sectors)
616		rc = erase_block(part, block);
617
618err:
619	return rc;
620}
621
622static int find_free_sector(const struct partition *part, const struct block *block)
623{
624	int i, stop;
625
626	i = stop = part->data_sectors_per_block - block->free_sectors;
627
628	do {
629		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
630				== SECTOR_FREE)
631			return i;
632
633		if (++i == part->data_sectors_per_block)
634			i = 0;
635	}
636	while(i != stop);
637
638	return -1;
639}
640
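/*
 * Write one sector's payload into the next free data sector of the current
 * block, then record the logical sector number in the block's header map.
 * Logical sector 0 is stored as SECTOR_ZERO, since the value 0x0000 already
 * means "deleted".
 */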
641static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
642{
643	struct partition *part = (struct partition*)dev;
644	struct block *block;
645	u_long addr;
646	int i;
647	int rc;
648	size_t retlen;
649	u16 entry;
650
651	if (part->current_block == -1 ||
652		!part->blocks[part->current_block].free_sectors) {
653
654		rc = find_writable_block(part, old_addr);
655		if (rc)
656			goto err;
657	}
658
659	block = &part->blocks[part->current_block];
660
661	i = find_free_sector(part, block);
662
663	if (i < 0) {
664		rc = -ENOSPC;
665		goto err;
666	}
667
668	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
669		block->offset;
670	rc = part->mbd.mtd->write(part->mbd.mtd,
671		addr, SECTOR_SIZE, &retlen, (u_char*)buf);
672
673	if (!rc && retlen != SECTOR_SIZE)
674		rc = -EIO;
675
676	if (rc) {
677		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
678				part->mbd.mtd->name, addr);
679		if (rc)
680			goto err;
681	}
682
683	part->sector_map[sector] = addr;
684
685	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
686
687	part->header_cache[i + HEADER_MAP_OFFSET] = entry;
688
689	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
690	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
691			sizeof(entry), &retlen, (u_char*)&entry);
692
693	if (!rc && retlen != sizeof(entry))
694		rc = -EIO;
695
696	if (rc) {
697		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
698				part->mbd.mtd->name, addr);
699		if (rc)
700			goto err;
701	}
702	block->used_sectors++;
703	block->free_sectors--;
704
705err:
706	return rc;
707}
708
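/*
 * An all-zero sector is never written to flash: it is simply left unmapped
 * (and any previous copy deleted), and rfd_ftl_readsect() returns zeroes for
 * unmapped sectors.
 */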
709static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
710{
711	struct partition *part = (struct partition*)dev;
712	u_long old_addr;
713	int i;
714	int rc = 0;
715
716	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
717
718	if (part->reserved_block == -1) {
719		rc = -EACCES;
720		goto err;
721	}
722
723	if (sector >= part->sector_count) {
724		rc = -EIO;
725		goto err;
726	}
727
728	old_addr = part->sector_map[sector];
729
730	for (i=0; i<SECTOR_SIZE; i++) {
731		if (!buf[i])
732			continue;
733
734		rc = do_writesect(dev, sector, buf, &old_addr);
735		if (rc)
736			goto err;
737		break;
738	}
739
740	if (i == SECTOR_SIZE)
741		part->sector_map[sector] = -1;
742
743	if (old_addr != -1)
744		rc = mark_sector_deleted(part, old_addr);
745
746err:
747	return rc;
748}
749
750static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
751{
752	struct partition *part = (struct partition*)dev;
753
754	geo->heads = 1;
755	geo->sectors = SECTORS_PER_TRACK;
756	geo->cylinders = part->cylinders;
757
758	return 0;
759}
760
761static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
762{
763	struct partition *part;
764
765	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
766		return;
767
768	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
769	if (!part)
770		return;
771
772	part->mbd.mtd = mtd;
773
774	if (block_size)
775		part->block_size = block_size;
776	else {
777		if (!mtd->erasesize) {
778			printk(KERN_WARNING PREFIX "please provide block_size");
779			goto out;
780		} else
781			part->block_size = mtd->erasesize;
782	}
783
784	if (scan_header(part) == 0) {
785		part->mbd.size = part->sector_count;
786		part->mbd.tr = tr;
787		part->mbd.devnum = -1;
788		if (!(mtd->flags & MTD_WRITEABLE))
789			part->mbd.readonly = 1;
790		else if (part->errors) {
791			printk(KERN_WARNING PREFIX "'%s': errors found, "
792					"setting read-only\n", mtd->name);
793			part->mbd.readonly = 1;
794		}
795
796		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
797				mtd->name, mtd->type, mtd->flags);
798
799		if (!add_mtd_blktrans_dev((void*)part))
800			return;
801	}
802out:
803	kfree(part);
804}
805
806static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
807{
808	struct partition *part = (struct partition*)dev;
809	int i;
810
811	for (i=0; i<part->total_blocks; i++) {
812		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
813			part->mbd.mtd->name, i, part->blocks[i].erases);
814	}
815
816	del_mtd_blktrans_dev(dev);
817	vfree(part->sector_map);
818	kfree(part->header_cache);
819	kfree(part->blocks);
820}
821
822static struct mtd_blktrans_ops rfd_ftl_tr = {
823	.name		= "rfd",
824	.major		= RFD_FTL_MAJOR,
825	.part_bits	= PART_BITS,
826	.blksize 	= SECTOR_SIZE,
827
828	.readsect	= rfd_ftl_readsect,
829	.writesect	= rfd_ftl_writesect,
830	.getgeo		= rfd_ftl_getgeo,
831	.add_mtd	= rfd_ftl_add_mtd,
832	.remove_dev	= rfd_ftl_remove_dev,
833	.owner		= THIS_MODULE,
834};
835
836static int __init init_rfd_ftl(void)
837{
838	return register_mtd_blktrans(&rfd_ftl_tr);
839}
840
841static void __exit cleanup_rfd_ftl(void)
842{
843	deregister_mtd_blktrans(&rfd_ftl_tr);
844}
845
846module_init(init_rfd_ftl);
847module_exit(cleanup_rfd_ftl);
848
849MODULE_LICENSE("GPL");
850MODULE_AUTHOR("Sean Young <sean@mess.org>");
851MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
852		"used by General Software's Embedded BIOS");
853
Linux v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * rfd_ftl.c -- resident flash disk (flash translation layer)
  4 *
  5 * Copyright © 2005  Sean Young <sean@mess.org>
  6 *
  7 * This type of flash translation layer (FTL) is used by the Embedded BIOS
  8 * by General Software. It is known as the Resident Flash Disk (RFD), see:
  9 *
 10 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 11 *
 12 * based on ftl.c
 13 */
 14
 15#include <linux/hdreg.h>
 16#include <linux/init.h>
 17#include <linux/mtd/blktrans.h>
 18#include <linux/mtd/mtd.h>
 19#include <linux/vmalloc.h>
 20#include <linux/slab.h>
 21#include <linux/jiffies.h>
 22#include <linux/module.h>
 23
 24#include <asm/types.h>
 25
 26static int block_size = 0;
 27module_param(block_size, int, 0);
 28MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
 29
 30#define PREFIX "rfd_ftl: "
 31
 32/* This major has been assigned by device@lanana.org */
 33#ifndef RFD_FTL_MAJOR
 34#define RFD_FTL_MAJOR		256
 35#endif
 36
 37/* Maximum number of partitions in an FTL region */
 38#define PART_BITS		4
 39
 40/* An erase unit should start with this value */
 41#define RFD_MAGIC		0x9193
 42
 43/* the second value is 0xffff or 0xffc8; function unknown */
 44
 45/* the third value is always 0xffff, ignored */
 46
 47/* next is an array of mapping for each corresponding sector */
 48#define HEADER_MAP_OFFSET	3
 49#define SECTOR_DELETED		0x0000
 50#define SECTOR_ZERO		0xfffe
 51#define SECTOR_FREE		0xffff
 52
 53#define SECTOR_SIZE		512
 54
 55#define SECTORS_PER_TRACK	63
 56
 57struct block {
 58	enum {
 59		BLOCK_OK,
 60		BLOCK_ERASING,
 61		BLOCK_ERASED,
 62		BLOCK_UNUSED,
 63		BLOCK_FAILED
 64	} state;
 65	int free_sectors;
 66	int used_sectors;
 67	int erases;
 68	u_long offset;
 69};
 70
 71struct partition {
 72	struct mtd_blktrans_dev mbd;
 73
 74	u_int block_size;		/* size of erase unit */
 75	u_int total_blocks;		/* number of erase units */
 76	u_int header_sectors_per_block;	/* header sectors in erase unit */
 77	u_int data_sectors_per_block;	/* data sectors in erase unit */
 78	u_int sector_count;		/* sectors in translated disk */
 79	u_int header_size;		/* bytes in header sector */
 80	int reserved_block;		/* block next up for reclaim */
 81	int current_block;		/* block to write to */
 82	u16 *header_cache;		/* cached header */
 83
 84	int is_reclaiming;
 85	int cylinders;
 86	int errors;
 87	u_long *sector_map;
 88	struct block *blocks;
 89};
 90
 91static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
 92
 93static int build_block_map(struct partition *part, int block_no)
 94{
 95	struct block *block = &part->blocks[block_no];
 96	int i;
 97
 98	block->offset = part->block_size * block_no;
 99
100	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
101		block->state = BLOCK_UNUSED;
102		return -ENOENT;
103	}
104
105	block->state = BLOCK_OK;
106
107	for (i=0; i<part->data_sectors_per_block; i++) {
108		u16 entry;
109
110		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
111
112		if (entry == SECTOR_DELETED)
113			continue;
114
115		if (entry == SECTOR_FREE) {
116			block->free_sectors++;
117			continue;
118		}
119
120		if (entry == SECTOR_ZERO)
121			entry = 0;
122
123		if (entry >= part->sector_count) {
124			printk(KERN_WARNING PREFIX
125				"'%s': unit #%d: entry %d corrupt, "
126				"sector %d out of range\n",
127				part->mbd.mtd->name, block_no, i, entry);
128			continue;
129		}
130
131		if (part->sector_map[entry] != -1) {
132			printk(KERN_WARNING PREFIX
133				"'%s': more than one entry for sector %d\n",
134				part->mbd.mtd->name, entry);
135			part->errors = 1;
136			continue;
137		}
138
139		part->sector_map[entry] = block->offset +
140			(i + part->header_sectors_per_block) * SECTOR_SIZE;
141
142		block->used_sectors++;
143	}
144
145	if (block->free_sectors == part->data_sectors_per_block)
146		part->reserved_block = block_no;
147
148	return 0;
149}
150
151static int scan_header(struct partition *part)
152{
153	int sectors_per_block;
154	int i, rc = -ENOMEM;
155	int blocks_found;
156	size_t retlen;
157
158	sectors_per_block = part->block_size / SECTOR_SIZE;
159	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
160
161	if (part->total_blocks < 2)
162		return -ENOENT;
163
164	/* each erase block has three bytes header, followed by the map */
165	part->header_sectors_per_block =
166			((HEADER_MAP_OFFSET + sectors_per_block) *
167			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
168
169	part->data_sectors_per_block = sectors_per_block -
170			part->header_sectors_per_block;
171
172	part->header_size = (HEADER_MAP_OFFSET +
173			part->data_sectors_per_block) * sizeof(u16);
174
175	part->cylinders = (part->data_sectors_per_block *
176			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
177
178	part->sector_count = part->cylinders * SECTORS_PER_TRACK;
179
180	part->current_block = -1;
181	part->reserved_block = -1;
182	part->is_reclaiming = 0;
183
184	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
185	if (!part->header_cache)
186		goto err;
187
188	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
189			GFP_KERNEL);
190	if (!part->blocks)
191		goto err;
192
193	part->sector_map = vmalloc(array_size(sizeof(u_long),
194					      part->sector_count));
195	if (!part->sector_map)
196		goto err;
197
198	for (i=0; i<part->sector_count; i++)
199		part->sector_map[i] = -1;
200
201	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
202		rc = mtd_read(part->mbd.mtd, i * part->block_size,
203			      part->header_size, &retlen,
204			      (u_char *)part->header_cache);
205
206		if (!rc && retlen != part->header_size)
207			rc = -EIO;
208
209		if (rc)
210			goto err;
211
212		if (!build_block_map(part, i))
213			blocks_found++;
214	}
215
216	if (blocks_found == 0) {
217		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
218				part->mbd.mtd->name);
219		rc = -ENOENT;
220		goto err;
221	}
222
223	if (part->reserved_block == -1) {
224		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
225				part->mbd.mtd->name);
226
227		part->errors = 1;
228	}
229
230	return 0;
231
232err:
233	vfree(part->sector_map);
234	kfree(part->header_cache);
235	kfree(part->blocks);
236
237	return rc;
238}
239
240static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
241{
242	struct partition *part = container_of(dev, struct partition, mbd);
243	u_long addr;
244	size_t retlen;
245	int rc;
246
247	if (sector >= part->sector_count)
248		return -EIO;
249
250	addr = part->sector_map[sector];
251	if (addr != -1) {
252		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
253			      (u_char *)buf);
254		if (!rc && retlen != SECTOR_SIZE)
255			rc = -EIO;
256
257		if (rc) {
258			printk(KERN_WARNING PREFIX "error reading '%s' at "
259				"0x%lx\n", part->mbd.mtd->name, addr);
260			return rc;
261		}
262	} else
263		memset(buf, 0, SECTOR_SIZE);
264
265	return 0;
266}
267
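/*
 * In this listing the erase is handled synchronously instead of through the
 * erase_callback() used in the older listing above: once mtd_erase() returns
 * successfully, the unit is re-stamped with RFD_MAGIC right here, otherwise
 * it is marked BLOCK_FAILED.
 */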
268static int erase_block(struct partition *part, int block)
269{
270	struct erase_info *erase;
271	int rc;
272
273	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
274	if (!erase)
275		return -ENOMEM;
276
277	erase->addr = part->blocks[block].offset;
278	erase->len = part->block_size;
279
280	part->blocks[block].state = BLOCK_ERASING;
281	part->blocks[block].free_sectors = 0;
282
283	rc = mtd_erase(part->mbd.mtd, erase);
284	if (rc) {
285		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
286				"failed\n", (unsigned long long)erase->addr,
287				(unsigned long long)erase->len, part->mbd.mtd->name);
288		part->blocks[block].state = BLOCK_FAILED;
289		part->blocks[block].free_sectors = 0;
290		part->blocks[block].used_sectors = 0;
291	} else {
292		u16 magic = cpu_to_le16(RFD_MAGIC);
293		size_t retlen;
294
295		part->blocks[block].state = BLOCK_ERASED;
296		part->blocks[block].free_sectors = part->data_sectors_per_block;
297		part->blocks[block].used_sectors = 0;
298		part->blocks[block].erases++;
299
300		rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
301			       sizeof(magic), &retlen, (u_char *)&magic);
302		if (!rc && retlen != sizeof(magic))
303			rc = -EIO;
304
305		if (rc) {
306			pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
307			       part->mbd.mtd->name, part->blocks[block].offset);
308			part->blocks[block].state = BLOCK_FAILED;
309		} else {
310			part->blocks[block].state = BLOCK_OK;
311		}
312	}
313
314	kfree(erase);
315
316	return rc;
317}
318
319static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
320{
321	void *sector_data;
322	u16 *map;
323	size_t retlen;
324	int i, rc = -ENOMEM;
325
326	part->is_reclaiming = 1;
327
328	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
329	if (!sector_data)
330		goto err3;
331
332	map = kmalloc(part->header_size, GFP_KERNEL);
333	if (!map)
334		goto err2;
335
336	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
337		      part->header_size, &retlen, (u_char *)map);
338
339	if (!rc && retlen != part->header_size)
340		rc = -EIO;
341
342	if (rc) {
343		printk(KERN_ERR PREFIX "error reading '%s' at "
344			"0x%lx\n", part->mbd.mtd->name,
345			part->blocks[block_no].offset);
346
347		goto err;
348	}
349
350	for (i=0; i<part->data_sectors_per_block; i++) {
351		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
352		u_long addr;
353
354
355		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
356			continue;
357
358		if (entry == SECTOR_ZERO)
359			entry = 0;
360
361		/* already warned about and ignored in build_block_map() */
362		if (entry >= part->sector_count)
363			continue;
364
365		addr = part->blocks[block_no].offset +
366			(i + part->header_sectors_per_block) * SECTOR_SIZE;
367
368		if (*old_sector == addr) {
369			*old_sector = -1;
370			if (!part->blocks[block_no].used_sectors--) {
371				rc = erase_block(part, block_no);
372				break;
373			}
374			continue;
375		}
376		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
377			      sector_data);
378
379		if (!rc && retlen != SECTOR_SIZE)
380			rc = -EIO;
381
382		if (rc) {
383			printk(KERN_ERR PREFIX "'%s': Unable to "
384				"read sector for relocation\n",
385				part->mbd.mtd->name);
386
387			goto err;
388		}
389
390		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
391				entry, sector_data);
392
393		if (rc)
394			goto err;
395	}
396
397err:
398	kfree(map);
399err2:
400	kfree(sector_data);
401err3:
402	part->is_reclaiming = 0;
403
404	return rc;
405}
406
407static int reclaim_block(struct partition *part, u_long *old_sector)
408{
409	int block, best_block, score, old_sector_block;
410	int rc;
411
412	/* we have a race if sync doesn't exist */
413	mtd_sync(part->mbd.mtd);
414
415	score = 0x7fffffff; /* MAX_INT */
416	best_block = -1;
417	if (*old_sector != -1)
418		old_sector_block = *old_sector / part->block_size;
419	else
420		old_sector_block = -1;
421
422	for (block=0; block<part->total_blocks; block++) {
423		int this_score;
424
425		if (block == part->reserved_block)
426			continue;
427
428		/*
429		 * Postpone reclaiming if there is a free sector as
430		 * more removed sectors is more efficient (have to move
431		 * less).
432		 */
433		if (part->blocks[block].free_sectors)
434			return 0;
435
436		this_score = part->blocks[block].used_sectors;
437
438		if (block == old_sector_block)
439			this_score--;
440		else {
441			/* no point in moving a full block */
442			if (part->blocks[block].used_sectors ==
443					part->data_sectors_per_block)
444				continue;
445		}
446
447		this_score += part->blocks[block].erases;
448
449		if (this_score < score) {
450			best_block = block;
451			score = this_score;
452		}
453	}
454
455	if (best_block == -1)
456		return -ENOSPC;
457
458	part->current_block = -1;
459	part->reserved_block = best_block;
460
461	pr_debug("reclaim_block: reclaiming block #%d with %d used "
462		 "%d free sectors\n", best_block,
463		 part->blocks[best_block].used_sectors,
464		 part->blocks[best_block].free_sectors);
465
466	if (part->blocks[best_block].used_sectors)
467		rc = move_block_contents(part, best_block, old_sector);
468	else
469		rc = erase_block(part, best_block);
470
471	return rc;
472}
473
474/*
475 * IMPROVE: It would be best to choose the block with the most deleted sectors,
476 * because if we fill that one up first it'll have the most chance of having
477 * the least live sectors at reclaim.
478 */
479static int find_free_block(struct partition *part)
480{
481	int block, stop;
482
483	block = part->current_block == -1 ?
484			jiffies % part->total_blocks : part->current_block;
485	stop = block;
486
487	do {
488		if (part->blocks[block].free_sectors &&
489				block != part->reserved_block)
490			return block;
491
492		if (part->blocks[block].state == BLOCK_UNUSED)
493			erase_block(part, block);
494
495		if (++block >= part->total_blocks)
496			block = 0;
497
498	} while (block != stop);
499
500	return -1;
501}
502
503static int find_writable_block(struct partition *part, u_long *old_sector)
504{
505	int rc, block;
506	size_t retlen;
507
508	block = find_free_block(part);
509
510	if (block == -1) {
511		if (!part->is_reclaiming) {
512			rc = reclaim_block(part, old_sector);
513			if (rc)
514				goto err;
515
516			block = find_free_block(part);
517		}
518
519		if (block == -1) {
520			rc = -ENOSPC;
521			goto err;
522		}
523	}
524
525	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
526		      part->header_size, &retlen,
527		      (u_char *)part->header_cache);
528
529	if (!rc && retlen != part->header_size)
530		rc = -EIO;
531
532	if (rc) {
533		printk(KERN_ERR PREFIX "'%s': unable to read header at "
534				"0x%lx\n", part->mbd.mtd->name,
535				part->blocks[block].offset);
536		goto err;
537	}
538
539	part->current_block = block;
540
541err:
542	return rc;
543}
544
545static int mark_sector_deleted(struct partition *part, u_long old_addr)
546{
547	int block, offset, rc;
548	u_long addr;
549	size_t retlen;
550	u16 del = cpu_to_le16(SECTOR_DELETED);
551
552	block = old_addr / part->block_size;
553	offset = (old_addr % part->block_size) / SECTOR_SIZE -
554		part->header_sectors_per_block;
555
556	addr = part->blocks[block].offset +
557			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
558	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
559		       (u_char *)&del);
560
561	if (!rc && retlen != sizeof(del))
562		rc = -EIO;
563
564	if (rc) {
565		printk(KERN_ERR PREFIX "error writing '%s' at "
566			"0x%lx\n", part->mbd.mtd->name, addr);
567		goto err;
568	}
569	if (block == part->current_block)
570		part->header_cache[offset + HEADER_MAP_OFFSET] = del;
571
572	part->blocks[block].used_sectors--;
573
574	if (!part->blocks[block].used_sectors &&
575	    !part->blocks[block].free_sectors)
576		rc = erase_block(part, block);
577
578err:
579	return rc;
580}
581
582static int find_free_sector(const struct partition *part, const struct block *block)
583{
584	int i, stop;
585
586	i = stop = part->data_sectors_per_block - block->free_sectors;
587
588	do {
589		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
590				== SECTOR_FREE)
591			return i;
592
593		if (++i == part->data_sectors_per_block)
594			i = 0;
595	}
596	while(i != stop);
597
598	return -1;
599}
600
601static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
602{
603	struct partition *part = container_of(dev, struct partition, mbd);
604	struct block *block;
605	u_long addr;
606	int i;
607	int rc;
608	size_t retlen;
609	u16 entry;
610
611	if (part->current_block == -1 ||
612		!part->blocks[part->current_block].free_sectors) {
613
614		rc = find_writable_block(part, old_addr);
615		if (rc)
616			goto err;
617	}
618
619	block = &part->blocks[part->current_block];
620
621	i = find_free_sector(part, block);
622
623	if (i < 0) {
624		rc = -ENOSPC;
625		goto err;
626	}
627
628	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
629		block->offset;
630	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
631		       (u_char *)buf);
632
633	if (!rc && retlen != SECTOR_SIZE)
634		rc = -EIO;
635
636	if (rc) {
637		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
638				part->mbd.mtd->name, addr);
639		goto err;
640	}
641
642	part->sector_map[sector] = addr;
643
644	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
645
646	part->header_cache[i + HEADER_MAP_OFFSET] = entry;
647
648	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
649	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
650		       (u_char *)&entry);
651
652	if (!rc && retlen != sizeof(entry))
653		rc = -EIO;
654
655	if (rc) {
656		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
657				part->mbd.mtd->name, addr);
658		goto err;
659	}
660	block->used_sectors++;
661	block->free_sectors--;
662
663err:
664	return rc;
665}
666
667static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
668{
669	struct partition *part = container_of(dev, struct partition, mbd);
670	u_long old_addr;
671	int i;
672	int rc = 0;
673
674	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
675
676	if (part->reserved_block == -1) {
677		rc = -EACCES;
678		goto err;
679	}
680
681	if (sector >= part->sector_count) {
682		rc = -EIO;
683		goto err;
684	}
685
686	old_addr = part->sector_map[sector];
687
688	for (i=0; i<SECTOR_SIZE; i++) {
689		if (!buf[i])
690			continue;
691
692		rc = do_writesect(dev, sector, buf, &old_addr);
693		if (rc)
694			goto err;
695		break;
696	}
697
698	if (i == SECTOR_SIZE)
699		part->sector_map[sector] = -1;
700
701	if (old_addr != -1)
702		rc = mark_sector_deleted(part, old_addr);
703
704err:
705	return rc;
706}
707
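/*
 * Handle a discard request from the block layer: every mapped sector in the
 * range is marked deleted in its erase unit header and unmapped, so the space
 * can be reclaimed later.
 */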
708static int rfd_ftl_discardsect(struct mtd_blktrans_dev *dev,
709			       unsigned long sector, unsigned int nr_sects)
710{
711	struct partition *part = container_of(dev, struct partition, mbd);
712	u_long addr;
713	int rc;
714
715	while (nr_sects) {
716		if (sector >= part->sector_count)
717			return -EIO;
718
719		addr = part->sector_map[sector];
720
721		if (addr != -1) {
722			rc = mark_sector_deleted(part, addr);
723			if (rc)
724				return rc;
725
726			part->sector_map[sector] = -1;
727		}
728
729		sector++;
730		nr_sects--;
731	}
732
733	return 0;
734}
735
736static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
737{
738	struct partition *part = container_of(dev, struct partition, mbd);
739
740	geo->heads = 1;
741	geo->sectors = SECTORS_PER_TRACK;
742	geo->cylinders = part->cylinders;
743
744	return 0;
745}
746
747static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
748{
749	struct partition *part;
750
751	if ((mtd->type != MTD_NORFLASH && mtd->type != MTD_RAM) ||
752	    mtd->size > UINT_MAX)
753		return;
754
755	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
756	if (!part)
757		return;
758
759	part->mbd.mtd = mtd;
760
761	if (block_size)
762		part->block_size = block_size;
763	else {
764		if (!mtd->erasesize) {
765			printk(KERN_WARNING PREFIX "please provide block_size");
766			goto out;
767		} else
768			part->block_size = mtd->erasesize;
769	}
770
771	if (scan_header(part) == 0) {
772		part->mbd.size = part->sector_count;
773		part->mbd.tr = tr;
774		part->mbd.devnum = -1;
775		if (!(mtd->flags & MTD_WRITEABLE))
776			part->mbd.readonly = 1;
777		else if (part->errors) {
778			printk(KERN_WARNING PREFIX "'%s': errors found, "
779					"setting read-only\n", mtd->name);
780			part->mbd.readonly = 1;
781		}
782
783		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
784				mtd->name, mtd->type, mtd->flags);
785
786		if (!add_mtd_blktrans_dev(&part->mbd))
787			return;
788	}
789out:
790	kfree(part);
791}
792
793static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
794{
795	struct partition *part = container_of(dev, struct partition, mbd);
796	int i;
797
798	for (i=0; i<part->total_blocks; i++) {
799		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
800			part->mbd.mtd->name, i, part->blocks[i].erases);
801	}
802
803	vfree(part->sector_map);
804	kfree(part->header_cache);
805	kfree(part->blocks);
806	del_mtd_blktrans_dev(&part->mbd);
807}
808
809static struct mtd_blktrans_ops rfd_ftl_tr = {
810	.name		= "rfd",
811	.major		= RFD_FTL_MAJOR,
812	.part_bits	= PART_BITS,
813	.blksize 	= SECTOR_SIZE,
814
815	.readsect	= rfd_ftl_readsect,
816	.writesect	= rfd_ftl_writesect,
817	.discard	= rfd_ftl_discardsect,
818	.getgeo		= rfd_ftl_getgeo,
819	.add_mtd	= rfd_ftl_add_mtd,
820	.remove_dev	= rfd_ftl_remove_dev,
821	.owner		= THIS_MODULE,
822};
823
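/*
 * module_mtd_blktrans() replaces the explicit module_init()/module_exit()
 * pair of the older listing: it registers rfd_ftl_tr when the module is
 * loaded and deregisters it on unload.
 */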
824module_mtd_blktrans(rfd_ftl_tr);
825
826MODULE_LICENSE("GPL");
827MODULE_AUTHOR("Sean Young <sean@mess.org>");
828MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
829		"used by General Software's Embedded BIOS");
830