v5.9 (drivers/mtd/mtd_blkdevs.c)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Interface to Linux block layer for MTD 'translation layers'.
  4 *
  5 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
  6 */
  7
  8#include <linux/kernel.h>
  9#include <linux/slab.h>
 10#include <linux/module.h>
 11#include <linux/list.h>
 12#include <linux/fs.h>
 13#include <linux/mtd/blktrans.h>
 14#include <linux/mtd/mtd.h>
 15#include <linux/blkdev.h>
 16#include <linux/blk-mq.h>
 17#include <linux/blkpg.h>
 18#include <linux/spinlock.h>
 19#include <linux/hdreg.h>
 20#include <linux/mutex.h>
 21#include <linux/uaccess.h>
 22
 23#include "mtdcore.h"
 24
 25static LIST_HEAD(blktrans_majors);
 26static DEFINE_MUTEX(blktrans_ref_mutex);
 27
 28static void blktrans_dev_release(struct kref *kref)
 29{
 30	struct mtd_blktrans_dev *dev =
 31		container_of(kref, struct mtd_blktrans_dev, ref);
 32
 33	dev->disk->private_data = NULL;
 34	blk_cleanup_queue(dev->rq);
 35	blk_mq_free_tag_set(dev->tag_set);
 36	kfree(dev->tag_set);
 37	put_disk(dev->disk);
 38	list_del(&dev->list);
 39	kfree(dev);
 40}
 41
 42static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
 43{
 44	struct mtd_blktrans_dev *dev;
 45
 46	mutex_lock(&blktrans_ref_mutex);
 47	dev = disk->private_data;
 48
 49	if (!dev)
 50		goto unlock;
 51	kref_get(&dev->ref);
 52unlock:
 53	mutex_unlock(&blktrans_ref_mutex);
 54	return dev;
 55}
 56
 57static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
 58{
 59	mutex_lock(&blktrans_ref_mutex);
 60	kref_put(&dev->ref, blktrans_dev_release);
 61	mutex_unlock(&blktrans_ref_mutex);
 62}
 63
 64
 65static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
 66			       struct mtd_blktrans_dev *dev,
 67			       struct request *req)
 68{
 69	unsigned long block, nsect;
 70	char *buf;
 71
 72	block = blk_rq_pos(req) << 9 >> tr->blkshift;
 73	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
 74
 75	if (req_op(req) == REQ_OP_FLUSH) {
 76		if (tr->flush(dev))
 77			return BLK_STS_IOERR;
 78		return BLK_STS_OK;
 79	}
 80
 81	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
 82	    get_capacity(req->rq_disk))
 83		return BLK_STS_IOERR;
 84
 85	switch (req_op(req)) {
 86	case REQ_OP_DISCARD:
 87		if (tr->discard(dev, block, nsect))
 88			return BLK_STS_IOERR;
 89		return BLK_STS_OK;
 90	case REQ_OP_READ:
 91		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
 92		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
 93			if (tr->readsect(dev, block, buf)) {
 94				kunmap(bio_page(req->bio));
 95				return BLK_STS_IOERR;
 96			}
 97		}
 98		kunmap(bio_page(req->bio));
 99		rq_flush_dcache_pages(req);
100		return BLK_STS_OK;
101	case REQ_OP_WRITE:
102		if (!tr->writesect)
103			return BLK_STS_IOERR;
104
105		rq_flush_dcache_pages(req);
106		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
107		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
108			if (tr->writesect(dev, block, buf)) {
109				kunmap(bio_page(req->bio));
110				return BLK_STS_IOERR;
111			}
112		}
113		kunmap(bio_page(req->bio));
114		return BLK_STS_OK;
115	default:
116		return BLK_STS_IOERR;
117	}
118}
119
120int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
121{
122	return dev->bg_stop;
123}
124EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
125
126static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
127{
128	struct request *rq;
129
130	rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
131	if (rq) {
132		list_del_init(&rq->queuelist);
133		blk_mq_start_request(rq);
134		return rq;
135	}
136
137	return NULL;
138}
139
140static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
141	__releases(&dev->queue_lock)
142	__acquires(&dev->queue_lock)
143{
144	struct mtd_blktrans_ops *tr = dev->tr;
145	struct request *req = NULL;
146	int background_done = 0;
147
148	while (1) {
149		blk_status_t res;
150
151		dev->bg_stop = false;
152		if (!req && !(req = mtd_next_request(dev))) {
153			if (tr->background && !background_done) {
154				spin_unlock_irq(&dev->queue_lock);
155				mutex_lock(&dev->lock);
156				tr->background(dev);
157				mutex_unlock(&dev->lock);
158				spin_lock_irq(&dev->queue_lock);
159				/*
160				 * Do background processing just once per idle
161				 * period.
162				 */
163				background_done = !dev->bg_stop;
164				continue;
165			}
166			break;
167		}
168
169		spin_unlock_irq(&dev->queue_lock);
170
171		mutex_lock(&dev->lock);
172		res = do_blktrans_request(dev->tr, dev, req);
173		mutex_unlock(&dev->lock);
174
175		if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
176			__blk_mq_end_request(req, res);
177			req = NULL;
178		}
179
180		background_done = 0;
181		spin_lock_irq(&dev->queue_lock);
182	}
183}
184
185static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
186				 const struct blk_mq_queue_data *bd)
187{
188	struct mtd_blktrans_dev *dev;
189
190	dev = hctx->queue->queuedata;
191	if (!dev) {
192		blk_mq_start_request(bd->rq);
193		return BLK_STS_IOERR;
194	}
195
196	spin_lock_irq(&dev->queue_lock);
197	list_add_tail(&bd->rq->queuelist, &dev->rq_list);
198	mtd_blktrans_work(dev);
199	spin_unlock_irq(&dev->queue_lock);
200
201	return BLK_STS_OK;
202}
203
204static int blktrans_open(struct block_device *bdev, fmode_t mode)
205{
206	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
207	int ret = 0;
208
209	if (!dev)
210		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
211
212	mutex_lock(&mtd_table_mutex);
213	mutex_lock(&dev->lock);
214
215	if (dev->open)
216		goto unlock;
217
218	kref_get(&dev->ref);
219	__module_get(dev->tr->owner);
220
221	if (!dev->mtd)
222		goto unlock;
223
224	if (dev->tr->open) {
225		ret = dev->tr->open(dev);
226		if (ret)
227			goto error_put;
228	}
229
230	ret = __get_mtd_device(dev->mtd);
231	if (ret)
232		goto error_release;
233	dev->file_mode = mode;
234
235unlock:
236	dev->open++;
237	mutex_unlock(&dev->lock);
238	mutex_unlock(&mtd_table_mutex);
239	blktrans_dev_put(dev);
240	return ret;
241
242error_release:
243	if (dev->tr->release)
244		dev->tr->release(dev);
245error_put:
246	module_put(dev->tr->owner);
247	kref_put(&dev->ref, blktrans_dev_release);
248	mutex_unlock(&dev->lock);
249	mutex_unlock(&mtd_table_mutex);
250	blktrans_dev_put(dev);
251	return ret;
252}
253
254static void blktrans_release(struct gendisk *disk, fmode_t mode)
255{
256	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
257
258	if (!dev)
259		return;
260
261	mutex_lock(&mtd_table_mutex);
262	mutex_lock(&dev->lock);
263
264	if (--dev->open)
265		goto unlock;
266
267	kref_put(&dev->ref, blktrans_dev_release);
268	module_put(dev->tr->owner);
269
270	if (dev->mtd) {
271		if (dev->tr->release)
272			dev->tr->release(dev);
273		__put_mtd_device(dev->mtd);
274	}
275unlock:
276	mutex_unlock(&dev->lock);
277	mutex_unlock(&mtd_table_mutex);
278	blktrans_dev_put(dev);
279}
280
281static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
282{
283	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
284	int ret = -ENXIO;
285
286	if (!dev)
287		return ret;
288
289	mutex_lock(&dev->lock);
290
291	if (!dev->mtd)
292		goto unlock;
293
294	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
295unlock:
296	mutex_unlock(&dev->lock);
297	blktrans_dev_put(dev);
298	return ret;
299}
300
301static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
302			      unsigned int cmd, unsigned long arg)
303{
304	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
305	int ret = -ENXIO;
306
307	if (!dev)
308		return ret;
309
310	mutex_lock(&dev->lock);
311
312	if (!dev->mtd)
313		goto unlock;
314
315	switch (cmd) {
316	case BLKFLSBUF:
317		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
318		break;
319	default:
320		ret = -ENOTTY;
321	}
322unlock:
323	mutex_unlock(&dev->lock);
324	blktrans_dev_put(dev);
325	return ret;
326}
327
328static const struct block_device_operations mtd_block_ops = {
329	.owner		= THIS_MODULE,
330	.open		= blktrans_open,
331	.release	= blktrans_release,
332	.ioctl		= blktrans_ioctl,
333	.getgeo		= blktrans_getgeo,
334};
335
336static const struct blk_mq_ops mtd_mq_ops = {
337	.queue_rq	= mtd_queue_rq,
338};
339
340int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
341{
342	struct mtd_blktrans_ops *tr = new->tr;
343	struct mtd_blktrans_dev *d;
344	int last_devnum = -1;
345	struct gendisk *gd;
346	int ret;
347
348	if (mutex_trylock(&mtd_table_mutex)) {
349		mutex_unlock(&mtd_table_mutex);
350		BUG();
351	}
352
353	mutex_lock(&blktrans_ref_mutex);
354	list_for_each_entry(d, &tr->devs, list) {
355		if (new->devnum == -1) {
356			/* Use first free number */
357			if (d->devnum != last_devnum+1) {
358				/* Found a free devnum. Plug it in here */
359				new->devnum = last_devnum+1;
360				list_add_tail(&new->list, &d->list);
361				goto added;
362			}
363		} else if (d->devnum == new->devnum) {
364			/* Required number taken */
365			mutex_unlock(&blktrans_ref_mutex);
366			return -EBUSY;
367		} else if (d->devnum > new->devnum) {
368			/* Required number was free */
369			list_add_tail(&new->list, &d->list);
370			goto added;
371		}
372		last_devnum = d->devnum;
373	}
374
375	ret = -EBUSY;
376	if (new->devnum == -1)
377		new->devnum = last_devnum+1;
378
379	/* Check that the device and any partitions will get valid
380	 * minor numbers and that the disk naming code below can cope
381	 * with this number. */
382	if (new->devnum > (MINORMASK >> tr->part_bits) ||
383	    (tr->part_bits && new->devnum >= 27 * 26)) {
384		mutex_unlock(&blktrans_ref_mutex);
385		goto error1;
386	}
387
388	list_add_tail(&new->list, &tr->devs);
389 added:
390	mutex_unlock(&blktrans_ref_mutex);
391
392	mutex_init(&new->lock);
393	kref_init(&new->ref);
394	if (!tr->writesect)
395		new->readonly = 1;
396
397	/* Create gendisk */
398	ret = -ENOMEM;
399	gd = alloc_disk(1 << tr->part_bits);
400
401	if (!gd)
402		goto error2;
403
404	new->disk = gd;
405	gd->private_data = new;
406	gd->major = tr->major;
407	gd->first_minor = (new->devnum) << tr->part_bits;
408	gd->fops = &mtd_block_ops;
409
410	if (tr->part_bits)
411		if (new->devnum < 26)
412			snprintf(gd->disk_name, sizeof(gd->disk_name),
413				 "%s%c", tr->name, 'a' + new->devnum);
414		else
415			snprintf(gd->disk_name, sizeof(gd->disk_name),
416				 "%s%c%c", tr->name,
417				 'a' - 1 + new->devnum / 26,
418				 'a' + new->devnum % 26);
419	else
420		snprintf(gd->disk_name, sizeof(gd->disk_name),
421			 "%s%d", tr->name, new->devnum);
422
423	set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);
424
425	/* Create the request queue */
426	spin_lock_init(&new->queue_lock);
427	INIT_LIST_HEAD(&new->rq_list);
428
429	new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
430	if (!new->tag_set)
431		goto error3;
432
433	new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,
434				BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
435	if (IS_ERR(new->rq)) {
436		ret = PTR_ERR(new->rq);
437		new->rq = NULL;
438		goto error4;
439	}
440
441	if (tr->flush)
442		blk_queue_write_cache(new->rq, true, false);
443
444	new->rq->queuedata = new;
445	blk_queue_logical_block_size(new->rq, tr->blksize);
446
447	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
448	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
449
450	if (tr->discard) {
451		blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
452		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
453	}
454
455	gd->queue = new->rq;
456
457	if (new->readonly)
458		set_disk_ro(gd, 1);
459
460	device_add_disk(&new->mtd->dev, gd, NULL);
461
462	if (new->disk_attributes) {
463		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
464					new->disk_attributes);
465		WARN_ON(ret);
466	}
467	return 0;
468error4:
469	kfree(new->tag_set);
470error3:
471	put_disk(new->disk);
472error2:
473	list_del(&new->list);
474error1:
475	return ret;
476}
477
478int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
479{
480	unsigned long flags;
481
482	if (mutex_trylock(&mtd_table_mutex)) {
483		mutex_unlock(&mtd_table_mutex);
484		BUG();
485	}
486
487	if (old->disk_attributes)
488		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
489						old->disk_attributes);
490
491	/* Stop new requests to arrive */
492	del_gendisk(old->disk);
493
494	/* Kill current requests */
495	spin_lock_irqsave(&old->queue_lock, flags);
496	old->rq->queuedata = NULL;
497	spin_unlock_irqrestore(&old->queue_lock, flags);
498
499	/* freeze+quiesce queue to ensure all requests are flushed */
500	blk_mq_freeze_queue(old->rq);
501	blk_mq_quiesce_queue(old->rq);
502	blk_mq_unquiesce_queue(old->rq);
503	blk_mq_unfreeze_queue(old->rq);
504
505	/* If the device is currently open, tell trans driver to close it,
506		then put mtd device, and don't touch it again */
507	mutex_lock(&old->lock);
508	if (old->open) {
509		if (old->tr->release)
510			old->tr->release(old);
511		__put_mtd_device(old->mtd);
512	}
513
514	old->mtd = NULL;
515
516	mutex_unlock(&old->lock);
517	blktrans_dev_put(old);
518	return 0;
519}
520
521static void blktrans_notify_remove(struct mtd_info *mtd)
522{
523	struct mtd_blktrans_ops *tr;
524	struct mtd_blktrans_dev *dev, *next;
525
526	list_for_each_entry(tr, &blktrans_majors, list)
527		list_for_each_entry_safe(dev, next, &tr->devs, list)
528			if (dev->mtd == mtd)
529				tr->remove_dev(dev);
530}
531
532static void blktrans_notify_add(struct mtd_info *mtd)
533{
534	struct mtd_blktrans_ops *tr;
535
536	if (mtd->type == MTD_ABSENT)
537		return;
538
539	list_for_each_entry(tr, &blktrans_majors, list)
540		tr->add_mtd(tr, mtd);
541}
542
543static struct mtd_notifier blktrans_notifier = {
544	.add = blktrans_notify_add,
545	.remove = blktrans_notify_remove,
546};
547
548int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
549{
550	struct mtd_info *mtd;
551	int ret;
552
553	/* Register the notifier if/when the first device type is
554	   registered, to prevent the link/init ordering from fucking
555	   us over. */
556	if (!blktrans_notifier.list.next)
557		register_mtd_user(&blktrans_notifier);
558
559
560	mutex_lock(&mtd_table_mutex);
561
562	ret = register_blkdev(tr->major, tr->name);
563	if (ret < 0) {
564		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
565		       tr->name, tr->major, ret);
566		mutex_unlock(&mtd_table_mutex);
567		return ret;
568	}
569
570	if (ret)
571		tr->major = ret;
572
573	tr->blkshift = ffs(tr->blksize) - 1;
574
575	INIT_LIST_HEAD(&tr->devs);
576	list_add(&tr->list, &blktrans_majors);
577
578	mtd_for_each_device(mtd)
579		if (mtd->type != MTD_ABSENT)
580			tr->add_mtd(tr, mtd);
581
582	mutex_unlock(&mtd_table_mutex);
583	return 0;
584}
585
586int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
587{
588	struct mtd_blktrans_dev *dev, *next;
589
590	mutex_lock(&mtd_table_mutex);
591
592	/* Remove it from the list of active majors */
593	list_del(&tr->list);
594
595	list_for_each_entry_safe(dev, next, &tr->devs, list)
596		tr->remove_dev(dev);
597
598	unregister_blkdev(tr->major, tr->name);
599	mutex_unlock(&mtd_table_mutex);
600
601	BUG_ON(!list_empty(&tr->devs));
602	return 0;
603}
604
605static void __exit mtd_blktrans_exit(void)
606{
607	/* No race here -- if someone's currently in register_mtd_blktrans
608	   we're screwed anyway. */
609	if (blktrans_notifier.list.next)
610		unregister_mtd_user(&blktrans_notifier);
611}
612
613module_exit(mtd_blktrans_exit);
614
615EXPORT_SYMBOL_GPL(register_mtd_blktrans);
616EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
617EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
618EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
619
620MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
621MODULE_LICENSE("GPL");
622MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
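
For orientation, a translation layer becomes a client of the interface above by filling in a struct mtd_blktrans_ops and handing it to register_mtd_blktrans(); the notifier then calls the layer's add_mtd() hook for each MTD device, and the hook in turn calls add_mtd_blktrans_dev() to create the block device. The following is a minimal read-only sketch, loosely modeled on the in-tree read-only mtdblock driver; the "myflash" names, the dynamic major number (0) and the 512-byte sector size are illustrative assumptions, not taken from this file.

/*
 * Hypothetical minimal translation layer ("myflash") built on the API above.
 * Assumptions: dynamic major number, 512-byte sectors, read-only access.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>

static int myflash_readsect(struct mtd_blktrans_dev *dev,
			    unsigned long block, char *buf)
{
	size_t retlen;

	/* Read one 512-byte sector from the underlying MTD device. */
	if (mtd_read(dev->mtd, (loff_t)block * 512, 512, &retlen, buf))
		return 1;
	return 0;
}

static void myflash_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;

	dev->mtd = mtd;
	dev->devnum = mtd->index;
	dev->size = mtd->size >> 9;	/* capacity in 512-byte sectors */
	dev->tr = tr;

	/* No writesect hook is provided, so the core marks the disk read-only. */
	if (add_mtd_blktrans_dev(dev))
		kfree(dev);
}

static void myflash_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
}

static struct mtd_blktrans_ops myflash_tr = {
	.name		= "myflash",
	.major		= 0,		/* 0 = let register_blkdev() pick a major */
	.part_bits	= 0,
	.blksize	= 512,
	.readsect	= myflash_readsect,
	.add_mtd	= myflash_add_mtd,
	.remove_dev	= myflash_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init myflash_init(void)
{
	return register_mtd_blktrans(&myflash_tr);
}

static void __exit myflash_exit(void)
{
	deregister_mtd_blktrans(&myflash_tr);
}

module_init(myflash_init);
module_exit(myflash_exit);
MODULE_LICENSE("GPL");

register_mtd_blktrans() walks the MTD devices already present (and the notifier covers devices added later), so no further wiring is needed in such a driver.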
v3.15 (drivers/mtd/mtd_blkdevs.c)
 
  1/*
  2 * Interface to Linux block layer for MTD 'translation layers'.
  3 *
  4 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License as published by
  8 * the Free Software Foundation; either version 2 of the License, or
  9 * (at your option) any later version.
 10 *
 11 * This program is distributed in the hope that it will be useful,
 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 *
 16 * You should have received a copy of the GNU General Public License
 17 * along with this program; if not, write to the Free Software
 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 19 *
 20 */
 21
 22#include <linux/kernel.h>
 23#include <linux/slab.h>
 24#include <linux/module.h>
 25#include <linux/list.h>
 26#include <linux/fs.h>
 27#include <linux/mtd/blktrans.h>
 28#include <linux/mtd/mtd.h>
 29#include <linux/blkdev.h>
 30#include <linux/blkpg.h>
 31#include <linux/spinlock.h>
 32#include <linux/hdreg.h>
 33#include <linux/mutex.h>
 34#include <asm/uaccess.h>
 35
 36#include "mtdcore.h"
 37
 38static LIST_HEAD(blktrans_majors);
 39static DEFINE_MUTEX(blktrans_ref_mutex);
 40
 41static void blktrans_dev_release(struct kref *kref)
 42{
 43	struct mtd_blktrans_dev *dev =
 44		container_of(kref, struct mtd_blktrans_dev, ref);
 45
 46	dev->disk->private_data = NULL;
 47	blk_cleanup_queue(dev->rq);
 48	put_disk(dev->disk);
 49	list_del(&dev->list);
 50	kfree(dev);
 51}
 52
 53static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
 54{
 55	struct mtd_blktrans_dev *dev;
 56
 57	mutex_lock(&blktrans_ref_mutex);
 58	dev = disk->private_data;
 59
 60	if (!dev)
 61		goto unlock;
 62	kref_get(&dev->ref);
 63unlock:
 64	mutex_unlock(&blktrans_ref_mutex);
 65	return dev;
 66}
 67
 68static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
 69{
 70	mutex_lock(&blktrans_ref_mutex);
 71	kref_put(&dev->ref, blktrans_dev_release);
 72	mutex_unlock(&blktrans_ref_mutex);
 73}
 74
 75
 76static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 77			       struct mtd_blktrans_dev *dev,
 78			       struct request *req)
 79{
 80	unsigned long block, nsect;
 81	char *buf;
 82
 83	block = blk_rq_pos(req) << 9 >> tr->blkshift;
 84	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
 85
 86	buf = req->buffer;
 87
 88	if (req->cmd_type != REQ_TYPE_FS)
 89		return -EIO;
 90
 91	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
 92	    get_capacity(req->rq_disk))
 93		return -EIO;
 94
 95	if (req->cmd_flags & REQ_DISCARD)
 96		return tr->discard(dev, block, nsect);
 97
 98	switch(rq_data_dir(req)) {
 99	case READ:
100		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
101			if (tr->readsect(dev, block, buf))
102				return -EIO;
103		rq_flush_dcache_pages(req);
104		return 0;
105	case WRITE:
106		if (!tr->writesect)
107			return -EIO;
108
109		rq_flush_dcache_pages(req);
110		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
111			if (tr->writesect(dev, block, buf))
112				return -EIO;
113		return 0;
114	default:
115		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
116		return -EIO;
117	}
118}
119
120int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
121{
122	return dev->bg_stop;
123}
124EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
125
126static void mtd_blktrans_work(struct work_struct *work)
127{
128	struct mtd_blktrans_dev *dev =
129		container_of(work, struct mtd_blktrans_dev, work);
130	struct mtd_blktrans_ops *tr = dev->tr;
131	struct request_queue *rq = dev->rq;
132	struct request *req = NULL;
133	int background_done = 0;
134
135	spin_lock_irq(rq->queue_lock);
136
137	while (1) {
138		int res;
139
140		dev->bg_stop = false;
141		if (!req && !(req = blk_fetch_request(rq))) {
142			if (tr->background && !background_done) {
143				spin_unlock_irq(rq->queue_lock);
144				mutex_lock(&dev->lock);
145				tr->background(dev);
146				mutex_unlock(&dev->lock);
147				spin_lock_irq(rq->queue_lock);
148				/*
149				 * Do background processing just once per idle
150				 * period.
151				 */
152				background_done = !dev->bg_stop;
153				continue;
154			}
155			break;
156		}
157
158		spin_unlock_irq(rq->queue_lock);
159
160		mutex_lock(&dev->lock);
161		res = do_blktrans_request(dev->tr, dev, req);
162		mutex_unlock(&dev->lock);
163
164		spin_lock_irq(rq->queue_lock);
165
166		if (!__blk_end_request_cur(req, res))
167			req = NULL;
168
169		background_done = 0;
170	}
171
172	if (req)
173		__blk_end_request_all(req, -EIO);
174
175	spin_unlock_irq(rq->queue_lock);
176}
177
178static void mtd_blktrans_request(struct request_queue *rq)
179{
180	struct mtd_blktrans_dev *dev;
181	struct request *req = NULL;
182
183	dev = rq->queuedata;
184
185	if (!dev)
186		while ((req = blk_fetch_request(rq)) != NULL)
187			__blk_end_request_all(req, -ENODEV);
188	else
189		queue_work(dev->wq, &dev->work);
190}
191
192static int blktrans_open(struct block_device *bdev, fmode_t mode)
193{
194	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
195	int ret = 0;
196
197	if (!dev)
198		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
199
200	mutex_lock(&dev->lock);
201
202	if (dev->open)
203		goto unlock;
204
205	kref_get(&dev->ref);
206	__module_get(dev->tr->owner);
207
208	if (!dev->mtd)
209		goto unlock;
210
211	if (dev->tr->open) {
212		ret = dev->tr->open(dev);
213		if (ret)
214			goto error_put;
215	}
216
217	ret = __get_mtd_device(dev->mtd);
218	if (ret)
219		goto error_release;
220	dev->file_mode = mode;
221
222unlock:
223	dev->open++;
224	mutex_unlock(&dev->lock);
225	blktrans_dev_put(dev);
226	return ret;
227
228error_release:
229	if (dev->tr->release)
230		dev->tr->release(dev);
231error_put:
232	module_put(dev->tr->owner);
233	kref_put(&dev->ref, blktrans_dev_release);
234	mutex_unlock(&dev->lock);
235	blktrans_dev_put(dev);
236	return ret;
237}
238
239static void blktrans_release(struct gendisk *disk, fmode_t mode)
240{
241	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
242
243	if (!dev)
244		return;
245
246	mutex_lock(&dev->lock);
247
248	if (--dev->open)
249		goto unlock;
250
251	kref_put(&dev->ref, blktrans_dev_release);
252	module_put(dev->tr->owner);
253
254	if (dev->mtd) {
255		if (dev->tr->release)
256			dev->tr->release(dev);
257		__put_mtd_device(dev->mtd);
258	}
259unlock:
260	mutex_unlock(&dev->lock);
261	blktrans_dev_put(dev);
262}
263
264static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
265{
266	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
267	int ret = -ENXIO;
268
269	if (!dev)
270		return ret;
271
272	mutex_lock(&dev->lock);
273
274	if (!dev->mtd)
275		goto unlock;
276
277	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
278unlock:
279	mutex_unlock(&dev->lock);
280	blktrans_dev_put(dev);
281	return ret;
282}
283
284static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
285			      unsigned int cmd, unsigned long arg)
286{
287	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
288	int ret = -ENXIO;
289
290	if (!dev)
291		return ret;
292
293	mutex_lock(&dev->lock);
294
295	if (!dev->mtd)
296		goto unlock;
297
298	switch (cmd) {
299	case BLKFLSBUF:
300		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
301		break;
302	default:
303		ret = -ENOTTY;
304	}
305unlock:
306	mutex_unlock(&dev->lock);
307	blktrans_dev_put(dev);
308	return ret;
309}
310
311static const struct block_device_operations mtd_block_ops = {
312	.owner		= THIS_MODULE,
313	.open		= blktrans_open,
314	.release	= blktrans_release,
315	.ioctl		= blktrans_ioctl,
316	.getgeo		= blktrans_getgeo,
317};
318
319int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
320{
321	struct mtd_blktrans_ops *tr = new->tr;
322	struct mtd_blktrans_dev *d;
323	int last_devnum = -1;
324	struct gendisk *gd;
325	int ret;
326
327	if (mutex_trylock(&mtd_table_mutex)) {
328		mutex_unlock(&mtd_table_mutex);
329		BUG();
330	}
331
332	mutex_lock(&blktrans_ref_mutex);
333	list_for_each_entry(d, &tr->devs, list) {
334		if (new->devnum == -1) {
335			/* Use first free number */
336			if (d->devnum != last_devnum+1) {
337				/* Found a free devnum. Plug it in here */
338				new->devnum = last_devnum+1;
339				list_add_tail(&new->list, &d->list);
340				goto added;
341			}
342		} else if (d->devnum == new->devnum) {
343			/* Required number taken */
344			mutex_unlock(&blktrans_ref_mutex);
345			return -EBUSY;
346		} else if (d->devnum > new->devnum) {
347			/* Required number was free */
348			list_add_tail(&new->list, &d->list);
349			goto added;
350		}
351		last_devnum = d->devnum;
352	}
353
354	ret = -EBUSY;
355	if (new->devnum == -1)
356		new->devnum = last_devnum+1;
357
358	/* Check that the device and any partitions will get valid
359	 * minor numbers and that the disk naming code below can cope
360	 * with this number. */
361	if (new->devnum > (MINORMASK >> tr->part_bits) ||
362	    (tr->part_bits && new->devnum >= 27 * 26)) {
363		mutex_unlock(&blktrans_ref_mutex);
364		goto error1;
365	}
366
367	list_add_tail(&new->list, &tr->devs);
368 added:
369	mutex_unlock(&blktrans_ref_mutex);
370
371	mutex_init(&new->lock);
372	kref_init(&new->ref);
373	if (!tr->writesect)
374		new->readonly = 1;
375
376	/* Create gendisk */
377	ret = -ENOMEM;
378	gd = alloc_disk(1 << tr->part_bits);
379
380	if (!gd)
381		goto error2;
382
383	new->disk = gd;
384	gd->private_data = new;
385	gd->major = tr->major;
386	gd->first_minor = (new->devnum) << tr->part_bits;
387	gd->fops = &mtd_block_ops;
388
389	if (tr->part_bits)
390		if (new->devnum < 26)
391			snprintf(gd->disk_name, sizeof(gd->disk_name),
392				 "%s%c", tr->name, 'a' + new->devnum);
393		else
394			snprintf(gd->disk_name, sizeof(gd->disk_name),
395				 "%s%c%c", tr->name,
396				 'a' - 1 + new->devnum / 26,
397				 'a' + new->devnum % 26);
398	else
399		snprintf(gd->disk_name, sizeof(gd->disk_name),
400			 "%s%d", tr->name, new->devnum);
401
402	set_capacity(gd, (new->size * tr->blksize) >> 9);
403
404	/* Create the request queue */
405	spin_lock_init(&new->queue_lock);
406	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
407
408	if (!new->rq)
409		goto error3;
410
411	new->rq->queuedata = new;
412	blk_queue_logical_block_size(new->rq, tr->blksize);
413
414	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
415
416	if (tr->discard) {
417		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
418		new->rq->limits.max_discard_sectors = UINT_MAX;
419	}
420
421	gd->queue = new->rq;
422
423	/* Create processing workqueue */
424	new->wq = alloc_workqueue("%s%d", 0, 0,
425				  tr->name, new->mtd->index);
426	if (!new->wq)
427		goto error4;
428	INIT_WORK(&new->work, mtd_blktrans_work);
429
430	gd->driverfs_dev = &new->mtd->dev;
431
432	if (new->readonly)
433		set_disk_ro(gd, 1);
434
435	add_disk(gd);
436
437	if (new->disk_attributes) {
438		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
439					new->disk_attributes);
440		WARN_ON(ret);
441	}
442	return 0;
443error4:
444	blk_cleanup_queue(new->rq);
445error3:
446	put_disk(new->disk);
447error2:
448	list_del(&new->list);
449error1:
450	return ret;
451}
452
453int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
454{
455	unsigned long flags;
456
457	if (mutex_trylock(&mtd_table_mutex)) {
458		mutex_unlock(&mtd_table_mutex);
459		BUG();
460	}
461
462	if (old->disk_attributes)
463		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
464						old->disk_attributes);
465
466	/* Stop new requests to arrive */
467	del_gendisk(old->disk);
468
469	/* Stop workqueue. This will perform any pending request. */
470	destroy_workqueue(old->wq);
471
472	/* Kill current requests */
473	spin_lock_irqsave(&old->queue_lock, flags);
474	old->rq->queuedata = NULL;
475	blk_start_queue(old->rq);
476	spin_unlock_irqrestore(&old->queue_lock, flags);
477
478	/* If the device is currently open, tell trans driver to close it,
479		then put mtd device, and don't touch it again */
480	mutex_lock(&old->lock);
481	if (old->open) {
482		if (old->tr->release)
483			old->tr->release(old);
484		__put_mtd_device(old->mtd);
485	}
486
487	old->mtd = NULL;
488
489	mutex_unlock(&old->lock);
490	blktrans_dev_put(old);
491	return 0;
492}
493
494static void blktrans_notify_remove(struct mtd_info *mtd)
495{
496	struct mtd_blktrans_ops *tr;
497	struct mtd_blktrans_dev *dev, *next;
498
499	list_for_each_entry(tr, &blktrans_majors, list)
500		list_for_each_entry_safe(dev, next, &tr->devs, list)
501			if (dev->mtd == mtd)
502				tr->remove_dev(dev);
503}
504
505static void blktrans_notify_add(struct mtd_info *mtd)
506{
507	struct mtd_blktrans_ops *tr;
508
509	if (mtd->type == MTD_ABSENT)
510		return;
511
512	list_for_each_entry(tr, &blktrans_majors, list)
513		tr->add_mtd(tr, mtd);
514}
515
516static struct mtd_notifier blktrans_notifier = {
517	.add = blktrans_notify_add,
518	.remove = blktrans_notify_remove,
519};
520
521int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
522{
523	struct mtd_info *mtd;
524	int ret;
525
526	/* Register the notifier if/when the first device type is
527	   registered, to prevent the link/init ordering from fucking
528	   us over. */
529	if (!blktrans_notifier.list.next)
530		register_mtd_user(&blktrans_notifier);
531
532
533	mutex_lock(&mtd_table_mutex);
534
535	ret = register_blkdev(tr->major, tr->name);
536	if (ret < 0) {
537		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
538		       tr->name, tr->major, ret);
539		mutex_unlock(&mtd_table_mutex);
540		return ret;
541	}
542
543	if (ret)
544		tr->major = ret;
545
546	tr->blkshift = ffs(tr->blksize) - 1;
547
548	INIT_LIST_HEAD(&tr->devs);
549	list_add(&tr->list, &blktrans_majors);
550
551	mtd_for_each_device(mtd)
552		if (mtd->type != MTD_ABSENT)
553			tr->add_mtd(tr, mtd);
554
555	mutex_unlock(&mtd_table_mutex);
556	return 0;
557}
558
559int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
560{
561	struct mtd_blktrans_dev *dev, *next;
562
563	mutex_lock(&mtd_table_mutex);
564
565	/* Remove it from the list of active majors */
566	list_del(&tr->list);
567
568	list_for_each_entry_safe(dev, next, &tr->devs, list)
569		tr->remove_dev(dev);
570
571	unregister_blkdev(tr->major, tr->name);
572	mutex_unlock(&mtd_table_mutex);
573
574	BUG_ON(!list_empty(&tr->devs));
575	return 0;
576}
577
578static void __exit mtd_blktrans_exit(void)
579{
580	/* No race here -- if someone's currently in register_mtd_blktrans
581	   we're screwed anyway. */
582	if (blktrans_notifier.list.next)
583		unregister_mtd_user(&blktrans_notifier);
584}
585
586module_exit(mtd_blktrans_exit);
587
588EXPORT_SYMBOL_GPL(register_mtd_blktrans);
589EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
590EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
591EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
592
593MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
594MODULE_LICENSE("GPL");
595MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");