drivers/md/dm-dust.c (v6.2)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * This is a test "dust" device, which fails reads on specified
 * sectors, emulating the behavior of a hard disk drive sending
 * a "Read Medium Error" sense.
 *
 */
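
/*
 * Note: bad blocks and the failure policy are configured at runtime via
 * "dmsetup message" (see dust_message() below). For example, per the
 * dm-dust documentation (target name and block number are illustrative):
 *
 *   dmsetup message dust1 0 addbadblock 60
 *   dmsetup message dust1 0 enable
 */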

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "dust"

struct badblock {
	struct rb_node node;
	sector_t bb;
	unsigned char wr_fail_cnt;
};

struct dust_device {
	struct dm_dev *dev;
	struct rb_root badblocklist;
	unsigned long long badblock_count;
	spinlock_t dust_lock;
	unsigned int blksz;
	int sect_per_block_shift;
	unsigned int sect_per_block;
	sector_t start;
	bool fail_read_on_bb:1;
	bool quiet_mode:1;
};

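/* The badblock rbtree is keyed by block number (bb). */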
static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct badblock *bblk = rb_entry(node, struct badblock, node);

		if (bblk->bb > blk)
			node = node->rb_left;
		else if (bblk->bb < blk)
			node = node->rb_right;
		else
			return bblk;
	}

	return NULL;
}

static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
{
	struct badblock *bblk;
	struct rb_node **link = &root->rb_node, *parent = NULL;
	sector_t value = new->bb;

	while (*link) {
		parent = *link;
		bblk = rb_entry(parent, struct badblock, node);

		if (bblk->bb > value)
			link = &(*link)->rb_left;
		else if (bblk->bb < value)
			link = &(*link)->rb_right;
		else
			return false;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);

	return true;
}

static int dust_remove_block(struct dust_device *dd, unsigned long long block)
{
	struct badblock *bblock;
	unsigned long flags;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);

	if (bblock == NULL) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu not found in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		return -EINVAL;
	}

	rb_erase(&bblock->node, &dd->badblocklist);
	dd->badblock_count--;
	if (!dd->quiet_mode)
		DMINFO("%s: badblock removed at block %llu", __func__, block);
	kfree(bblock);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

static int dust_add_block(struct dust_device *dd, unsigned long long block,
			  unsigned char wr_fail_cnt)
{
	struct badblock *bblock;
	unsigned long flags;

	bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
	if (bblock == NULL) {
		if (!dd->quiet_mode)
			DMERR("%s: badblock allocation failed", __func__);
		return -ENOMEM;
	}

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock->bb = block;
	bblock->wr_fail_cnt = wr_fail_cnt;
	if (!dust_rb_insert(&dd->badblocklist, bblock)) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu already in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		kfree(bblock);
		return -EINVAL;
	}

	dd->badblock_count++;
	if (!dd->quiet_mode) {
		DMINFO("%s: badblock added at block %llu with write fail count %u",
		       __func__, block, wr_fail_cnt);
	}
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

static int dust_query_block(struct dust_device *dd, unsigned long long block, char *result,
			    unsigned int maxlen, unsigned int *sz_ptr)
{
	struct badblock *bblock;
	unsigned long flags;
	unsigned int sz = *sz_ptr;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);
	if (bblock != NULL)
		DMEMIT("%s: block %llu found in badblocklist", __func__, block);
	else
		DMEMIT("%s: block %llu not found in badblocklist", __func__, block);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 1;
}

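/* Reads that hit a listed block fail with DM_MAPIO_KILL (an I/O error). */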
static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk)
		return DM_MAPIO_KILL;

	return DM_MAPIO_REMAPPED;
}

static int dust_map_read(struct dust_device *dd, sector_t thisblock,
			 bool fail_read_on_bb)
{
	unsigned long flags;
	int r = DM_MAPIO_REMAPPED;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		r = __dust_map_read(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return r;
}

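/*
 * Writes clear a listed block, unless a write failure count was set:
 * while wr_fail_cnt is nonzero the write fails and the count decrements.
 */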
static int __dust_map_write(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk && bblk->wr_fail_cnt > 0) {
		bblk->wr_fail_cnt--;
		return DM_MAPIO_KILL;
	}

	if (bblk) {
		rb_erase(&bblk->node, &dd->badblocklist);
		dd->badblock_count--;
		kfree(bblk);
		if (!dd->quiet_mode) {
			sector_div(thisblock, dd->sect_per_block);
			DMINFO("block %llu removed from badblocklist by write",
			       (unsigned long long)thisblock);
		}
	}

	return DM_MAPIO_REMAPPED;
}

static int dust_map_write(struct dust_device *dd, sector_t thisblock,
			  bool fail_read_on_bb)
{
	unsigned long flags;
	int r = DM_MAPIO_REMAPPED;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		r = __dust_map_write(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return r;
}

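/*
 * Map callback: remap the bio to the backing device, then apply the
 * read/write failure policy when failure injection is enabled.
 */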
static int dust_map(struct dm_target *ti, struct bio *bio)
{
	struct dust_device *dd = ti->private;
	int r;

	bio_set_dev(bio, dd->dev->bdev);
	bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (bio_data_dir(bio) == READ)
		r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
	else
		r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);

	return r;
}

static bool __dust_clear_badblocks(struct rb_root *tree,
				   unsigned long long count)
{
	struct rb_node *node = NULL, *nnode = NULL;

	nnode = rb_first(tree);
	if (nnode == NULL) {
		BUG_ON(count != 0);
		return false;
	}

	while (nnode) {
		node = nnode;
		nnode = rb_next(node);
		rb_erase(node, tree);
		count--;
		kfree(node);
	}
	BUG_ON(count != 0);
	BUG_ON(tree->rb_node != NULL);

	return true;
}

static int dust_clear_badblocks(struct dust_device *dd, char *result, unsigned int maxlen,
				unsigned int *sz_ptr)
{
	unsigned long flags;
	struct rb_root badblocklist;
	unsigned long long badblock_count;
	unsigned int sz = *sz_ptr;

	spin_lock_irqsave(&dd->dust_lock, flags);
	badblocklist = dd->badblocklist;
	badblock_count = dd->badblock_count;
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	if (!__dust_clear_badblocks(&badblocklist, badblock_count))
		DMEMIT("%s: no badblocks found", __func__);
	else
		DMEMIT("%s: badblocks cleared", __func__);

	return 1;
}

static int dust_list_badblocks(struct dust_device *dd, char *result, unsigned int maxlen,
				unsigned int *sz_ptr)
{
	unsigned long flags;
	struct rb_root badblocklist;
	struct rb_node *node;
	struct badblock *bblk;
	unsigned int sz = *sz_ptr;
	unsigned long long num = 0;

	spin_lock_irqsave(&dd->dust_lock, flags);
	badblocklist = dd->badblocklist;
	for (node = rb_first(&badblocklist); node; node = rb_next(node)) {
		bblk = rb_entry(node, struct badblock, node);
		DMEMIT("%llu\n", bblk->bb);
		num++;
	}

	spin_unlock_irqrestore(&dd->dust_lock, flags);
	if (!num)
		DMEMIT("No blocks in badblocklist");

	return 1;
}

/*
 * Target parameters:
 *
 * <device_path> <offset> <blksz>
 *
 * device_path: path to the block device
 * offset: offset to data area from start of device_path
 * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
 */
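/*
 * Example table line, per the dm-dust documentation (device path and
 * length are illustrative):
 *   dmsetup create dust1 --table '0 33552384 dust /dev/vdb1 0 512'
 */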
static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dust_device *dd;
	unsigned long long tmp;
	char dummy;
	unsigned int blksz;
	unsigned int sect_per_block;
	sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
	sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);

	if (argc != 3) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
		ti->error = "Invalid block size parameter";
		return -EINVAL;
	}

	if (blksz < 512) {
		ti->error = "Block size must be at least 512";
		return -EINVAL;
	}

	if (!is_power_of_2(blksz)) {
		ti->error = "Block size must be a power of 2";
		return -EINVAL;
	}

	if (to_sector(blksz) > max_block_sectors) {
		ti->error = "Block size is too large";
		return -EINVAL;
	}

	sect_per_block = (blksz >> SECTOR_SHIFT);

	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device offset sector";
		return -EINVAL;
	}

	dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
	if (dd == NULL) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
		ti->error = "Device lookup failed";
		kfree(dd);
		return -EINVAL;
	}

	dd->sect_per_block = sect_per_block;
	dd->blksz = blksz;
	dd->start = tmp;

	dd->sect_per_block_shift = __ffs(sect_per_block);

	/*
	 * Whether to fail a read on a "bad" block.
	 * Defaults to false; enabled later by message.
	 */
	dd->fail_read_on_bb = false;

	/*
	 * Initialize bad block list rbtree.
	 */
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_lock_init(&dd->dust_lock);

	dd->quiet_mode = false;

	BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);

	ti->num_discard_bios = 1;
	ti->num_flush_bios = 1;
	ti->private = dd;

	return 0;
}

static void dust_dtr(struct dm_target *ti)
{
	struct dust_device *dd = ti->private;

	__dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
	dm_put_device(ti, dd->dev);
	kfree(dd);
}

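/*
 * Message interface, driven by "dmsetup message <dev> 0 <msg>":
 *   addbadblock <block> [<wr_fail_cnt>], removebadblock <block>,
 *   queryblock <block>, countbadblocks, clearbadblocks, listbadblocks,
 *   enable, disable, quiet
 */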
static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	sector_t size = bdev_nr_sectors(dd->dev->bdev);
	bool invalid_msg = false;
	int r = -EINVAL;
	unsigned long long tmp, block;
	unsigned char wr_fail_cnt;
	unsigned int tmp_ui;
	unsigned long flags;
	unsigned int sz = 0;
	char dummy;

	if (argc == 1) {
		if (!strcasecmp(argv[0], "addbadblock") ||
		    !strcasecmp(argv[0], "removebadblock") ||
		    !strcasecmp(argv[0], "queryblock")) {
			DMERR("%s requires an additional argument", argv[0]);
		} else if (!strcasecmp(argv[0], "disable")) {
			DMINFO("disabling read failures on bad sectors");
			dd->fail_read_on_bb = false;
			r = 0;
		} else if (!strcasecmp(argv[0], "enable")) {
			DMINFO("enabling read failures on bad sectors");
			dd->fail_read_on_bb = true;
			r = 0;
		} else if (!strcasecmp(argv[0], "countbadblocks")) {
			spin_lock_irqsave(&dd->dust_lock, flags);
			DMEMIT("countbadblocks: %llu badblock(s) found",
			       dd->badblock_count);
			spin_unlock_irqrestore(&dd->dust_lock, flags);
			r = 1;
		} else if (!strcasecmp(argv[0], "clearbadblocks")) {
			r = dust_clear_badblocks(dd, result, maxlen, &sz);
		} else if (!strcasecmp(argv[0], "quiet")) {
			if (!dd->quiet_mode)
				dd->quiet_mode = true;
			else
				dd->quiet_mode = false;
			r = 0;
		} else if (!strcasecmp(argv[0], "listbadblocks")) {
			r = dust_list_badblocks(dd, result, maxlen, &sz);
		} else {
			invalid_msg = true;
		}
	} else if (argc == 2) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		block = tmp;
		sector_div(size, dd->sect_per_block);
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, 0);
		else if (!strcasecmp(argv[0], "removebadblock"))
			r = dust_remove_block(dd, block);
		else if (!strcasecmp(argv[0], "queryblock"))
			r = dust_query_block(dd, block, result, maxlen, &sz);
		else
			invalid_msg = true;

	} else if (argc == 3) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		if (sscanf(argv[2], "%u%c", &tmp_ui, &dummy) != 1)
			return r;

		block = tmp;
		if (tmp_ui > 255) {
			DMERR("selected write fail count out of range");
			return r;
		}
		wr_fail_cnt = tmp_ui;
		sector_div(size, dd->sect_per_block);
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, wr_fail_cnt);
		else
			invalid_msg = true;

	} else
		DMERR("invalid number of arguments '%d'", argc);

	if (invalid_msg)
		DMERR("unrecognized message '%s' received", argv[0]);

	return r;
}

static void dust_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %s %s", dd->dev->name,
		       dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
		       dd->quiet_mode ? "quiet" : "verbose");
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u", dd->dev->name,
		       (unsigned long long)dd->start, dd->blksz);
		break;

	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}
}

static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dust_device *dd = ti->private;
	struct dm_dev *dev = dd->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (dd->start || ti->len != bdev_nr_sectors(dev->bdev))
		return 1;

	return 0;
}

static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
				void *data)
{
	struct dust_device *dd = ti->private;

	return fn(ti, dd->dev, dd->start, ti->len, data);
}

static struct target_type dust_target = {
	.name = "dust",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = dust_ctr,
	.dtr = dust_dtr,
	.iterate_devices = dust_iterate_devices,
	.map = dust_map,
	.message = dust_message,
	.status = dust_status,
	.prepare_ioctl = dust_prepare_ioctl,
};

static int __init dm_dust_init(void)
{
	int r = dm_register_target(&dust_target);

	if (r < 0)
		DMERR("dm_register_target failed %d", r);

	return r;
}

static void __exit dm_dust_exit(void)
{
	dm_unregister_target(&dust_target);
}

module_init(dm_dust_init);
module_exit(dm_dust_exit);

MODULE_DESCRIPTION(DM_NAME " dust test target");
MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
drivers/md/dm-dust.c (v5.4): the earlier version of the same target, before the per-block write failure count (wr_fail_cnt), the listbadblocks message, and DMEMIT-based message results were added.
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * This is a test "dust" device, which fails reads on specified
 * sectors, emulating the behavior of a hard disk drive sending
 * a "Read Medium Error" sense.
 *
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "dust"

struct badblock {
	struct rb_node node;
	sector_t bb;
};

struct dust_device {
	struct dm_dev *dev;
	struct rb_root badblocklist;
	unsigned long long badblock_count;
	spinlock_t dust_lock;
	unsigned int blksz;
	int sect_per_block_shift;
	unsigned int sect_per_block;
	sector_t start;
	bool fail_read_on_bb:1;
	bool quiet_mode:1;
};

static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct badblock *bblk = rb_entry(node, struct badblock, node);

		if (bblk->bb > blk)
			node = node->rb_left;
		else if (bblk->bb < blk)
			node = node->rb_right;
		else
			return bblk;
	}

	return NULL;
}

static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
{
	struct badblock *bblk;
	struct rb_node **link = &root->rb_node, *parent = NULL;
	sector_t value = new->bb;

	while (*link) {
		parent = *link;
		bblk = rb_entry(parent, struct badblock, node);

		if (bblk->bb > value)
			link = &(*link)->rb_left;
		else if (bblk->bb < value)
			link = &(*link)->rb_right;
		else
			return false;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);

	return true;
}

static int dust_remove_block(struct dust_device *dd, unsigned long long block)
{
	struct badblock *bblock;
	unsigned long flags;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);

	if (bblock == NULL) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu not found in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		return -EINVAL;
	}

	rb_erase(&bblock->node, &dd->badblocklist);
	dd->badblock_count--;
	if (!dd->quiet_mode)
		DMINFO("%s: badblock removed at block %llu", __func__, block);
	kfree(bblock);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

static int dust_add_block(struct dust_device *dd, unsigned long long block)
{
	struct badblock *bblock;
	unsigned long flags;

	bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
	if (bblock == NULL) {
		if (!dd->quiet_mode)
			DMERR("%s: badblock allocation failed", __func__);
		return -ENOMEM;
	}

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock->bb = block;
	if (!dust_rb_insert(&dd->badblocklist, bblock)) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu already in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		kfree(bblock);
		return -EINVAL;
	}

	dd->badblock_count++;
	if (!dd->quiet_mode)
		DMINFO("%s: badblock added at block %llu", __func__, block);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

static int dust_query_block(struct dust_device *dd, unsigned long long block)
{
	struct badblock *bblock;
	unsigned long flags;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);
	if (bblock != NULL)
		DMINFO("%s: block %llu found in badblocklist", __func__, block);
	else
		DMINFO("%s: block %llu not found in badblocklist", __func__, block);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk)
		return DM_MAPIO_KILL;

	return DM_MAPIO_REMAPPED;
}

static int dust_map_read(struct dust_device *dd, sector_t thisblock,
			 bool fail_read_on_bb)
{
	unsigned long flags;
	int ret = DM_MAPIO_REMAPPED;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		ret = __dust_map_read(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return ret;
}

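/* In this version a write to a listed block always clears it. */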
static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk) {
		rb_erase(&bblk->node, &dd->badblocklist);
		dd->badblock_count--;
		kfree(bblk);
		if (!dd->quiet_mode) {
			sector_div(thisblock, dd->sect_per_block);
			DMINFO("block %llu removed from badblocklist by write",
			       (unsigned long long)thisblock);
		}
	}
}

static int dust_map_write(struct dust_device *dd, sector_t thisblock,
			  bool fail_read_on_bb)
{
	unsigned long flags;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		__dust_map_write(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return DM_MAPIO_REMAPPED;
}

static int dust_map(struct dm_target *ti, struct bio *bio)
{
	struct dust_device *dd = ti->private;
	int ret;

	bio_set_dev(bio, dd->dev->bdev);
	bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (bio_data_dir(bio) == READ)
		ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
	else
		ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);

	return ret;
}

static bool __dust_clear_badblocks(struct rb_root *tree,
				   unsigned long long count)
{
	struct rb_node *node = NULL, *nnode = NULL;

	nnode = rb_first(tree);
	if (nnode == NULL) {
		BUG_ON(count != 0);
		return false;
	}

	while (nnode) {
		node = nnode;
		nnode = rb_next(node);
		rb_erase(node, tree);
		count--;
		kfree(node);
	}
	BUG_ON(count != 0);
	BUG_ON(tree->rb_node != NULL);

	return true;
}

static int dust_clear_badblocks(struct dust_device *dd)
{
	unsigned long flags;
	struct rb_root badblocklist;
	unsigned long long badblock_count;

	spin_lock_irqsave(&dd->dust_lock, flags);
	badblocklist = dd->badblocklist;
	badblock_count = dd->badblock_count;
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	if (!__dust_clear_badblocks(&badblocklist, badblock_count))
		DMINFO("%s: no badblocks found", __func__);
	else
		DMINFO("%s: badblocks cleared", __func__);

	return 0;
}

/*
 * Target parameters:
 *
 * <device_path> <offset> <blksz>
 *
 * device_path: path to the block device
 * offset: offset to data area from start of device_path
 * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
 */
static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dust_device *dd;
	unsigned long long tmp;
	char dummy;
	unsigned int blksz;
	unsigned int sect_per_block;
	sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
	sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);

	if (argc != 3) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
		ti->error = "Invalid block size parameter";
		return -EINVAL;
	}

	if (blksz < 512) {
		ti->error = "Block size must be at least 512";
		return -EINVAL;
	}

	if (!is_power_of_2(blksz)) {
		ti->error = "Block size must be a power of 2";
		return -EINVAL;
	}

	if (to_sector(blksz) > max_block_sectors) {
		ti->error = "Block size is too large";
		return -EINVAL;
	}

	sect_per_block = (blksz >> SECTOR_SHIFT);

	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device offset sector";
		return -EINVAL;
	}

	dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
	if (dd == NULL) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
		ti->error = "Device lookup failed";
		kfree(dd);
		return -EINVAL;
	}

	dd->sect_per_block = sect_per_block;
	dd->blksz = blksz;
	dd->start = tmp;

	dd->sect_per_block_shift = __ffs(sect_per_block);

	/*
	 * Whether to fail a read on a "bad" block.
	 * Defaults to false; enabled later by message.
	 */
	dd->fail_read_on_bb = false;

	/*
	 * Initialize bad block list rbtree.
	 */
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_lock_init(&dd->dust_lock);

	dd->quiet_mode = false;

	BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);

	ti->num_discard_bios = 1;
	ti->num_flush_bios = 1;
	ti->private = dd;

	return 0;
}

static void dust_dtr(struct dm_target *ti)
{
	struct dust_device *dd = ti->private;

	__dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
	dm_put_device(ti, dd->dev);
	kfree(dd);
}

static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result_buf, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	bool invalid_msg = false;
	int result = -EINVAL;
	unsigned long long tmp, block;
	unsigned long flags;
	char dummy;

	if (argc == 1) {
		if (!strcasecmp(argv[0], "addbadblock") ||
		    !strcasecmp(argv[0], "removebadblock") ||
		    !strcasecmp(argv[0], "queryblock")) {
			DMERR("%s requires an additional argument", argv[0]);
		} else if (!strcasecmp(argv[0], "disable")) {
			DMINFO("disabling read failures on bad sectors");
			dd->fail_read_on_bb = false;
			result = 0;
		} else if (!strcasecmp(argv[0], "enable")) {
			DMINFO("enabling read failures on bad sectors");
			dd->fail_read_on_bb = true;
			result = 0;
		} else if (!strcasecmp(argv[0], "countbadblocks")) {
			spin_lock_irqsave(&dd->dust_lock, flags);
			DMINFO("countbadblocks: %llu badblock(s) found",
			       dd->badblock_count);
			spin_unlock_irqrestore(&dd->dust_lock, flags);
			result = 0;
		} else if (!strcasecmp(argv[0], "clearbadblocks")) {
			result = dust_clear_badblocks(dd);
		} else if (!strcasecmp(argv[0], "quiet")) {
			if (!dd->quiet_mode)
				dd->quiet_mode = true;
			else
				dd->quiet_mode = false;
			result = 0;
		} else {
			invalid_msg = true;
		}
	} else if (argc == 2) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return result;

		block = tmp;
		sector_div(size, dd->sect_per_block);
		if (block > size) {
			DMERR("selected block value out of range");
			return result;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			result = dust_add_block(dd, block);
		else if (!strcasecmp(argv[0], "removebadblock"))
			result = dust_remove_block(dd, block);
		else if (!strcasecmp(argv[0], "queryblock"))
			result = dust_query_block(dd, block);
		else
			invalid_msg = true;

	} else
		DMERR("invalid number of arguments '%d'", argc);

	if (invalid_msg)
		DMERR("unrecognized message '%s' received", argv[0]);

	return result;
}

static void dust_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %s %s", dd->dev->name,
		       dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
		       dd->quiet_mode ? "quiet" : "verbose");
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u", dd->dev->name,
		       (unsigned long long)dd->start, dd->blksz);
		break;
	}
}

static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dust_device *dd = ti->private;
	struct dm_dev *dev = dd->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (dd->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;

	return 0;
}

static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
				void *data)
{
	struct dust_device *dd = ti->private;

	return fn(ti, dd->dev, dd->start, ti->len, data);
}

static struct target_type dust_target = {
	.name = "dust",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = dust_ctr,
	.dtr = dust_dtr,
	.iterate_devices = dust_iterate_devices,
	.map = dust_map,
	.message = dust_message,
	.status = dust_status,
	.prepare_ioctl = dust_prepare_ioctl,
};

static int __init dm_dust_init(void)
{
	int result = dm_register_target(&dust_target);

	if (result < 0)
		DMERR("dm_register_target failed %d", result);

	return result;
}

static void __exit dm_dust_exit(void)
{
	dm_unregister_target(&dust_target);
}

module_init(dm_dust_init);
module_exit(dm_dust_exit);

MODULE_DESCRIPTION(DM_NAME " dust test target");
MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");