v3.1 (drivers/md/dm-flakey.c)
  1/*
  2 * Copyright (C) 2003 Sistina Software (UK) Limited.
  3 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#include <linux/device-mapper.h>
  9
 10#include <linux/module.h>
 11#include <linux/init.h>
 12#include <linux/blkdev.h>
 13#include <linux/bio.h>
 14#include <linux/slab.h>
 15
 16#define DM_MSG_PREFIX "flakey"
 17
 18#define all_corrupt_bio_flags_match(bio, fc)	\
 19	(((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
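/*
 * Reading the check above: a corrupt_bio_flags mask of 0 matches every bio,
 * since (bi_rw & 0) == 0, while a non-zero mask matches only bios that have
 * all of the mask's bits set in bi_rw.
 */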
 20
 21/*
 22 * Flakey: Used for testing only, simulates intermittent,
 23 * catastrophic device failure.
 24 */
 25struct flakey_c {
 26	struct dm_dev *dev;
 27	unsigned long start_time;
 28	sector_t start;
 29	unsigned up_interval;
 30	unsigned down_interval;
 31	unsigned long flags;
 32	unsigned corrupt_bio_byte;
 33	unsigned corrupt_bio_rw;
 34	unsigned corrupt_bio_value;
 35	unsigned corrupt_bio_flags;
 36};
 37
 38enum feature_flag_bits {
 39	DROP_WRITES
 40};
 41
 42static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
 43			  struct dm_target *ti)
 44{
 45	int r;
 46	unsigned argc;
 47	const char *arg_name;
 48
 49	static struct dm_arg _args[] = {
 50		{0, 6, "Invalid number of feature args"},
 51		{1, UINT_MAX, "Invalid corrupt bio byte"},
 52		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
 53		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
 54	};
 55
 56	/* No feature arguments supplied. */
 57	if (!as->argc)
 58		return 0;
 59
 60	r = dm_read_arg_group(_args, as, &argc, &ti->error);
 61	if (r)
 62		return r;
 63
 64	while (argc) {
 65		arg_name = dm_shift_arg(as);
 66		argc--;
 67
 68		/*
 69		 * drop_writes
 70		 */
 71		if (!strcasecmp(arg_name, "drop_writes")) {
 72			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
 73				ti->error = "Feature drop_writes duplicated";
 74				return -EINVAL;
 75			}
 76
 77			continue;
 78		}
 79
 80		/*
 81		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
 82		 */
 83		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
 84			if (!argc) {
 85				ti->error = "Feature corrupt_bio_byte requires parameters";
 86				return -EINVAL;
 87			}
 88
 89			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
 90			if (r)
 91				return r;
 92			argc--;
 93
 94			/*
 95			 * Direction r or w?
 96			 */
 97			arg_name = dm_shift_arg(as);
 98			if (!strcasecmp(arg_name, "w"))
 99				fc->corrupt_bio_rw = WRITE;
100			else if (!strcasecmp(arg_name, "r"))
101				fc->corrupt_bio_rw = READ;
102			else {
103				ti->error = "Invalid corrupt bio direction (r or w)";
104				return -EINVAL;
105			}
106			argc--;
107
108			/*
109			 * Value of byte (0-255) to write in place of correct one.
110			 */
111			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
112			if (r)
113				return r;
114			argc--;
115
116			/*
117			 * Only corrupt bios with these flags set.
118			 */
119			r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
120			if (r)
121				return r;
122			argc--;
123
124			continue;
125		}
126
127		ti->error = "Unrecognised flakey feature requested";
128		return -EINVAL;
129	}
130
131	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
132		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
133		return -EINVAL;
134	}
135
136	return 0;
137}
138
139/*
140 * Construct a flakey mapping:
141 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
142 *
143 *   Feature args:
144 *     [drop_writes]
145 *     [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
146 *
147 *   Nth_byte starts from 1 for the first byte.
148 *   Direction is r for READ or w for WRITE.
149 *   bio_flags is ignored if 0.
150 */
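/*
 * Illustrative table lines (the device path /dev/sdb1 and the 409600-sector
 * length are hypothetical):
 *
 *   Behave normally for 60 seconds, then be "down" for 5 seconds, repeating:
 *     0 409600 flakey /dev/sdb1 0 60 5
 *
 *   Same cycle, but silently drop writes during the down interval:
 *     0 409600 flakey /dev/sdb1 0 60 5 1 drop_writes
 *
 *   During the down interval, overwrite byte 32 of each write bio with 224:
 *     0 409600 flakey /dev/sdb1 0 60 5 5 corrupt_bio_byte 32 w 224 0
 *
 * Such a line can be loaded with, for example,
 * "dmsetup create flaky0 --table '0 409600 flakey /dev/sdb1 0 60 5'".
 */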
151static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
152{
153	static struct dm_arg _args[] = {
154		{0, UINT_MAX, "Invalid up interval"},
155		{0, UINT_MAX, "Invalid down interval"},
156	};
157
158	int r;
159	struct flakey_c *fc;
160	unsigned long long tmpll;
161	struct dm_arg_set as;
162	const char *devname;
163
164	as.argc = argc;
165	as.argv = argv;
166
167	if (argc < 4) {
168		ti->error = "Invalid argument count";
169		return -EINVAL;
170	}
171
172	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
173	if (!fc) {
174		ti->error = "Cannot allocate linear context";
175		return -ENOMEM;
176	}
177	fc->start_time = jiffies;
178
179	devname = dm_shift_arg(&as);
180
181	if (sscanf(dm_shift_arg(&as), "%llu", &tmpll) != 1) {
182		ti->error = "Invalid device sector";
183		goto bad;
184	}
185	fc->start = tmpll;
186
187	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
188	if (r)
189		goto bad;
190
191	r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
192	if (r)
193		goto bad;
194
195	if (!(fc->up_interval + fc->down_interval)) {
196		ti->error = "Total (up + down) interval is zero";
197		goto bad;
198	}
199
200	if (fc->up_interval + fc->down_interval < fc->up_interval) {
201		ti->error = "Interval overflow";
202		goto bad;
203	}
204
205	r = parse_features(&as, fc, ti);
206	if (r)
207		goto bad;
208
209	if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) {
210		ti->error = "Device lookup failed";
211		goto bad;
212	}
213
214	ti->num_flush_requests = 1;
215	ti->num_discard_requests = 1;
216	ti->private = fc;
217	return 0;
218
219bad:
220	kfree(fc);
221	return -EINVAL;
222}
223
224static void flakey_dtr(struct dm_target *ti)
225{
226	struct flakey_c *fc = ti->private;
227
228	dm_put_device(ti, fc->dev);
229	kfree(fc);
230}
231
232static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
233{
234	struct flakey_c *fc = ti->private;
235
236	return fc->start + dm_target_offset(ti, bi_sector);
237}
238
239static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
240{
241	struct flakey_c *fc = ti->private;
242
243	bio->bi_bdev = fc->dev->bdev;
244	if (bio_sectors(bio))
245		bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
246}
247
248static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
249{
250	unsigned bio_bytes = bio_cur_bytes(bio);
251	char *data = bio_data(bio);
252
253	/*
254	 * Overwrite the Nth byte of the data returned.
255	 */
256	if (data && bio_bytes >= fc->corrupt_bio_byte) {
257		data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
258
259		DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
260			"(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
261			bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
262			(bio_data_dir(bio) == WRITE) ? 'w' : 'r',
263			bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
264	}
265}
266
267static int flakey_map(struct dm_target *ti, struct bio *bio,
268		      union map_info *map_context)
269{
270	struct flakey_c *fc = ti->private;
271	unsigned elapsed;
272
273	/* Are we alive ? */
274	elapsed = (jiffies - fc->start_time) / HZ;
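	/*
	 * Worked example: with up_interval=60 and down_interval=5 the cycle
	 * is 65 seconds long; at elapsed=63, 63 % 65 = 63 >= 60, so the
	 * device is in its down interval, while at elapsed=130, 130 % 65 = 0,
	 * which is < 60, so it is up again.
	 */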
275	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
276		/*
277		 * Flag this bio as submitted while down.
278		 */
279		map_context->ll = 1;
280
281		/*
282		 * Map reads as normal.
283		 */
284		if (bio_data_dir(bio) == READ)
285			goto map_bio;
286
287		/*
288		 * Drop writes?
289		 */
290		if (test_bit(DROP_WRITES, &fc->flags)) {
291			bio_endio(bio, 0);
292			return DM_MAPIO_SUBMITTED;
293		}
294
295		/*
296		 * Corrupt matching writes.
297		 */
298		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
299			if (all_corrupt_bio_flags_match(bio, fc))
300				corrupt_bio_data(bio, fc);
301			goto map_bio;
302		}
303
304		/*
305		 * By default, error all I/O.
306		 */
307		return -EIO;
308	}
309
310map_bio:
311	flakey_map_bio(ti, bio);
312
313	return DM_MAPIO_REMAPPED;
314}
315
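/*
 * Read data only exists once the underlying device has completed the bio, so
 * read corruption is applied here in the completion path; write corruption is
 * applied earlier, in flakey_map(), before the bio reaches the device.
 */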
316static int flakey_end_io(struct dm_target *ti, struct bio *bio,
317			 int error, union map_info *map_context)
318{
319	struct flakey_c *fc = ti->private;
320	unsigned bio_submitted_while_down = map_context->ll;
321
322	/*
323	 * Corrupt successful READs while in down state.
324	 * If flags were specified, only corrupt those that match.
325	 */
326	if (!error && bio_submitted_while_down &&
327	    (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
328	    all_corrupt_bio_flags_match(bio, fc))
329		corrupt_bio_data(bio, fc);
330
331	return error;
332}
333
334static int flakey_status(struct dm_target *ti, status_type_t type,
335			 char *result, unsigned int maxlen)
336{
337	unsigned sz = 0;
338	struct flakey_c *fc = ti->private;
339	unsigned drop_writes;
340
341	switch (type) {
342	case STATUSTYPE_INFO:
343		result[0] = '\0';
344		break;
345
346	case STATUSTYPE_TABLE:
347		DMEMIT("%s %llu %u %u ", fc->dev->name,
348		       (unsigned long long)fc->start, fc->up_interval,
349		       fc->down_interval);
350
351		drop_writes = test_bit(DROP_WRITES, &fc->flags);
352		DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);
353
354		if (drop_writes)
355			DMEMIT("drop_writes ");
356
357		if (fc->corrupt_bio_byte)
358			DMEMIT("corrupt_bio_byte %u %c %u %u ",
359			       fc->corrupt_bio_byte,
360			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
361			       fc->corrupt_bio_value, fc->corrupt_bio_flags);
362
363		break;
364	}
365	return 0;
366}
367
368static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
369{
370	struct flakey_c *fc = ti->private;
371
372	return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
373}
374
375static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
376			struct bio_vec *biovec, int max_size)
377{
378	struct flakey_c *fc = ti->private;
379	struct request_queue *q = bdev_get_queue(fc->dev->bdev);
380
381	if (!q->merge_bvec_fn)
382		return max_size;
383
384	bvm->bi_bdev = fc->dev->bdev;
385	bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);
386
387	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
388}
389
390static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
391{
392	struct flakey_c *fc = ti->private;
393
394	return fn(ti, fc->dev, fc->start, ti->len, data);
395}
396
397static struct target_type flakey_target = {
398	.name   = "flakey",
399	.version = {1, 2, 0},
400	.module = THIS_MODULE,
401	.ctr    = flakey_ctr,
402	.dtr    = flakey_dtr,
403	.map    = flakey_map,
404	.end_io = flakey_end_io,
405	.status = flakey_status,
406	.ioctl	= flakey_ioctl,
407	.merge	= flakey_merge,
408	.iterate_devices = flakey_iterate_devices,
409};
410
411static int __init dm_flakey_init(void)
412{
413	int r = dm_register_target(&flakey_target);
414
415	if (r < 0)
416		DMERR("register failed %d", r);
417
418	return r;
419}
420
421static void __exit dm_flakey_exit(void)
422{
423	dm_unregister_target(&flakey_target);
424}
425
426/* Module hooks */
427module_init(dm_flakey_init);
428module_exit(dm_flakey_exit);
429
430MODULE_DESCRIPTION(DM_NAME " flakey target");
431MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
432MODULE_LICENSE("GPL");
v6.2 (drivers/md/dm-flakey.c)
  1/*
  2 * Copyright (C) 2003 Sistina Software (UK) Limited.
  3 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#include <linux/device-mapper.h>
  9
 10#include <linux/module.h>
 11#include <linux/init.h>
 12#include <linux/blkdev.h>
 13#include <linux/bio.h>
 14#include <linux/slab.h>
 15
 16#define DM_MSG_PREFIX "flakey"
 17
 18#define all_corrupt_bio_flags_match(bio, fc)	\
 19	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
 20
 21/*
 22 * Flakey: Used for testing only, simulates intermittent,
 23 * catastrophic device failure.
 24 */
 25struct flakey_c {
 26	struct dm_dev *dev;
 27	unsigned long start_time;
 28	sector_t start;
 29	unsigned up_interval;
 30	unsigned down_interval;
 31	unsigned long flags;
 32	unsigned corrupt_bio_byte;
 33	unsigned corrupt_bio_rw;
 34	unsigned corrupt_bio_value;
 35	blk_opf_t corrupt_bio_flags;
 36};
 37
 38enum feature_flag_bits {
 39	DROP_WRITES,
 40	ERROR_WRITES
 41};
 42
 43struct per_bio_data {
 44	bool bio_submitted;
 45};
 46
 47static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
 48			  struct dm_target *ti)
 49{
 50	int r;
 51	unsigned argc;
 52	const char *arg_name;
 53
 54	static const struct dm_arg _args[] = {
 55		{0, 6, "Invalid number of feature args"},
 56		{1, UINT_MAX, "Invalid corrupt bio byte"},
 57		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
 58		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
 59	};
 60
 61	/* No feature arguments supplied. */
 62	if (!as->argc)
 63		return 0;
 64
 65	r = dm_read_arg_group(_args, as, &argc, &ti->error);
 66	if (r)
 67		return r;
 68
 69	while (argc) {
 70		arg_name = dm_shift_arg(as);
 71		argc--;
 72
 73		if (!arg_name) {
 74			ti->error = "Insufficient feature arguments";
 75			return -EINVAL;
 76		}
 77
 78		/*
 79		 * drop_writes
 80		 */
 81		if (!strcasecmp(arg_name, "drop_writes")) {
 82			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
 83				ti->error = "Feature drop_writes duplicated";
 84				return -EINVAL;
 85			} else if (test_bit(ERROR_WRITES, &fc->flags)) {
 86				ti->error = "Feature drop_writes conflicts with feature error_writes";
 87				return -EINVAL;
 88			}
 89
 90			continue;
 91		}
 92
 93		/*
 94		 * error_writes
 95		 */
 96		if (!strcasecmp(arg_name, "error_writes")) {
 97			if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
 98				ti->error = "Feature error_writes duplicated";
 99				return -EINVAL;
100
101			} else if (test_bit(DROP_WRITES, &fc->flags)) {
102				ti->error = "Feature error_writes conflicts with feature drop_writes";
103				return -EINVAL;
104			}
105
106			continue;
107		}
108
109		/*
110		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
111		 */
112		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
113			if (!argc) {
114				ti->error = "Feature corrupt_bio_byte requires parameters";
115				return -EINVAL;
116			}
117
118			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
119			if (r)
120				return r;
121			argc--;
122
123			/*
124			 * Direction r or w?
125			 */
126			arg_name = dm_shift_arg(as);
127			if (!strcasecmp(arg_name, "w"))
128				fc->corrupt_bio_rw = WRITE;
129			else if (!strcasecmp(arg_name, "r"))
130				fc->corrupt_bio_rw = READ;
131			else {
132				ti->error = "Invalid corrupt bio direction (r or w)";
133				return -EINVAL;
134			}
135			argc--;
136
137			/*
138			 * Value of byte (0-255) to write in place of correct one.
139			 */
140			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
141			if (r)
142				return r;
143			argc--;
144
145			/*
146			 * Only corrupt bios with these flags set.
147			 */
148			BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
149				     sizeof(unsigned int));
150			r = dm_read_arg(_args + 3, as,
151				(__force unsigned *)&fc->corrupt_bio_flags,
152				&ti->error);
153			if (r)
154				return r;
155			argc--;
156
157			continue;
158		}
159
160		ti->error = "Unrecognised flakey feature requested";
161		return -EINVAL;
162	}
163
164	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
165		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
166		return -EINVAL;
167
168	} else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
169		ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
170		return -EINVAL;
171	}
172
173	return 0;
174}
175
176/*
177 * Construct a flakey mapping:
178 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
179 *
180 *   Feature args:
181 *     [drop_writes]
182 *     [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
183 *
184 *   Nth_byte starts from 1 for the first byte.
185 *   Direction is r for READ or w for WRITE.
186 *   bio_flags is ignored if 0.
187 */
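/*
 * parse_features() above also accepts an "error_writes" feature (writes are
 * completed with an I/O error during the down interval), which is mutually
 * exclusive with drop_writes.  A hypothetical table line enabling it:
 *
 *   0 409600 flakey /dev/sdb1 0 60 5 1 error_writes
 */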
188static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
189{
190	static const struct dm_arg _args[] = {
191		{0, UINT_MAX, "Invalid up interval"},
192		{0, UINT_MAX, "Invalid down interval"},
193	};
194
195	int r;
196	struct flakey_c *fc;
197	unsigned long long tmpll;
198	struct dm_arg_set as;
199	const char *devname;
200	char dummy;
201
202	as.argc = argc;
203	as.argv = argv;
204
205	if (argc < 4) {
206		ti->error = "Invalid argument count";
207		return -EINVAL;
208	}
209
210	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
211	if (!fc) {
212		ti->error = "Cannot allocate context";
213		return -ENOMEM;
214	}
215	fc->start_time = jiffies;
216
217	devname = dm_shift_arg(&as);
218
219	r = -EINVAL;
220	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
221		ti->error = "Invalid device sector";
222		goto bad;
223	}
224	fc->start = tmpll;
225
226	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
227	if (r)
228		goto bad;
229
230	r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
231	if (r)
232		goto bad;
233
234	if (!(fc->up_interval + fc->down_interval)) {
235		ti->error = "Total (up + down) interval is zero";
236		r = -EINVAL;
237		goto bad;
238	}
239
240	if (fc->up_interval + fc->down_interval < fc->up_interval) {
241		ti->error = "Interval overflow";
242		r = -EINVAL;
243		goto bad;
244	}
245
246	r = parse_features(&as, fc, ti);
247	if (r)
248		goto bad;
249
250	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
251	if (r) {
252		ti->error = "Device lookup failed";
253		goto bad;
254	}
255
256	ti->num_flush_bios = 1;
257	ti->num_discard_bios = 1;
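	/*
	 * Ask the device-mapper core to reserve this much per-bio scratch
	 * space with each cloned bio; flakey_map() and flakey_end_io()
	 * retrieve it via dm_per_bio_data() to remember whether the bio was
	 * submitted during a down interval.
	 */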
258	ti->per_io_data_size = sizeof(struct per_bio_data);
259	ti->private = fc;
260	return 0;
261
262bad:
263	kfree(fc);
264	return r;
265}
266
267static void flakey_dtr(struct dm_target *ti)
268{
269	struct flakey_c *fc = ti->private;
270
271	dm_put_device(ti, fc->dev);
272	kfree(fc);
273}
274
275static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
276{
277	struct flakey_c *fc = ti->private;
278
279	return fc->start + dm_target_offset(ti, bi_sector);
280}
281
282static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
283{
284	struct flakey_c *fc = ti->private;
285
286	bio_set_dev(bio, fc->dev->bdev);
287	bio->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
288}
289
290static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
291{
292	unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;
293
294	struct bvec_iter iter;
295	struct bio_vec bvec;
296
297	if (!bio_has_data(bio))
298		return;
299
300	/*
301	 * Overwrite the Nth byte of the bio's data, on whichever page
302	 * it falls.
303	 */
304	bio_for_each_segment(bvec, bio, iter) {
305		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
306			char *segment = (page_address(bio_iter_page(bio, iter))
307					 + bio_iter_offset(bio, iter));
308			segment[corrupt_bio_byte] = fc->corrupt_bio_value;
309			DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
310				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
311				bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
312				(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
313				(unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size);
314			break;
315		}
316		corrupt_bio_byte -= bio_iter_len(bio, iter);
317	}
318}
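
/*
 * Example of the walk above: with corrupt_bio_byte == 5000 (0-based index
 * 4999) and a bio built from two 4096-byte segments, the first segment is
 * skipped (4096 is not > 4999), the index drops to 4999 - 4096 = 903, and
 * byte 903 of the second segment is overwritten.
 */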
319
320static int flakey_map(struct dm_target *ti, struct bio *bio)
321{
322	struct flakey_c *fc = ti->private;
323	unsigned elapsed;
324	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
325	pb->bio_submitted = false;
326
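	/*
	 * Zone management operations (reset/open/close/finish) carry no data
	 * and are needed to keep a zoned device usable, so they are passed
	 * straight through rather than dropped, corrupted or errored.
	 */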
327	if (op_is_zone_mgmt(bio_op(bio)))
328		goto map_bio;
329
330	/* Are we alive ? */
331	elapsed = (jiffies - fc->start_time) / HZ;
332	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
333		/*
334		 * Flag this bio as submitted while down.
335		 */
336		pb->bio_submitted = true;
337
338		/*
339		 * Error reads if neither corrupt_bio_byte or drop_writes or error_writes are set.
340		 * Otherwise, flakey_end_io() will decide if the reads should be modified.
341		 */
342		if (bio_data_dir(bio) == READ) {
343			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
344			    !test_bit(ERROR_WRITES, &fc->flags))
345				return DM_MAPIO_KILL;
346			goto map_bio;
347		}
348
349		/*
350		 * Drop or error writes?
351		 */
352		if (test_bit(DROP_WRITES, &fc->flags)) {
353			bio_endio(bio);
354			return DM_MAPIO_SUBMITTED;
355		}
356		else if (test_bit(ERROR_WRITES, &fc->flags)) {
357			bio_io_error(bio);
358			return DM_MAPIO_SUBMITTED;
359		}
360
361		/*
362		 * Corrupt matching writes.
363		 */
364		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
365			if (all_corrupt_bio_flags_match(bio, fc))
366				corrupt_bio_data(bio, fc);
367			goto map_bio;
368		}
369
370		/*
371		 * By default, error all I/O.
372		 */
373		return DM_MAPIO_KILL;
374	}
375
376map_bio:
377	flakey_map_bio(ti, bio);
378
379	return DM_MAPIO_REMAPPED;
380}
381
382static int flakey_end_io(struct dm_target *ti, struct bio *bio,
383			 blk_status_t *error)
384{
385	struct flakey_c *fc = ti->private;
386	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
387
388	if (op_is_zone_mgmt(bio_op(bio)))
389		return DM_ENDIO_DONE;
390
391	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
392		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
393		    all_corrupt_bio_flags_match(bio, fc)) {
394			/*
395			 * Corrupt successful matching READs while in down state.
396			 */
397			corrupt_bio_data(bio, fc);
398
399		} else if (!test_bit(DROP_WRITES, &fc->flags) &&
400			   !test_bit(ERROR_WRITES, &fc->flags)) {
401			/*
402			 * Error read during the down_interval if drop_writes
403			 * and error_writes were not configured.
404			 */
405			*error = BLK_STS_IOERR;
406		}
407	}
408
409	return DM_ENDIO_DONE;
410}
411
412static void flakey_status(struct dm_target *ti, status_type_t type,
413			  unsigned status_flags, char *result, unsigned maxlen)
414{
415	unsigned sz = 0;
416	struct flakey_c *fc = ti->private;
417	unsigned drop_writes, error_writes;
418
419	switch (type) {
420	case STATUSTYPE_INFO:
421		result[0] = '\0';
422		break;
423
424	case STATUSTYPE_TABLE:
425		DMEMIT("%s %llu %u %u ", fc->dev->name,
426		       (unsigned long long)fc->start, fc->up_interval,
427		       fc->down_interval);
428
429		drop_writes = test_bit(DROP_WRITES, &fc->flags);
430		error_writes = test_bit(ERROR_WRITES, &fc->flags);
431		DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);
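		/*
		 * The count emitted above mirrors parse_features(): drop_writes
		 * or error_writes contribute one feature argument each, and a
		 * configured corrupt_bio_byte contributes five (the keyword
		 * plus its four parameters), so e.g. drop_writes combined with
		 * corrupt_bio_byte is reported as a count of 6.
		 */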
432
433		if (drop_writes)
434			DMEMIT("drop_writes ");
435		else if (error_writes)
436			DMEMIT("error_writes ");
437
438		if (fc->corrupt_bio_byte)
439			DMEMIT("corrupt_bio_byte %u %c %u %u ",
440			       fc->corrupt_bio_byte,
441			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
442			       fc->corrupt_bio_value, fc->corrupt_bio_flags);
443
444		break;
445
446	case STATUSTYPE_IMA:
447		result[0] = '\0';
448		break;
449	}
450}
451
452static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
453{
454	struct flakey_c *fc = ti->private;
455
456	*bdev = fc->dev->bdev;
457
458	/*
459	 * Only pass ioctls through if the device sizes match exactly.
460	 */
461	if (fc->start || ti->len != bdev_nr_sectors((*bdev)))
462		return 1;
463	return 0;
464}
465
466#ifdef CONFIG_BLK_DEV_ZONED
467static int flakey_report_zones(struct dm_target *ti,
468		struct dm_report_zones_args *args, unsigned int nr_zones)
469{
470	struct flakey_c *fc = ti->private;
471
472	return dm_report_zones(fc->dev->bdev, fc->start,
473			       flakey_map_sector(ti, args->next_sector),
474			       args, nr_zones);
475}
476#else
477#define flakey_report_zones NULL
478#endif
479
480static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
481{
482	struct flakey_c *fc = ti->private;
483
484	return fn(ti, fc->dev, fc->start, ti->len, data);
485}
486
487static struct target_type flakey_target = {
488	.name   = "flakey",
489	.version = {1, 5, 0},
490	.features = DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
491	.report_zones = flakey_report_zones,
492	.module = THIS_MODULE,
493	.ctr    = flakey_ctr,
494	.dtr    = flakey_dtr,
495	.map    = flakey_map,
496	.end_io = flakey_end_io,
497	.status = flakey_status,
498	.prepare_ioctl = flakey_prepare_ioctl,
499	.iterate_devices = flakey_iterate_devices,
500};
501
502static int __init dm_flakey_init(void)
503{
504	int r = dm_register_target(&flakey_target);
505
506	if (r < 0)
507		DMERR("register failed %d", r);
508
509	return r;
510}
511
512static void __exit dm_flakey_exit(void)
513{
514	dm_unregister_target(&flakey_target);
515}
516
517/* Module hooks */
518module_init(dm_flakey_init);
519module_exit(dm_flakey_exit);
520
521MODULE_DESCRIPTION(DM_NAME " flakey target");
522MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
523MODULE_LICENSE("GPL");