v3.5.6
  1/*
  2 * Copyright (C) 2003 Sistina Software (UK) Limited.
  3 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#include <linux/device-mapper.h>
  9
 10#include <linux/module.h>
 11#include <linux/init.h>
 12#include <linux/blkdev.h>
 13#include <linux/bio.h>
 14#include <linux/slab.h>
 15
 16#define DM_MSG_PREFIX "flakey"
 17
 18#define all_corrupt_bio_flags_match(bio, fc)	\
 19	(((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
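/*
 * Worked example (illustrative values, not from the original source): with
 * corrupt_bio_flags == REQ_FUA, the check above passes only for bios that
 * carry REQ_FUA in bi_rw; a mask of 0 matches every bio.
 */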
 20
 21/*
 22 * Flakey: Used for testing only, simulates intermittent,
 23 * catastrophic device failure.
 24 */
 25struct flakey_c {
 26	struct dm_dev *dev;
 27	unsigned long start_time;
 28	sector_t start;
 29	unsigned up_interval;
 30	unsigned down_interval;
 31	unsigned long flags;
 32	unsigned corrupt_bio_byte;
 33	unsigned corrupt_bio_rw;
 34	unsigned corrupt_bio_value;
 35	unsigned corrupt_bio_flags;
 36};
 37
 38enum feature_flag_bits {
 39	DROP_WRITES
 40};
 41
 42static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
 43			  struct dm_target *ti)
 44{
 45	int r;
 46	unsigned argc;
 47	const char *arg_name;
 48
 49	static struct dm_arg _args[] = {
 50		{0, 6, "Invalid number of feature args"},
 51		{1, UINT_MAX, "Invalid corrupt bio byte"},
 52		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
 53		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
 54	};
 55
 56	/* No feature arguments supplied. */
 57	if (!as->argc)
 58		return 0;
 59
 60	r = dm_read_arg_group(_args, as, &argc, &ti->error);
 61	if (r)
 62		return r;
 63
 64	while (argc) {
 65		arg_name = dm_shift_arg(as);
 66		argc--;
 67
 68		/*
 69		 * drop_writes
 70		 */
 71		if (!strcasecmp(arg_name, "drop_writes")) {
 72			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
 73				ti->error = "Feature drop_writes duplicated";
 74				return -EINVAL;
 75			}
 76
 77			continue;
 78		}
 79
 80		/*
 81		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
 82		 */
 83		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
 84			if (!argc) {
 85				ti->error = "Feature corrupt_bio_byte requires parameters";
 86				return -EINVAL;
 87			}
 88
 89			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
 90			if (r)
 91				return r;
 92			argc--;
 93
 94			/*
 95			 * Direction r or w?
 96			 */
 97			arg_name = dm_shift_arg(as);
 98			if (!strcasecmp(arg_name, "w"))
 99				fc->corrupt_bio_rw = WRITE;
100			else if (!strcasecmp(arg_name, "r"))
101				fc->corrupt_bio_rw = READ;
102			else {
103				ti->error = "Invalid corrupt bio direction (r or w)";
104				return -EINVAL;
105			}
106			argc--;
107
108			/*
109			 * Value of byte (0-255) to write in place of correct one.
110			 */
111			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
112			if (r)
113				return r;
114			argc--;
115
116			/*
117			 * Only corrupt bios with these flags set.
118			 */
119			r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
120			if (r)
121				return r;
122			argc--;
123
124			continue;
125		}
126
127		ti->error = "Unrecognised flakey feature requested";
128		return -EINVAL;
129	}
130
131	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
132		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
133		return -EINVAL;
134	}
135
136	return 0;
137}
138
139/*
140 * Construct a flakey mapping:
141 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
142 *
143 *   Feature args:
144 *     [drop_writes]
145 *     [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
146 *
147 *   Nth_byte starts from 1 for the first byte.
148 *   Direction is r for READ or w for WRITE.
149 *   bio_flags is ignored if 0.
150 */
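/*
 * Example table line (illustrative; device path and sector counts are
 * arbitrary):
 *
 *   dmsetup create flakey0 --table "0 409600 flakey /dev/sdb1 0 60 15 1 drop_writes"
 *
 * This maps 409600 sectors of /dev/sdb1 at offset 0, behaves normally for
 * 60 seconds, then silently drops writes for 15 seconds, repeating.
 */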
151static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
152{
153	static struct dm_arg _args[] = {
154		{0, UINT_MAX, "Invalid up interval"},
155		{0, UINT_MAX, "Invalid down interval"},
156	};
157
158	int r;
159	struct flakey_c *fc;
160	unsigned long long tmpll;
161	struct dm_arg_set as;
162	const char *devname;
163	char dummy;
164
165	as.argc = argc;
166	as.argv = argv;
167
168	if (argc < 4) {
169		ti->error = "Invalid argument count";
170		return -EINVAL;
171	}
172
173	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
174	if (!fc) {
175		ti->error = "Cannot allocate linear context";
176		return -ENOMEM;
177	}
178	fc->start_time = jiffies;
179
180	devname = dm_shift_arg(&as);
181
182	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
183		ti->error = "Invalid device sector";
184		goto bad;
185	}
186	fc->start = tmpll;
187
188	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
189	if (r)
190		goto bad;
191
192	r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
193	if (r)
194		goto bad;
195
196	if (!(fc->up_interval + fc->down_interval)) {
197		ti->error = "Total (up + down) interval is zero";
198		goto bad;
199	}
200
201	if (fc->up_interval + fc->down_interval < fc->up_interval) {
202		ti->error = "Interval overflow";
203		goto bad;
204	}
205
206	r = parse_features(&as, fc, ti);
207	if (r)
208		goto bad;
209
210	if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) {
211		ti->error = "Device lookup failed";
212		goto bad;
213	}
214
215	ti->num_flush_requests = 1;
216	ti->num_discard_requests = 1;
217	ti->private = fc;
218	return 0;
219
220bad:
221	kfree(fc);
222	return -EINVAL;
223}
224
225static void flakey_dtr(struct dm_target *ti)
226{
227	struct flakey_c *fc = ti->private;
228
229	dm_put_device(ti, fc->dev);
230	kfree(fc);
231}
232
233static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
234{
235	struct flakey_c *fc = ti->private;
236
237	return fc->start + dm_target_offset(ti, bi_sector);
238}
239
240static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
241{
242	struct flakey_c *fc = ti->private;
243
244	bio->bi_bdev = fc->dev->bdev;
245	if (bio_sectors(bio))
246		bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
247}
248
249static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
250{
251	unsigned bio_bytes = bio_cur_bytes(bio);
252	char *data = bio_data(bio);
253
254	/*
255	 * Overwrite the Nth byte of the data returned.
256	 */
257	if (data && bio_bytes >= fc->corrupt_bio_byte) {
258		data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
259
260		DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
261			"(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
262			bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
263			(bio_data_dir(bio) == WRITE) ? 'w' : 'r',
264			bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
265	}
266}
267
268static int flakey_map(struct dm_target *ti, struct bio *bio,
269		      union map_info *map_context)
270{
271	struct flakey_c *fc = ti->private;
272	unsigned elapsed;
273
274	/* Are we alive ? */
275	elapsed = (jiffies - fc->start_time) / HZ;
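	/*
	 * Example (illustrative): with up_interval=60 and down_interval=15,
	 * the test below is false for elapsed % 75 in 0-59 (device behaves
	 * normally) and true for 60-74 (device is "down").
	 */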
276	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
277		/*
278		 * Flag this bio as submitted while down.
279		 */
280		map_context->ll = 1;
281
282		/*
283		 * Map reads as normal.
284		 */
285		if (bio_data_dir(bio) == READ)
286			goto map_bio;
287
288		/*
289		 * Drop writes?
290		 */
291		if (test_bit(DROP_WRITES, &fc->flags)) {
292			bio_endio(bio, 0);
293			return DM_MAPIO_SUBMITTED;
294		}
295
296		/*
297		 * Corrupt matching writes.
298		 */
299		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
300			if (all_corrupt_bio_flags_match(bio, fc))
301				corrupt_bio_data(bio, fc);
302			goto map_bio;
303		}
304
305		/*
306		 * By default, error all I/O.
307		 */
308		return -EIO;
309	}
310
311map_bio:
312	flakey_map_bio(ti, bio);
313
314	return DM_MAPIO_REMAPPED;
315}
316
317static int flakey_end_io(struct dm_target *ti, struct bio *bio,
318			 int error, union map_info *map_context)
319{
320	struct flakey_c *fc = ti->private;
321	unsigned bio_submitted_while_down = map_context->ll;
322
323	/*
324	 * Corrupt successful READs while in down state.
325	 * If flags were specified, only corrupt those that match.
326	 */
327	if (fc->corrupt_bio_byte && !error && bio_submitted_while_down &&
328	    (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
329	    all_corrupt_bio_flags_match(bio, fc))
330		corrupt_bio_data(bio, fc);
331
332	return error;
333}
334
335static int flakey_status(struct dm_target *ti, status_type_t type,
336			 char *result, unsigned int maxlen)
337{
338	unsigned sz = 0;
339	struct flakey_c *fc = ti->private;
340	unsigned drop_writes;
341
342	switch (type) {
343	case STATUSTYPE_INFO:
344		result[0] = '\0';
345		break;
346
347	case STATUSTYPE_TABLE:
348		DMEMIT("%s %llu %u %u ", fc->dev->name,
349		       (unsigned long long)fc->start, fc->up_interval,
350		       fc->down_interval);
351
352		drop_writes = test_bit(DROP_WRITES, &fc->flags);
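		/*
		 * Feature-arg count: 1 if drop_writes is set, plus 5 when
		 * corrupt_bio_byte is configured (the keyword and its four
		 * parameters).
		 */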
353		DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);
354
355		if (drop_writes)
356			DMEMIT("drop_writes ");
357
358		if (fc->corrupt_bio_byte)
359			DMEMIT("corrupt_bio_byte %u %c %u %u ",
360			       fc->corrupt_bio_byte,
361			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
362			       fc->corrupt_bio_value, fc->corrupt_bio_flags);
363
364		break;
365	}
366	return 0;
367}
368
369static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
370{
371	struct flakey_c *fc = ti->private;
372	struct dm_dev *dev = fc->dev;
373	int r = 0;
374
375	/*
376	 * Only pass ioctls through if the device sizes match exactly.
377	 */
378	if (fc->start ||
379	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
380		r = scsi_verify_blk_ioctl(NULL, cmd);
381
382	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
383}
384
385static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
386			struct bio_vec *biovec, int max_size)
387{
388	struct flakey_c *fc = ti->private;
389	struct request_queue *q = bdev_get_queue(fc->dev->bdev);
390
391	if (!q->merge_bvec_fn)
392		return max_size;
393
394	bvm->bi_bdev = fc->dev->bdev;
395	bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);
396
397	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
398}
399
400static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
401{
402	struct flakey_c *fc = ti->private;
403
404	return fn(ti, fc->dev, fc->start, ti->len, data);
405}
406
407static struct target_type flakey_target = {
408	.name   = "flakey",
409	.version = {1, 2, 0},
410	.module = THIS_MODULE,
411	.ctr    = flakey_ctr,
412	.dtr    = flakey_dtr,
413	.map    = flakey_map,
414	.end_io = flakey_end_io,
415	.status = flakey_status,
416	.ioctl	= flakey_ioctl,
417	.merge	= flakey_merge,
418	.iterate_devices = flakey_iterate_devices,
419};
420
421static int __init dm_flakey_init(void)
422{
423	int r = dm_register_target(&flakey_target);
424
425	if (r < 0)
426		DMERR("register failed %d", r);
427
428	return r;
429}
430
431static void __exit dm_flakey_exit(void)
432{
433	dm_unregister_target(&flakey_target);
434}
435
436/* Module hooks */
437module_init(dm_flakey_init);
438module_exit(dm_flakey_exit);
439
440MODULE_DESCRIPTION(DM_NAME " flakey target");
441MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
442MODULE_LICENSE("GPL");
v4.17
  1/*
  2 * Copyright (C) 2003 Sistina Software (UK) Limited.
  3 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#include <linux/device-mapper.h>
  9
 10#include <linux/module.h>
 11#include <linux/init.h>
 12#include <linux/blkdev.h>
 13#include <linux/bio.h>
 14#include <linux/slab.h>
 15
 16#define DM_MSG_PREFIX "flakey"
 17
 18#define all_corrupt_bio_flags_match(bio, fc)	\
 19	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
 20
 21/*
 22 * Flakey: Used for testing only, simulates intermittent,
 23 * catastrophic device failure.
 24 */
 25struct flakey_c {
 26	struct dm_dev *dev;
 27	unsigned long start_time;
 28	sector_t start;
 29	unsigned up_interval;
 30	unsigned down_interval;
 31	unsigned long flags;
 32	unsigned corrupt_bio_byte;
 33	unsigned corrupt_bio_rw;
 34	unsigned corrupt_bio_value;
 35	unsigned corrupt_bio_flags;
 36};
 37
 38enum feature_flag_bits {
 39	DROP_WRITES,
 40	ERROR_WRITES
 41};
 42
 43struct per_bio_data {
 44	bool bio_submitted;
 45};
 46
 47static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
 48			  struct dm_target *ti)
 49{
 50	int r;
 51	unsigned argc;
 52	const char *arg_name;
 53
 54	static const struct dm_arg _args[] = {
 55		{0, 6, "Invalid number of feature args"},
 56		{1, UINT_MAX, "Invalid corrupt bio byte"},
 57		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
 58		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
 59	};
 60
 61	/* No feature arguments supplied. */
 62	if (!as->argc)
 63		return 0;
 64
 65	r = dm_read_arg_group(_args, as, &argc, &ti->error);
 66	if (r)
 67		return r;
 68
 69	while (argc) {
 70		arg_name = dm_shift_arg(as);
 71		argc--;
 72
 73		if (!arg_name) {
 74			ti->error = "Insufficient feature arguments";
 75			return -EINVAL;
 76		}
 77
 78		/*
 79		 * drop_writes
 80		 */
 81		if (!strcasecmp(arg_name, "drop_writes")) {
 82			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
 83				ti->error = "Feature drop_writes duplicated";
 84				return -EINVAL;
 85			} else if (test_bit(ERROR_WRITES, &fc->flags)) {
 86				ti->error = "Feature drop_writes conflicts with feature error_writes";
 87				return -EINVAL;
 88			}
 89
 90			continue;
 91		}
 92
 93		/*
 94		 * error_writes
 95		 */
 96		if (!strcasecmp(arg_name, "error_writes")) {
 97			if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
 98				ti->error = "Feature error_writes duplicated";
 99				return -EINVAL;
100
101			} else if (test_bit(DROP_WRITES, &fc->flags)) {
102				ti->error = "Feature error_writes conflicts with feature drop_writes";
103				return -EINVAL;
104			}
105
106			continue;
107		}
108
109		/*
110		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
111		 */
112		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
113			if (!argc) {
114				ti->error = "Feature corrupt_bio_byte requires parameters";
115				return -EINVAL;
116			}
117
118			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
119			if (r)
120				return r;
121			argc--;
122
123			/*
124			 * Direction r or w?
125			 */
126			arg_name = dm_shift_arg(as);
127			if (!strcasecmp(arg_name, "w"))
128				fc->corrupt_bio_rw = WRITE;
129			else if (!strcasecmp(arg_name, "r"))
130				fc->corrupt_bio_rw = READ;
131			else {
132				ti->error = "Invalid corrupt bio direction (r or w)";
133				return -EINVAL;
134			}
135			argc--;
136
137			/*
138			 * Value of byte (0-255) to write in place of correct one.
139			 */
140			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
141			if (r)
142				return r;
143			argc--;
144
145			/*
146			 * Only corrupt bios with these flags set.
147			 */
148			r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
149			if (r)
150				return r;
151			argc--;
152
153			continue;
154		}
155
156		ti->error = "Unrecognised flakey feature requested";
157		return -EINVAL;
158	}
159
160	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
161		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
162		return -EINVAL;
163
164	} else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
165		ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
166		return -EINVAL;
167	}
168
169	return 0;
170}
171
172/*
173 * Construct a flakey mapping:
174 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
175 *
176 *   Feature args:
177 *     [drop_writes]
178 *     [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
179 *
180 *   Nth_byte starts from 1 for the first byte.
181 *   Direction is r for READ or w for WRITE.
182 *   bio_flags is ignored if 0.
183 */
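/*
 * Example table line (illustrative; device path and sector counts are
 * arbitrary):
 *
 *   dmsetup create flakey0 --table "0 409600 flakey /dev/sdc1 0 60 15 1 error_writes"
 *
 * parse_features() below also accepts error_writes, which fails writes with
 * an I/O error during the down interval instead of silently dropping them.
 */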
184static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
185{
186	static const struct dm_arg _args[] = {
187		{0, UINT_MAX, "Invalid up interval"},
188		{0, UINT_MAX, "Invalid down interval"},
189	};
190
191	int r;
192	struct flakey_c *fc;
193	unsigned long long tmpll;
194	struct dm_arg_set as;
195	const char *devname;
196	char dummy;
197
198	as.argc = argc;
199	as.argv = argv;
200
201	if (argc < 4) {
202		ti->error = "Invalid argument count";
203		return -EINVAL;
204	}
205
206	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
207	if (!fc) {
208		ti->error = "Cannot allocate context";
209		return -ENOMEM;
210	}
211	fc->start_time = jiffies;
212
213	devname = dm_shift_arg(&as);
214
215	r = -EINVAL;
216	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
217		ti->error = "Invalid device sector";
218		goto bad;
219	}
220	fc->start = tmpll;
221
222	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
223	if (r)
224		goto bad;
225
226	r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
227	if (r)
228		goto bad;
229
230	if (!(fc->up_interval + fc->down_interval)) {
231		ti->error = "Total (up + down) interval is zero";
232		r = -EINVAL;
233		goto bad;
234	}
235
236	if (fc->up_interval + fc->down_interval < fc->up_interval) {
237		ti->error = "Interval overflow";
238		r = -EINVAL;
239		goto bad;
240	}
241
242	r = parse_features(&as, fc, ti);
243	if (r)
244		goto bad;
245
246	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
247	if (r) {
248		ti->error = "Device lookup failed";
249		goto bad;
250	}
251
252	ti->num_flush_bios = 1;
253	ti->num_discard_bios = 1;
254	ti->per_io_data_size = sizeof(struct per_bio_data);
255	ti->private = fc;
256	return 0;
257
258bad:
259	kfree(fc);
260	return r;
261}
262
263static void flakey_dtr(struct dm_target *ti)
264{
265	struct flakey_c *fc = ti->private;
266
267	dm_put_device(ti, fc->dev);
268	kfree(fc);
269}
270
271static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
272{
273	struct flakey_c *fc = ti->private;
274
275	return fc->start + dm_target_offset(ti, bi_sector);
276}
277
278static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
279{
280	struct flakey_c *fc = ti->private;
281
282	bio_set_dev(bio, fc->dev->bdev);
283	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
284		bio->bi_iter.bi_sector =
285			flakey_map_sector(ti, bio->bi_iter.bi_sector);
286}
287
288static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
289{
290	unsigned bio_bytes = bio_cur_bytes(bio);
291	char *data = bio_data(bio);
292
293	/*
294	 * Overwrite the Nth byte of the data returned.
295	 */
296	if (data && bio_bytes >= fc->corrupt_bio_byte) {
297		data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
298
299		DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
300			"(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
301			bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
302			(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
303			(unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
304	}
305}
306
307static int flakey_map(struct dm_target *ti, struct bio *bio)
308{
309	struct flakey_c *fc = ti->private;
310	unsigned elapsed;
311	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
312	pb->bio_submitted = false;
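	/*
	 * bio_submitted is set to true below when the bio is issued during a
	 * down interval, so that flakey_end_io() can decide whether to
	 * corrupt or fail the completed read.
	 */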
313
314	/* Do not fail reset zone */
315	if (bio_op(bio) == REQ_OP_ZONE_RESET)
316		goto map_bio;
317
318	/* We need to remap reported zones, so remember the BIO iter */
319	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
320		goto map_bio;
321
322	/* Are we alive ? */
323	elapsed = (jiffies - fc->start_time) / HZ;
324	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
325		/*
326		 * Flag this bio as submitted while down.
327		 */
328		pb->bio_submitted = true;
329
330		/*
331		 * Error reads if neither corrupt_bio_byte or drop_writes or error_writes are set.
332		 * Otherwise, flakey_end_io() will decide if the reads should be modified.
333		 */
334		if (bio_data_dir(bio) == READ) {
335			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
336			    !test_bit(ERROR_WRITES, &fc->flags))
337				return DM_MAPIO_KILL;
338			goto map_bio;
339		}
340
341		/*
342		 * Drop or error writes?
343		 */
344		if (test_bit(DROP_WRITES, &fc->flags)) {
345			bio_endio(bio);
346			return DM_MAPIO_SUBMITTED;
347		}
348		else if (test_bit(ERROR_WRITES, &fc->flags)) {
349			bio_io_error(bio);
350			return DM_MAPIO_SUBMITTED;
351		}
352
353		/*
354		 * Corrupt matching writes.
355		 */
356		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
357			if (all_corrupt_bio_flags_match(bio, fc))
358				corrupt_bio_data(bio, fc);
359			goto map_bio;
360		}
361
362		/*
363		 * By default, error all I/O.
364		 */
365		return DM_MAPIO_KILL;
366	}
367
368map_bio:
369	flakey_map_bio(ti, bio);
370
371	return DM_MAPIO_REMAPPED;
372}
373
374static int flakey_end_io(struct dm_target *ti, struct bio *bio,
375			 blk_status_t *error)
376{
377	struct flakey_c *fc = ti->private;
378	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
379
380	if (bio_op(bio) == REQ_OP_ZONE_RESET)
381		return DM_ENDIO_DONE;
382
383	if (bio_op(bio) == REQ_OP_ZONE_REPORT) {
384		dm_remap_zone_report(ti, bio, fc->start);
385		return DM_ENDIO_DONE;
386	}
387
388	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
389		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
390		    all_corrupt_bio_flags_match(bio, fc)) {
391			/*
392			 * Corrupt successful matching READs while in down state.
393			 */
394			corrupt_bio_data(bio, fc);
395
396		} else if (!test_bit(DROP_WRITES, &fc->flags) &&
397			   !test_bit(ERROR_WRITES, &fc->flags)) {
398			/*
399			 * Error read during the down_interval if drop_writes
400			 * and error_writes were not configured.
401			 */
402			*error = BLK_STS_IOERR;
403		}
404	}
405
406	return DM_ENDIO_DONE;
407}
408
409static void flakey_status(struct dm_target *ti, status_type_t type,
410			  unsigned status_flags, char *result, unsigned maxlen)
411{
412	unsigned sz = 0;
413	struct flakey_c *fc = ti->private;
414	unsigned drop_writes, error_writes;
415
416	switch (type) {
417	case STATUSTYPE_INFO:
418		result[0] = '\0';
419		break;
420
421	case STATUSTYPE_TABLE:
422		DMEMIT("%s %llu %u %u ", fc->dev->name,
423		       (unsigned long long)fc->start, fc->up_interval,
424		       fc->down_interval);
425
426		drop_writes = test_bit(DROP_WRITES, &fc->flags);
427		error_writes = test_bit(ERROR_WRITES, &fc->flags);
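		/*
		 * Feature-arg count: 1 if drop_writes or error_writes is set,
		 * plus 5 when corrupt_bio_byte is configured (the keyword and
		 * its four parameters).
		 */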
428		DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);
429
430		if (drop_writes)
431			DMEMIT("drop_writes ");
432		else if (error_writes)
433			DMEMIT("error_writes ");
434
435		if (fc->corrupt_bio_byte)
436			DMEMIT("corrupt_bio_byte %u %c %u %u ",
437			       fc->corrupt_bio_byte,
438			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
439			       fc->corrupt_bio_value, fc->corrupt_bio_flags);
440
441		break;
442	}
443}
444
445static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
446{
447	struct flakey_c *fc = ti->private;
448
449	*bdev = fc->dev->bdev;
450
451	/*
452	 * Only pass ioctls through if the device sizes match exactly.
453	 */
454	if (fc->start ||
455	    ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
456		return 1;
457	return 0;
458}
459
460static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
461{
462	struct flakey_c *fc = ti->private;
463
464	return fn(ti, fc->dev, fc->start, ti->len, data);
465}
466
467static struct target_type flakey_target = {
468	.name   = "flakey",
469	.version = {1, 5, 0},
470	.features = DM_TARGET_ZONED_HM,
471	.module = THIS_MODULE,
472	.ctr    = flakey_ctr,
473	.dtr    = flakey_dtr,
474	.map    = flakey_map,
475	.end_io = flakey_end_io,
476	.status = flakey_status,
477	.prepare_ioctl = flakey_prepare_ioctl,
478	.iterate_devices = flakey_iterate_devices,
479};
480
481static int __init dm_flakey_init(void)
482{
483	int r = dm_register_target(&flakey_target);
484
485	if (r < 0)
486		DMERR("register failed %d", r);
487
488	return r;
489}
490
491static void __exit dm_flakey_exit(void)
492{
493	dm_unregister_target(&flakey_target);
494}
495
496/* Module hooks */
497module_init(dm_flakey_init);
498module_exit(dm_flakey_exit);
499
500MODULE_DESCRIPTION(DM_NAME " flakey target");
501MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
502MODULE_LICENSE("GPL");