v4.6
  1/*
  2 * Copyright (C) 2005-2007 Red Hat GmbH
  3 *
  4 * A target that delays reads and/or writes and can send
  5 * them to different devices.
  6 *
  7 * This file is released under the GPL.
  8 */
  9
 10#include <linux/module.h>
 11#include <linux/init.h>
 12#include <linux/blkdev.h>
 13#include <linux/bio.h>
 14#include <linux/slab.h>
 15
 16#include <linux/device-mapper.h>
 17
 18#define DM_MSG_PREFIX "delay"
 19
 20struct delay_c {
 21	struct timer_list delay_timer;
 22	struct mutex timer_lock;
 23	struct workqueue_struct *kdelayd_wq;
 24	struct work_struct flush_expired_bios;
 25	struct list_head delayed_bios;
 26	atomic_t may_delay;
 27
 28	struct dm_dev *dev_read;
 29	sector_t start_read;
 30	unsigned read_delay;
 31	unsigned reads;
 32
 33	struct dm_dev *dev_write;
 34	sector_t start_write;
 35	unsigned write_delay;
 36	unsigned writes;
 37};
 38
 39struct dm_delay_info {
 40	struct delay_c *context;
 41	struct list_head list;
 42	unsigned long expires;
 43};
 44
 45static DEFINE_MUTEX(delayed_bios_lock);
 46
 47static void handle_delayed_timer(unsigned long data)
 48{
 49	struct delay_c *dc = (struct delay_c *)data;
 50
 51	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
 52}
 53
 54static void queue_timeout(struct delay_c *dc, unsigned long expires)
 55{
 56	mutex_lock(&dc->timer_lock);
 57
 58	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
 59		mod_timer(&dc->delay_timer, expires);
 60
 61	mutex_unlock(&dc->timer_lock);
 62}
 63
 64static void flush_bios(struct bio *bio)
 65{
 66	struct bio *n;
 67
 68	while (bio) {
 69		n = bio->bi_next;
 70		bio->bi_next = NULL;
 71		generic_make_request(bio);
 72		bio = n;
 73	}
 74}
 75
 76static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
 77{
 78	struct dm_delay_info *delayed, *next;
 79	unsigned long next_expires = 0;
 80	int start_timer = 0;
 81	struct bio_list flush_bios = { };
 82
 83	mutex_lock(&delayed_bios_lock);
 84	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
 85		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
 86			struct bio *bio = dm_bio_from_per_bio_data(delayed,
 87						sizeof(struct dm_delay_info));
 88			list_del(&delayed->list);
 89			bio_list_add(&flush_bios, bio);
 90			if ((bio_data_dir(bio) == WRITE))
 91				delayed->context->writes--;
 92			else
 93				delayed->context->reads--;
 94			continue;
 95		}
 96
 97		if (!start_timer) {
 98			start_timer = 1;
 99			next_expires = delayed->expires;
100		} else
101			next_expires = min(next_expires, delayed->expires);
102	}
103
104	mutex_unlock(&delayed_bios_lock);
105
106	if (start_timer)
107		queue_timeout(dc, next_expires);
108
109	return bio_list_get(&flush_bios);
110}
111
112static void flush_expired_bios(struct work_struct *work)
113{
114	struct delay_c *dc;
115
116	dc = container_of(work, struct delay_c, flush_expired_bios);
117	flush_bios(flush_delayed_bios(dc, 0));
118}
119
120/*
121 * Mapping parameters:
122 *    <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
123 *
124 * With separate write parameters, the first set is only used for reads.
125 * Offsets are specified in sectors.
126 * Delays are specified in milliseconds.
127 */
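/*
 * For illustration (a sketch; the device paths, length and delay values
 * here are assumed, not taken from this file): a table line that sends
 * reads to one device with a 20 ms delay and writes to a second device
 * with a 200 ms delay, both starting at sector 0, could look like:
 *
 *     0 2097152 delay /dev/sdX 0 20 /dev/sdY 0 200
 *
 * where 2097152 is the length of the mapping in sectors.
 */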
128static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
129{
130	struct delay_c *dc;
131	unsigned long long tmpll;
132	char dummy;
133	int ret;
134
135	if (argc != 3 && argc != 6) {
136		ti->error = "Requires exactly 3 or 6 arguments";
137		return -EINVAL;
138	}
139
140	dc = kmalloc(sizeof(*dc), GFP_KERNEL);
141	if (!dc) {
142		ti->error = "Cannot allocate context";
143		return -ENOMEM;
144	}
145
146	dc->reads = dc->writes = 0;
147
148	ret = -EINVAL;
149	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
150		ti->error = "Invalid device sector";
151		goto bad;
152	}
153	dc->start_read = tmpll;
154
155	if (sscanf(argv[2], "%u%c", &dc->read_delay, &dummy) != 1) {
156		ti->error = "Invalid delay";
157		goto bad;
158	}
159
160	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
161			    &dc->dev_read);
162	if (ret) {
163		ti->error = "Device lookup failed";
164		goto bad;
165	}
166
167	ret = -EINVAL;
168	dc->dev_write = NULL;
169	if (argc == 3)
170		goto out;
171
172	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
173		ti->error = "Invalid write device sector";
174		goto bad_dev_read;
175	}
176	dc->start_write = tmpll;
177
178	if (sscanf(argv[5], "%u%c", &dc->write_delay, &dummy) != 1) {
179		ti->error = "Invalid write delay";
180		goto bad_dev_read;
181	}
182
183	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
184			    &dc->dev_write);
185	if (ret) {
186		ti->error = "Write device lookup failed";
187		goto bad_dev_read;
188	}
189
190out:
191	ret = -EINVAL;
192	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
193	if (!dc->kdelayd_wq) {
194		DMERR("Couldn't start kdelayd");
195		goto bad_queue;
196	}
197
198	setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
199
200	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
201	INIT_LIST_HEAD(&dc->delayed_bios);
202	mutex_init(&dc->timer_lock);
203	atomic_set(&dc->may_delay, 1);
204
205	ti->num_flush_bios = 1;
206	ti->num_discard_bios = 1;
207	ti->per_io_data_size = sizeof(struct dm_delay_info);
208	ti->private = dc;
209	return 0;
210
211bad_queue:
212	if (dc->dev_write)
213		dm_put_device(ti, dc->dev_write);
214bad_dev_read:
215	dm_put_device(ti, dc->dev_read);
216bad:
217	kfree(dc);
218	return ret;
219}
220
221static void delay_dtr(struct dm_target *ti)
222{
223	struct delay_c *dc = ti->private;
224
225	destroy_workqueue(dc->kdelayd_wq);
226
227	dm_put_device(ti, dc->dev_read);
228
229	if (dc->dev_write)
230		dm_put_device(ti, dc->dev_write);
231
232	kfree(dc);
233}
234
235static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
236{
237	struct dm_delay_info *delayed;
238	unsigned long expires = 0;
239
240	if (!delay || !atomic_read(&dc->may_delay))
241		return DM_MAPIO_REMAPPED;
242
243	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
244
245	delayed->context = dc;
246	delayed->expires = expires = jiffies + msecs_to_jiffies(delay);
247
248	mutex_lock(&delayed_bios_lock);
249
250	if (bio_data_dir(bio) == WRITE)
251		dc->writes++;
252	else
253		dc->reads++;
254
255	list_add_tail(&delayed->list, &dc->delayed_bios);
256
257	mutex_unlock(&delayed_bios_lock);
258
259	queue_timeout(dc, expires);
260
261	return DM_MAPIO_SUBMITTED;
262}
263
264static void delay_presuspend(struct dm_target *ti)
265{
266	struct delay_c *dc = ti->private;
267
268	atomic_set(&dc->may_delay, 0);
269	del_timer_sync(&dc->delay_timer);
270	flush_bios(flush_delayed_bios(dc, 1));
271}
272
273static void delay_resume(struct dm_target *ti)
274{
275	struct delay_c *dc = ti->private;
276
277	atomic_set(&dc->may_delay, 1);
278}
279
280static int delay_map(struct dm_target *ti, struct bio *bio)
281{
282	struct delay_c *dc = ti->private;
283
284	if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
285		bio->bi_bdev = dc->dev_write->bdev;
286		if (bio_sectors(bio))
287			bio->bi_iter.bi_sector = dc->start_write +
288				dm_target_offset(ti, bio->bi_iter.bi_sector);
289
290		return delay_bio(dc, dc->write_delay, bio);
291	}
292
293	bio->bi_bdev = dc->dev_read->bdev;
294	bio->bi_iter.bi_sector = dc->start_read +
295		dm_target_offset(ti, bio->bi_iter.bi_sector);
296
297	return delay_bio(dc, dc->read_delay, bio);
298}
299
300static void delay_status(struct dm_target *ti, status_type_t type,
301			 unsigned status_flags, char *result, unsigned maxlen)
302{
303	struct delay_c *dc = ti->private;
304	int sz = 0;
305
306	switch (type) {
307	case STATUSTYPE_INFO:
308		DMEMIT("%u %u", dc->reads, dc->writes);
309		break;
310
311	case STATUSTYPE_TABLE:
312		DMEMIT("%s %llu %u", dc->dev_read->name,
313		       (unsigned long long) dc->start_read,
314		       dc->read_delay);
315		if (dc->dev_write)
316			DMEMIT(" %s %llu %u", dc->dev_write->name,
317			       (unsigned long long) dc->start_write,
318			       dc->write_delay);
319		break;
320	}
321}
322
323static int delay_iterate_devices(struct dm_target *ti,
324				 iterate_devices_callout_fn fn, void *data)
325{
326	struct delay_c *dc = ti->private;
327	int ret = 0;
328
329	ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data);
330	if (ret)
331		goto out;
332
333	if (dc->dev_write)
334		ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data);
335
336out:
337	return ret;
338}
339
340static struct target_type delay_target = {
341	.name	     = "delay",
342	.version     = {1, 2, 1},
343	.module      = THIS_MODULE,
344	.ctr	     = delay_ctr,
345	.dtr	     = delay_dtr,
346	.map	     = delay_map,
347	.presuspend  = delay_presuspend,
348	.resume	     = delay_resume,
349	.status	     = delay_status,
350	.iterate_devices = delay_iterate_devices,
351};
352
353static int __init dm_delay_init(void)
354{
355	int r;
356
357	r = dm_register_target(&delay_target);
358	if (r < 0) {
359		DMERR("register failed %d", r);
360		goto bad_register;
361	}
362
363	return 0;
364
365bad_register:
366	return r;
367}
368
369static void __exit dm_delay_exit(void)
370{
371	dm_unregister_target(&delay_target);
372}
373
374/* Module hooks */
375module_init(dm_delay_init);
376module_exit(dm_delay_exit);
377
378MODULE_DESCRIPTION(DM_NAME " delay target");
379MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
380MODULE_LICENSE("GPL");
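As a minimal usage sketch of the table format described above delay_ctr(): assuming a scratch block device /dev/sdX and that this target is available (built in or loaded as the dm_delay module), a delayed mapping can be created and inspected from userspace roughly as follows; the device name and the 100 ms delay are assumptions for illustration:

    # delay every bio submitted to the mapping by 100 ms (three-argument form)
    SECTORS=$(blockdev --getsz /dev/sdX)
    echo "0 $SECTORS delay /dev/sdX 0 100" | dmsetup create delayed
    dmsetup status delayed    # STATUSTYPE_INFO part: "<reads> <writes>" bios currently delayed
    dmsetup table delayed     # STATUSTYPE_TABLE part: device, start sector and delay
    dmsetup remove delayed

I/O issued to /dev/mapper/delayed is then held on the delayed_bios list by delay_bio() for the configured time before being resubmitted to the underlying device.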
v3.15
  1/*
  2 * Copyright (C) 2005-2007 Red Hat GmbH
  3 *
  4 * A target that delays reads and/or writes and can send
  5 * them to different devices.
  6 *
  7 * This file is released under the GPL.
  8 */
  9
 10#include <linux/module.h>
 11#include <linux/init.h>
 12#include <linux/blkdev.h>
 13#include <linux/bio.h>
 14#include <linux/slab.h>
 15
 16#include <linux/device-mapper.h>
 17
 18#define DM_MSG_PREFIX "delay"
 19
 20struct delay_c {
 21	struct timer_list delay_timer;
 22	struct mutex timer_lock;
 23	struct workqueue_struct *kdelayd_wq;
 24	struct work_struct flush_expired_bios;
 25	struct list_head delayed_bios;
 26	atomic_t may_delay;
 27
 28	struct dm_dev *dev_read;
 29	sector_t start_read;
 30	unsigned read_delay;
 31	unsigned reads;
 32
 33	struct dm_dev *dev_write;
 34	sector_t start_write;
 35	unsigned write_delay;
 36	unsigned writes;
 37};
 38
 39struct dm_delay_info {
 40	struct delay_c *context;
 41	struct list_head list;
 42	unsigned long expires;
 43};
 44
 45static DEFINE_MUTEX(delayed_bios_lock);
 46
 47static void handle_delayed_timer(unsigned long data)
 48{
 49	struct delay_c *dc = (struct delay_c *)data;
 50
 51	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
 52}
 53
 54static void queue_timeout(struct delay_c *dc, unsigned long expires)
 55{
 56	mutex_lock(&dc->timer_lock);
 57
 58	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
 59		mod_timer(&dc->delay_timer, expires);
 60
 61	mutex_unlock(&dc->timer_lock);
 62}
 63
 64static void flush_bios(struct bio *bio)
 65{
 66	struct bio *n;
 67
 68	while (bio) {
 69		n = bio->bi_next;
 70		bio->bi_next = NULL;
 71		generic_make_request(bio);
 72		bio = n;
 73	}
 74}
 75
 76static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
 77{
 78	struct dm_delay_info *delayed, *next;
 79	unsigned long next_expires = 0;
 80	int start_timer = 0;
 81	struct bio_list flush_bios = { };
 82
 83	mutex_lock(&delayed_bios_lock);
 84	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
 85		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
 86			struct bio *bio = dm_bio_from_per_bio_data(delayed,
 87						sizeof(struct dm_delay_info));
 88			list_del(&delayed->list);
 89			bio_list_add(&flush_bios, bio);
 90			if ((bio_data_dir(bio) == WRITE))
 91				delayed->context->writes--;
 92			else
 93				delayed->context->reads--;
 94			continue;
 95		}
 96
 97		if (!start_timer) {
 98			start_timer = 1;
 99			next_expires = delayed->expires;
100		} else
101			next_expires = min(next_expires, delayed->expires);
102	}
103
104	mutex_unlock(&delayed_bios_lock);
105
106	if (start_timer)
107		queue_timeout(dc, next_expires);
108
109	return bio_list_get(&flush_bios);
110}
111
112static void flush_expired_bios(struct work_struct *work)
113{
114	struct delay_c *dc;
115
116	dc = container_of(work, struct delay_c, flush_expired_bios);
117	flush_bios(flush_delayed_bios(dc, 0));
118}
119
120/*
121 * Mapping parameters:
122 *    <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
123 *
124 * With separate write parameters, the first set is only used for reads.
125 * Delays are specified in milliseconds.
126 */
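/*
 * In this version the millisecond delay is converted to jiffies in
 * delay_bio() below as "delay * HZ / 1000". As a worked example,
 * assuming HZ=250 (HZ is a build-time configuration value), a 100 ms
 * delay becomes 100 * 250 / 1000 = 25 jiffies; the integer division
 * truncates, so delays shorter than one jiffy round down to zero.
 * The v4.6 listing above uses the msecs_to_jiffies() helper for the
 * same conversion.
 */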
127static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
128{
129	struct delay_c *dc;
130	unsigned long long tmpll;
131	char dummy;
132
133	if (argc != 3 && argc != 6) {
134		ti->error = "requires exactly 3 or 6 arguments";
135		return -EINVAL;
136	}
137
138	dc = kmalloc(sizeof(*dc), GFP_KERNEL);
139	if (!dc) {
140		ti->error = "Cannot allocate context";
141		return -ENOMEM;
142	}
143
144	dc->reads = dc->writes = 0;
145
146	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
147		ti->error = "Invalid device sector";
148		goto bad;
149	}
150	dc->start_read = tmpll;
151
152	if (sscanf(argv[2], "%u%c", &dc->read_delay, &dummy) != 1) {
153		ti->error = "Invalid delay";
154		goto bad;
155	}
156
157	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
158			  &dc->dev_read)) {
159		ti->error = "Device lookup failed";
160		goto bad;
161	}
162
163	dc->dev_write = NULL;
164	if (argc == 3)
165		goto out;
166
167	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
168		ti->error = "Invalid write device sector";
169		goto bad_dev_read;
170	}
171	dc->start_write = tmpll;
172
173	if (sscanf(argv[5], "%u%c", &dc->write_delay, &dummy) != 1) {
174		ti->error = "Invalid write delay";
175		goto bad_dev_read;
176	}
177
178	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
179			  &dc->dev_write)) {
180		ti->error = "Write device lookup failed";
181		goto bad_dev_read;
182	}
183
184out:
185	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
186	if (!dc->kdelayd_wq) {
187		DMERR("Couldn't start kdelayd");
188		goto bad_queue;
189	}
190
191	setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
192
193	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
194	INIT_LIST_HEAD(&dc->delayed_bios);
195	mutex_init(&dc->timer_lock);
196	atomic_set(&dc->may_delay, 1);
197
198	ti->num_flush_bios = 1;
199	ti->num_discard_bios = 1;
200	ti->per_bio_data_size = sizeof(struct dm_delay_info);
201	ti->private = dc;
202	return 0;
203
204bad_queue:
205	if (dc->dev_write)
206		dm_put_device(ti, dc->dev_write);
207bad_dev_read:
208	dm_put_device(ti, dc->dev_read);
209bad:
210	kfree(dc);
211	return -EINVAL;
212}
213
214static void delay_dtr(struct dm_target *ti)
215{
216	struct delay_c *dc = ti->private;
217
218	destroy_workqueue(dc->kdelayd_wq);
219
220	dm_put_device(ti, dc->dev_read);
221
222	if (dc->dev_write)
223		dm_put_device(ti, dc->dev_write);
224
225	kfree(dc);
226}
227
228static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
229{
230	struct dm_delay_info *delayed;
231	unsigned long expires = 0;
232
233	if (!delay || !atomic_read(&dc->may_delay))
234		return 1;
235
236	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
237
238	delayed->context = dc;
239	delayed->expires = expires = jiffies + (delay * HZ / 1000);
240
241	mutex_lock(&delayed_bios_lock);
242
243	if (bio_data_dir(bio) == WRITE)
244		dc->writes++;
245	else
246		dc->reads++;
247
248	list_add_tail(&delayed->list, &dc->delayed_bios);
249
250	mutex_unlock(&delayed_bios_lock);
251
252	queue_timeout(dc, expires);
253
254	return 0;
255}
256
257static void delay_presuspend(struct dm_target *ti)
258{
259	struct delay_c *dc = ti->private;
260
261	atomic_set(&dc->may_delay, 0);
262	del_timer_sync(&dc->delay_timer);
263	flush_bios(flush_delayed_bios(dc, 1));
264}
265
266static void delay_resume(struct dm_target *ti)
267{
268	struct delay_c *dc = ti->private;
269
270	atomic_set(&dc->may_delay, 1);
271}
272
273static int delay_map(struct dm_target *ti, struct bio *bio)
274{
275	struct delay_c *dc = ti->private;
276
277	if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
278		bio->bi_bdev = dc->dev_write->bdev;
279		if (bio_sectors(bio))
280			bio->bi_iter.bi_sector = dc->start_write +
281				dm_target_offset(ti, bio->bi_iter.bi_sector);
282
283		return delay_bio(dc, dc->write_delay, bio);
284	}
285
286	bio->bi_bdev = dc->dev_read->bdev;
287	bio->bi_iter.bi_sector = dc->start_read +
288		dm_target_offset(ti, bio->bi_iter.bi_sector);
289
290	return delay_bio(dc, dc->read_delay, bio);
291}
292
293static void delay_status(struct dm_target *ti, status_type_t type,
294			 unsigned status_flags, char *result, unsigned maxlen)
295{
296	struct delay_c *dc = ti->private;
297	int sz = 0;
298
299	switch (type) {
300	case STATUSTYPE_INFO:
301		DMEMIT("%u %u", dc->reads, dc->writes);
302		break;
303
304	case STATUSTYPE_TABLE:
305		DMEMIT("%s %llu %u", dc->dev_read->name,
306		       (unsigned long long) dc->start_read,
307		       dc->read_delay);
308		if (dc->dev_write)
309			DMEMIT(" %s %llu %u", dc->dev_write->name,
310			       (unsigned long long) dc->start_write,
311			       dc->write_delay);
312		break;
313	}
314}
315
316static int delay_iterate_devices(struct dm_target *ti,
317				 iterate_devices_callout_fn fn, void *data)
318{
319	struct delay_c *dc = ti->private;
320	int ret = 0;
321
322	ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data);
323	if (ret)
324		goto out;
325
326	if (dc->dev_write)
327		ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data);
328
329out:
330	return ret;
331}
332
333static struct target_type delay_target = {
334	.name	     = "delay",
335	.version     = {1, 2, 1},
336	.module      = THIS_MODULE,
337	.ctr	     = delay_ctr,
338	.dtr	     = delay_dtr,
339	.map	     = delay_map,
340	.presuspend  = delay_presuspend,
341	.resume	     = delay_resume,
342	.status	     = delay_status,
343	.iterate_devices = delay_iterate_devices,
344};
345
346static int __init dm_delay_init(void)
347{
348	int r;
349
350	r = dm_register_target(&delay_target);
351	if (r < 0) {
352		DMERR("register failed %d", r);
353		goto bad_register;
354	}
355
356	return 0;
357
358bad_register:
359	return r;
360}
361
362static void __exit dm_delay_exit(void)
363{
364	dm_unregister_target(&delay_target);
365}
366
367/* Module hooks */
368module_init(dm_delay_init);
369module_exit(dm_delay_exit);
370
371MODULE_DESCRIPTION(DM_NAME " delay target");
372MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
373MODULE_LICENSE("GPL");
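Both listings register the same "delay" target type, version 1.2.1, from dm_delay_init(). A quick way to confirm the target is available on a running system, assuming the driver was built as a module named dm_delay, is:

    modprobe dm_delay
    dmsetup targets          # the output should include a "delay" entry at v1.2.1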