// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005-2007 Red Hat GmbH
 *
 * A target that delays reads and/or writes and can send
 * them to different devices.
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/kthread.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "delay"

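/*
 * One delay class per bio type (read, write, flush): the destination device,
 * the start offset on it, the delay in milliseconds and the number of
 * delayed bios currently outstanding against this class.
 */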
struct delay_class {
	struct dm_dev *dev;
	sector_t start;
	unsigned int delay;
	unsigned int ops;
};

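/*
 * Per-target context. Depending on the smallest configured delay, expired
 * bios are flushed either by a dedicated worker kthread (short delays) or
 * by a timer that queues work on kdelayd_wq (longer delays).
 */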
struct delay_c {
	struct timer_list delay_timer;
	struct mutex timer_lock;
	struct workqueue_struct *kdelayd_wq;
	struct work_struct flush_expired_bios;
	struct list_head delayed_bios;
	struct task_struct *worker;
	bool may_delay;

	struct delay_class read;
	struct delay_class write;
	struct delay_class flush;

	int argc;
};

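/* Per-bio bookkeeping, stored in the bio's per-bio data area. */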
struct dm_delay_info {
	struct delay_c *context;
	struct delay_class *class;
	struct list_head list;
	unsigned long expires;
};

static DEFINE_MUTEX(delayed_bios_lock);

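/* Timer callback: punt the flushing of expired bios to the workqueue. */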
static void handle_delayed_timer(struct timer_list *t)
{
	struct delay_c *dc = from_timer(dc, t, delay_timer);

	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}

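/* (Re)arm the timer, but only if it would fire earlier than already scheduled. */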
static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
	mutex_lock(&dc->timer_lock);

	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
		mod_timer(&dc->delay_timer, expires);

	mutex_unlock(&dc->timer_lock);
}

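/* "Fast" mode: a worker kthread was created instead of the timer + workqueue. */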
static inline bool delay_is_fast(struct delay_c *dc)
{
	return !!dc->worker;
}

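/* Resubmit a singly linked chain of bios to the block layer. */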
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		dm_submit_bio_remap(bio, NULL);
		bio = n;
	}
}

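/*
 * Submit all expired bios (or, with flush_all, everything queued). In timer
 * mode, rearm the timer for the earliest bio that has not yet expired.
 */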
static void flush_delayed_bios(struct delay_c *dc, bool flush_all)
{
	struct dm_delay_info *delayed, *next;
	struct bio_list flush_bio_list;
	unsigned long next_expires = 0;
	bool start_timer = false;

	bio_list_init(&flush_bio_list);

	mutex_lock(&delayed_bios_lock);
	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
		cond_resched();
		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
			struct bio *bio = dm_bio_from_per_bio_data(delayed,
						sizeof(struct dm_delay_info));
			list_del(&delayed->list);
			bio_list_add(&flush_bio_list, bio);
			delayed->class->ops--;
			continue;
		}

		if (!delay_is_fast(dc)) {
			if (!start_timer) {
				start_timer = true;
				next_expires = delayed->expires;
			} else {
				next_expires = min(next_expires, delayed->expires);
			}
		}
	}
	mutex_unlock(&delayed_bios_lock);

	if (start_timer)
		queue_timeout(dc, next_expires);

	flush_bios(bio_list_get(&flush_bio_list));
}

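/* Worker thread for short delays: poll the list, sleep while it is empty. */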
static int flush_worker_fn(void *data)
{
	struct delay_c *dc = data;

	while (!kthread_should_stop()) {
		flush_delayed_bios(dc, false);
		mutex_lock(&delayed_bios_lock);
		if (unlikely(list_empty(&dc->delayed_bios))) {
			set_current_state(TASK_INTERRUPTIBLE);
			mutex_unlock(&delayed_bios_lock);
			schedule();
		} else {
			mutex_unlock(&delayed_bios_lock);
			cond_resched();
		}
	}

	return 0;
}

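/* Workqueue handler: flush whatever has expired by now. */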
static void flush_expired_bios(struct work_struct *work)
{
	struct delay_c *dc;

	dc = container_of(work, struct delay_c, flush_expired_bios);
	flush_delayed_bios(dc, false);
}

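/* Destructor; also used to clean up after a partially failed constructor. */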
static void delay_dtr(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	if (dc->kdelayd_wq)
		destroy_workqueue(dc->kdelayd_wq);

	if (dc->read.dev)
		dm_put_device(ti, dc->read.dev);
	if (dc->write.dev)
		dm_put_device(ti, dc->write.dev);
	if (dc->flush.dev)
		dm_put_device(ti, dc->flush.dev);
	if (dc->worker)
		kthread_stop(dc->worker);

	mutex_destroy(&dc->timer_lock);

	kfree(dc);
}

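/* Parse one <device> <offset> <delay> triplet into a delay class. */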
static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **argv)
{
	int ret;
	unsigned long long tmpll;
	char dummy;

	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		return -EINVAL;
	}
	c->start = tmpll;

	if (sscanf(argv[2], "%u%c", &c->delay, &dummy) != 1) {
		ti->error = "Invalid delay";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		return ret;
	}

	return 0;
}

/*
 * Mapping parameters:
 *    <device> <offset> <delay>
 *    [<write_device> <write_offset> <write_delay>
 *     [<flush_device> <flush_offset> <flush_delay>]]
 *
 * With separate write parameters, the first set is only used for reads.
 * With separate flush parameters, the second set is only used for writes;
 * with only one or two sets, flushes reuse the write (or sole) parameters.
 * Offsets are specified in sectors.
 * Delays are specified in milliseconds.
 */
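/*
 * Example table line (hypothetical device name; 2097152 sectors = 1 GiB),
 * delaying reads by 20 ms and writes/flushes by 100 ms on the same device:
 *
 *   echo "0 2097152 delay /dev/sdXX 0 20 /dev/sdXX 0 100" | \
 *       dmsetup create delayed
 */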
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct delay_c *dc;
	int ret;
	unsigned int max_delay;

	if (argc != 3 && argc != 6 && argc != 9) {
		ti->error = "Requires exactly 3, 6 or 9 arguments";
		return -EINVAL;
	}

	dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	ti->private = dc;
	INIT_LIST_HEAD(&dc->delayed_bios);
	mutex_init(&dc->timer_lock);
	dc->may_delay = true;
	dc->argc = argc;

	ret = delay_class_ctr(ti, &dc->read, argv);
	if (ret)
		goto bad;
	max_delay = dc->read.delay;

	if (argc == 3) {
		ret = delay_class_ctr(ti, &dc->write, argv);
		if (ret)
			goto bad;
		ret = delay_class_ctr(ti, &dc->flush, argv);
		if (ret)
			goto bad;
		max_delay = max(max_delay, dc->write.delay);
		max_delay = max(max_delay, dc->flush.delay);
		goto out;
	}

	ret = delay_class_ctr(ti, &dc->write, argv + 3);
	if (ret)
		goto bad;
	if (argc == 6) {
		ret = delay_class_ctr(ti, &dc->flush, argv + 3);
		if (ret)
			goto bad;
		max_delay = max(max_delay, dc->flush.delay);
		goto out;
	}

	ret = delay_class_ctr(ti, &dc->flush, argv + 6);
	if (ret)
		goto bad;
	max_delay = max(max_delay, dc->flush.delay);

out:
	if (max_delay < 50) {
		/*
		 * In case of small requested delays, use kthread instead of
		 * timers and workqueue to achieve better latency.
		 */
		dc->worker = kthread_create(&flush_worker_fn, dc,
					    "dm-delay-flush-worker");
		if (IS_ERR(dc->worker)) {
			ret = PTR_ERR(dc->worker);
			dc->worker = NULL;
			goto bad;
		}
	} else {
		timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
		INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
		dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
		if (!dc->kdelayd_wq) {
			ret = -EINVAL;
			DMERR("Couldn't start kdelayd");
			goto bad;
		}
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->accounts_remapped_io = true;
	ti->per_io_data_size = sizeof(struct dm_delay_info);
	return 0;

bad:
	delay_dtr(ti);
	return ret;
}

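/*
 * Queue a bio on the delay list, or pass it straight through if its class
 * has no delay configured or the target is suspending.
 */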
static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
{
	struct dm_delay_info *delayed;
	unsigned long expires = 0;

	if (!c->delay)
		return DM_MAPIO_REMAPPED;

	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

	delayed->context = dc;
	delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);

	mutex_lock(&delayed_bios_lock);
	if (unlikely(!dc->may_delay)) {
		mutex_unlock(&delayed_bios_lock);
		return DM_MAPIO_REMAPPED;
	}
	c->ops++;
	list_add_tail(&delayed->list, &dc->delayed_bios);
	mutex_unlock(&delayed_bios_lock);

	if (delay_is_fast(dc))
		wake_up_process(dc->worker);
	else
		queue_timeout(dc, expires);

	return DM_MAPIO_SUBMITTED;
}

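/* Stop delaying new bios, then push out everything already queued. */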
static void delay_presuspend(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	mutex_lock(&delayed_bios_lock);
	dc->may_delay = false;
	mutex_unlock(&delayed_bios_lock);

	if (!delay_is_fast(dc))
		del_timer_sync(&dc->delay_timer);
	flush_delayed_bios(dc, true);
}

static void delay_resume(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	dc->may_delay = true;
}

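/* Pick the delay class from the bio's direction and flags, then remap. */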
static int delay_map(struct dm_target *ti, struct bio *bio)
{
	struct delay_c *dc = ti->private;
	struct delay_class *c;
	struct dm_delay_info *delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

	if (bio_data_dir(bio) == WRITE) {
		if (unlikely(bio->bi_opf & REQ_PREFLUSH))
			c = &dc->flush;
		else
			c = &dc->write;
	} else {
		c = &dc->read;
	}
	delayed->class = c;
	bio_set_dev(bio, c->dev->bdev);
	bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	return delay_bio(dc, c, bio);
}

#define DMEMIT_DELAY_CLASS(c) \
	DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)

static void delay_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct delay_c *dc = ti->private;
	int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%u %u %u", dc->read.ops, dc->write.ops, dc->flush.ops);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT_DELAY_CLASS(&dc->read);
		if (dc->argc >= 6) {
			DMEMIT(" ");
			DMEMIT_DELAY_CLASS(&dc->write);
		}
		if (dc->argc >= 9) {
			DMEMIT(" ");
			DMEMIT_DELAY_CLASS(&dc->flush);
		}
		break;

	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}
}

static int delay_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct delay_c *dc = ti->private;
	int ret = 0;

	ret = fn(ti, dc->read.dev, dc->read.start, ti->len, data);
	if (ret)
		goto out;
	ret = fn(ti, dc->write.dev, dc->write.start, ti->len, data);
	if (ret)
		goto out;
	ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data);
	if (ret)
		goto out;

out:
	return ret;
}

static struct target_type delay_target = {
	.name = "delay",
	.version = {1, 4, 0},
	.features = DM_TARGET_PASSES_INTEGRITY,
	.module = THIS_MODULE,
	.ctr = delay_ctr,
	.dtr = delay_dtr,
	.map = delay_map,
	.presuspend = delay_presuspend,
	.resume = delay_resume,
	.status = delay_status,
	.iterate_devices = delay_iterate_devices,
};
module_dm(delay);

MODULE_DESCRIPTION(DM_NAME " delay target");
MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
MODULE_LICENSE("GPL");