/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky. We squirrel the mirror struct away inside
 * bi_next for read/write buffers. This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}
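
/*
 * Usage note (summarising the call sites below): bio_set_m(bio, m) is
 * called just before a bio is handed to dm-io, and the matching
 * bio_get_m() in the dm-io callback (read_callback/write_callback)
 * retrieves the mirror and clears bi_next again before the bio is
 * completed.
 */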

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the DM_RAID1_*_ERROR values from enum dm_raid1_error
 *
 * If errors are being handled, record the type of
 * error encountered for this device. If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event. Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state. We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		/* Walk backwards through the legs, wrapping past leg 0. */
		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio, ret);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event. Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio->bi_rw & REQ_DISCARD) {
		io_req.bi_rw |= REQ_DISCARD;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_rw & REQ_FLUSH) ||
		    (bio->bi_rw & REQ_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable. We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			generic_make_request(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list. We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/Os to the core. This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes. If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices. It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If we have been told to handle errors, hold the bio
		 * and wait for userspace to deal with the problem.
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (!get_valid_mirror(ms))
			bio_endio(bio, -EIO);
		else if (errors_handled(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio, 0);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
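/*
 * For illustration only (values and device names are hypothetical),
 * the argv this function consumes might look like:
 *
 *   core 2 131072 nosync      - in-memory log, 131072-sector regions
 *   disk 2 /dev/sdc1 131072   - log persisted on /dev/sdc1
 *
 * where the count after the log type gives the number of parameters
 * that follow it.
 */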
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;

	ms->kmirrord_wq = alloc_workqueue("kmirrord",
					  WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region. If the above
	 * flag is not present, we ignore errors, and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create();
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work_sync(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
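/*
 * Returns DM_MAPIO_SUBMITTED when the bio has been queued for the
 * worker thread (all writes, plus reads of regions that are not yet
 * in-sync) and DM_MAPIO_REMAPPED when an in-sync read has been
 * pointed directly at one of the mirror legs.
 */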
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_rw & REQ_FLUSH))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process the bios still sitting on the hold list so that
	 * recovery is not left waiting for them. After this, no bio
	 * can be added to the hold list because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	F => Flush - A flush failure occurred, mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *	U => Unknown - No recorded error type matched
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

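/*
 * For illustration only (device numbers are hypothetical): with two
 * healthy legs, 489 of 1024 regions in sync and a disk log, the
 * STATUSTYPE_INFO output built below might read:
 *
 *   2 253:4 253:5 489/1024 1 AA 3 disk 253:3 A
 */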
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 1},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
1/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm-bio-record.h"
9
10#include <linux/init.h>
11#include <linux/mempool.h>
12#include <linux/module.h>
13#include <linux/pagemap.h>
14#include <linux/slab.h>
15#include <linux/workqueue.h>
16#include <linux/device-mapper.h>
17#include <linux/dm-io.h>
18#include <linux/dm-dirty-log.h>
19#include <linux/dm-kcopyd.h>
20#include <linux/dm-region-hash.h>
21
22#define DM_MSG_PREFIX "raid1"
23
24#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
25
26#define MAX_NR_MIRRORS (DM_KCOPYD_MAX_REGIONS + 1)
27
28#define DM_RAID1_HANDLE_ERRORS 0x01
29#define DM_RAID1_KEEP_LOG 0x02
30#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
31#define keep_log(p) ((p)->features & DM_RAID1_KEEP_LOG)
32
33static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
34
35/*-----------------------------------------------------------------
36 * Mirror set structures.
37 *---------------------------------------------------------------*/
38enum dm_raid1_error {
39 DM_RAID1_WRITE_ERROR,
40 DM_RAID1_FLUSH_ERROR,
41 DM_RAID1_SYNC_ERROR,
42 DM_RAID1_READ_ERROR
43};
44
45struct mirror {
46 struct mirror_set *ms;
47 atomic_t error_count;
48 unsigned long error_type;
49 struct dm_dev *dev;
50 sector_t offset;
51};
52
53struct mirror_set {
54 struct dm_target *ti;
55 struct list_head list;
56
57 uint64_t features;
58
59 spinlock_t lock; /* protects the lists */
60 struct bio_list reads;
61 struct bio_list writes;
62 struct bio_list failures;
63 struct bio_list holds; /* bios are waiting until suspend */
64
65 struct dm_region_hash *rh;
66 struct dm_kcopyd_client *kcopyd_client;
67 struct dm_io_client *io_client;
68
69 /* recovery */
70 region_t nr_regions;
71 int in_sync;
72 int log_failure;
73 int leg_failure;
74 atomic_t suspend;
75
76 atomic_t default_mirror; /* Default mirror */
77
78 struct workqueue_struct *kmirrord_wq;
79 struct work_struct kmirrord_work;
80 struct timer_list timer;
81 unsigned long timer_pending;
82
83 struct work_struct trigger_event;
84
85 unsigned nr_mirrors;
86 struct mirror mirror[];
87};
88
89DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
90 "A percentage of time allocated for raid resynchronization");
91
92static void wakeup_mirrord(void *context)
93{
94 struct mirror_set *ms = context;
95
96 queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
97}
98
99static void delayed_wake_fn(struct timer_list *t)
100{
101 struct mirror_set *ms = from_timer(ms, t, timer);
102
103 clear_bit(0, &ms->timer_pending);
104 wakeup_mirrord(ms);
105}
106
107static void delayed_wake(struct mirror_set *ms)
108{
109 if (test_and_set_bit(0, &ms->timer_pending))
110 return;
111
112 ms->timer.expires = jiffies + HZ / 5;
113 add_timer(&ms->timer);
114}
115
116static void wakeup_all_recovery_waiters(void *context)
117{
118 wake_up_all(&_kmirrord_recovery_stopped);
119}
120
121static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
122{
123 unsigned long flags;
124 int should_wake = 0;
125 struct bio_list *bl;
126
127 bl = (rw == WRITE) ? &ms->writes : &ms->reads;
128 spin_lock_irqsave(&ms->lock, flags);
129 should_wake = !(bl->head);
130 bio_list_add(bl, bio);
131 spin_unlock_irqrestore(&ms->lock, flags);
132
133 if (should_wake)
134 wakeup_mirrord(ms);
135}
136
137static void dispatch_bios(void *context, struct bio_list *bio_list)
138{
139 struct mirror_set *ms = context;
140 struct bio *bio;
141
142 while ((bio = bio_list_pop(bio_list)))
143 queue_bio(ms, bio, WRITE);
144}
145
146struct dm_raid1_bio_record {
147 struct mirror *m;
148 /* if details->bi_disk == NULL, details were not saved */
149 struct dm_bio_details details;
150 region_t write_region;
151};
152
153/*
154 * Every mirror should look like this one.
155 */
156#define DEFAULT_MIRROR 0
157
158/*
159 * This is yucky. We squirrel the mirror struct away inside
160 * bi_next for read/write buffers. This is safe since the bh
161 * doesn't get submitted to the lower levels of block layer.
162 */
163static struct mirror *bio_get_m(struct bio *bio)
164{
165 return (struct mirror *) bio->bi_next;
166}
167
168static void bio_set_m(struct bio *bio, struct mirror *m)
169{
170 bio->bi_next = (struct bio *) m;
171}
172
173static struct mirror *get_default_mirror(struct mirror_set *ms)
174{
175 return &ms->mirror[atomic_read(&ms->default_mirror)];
176}
177
178static void set_default_mirror(struct mirror *m)
179{
180 struct mirror_set *ms = m->ms;
181 struct mirror *m0 = &(ms->mirror[0]);
182
183 atomic_set(&ms->default_mirror, m - m0);
184}
185
186static struct mirror *get_valid_mirror(struct mirror_set *ms)
187{
188 struct mirror *m;
189
190 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
191 if (!atomic_read(&m->error_count))
192 return m;
193
194 return NULL;
195}
196
197/* fail_mirror
198 * @m: mirror device to fail
199 * @error_type: one of the enum's, DM_RAID1_*_ERROR
200 *
201 * If errors are being handled, record the type of
202 * error encountered for this device. If this type
203 * of error has already been recorded, we can return;
204 * otherwise, we must signal userspace by triggering
205 * an event. Additionally, if the device is the
206 * primary device, we must choose a new primary, but
207 * only if the mirror is in-sync.
208 *
209 * This function must not block.
210 */
211static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
212{
213 struct mirror_set *ms = m->ms;
214 struct mirror *new;
215
216 ms->leg_failure = 1;
217
218 /*
219 * error_count is used for nothing more than a
220 * simple way to tell if a device has encountered
221 * errors.
222 */
223 atomic_inc(&m->error_count);
224
225 if (test_and_set_bit(error_type, &m->error_type))
226 return;
227
228 if (!errors_handled(ms))
229 return;
230
231 if (m != get_default_mirror(ms))
232 goto out;
233
234 if (!ms->in_sync && !keep_log(ms)) {
235 /*
236 * Better to issue requests to same failing device
237 * than to risk returning corrupt data.
238 */
239 DMERR("Primary mirror (%s) failed while out-of-sync: "
240 "Reads may fail.", m->dev->name);
241 goto out;
242 }
243
244 new = get_valid_mirror(ms);
245 if (new)
246 set_default_mirror(new);
247 else
248 DMWARN("All sides of mirror have failed.");
249
250out:
251 schedule_work(&ms->trigger_event);
252}
253
254static int mirror_flush(struct dm_target *ti)
255{
256 struct mirror_set *ms = ti->private;
257 unsigned long error_bits;
258
259 unsigned int i;
260 struct dm_io_region io[MAX_NR_MIRRORS];
261 struct mirror *m;
262 struct dm_io_request io_req = {
263 .bi_op = REQ_OP_WRITE,
264 .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
265 .mem.type = DM_IO_KMEM,
266 .mem.ptr.addr = NULL,
267 .client = ms->io_client,
268 };
269
270 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
271 io[i].bdev = m->dev->bdev;
272 io[i].sector = 0;
273 io[i].count = 0;
274 }
275
276 error_bits = -1;
277 dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
278 if (unlikely(error_bits != 0)) {
279 for (i = 0; i < ms->nr_mirrors; i++)
280 if (test_bit(i, &error_bits))
281 fail_mirror(ms->mirror + i,
282 DM_RAID1_FLUSH_ERROR);
283 return -EIO;
284 }
285
286 return 0;
287}
288
289/*-----------------------------------------------------------------
290 * Recovery.
291 *
292 * When a mirror is first activated we may find that some regions
293 * are in the no-sync state. We have to recover these by
294 * recopying from the default mirror to all the others.
295 *---------------------------------------------------------------*/
296static void recovery_complete(int read_err, unsigned long write_err,
297 void *context)
298{
299 struct dm_region *reg = context;
300 struct mirror_set *ms = dm_rh_region_context(reg);
301 int m, bit = 0;
302
303 if (read_err) {
304 /* Read error means the failure of default mirror. */
305 DMERR_LIMIT("Unable to read primary mirror during recovery");
306 fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
307 }
308
309 if (write_err) {
310 DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
311 write_err);
312 /*
313 * Bits correspond to devices (excluding default mirror).
314 * The default mirror cannot change during recovery.
315 */
316 for (m = 0; m < ms->nr_mirrors; m++) {
317 if (&ms->mirror[m] == get_default_mirror(ms))
318 continue;
319 if (test_bit(bit, &write_err))
320 fail_mirror(ms->mirror + m,
321 DM_RAID1_SYNC_ERROR);
322 bit++;
323 }
324 }
325
326 dm_rh_recovery_end(reg, !(read_err || write_err));
327}
328
329static void recover(struct mirror_set *ms, struct dm_region *reg)
330{
331 unsigned i;
332 struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
333 struct mirror *m;
334 unsigned long flags = 0;
335 region_t key = dm_rh_get_region_key(reg);
336 sector_t region_size = dm_rh_get_region_size(ms->rh);
337
338 /* fill in the source */
339 m = get_default_mirror(ms);
340 from.bdev = m->dev->bdev;
341 from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
342 if (key == (ms->nr_regions - 1)) {
343 /*
344 * The final region may be smaller than
345 * region_size.
346 */
347 from.count = ms->ti->len & (region_size - 1);
348 if (!from.count)
349 from.count = region_size;
350 } else
351 from.count = region_size;
352
353 /* fill in the destinations */
354 for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
355 if (&ms->mirror[i] == get_default_mirror(ms))
356 continue;
357
358 m = ms->mirror + i;
359 dest->bdev = m->dev->bdev;
360 dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
361 dest->count = from.count;
362 dest++;
363 }
364
365 /* hand to kcopyd */
366 if (!errors_handled(ms))
367 set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
368
369 dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
370 flags, recovery_complete, reg);
371}
372
373static void reset_ms_flags(struct mirror_set *ms)
374{
375 unsigned int m;
376
377 ms->leg_failure = 0;
378 for (m = 0; m < ms->nr_mirrors; m++) {
379 atomic_set(&(ms->mirror[m].error_count), 0);
380 ms->mirror[m].error_type = 0;
381 }
382}
383
384static void do_recovery(struct mirror_set *ms)
385{
386 struct dm_region *reg;
387 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
388
389 /*
390 * Start quiescing some regions.
391 */
392 dm_rh_recovery_prepare(ms->rh);
393
394 /*
395 * Copy any already quiesced regions.
396 */
397 while ((reg = dm_rh_recovery_start(ms->rh)))
398 recover(ms, reg);
399
400 /*
401 * Update the in sync flag.
402 */
403 if (!ms->in_sync &&
404 (log->type->get_sync_count(log) == ms->nr_regions)) {
405 /* the sync is complete */
406 dm_table_event(ms->ti->table);
407 ms->in_sync = 1;
408 reset_ms_flags(ms);
409 }
410}
411
412/*-----------------------------------------------------------------
413 * Reads
414 *---------------------------------------------------------------*/
415static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
416{
417 struct mirror *m = get_default_mirror(ms);
418
419 do {
420 if (likely(!atomic_read(&m->error_count)))
421 return m;
422
423 if (m-- == ms->mirror)
424 m += ms->nr_mirrors;
425 } while (m != get_default_mirror(ms));
426
427 return NULL;
428}
429
430static int default_ok(struct mirror *m)
431{
432 struct mirror *default_mirror = get_default_mirror(m->ms);
433
434 return !atomic_read(&default_mirror->error_count);
435}
436
437static int mirror_available(struct mirror_set *ms, struct bio *bio)
438{
439 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
440 region_t region = dm_rh_bio_to_region(ms->rh, bio);
441
442 if (log->type->in_sync(log, region, 0))
443 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;
444
445 return 0;
446}
447
448/*
449 * remap a buffer to a particular mirror.
450 */
451static sector_t map_sector(struct mirror *m, struct bio *bio)
452{
453 if (unlikely(!bio->bi_iter.bi_size))
454 return 0;
455 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
456}
457
458static void map_bio(struct mirror *m, struct bio *bio)
459{
460 bio_set_dev(bio, m->dev->bdev);
461 bio->bi_iter.bi_sector = map_sector(m, bio);
462}
463
464static void map_region(struct dm_io_region *io, struct mirror *m,
465 struct bio *bio)
466{
467 io->bdev = m->dev->bdev;
468 io->sector = map_sector(m, bio);
469 io->count = bio_sectors(bio);
470}
471
472static void hold_bio(struct mirror_set *ms, struct bio *bio)
473{
474 /*
475 * Lock is required to avoid race condition during suspend
476 * process.
477 */
478 spin_lock_irq(&ms->lock);
479
480 if (atomic_read(&ms->suspend)) {
481 spin_unlock_irq(&ms->lock);
482
483 /*
484 * If device is suspended, complete the bio.
485 */
486 if (dm_noflush_suspending(ms->ti))
487 bio->bi_status = BLK_STS_DM_REQUEUE;
488 else
489 bio->bi_status = BLK_STS_IOERR;
490
491 bio_endio(bio);
492 return;
493 }
494
495 /*
496 * Hold bio until the suspend is complete.
497 */
498 bio_list_add(&ms->holds, bio);
499 spin_unlock_irq(&ms->lock);
500}
501
502/*-----------------------------------------------------------------
503 * Reads
504 *---------------------------------------------------------------*/
505static void read_callback(unsigned long error, void *context)
506{
507 struct bio *bio = context;
508 struct mirror *m;
509
510 m = bio_get_m(bio);
511 bio_set_m(bio, NULL);
512
513 if (likely(!error)) {
514 bio_endio(bio);
515 return;
516 }
517
518 fail_mirror(m, DM_RAID1_READ_ERROR);
519
520 if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
521 DMWARN_LIMIT("Read failure on mirror device %s. "
522 "Trying alternative device.",
523 m->dev->name);
524 queue_bio(m->ms, bio, bio_data_dir(bio));
525 return;
526 }
527
528 DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
529 m->dev->name);
530 bio_io_error(bio);
531}
532
533/* Asynchronous read. */
534static void read_async_bio(struct mirror *m, struct bio *bio)
535{
536 struct dm_io_region io;
537 struct dm_io_request io_req = {
538 .bi_op = REQ_OP_READ,
539 .bi_op_flags = 0,
540 .mem.type = DM_IO_BIO,
541 .mem.ptr.bio = bio,
542 .notify.fn = read_callback,
543 .notify.context = bio,
544 .client = m->ms->io_client,
545 };
546
547 map_region(&io, m, bio);
548 bio_set_m(bio, m);
549 BUG_ON(dm_io(&io_req, 1, &io, NULL));
550}
551
552static inline int region_in_sync(struct mirror_set *ms, region_t region,
553 int may_block)
554{
555 int state = dm_rh_get_state(ms->rh, region, may_block);
556 return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
557}
558
559static void do_reads(struct mirror_set *ms, struct bio_list *reads)
560{
561 region_t region;
562 struct bio *bio;
563 struct mirror *m;
564
565 while ((bio = bio_list_pop(reads))) {
566 region = dm_rh_bio_to_region(ms->rh, bio);
567 m = get_default_mirror(ms);
568
569 /*
570 * We can only read balance if the region is in sync.
571 */
572 if (likely(region_in_sync(ms, region, 1)))
573 m = choose_mirror(ms, bio->bi_iter.bi_sector);
574 else if (m && atomic_read(&m->error_count))
575 m = NULL;
576
577 if (likely(m))
578 read_async_bio(m, bio);
579 else
580 bio_io_error(bio);
581 }
582}
583
584/*-----------------------------------------------------------------
585 * Writes.
586 *
587 * We do different things with the write io depending on the
588 * state of the region that it's in:
589 *
590 * SYNC: increment pending, use kcopyd to write to *all* mirrors
591 * RECOVERING: delay the io until recovery completes
592 * NOSYNC: increment pending, just write to the default mirror
593 *---------------------------------------------------------------*/
594
595
596static void write_callback(unsigned long error, void *context)
597{
598 unsigned i;
599 struct bio *bio = (struct bio *) context;
600 struct mirror_set *ms;
601 int should_wake = 0;
602 unsigned long flags;
603
604 ms = bio_get_m(bio)->ms;
605 bio_set_m(bio, NULL);
606
607 /*
608 * NOTE: We don't decrement the pending count here,
609 * instead it is done by the targets endio function.
610 * This way we handle both writes to SYNC and NOSYNC
611 * regions with the same code.
612 */
613 if (likely(!error)) {
614 bio_endio(bio);
615 return;
616 }
617
618 /*
619 * If the bio is discard, return an error, but do not
620 * degrade the array.
621 */
622 if (bio_op(bio) == REQ_OP_DISCARD) {
623 bio->bi_status = BLK_STS_NOTSUPP;
624 bio_endio(bio);
625 return;
626 }
627
628 for (i = 0; i < ms->nr_mirrors; i++)
629 if (test_bit(i, &error))
630 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
631
632 /*
633 * Need to raise event. Since raising
634 * events can block, we need to do it in
635 * the main thread.
636 */
637 spin_lock_irqsave(&ms->lock, flags);
638 if (!ms->failures.head)
639 should_wake = 1;
640 bio_list_add(&ms->failures, bio);
641 spin_unlock_irqrestore(&ms->lock, flags);
642 if (should_wake)
643 wakeup_mirrord(ms);
644}
645
646static void do_write(struct mirror_set *ms, struct bio *bio)
647{
648 unsigned int i;
649 struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
650 struct mirror *m;
651 struct dm_io_request io_req = {
652 .bi_op = REQ_OP_WRITE,
653 .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
654 .mem.type = DM_IO_BIO,
655 .mem.ptr.bio = bio,
656 .notify.fn = write_callback,
657 .notify.context = bio,
658 .client = ms->io_client,
659 };
660
661 if (bio_op(bio) == REQ_OP_DISCARD) {
662 io_req.bi_op = REQ_OP_DISCARD;
663 io_req.mem.type = DM_IO_KMEM;
664 io_req.mem.ptr.addr = NULL;
665 }
666
667 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
668 map_region(dest++, m, bio);
669
670 /*
671 * Use default mirror because we only need it to retrieve the reference
672 * to the mirror set in write_callback().
673 */
674 bio_set_m(bio, get_default_mirror(ms));
675
676 BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
677}
678
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_opf & REQ_PREFLUSH) ||
		    (bio_op(bio) == REQ_OP_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue.
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to regions being recovered are
	 * going to be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush failed on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			submit_bio_noacct(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/Os to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If the device has failed and keep_log is enabled,
		 * fail the I/O.
		 *
		 * If we have been told to handle errors, and keep_log
		 * isn't enabled, hold the bio and wait for userspace to
		 * deal with the problem.
		 *
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
			bio_io_error(bio);
		else if (errors_handled(ms) && !keep_log(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	struct mirror_set *ms =
		kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL);

	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
	    offset != (sector_t)offset) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &ms->mirror[mirror].dev);
	if (ret) {
		ti->error = "Device lookup failure";
		return ret;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
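/*
 * For example (hypothetical device path), a disk log occupies four
 * arguments and a core log three, matching *args_used = 2 + param_count
 * below:
 *
 *	disk 2 /dev/sdX 1024	-> args_used = 4
 *	core 1 1024		-> args_used = 3
 */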
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;
	char dummy;
	int i;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	for (i = 0; i < num_features; i++) {
		if (!strcmp("handle_errors", argv[0]))
			ms->features |= DM_RAID1_HANDLE_ERRORS;
		else if (!strcmp("keep_log", argv[0]))
			ms->features |= DM_RAID1_KEEP_LOG;
		else {
			ti->error = "Unrecognised feature requested";
			return -EINVAL;
		}

		argc--;
		argv++;
		(*args_used)++;
	}
	if (!errors_handled(ms) && keep_log(ms)) {
		ti->error = "keep_log feature requires the handle_errors feature";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, supported features are "handle_errors" and "keep_log".
 */
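/*
 * A minimal example table line (hypothetical devices and sizes), as one
 * might pass to dmsetup:
 *
 *	dmsetup create mirr --table \
 *	  "0 2097152 mirror core 2 1024 nosync 2 /dev/sdb 0 /dev/sdc 0 \
 *	   1 handle_errors"
 *
 * i.e. a 1GiB mirror with an in-core log and 1024-sector regions,
 * two legs at offset 0, and the handle_errors feature enabled.
 */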
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);

	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);	/* delayed_wake() sets .function/.data before arming */
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
	int r, rw = bio_data_dir(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	bio_record->details.bi_disk = NULL;

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return DM_MAPIO_KILL;

	/*
	 * If the region is not in-sync, queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (bio->bi_opf & REQ_RAHEAD)
			return DM_MAPIO_KILL;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_iter.bi_sector);
	if (unlikely(!m))
		return DM_MAPIO_KILL;

	dm_bio_record(&bio_record->details, bio);
	bio_record->m = m;

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	int rw = bio_data_dir(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	/*
	 * We need to decrement the pending count if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_opf & REQ_PREFLUSH) &&
		    bio_op(bio) != REQ_OP_DISCARD)
			dm_rh_dec(ms->rh, bio_record->write_region);
		return DM_ENDIO_DONE;
	}

	if (*error == BLK_STS_NOTSUPP)
		goto out;

	if (bio->bi_opf & REQ_RAHEAD)
		goto out;

	if (unlikely(*error)) {
		if (!bio_record->details.bi_disk) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return DM_ENDIO_DONE;
		}

		m = bio_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an
		 * intact mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &bio_record->details;

			dm_bio_restore(bd, bio);
			bio_record->details.bi_disk = NULL;
			bio->bi_status = 0;

			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	bio_record->details.bi_disk = NULL;

	return DM_ENDIO_DONE;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process bios in the hold list now so that recovery does not
	 * end up waiting on them.  After this, no bio can be added to
	 * the hold list because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 * A => Alive - No failures
 * D => Dead - A write failure occurred leaving mirror out-of-sync
 * F => Flush - A flush failure occurred, mirror out-of-sync
 * S => Sync - A synchronization failure occurred, mirror out-of-sync
 * R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
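/*
 * These characters feed the STATUSTYPE_INFO output built by
 * mirror_status() below; a healthy two-leg mirror with a disk log
 * reports something like (hypothetical device numbers):
 *
 *	2 253:4 253:5 2048/2048 1 AA 3 disk 253:3 A
 *
 * one status character per leg ("AA"), with the trailing fields
 * produced by the dirty log's own status routine.
 */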
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

static void mirror_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned int m, sz = 0;
	int num_feature_args = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[MAX_NR_MIRRORS + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		num_feature_args += !!errors_handled(ms);
		num_feature_args += !!keep_log(ms);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (errors_handled(ms))
				DMEMIT(" handle_errors");
			if (keep_log(ms))
				DMEMIT(" keep_log");
		}

		break;
	}
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name = "mirror",
	.version = {1, 14, 0},
	.module = THIS_MODULE,
	.ctr = mirror_ctr,
	.dtr = mirror_dtr,
	.map = mirror_map,
	.end_io = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume = mirror_resume,
	.status = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_register_target(&mirror_target);
	if (r < 0)
		DMERR("Failed to register mirror target");

	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");