// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm.h"

#define	DM_MSG_PREFIX	"region hash"

/*
 *------------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  dm_rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  dm_rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *------------------------------------------------------------------
 */
struct dm_region_hash {
	uint32_t region_size;
	unsigned int region_shift;

	/* holds persistent region state */
	struct dm_dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	unsigned int mask;
	unsigned int nr_buckets;
	unsigned int prime;
	unsigned int shift;
	struct list_head *buckets;

	/*
	 * If there was a flush failure no regions can be marked clean.
	 */
	int flush_failure;

	unsigned int max_recovery; /* Max # of regions to recover in parallel */

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
	struct semaphore recovery_count;

	mempool_t region_pool;

	void *context;
	sector_t target_begin;

	/* Callback function to schedule bio writes */
	void (*dispatch_bios)(void *context, struct bio_list *bios);

	/* Callback function to wake up the caller's worker thread. */
	void (*wakeup_workers)(void *context);

	/* Callback function to wake up the caller's recovery waiters. */
	void (*wakeup_all_recovery_waiters)(void *context);
};

struct dm_region {
	struct dm_region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*
 * Conversion fns
 */
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
	return sector >> rh->region_shift;
}

sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);

region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
	return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
				      rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);

void *dm_rh_region_context(struct dm_region *reg)
{
	return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);

region_t dm_rh_get_region_key(struct dm_region *reg)
{
	return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);

sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
	return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);

/*
 * FIXME: shall we pass in a structure instead of all these args to
 * dm_region_hash_create()????
 */
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12

#define MIN_REGIONS 64
struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned int max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions)
{
	struct dm_region_hash *rh;
	unsigned int nr_buckets, max_buckets;
	size_t i;
	int ret;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh = kzalloc(sizeof(*rh), GFP_KERNEL);
	if (!rh) {
		DMERR("unable to allocate region hash memory");
		return ERR_PTR(-ENOMEM);
	}

	rh->context = context;
	rh->dispatch_bios = dispatch_bios;
	rh->wakeup_workers = wakeup_workers;
	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
	rh->target_begin = target_begin;
	rh->max_recovery = max_recovery;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = __ffs(region_size);
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->shift = RH_HASH_SHIFT;
	rh->prime = RH_HASH_MULT;

	rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash bucket memory");
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);
	rh->flush_failure = 0;

	ret = mempool_init_kmalloc_pool(&rh->region_pool, MIN_REGIONS,
					sizeof(struct dm_region));
	if (ret) {
		vfree(rh->buckets);
		kfree(rh);
		rh = ERR_PTR(-ENOMEM);
	}

	return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);
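
#if 0
/*
 * Illustrative sketch, not part of the original file (guarded out so it
 * cannot affect the build): how a mirror-style caller might wire up a
 * region hash.  The example_mirror structure and the callback bodies are
 * hypothetical stand-ins for the caller's own worker plumbing; compare
 * mirror_ctr() in drivers/md/dm-raid1.c.  The dirty log is assumed to
 * have been created earlier (e.g. with dm_dirty_log_create()), and
 * region_size is in sectors and expected to be a power of two.
 */
struct example_mirror {
	struct dm_region_hash *rh;
	struct dm_dirty_log *log;
	sector_t ti_begin;
	region_t nr_regions;
};

static void example_dispatch_bios(void *context, struct bio_list *bios)
{
	/* Queue the delayed writes somewhere and wake the target's worker. */
}

static void example_wakeup_workers(void *context)
{
	/* e.g. queue work on the target's workqueue. */
}

static void example_wakeup_recovery_waiters(void *context)
{
	/* e.g. wake_up_all() on a wait queue used while suspending. */
}

static int example_create_region_hash(struct example_mirror *em,
				      uint32_t region_size)
{
	em->rh = dm_region_hash_create(em, example_dispatch_bios,
				       example_wakeup_workers,
				       example_wakeup_recovery_waiters,
				       em->ti_begin, 1 /* max_recovery */,
				       em->log, region_size, em->nr_regions);
	if (IS_ERR(em->rh))
		return PTR_ERR(em->rh);

	return 0;
}
#endif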

void dm_region_hash_destroy(struct dm_region_hash *rh)
{
	unsigned int h;
	struct dm_region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, &rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);

	mempool_exit(&rh->region_pool);
	vfree(rh->buckets);
	kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);

struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
	return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);

static unsigned int rh_hash(struct dm_region_hash *rh, region_t region)
{
	return (unsigned int) ((region * rh->prime) >> rh->shift) & rh->mask;
}

static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;
	struct list_head *bucket = rh->buckets + rh_hash(rh, region);

	list_for_each_entry(reg, bucket, hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
}

static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	nreg = mempool_alloc(&rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, &rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}

static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}

int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	int r;
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (e.g. -EWOULDBLOCK) gets
	 * taken as DM_RH_NOSYNC.
	 */
	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);

static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed.  If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}

/* dm_rh_mark_nosync
 * @rh
 * @bio
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	if (bio->bi_opf & REQ_PREFLUSH) {
		rh->flush_failure = 1;
		return;
	}

	if (bio_op(bio) == REQ_OP_DISCARD)
		return;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * In either case, the region should not have been connected to any list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);

void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, &rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, &rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, &rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);
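
#if 0
/*
 * Illustrative sketch, not part of the original file (guarded out so it
 * cannot affect the build): a caller's worker normally retires completed
 * regions at the top of each pass, so clean and recovered state reaches
 * the dirty log before any new io or recovery is scheduled in the same
 * pass; compare do_mirror() in drivers/md/dm-raid1.c.
 */
static void example_worker_pass(struct dm_region_hash *rh)
{
	/* Push clean/recovered region state into the dirty log first. */
	dm_rh_update_states(rh, 1 /* errors_handled: leave failed regions out of sync */);

	/* ... recovery, delayed bios and fresh io would be processed next ... */
}
#endif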

static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next) {
		if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
			continue;
		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
	}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);

void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for the next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should be kept off
		 * the clean list.
		 * The hash entry for DM_RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (unlikely(rh->flush_failure)) {
			/*
			 * If a write flush failed some time ago, we
			 * don't know whether or not this write made it
			 * to the disk, so we must resync the device.
			 */
			reg->state = DM_RH_NOSYNC;
		} else if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);
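
#if 0
/*
 * Illustrative write-path sketch, not part of the original file (guarded
 * out so it cannot affect the build).  Regions are marked dirty before a
 * batch of writes is issued, and each region's pending count is dropped
 * again as its write completes.  example_issue_writes() and
 * example_write_done() are hypothetical; a real target records the
 * region with dm_rh_bio_to_region() when the bio is mapped (bi_sector
 * may be remapped before completion), and calls dm_rh_mark_nosync()
 * from process context for writes that failed on a mirror leg *before*
 * dropping the pending count, so the region cannot slip onto the clean
 * list.
 */
static void example_issue_writes(struct dm_region_hash *rh,
				 struct bio_list *writes)
{
	struct bio *bio;

	/* DM_RH_CLEAN regions become DM_RH_DIRTY and are marked in the log. */
	dm_rh_inc_pending(rh, writes);

	while ((bio = bio_list_pop(writes)))
		submit_bio_noacct(bio);	/* stand-in for the target's own submission */
}

static void example_write_done(struct dm_region_hash *rh, region_t region)
{
	/* The last write out moves a dirty region back onto the clean list. */
	dm_rh_dec(rh, region);
}
#endif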

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);

/*
 * Returns any quiesced regions.
 */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
	struct dm_region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct dm_region, list);
		list_del_init(&reg->list);  /* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);

void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);

	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
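
#if 0
/*
 * Illustrative recovery sketch, not part of the original file (guarded
 * out so it cannot affect the build).  The caller's worker asks the
 * region hash to quiesce regions chosen by the dirty log and hands each
 * fully quiesced region to a copy engine; the completion callback
 * reports the result back.  The dm_kcopyd_copy() call itself is elided
 * here, and example_recovery_done() merely follows the shape of a
 * dm_kcopyd notify function.
 */
static void example_recovery_done(int read_err, unsigned long write_err,
				  void *context)
{
	struct dm_region *reg = context;

	/*
	 * Success queues the region on recovered_regions, failure on
	 * failed_recovered_regions; dm_rh_update_states() retires it on
	 * the next worker pass.
	 */
	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static void example_do_recovery(struct dm_region_hash *rh)
{
	struct dm_region *reg;

	/* Pick up to max_recovery regions from the dirty log and quiesce them. */
	dm_rh_recovery_prepare(rh);

	/* Copy each region that no longer has io pending against it. */
	while ((reg = dm_rh_recovery_start(rh))) {
		/* dm_kcopyd_copy(..., example_recovery_done, reg) would go here. */
	}
}
#endif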

/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);

int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);

void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);

void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < rh->max_recovery; i++)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);

void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	int i;

	for (i = 0; i < rh->max_recovery; i++)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
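
#if 0
/*
 * Illustrative sketch, not part of the original file (guarded out so it
 * cannot affect the build): suspend/resume hooks typically bracket
 * recovery with these two calls; compare mirror_presuspend() and
 * mirror_resume() in drivers/md/dm-raid1.c.
 */
static void example_presuspend(struct dm_region_hash *rh)
{
	/*
	 * Claim every recovery slot: waits for in-flight recovery to
	 * finish and prevents new regions from being prepared.
	 */
	dm_rh_stop_recovery(rh);
}

static void example_resume(struct dm_region_hash *rh)
{
	/* Release the slots again and kick the worker. */
	dm_rh_start_recovery(rh);
}
#endif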

MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");