v3.5.6
/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/dm-dirty-log.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>
#include <linux/module.h>

#include "dm-log-userspace-transfer.h"

#define DM_LOG_USERSPACE_VSN "1.1.0"

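/*
 * A single queued mark/clear request.  'type' is the DM_ULOG_* request
 * type (mark or clear region), 'region' is the region it applies to, and
 * 'list' links the entry onto the log's mark_list or clear_list until the
 * next flush sends it to the userspace server.
 */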
struct flush_entry {
	int type;
	region_t region;
	struct list_head list;
};

/*
 * This limit on the number of mark and clear requests is, to a degree,
 * arbitrary.  However, there is some basis for the choice in the limits
 * imposed on the size of the data payload by dm-log-userspace-transfer.c:
 * dm_consult_userspace().
 */
#define MAX_FLUSH_GROUP_COUNT 32
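/*
 * For scale: a full group is MAX_FLUSH_GROUP_COUNT * sizeof(uint64_t) =
 * 32 * 8 = 256 bytes of payload per dm_consult_userspace() call, which
 * is intended to stay comfortably within the transfer layer's limit.
 */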

struct log_c {
	struct dm_target *ti;
	struct dm_dev *log_dev;
	uint32_t region_size;
	region_t region_count;
	uint64_t luid;
	char uuid[DM_UUID_LEN];

	char *usr_argv_str;
	uint32_t usr_argc;

	/*
	 * in_sync_hint gets set when doing is_remote_recovering.  It
	 * represents the first region that needs recovery.  IOW, the
	 * first zero bit of sync_bits.  This can be useful to limit
	 * traffic for calls like is_remote_recovering and get_resync_work,
	 * but take care in its use for anything else.
	 */
	uint64_t in_sync_hint;

	/*
	 * Mark and clear requests are held until a flush is issued
	 * so that we can group, and thereby limit, the amount of
	 * network traffic between kernel and userspace.  The 'flush_lock'
	 * is used to protect these lists.
	 */
	spinlock_t flush_lock;
	struct list_head mark_list;
	struct list_head clear_list;
};

static mempool_t *flush_entry_pool;

static void *flush_entry_alloc(gfp_t gfp_mask, void *pool_data)
{
	return kmalloc(sizeof(struct flush_entry), gfp_mask);
}

static void flush_entry_free(void *element, void *pool_data)
{
	kfree(element);
}

static int userspace_do_request(struct log_c *lc, const char *uuid,
				int request_type, char *data, size_t data_size,
				char *rdata, size_t *rdata_size)
{
	int r;

	/*
	 * If the server isn't there, -ESRCH is returned,
	 * and we must keep trying until the server is
	 * restored.
	 */
retry:
	r = dm_consult_userspace(uuid, lc->luid, request_type, data,
				 data_size, rdata, rdata_size);

	if (r != -ESRCH)
		return r;

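	/*
	 * The server has gone away, most likely because it was restarted.
	 * Re-issue the log constructor until the server answers, ask it
	 * to resume, and then retry the original request from the top.
	 */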
	DMERR("Userspace log server not found.");
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(2*HZ);
		DMWARN("Attempting to contact userspace log server...");
		r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
					 lc->usr_argv_str,
					 strlen(lc->usr_argv_str) + 1,
					 NULL, NULL);
		if (!r)
			break;
	}
	DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
	r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
				 0, NULL, NULL);
	if (!r)
		goto retry;

	DMERR("Error trying to resume userspace log: %d", r);

	return -ESRCH;
}

static int build_constructor_string(struct dm_target *ti,
				    unsigned argc, char **argv,
				    char **ctr_str)
{
	int i, str_size;
	char *str = NULL;

	*ctr_str = NULL;

	for (i = 0, str_size = 0; i < argc; i++)
		str_size += strlen(argv[i]) + 1; /* +1 for space between args */

	str_size += 20; /* Max number of chars in a printed u64 number */

	str = kzalloc(str_size, GFP_KERNEL);
	if (!str) {
		DMWARN("Unable to allocate memory for constructor string");
		return -ENOMEM;
	}

	str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
	for (i = 0; i < argc; i++)
		str_size += sprintf(str + str_size, " %s", argv[i]);

	*ctr_str = str;
	return str_size;
}
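/*
 * For example (illustrative values only): a 204800-sector target with
 * argv = { "clustered-disk", "3", "253:4", "1024" } yields the string
 *
 *	"204800 clustered-disk 3 253:4 1024"
 *
 * i.e. the target length followed by the space-joined arguments.
 */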

/*
 * userspace_ctr
 *
 * argv contains:
 *	<UUID> <other args>
 * Where 'other args' are the userspace implementation-specific log
 * arguments.  An example might be:
 *	<UUID> clustered-disk <arg count> <log dev> <region_size> [[no]sync]
 *
 * So, this module will strip off the <UUID> for identification purposes
 * when communicating with userspace about a log; but will pass on everything
 * else.
 */
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
			 unsigned argc, char **argv)
{
	int r = 0;
	int str_size;
	char *ctr_str = NULL;
	struct log_c *lc = NULL;
	uint64_t rdata;
	size_t rdata_size = sizeof(rdata);
	char *devices_rdata = NULL;
	size_t devices_rdata_size = DM_NAME_LEN;

	if (argc < 3) {
		DMWARN("Too few arguments to userspace dirty log");
		return -EINVAL;
	}

	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
	if (!lc) {
		DMWARN("Unable to allocate userspace log context.");
		return -ENOMEM;
	}

	/* The ptr value is sufficient for local unique id */
	lc->luid = (unsigned long)lc;

	lc->ti = ti;

	if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
		DMWARN("UUID argument too long.");
		kfree(lc);
		return -EINVAL;
	}

	strncpy(lc->uuid, argv[0], DM_UUID_LEN);
	spin_lock_init(&lc->flush_lock);
	INIT_LIST_HEAD(&lc->mark_list);
	INIT_LIST_HEAD(&lc->clear_list);

	str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
	if (str_size < 0) {
		kfree(lc);
		return str_size;
	}

	devices_rdata = kzalloc(devices_rdata_size, GFP_KERNEL);
	if (!devices_rdata) {
		DMERR("Failed to allocate memory for device information");
		r = -ENOMEM;
		goto out;
	}

	/*
	 * Send table string and get back any opened device.
	 */
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
				 ctr_str, str_size,
				 devices_rdata, &devices_rdata_size);

	if (r < 0) {
		if (r == -ESRCH)
			DMERR("Userspace log server not found");
		else
			DMERR("Userspace log server failed to create log");
		goto out;
	}

	/* Since the region size does not change, get it now */
	rdata_size = sizeof(rdata);
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
				 NULL, 0, (char *)&rdata, &rdata_size);

	if (r) {
		DMERR("Failed to get region size of dirty log");
		goto out;
	}

	lc->region_size = (uint32_t)rdata;
	lc->region_count = dm_sector_div_up(ti->len, lc->region_size);

	if (devices_rdata_size) {
		if (devices_rdata[devices_rdata_size - 1] != '\0') {
			DMERR("DM_ULOG_CTR device return string not properly terminated");
			r = -EINVAL;
			goto out;
		}
		r = dm_get_device(ti, devices_rdata,
				  dm_table_get_mode(ti->table), &lc->log_dev);
		if (r)
			DMERR("Failed to register %s with device-mapper",
			      devices_rdata);
	}

out:
	kfree(devices_rdata);
	if (r) {
		kfree(lc);
		kfree(ctr_str);
	} else {
		lc->usr_argv_str = ctr_str;
		lc->usr_argc = argc;
		log->context = lc;
	}

	return r;
}

static void userspace_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
				 NULL, 0,
				 NULL, NULL);

	if (lc->log_dev)
		dm_put_device(lc->ti, lc->log_dev);

	kfree(lc->usr_argv_str);
	kfree(lc);

	return;
}

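/*
 * Suspend/resume hooks.  Each simply forwards the corresponding
 * DM_ULOG_* request so the userspace server can react to the state
 * change (what it does with them is implementation-specific); resume
 * also resets in_sync_hint.
 */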
static int userspace_presuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
				 NULL, 0,
				 NULL, NULL);

	return r;
}

static int userspace_postsuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
				 NULL, 0,
				 NULL, NULL);

	return r;
}

static int userspace_resume(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	lc->in_sync_hint = 0;
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
				 NULL, 0,
				 NULL, NULL);

	return r;
}

static uint32_t userspace_get_region_size(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	return lc->region_size;
}

/*
 * userspace_is_clean
 *
 * Check whether a region is clean.  If there is any sort of
 * failure when consulting the server, we return not clean.
 *
 * Returns: 1 if clean, 0 otherwise
 */
static int userspace_is_clean(struct dm_dirty_log *log, region_t region)
{
	int r;
	uint64_t region64 = (uint64_t)region;
	int64_t is_clean;
	size_t rdata_size;
	struct log_c *lc = log->context;

	rdata_size = sizeof(is_clean);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
				 (char *)&region64, sizeof(region64),
				 (char *)&is_clean, &rdata_size);

	return (r) ? 0 : (int)is_clean;
}

/*
 * userspace_in_sync
 *
 * Check if the region is in-sync.  If there is any sort
 * of failure when consulting the server, we assume that
 * the region is not in sync.
 *
 * If 'can_block' is not set, return -EWOULDBLOCK immediately
 * rather than consulting the server.
 *
 * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK
 */
static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
			     int can_block)
{
	int r;
	uint64_t region64 = region;
	int64_t in_sync;
	size_t rdata_size;
	struct log_c *lc = log->context;

	/*
	 * We can never respond directly - even if in_sync_hint is
	 * set.  This is because another machine could see a device
	 * failure and mark the region out-of-sync.  If we don't go
	 * to userspace to ask, we might think the region is in-sync
	 * and allow a read to pick up data that is stale.  (This is
	 * very unlikely if a device actually fails; but it is very
	 * likely if a connection to one device from one machine fails.)
	 *
	 * There still might be a problem if the mirror caches the region
	 * state as in-sync... but then this call would not be made.  So,
	 * that is a mirror problem.
	 */
	if (!can_block)
		return -EWOULDBLOCK;

	rdata_size = sizeof(in_sync);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
				 (char *)&region64, sizeof(region64),
				 (char *)&in_sync, &rdata_size);
	return (r) ? 0 : (int)in_sync;
}

static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
{
	int r = 0;
	struct flush_entry *fe;

	list_for_each_entry(fe, flush_list, list) {
		r = userspace_do_request(lc, lc->uuid, fe->type,
					 (char *)&fe->region,
					 sizeof(fe->region),
					 NULL, NULL);
		if (r)
			break;
	}

	return r;
}

static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
{
	int r = 0;
	int count;
	uint32_t type = 0;
	struct flush_entry *fe, *tmp_fe;
	LIST_HEAD(tmp_list);
	uint64_t group[MAX_FLUSH_GROUP_COUNT];

	/*
	 * Group process the requests
	 */
	while (!list_empty(flush_list)) {
		count = 0;

		list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
			group[count] = fe->region;
			count++;

			list_move(&fe->list, &tmp_list);

			type = fe->type;
			if (count >= MAX_FLUSH_GROUP_COUNT)
				break;
		}

		r = userspace_do_request(lc, lc->uuid, type,
					 (char *)(group),
					 count * sizeof(uint64_t),
					 NULL, NULL);
		if (r) {
			/* Group send failed.  Attempt one-by-one. */
			list_splice_init(&tmp_list, flush_list);
			r = flush_one_by_one(lc, flush_list);
			break;
		}
	}

	/*
	 * Must collect the flush_entry structs that were successfully
	 * processed as a group so that they will be freed by the caller.
	 */
	list_splice_init(&tmp_list, flush_list);

	return r;
}

/*
 * userspace_flush
 *
 * This function is ok to block.
 * The flush happens in two stages.  First, it sends all
 * clear/mark requests that are on the list.  Then it
 * tells the server to commit them.  This gives the
 * server a chance to optimise the commit, instead of
 * doing it for every request.
 *
 * Additionally, we could implement another thread that
 * sends the requests up to the server - reducing the
 * load on flush.  Then the flush would have less in
 * the list and be responsible for the finishing commit.
 *
 * Returns: 0 on success, < 0 on failure
 */
static int userspace_flush(struct dm_dirty_log *log)
{
	int r = 0;
	unsigned long flags;
	struct log_c *lc = log->context;
	LIST_HEAD(mark_list);
	LIST_HEAD(clear_list);
	struct flush_entry *fe, *tmp_fe;

	spin_lock_irqsave(&lc->flush_lock, flags);
	list_splice_init(&lc->mark_list, &mark_list);
	list_splice_init(&lc->clear_list, &clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	if (list_empty(&mark_list) && list_empty(&clear_list))
		return 0;

	r = flush_by_group(lc, &mark_list);
	if (r)
		goto fail;

	r = flush_by_group(lc, &clear_list);
	if (r)
		goto fail;

	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
				 NULL, 0, NULL, NULL);

fail:
	/*
	 * We can safely remove these entries, even after failure.
	 * Calling code will receive an error and will know that
	 * the log facility has failed.
	 */
	list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
		list_del(&fe->list);
		mempool_free(fe, flush_entry_pool);
	}
	list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
		list_del(&fe->list);
		mempool_free(fe, flush_entry_pool);
	}

	if (r)
		dm_table_event(lc->ti->table);

	return r;
}

/*
 * userspace_mark_region
 *
 * This function should avoid blocking unless absolutely required.
 * (Memory allocation is valid for blocking.)
 */
static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
{
	unsigned long flags;
	struct log_c *lc = log->context;
	struct flush_entry *fe;

	/* Wait for an allocation, but _never_ fail */
	fe = mempool_alloc(flush_entry_pool, GFP_NOIO);
	BUG_ON(!fe);

	spin_lock_irqsave(&lc->flush_lock, flags);
	fe->type = DM_ULOG_MARK_REGION;
	fe->region = region;
	list_add(&fe->list, &lc->mark_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	return;
}

/*
 * userspace_clear_region
 *
 * This function must not block.
 * So, the alloc can't block.  In the worst case, it is ok to
 * fail.  It would simply mean we can't clear the region.
 * Does nothing to current sync context, but does mean
 * the region will be re-sync'ed on a reload of the mirror
 * even though it is in-sync.
 */
static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
{
	unsigned long flags;
	struct log_c *lc = log->context;
	struct flush_entry *fe;

	/*
	 * If we fail to allocate, we skip the clearing of
	 * the region.  This doesn't hurt us in any way, except
	 * to cause the region to be resync'ed when the
	 * device is activated next time.
	 */
	fe = mempool_alloc(flush_entry_pool, GFP_ATOMIC);
	if (!fe) {
		DMERR("Failed to allocate memory to clear region.");
		return;
	}

	spin_lock_irqsave(&lc->flush_lock, flags);
	fe->type = DM_ULOG_CLEAR_REGION;
	fe->region = region;
	list_add(&fe->list, &lc->clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	return;
}

/*
 * userspace_get_resync_work
 *
 * Get a region that needs recovery.  It is valid to return
 * an error for this function.
 *
 * Returns: 1 if region filled, 0 if no work, <0 on error
 */
static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
	int r;
	size_t rdata_size;
	struct log_c *lc = log->context;
	struct {
		int64_t i; /* 64-bit for mixed-arch compatibility */
		region_t r;
	} pkg;

	if (lc->in_sync_hint >= lc->region_count)
		return 0;

	rdata_size = sizeof(pkg);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
				 NULL, 0,
				 (char *)&pkg, &rdata_size);

	*region = pkg.r;
	return (r) ? r : (int)pkg.i;
}

/*
 * userspace_set_region_sync
 *
 * Set the sync status of a given region.  This function
 * must not fail.
 */
static void userspace_set_region_sync(struct dm_dirty_log *log,
				      region_t region, int in_sync)
{
	int r;
	struct log_c *lc = log->context;
	struct {
		region_t r;
		int64_t i;
	} pkg;

	pkg.r = region;
	pkg.i = (int64_t)in_sync;

	r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
				 (char *)&pkg, sizeof(pkg),
				 NULL, NULL);

	/*
	 * It would be nice to be able to report failures.
	 * However, it is easy enough to detect and resolve.
	 */
	return;
}

/*
 * userspace_get_sync_count
 *
 * If there is any sort of failure when consulting the server,
 * we assume that the sync count is zero.
 *
 * Returns: sync count on success, 0 on failure
 */
static region_t userspace_get_sync_count(struct dm_dirty_log *log)
{
	int r;
	size_t rdata_size;
	uint64_t sync_count;
	struct log_c *lc = log->context;

	rdata_size = sizeof(sync_count);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
				 NULL, 0,
				 (char *)&sync_count, &rdata_size);

	if (r)
		return 0;

	if (sync_count >= lc->region_count)
		lc->in_sync_hint = lc->region_count;

	return (region_t)sync_count;
}

/*
 * userspace_status
 *
 * Returns: amount of space consumed
 */
static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
			    char *result, unsigned maxlen)
{
	int r = 0;
	char *table_args;
	size_t sz = (size_t)maxlen;
	struct log_c *lc = log->context;

	switch (status_type) {
	case STATUSTYPE_INFO:
		r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
					 NULL, 0,
					 result, &sz);

		if (r) {
			sz = 0;
			DMEMIT("%s 1 COM_FAILURE", log->type->name);
		}
		break;
	case STATUSTYPE_TABLE:
		sz = 0;
		table_args = strchr(lc->usr_argv_str, ' ');
		BUG_ON(!table_args); /* There will always be a ' ' */
		table_args++;

		DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
		       lc->uuid, table_args);
		break;
	}
	return (r) ? 0 : (int)sz;
}

/*
 * userspace_is_remote_recovering
 *
 * Returns: 1 if region recovering, 0 otherwise
 */
static int userspace_is_remote_recovering(struct dm_dirty_log *log,
					  region_t region)
{
	int r;
	uint64_t region64 = region;
	struct log_c *lc = log->context;
	static unsigned long long limit;
	struct {
		int64_t is_recovering;
		uint64_t in_sync_hint;
	} pkg;
	size_t rdata_size = sizeof(pkg);

	/*
	 * Once the mirror has been reported to be in-sync,
	 * it will never again ask for recovery work.  So,
	 * we can safely say there is not a remote machine
	 * recovering if the device is in-sync.  (in_sync_hint
	 * must be reset at resume time.)
	 */
	if (region < lc->in_sync_hint)
		return 0;
	else if (jiffies < limit)
		return 1;

	limit = jiffies + (HZ / 4);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
				 (char *)&region64, sizeof(region64),
				 (char *)&pkg, &rdata_size);
	if (r)
		return 1;

	lc->in_sync_hint = pkg.in_sync_hint;

	return (int)pkg.is_recovering;
}

static struct dm_dirty_log_type _userspace_type = {
	.name = "userspace",
	.module = THIS_MODULE,
	.ctr = userspace_ctr,
	.dtr = userspace_dtr,
	.presuspend = userspace_presuspend,
	.postsuspend = userspace_postsuspend,
	.resume = userspace_resume,
	.get_region_size = userspace_get_region_size,
	.is_clean = userspace_is_clean,
	.in_sync = userspace_in_sync,
	.flush = userspace_flush,
	.mark_region = userspace_mark_region,
	.clear_region = userspace_clear_region,
	.get_resync_work = userspace_get_resync_work,
	.set_region_sync = userspace_set_region_sync,
	.get_sync_count = userspace_get_sync_count,
	.status = userspace_status,
	.is_remote_recovering = userspace_is_remote_recovering,
};

static int __init userspace_dirty_log_init(void)
{
	int r = 0;

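	/*
	 * Pre-allocate a reserve of 100 flush_entry structs.  mempool
	 * allocations may fall back on this reserve when kmalloc() fails,
	 * which is what allows mark_region to allocate with GFP_NOIO and
	 * never fail.
	 */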
	flush_entry_pool = mempool_create(100, flush_entry_alloc,
					  flush_entry_free, NULL);

	if (!flush_entry_pool) {
		DMWARN("Unable to create flush_entry_pool: No memory.");
		return -ENOMEM;
	}

	r = dm_ulog_tfr_init();
	if (r) {
		DMWARN("Unable to initialize userspace log communications");
		mempool_destroy(flush_entry_pool);
		return r;
	}

	r = dm_dirty_log_type_register(&_userspace_type);
	if (r) {
		DMWARN("Couldn't register userspace dirty log type");
		dm_ulog_tfr_exit();
		mempool_destroy(flush_entry_pool);
		return r;
	}

	DMINFO("version " DM_LOG_USERSPACE_VSN " loaded");
	return 0;
}

static void __exit userspace_dirty_log_exit(void)
{
	dm_dirty_log_type_unregister(&_userspace_type);
	dm_ulog_tfr_exit();
	mempool_destroy(flush_entry_pool);

	DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
	return;
}

module_init(userspace_dirty_log_init);
module_exit(userspace_dirty_log_exit);

MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
v3.15
/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/dm-dirty-log.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "dm-log-userspace-transfer.h"

#define DM_LOG_USERSPACE_VSN "1.3.0"

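/*
 * A single queued mark/clear request.  'type' is the DM_ULOG_* request
 * type (mark or clear region), 'region' is the region it applies to, and
 * 'list' links the entry onto the log's mark_list or clear_list until the
 * next flush sends it to the userspace server.
 */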
struct flush_entry {
	int type;
	region_t region;
	struct list_head list;
};

/*
 * This limit on the number of mark and clear requests is, to a degree,
 * arbitrary.  However, there is some basis for the choice in the limits
 * imposed on the size of the data payload by dm-log-userspace-transfer.c:
 * dm_consult_userspace().
 */
#define MAX_FLUSH_GROUP_COUNT 32
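/*
 * For scale: a full group is MAX_FLUSH_GROUP_COUNT * sizeof(uint64_t) =
 * 32 * 8 = 256 bytes of payload per dm_consult_userspace() call, which
 * is intended to stay comfortably within the transfer layer's limit.
 */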

struct log_c {
	struct dm_target *ti;
	struct dm_dev *log_dev;
	uint32_t region_size;
	region_t region_count;
	uint64_t luid;
	char uuid[DM_UUID_LEN];

	char *usr_argv_str;
	uint32_t usr_argc;

	/*
	 * in_sync_hint gets set when doing is_remote_recovering.  It
	 * represents the first region that needs recovery.  IOW, the
	 * first zero bit of sync_bits.  This can be useful to limit
	 * traffic for calls like is_remote_recovering and get_resync_work,
	 * but take care in its use for anything else.
	 */
	uint64_t in_sync_hint;

	/*
	 * Mark and clear requests are held until a flush is issued
	 * so that we can group, and thereby limit, the amount of
	 * network traffic between kernel and userspace.  The 'flush_lock'
	 * is used to protect these lists.
	 */
	spinlock_t flush_lock;
	struct list_head mark_list;
	struct list_head clear_list;

	/*
	 * Workqueue for flush of clear region requests.
	 */
	struct workqueue_struct *dmlog_wq;
	struct delayed_work flush_log_work;
	atomic_t sched_flush;

	/*
	 * Combine userspace flush and mark requests for efficiency.
	 */
	uint32_t integrated_flush;
};

static mempool_t *flush_entry_pool;

static void *flush_entry_alloc(gfp_t gfp_mask, void *pool_data)
{
	return kmalloc(sizeof(struct flush_entry), gfp_mask);
}

static void flush_entry_free(void *element, void *pool_data)
{
	kfree(element);
}

static int userspace_do_request(struct log_c *lc, const char *uuid,
				int request_type, char *data, size_t data_size,
				char *rdata, size_t *rdata_size)
{
	int r;

	/*
	 * If the server isn't there, -ESRCH is returned,
	 * and we must keep trying until the server is
	 * restored.
	 */
retry:
	r = dm_consult_userspace(uuid, lc->luid, request_type, data,
				 data_size, rdata, rdata_size);

	if (r != -ESRCH)
		return r;

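	/*
	 * The server has gone away, most likely because it was restarted.
	 * Re-issue the log constructor until the server answers, ask it
	 * to resume, and then retry the original request from the top.
	 */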
	DMERR("Userspace log server not found.");
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(2*HZ);
		DMWARN("Attempting to contact userspace log server...");
		r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
					 lc->usr_argv_str,
					 strlen(lc->usr_argv_str) + 1,
					 NULL, NULL);
		if (!r)
			break;
	}
	DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
	r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
				 0, NULL, NULL);
	if (!r)
		goto retry;

	DMERR("Error trying to resume userspace log: %d", r);

	return -ESRCH;
}

static int build_constructor_string(struct dm_target *ti,
				    unsigned argc, char **argv,
				    char **ctr_str)
{
	int i, str_size;
	char *str = NULL;

	*ctr_str = NULL;

	/*
	 * Determine overall size of the string.
	 */
	for (i = 0, str_size = 0; i < argc; i++)
		str_size += strlen(argv[i]) + 1; /* +1 for space between args */

	str_size += 20; /* Max number of chars in a printed u64 number */

	str = kzalloc(str_size, GFP_KERNEL);
	if (!str) {
		DMWARN("Unable to allocate memory for constructor string");
		return -ENOMEM;
	}

	str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
	for (i = 0; i < argc; i++)
		str_size += sprintf(str + str_size, " %s", argv[i]);

	*ctr_str = str;
	return str_size;
}
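/*
 * For example (illustrative values only): a 204800-sector target with
 * argv = { "clustered-disk", "3", "253:4", "1024" } yields the string
 *
 *	"204800 clustered-disk 3 253:4 1024"
 *
 * i.e. the target length followed by the space-joined arguments.
 */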
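/*
 * Delayed-work handler for integrated_flush: issues the DM_ULOG_FLUSH
 * that was deferred when only clear-region requests were pending, and
 * signals a table event if the server reports failure.
 */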
static void do_flush(struct work_struct *work)
{
	int r;
	struct log_c *lc = container_of(work, struct log_c, flush_log_work.work);

	atomic_set(&lc->sched_flush, 0);

	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL);

	if (r)
		dm_table_event(lc->ti->table);
}

/*
 * userspace_ctr
 *
 * argv contains:
 *	<UUID> [integrated_flush] <other args>
 * Where 'other args' are the userspace implementation-specific log
 * arguments.
 *
 * Example:
 *	<UUID> [integrated_flush] clustered-disk <arg count> <log dev>
 *	<region_size> [[no]sync]
 *
 * This module strips off the <UUID> and uses it for identification
 * purposes when communicating with userspace about a log.
 *
 * If integrated_flush is defined, the kernel combines flush
 * and mark requests.
 *
 * The rest of the line, beginning with 'clustered-disk', is passed
 * to the userspace ctr function.
 */
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
			 unsigned argc, char **argv)
{
	int r = 0;
	int str_size;
	char *ctr_str = NULL;
	struct log_c *lc = NULL;
	uint64_t rdata;
	size_t rdata_size = sizeof(rdata);
	char *devices_rdata = NULL;
	size_t devices_rdata_size = DM_NAME_LEN;

	if (argc < 3) {
		DMWARN("Too few arguments to userspace dirty log");
		return -EINVAL;
	}

	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
	if (!lc) {
		DMWARN("Unable to allocate userspace log context.");
		return -ENOMEM;
	}

	/* The ptr value is sufficient for local unique id */
	lc->luid = (unsigned long)lc;

	lc->ti = ti;

	if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
		DMWARN("UUID argument too long.");
		kfree(lc);
		return -EINVAL;
	}

	lc->usr_argc = argc;

	strncpy(lc->uuid, argv[0], DM_UUID_LEN);
	argc--;
	argv++;
	spin_lock_init(&lc->flush_lock);
	INIT_LIST_HEAD(&lc->mark_list);
	INIT_LIST_HEAD(&lc->clear_list);

	if (!strcasecmp(argv[0], "integrated_flush")) {
		lc->integrated_flush = 1;
		argc--;
		argv++;
	}

	str_size = build_constructor_string(ti, argc, argv, &ctr_str);
	if (str_size < 0) {
		kfree(lc);
		return str_size;
	}

	devices_rdata = kzalloc(devices_rdata_size, GFP_KERNEL);
	if (!devices_rdata) {
		DMERR("Failed to allocate memory for device information");
		r = -ENOMEM;
		goto out;
	}

	/*
	 * Send table string and get back any opened device.
	 */
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
				 ctr_str, str_size,
				 devices_rdata, &devices_rdata_size);

	if (r < 0) {
		if (r == -ESRCH)
			DMERR("Userspace log server not found");
		else
			DMERR("Userspace log server failed to create log");
		goto out;
	}

	/* Since the region size does not change, get it now */
	rdata_size = sizeof(rdata);
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
				 NULL, 0, (char *)&rdata, &rdata_size);

	if (r) {
		DMERR("Failed to get region size of dirty log");
		goto out;
	}

	lc->region_size = (uint32_t)rdata;
	lc->region_count = dm_sector_div_up(ti->len, lc->region_size);

	if (devices_rdata_size) {
		if (devices_rdata[devices_rdata_size - 1] != '\0') {
			DMERR("DM_ULOG_CTR device return string not properly terminated");
			r = -EINVAL;
			goto out;
		}
		r = dm_get_device(ti, devices_rdata,
				  dm_table_get_mode(ti->table), &lc->log_dev);
		if (r)
			DMERR("Failed to register %s with device-mapper",
			      devices_rdata);
	}

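	/*
	 * With integrated_flush, a flush for clear-only activity may be
	 * deferred; create the workqueue and delayed work that carry out
	 * that deferred flush (see do_flush() and userspace_flush()).
	 */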
	if (lc->integrated_flush) {
		lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0);
		if (!lc->dmlog_wq) {
			DMERR("couldn't start dmlogd");
			r = -ENOMEM;
			goto out;
		}

		INIT_DELAYED_WORK(&lc->flush_log_work, do_flush);
		atomic_set(&lc->sched_flush, 0);
	}

out:
	kfree(devices_rdata);
	if (r) {
		kfree(lc);
		kfree(ctr_str);
	} else {
		lc->usr_argv_str = ctr_str;
		log->context = lc;
	}

	return r;
}

static void userspace_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	if (lc->integrated_flush) {
		/* flush workqueue */
		if (atomic_read(&lc->sched_flush))
			flush_delayed_work(&lc->flush_log_work);

		destroy_workqueue(lc->dmlog_wq);
	}

	(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
				    NULL, 0, NULL, NULL);

	if (lc->log_dev)
		dm_put_device(lc->ti, lc->log_dev);

	kfree(lc->usr_argv_str);
	kfree(lc);

	return;
}

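/*
 * Suspend/resume hooks, as in earlier versions; postsuspend now first
 * completes any flush that was deferred to the workqueue so that no
 * scheduled work survives suspension.
 */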
static int userspace_presuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
				 NULL, 0, NULL, NULL);

	return r;
}

static int userspace_postsuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	/*
	 * Run planned flush earlier.
	 */
	if (lc->integrated_flush && atomic_read(&lc->sched_flush))
		flush_delayed_work(&lc->flush_log_work);

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
				 NULL, 0, NULL, NULL);

	return r;
}

static int userspace_resume(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	lc->in_sync_hint = 0;
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
				 NULL, 0, NULL, NULL);

	return r;
}

static uint32_t userspace_get_region_size(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	return lc->region_size;
}

/*
 * userspace_is_clean
 *
 * Check whether a region is clean.  If there is any sort of
 * failure when consulting the server, we return not clean.
 *
 * Returns: 1 if clean, 0 otherwise
 */
static int userspace_is_clean(struct dm_dirty_log *log, region_t region)
{
	int r;
	uint64_t region64 = (uint64_t)region;
	int64_t is_clean;
	size_t rdata_size;
	struct log_c *lc = log->context;

	rdata_size = sizeof(is_clean);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
				 (char *)&region64, sizeof(region64),
				 (char *)&is_clean, &rdata_size);

	return (r) ? 0 : (int)is_clean;
}

/*
 * userspace_in_sync
 *
 * Check if the region is in-sync.  If there is any sort
 * of failure when consulting the server, we assume that
 * the region is not in sync.
 *
 * If 'can_block' is not set, return -EWOULDBLOCK immediately
 * rather than consulting the server.
 *
 * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK
 */
static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
			     int can_block)
{
	int r;
	uint64_t region64 = region;
	int64_t in_sync;
	size_t rdata_size;
	struct log_c *lc = log->context;

	/*
	 * We can never respond directly - even if in_sync_hint is
	 * set.  This is because another machine could see a device
	 * failure and mark the region out-of-sync.  If we don't go
	 * to userspace to ask, we might think the region is in-sync
	 * and allow a read to pick up data that is stale.  (This is
	 * very unlikely if a device actually fails; but it is very
	 * likely if a connection to one device from one machine fails.)
	 *
	 * There still might be a problem if the mirror caches the region
	 * state as in-sync... but then this call would not be made.  So,
	 * that is a mirror problem.
	 */
	if (!can_block)
		return -EWOULDBLOCK;

	rdata_size = sizeof(in_sync);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
				 (char *)&region64, sizeof(region64),
				 (char *)&in_sync, &rdata_size);
	return (r) ? 0 : (int)in_sync;
}

static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
{
	int r = 0;
	struct flush_entry *fe;

	list_for_each_entry(fe, flush_list, list) {
		r = userspace_do_request(lc, lc->uuid, fe->type,
					 (char *)&fe->region,
					 sizeof(fe->region),
					 NULL, NULL);
		if (r)
			break;
	}

	return r;
}

static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
			  int flush_with_payload)
{
	int r = 0;
	int count;
	uint32_t type = 0;
	struct flush_entry *fe, *tmp_fe;
	LIST_HEAD(tmp_list);
	uint64_t group[MAX_FLUSH_GROUP_COUNT];

	/*
	 * Group process the requests
	 */
	while (!list_empty(flush_list)) {
		count = 0;

		list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
			group[count] = fe->region;
			count++;

			list_move(&fe->list, &tmp_list);

			type = fe->type;
			if (count >= MAX_FLUSH_GROUP_COUNT)
				break;
		}

		if (flush_with_payload) {
			r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
						 (char *)(group),
						 count * sizeof(uint64_t),
						 NULL, NULL);
			/*
			 * Integrated flush failed.
			 */
			if (r)
				break;
		} else {
			r = userspace_do_request(lc, lc->uuid, type,
						 (char *)(group),
						 count * sizeof(uint64_t),
						 NULL, NULL);
			if (r) {
				/*
				 * Group send failed.  Attempt one-by-one.
				 */
				list_splice_init(&tmp_list, flush_list);
				r = flush_one_by_one(lc, flush_list);
				break;
			}
		}
	}

	/*
	 * Must collect the flush_entry structs that were successfully
	 * processed as a group so that they will be freed by the caller.
	 */
	list_splice_init(&tmp_list, flush_list);

	return r;
}

/*
 * userspace_flush
 *
 * This function is ok to block.
 * The flush happens in two stages.  First, it sends all
 * clear/mark requests that are on the list.  Then it
 * tells the server to commit them.  This gives the
 * server a chance to optimise the commit, instead of
 * doing it for every request.
 *
 * Additionally, we could implement another thread that
 * sends the requests up to the server - reducing the
 * load on flush.  Then the flush would have less in
 * the list and be responsible for the finishing commit.
 *
 * Returns: 0 on success, < 0 on failure
 */
static int userspace_flush(struct dm_dirty_log *log)
{
	int r = 0;
	unsigned long flags;
	struct log_c *lc = log->context;
	LIST_HEAD(mark_list);
	LIST_HEAD(clear_list);
	int mark_list_is_empty;
	int clear_list_is_empty;
	struct flush_entry *fe, *tmp_fe;

	spin_lock_irqsave(&lc->flush_lock, flags);
	list_splice_init(&lc->mark_list, &mark_list);
	list_splice_init(&lc->clear_list, &clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	mark_list_is_empty = list_empty(&mark_list);
	clear_list_is_empty = list_empty(&clear_list);

	if (mark_list_is_empty && clear_list_is_empty)
		return 0;

	r = flush_by_group(lc, &clear_list, 0);
	if (r)
		goto out;

	if (!lc->integrated_flush) {
		r = flush_by_group(lc, &mark_list, 0);
		if (r)
			goto out;
		r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
					 NULL, 0, NULL, NULL);
		goto out;
	}

	/*
	 * Send integrated flush request with mark_list as payload.
	 */
	r = flush_by_group(lc, &mark_list, 1);
	if (r)
		goto out;

	if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) {
		/*
		 * When there are only clear region requests,
		 * we schedule a flush in the future.
		 */
		queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ);
		atomic_set(&lc->sched_flush, 1);
	} else {
		/*
		 * Cancel pending flush because we
		 * have already flushed in mark_region.
		 */
		cancel_delayed_work(&lc->flush_log_work);
		atomic_set(&lc->sched_flush, 0);
	}

out:
	/*
	 * We can safely remove these entries, even after failure.
	 * Calling code will receive an error and will know that
	 * the log facility has failed.
	 */
	list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
		list_del(&fe->list);
		mempool_free(fe, flush_entry_pool);
	}
	list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
		list_del(&fe->list);
		mempool_free(fe, flush_entry_pool);
	}

	if (r)
		dm_table_event(lc->ti->table);

	return r;
}

/*
 * userspace_mark_region
 *
 * This function should avoid blocking unless absolutely required.
 * (Memory allocation is valid for blocking.)
 */
static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
{
	unsigned long flags;
	struct log_c *lc = log->context;
	struct flush_entry *fe;

	/* Wait for an allocation, but _never_ fail */
	fe = mempool_alloc(flush_entry_pool, GFP_NOIO);
	BUG_ON(!fe);

	spin_lock_irqsave(&lc->flush_lock, flags);
	fe->type = DM_ULOG_MARK_REGION;
	fe->region = region;
	list_add(&fe->list, &lc->mark_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	return;
}

/*
 * userspace_clear_region
 *
 * This function must not block.
 * So, the alloc can't block.  In the worst case, it is ok to
 * fail.  It would simply mean we can't clear the region.
 * Does nothing to current sync context, but does mean
 * the region will be re-sync'ed on a reload of the mirror
 * even though it is in-sync.
 */
static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
{
	unsigned long flags;
	struct log_c *lc = log->context;
	struct flush_entry *fe;

	/*
	 * If we fail to allocate, we skip the clearing of
	 * the region.  This doesn't hurt us in any way, except
	 * to cause the region to be resync'ed when the
	 * device is activated next time.
	 */
	fe = mempool_alloc(flush_entry_pool, GFP_ATOMIC);
	if (!fe) {
		DMERR("Failed to allocate memory to clear region.");
		return;
	}

	spin_lock_irqsave(&lc->flush_lock, flags);
	fe->type = DM_ULOG_CLEAR_REGION;
	fe->region = region;
	list_add(&fe->list, &lc->clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	return;
}

/*
 * userspace_get_resync_work
 *
 * Get a region that needs recovery.  It is valid to return
 * an error for this function.
 *
 * Returns: 1 if region filled, 0 if no work, <0 on error
 */
static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
	int r;
	size_t rdata_size;
	struct log_c *lc = log->context;
	struct {
		int64_t i; /* 64-bit for mixed-arch compatibility */
		region_t r;
	} pkg;

	if (lc->in_sync_hint >= lc->region_count)
		return 0;

	rdata_size = sizeof(pkg);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
				 NULL, 0, (char *)&pkg, &rdata_size);

	*region = pkg.r;
	return (r) ? r : (int)pkg.i;
}

/*
 * userspace_set_region_sync
 *
 * Set the sync status of a given region.  This function
 * must not fail.
 */
static void userspace_set_region_sync(struct dm_dirty_log *log,
				      region_t region, int in_sync)
{
	int r;
	struct log_c *lc = log->context;
	struct {
		region_t r;
		int64_t i;
	} pkg;

	pkg.r = region;
	pkg.i = (int64_t)in_sync;

	r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
				 (char *)&pkg, sizeof(pkg), NULL, NULL);

	/*
	 * It would be nice to be able to report failures.
	 * However, it is easy enough to detect and resolve.
	 */
	return;
}

/*
 * userspace_get_sync_count
 *
 * If there is any sort of failure when consulting the server,
 * we assume that the sync count is zero.
 *
 * Returns: sync count on success, 0 on failure
 */
static region_t userspace_get_sync_count(struct dm_dirty_log *log)
{
	int r;
	size_t rdata_size;
	uint64_t sync_count;
	struct log_c *lc = log->context;

	rdata_size = sizeof(sync_count);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
				 NULL, 0, (char *)&sync_count, &rdata_size);

	if (r)
		return 0;

	if (sync_count >= lc->region_count)
		lc->in_sync_hint = lc->region_count;

	return (region_t)sync_count;
}

/*
 * userspace_status
 *
 * Returns: amount of space consumed
 */
static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
			    char *result, unsigned maxlen)
{
	int r = 0;
	char *table_args;
	size_t sz = (size_t)maxlen;
	struct log_c *lc = log->context;

	switch (status_type) {
	case STATUSTYPE_INFO:
		r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
					 NULL, 0, result, &sz);

		if (r) {
			sz = 0;
			DMEMIT("%s 1 COM_FAILURE", log->type->name);
		}
		break;
	case STATUSTYPE_TABLE:
		sz = 0;
		table_args = strchr(lc->usr_argv_str, ' ');
		BUG_ON(!table_args); /* There will always be a ' ' */
		table_args++;

		DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid);
		if (lc->integrated_flush)
			DMEMIT("integrated_flush ");
		DMEMIT("%s ", table_args);
		break;
	}
	return (r) ? 0 : (int)sz;
}

/*
 * userspace_is_remote_recovering
 *
 * Returns: 1 if region recovering, 0 otherwise
 */
static int userspace_is_remote_recovering(struct dm_dirty_log *log,
					  region_t region)
{
	int r;
	uint64_t region64 = region;
	struct log_c *lc = log->context;
	static unsigned long long limit;
	struct {
		int64_t is_recovering;
		uint64_t in_sync_hint;
	} pkg;
	size_t rdata_size = sizeof(pkg);

	/*
	 * Once the mirror has been reported to be in-sync,
	 * it will never again ask for recovery work.  So,
	 * we can safely say there is not a remote machine
	 * recovering if the device is in-sync.  (in_sync_hint
	 * must be reset at resume time.)
	 */
	if (region < lc->in_sync_hint)
		return 0;
	else if (jiffies < limit)
		return 1;

	limit = jiffies + (HZ / 4);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
				 (char *)&region64, sizeof(region64),
				 (char *)&pkg, &rdata_size);
	if (r)
		return 1;

	lc->in_sync_hint = pkg.in_sync_hint;

	return (int)pkg.is_recovering;
}

static struct dm_dirty_log_type _userspace_type = {
	.name = "userspace",
	.module = THIS_MODULE,
	.ctr = userspace_ctr,
	.dtr = userspace_dtr,
	.presuspend = userspace_presuspend,
	.postsuspend = userspace_postsuspend,
	.resume = userspace_resume,
	.get_region_size = userspace_get_region_size,
	.is_clean = userspace_is_clean,
	.in_sync = userspace_in_sync,
	.flush = userspace_flush,
	.mark_region = userspace_mark_region,
	.clear_region = userspace_clear_region,
	.get_resync_work = userspace_get_resync_work,
	.set_region_sync = userspace_set_region_sync,
	.get_sync_count = userspace_get_sync_count,
	.status = userspace_status,
	.is_remote_recovering = userspace_is_remote_recovering,
};

static int __init userspace_dirty_log_init(void)
{
	int r = 0;

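	/*
	 * Pre-allocate a reserve of 100 flush_entry structs.  mempool
	 * allocations may fall back on this reserve when kmalloc() fails,
	 * which is what allows mark_region to allocate with GFP_NOIO and
	 * never fail.
	 */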
	flush_entry_pool = mempool_create(100, flush_entry_alloc,
					  flush_entry_free, NULL);

	if (!flush_entry_pool) {
		DMWARN("Unable to create flush_entry_pool: No memory.");
		return -ENOMEM;
	}

	r = dm_ulog_tfr_init();
	if (r) {
		DMWARN("Unable to initialize userspace log communications");
		mempool_destroy(flush_entry_pool);
		return r;
	}

	r = dm_dirty_log_type_register(&_userspace_type);
	if (r) {
		DMWARN("Couldn't register userspace dirty log type");
		dm_ulog_tfr_exit();
		mempool_destroy(flush_entry_pool);
		return r;
	}

	DMINFO("version " DM_LOG_USERSPACE_VSN " loaded");
	return 0;
}

static void __exit userspace_dirty_log_exit(void)
{
	dm_dirty_log_type_unregister(&_userspace_type);
	dm_ulog_tfr_exit();
	mempool_destroy(flush_entry_pool);

	DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
	return;
}

module_init(userspace_dirty_log_init);
module_exit(userspace_dirty_log_exit);

MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");