/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots: by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk-aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk-sized blocks.
 *
 * There is no backward or forward compatibility implemented:
 * snapshots whose disk version differs from the kernel's will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on-disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
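
/*
 * Concretely, if each metadata area holds N exceptions, the COW
 * device is laid out as:
 *
 *	chunk 0:		header
 *	chunk 1:		metadata area 0
 *	chunks 2 .. N+1:	data chunks mapped by area 0
 *	chunk N+2:		metadata area 1
 *	...and so on, one metadata chunk every N+1 chunks.
 */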

/*
 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53
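/* Stored little-endian, the on-disk bytes are 0x53 0x6e 0x41 0x70: "SnAp". */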

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;

struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header.  The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	if (ps->area)
		vfree(ps->area);
	ps->area = NULL;

	if (ps->zero_area)
		vfree(ps->zero_area);
	ps->zero_area = NULL;

	if (ps->header_area)
		vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk-aligned and chunk-sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_work(&req.work);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}
271
272/*
273 * Read or write a metadata area.  Remembering to skip the first
274 * chunk which holds the header.
275 */
276static int area_io(struct pstore *ps, int rw)
277{
278	int r;
279	chunk_t chunk;
280
281	chunk = area_location(ps, ps->current_area);
282
283	r = chunk_io(ps, ps->area, chunk, rw, 0);
284	if (r)
285		return r;
286
287	return 0;
288}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use the default chunk size (or logical_block_size, if larger)
	 * if none was supplied.
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate whether the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &e);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, callback, callback_context, &full);
		if (r)
			return r;
	}

	ps->current_area--;

	return 0;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * The first chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	if (ps->callbacks)
		vfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now that we know the correct chunk_size, complete the
	 * initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}

	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * The metadata is valid, but the snapshot has been invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free chunk, making sure to take
	 * into account the location of the metadata chunks.
	 */
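	/*
	 * Metadata chunks occupy every (exceptions_per_area + 1)th chunk
	 * starting at chunk 1, i.e. chunk indices congruent to 1 modulo
	 * the stride.  sector_div() divides next_free by stride in place
	 * and returns the remainder, so a remainder of 1 means the newly
	 * incremented next_free landed on a metadata chunk and must be
	 * stepped over.
	 */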
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

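/*
 * Scanning backwards from the last committed exception, return the
 * length of the run whose old and new chunks are both consecutive,
 * so the caller can merge that many exceptions in one go.  A return
 * of 0 means there is nothing left to merge; a negative value is an
 * I/O error from rereading a metadata area.
 */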
static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When the current area is empty, move back to the preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find the number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->current_committed - 1 - nr_consecutive,
			       &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, WRITE_FLUSH_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}

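/*
 * For STATUSTYPE_TABLE this appends the store's table suffix, e.g.
 * " P 32" for a persistent store with the default 32-sector chunk;
 * STATUSTYPE_INFO emits nothing.
 */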
static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}