/* Linux v3.5.6 */
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots: by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented;
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */

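/*
 * Worked example of the layout: with the default 32-sector (16KB)
 * chunk size, each struct disk_exception is 16 bytes, so one
 * metadata area holds 16384 / 16 = 1024 exceptions and the COW
 * device looks like
 *
 *	chunk 0		header
 *	chunk 1		metadata area 0
 *	chunks 2-1025	data chunks for area 0
 *	chunk 1026	metadata area 1
 *	...
 *
 * i.e. metadata area 'a' lives at chunk 1 + (1024 + 1) * a, which
 * is what area_location() below computes.
 */
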
/*
 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;

struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header.  The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	if (ps->area)
		vfree(ps->area);
	ps->area = NULL;

	if (ps->zero_area)
		vfree(ps->zero_area);
	ps->zero_area = NULL;

	if (ps->header_area)
		vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
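	/*
	 * With .notify.fn left NULL, dm_io() performs the I/O
	 * synchronously, so no completion callback is needed.
	 */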
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_work(&req.work);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

/*
 * Read or write a metadata area.  Remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	int r;
	chunk_t chunk;

	chunk = area_location(ps, ps->current_area);

	r = chunk_io(ps, ps->area, chunk, rw, 0);
	if (r)
		return r;

	return 0;
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied
	 */
	if (!ps->store->chunk_size) {
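		/* bdev_logical_block_size() returns bytes; >> 9 converts to sectors */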
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions, these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &e);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device (the header chunk), we know
		 * that we've hit the end of the exceptions.
		 * Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, callback, callback_context, &full);
		if (r)
			return r;
	}

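	/*
	 * The loop exits with current_area one past the partially
	 * full area just read; step back so it indexes the area
	 * now cached in ps->area.
	 */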
	ps->current_area--;

	return 0;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	if (ps->callbacks)
		vfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
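	/*
	 * One callback slot per exception in an area suffices: the
	 * callback array is always drained before a new area starts
	 * filling.
	 */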
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}

	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * Metadata are valid, but the snapshot is invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
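	/*
	 * For example, with 1024 exceptions per area the stride is 1025,
	 * and any chunk whose offset within the stride is 1 (e.g. chunk
	 * 1026, which holds metadata area 1) is stepped over.
	 */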
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;
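
	/*
	 * Past this point either all pending exceptions have completed
	 * or the area is full, so the accumulated metadata (and every
	 * queued callback) can be committed in one flush.
	 */
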
	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When the current area is empty, move back to the preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find the number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->current_committed - 1 - nr_consecutive,
			       &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, WRITE_FLUSH_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}

/* Linux v6.2 */
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U	/* 16KB */

#define DM_PREFETCH_CHUNKS		12

/*-----------------------------------------------------------------
 * Persistent snapshots: by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented;
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */

/*
 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;

struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header.  The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;
	vfree(ps->zero_area);
	ps->zero_area = NULL;
	vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_opf = opf,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid submit_bio_noacct recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);
	destroy_work_on_stack(&req.work);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

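/*
 * If ps->next_free has landed on a metadata chunk (offset
 * NUM_SNAPSHOT_HDR_CHUNKS within its stride of exceptions_per_area + 1
 * chunks), step past it to the following data chunk.
 */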
static void skip_metadata(struct pstore *ps)
{
	uint32_t stride = ps->exceptions_per_area + 1;
	chunk_t next_free = ps->next_free;
	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
		ps->next_free++;
}

/*
 * Read or write a metadata area.  Remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, blk_opf_t opf)
{
	chunk_t chunk = area_location(ps, ps->current_area);

	return chunk_io(ps, ps->area, chunk, opf, 0);
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area),
			REQ_OP_WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = __ffs(ps->store->chunk_size);
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1);
}

/*
 * Access functions for the disk exceptions, these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
					    uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps_area) + index;
}

static void read_exception(struct pstore *ps, void *ps_area,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, ps_area, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, void *ps_area,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, ps_area, i, &e);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device (the header chunk), we know
		 * that we've hit the end of the exceptions.
		 * Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;
	struct dm_bufio_client *client;
	chunk_t prefetch_area = 0;

	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
					ps->store->chunk_size << SECTOR_SHIFT,
					1, 0, NULL, NULL, 0);

	if (IS_ERR(client))
		return PTR_ERR(client);

	/*
	 * Setup for one current buffer + desired readahead buffers.
	 */
	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		struct dm_buffer *bp;
		void *area;
		chunk_t chunk;

		if (unlikely(prefetch_area < ps->current_area))
			prefetch_area = ps->current_area;

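		/*
		 * Queue reads for up to DM_PREFETCH_CHUNKS metadata areas
		 * ahead of the one being processed, stopping at the end of
		 * the device or if the area counter wraps to zero.
		 */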
		if (DM_PREFETCH_CHUNKS) do {
			chunk_t pf_chunk = area_location(ps, prefetch_area);
			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
				break;
			dm_bufio_prefetch(client, pf_chunk, 1);
			prefetch_area++;
			if (unlikely(!prefetch_area))
				break;
		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);

		chunk = area_location(ps, ps->current_area);

		area = dm_bufio_read(client, chunk, &bp);
		if (IS_ERR(area)) {
			r = PTR_ERR(area);
			goto ret_destroy_bufio;
		}

		r = insert_exceptions(ps, area, callback, callback_context,
				      &full);

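		/*
		 * The partially full area just parsed becomes the working
		 * area: copy it out of the bufio buffer so that later
		 * commits can append to it.
		 */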
		if (!full)
			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);

		dm_bufio_release(bp);

		dm_bufio_forget(client, chunk);

		if (unlikely(r))
			goto ret_destroy_bufio;
	}

	ps->current_area--;

	skip_metadata(ps);

	r = 0;

ret_destroy_bufio:
	dm_bufio_client_destroy(client);

	return r;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	kvfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, new_snapshot;
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = kvcalloc(ps->exceptions_per_area,
				 sizeof(*ps->callbacks), GFP_KERNEL);
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}

	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * Metadata are valid, but the snapshot is invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
	ps->next_free++;
	skip_metadata(ps);

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e, int valid,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

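	/* The caller passes valid == 0 if the COW copy failed; that
	 * invalidates the whole store. */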
	if (!valid)
		ps->valid = 0;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
				 REQ_SYNC))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When the current area is empty, move back to the preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, REQ_OP_READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find the number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->area,
			       ps->current_committed - 1 - nr_consecutive, &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store, char *options)
{
	struct pstore *ps;
	int r;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		DMERR("couldn't start header metadata update thread");
		r = -ENOMEM;
		goto err_workqueue;
	}

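	/*
	 * The only recognised option is 'O' (the "PO" variant emitted in
	 * the table status line), which declares that userspace can handle
	 * the snapshot reporting an "Overflow" state.
	 */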
	if (options) {
		char overflow = toupper(options[0]);
		if (overflow == 'O')
			store->userspace_supports_overflow = true;
		else {
			DMERR("Unsupported persistent store option: %s", options);
			r = -EINVAL;
			goto err_options;
		}
	}

	store->context = ps;

	return 0;

err_options:
	destroy_workqueue(ps->metadata_wq);
err_workqueue:
	kfree(ps);

	return r;
}

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
		       (unsigned long long)store->chunk_size);
		break;
	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}