v3.5.6: drivers/md/dm-snap-persistent.c
 
  1/*
  2 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
  3 * Copyright (C) 2006-2008 Red Hat GmbH
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#include "dm-exception-store.h"
  9
 10#include <linux/mm.h>
 11#include <linux/pagemap.h>
 12#include <linux/vmalloc.h>
 13#include <linux/export.h>
 14#include <linux/slab.h>
 15#include <linux/dm-io.h>
 16
 17#define DM_MSG_PREFIX "persistent snapshot"
 18#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
 19
 20/*-----------------------------------------------------------------
 21 * Persistent snapshots, by persistent we mean that the snapshot
 22 * will survive a reboot.
 23 *---------------------------------------------------------------*/
 24
 25/*
 26 * We need to store a record of which parts of the origin have
 27 * been copied to the snapshot device.  The snapshot code
 28 * requires that we copy exception chunks to chunk aligned areas
 29 * of the COW store.  It makes sense, therefore, to store the
 30 * metadata in chunk size blocks.
 31 *
 32 * There is no backward or forward compatibility implemented,
 33 * snapshots with different disk versions than the kernel will
 34 * not be usable.  It is expected that "lvcreate" will blank out
 35 * the start of a fresh COW device before calling the snapshot
 36 * constructor.
 37 *
 38 * The first chunk of the COW device just contains the header.
 39 * After this there is a chunk filled with exception metadata,
 40 * followed by as many exception chunks as can fit in the
 41 * metadata areas.
 42 *
 43 * All on disk structures are in little-endian format.  The end
 44 * of the exceptions info is indicated by an exception with a
 45 * new_chunk of 0, which is invalid since it would point to the
 46 * header chunk.
 47 */
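/*
 * Illustrative sketch (hypothetical user-space C, not part of the driver):
 * the layout arithmetic described above.  Chunk 0 holds the header; each
 * metadata area is followed by exceptions_per_area data chunks, which is
 * exactly what area_location() further down computes.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t example_area_location(uint64_t exceptions_per_area,
				      uint64_t area)
{
	/* one header chunk, then (exceptions_per_area + 1) chunks per area */
	return 1 + (exceptions_per_area + 1) * area;
}

int main(void)
{
	uint64_t epa = 1024;	/* e.g. 16KB chunk / 16-byte disk_exception */

	assert(example_area_location(epa, 0) == 1);	/* first metadata area */
	assert(example_area_location(epa, 1) == 1026);	/* after 1024 data chunks */
	return 0;
}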
 48
 49/*
 50 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it.
 51 */
 52#define SNAP_MAGIC 0x70416e53
 53
 54/*
 55 * The on-disk version of the metadata.
 56 */
 57#define SNAPSHOT_DISK_VERSION 1
 58
 59#define NUM_SNAPSHOT_HDR_CHUNKS 1
 60
 61struct disk_header {
 62	__le32 magic;
 63
 64	/*
 65	 * Is this snapshot valid?  There is no way of recovering
 66	 * an invalid snapshot.
 67	 */
 68	__le32 valid;
 69
 70	/*
 71	 * Simple, incrementing version. No backward
 72	 * compatibility.
 73	 */
 74	__le32 version;
 75
 76	/* In sectors */
 77	__le32 chunk_size;
 78} __packed;
 79
 80struct disk_exception {
 81	__le64 old_chunk;
 82	__le64 new_chunk;
 83} __packed;
 84
 85struct core_exception {
 86	uint64_t old_chunk;
 87	uint64_t new_chunk;
 88};
 89
 90struct commit_callback {
 91	void (*callback)(void *, int success);
 92	void *context;
 93};
 94
 95/*
 96 * The top level structure for a persistent exception store.
 97 */
 98struct pstore {
 99	struct dm_exception_store *store;
100	int version;
101	int valid;
102	uint32_t exceptions_per_area;
103
104	/*
105	 * Now that we have an asynchronous kcopyd there is no
 106	 * need for large chunk sizes, so it won't hurt to have a
 107	 * whole chunk's worth of metadata in memory at once.
108	 */
109	void *area;
110
111	/*
112	 * An area of zeros used to clear the next area.
113	 */
114	void *zero_area;
115
116	/*
117	 * An area used for header. The header can be written
118	 * concurrently with metadata (when invalidating the snapshot),
119	 * so it needs a separate buffer.
120	 */
121	void *header_area;
122
123	/*
124	 * Used to keep track of which metadata area the data in
125	 * 'chunk' refers to.
126	 */
127	chunk_t current_area;
128
129	/*
130	 * The next free chunk for an exception.
131	 *
132	 * When creating exceptions, all the chunks here and above are
133	 * free.  It holds the next chunk to be allocated.  On rare
134	 * occasions (e.g. after a system crash) holes can be left in
135	 * the exception store because chunks can be committed out of
136	 * order.
137	 *
138	 * When merging exceptions, it does not necessarily mean all the
139	 * chunks here and above are free.  It holds the value it would
140	 * have held if all chunks had been committed in order of
141	 * allocation.  Consequently the value may occasionally be
142	 * slightly too low, but since it's only used for 'status' and
 143	 * it can never reach its minimum value too early, this doesn't
144	 * matter.
145	 */
146
147	chunk_t next_free;
148
149	/*
150	 * The index of next free exception in the current
151	 * metadata area.
152	 */
153	uint32_t current_committed;
154
155	atomic_t pending_count;
156	uint32_t callback_count;
157	struct commit_callback *callbacks;
158	struct dm_io_client *io_client;
159
160	struct workqueue_struct *metadata_wq;
161};
162
163static int alloc_area(struct pstore *ps)
164{
165	int r = -ENOMEM;
166	size_t len;
167
168	len = ps->store->chunk_size << SECTOR_SHIFT;
169
170	/*
171	 * Allocate the chunk_size block of memory that will hold
172	 * a single metadata area.
173	 */
174	ps->area = vmalloc(len);
175	if (!ps->area)
176		goto err_area;
177
178	ps->zero_area = vzalloc(len);
179	if (!ps->zero_area)
180		goto err_zero_area;
181
182	ps->header_area = vmalloc(len);
183	if (!ps->header_area)
184		goto err_header_area;
185
186	return 0;
187
188err_header_area:
189	vfree(ps->zero_area);
190
191err_zero_area:
192	vfree(ps->area);
193
194err_area:
195	return r;
196}
197
198static void free_area(struct pstore *ps)
199{
200	if (ps->area)
201		vfree(ps->area);
202	ps->area = NULL;
203
204	if (ps->zero_area)
205		vfree(ps->zero_area);
206	ps->zero_area = NULL;
207
208	if (ps->header_area)
209		vfree(ps->header_area);
210	ps->header_area = NULL;
211}
212
213struct mdata_req {
214	struct dm_io_region *where;
215	struct dm_io_request *io_req;
216	struct work_struct work;
217	int result;
218};
219
220static void do_metadata(struct work_struct *work)
221{
222	struct mdata_req *req = container_of(work, struct mdata_req, work);
223
224	req->result = dm_io(req->io_req, 1, req->where, NULL);
225}
226
227/*
228 * Read or write a chunk aligned and sized block of data from a device.
229 */
230static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
231		    int metadata)
232{
233	struct dm_io_region where = {
234		.bdev = dm_snap_cow(ps->store->snap)->bdev,
235		.sector = ps->store->chunk_size * chunk,
236		.count = ps->store->chunk_size,
237	};
238	struct dm_io_request io_req = {
239		.bi_rw = rw,
240		.mem.type = DM_IO_VMA,
241		.mem.ptr.vma = area,
242		.client = ps->io_client,
243		.notify.fn = NULL,
244	};
245	struct mdata_req req;
246
247	if (!metadata)
248		return dm_io(&io_req, 1, &where, NULL);
249
250	req.where = &where;
251	req.io_req = &io_req;
252
253	/*
254	 * Issue the synchronous I/O from a different thread
255	 * to avoid generic_make_request recursion.
256	 */
257	INIT_WORK_ONSTACK(&req.work, do_metadata);
258	queue_work(ps->metadata_wq, &req.work);
 259	flush_work(&req.work);
260
261	return req.result;
262}
263
264/*
265 * Convert a metadata area index to a chunk index.
266 */
267static chunk_t area_location(struct pstore *ps, chunk_t area)
268{
269	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
270}
 271
272/*
 273 * Read or write a metadata area, remembering to skip the first
 274 * chunk, which holds the header.
275 */
276static int area_io(struct pstore *ps, int rw)
277{
278	int r;
279	chunk_t chunk;
280
281	chunk = area_location(ps, ps->current_area);
282
283	r = chunk_io(ps, ps->area, chunk, rw, 0);
284	if (r)
285		return r;
286
287	return 0;
288}
289
290static void zero_memory_area(struct pstore *ps)
291{
292	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
293}
294
295static int zero_disk_area(struct pstore *ps, chunk_t area)
296{
 297	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
298}
299
300static int read_header(struct pstore *ps, int *new_snapshot)
301{
302	int r;
303	struct disk_header *dh;
304	unsigned chunk_size;
305	int chunk_size_supplied = 1;
306	char *chunk_err;
307
308	/*
309	 * Use default chunk size (or logical_block_size, if larger)
310	 * if none supplied
311	 */
312	if (!ps->store->chunk_size) {
313		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
314		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
315					    bdev) >> 9);
316		ps->store->chunk_mask = ps->store->chunk_size - 1;
317		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
318		chunk_size_supplied = 0;
319	}
320
321	ps->io_client = dm_io_client_create();
322	if (IS_ERR(ps->io_client))
323		return PTR_ERR(ps->io_client);
324
325	r = alloc_area(ps);
326	if (r)
327		return r;
328
329	r = chunk_io(ps, ps->header_area, 0, READ, 1);
330	if (r)
331		goto bad;
332
333	dh = ps->header_area;
334
335	if (le32_to_cpu(dh->magic) == 0) {
336		*new_snapshot = 1;
337		return 0;
338	}
339
340	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
341		DMWARN("Invalid or corrupt snapshot");
342		r = -ENXIO;
343		goto bad;
344	}
345
346	*new_snapshot = 0;
347	ps->valid = le32_to_cpu(dh->valid);
348	ps->version = le32_to_cpu(dh->version);
349	chunk_size = le32_to_cpu(dh->chunk_size);
350
351	if (ps->store->chunk_size == chunk_size)
352		return 0;
353
354	if (chunk_size_supplied)
355		DMWARN("chunk size %u in device metadata overrides "
356		       "table chunk size of %u.",
357		       chunk_size, ps->store->chunk_size);
358
359	/* We had a bogus chunk_size. Fix stuff up. */
360	free_area(ps);
361
362	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
363					      &chunk_err);
364	if (r) {
365		DMERR("invalid on-disk chunk size %u: %s.",
366		      chunk_size, chunk_err);
367		return r;
368	}
369
370	r = alloc_area(ps);
371	return r;
372
373bad:
374	free_area(ps);
375	return r;
376}
377
378static int write_header(struct pstore *ps)
379{
380	struct disk_header *dh;
381
382	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
383
384	dh = ps->header_area;
385	dh->magic = cpu_to_le32(SNAP_MAGIC);
386	dh->valid = cpu_to_le32(ps->valid);
387	dh->version = cpu_to_le32(ps->version);
388	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
389
390	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
391}
392
393/*
 394 * Access functions for the disk exceptions; these do the endian conversions.
395 */
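/*
 * Illustrative user-space analogue (hypothetical, glibc, not the kernel
 * API): the on-disk fields are little-endian, so a big-endian host must
 * byte-swap on access.  le64toh()/htole64() from <endian.h> play the role
 * of le64_to_cpu()/cpu_to_le64() used below.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t on_disk = htole64(42);	/* stored little-endian */

	printf("old_chunk = %llu\n", (unsigned long long)le64toh(on_disk));
	return 0;
}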
 396static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
397{
398	BUG_ON(index >= ps->exceptions_per_area);
399
400	return ((struct disk_exception *) ps->area) + index;
401}
402
403static void read_exception(struct pstore *ps,
404			   uint32_t index, struct core_exception *result)
405{
406	struct disk_exception *de = get_exception(ps, index);
407
408	/* copy it */
409	result->old_chunk = le64_to_cpu(de->old_chunk);
410	result->new_chunk = le64_to_cpu(de->new_chunk);
411}
412
413static void write_exception(struct pstore *ps,
414			    uint32_t index, struct core_exception *e)
415{
416	struct disk_exception *de = get_exception(ps, index);
417
418	/* copy it */
419	de->old_chunk = cpu_to_le64(e->old_chunk);
420	de->new_chunk = cpu_to_le64(e->new_chunk);
421}
422
423static void clear_exception(struct pstore *ps, uint32_t index)
424{
425	struct disk_exception *de = get_exception(ps, index);
426
427	/* clear it */
428	de->old_chunk = 0;
429	de->new_chunk = 0;
430}
431
432/*
433 * Registers the exceptions that are present in the current area.
434 * 'full' is filled in to indicate if the area has been
435 * filled.
436 */
437static int insert_exceptions(struct pstore *ps,
438			     int (*callback)(void *callback_context,
439					     chunk_t old, chunk_t new),
440			     void *callback_context,
441			     int *full)
442{
443	int r;
444	unsigned int i;
445	struct core_exception e;
446
447	/* presume the area is full */
448	*full = 1;
449
450	for (i = 0; i < ps->exceptions_per_area; i++) {
451		read_exception(ps, i, &e);
452
453		/*
454		 * If the new_chunk is pointing at the start of
455		 * the COW device, where the first metadata area
 456		 * is, we know that we've hit the end of the
457		 * exceptions.  Therefore the area is not full.
458		 */
459		if (e.new_chunk == 0LL) {
460			ps->current_committed = i;
461			*full = 0;
462			break;
463		}
464
465		/*
466		 * Keep track of the start of the free chunks.
467		 */
468		if (ps->next_free <= e.new_chunk)
469			ps->next_free = e.new_chunk + 1;
470
471		/*
472		 * Otherwise we add the exception to the snapshot.
473		 */
474		r = callback(callback_context, e.old_chunk, e.new_chunk);
475		if (r)
476			return r;
477	}
478
479	return 0;
480}
481
482static int read_exceptions(struct pstore *ps,
483			   int (*callback)(void *callback_context, chunk_t old,
484					   chunk_t new),
485			   void *callback_context)
486{
 487	int r, full = 1;
488
489	/*
 490	 * Keep reading chunks and inserting exceptions until
491	 * we find a partially full area.
492	 */
493	for (ps->current_area = 0; full; ps->current_area++) {
494		r = area_io(ps, READ);
495		if (r)
 496			return r;
497
498		r = insert_exceptions(ps, callback, callback_context, &full);
499		if (r)
 500			return r;
501	}
502
503	ps->current_area--;
504
 505	return 0;
506}
507
508static struct pstore *get_info(struct dm_exception_store *store)
509{
510	return (struct pstore *) store->context;
511}
512
513static void persistent_usage(struct dm_exception_store *store,
514			     sector_t *total_sectors,
515			     sector_t *sectors_allocated,
516			     sector_t *metadata_sectors)
517{
518	struct pstore *ps = get_info(store);
519
520	*sectors_allocated = ps->next_free * store->chunk_size;
521	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
522
523	/*
524	 * First chunk is the fixed header.
525	 * Then there are (ps->current_area + 1) metadata chunks, each one
526	 * separated from the next by ps->exceptions_per_area data chunks.
527	 */
528	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
529			    store->chunk_size;
530}
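/*
 * Worked example (illustrative numbers): with the default 32-sector (16KB)
 * chunks, each metadata area holds 16384 / sizeof(struct disk_exception)
 * = 1024 exceptions.  If current_area == 2, metadata_sectors comes to
 * (2 + 1 + 1) * 32 = 128 sectors: the header chunk plus three metadata
 * chunks.
 */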
531
532static void persistent_dtr(struct dm_exception_store *store)
533{
534	struct pstore *ps = get_info(store);
535
536	destroy_workqueue(ps->metadata_wq);
537
538	/* Created in read_header */
539	if (ps->io_client)
540		dm_io_client_destroy(ps->io_client);
541	free_area(ps);
542
543	/* Allocated in persistent_read_metadata */
544	if (ps->callbacks)
545		vfree(ps->callbacks);
546
547	kfree(ps);
548}
549
550static int persistent_read_metadata(struct dm_exception_store *store,
551				    int (*callback)(void *callback_context,
552						    chunk_t old, chunk_t new),
553				    void *callback_context)
554{
555	int r, uninitialized_var(new_snapshot);
556	struct pstore *ps = get_info(store);
557
558	/*
559	 * Read the snapshot header.
560	 */
561	r = read_header(ps, &new_snapshot);
562	if (r)
563		return r;
564
565	/*
566	 * Now we know correct chunk_size, complete the initialisation.
567	 */
568	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
569				  sizeof(struct disk_exception);
570	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
571				   sizeof(*ps->callbacks));
572	if (!ps->callbacks)
573		return -ENOMEM;
574
575	/*
576	 * Do we need to setup a new snapshot ?
577	 */
578	if (new_snapshot) {
579		r = write_header(ps);
580		if (r) {
581			DMWARN("write_header failed");
582			return r;
583		}
584
585		ps->current_area = 0;
586		zero_memory_area(ps);
587		r = zero_disk_area(ps, 0);
588		if (r)
589			DMWARN("zero_disk_area(0) failed");
590		return r;
591	}
592	/*
593	 * Sanity checks.
594	 */
595	if (ps->version != SNAPSHOT_DISK_VERSION) {
596		DMWARN("unable to handle snapshot disk version %d",
597		       ps->version);
598		return -EINVAL;
599	}
600
601	/*
602	 * Metadata are valid, but snapshot is invalidated
603	 */
604	if (!ps->valid)
605		return 1;
606
607	/*
608	 * Read the metadata.
609	 */
610	r = read_exceptions(ps, callback, callback_context);
611
612	return r;
613}
614
615static int persistent_prepare_exception(struct dm_exception_store *store,
616					struct dm_exception *e)
617{
618	struct pstore *ps = get_info(store);
619	uint32_t stride;
620	chunk_t next_free;
621	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
622
623	/* Is there enough room ? */
624	if (size < ((ps->next_free + 1) * store->chunk_size))
625		return -ENOSPC;
626
627	e->new_chunk = ps->next_free;
628
629	/*
 630	 * Move on to the next free chunk, making sure to take
631	 * into account the location of the metadata chunks.
632	 */
633	stride = (ps->exceptions_per_area + 1);
634	next_free = ++ps->next_free;
635	if (sector_div(next_free, stride) == 1)
636		ps->next_free++;
637
638	atomic_inc(&ps->pending_count);
639	return 0;
640}
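/*
 * Illustrative sketch (hypothetical user-space C) of the sector_div() test
 * above: chunk indices congruent to 1 modulo (exceptions_per_area + 1) are
 * metadata areas (see area_location()), so the allocator steps over them.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t example_skip_metadata(uint64_t next_free, uint64_t epa)
{
	uint64_t stride = epa + 1;

	if (next_free % stride == 1)	/* would land on a metadata area */
		next_free++;
	return next_free;
}

int main(void)
{
	uint64_t epa = 1024;

	assert(example_skip_metadata(1025, epa) == 1025);	/* data chunk */
	assert(example_skip_metadata(1026, epa) == 1027);	/* area 1 skipped */
	return 0;
}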
641
642static void persistent_commit_exception(struct dm_exception_store *store,
643					struct dm_exception *e,
644					void (*callback) (void *, int success),
645					void *callback_context)
646{
647	unsigned int i;
648	struct pstore *ps = get_info(store);
649	struct core_exception ce;
650	struct commit_callback *cb;
 651	struct commit_callback *cb;
652	ce.old_chunk = e->old_chunk;
653	ce.new_chunk = e->new_chunk;
654	write_exception(ps, ps->current_committed++, &ce);
655
656	/*
657	 * Add the callback to the back of the array.  This code
658	 * is the only place where the callback array is
659	 * manipulated, and we know that it will never be called
660	 * multiple times concurrently.
661	 */
662	cb = ps->callbacks + ps->callback_count++;
663	cb->callback = callback;
664	cb->context = callback_context;
665
666	/*
667	 * If there are exceptions in flight and we have not yet
668	 * filled this metadata area there's nothing more to do.
669	 */
670	if (!atomic_dec_and_test(&ps->pending_count) &&
671	    (ps->current_committed != ps->exceptions_per_area))
672		return;
673
674	/*
675	 * If we completely filled the current area, then wipe the next one.
676	 */
677	if ((ps->current_committed == ps->exceptions_per_area) &&
678	    zero_disk_area(ps, ps->current_area + 1))
679		ps->valid = 0;
680
681	/*
682	 * Commit exceptions to disk.
683	 */
 684	if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
685		ps->valid = 0;
686
687	/*
688	 * Advance to the next area if this one is full.
689	 */
690	if (ps->current_committed == ps->exceptions_per_area) {
691		ps->current_committed = 0;
692		ps->current_area++;
693		zero_memory_area(ps);
694	}
695
696	for (i = 0; i < ps->callback_count; i++) {
697		cb = ps->callbacks + i;
698		cb->callback(cb->context, ps->valid);
699	}
700
701	ps->callback_count = 0;
702}
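/*
 * The WRITE_FLUSH_FUA used above carries flush + FUA semantics: the device
 * cache is flushed and the metadata write itself reaches stable media, so
 * committed exceptions are durable before the commit callbacks in
 * persistent_commit_exception() are invoked.
 */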
703
704static int persistent_prepare_merge(struct dm_exception_store *store,
705				    chunk_t *last_old_chunk,
706				    chunk_t *last_new_chunk)
707{
708	struct pstore *ps = get_info(store);
709	struct core_exception ce;
710	int nr_consecutive;
711	int r;
712
713	/*
714	 * When current area is empty, move back to preceding area.
715	 */
716	if (!ps->current_committed) {
717		/*
718		 * Have we finished?
719		 */
720		if (!ps->current_area)
721			return 0;
722
723		ps->current_area--;
724		r = area_io(ps, READ);
725		if (r < 0)
726			return r;
727		ps->current_committed = ps->exceptions_per_area;
728	}
729
730	read_exception(ps, ps->current_committed - 1, &ce);
731	*last_old_chunk = ce.old_chunk;
732	*last_new_chunk = ce.new_chunk;
733
734	/*
735	 * Find number of consecutive chunks within the current area,
736	 * working backwards.
737	 */
738	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
739	     nr_consecutive++) {
740		read_exception(ps, ps->current_committed - 1 - nr_consecutive,
741			       &ce);
742		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
743		    ce.new_chunk != *last_new_chunk - nr_consecutive)
744			break;
745	}
746
747	return nr_consecutive;
748}
749
750static int persistent_commit_merge(struct dm_exception_store *store,
751				   int nr_merged)
752{
753	int r, i;
754	struct pstore *ps = get_info(store);
755
756	BUG_ON(nr_merged > ps->current_committed);
757
758	for (i = 0; i < nr_merged; i++)
759		clear_exception(ps, ps->current_committed - 1 - i);
760
761	r = area_io(ps, WRITE_FLUSH_FUA);
762	if (r < 0)
763		return r;
764
765	ps->current_committed -= nr_merged;
766
767	/*
768	 * At this stage, only persistent_usage() uses ps->next_free, so
769	 * we make no attempt to keep ps->next_free strictly accurate
770	 * as exceptions may have been committed out-of-order originally.
 771	 * Once a snapshot has entered the merging state, we set it to the value it
772	 * would have held had all the exceptions been committed in order.
773	 *
774	 * ps->current_area does not get reduced by prepare_merge() until
775	 * after commit_merge() has removed the nr_merged previous exceptions.
776	 */
777	ps->next_free = area_location(ps, ps->current_area) +
778			ps->current_committed + 1;
779
780	return 0;
781}
782
783static void persistent_drop_snapshot(struct dm_exception_store *store)
784{
785	struct pstore *ps = get_info(store);
786
787	ps->valid = 0;
788	if (write_header(ps))
789		DMWARN("write header failed");
790}
791
792static int persistent_ctr(struct dm_exception_store *store,
793			  unsigned argc, char **argv)
794{
 795	struct pstore *ps;
796
797	/* allocate the pstore */
798	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
799	if (!ps)
800		return -ENOMEM;
801
802	ps->store = store;
803	ps->valid = 1;
804	ps->version = SNAPSHOT_DISK_VERSION;
805	ps->area = NULL;
806	ps->zero_area = NULL;
807	ps->header_area = NULL;
808	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
809	ps->current_committed = 0;
810
811	ps->callback_count = 0;
812	atomic_set(&ps->pending_count, 0);
813	ps->callbacks = NULL;
814
815	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
816	if (!ps->metadata_wq) {
817		kfree(ps);
818		DMERR("couldn't start header metadata update thread");
 819		return -ENOMEM;
820	}
821
822	store->context = ps;
823
 824	return 0;
825}
826
827static unsigned persistent_status(struct dm_exception_store *store,
828				  status_type_t status, char *result,
829				  unsigned maxlen)
830{
831	unsigned sz = 0;
832
833	switch (status) {
834	case STATUSTYPE_INFO:
835		break;
836	case STATUSTYPE_TABLE:
 837		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
838	}
839
840	return sz;
841}
842
843static struct dm_exception_store_type _persistent_type = {
844	.name = "persistent",
845	.module = THIS_MODULE,
846	.ctr = persistent_ctr,
847	.dtr = persistent_dtr,
848	.read_metadata = persistent_read_metadata,
849	.prepare_exception = persistent_prepare_exception,
850	.commit_exception = persistent_commit_exception,
851	.prepare_merge = persistent_prepare_merge,
852	.commit_merge = persistent_commit_merge,
853	.drop_snapshot = persistent_drop_snapshot,
854	.usage = persistent_usage,
855	.status = persistent_status,
856};
857
858static struct dm_exception_store_type _persistent_compat_type = {
859	.name = "P",
860	.module = THIS_MODULE,
861	.ctr = persistent_ctr,
862	.dtr = persistent_dtr,
863	.read_metadata = persistent_read_metadata,
864	.prepare_exception = persistent_prepare_exception,
865	.commit_exception = persistent_commit_exception,
866	.prepare_merge = persistent_prepare_merge,
867	.commit_merge = persistent_commit_merge,
868	.drop_snapshot = persistent_drop_snapshot,
869	.usage = persistent_usage,
870	.status = persistent_status,
871};
872
873int dm_persistent_snapshot_init(void)
874{
875	int r;
876
877	r = dm_exception_store_type_register(&_persistent_type);
878	if (r) {
879		DMERR("Unable to register persistent exception store type");
880		return r;
881	}
882
883	r = dm_exception_store_type_register(&_persistent_compat_type);
884	if (r) {
885		DMERR("Unable to register old-style persistent exception "
886		      "store type");
887		dm_exception_store_type_unregister(&_persistent_type);
888		return r;
889	}
890
891	return r;
892}
893
894void dm_persistent_snapshot_exit(void)
895{
896	dm_exception_store_type_unregister(&_persistent_type);
897	dm_exception_store_type_unregister(&_persistent_compat_type);
898}
v6.13.7: drivers/md/dm-snap-persistent.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
  4 * Copyright (C) 2006-2008 Red Hat GmbH
  5 *
  6 * This file is released under the GPL.
  7 */
  8
  9#include "dm-exception-store.h"
 10
 11#include <linux/ctype.h>
 12#include <linux/mm.h>
 13#include <linux/pagemap.h>
 14#include <linux/vmalloc.h>
 15#include <linux/export.h>
 16#include <linux/slab.h>
 17#include <linux/dm-io.h>
 18#include <linux/dm-bufio.h>
 19
 20#define DM_MSG_PREFIX "persistent snapshot"
 21#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U	/* 16KB */
 22
 23#define DM_PREFETCH_CHUNKS		12
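/*
 * Number of metadata chunks read ahead through dm-bufio while loading
 * exceptions in read_exceptions() below.
 */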
 24
 25/*
 26 *---------------------------------------------------------------
 27 * Persistent snapshots, by persistent we mean that the snapshot
 28 * will survive a reboot.
 29 *---------------------------------------------------------------
 30 */
 31
 32/*
 33 * We need to store a record of which parts of the origin have
 34 * been copied to the snapshot device.  The snapshot code
 35 * requires that we copy exception chunks to chunk aligned areas
 36 * of the COW store.  It makes sense, therefore, to store the
 37 * metadata in chunk size blocks.
 38 *
 39 * There is no backward or forward compatibility implemented,
 40 * snapshots with different disk versions than the kernel will
 41 * not be usable.  It is expected that "lvcreate" will blank out
 42 * the start of a fresh COW device before calling the snapshot
 43 * constructor.
 44 *
 45 * The first chunk of the COW device just contains the header.
 46 * After this there is a chunk filled with exception metadata,
 47 * followed by as many exception chunks as can fit in the
 48 * metadata areas.
 49 *
 50 * All on disk structures are in little-endian format.  The end
 51 * of the exceptions info is indicated by an exception with a
 52 * new_chunk of 0, which is invalid since it would point to the
 53 * header chunk.
 54 */
 55
 56/*
 57 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it.
 58 */
 59#define SNAP_MAGIC 0x70416e53
 60
 61/*
 62 * The on-disk version of the metadata.
 63 */
 64#define SNAPSHOT_DISK_VERSION 1
 65
 66#define NUM_SNAPSHOT_HDR_CHUNKS 1
 67
 68struct disk_header {
 69	__le32 magic;
 70
 71	/*
 72	 * Is this snapshot valid?  There is no way of recovering
 73	 * an invalid snapshot.
 74	 */
 75	__le32 valid;
 76
 77	/*
 78	 * Simple, incrementing version. No backward
 79	 * compatibility.
 80	 */
 81	__le32 version;
 82
 83	/* In sectors */
 84	__le32 chunk_size;
 85} __packed;
 86
 87struct disk_exception {
 88	__le64 old_chunk;
 89	__le64 new_chunk;
 90} __packed;
 91
 92struct core_exception {
 93	uint64_t old_chunk;
 94	uint64_t new_chunk;
 95};
 96
 97struct commit_callback {
 98	void (*callback)(void *ref, int success);
 99	void *context;
100};
101
102/*
103 * The top level structure for a persistent exception store.
104 */
105struct pstore {
106	struct dm_exception_store *store;
107	int version;
108	int valid;
109	uint32_t exceptions_per_area;
110
111	/*
112	 * Now that we have an asynchronous kcopyd there is no
 113	 * need for large chunk sizes, so it won't hurt to have a
 114	 * whole chunk's worth of metadata in memory at once.
115	 */
116	void *area;
117
118	/*
119	 * An area of zeros used to clear the next area.
120	 */
121	void *zero_area;
122
123	/*
124	 * An area used for header. The header can be written
125	 * concurrently with metadata (when invalidating the snapshot),
126	 * so it needs a separate buffer.
127	 */
128	void *header_area;
129
130	/*
131	 * Used to keep track of which metadata area the data in
132	 * 'chunk' refers to.
133	 */
134	chunk_t current_area;
135
136	/*
137	 * The next free chunk for an exception.
138	 *
139	 * When creating exceptions, all the chunks here and above are
140	 * free.  It holds the next chunk to be allocated.  On rare
141	 * occasions (e.g. after a system crash) holes can be left in
142	 * the exception store because chunks can be committed out of
143	 * order.
144	 *
145	 * When merging exceptions, it does not necessarily mean all the
146	 * chunks here and above are free.  It holds the value it would
147	 * have held if all chunks had been committed in order of
148	 * allocation.  Consequently the value may occasionally be
149	 * slightly too low, but since it's only used for 'status' and
 150	 * it can never reach its minimum value too early, this doesn't
151	 * matter.
152	 */
153
154	chunk_t next_free;
155
156	/*
157	 * The index of next free exception in the current
158	 * metadata area.
159	 */
160	uint32_t current_committed;
161
162	atomic_t pending_count;
163	uint32_t callback_count;
164	struct commit_callback *callbacks;
165	struct dm_io_client *io_client;
166
167	struct workqueue_struct *metadata_wq;
168};
169
170static int alloc_area(struct pstore *ps)
171{
172	int r = -ENOMEM;
173	size_t len;
174
175	len = ps->store->chunk_size << SECTOR_SHIFT;
176
177	/*
178	 * Allocate the chunk_size block of memory that will hold
179	 * a single metadata area.
180	 */
181	ps->area = vmalloc(len);
182	if (!ps->area)
183		goto err_area;
184
185	ps->zero_area = vzalloc(len);
186	if (!ps->zero_area)
187		goto err_zero_area;
188
189	ps->header_area = vmalloc(len);
190	if (!ps->header_area)
191		goto err_header_area;
192
193	return 0;
194
195err_header_area:
196	vfree(ps->zero_area);
197
198err_zero_area:
199	vfree(ps->area);
200
201err_area:
202	return r;
203}
204
205static void free_area(struct pstore *ps)
206{
 207	vfree(ps->area);
208	ps->area = NULL;
 209	vfree(ps->zero_area);
210	ps->zero_area = NULL;
 211	vfree(ps->header_area);
212	ps->header_area = NULL;
213}
214
215struct mdata_req {
216	struct dm_io_region *where;
217	struct dm_io_request *io_req;
218	struct work_struct work;
219	int result;
220};
221
222static void do_metadata(struct work_struct *work)
223{
224	struct mdata_req *req = container_of(work, struct mdata_req, work);
225
226	req->result = dm_io(req->io_req, 1, req->where, NULL, IOPRIO_DEFAULT);
227}
228
229/*
230 * Read or write a chunk aligned and sized block of data from a device.
231 */
232static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
233		    int metadata)
234{
235	struct dm_io_region where = {
236		.bdev = dm_snap_cow(ps->store->snap)->bdev,
237		.sector = ps->store->chunk_size * chunk,
238		.count = ps->store->chunk_size,
239	};
240	struct dm_io_request io_req = {
241		.bi_opf = opf,
242		.mem.type = DM_IO_VMA,
243		.mem.ptr.vma = area,
244		.client = ps->io_client,
245		.notify.fn = NULL,
246	};
247	struct mdata_req req;
248
249	if (!metadata)
250		return dm_io(&io_req, 1, &where, NULL, IOPRIO_DEFAULT);
251
252	req.where = &where;
253	req.io_req = &io_req;
254
255	/*
256	 * Issue the synchronous I/O from a different thread
257	 * to avoid submit_bio_noacct recursion.
258	 */
259	INIT_WORK_ONSTACK(&req.work, do_metadata);
260	queue_work(ps->metadata_wq, &req.work);
261	flush_workqueue(ps->metadata_wq);
262	destroy_work_on_stack(&req.work);
263
264	return req.result;
265}
266
267/*
268 * Convert a metadata area index to a chunk index.
269 */
270static chunk_t area_location(struct pstore *ps, chunk_t area)
271{
272	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
273}
274
275static void skip_metadata(struct pstore *ps)
276{
277	uint32_t stride = ps->exceptions_per_area + 1;
278	chunk_t next_free = ps->next_free;
279
280	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
281		ps->next_free++;
282}
283
284/*
 285 * Read or write a metadata area, remembering to skip the first
 286 * chunk, which holds the header.
287 */
288static int area_io(struct pstore *ps, blk_opf_t opf)
289{
 290	chunk_t chunk = area_location(ps, ps->current_area);
291
 292	return chunk_io(ps, ps->area, chunk, opf, 0);
293}
294
295static void zero_memory_area(struct pstore *ps)
296{
297	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
298}
299
300static int zero_disk_area(struct pstore *ps, chunk_t area)
301{
302	return chunk_io(ps, ps->zero_area, area_location(ps, area),
303			REQ_OP_WRITE, 0);
304}
305
306static int read_header(struct pstore *ps, int *new_snapshot)
307{
308	int r;
309	struct disk_header *dh;
310	unsigned int chunk_size;
311	int chunk_size_supplied = 1;
312	char *chunk_err;
313
314	/*
315	 * Use default chunk size (or logical_block_size, if larger)
316	 * if none supplied
317	 */
318	if (!ps->store->chunk_size) {
319		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
320		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
321					    bdev) >> 9);
322		ps->store->chunk_mask = ps->store->chunk_size - 1;
323		ps->store->chunk_shift = __ffs(ps->store->chunk_size);
324		chunk_size_supplied = 0;
325	}
326
327	ps->io_client = dm_io_client_create();
328	if (IS_ERR(ps->io_client))
329		return PTR_ERR(ps->io_client);
330
331	r = alloc_area(ps);
332	if (r)
333		return r;
334
335	r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);
336	if (r)
337		goto bad;
338
339	dh = ps->header_area;
340
341	if (le32_to_cpu(dh->magic) == 0) {
342		*new_snapshot = 1;
343		return 0;
344	}
345
346	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
347		DMWARN("Invalid or corrupt snapshot");
348		r = -ENXIO;
349		goto bad;
350	}
351
352	*new_snapshot = 0;
353	ps->valid = le32_to_cpu(dh->valid);
354	ps->version = le32_to_cpu(dh->version);
355	chunk_size = le32_to_cpu(dh->chunk_size);
356
357	if (ps->store->chunk_size == chunk_size)
358		return 0;
359
360	if (chunk_size_supplied)
 361		DMWARN("chunk size %u in device metadata overrides table chunk size of %u.",
362		       chunk_size, ps->store->chunk_size);
363
364	/* We had a bogus chunk_size. Fix stuff up. */
365	free_area(ps);
366
367	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
368					      &chunk_err);
369	if (r) {
370		DMERR("invalid on-disk chunk size %u: %s.",
371		      chunk_size, chunk_err);
372		return r;
373	}
374
375	r = alloc_area(ps);
376	return r;
377
378bad:
379	free_area(ps);
380	return r;
381}
382
383static int write_header(struct pstore *ps)
384{
385	struct disk_header *dh;
386
387	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
388
389	dh = ps->header_area;
390	dh->magic = cpu_to_le32(SNAP_MAGIC);
391	dh->valid = cpu_to_le32(ps->valid);
392	dh->version = cpu_to_le32(ps->version);
393	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
394
395	return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1);
396}
397
398/*
 399 * Access functions for the disk exceptions; these do the endian conversions.
400 */
401static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
402					    uint32_t index)
403{
404	BUG_ON(index >= ps->exceptions_per_area);
405
406	return ((struct disk_exception *) ps_area) + index;
407}
408
409static void read_exception(struct pstore *ps, void *ps_area,
410			   uint32_t index, struct core_exception *result)
411{
412	struct disk_exception *de = get_exception(ps, ps_area, index);
413
414	/* copy it */
415	result->old_chunk = le64_to_cpu(de->old_chunk);
416	result->new_chunk = le64_to_cpu(de->new_chunk);
417}
418
419static void write_exception(struct pstore *ps,
420			    uint32_t index, struct core_exception *e)
421{
422	struct disk_exception *de = get_exception(ps, ps->area, index);
423
424	/* copy it */
425	de->old_chunk = cpu_to_le64(e->old_chunk);
426	de->new_chunk = cpu_to_le64(e->new_chunk);
427}
428
429static void clear_exception(struct pstore *ps, uint32_t index)
430{
431	struct disk_exception *de = get_exception(ps, ps->area, index);
432
433	/* clear it */
434	de->old_chunk = 0;
435	de->new_chunk = 0;
436}
437
438/*
439 * Registers the exceptions that are present in the current area.
440 * 'full' is filled in to indicate if the area has been
441 * filled.
442 */
443static int insert_exceptions(struct pstore *ps, void *ps_area,
444			     int (*callback)(void *callback_context,
445					     chunk_t old, chunk_t new),
446			     void *callback_context,
447			     int *full)
448{
449	int r;
450	unsigned int i;
451	struct core_exception e;
452
453	/* presume the area is full */
454	*full = 1;
455
456	for (i = 0; i < ps->exceptions_per_area; i++) {
457		read_exception(ps, ps_area, i, &e);
458
459		/*
460		 * If the new_chunk is pointing at the start of
461		 * the COW device, where the first metadata area
 462		 * is, we know that we've hit the end of the
463		 * exceptions.  Therefore the area is not full.
464		 */
465		if (e.new_chunk == 0LL) {
466			ps->current_committed = i;
467			*full = 0;
468			break;
469		}
470
471		/*
472		 * Keep track of the start of the free chunks.
473		 */
474		if (ps->next_free <= e.new_chunk)
475			ps->next_free = e.new_chunk + 1;
476
477		/*
478		 * Otherwise we add the exception to the snapshot.
479		 */
480		r = callback(callback_context, e.old_chunk, e.new_chunk);
481		if (r)
482			return r;
483	}
484
485	return 0;
486}
487
488static int read_exceptions(struct pstore *ps,
489			   int (*callback)(void *callback_context, chunk_t old,
490					   chunk_t new),
491			   void *callback_context)
492{
493	int r, full = 1;
494	struct dm_bufio_client *client;
495	chunk_t prefetch_area = 0;
496
497	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
498					ps->store->chunk_size << SECTOR_SHIFT,
499					1, 0, NULL, NULL, 0);
500
501	if (IS_ERR(client))
502		return PTR_ERR(client);
503
504	/*
505	 * Setup for one current buffer + desired readahead buffers.
506	 */
507	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);
508
509	/*
 510	 * Keep reading chunks and inserting exceptions until
511	 * we find a partially full area.
512	 */
513	for (ps->current_area = 0; full; ps->current_area++) {
514		struct dm_buffer *bp;
515		void *area;
516		chunk_t chunk;
517
518		if (unlikely(prefetch_area < ps->current_area))
519			prefetch_area = ps->current_area;
520
521		if (DM_PREFETCH_CHUNKS) {
522			do {
523				chunk_t pf_chunk = area_location(ps, prefetch_area);
524
525				if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
526					break;
527				dm_bufio_prefetch(client, pf_chunk, 1);
528				prefetch_area++;
529				if (unlikely(!prefetch_area))
530					break;
531			} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
532		}
533
534		chunk = area_location(ps, ps->current_area);
535
536		area = dm_bufio_read(client, chunk, &bp);
537		if (IS_ERR(area)) {
538			r = PTR_ERR(area);
539			goto ret_destroy_bufio;
540		}
541
542		r = insert_exceptions(ps, area, callback, callback_context,
543				      &full);
544
545		if (!full)
546			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
547
548		dm_bufio_release(bp);
549
550		dm_bufio_forget(client, chunk);
551
552		if (unlikely(r))
553			goto ret_destroy_bufio;
554	}
555
556	ps->current_area--;
557
558	skip_metadata(ps);
559
560	r = 0;
561
562ret_destroy_bufio:
563	dm_bufio_client_destroy(client);
564
565	return r;
566}
567
568static struct pstore *get_info(struct dm_exception_store *store)
569{
570	return store->context;
571}
572
573static void persistent_usage(struct dm_exception_store *store,
574			     sector_t *total_sectors,
575			     sector_t *sectors_allocated,
576			     sector_t *metadata_sectors)
577{
578	struct pstore *ps = get_info(store);
579
580	*sectors_allocated = ps->next_free * store->chunk_size;
581	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
582
583	/*
584	 * First chunk is the fixed header.
585	 * Then there are (ps->current_area + 1) metadata chunks, each one
586	 * separated from the next by ps->exceptions_per_area data chunks.
587	 */
588	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
589			    store->chunk_size;
590}
591
592static void persistent_dtr(struct dm_exception_store *store)
593{
594	struct pstore *ps = get_info(store);
595
596	destroy_workqueue(ps->metadata_wq);
597
598	/* Created in read_header */
599	if (ps->io_client)
600		dm_io_client_destroy(ps->io_client);
601	free_area(ps);
602
603	/* Allocated in persistent_read_metadata */
 604	kvfree(ps->callbacks);
605
606	kfree(ps);
607}
608
609static int persistent_read_metadata(struct dm_exception_store *store,
610				    int (*callback)(void *callback_context,
611						    chunk_t old, chunk_t new),
612				    void *callback_context)
613{
614	int r, new_snapshot;
615	struct pstore *ps = get_info(store);
616
617	/*
618	 * Read the snapshot header.
619	 */
620	r = read_header(ps, &new_snapshot);
621	if (r)
622		return r;
623
624	/*
625	 * Now we know correct chunk_size, complete the initialisation.
626	 */
627	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
628				  sizeof(struct disk_exception);
629	ps->callbacks = kvcalloc(ps->exceptions_per_area,
630				 sizeof(*ps->callbacks), GFP_KERNEL);
631	if (!ps->callbacks)
632		return -ENOMEM;
633
634	/*
635	 * Do we need to setup a new snapshot ?
636	 */
637	if (new_snapshot) {
638		r = write_header(ps);
639		if (r) {
640			DMWARN("write_header failed");
641			return r;
642		}
643
644		ps->current_area = 0;
645		zero_memory_area(ps);
646		r = zero_disk_area(ps, 0);
647		if (r)
648			DMWARN("zero_disk_area(0) failed");
649		return r;
650	}
651	/*
652	 * Sanity checks.
653	 */
654	if (ps->version != SNAPSHOT_DISK_VERSION) {
655		DMWARN("unable to handle snapshot disk version %d",
656		       ps->version);
657		return -EINVAL;
658	}
659
660	/*
661	 * Metadata are valid, but snapshot is invalidated
662	 */
663	if (!ps->valid)
664		return 1;
665
666	/*
667	 * Read the metadata.
668	 */
669	r = read_exceptions(ps, callback, callback_context);
670
671	return r;
672}
673
674static int persistent_prepare_exception(struct dm_exception_store *store,
675					struct dm_exception *e)
676{
 677	struct pstore *ps = get_info(store);
678	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
679
680	/* Is there enough room ? */
681	if (size < ((ps->next_free + 1) * store->chunk_size))
682		return -ENOSPC;
683
684	e->new_chunk = ps->next_free;
685
686	/*
 687	 * Move on to the next free chunk, making sure to take
688	 * into account the location of the metadata chunks.
689	 */
690	ps->next_free++;
 691	skip_metadata(ps);
692
693	atomic_inc(&ps->pending_count);
694	return 0;
695}
696
697static void persistent_commit_exception(struct dm_exception_store *store,
698					struct dm_exception *e, int valid,
699					void (*callback)(void *, int success),
700					void *callback_context)
701{
702	unsigned int i;
703	struct pstore *ps = get_info(store);
704	struct core_exception ce;
705	struct commit_callback *cb;
706
707	if (!valid)
708		ps->valid = 0;
709
710	ce.old_chunk = e->old_chunk;
711	ce.new_chunk = e->new_chunk;
712	write_exception(ps, ps->current_committed++, &ce);
713
714	/*
715	 * Add the callback to the back of the array.  This code
716	 * is the only place where the callback array is
717	 * manipulated, and we know that it will never be called
718	 * multiple times concurrently.
719	 */
720	cb = ps->callbacks + ps->callback_count++;
721	cb->callback = callback;
722	cb->context = callback_context;
723
724	/*
725	 * If there are exceptions in flight and we have not yet
726	 * filled this metadata area there's nothing more to do.
727	 */
728	if (!atomic_dec_and_test(&ps->pending_count) &&
729	    (ps->current_committed != ps->exceptions_per_area))
730		return;
731
732	/*
733	 * If we completely filled the current area, then wipe the next one.
734	 */
735	if ((ps->current_committed == ps->exceptions_per_area) &&
736	    zero_disk_area(ps, ps->current_area + 1))
737		ps->valid = 0;
738
739	/*
740	 * Commit exceptions to disk.
741	 */
742	if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
743				 REQ_SYNC))
744		ps->valid = 0;
745
746	/*
747	 * Advance to the next area if this one is full.
748	 */
749	if (ps->current_committed == ps->exceptions_per_area) {
750		ps->current_committed = 0;
751		ps->current_area++;
752		zero_memory_area(ps);
753	}
754
755	for (i = 0; i < ps->callback_count; i++) {
756		cb = ps->callbacks + i;
757		cb->callback(cb->context, ps->valid);
758	}
759
760	ps->callback_count = 0;
761}
762
763static int persistent_prepare_merge(struct dm_exception_store *store,
764				    chunk_t *last_old_chunk,
765				    chunk_t *last_new_chunk)
766{
767	struct pstore *ps = get_info(store);
768	struct core_exception ce;
769	int nr_consecutive;
770	int r;
771
772	/*
773	 * When current area is empty, move back to preceding area.
774	 */
775	if (!ps->current_committed) {
776		/*
777		 * Have we finished?
778		 */
779		if (!ps->current_area)
780			return 0;
781
782		ps->current_area--;
783		r = area_io(ps, REQ_OP_READ);
784		if (r < 0)
785			return r;
786		ps->current_committed = ps->exceptions_per_area;
787	}
788
789	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
790	*last_old_chunk = ce.old_chunk;
791	*last_new_chunk = ce.new_chunk;
792
793	/*
794	 * Find number of consecutive chunks within the current area,
795	 * working backwards.
796	 */
797	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
798	     nr_consecutive++) {
799		read_exception(ps, ps->area,
800			       ps->current_committed - 1 - nr_consecutive, &ce);
801		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
802		    ce.new_chunk != *last_new_chunk - nr_consecutive)
803			break;
804	}
805
806	return nr_consecutive;
807}
808
809static int persistent_commit_merge(struct dm_exception_store *store,
810				   int nr_merged)
811{
812	int r, i;
813	struct pstore *ps = get_info(store);
814
815	BUG_ON(nr_merged > ps->current_committed);
816
817	for (i = 0; i < nr_merged; i++)
818		clear_exception(ps, ps->current_committed - 1 - i);
819
820	r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
821	if (r < 0)
822		return r;
823
824	ps->current_committed -= nr_merged;
825
826	/*
827	 * At this stage, only persistent_usage() uses ps->next_free, so
828	 * we make no attempt to keep ps->next_free strictly accurate
829	 * as exceptions may have been committed out-of-order originally.
 830	 * Once a snapshot has entered the merging state, we set it to the value it
831	 * would have held had all the exceptions been committed in order.
832	 *
833	 * ps->current_area does not get reduced by prepare_merge() until
834	 * after commit_merge() has removed the nr_merged previous exceptions.
835	 */
836	ps->next_free = area_location(ps, ps->current_area) +
837			ps->current_committed + 1;
838
839	return 0;
840}
841
842static void persistent_drop_snapshot(struct dm_exception_store *store)
843{
844	struct pstore *ps = get_info(store);
845
846	ps->valid = 0;
847	if (write_header(ps))
848		DMWARN("write header failed");
849}
850
 851static int persistent_ctr(struct dm_exception_store *store, char *options)
852{
853	struct pstore *ps;
854	int r;
855
856	/* allocate the pstore */
857	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
858	if (!ps)
859		return -ENOMEM;
860
861	ps->store = store;
862	ps->valid = 1;
863	ps->version = SNAPSHOT_DISK_VERSION;
864	ps->area = NULL;
865	ps->zero_area = NULL;
866	ps->header_area = NULL;
867	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
868	ps->current_committed = 0;
869
870	ps->callback_count = 0;
871	atomic_set(&ps->pending_count, 0);
872	ps->callbacks = NULL;
873
874	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
 875	if (!ps->metadata_wq) {
876		DMERR("couldn't start header metadata update thread");
877		r = -ENOMEM;
878		goto err_workqueue;
879	}
880
881	if (options) {
882		char overflow = toupper(options[0]);
883
884		if (overflow == 'O')
885			store->userspace_supports_overflow = true;
886		else {
887			DMERR("Unsupported persistent store option: %s", options);
888			r = -EINVAL;
889			goto err_options;
890		}
891	}
892
893	store->context = ps;
894
895	return 0;
896
897err_options:
898	destroy_workqueue(ps->metadata_wq);
899err_workqueue:
900	kfree(ps);
901
902	return r;
903}
904
905static unsigned int persistent_status(struct dm_exception_store *store,
906				  status_type_t status, char *result,
907				  unsigned int maxlen)
908{
909	unsigned int sz = 0;
910
911	switch (status) {
912	case STATUSTYPE_INFO:
913		break;
914	case STATUSTYPE_TABLE:
915		DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
916		       (unsigned long long)store->chunk_size);
917		break;
918	case STATUSTYPE_IMA:
919		*result = '\0';
920		break;
921	}
922
923	return sz;
924}
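/*
 * Example table status output (illustrative values): " P 32" normally, or
 * " PO 32" when userspace declared overflow support via the "PO" option;
 * the number is the chunk size in sectors.
 */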
925
926static struct dm_exception_store_type _persistent_type = {
927	.name = "persistent",
928	.module = THIS_MODULE,
929	.ctr = persistent_ctr,
930	.dtr = persistent_dtr,
931	.read_metadata = persistent_read_metadata,
932	.prepare_exception = persistent_prepare_exception,
933	.commit_exception = persistent_commit_exception,
934	.prepare_merge = persistent_prepare_merge,
935	.commit_merge = persistent_commit_merge,
936	.drop_snapshot = persistent_drop_snapshot,
937	.usage = persistent_usage,
938	.status = persistent_status,
939};
940
941static struct dm_exception_store_type _persistent_compat_type = {
942	.name = "P",
943	.module = THIS_MODULE,
944	.ctr = persistent_ctr,
945	.dtr = persistent_dtr,
946	.read_metadata = persistent_read_metadata,
947	.prepare_exception = persistent_prepare_exception,
948	.commit_exception = persistent_commit_exception,
949	.prepare_merge = persistent_prepare_merge,
950	.commit_merge = persistent_commit_merge,
951	.drop_snapshot = persistent_drop_snapshot,
952	.usage = persistent_usage,
953	.status = persistent_status,
954};
955
956int dm_persistent_snapshot_init(void)
957{
958	int r;
959
960	r = dm_exception_store_type_register(&_persistent_type);
961	if (r) {
962		DMERR("Unable to register persistent exception store type");
963		return r;
964	}
965
966	r = dm_exception_store_type_register(&_persistent_compat_type);
967	if (r) {
 968		DMERR("Unable to register old-style persistent exception store type");
969		dm_exception_store_type_unregister(&_persistent_type);
970		return r;
971	}
972
973	return r;
974}
975
976void dm_persistent_snapshot_exit(void)
977{
978	dm_exception_store_type_unregister(&_persistent_type);
979	dm_exception_store_type_unregister(&_persistent_compat_type);
980}