v5.9 (drivers/md/dm-snap-persistent.c)
  1/*
  2 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
  3 * Copyright (C) 2006-2008 Red Hat GmbH
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#include "dm-exception-store.h"
  9
 10#include <linux/ctype.h>
 11#include <linux/mm.h>
 12#include <linux/pagemap.h>
 13#include <linux/vmalloc.h>
 14#include <linux/export.h>
 15#include <linux/slab.h>
 16#include <linux/dm-io.h>
 17#include <linux/dm-bufio.h>
 18
 19#define DM_MSG_PREFIX "persistent snapshot"
 20#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U	/* 16KB */
 21
 22#define DM_PREFETCH_CHUNKS		12
 23
 24/*-----------------------------------------------------------------
 25 * Persistent snapshots, by persistent we mean that the snapshot
 26 * will survive a reboot.
 27 *---------------------------------------------------------------*/
 28
 29/*
 30 * We need to store a record of which parts of the origin have
 31 * been copied to the snapshot device.  The snapshot code
 32 * requires that we copy exception chunks to chunk aligned areas
 33 * of the COW store.  It makes sense, therefore, to store the
 34 * metadata in chunk size blocks.
 35 *
 36 * There is no backward or forward compatibility implemented,
 37 * snapshots with different disk versions than the kernel will
 38 * not be usable.  It is expected that "lvcreate" will blank out
 39 * the start of a fresh COW device before calling the snapshot
 40 * constructor.
 41 *
 42 * The first chunk of the COW device just contains the header.
 43 * After this there is a chunk filled with exception metadata,
 44 * followed by as many exception chunks as can fit in the
 45 * metadata areas.
 46 *
 47 * All on disk structures are in little-endian format.  The end
 48 * of the exceptions info is indicated by an exception with a
 49 * new_chunk of 0, which is invalid since it would point to the
 50 * header chunk.
 51 */
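/*
 * Editor's illustrative sketch (not part of the kernel source): the
 * layout described above, worked through in self-contained userspace C.
 * The sketch_* names are hypothetical; sketch_area_location() mirrors
 * area_location() further down in this file.
 */
#include <stdint.h>
#include <stdio.h>

/*
 * Chunk index of metadata area 'a': one header chunk, then a stride of
 * one metadata chunk plus its data chunks per area.
 */
static uint64_t sketch_area_location(uint32_t exceptions_per_area, uint64_t a)
{
	return 1 + ((uint64_t)exceptions_per_area + 1) * a;
}

static void sketch_layout(void)
{
	uint32_t chunk_bytes = 32 * 512;	/* default 16 KiB chunk */
	uint32_t per_area = chunk_bytes / 16;	/* 16-byte entries -> 1024 */
	uint64_t a;

	for (a = 0; a < 3; a++)
		printf("area %llu starts at chunk %llu\n",
		       (unsigned long long)a,
		       (unsigned long long)sketch_area_location(per_area, a));
	/* Prints chunks 1, 1026 and 2051; chunk 0 is the header. */
}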
 52
 53/*
 54 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it.
 55 */
 56#define SNAP_MAGIC 0x70416e53
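/*
 * Editor's note: written little-endian, 0x70416e53 lands on disk as the
 * bytes 0x53 0x6e 0x41 0x70, i.e. the ASCII string "SnAp".
 */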
 57
 58/*
 59 * The on-disk version of the metadata.
 60 */
 61#define SNAPSHOT_DISK_VERSION 1
 62
 63#define NUM_SNAPSHOT_HDR_CHUNKS 1
 64
 65struct disk_header {
 66	__le32 magic;
 67
 68	/*
 69	 * Is this snapshot valid?  There is no way of recovering
 70	 * an invalid snapshot.
 71	 */
 72	__le32 valid;
 73
 74	/*
 75	 * Simple, incrementing version; no backward
 76	 * compatibility.
 77	 */
 78	__le32 version;
 79
 80	/* In sectors */
 81	__le32 chunk_size;
 82} __packed;
 83
 84struct disk_exception {
 85	__le64 old_chunk;
 86	__le64 new_chunk;
 87} __packed;
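/*
 * Editor's note: each packed entry above is exactly 16 bytes, so the
 * default 32-sector (16384-byte) chunk holds 16384 / 16 = 1024
 * exceptions per metadata area (ps->exceptions_per_area below).
 */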
 88
 89struct core_exception {
 90	uint64_t old_chunk;
 91	uint64_t new_chunk;
 92};
 93
 94struct commit_callback {
 95	void (*callback)(void *, int success);
 96	void *context;
 97};
 98
 99/*
100 * The top level structure for a persistent exception store.
101 */
102struct pstore {
103	struct dm_exception_store *store;
104	int version;
105	int valid;
106	uint32_t exceptions_per_area;
107
108	/*
109	 * Now that we have an asynchronous kcopyd there is no
110	 * need for large chunk sizes, so it won't hurt to have a
111	 * whole chunk's worth of metadata in memory at once.
112	 */
113	void *area;
114
115	/*
116	 * An area of zeros used to clear the next area.
117	 */
118	void *zero_area;
119
120	/*
121	 * An area used for the header. The header can be written
122	 * concurrently with metadata (when invalidating the snapshot),
123	 * so it needs a separate buffer.
124	 */
125	void *header_area;
126
127	/*
128	 * Used to keep track of which metadata area the data in
129	 * 'chunk' refers to.
130	 */
131	chunk_t current_area;
132
133	/*
134	 * The next free chunk for an exception.
135	 *
136	 * When creating exceptions, all the chunks here and above are
137	 * free.  It holds the next chunk to be allocated.  On rare
138	 * occasions (e.g. after a system crash) holes can be left in
139	 * the exception store because chunks can be committed out of
140	 * order.
141	 *
142	 * When merging exceptions, it does not necessarily mean all the
143	 * chunks here and above are free.  It holds the value it would
144	 * have held if all chunks had been committed in order of
145	 * allocation.  Consequently the value may occasionally be
146	 * slightly too low, but since it's only used for 'status' and
147	 * it can never reach its minimum value too early this doesn't
148	 * matter.
149	 */
150
151	chunk_t next_free;
152
153	/*
154	 * The index of next free exception in the current
155	 * metadata area.
156	 */
157	uint32_t current_committed;
158
159	atomic_t pending_count;
160	uint32_t callback_count;
161	struct commit_callback *callbacks;
162	struct dm_io_client *io_client;
163
164	struct workqueue_struct *metadata_wq;
165};
166
167static int alloc_area(struct pstore *ps)
168{
169	int r = -ENOMEM;
170	size_t len;
171
172	len = ps->store->chunk_size << SECTOR_SHIFT;
173
174	/*
175	 * Allocate the chunk_size block of memory that will hold
176	 * a single metadata area.
177	 */
178	ps->area = vmalloc(len);
179	if (!ps->area)
180		goto err_area;
181
182	ps->zero_area = vzalloc(len);
183	if (!ps->zero_area)
184		goto err_zero_area;
185
186	ps->header_area = vmalloc(len);
187	if (!ps->header_area)
188		goto err_header_area;
189
190	return 0;
191
192err_header_area:
193	vfree(ps->zero_area);
194
195err_zero_area:
196	vfree(ps->area);
197
198err_area:
199	return r;
200}
201
202static void free_area(struct pstore *ps)
203{
204	vfree(ps->area);
205	ps->area = NULL;
206	vfree(ps->zero_area);
207	ps->zero_area = NULL;
208	vfree(ps->header_area);
209	ps->header_area = NULL;
210}
211
212struct mdata_req {
213	struct dm_io_region *where;
214	struct dm_io_request *io_req;
215	struct work_struct work;
216	int result;
217};
218
219static void do_metadata(struct work_struct *work)
220{
221	struct mdata_req *req = container_of(work, struct mdata_req, work);
222
223	req->result = dm_io(req->io_req, 1, req->where, NULL);
224}
225
226/*
227 * Read or write a chunk aligned and sized block of data from a device.
228 */
229static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
230		    int op_flags, int metadata)
231{
232	struct dm_io_region where = {
233		.bdev = dm_snap_cow(ps->store->snap)->bdev,
234		.sector = ps->store->chunk_size * chunk,
235		.count = ps->store->chunk_size,
236	};
237	struct dm_io_request io_req = {
238		.bi_op = op,
239		.bi_op_flags = op_flags,
240		.mem.type = DM_IO_VMA,
241		.mem.ptr.vma = area,
242		.client = ps->io_client,
243		.notify.fn = NULL,
244	};
245	struct mdata_req req;
246
247	if (!metadata)
248		return dm_io(&io_req, 1, &where, NULL);
249
250	req.where = &where;
251	req.io_req = &io_req;
252
253	/*
254	 * Issue the synchronous I/O from a different thread
255	 * to avoid submit_bio_noacct recursion.
256	 */
257	INIT_WORK_ONSTACK(&req.work, do_metadata);
258	queue_work(ps->metadata_wq, &req.work);
259	flush_workqueue(ps->metadata_wq);
260	destroy_work_on_stack(&req.work);
261
262	return req.result;
263}
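/*
 * Editor's note: INIT_WORK_ONSTACK() plus flush_workqueue() makes the
 * metadata path effectively synchronous: the request lives on this
 * stack frame and flush_workqueue() only returns once do_metadata()
 * has run, so req.result is valid when chunk_io() reads it.
 */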
264
265/*
266 * Convert a metadata area index to a chunk index.
267 */
268static chunk_t area_location(struct pstore *ps, chunk_t area)
269{
270	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
271}
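/*
 * Editor's example: with exceptions_per_area = 1024 the stride is 1025
 * chunks (one metadata chunk plus its 1024 data chunks), so area 0 is
 * chunk 1, area 1 is chunk 1026, area 2 is chunk 2051, and so on.
 */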
272
273static void skip_metadata(struct pstore *ps)
274{
275	uint32_t stride = ps->exceptions_per_area + 1;
276	chunk_t next_free = ps->next_free;
277	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
278		ps->next_free++;
279}
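/*
 * Editor's example: sector_div() returns the remainder of
 * next_free / stride.  A remainder of NUM_SNAPSHOT_HDR_CHUNKS (1)
 * means next_free sits exactly on a metadata chunk, e.g. chunk 1026
 * when the stride is 1025, so the allocator steps over it to the
 * first data chunk behind it.
 */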
280
281/*
282 * Read or write a metadata area, remembering to skip the first
283 * chunk which holds the header.
284 */
285static int area_io(struct pstore *ps, int op, int op_flags)
286{
287	int r;
288	chunk_t chunk;
289
290	chunk = area_location(ps, ps->current_area);
291
292	r = chunk_io(ps, ps->area, chunk, op, op_flags, 0);
293	if (r)
294		return r;
295
296	return 0;
297}
298
299static void zero_memory_area(struct pstore *ps)
300{
301	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
302}
303
304static int zero_disk_area(struct pstore *ps, chunk_t area)
305{
306	return chunk_io(ps, ps->zero_area, area_location(ps, area),
307			REQ_OP_WRITE, 0, 0);
308}
309
310static int read_header(struct pstore *ps, int *new_snapshot)
311{
312	int r;
313	struct disk_header *dh;
314	unsigned chunk_size;
315	int chunk_size_supplied = 1;
316	char *chunk_err;
317
318	/*
319	 * Use default chunk size (or logical_block_size, if larger)
320	 * if none supplied
321	 */
322	if (!ps->store->chunk_size) {
323		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
324		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
325					    bdev) >> 9);
326		ps->store->chunk_mask = ps->store->chunk_size - 1;
327		ps->store->chunk_shift = __ffs(ps->store->chunk_size);
328		chunk_size_supplied = 0;
329	}
330
331	ps->io_client = dm_io_client_create();
332	if (IS_ERR(ps->io_client))
333		return PTR_ERR(ps->io_client);
334
335	r = alloc_area(ps);
336	if (r)
337		return r;
338
339	r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1);
340	if (r)
341		goto bad;
342
343	dh = ps->header_area;
344
345	if (le32_to_cpu(dh->magic) == 0) {
346		*new_snapshot = 1;
347		return 0;
348	}
349
350	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
351		DMWARN("Invalid or corrupt snapshot");
352		r = -ENXIO;
353		goto bad;
354	}
355
356	*new_snapshot = 0;
357	ps->valid = le32_to_cpu(dh->valid);
358	ps->version = le32_to_cpu(dh->version);
359	chunk_size = le32_to_cpu(dh->chunk_size);
360
361	if (ps->store->chunk_size == chunk_size)
362		return 0;
363
364	if (chunk_size_supplied)
365		DMWARN("chunk size %u in device metadata overrides "
366		       "table chunk size of %u.",
367		       chunk_size, ps->store->chunk_size);
368
369	/* We had a bogus chunk_size. Fix stuff up. */
370	free_area(ps);
371
372	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
373					      &chunk_err);
374	if (r) {
375		DMERR("invalid on-disk chunk size %u: %s.",
376		      chunk_size, chunk_err);
377		return r;
378	}
379
380	r = alloc_area(ps);
381	return r;
382
383bad:
384	free_area(ps);
385	return r;
386}
387
388static int write_header(struct pstore *ps)
389{
390	struct disk_header *dh;
391
392	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
393
394	dh = ps->header_area;
395	dh->magic = cpu_to_le32(SNAP_MAGIC);
396	dh->valid = cpu_to_le32(ps->valid);
397	dh->version = cpu_to_le32(ps->version);
398	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
399
400	return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1);
401}
402
403/*
404 * Access functions for the disk exceptions, these do the endian conversions.
405 */
406static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
407					    uint32_t index)
408{
409	BUG_ON(index >= ps->exceptions_per_area);
410
411	return ((struct disk_exception *) ps_area) + index;
412}
413
414static void read_exception(struct pstore *ps, void *ps_area,
415			   uint32_t index, struct core_exception *result)
416{
417	struct disk_exception *de = get_exception(ps, ps_area, index);
418
419	/* copy it */
420	result->old_chunk = le64_to_cpu(de->old_chunk);
421	result->new_chunk = le64_to_cpu(de->new_chunk);
422}
423
424static void write_exception(struct pstore *ps,
425			    uint32_t index, struct core_exception *e)
426{
427	struct disk_exception *de = get_exception(ps, ps->area, index);
428
429	/* copy it */
430	de->old_chunk = cpu_to_le64(e->old_chunk);
431	de->new_chunk = cpu_to_le64(e->new_chunk);
432}
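/*
 * Editor's illustrative sketch (not part of the kernel source): a
 * userspace analogue of the le64 round-trip performed by
 * read_exception() and write_exception().  Explicit byte packing
 * stands in for cpu_to_le64()/le64_to_cpu(); the sketch_* names are
 * hypothetical.
 */
#include <stdint.h>

static void sketch_put_le64(uint8_t *p, uint64_t v)
{
	int i;

	for (i = 0; i < 8; i++)
		p[i] = v >> (8 * i);	/* least significant byte first */
}

static uint64_t sketch_get_le64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v |= (uint64_t)p[i] << (8 * i);
	return v;
}

/*
 * Packing old_chunk then new_chunk this way into a 16-byte buffer
 * reproduces struct disk_exception regardless of host endianness.
 */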
433
434static void clear_exception(struct pstore *ps, uint32_t index)
435{
436	struct disk_exception *de = get_exception(ps, ps->area, index);
437
438	/* clear it */
439	de->old_chunk = 0;
440	de->new_chunk = 0;
441}
442
443/*
444 * Registers the exceptions that are present in the current area.
445 * 'full' is filled in to indicate if the area has been
446 * filled.
447 */
448static int insert_exceptions(struct pstore *ps, void *ps_area,
449			     int (*callback)(void *callback_context,
450					     chunk_t old, chunk_t new),
451			     void *callback_context,
452			     int *full)
453{
454	int r;
455	unsigned int i;
456	struct core_exception e;
457
458	/* presume the area is full */
459	*full = 1;
460
461	for (i = 0; i < ps->exceptions_per_area; i++) {
462		read_exception(ps, ps_area, i, &e);
463
464		/*
465		 * If the new_chunk is pointing at the start of
466		 * the COW device, where the first metadata area
467		 * is we know that we've hit the end of the
468		 * exceptions.  Therefore the area is not full.
469		 */
470		if (e.new_chunk == 0LL) {
471			ps->current_committed = i;
472			*full = 0;
473			break;
474		}
475
476		/*
477		 * Keep track of the start of the free chunks.
478		 */
479		if (ps->next_free <= e.new_chunk)
480			ps->next_free = e.new_chunk + 1;
481
482		/*
483		 * Otherwise we add the exception to the snapshot.
484		 */
485		r = callback(callback_context, e.old_chunk, e.new_chunk);
486		if (r)
487			return r;
488	}
489
490	return 0;
491}
492
493static int read_exceptions(struct pstore *ps,
494			   int (*callback)(void *callback_context, chunk_t old,
495					   chunk_t new),
496			   void *callback_context)
497{
498	int r, full = 1;
499	struct dm_bufio_client *client;
500	chunk_t prefetch_area = 0;
501
502	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
503					ps->store->chunk_size << SECTOR_SHIFT,
504					1, 0, NULL, NULL);
505
506	if (IS_ERR(client))
507		return PTR_ERR(client);
508
509	/*
510	 * Set up for one current buffer + desired readahead buffers.
511	 */
512	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);
513
514	/*
515	 * Keep reading chunks and inserting exceptions until
516	 * we find a partially full area.
517	 */
518	for (ps->current_area = 0; full; ps->current_area++) {
519		struct dm_buffer *bp;
520		void *area;
521		chunk_t chunk;
522
523		if (unlikely(prefetch_area < ps->current_area))
524			prefetch_area = ps->current_area;
525
526		if (DM_PREFETCH_CHUNKS) do {
527			chunk_t pf_chunk = area_location(ps, prefetch_area);
528			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
529				break;
530			dm_bufio_prefetch(client, pf_chunk, 1);
531			prefetch_area++;
532			if (unlikely(!prefetch_area))
533				break;
534		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
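		/*
		 * Editor's note: the loop above keeps up to
		 * DM_PREFETCH_CHUNKS metadata areas of readahead in
		 * flight ahead of the area being parsed, stopping early
		 * at the end of the device or if prefetch_area wraps
		 * to zero.
		 */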
535
536		chunk = area_location(ps, ps->current_area);
537
538		area = dm_bufio_read(client, chunk, &bp);
539		if (IS_ERR(area)) {
540			r = PTR_ERR(area);
541			goto ret_destroy_bufio;
542		}
543
544		r = insert_exceptions(ps, area, callback, callback_context,
545				      &full);
546
547		if (!full)
548			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
549
550		dm_bufio_release(bp);
551
552		dm_bufio_forget(client, chunk);
553
554		if (unlikely(r))
555			goto ret_destroy_bufio;
556	}
557
558	ps->current_area--;
559
560	skip_metadata(ps);
561
562	r = 0;
563
564ret_destroy_bufio:
565	dm_bufio_client_destroy(client);
566
567	return r;
568}
569
570static struct pstore *get_info(struct dm_exception_store *store)
571{
572	return (struct pstore *) store->context;
573}
574
575static void persistent_usage(struct dm_exception_store *store,
576			     sector_t *total_sectors,
577			     sector_t *sectors_allocated,
578			     sector_t *metadata_sectors)
579{
580	struct pstore *ps = get_info(store);
581
582	*sectors_allocated = ps->next_free * store->chunk_size;
583	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
584
585	/*
586	 * First chunk is the fixed header.
587	 * Then there are (ps->current_area + 1) metadata chunks, each one
588	 * separated from the next by ps->exceptions_per_area data chunks.
589	 */
590	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
591			    store->chunk_size;
592}
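/*
 * Editor's example: with a 32-sector chunk and ps->current_area == 2,
 * *metadata_sectors = (2 + 1 + 1) * 32 = 128 sectors, i.e. the header
 * chunk plus three metadata chunks.
 */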
593
594static void persistent_dtr(struct dm_exception_store *store)
595{
596	struct pstore *ps = get_info(store);
597
598	destroy_workqueue(ps->metadata_wq);
599
600	/* Created in read_header */
601	if (ps->io_client)
602		dm_io_client_destroy(ps->io_client);
603	free_area(ps);
604
605	/* Allocated in persistent_read_metadata */
606	vfree(ps->callbacks);
607
608	kfree(ps);
609}
610
611static int persistent_read_metadata(struct dm_exception_store *store,
612				    int (*callback)(void *callback_context,
613						    chunk_t old, chunk_t new),
614				    void *callback_context)
615{
616	int r, new_snapshot;
617	struct pstore *ps = get_info(store);
618
619	/*
620	 * Read the snapshot header.
621	 */
622	r = read_header(ps, &new_snapshot);
623	if (r)
624		return r;
625
626	/*
627	 * Now we know correct chunk_size, complete the initialisation.
628	 */
629	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
630				  sizeof(struct disk_exception);
631	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
632				   sizeof(*ps->callbacks));
633	if (!ps->callbacks)
634		return -ENOMEM;
635
636	/*
637	 * Do we need to set up a new snapshot?
638	 */
639	if (new_snapshot) {
640		r = write_header(ps);
641		if (r) {
642			DMWARN("write_header failed");
643			return r;
644		}
645
646		ps->current_area = 0;
647		zero_memory_area(ps);
648		r = zero_disk_area(ps, 0);
649		if (r)
650			DMWARN("zero_disk_area(0) failed");
651		return r;
652	}
653	/*
654	 * Sanity checks.
655	 */
656	if (ps->version != SNAPSHOT_DISK_VERSION) {
657		DMWARN("unable to handle snapshot disk version %d",
658		       ps->version);
659		return -EINVAL;
660	}
661
662	/*
663	 * Metadata is valid, but the snapshot has been invalidated
664	 */
665	if (!ps->valid)
666		return 1;
667
668	/*
669	 * Read the metadata.
670	 */
671	r = read_exceptions(ps, callback, callback_context);
672
673	return r;
674}
675
676static int persistent_prepare_exception(struct dm_exception_store *store,
677					struct dm_exception *e)
678{
679	struct pstore *ps = get_info(store);
680	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
681
682	/* Is there enough room? */
683	if (size < ((ps->next_free + 1) * store->chunk_size))
684		return -ENOSPC;
685
686	e->new_chunk = ps->next_free;
687
688	/*
689	 * Move on to the next free chunk, making sure to take
690	 * into account the location of the metadata chunks.
691	 */
692	ps->next_free++;
693	skip_metadata(ps);
694
695	atomic_inc(&ps->pending_count);
696	return 0;
697}
698
699static void persistent_commit_exception(struct dm_exception_store *store,
700					struct dm_exception *e, int valid,
701					void (*callback) (void *, int success),
702					void *callback_context)
703{
704	unsigned int i;
705	struct pstore *ps = get_info(store);
706	struct core_exception ce;
707	struct commit_callback *cb;
708
709	if (!valid)
710		ps->valid = 0;
711
712	ce.old_chunk = e->old_chunk;
713	ce.new_chunk = e->new_chunk;
714	write_exception(ps, ps->current_committed++, &ce);
715
716	/*
717	 * Add the callback to the back of the array.  This code
718	 * is the only place where the callback array is
719	 * manipulated, and we know that it will never be called
720	 * multiple times concurrently.
721	 */
722	cb = ps->callbacks + ps->callback_count++;
723	cb->callback = callback;
724	cb->context = callback_context;
725
726	/*
727	 * If there are exceptions in flight and we have not yet
728	 * filled this metadata area there's nothing more to do.
729	 */
730	if (!atomic_dec_and_test(&ps->pending_count) &&
731	    (ps->current_committed != ps->exceptions_per_area))
732		return;
733
734	/*
735	 * If we completely filled the current area, then wipe the next one.
736	 */
737	if ((ps->current_committed == ps->exceptions_per_area) &&
738	    zero_disk_area(ps, ps->current_area + 1))
739		ps->valid = 0;
740
741	/*
742	 * Commit exceptions to disk.
743	 */
744	if (ps->valid && area_io(ps, REQ_OP_WRITE,
745				 REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
746		ps->valid = 0;
747
748	/*
749	 * Advance to the next area if this one is full.
750	 */
751	if (ps->current_committed == ps->exceptions_per_area) {
752		ps->current_committed = 0;
753		ps->current_area++;
754		zero_memory_area(ps);
755	}
756
757	for (i = 0; i < ps->callback_count; i++) {
758		cb = ps->callbacks + i;
759		cb->callback(cb->context, ps->valid);
760	}
761
762	ps->callback_count = 0;
763}
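/*
 * Editor's note on the flow above: exceptions are staged into the
 * in-core area as they commit, but the area is only written out once
 * every pending exception has drained or the area fills, and then with
 * REQ_PREFLUSH | REQ_FUA so the COW data chunks it references are
 * stable before the metadata that points at them.
 */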
764
765static int persistent_prepare_merge(struct dm_exception_store *store,
766				    chunk_t *last_old_chunk,
767				    chunk_t *last_new_chunk)
768{
769	struct pstore *ps = get_info(store);
770	struct core_exception ce;
771	int nr_consecutive;
772	int r;
773
774	/*
775	 * When current area is empty, move back to preceding area.
776	 */
777	if (!ps->current_committed) {
778		/*
779		 * Have we finished?
780		 */
781		if (!ps->current_area)
782			return 0;
783
784		ps->current_area--;
785		r = area_io(ps, REQ_OP_READ, 0);
786		if (r < 0)
787			return r;
788		ps->current_committed = ps->exceptions_per_area;
789	}
790
791	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
792	*last_old_chunk = ce.old_chunk;
793	*last_new_chunk = ce.new_chunk;
794
795	/*
796	 * Find number of consecutive chunks within the current area,
797	 * working backwards.
798	 */
799	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
800	     nr_consecutive++) {
801		read_exception(ps, ps->area,
802			       ps->current_committed - 1 - nr_consecutive, &ce);
803		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
804		    ce.new_chunk != *last_new_chunk - nr_consecutive)
805			break;
806	}
807
808	return nr_consecutive;
809}
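/*
 * Editor's example: if the last three committed entries in the area
 * are (old, new) = (10, 100), (11, 101), (12, 102), the backward scan
 * above returns 3 with *last_old_chunk = 12 and *last_new_chunk = 102,
 * so the caller can merge the whole run in one pass.
 */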
810
811static int persistent_commit_merge(struct dm_exception_store *store,
812				   int nr_merged)
813{
814	int r, i;
815	struct pstore *ps = get_info(store);
816
817	BUG_ON(nr_merged > ps->current_committed);
818
819	for (i = 0; i < nr_merged; i++)
820		clear_exception(ps, ps->current_committed - 1 - i);
821
822	r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
823	if (r < 0)
824		return r;
825
826	ps->current_committed -= nr_merged;
827
828	/*
829	 * At this stage, only persistent_usage() uses ps->next_free, so
830	 * we make no attempt to keep ps->next_free strictly accurate
831	 * as exceptions may have been committed out-of-order originally.
832	 * Once a snapshot has become merging, we set it to the value it
833	 * would have held had all the exceptions been committed in order.
834	 *
835	 * ps->current_area does not get reduced by prepare_merge() until
836	 * after commit_merge() has removed the nr_merged previous exceptions.
837	 */
838	ps->next_free = area_location(ps, ps->current_area) +
839			ps->current_committed + 1;
840
841	return 0;
842}
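/*
 * Editor's example: merging back into area 1 (chunk 1026 with the
 * default geometry) while 10 entries remain committed gives
 * ps->next_free = 1026 + 10 + 1 = 1037, the value it would have held
 * had those exceptions been committed strictly in order.
 */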
843
844static void persistent_drop_snapshot(struct dm_exception_store *store)
845{
846	struct pstore *ps = get_info(store);
847
848	ps->valid = 0;
849	if (write_header(ps))
850		DMWARN("write header failed");
851}
852
853static int persistent_ctr(struct dm_exception_store *store, char *options)
854{
855	struct pstore *ps;
856	int r;
857
858	/* allocate the pstore */
859	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
860	if (!ps)
861		return -ENOMEM;
862
863	ps->store = store;
864	ps->valid = 1;
865	ps->version = SNAPSHOT_DISK_VERSION;
866	ps->area = NULL;
867	ps->zero_area = NULL;
868	ps->header_area = NULL;
869	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
870	ps->current_committed = 0;
871
872	ps->callback_count = 0;
873	atomic_set(&ps->pending_count, 0);
874	ps->callbacks = NULL;
875
876	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
877	if (!ps->metadata_wq) {
878		DMERR("couldn't start header metadata update thread");
879		r = -ENOMEM;
880		goto err_workqueue;
881	}
882
883	if (options) {
884		char overflow = toupper(options[0]);
885		if (overflow == 'O')
886			store->userspace_supports_overflow = true;
887		else {
888			DMERR("Unsupported persistent store option: %s", options);
889			r = -EINVAL;
890			goto err_options;
891		}
892	}
893
894	store->context = ps;
895
896	return 0;
897
898err_options:
899	destroy_workqueue(ps->metadata_wq);
900err_workqueue:
901	kfree(ps);
902
903	return r;
904}
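/*
 * Editor's note: the only option accepted above is a leading 'o'/'O'
 * ("PO" in a dm table line), which records that userspace can handle
 * the snapshot entering an overflow state instead of simply being
 * invalidated; persistent_status() echoes it back as "PO".
 */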
905
906static unsigned persistent_status(struct dm_exception_store *store,
907				  status_type_t status, char *result,
908				  unsigned maxlen)
909{
910	unsigned sz = 0;
911
912	switch (status) {
913	case STATUSTYPE_INFO:
914		break;
915	case STATUSTYPE_TABLE:
916		DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
917		       (unsigned long long)store->chunk_size);
918	}
919
920	return sz;
921}
922
923static struct dm_exception_store_type _persistent_type = {
924	.name = "persistent",
925	.module = THIS_MODULE,
926	.ctr = persistent_ctr,
927	.dtr = persistent_dtr,
928	.read_metadata = persistent_read_metadata,
929	.prepare_exception = persistent_prepare_exception,
930	.commit_exception = persistent_commit_exception,
931	.prepare_merge = persistent_prepare_merge,
932	.commit_merge = persistent_commit_merge,
933	.drop_snapshot = persistent_drop_snapshot,
934	.usage = persistent_usage,
935	.status = persistent_status,
936};
937
938static struct dm_exception_store_type _persistent_compat_type = {
939	.name = "P",
940	.module = THIS_MODULE,
941	.ctr = persistent_ctr,
942	.dtr = persistent_dtr,
943	.read_metadata = persistent_read_metadata,
944	.prepare_exception = persistent_prepare_exception,
945	.commit_exception = persistent_commit_exception,
946	.prepare_merge = persistent_prepare_merge,
947	.commit_merge = persistent_commit_merge,
948	.drop_snapshot = persistent_drop_snapshot,
949	.usage = persistent_usage,
950	.status = persistent_status,
951};
952
953int dm_persistent_snapshot_init(void)
954{
955	int r;
956
957	r = dm_exception_store_type_register(&_persistent_type);
958	if (r) {
959		DMERR("Unable to register persistent exception store type");
960		return r;
961	}
962
963	r = dm_exception_store_type_register(&_persistent_compat_type);
964	if (r) {
965		DMERR("Unable to register old-style persistent exception "
966		      "store type");
967		dm_exception_store_type_unregister(&_persistent_type);
968		return r;
969	}
970
971	return r;
972}
973
974void dm_persistent_snapshot_exit(void)
975{
976	dm_exception_store_type_unregister(&_persistent_type);
977	dm_exception_store_type_unregister(&_persistent_compat_type);
978}
v3.15 (drivers/md/dm-snap-persistent.c)
  1/*
  2 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
  3 * Copyright (C) 2006-2008 Red Hat GmbH
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#include "dm-exception-store.h"
  9
 10#include <linux/mm.h>
 11#include <linux/pagemap.h>
 12#include <linux/vmalloc.h>
 13#include <linux/export.h>
 14#include <linux/slab.h>
 15#include <linux/dm-io.h>
 16#include "dm-bufio.h"
 17
 18#define DM_MSG_PREFIX "persistent snapshot"
 19#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
 20
 21#define DM_PREFETCH_CHUNKS		12
 22
 23/*-----------------------------------------------------------------
 24 * Persistent snapshots, by persistent we mean that the snapshot
 25 * will survive a reboot.
 26 *---------------------------------------------------------------*/
 27
 28/*
 29 * We need to store a record of which parts of the origin have
 30 * been copied to the snapshot device.  The snapshot code
 31 * requires that we copy exception chunks to chunk aligned areas
 33 * of the COW store.  It makes sense, therefore, to store the
 33 * metadata in chunk size blocks.
 34 *
 35 * There is no backward or forward compatibility implemented,
 36 * snapshots with different disk versions than the kernel will
 37 * not be usable.  It is expected that "lvcreate" will blank out
 38 * the start of a fresh COW device before calling the snapshot
 39 * constructor.
 40 *
 41 * The first chunk of the COW device just contains the header.
 42 * After this there is a chunk filled with exception metadata,
 43 * followed by as many exception chunks as can fit in the
 44 * metadata areas.
 45 *
 46 * All on disk structures are in little-endian format.  The end
 47 * of the exceptions info is indicated by an exception with a
 48 * new_chunk of 0, which is invalid since it would point to the
 49 * header chunk.
 50 */
 51
 52/*
 53 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it.
 54 */
 55#define SNAP_MAGIC 0x70416e53
 56
 57/*
 58 * The on-disk version of the metadata.
 59 */
 60#define SNAPSHOT_DISK_VERSION 1
 61
 62#define NUM_SNAPSHOT_HDR_CHUNKS 1
 63
 64struct disk_header {
 65	__le32 magic;
 66
 67	/*
 68	 * Is this snapshot valid?  There is no way of recovering
 69	 * an invalid snapshot.
 70	 */
 71	__le32 valid;
 72
 73	/*
 74	 * Simple, incrementing version; no backward
 75	 * compatibility.
 76	 */
 77	__le32 version;
 78
 79	/* In sectors */
 80	__le32 chunk_size;
 81} __packed;
 82
 83struct disk_exception {
 84	__le64 old_chunk;
 85	__le64 new_chunk;
 86} __packed;
 87
 88struct core_exception {
 89	uint64_t old_chunk;
 90	uint64_t new_chunk;
 91};
 92
 93struct commit_callback {
 94	void (*callback)(void *, int success);
 95	void *context;
 96};
 97
 98/*
 99 * The top level structure for a persistent exception store.
100 */
101struct pstore {
102	struct dm_exception_store *store;
103	int version;
104	int valid;
105	uint32_t exceptions_per_area;
106
107	/*
108	 * Now that we have an asynchronous kcopyd there is no
109	 * need for large chunk sizes, so it won't hurt to have a
110	 * whole chunk's worth of metadata in memory at once.
111	 */
112	void *area;
113
114	/*
115	 * An area of zeros used to clear the next area.
116	 */
117	void *zero_area;
118
119	/*
120	 * An area used for the header. The header can be written
121	 * concurrently with metadata (when invalidating the snapshot),
122	 * so it needs a separate buffer.
123	 */
124	void *header_area;
125
126	/*
127	 * Used to keep track of which metadata area the data in
128	 * 'chunk' refers to.
129	 */
130	chunk_t current_area;
131
132	/*
133	 * The next free chunk for an exception.
134	 *
135	 * When creating exceptions, all the chunks here and above are
136	 * free.  It holds the next chunk to be allocated.  On rare
137	 * occasions (e.g. after a system crash) holes can be left in
138	 * the exception store because chunks can be committed out of
139	 * order.
140	 *
141	 * When merging exceptions, it does not necessarily mean all the
142	 * chunks here and above are free.  It holds the value it would
143	 * have held if all chunks had been committed in order of
144	 * allocation.  Consequently the value may occasionally be
145	 * slightly too low, but since it's only used for 'status' and
146	 * it can never reach its minimum value too early this doesn't
147	 * matter.
148	 */
149
150	chunk_t next_free;
151
152	/*
153	 * The index of next free exception in the current
154	 * metadata area.
155	 */
156	uint32_t current_committed;
157
158	atomic_t pending_count;
159	uint32_t callback_count;
160	struct commit_callback *callbacks;
161	struct dm_io_client *io_client;
162
163	struct workqueue_struct *metadata_wq;
164};
165
166static int alloc_area(struct pstore *ps)
167{
168	int r = -ENOMEM;
169	size_t len;
170
171	len = ps->store->chunk_size << SECTOR_SHIFT;
172
173	/*
174	 * Allocate the chunk_size block of memory that will hold
175	 * a single metadata area.
176	 */
177	ps->area = vmalloc(len);
178	if (!ps->area)
179		goto err_area;
180
181	ps->zero_area = vzalloc(len);
182	if (!ps->zero_area)
183		goto err_zero_area;
184
185	ps->header_area = vmalloc(len);
186	if (!ps->header_area)
187		goto err_header_area;
188
189	return 0;
190
191err_header_area:
192	vfree(ps->zero_area);
193
194err_zero_area:
195	vfree(ps->area);
196
197err_area:
198	return r;
199}
200
201static void free_area(struct pstore *ps)
202{
203	if (ps->area)
204		vfree(ps->area);
205	ps->area = NULL;
206
207	if (ps->zero_area)
208		vfree(ps->zero_area);
209	ps->zero_area = NULL;
210
211	if (ps->header_area)
212		vfree(ps->header_area);
213	ps->header_area = NULL;
214}
215
216struct mdata_req {
217	struct dm_io_region *where;
218	struct dm_io_request *io_req;
219	struct work_struct work;
220	int result;
221};
222
223static void do_metadata(struct work_struct *work)
224{
225	struct mdata_req *req = container_of(work, struct mdata_req, work);
226
227	req->result = dm_io(req->io_req, 1, req->where, NULL);
228}
229
230/*
231 * Read or write a chunk aligned and sized block of data from a device.
232 */
233static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
234		    int metadata)
235{
236	struct dm_io_region where = {
237		.bdev = dm_snap_cow(ps->store->snap)->bdev,
238		.sector = ps->store->chunk_size * chunk,
239		.count = ps->store->chunk_size,
240	};
241	struct dm_io_request io_req = {
242		.bi_rw = rw,
243		.mem.type = DM_IO_VMA,
244		.mem.ptr.vma = area,
245		.client = ps->io_client,
246		.notify.fn = NULL,
247	};
248	struct mdata_req req;
249
250	if (!metadata)
251		return dm_io(&io_req, 1, &where, NULL);
252
253	req.where = &where;
254	req.io_req = &io_req;
255
256	/*
257	 * Issue the synchronous I/O from a different thread
258	 * to avoid generic_make_request recursion.
259	 */
260	INIT_WORK_ONSTACK(&req.work, do_metadata);
261	queue_work(ps->metadata_wq, &req.work);
262	flush_workqueue(ps->metadata_wq);
263	destroy_work_on_stack(&req.work);
264
265	return req.result;
266}
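/*
 * Editor's note: this v3.15 variant still passes a combined rw word
 * (READ/WRITE plus request flags) through dm_io_request.bi_rw; later
 * kernels split this into the bi_op/bi_op_flags pair seen in the v5.9
 * copy above.
 */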
267
268/*
269 * Convert a metadata area index to a chunk index.
270 */
271static chunk_t area_location(struct pstore *ps, chunk_t area)
272{
273	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
274}
275
276static void skip_metadata(struct pstore *ps)
277{
278	uint32_t stride = ps->exceptions_per_area + 1;
279	chunk_t next_free = ps->next_free;
280	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
281		ps->next_free++;
282}
283
284/*
285 * Read or write a metadata area.  Remembering to skip the first
286 * chunk which holds the header.
287 */
288static int area_io(struct pstore *ps, int rw)
289{
290	int r;
291	chunk_t chunk;
292
293	chunk = area_location(ps, ps->current_area);
294
295	r = chunk_io(ps, ps->area, chunk, rw, 0);
296	if (r)
297		return r;
298
299	return 0;
300}
301
302static void zero_memory_area(struct pstore *ps)
303{
304	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
305}
306
307static int zero_disk_area(struct pstore *ps, chunk_t area)
308{
309	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
310}
311
312static int read_header(struct pstore *ps, int *new_snapshot)
313{
314	int r;
315	struct disk_header *dh;
316	unsigned chunk_size;
317	int chunk_size_supplied = 1;
318	char *chunk_err;
319
320	/*
321	 * Use default chunk size (or logical_block_size, if larger)
322	 * if none supplied
323	 */
324	if (!ps->store->chunk_size) {
325		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
326		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
327					    bdev) >> 9);
328		ps->store->chunk_mask = ps->store->chunk_size - 1;
329		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
330		chunk_size_supplied = 0;
331	}
332
333	ps->io_client = dm_io_client_create();
334	if (IS_ERR(ps->io_client))
335		return PTR_ERR(ps->io_client);
336
337	r = alloc_area(ps);
338	if (r)
339		return r;
340
341	r = chunk_io(ps, ps->header_area, 0, READ, 1);
342	if (r)
343		goto bad;
344
345	dh = ps->header_area;
346
347	if (le32_to_cpu(dh->magic) == 0) {
348		*new_snapshot = 1;
349		return 0;
350	}
351
352	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
353		DMWARN("Invalid or corrupt snapshot");
354		r = -ENXIO;
355		goto bad;
356	}
357
358	*new_snapshot = 0;
359	ps->valid = le32_to_cpu(dh->valid);
360	ps->version = le32_to_cpu(dh->version);
361	chunk_size = le32_to_cpu(dh->chunk_size);
362
363	if (ps->store->chunk_size == chunk_size)
364		return 0;
365
366	if (chunk_size_supplied)
367		DMWARN("chunk size %u in device metadata overrides "
368		       "table chunk size of %u.",
369		       chunk_size, ps->store->chunk_size);
370
371	/* We had a bogus chunk_size. Fix stuff up. */
372	free_area(ps);
373
374	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
375					      &chunk_err);
376	if (r) {
377		DMERR("invalid on-disk chunk size %u: %s.",
378		      chunk_size, chunk_err);
379		return r;
380	}
381
382	r = alloc_area(ps);
383	return r;
384
385bad:
386	free_area(ps);
387	return r;
388}
389
390static int write_header(struct pstore *ps)
391{
392	struct disk_header *dh;
393
394	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
395
396	dh = ps->header_area;
397	dh->magic = cpu_to_le32(SNAP_MAGIC);
398	dh->valid = cpu_to_le32(ps->valid);
399	dh->version = cpu_to_le32(ps->version);
400	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
401
402	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
403}
404
405/*
406 * Access functions for the disk exceptions, these do the endian conversions.
407 */
408static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
409					    uint32_t index)
410{
411	BUG_ON(index >= ps->exceptions_per_area);
412
413	return ((struct disk_exception *) ps_area) + index;
414}
415
416static void read_exception(struct pstore *ps, void *ps_area,
417			   uint32_t index, struct core_exception *result)
418{
419	struct disk_exception *de = get_exception(ps, ps_area, index);
420
421	/* copy it */
422	result->old_chunk = le64_to_cpu(de->old_chunk);
423	result->new_chunk = le64_to_cpu(de->new_chunk);
424}
425
426static void write_exception(struct pstore *ps,
427			    uint32_t index, struct core_exception *e)
428{
429	struct disk_exception *de = get_exception(ps, ps->area, index);
430
431	/* copy it */
432	de->old_chunk = cpu_to_le64(e->old_chunk);
433	de->new_chunk = cpu_to_le64(e->new_chunk);
434}
435
436static void clear_exception(struct pstore *ps, uint32_t index)
437{
438	struct disk_exception *de = get_exception(ps, ps->area, index);
439
440	/* clear it */
441	de->old_chunk = 0;
442	de->new_chunk = 0;
443}
444
445/*
446 * Registers the exceptions that are present in the current area.
447 * 'full' is filled in to indicate if the area has been
448 * filled.
449 */
450static int insert_exceptions(struct pstore *ps, void *ps_area,
451			     int (*callback)(void *callback_context,
452					     chunk_t old, chunk_t new),
453			     void *callback_context,
454			     int *full)
455{
456	int r;
457	unsigned int i;
458	struct core_exception e;
459
460	/* presume the area is full */
461	*full = 1;
462
463	for (i = 0; i < ps->exceptions_per_area; i++) {
464		read_exception(ps, ps_area, i, &e);
465
466		/*
467		 * If the new_chunk is pointing at the start of
468		 * the COW device, where the first metadata area
469		 * is we know that we've hit the end of the
470		 * exceptions.  Therefore the area is not full.
471		 */
472		if (e.new_chunk == 0LL) {
473			ps->current_committed = i;
474			*full = 0;
475			break;
476		}
477
478		/*
479		 * Keep track of the start of the free chunks.
480		 */
481		if (ps->next_free <= e.new_chunk)
482			ps->next_free = e.new_chunk + 1;
483
484		/*
485		 * Otherwise we add the exception to the snapshot.
486		 */
487		r = callback(callback_context, e.old_chunk, e.new_chunk);
488		if (r)
489			return r;
490	}
491
492	return 0;
493}
494
495static int read_exceptions(struct pstore *ps,
496			   int (*callback)(void *callback_context, chunk_t old,
497					   chunk_t new),
498			   void *callback_context)
499{
500	int r, full = 1;
501	struct dm_bufio_client *client;
502	chunk_t prefetch_area = 0;
503
504	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
505					ps->store->chunk_size << SECTOR_SHIFT,
506					1, 0, NULL, NULL);
507
508	if (IS_ERR(client))
509		return PTR_ERR(client);
510
511	/*
512	 * Set up for one current buffer + desired readahead buffers.
513	 */
514	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);
515
516	/*
517	 * Keep reading chunks and inserting exceptions until
518	 * we find a partially full area.
519	 */
520	for (ps->current_area = 0; full; ps->current_area++) {
521		struct dm_buffer *bp;
522		void *area;
523		chunk_t chunk;
524
525		if (unlikely(prefetch_area < ps->current_area))
526			prefetch_area = ps->current_area;
527
528		if (DM_PREFETCH_CHUNKS) do {
529			chunk_t pf_chunk = area_location(ps, prefetch_area);
530			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
531				break;
532			dm_bufio_prefetch(client, pf_chunk, 1);
533			prefetch_area++;
534			if (unlikely(!prefetch_area))
535				break;
536		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
537
538		chunk = area_location(ps, ps->current_area);
539
540		area = dm_bufio_read(client, chunk, &bp);
541		if (unlikely(IS_ERR(area))) {
542			r = PTR_ERR(area);
543			goto ret_destroy_bufio;
544		}
545
546		r = insert_exceptions(ps, area, callback, callback_context,
547				      &full);
548
549		if (!full)
550			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
551
552		dm_bufio_release(bp);
553
554		dm_bufio_forget(client, chunk);
555
556		if (unlikely(r))
557			goto ret_destroy_bufio;
558	}
559
560	ps->current_area--;
561
562	skip_metadata(ps);
563
564	r = 0;
565
566ret_destroy_bufio:
567	dm_bufio_client_destroy(client);
568
569	return r;
570}
571
572static struct pstore *get_info(struct dm_exception_store *store)
573{
574	return (struct pstore *) store->context;
575}
576
577static void persistent_usage(struct dm_exception_store *store,
578			     sector_t *total_sectors,
579			     sector_t *sectors_allocated,
580			     sector_t *metadata_sectors)
581{
582	struct pstore *ps = get_info(store);
583
584	*sectors_allocated = ps->next_free * store->chunk_size;
585	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
586
587	/*
588	 * First chunk is the fixed header.
589	 * Then there are (ps->current_area + 1) metadata chunks, each one
590	 * separated from the next by ps->exceptions_per_area data chunks.
591	 */
592	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
593			    store->chunk_size;
594}
595
596static void persistent_dtr(struct dm_exception_store *store)
597{
598	struct pstore *ps = get_info(store);
599
600	destroy_workqueue(ps->metadata_wq);
601
602	/* Created in read_header */
603	if (ps->io_client)
604		dm_io_client_destroy(ps->io_client);
605	free_area(ps);
606
607	/* Allocated in persistent_read_metadata */
608	if (ps->callbacks)
609		vfree(ps->callbacks);
610
611	kfree(ps);
612}
613
614static int persistent_read_metadata(struct dm_exception_store *store,
615				    int (*callback)(void *callback_context,
616						    chunk_t old, chunk_t new),
617				    void *callback_context)
618{
619	int r, uninitialized_var(new_snapshot);
620	struct pstore *ps = get_info(store);
621
622	/*
623	 * Read the snapshot header.
624	 */
625	r = read_header(ps, &new_snapshot);
626	if (r)
627		return r;
628
629	/*
630	 * Now we know correct chunk_size, complete the initialisation.
631	 */
632	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
633				  sizeof(struct disk_exception);
634	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
635				   sizeof(*ps->callbacks));
636	if (!ps->callbacks)
637		return -ENOMEM;
638
639	/*
640	 * Do we need to set up a new snapshot?
641	 */
642	if (new_snapshot) {
643		r = write_header(ps);
644		if (r) {
645			DMWARN("write_header failed");
646			return r;
647		}
648
649		ps->current_area = 0;
650		zero_memory_area(ps);
651		r = zero_disk_area(ps, 0);
652		if (r)
653			DMWARN("zero_disk_area(0) failed");
654		return r;
655	}
656	/*
657	 * Sanity checks.
658	 */
659	if (ps->version != SNAPSHOT_DISK_VERSION) {
660		DMWARN("unable to handle snapshot disk version %d",
661		       ps->version);
662		return -EINVAL;
663	}
664
665	/*
666	 * Metadata is valid, but the snapshot has been invalidated
667	 */
668	if (!ps->valid)
669		return 1;
670
671	/*
672	 * Read the metadata.
673	 */
674	r = read_exceptions(ps, callback, callback_context);
675
676	return r;
677}
678
679static int persistent_prepare_exception(struct dm_exception_store *store,
680					struct dm_exception *e)
681{
682	struct pstore *ps = get_info(store);
683	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
684
685	/* Is there enough room? */
686	if (size < ((ps->next_free + 1) * store->chunk_size))
687		return -ENOSPC;
688
689	e->new_chunk = ps->next_free;
690
691	/*
692	 * Move on to the next free chunk, making sure to take
693	 * into account the location of the metadata chunks.
694	 */
695	ps->next_free++;
696	skip_metadata(ps);
697
698	atomic_inc(&ps->pending_count);
699	return 0;
700}
701
702static void persistent_commit_exception(struct dm_exception_store *store,
703					struct dm_exception *e,
704					void (*callback) (void *, int success),
705					void *callback_context)
706{
707	unsigned int i;
708	struct pstore *ps = get_info(store);
709	struct core_exception ce;
710	struct commit_callback *cb;
711
712	ce.old_chunk = e->old_chunk;
713	ce.new_chunk = e->new_chunk;
714	write_exception(ps, ps->current_committed++, &ce);
715
716	/*
717	 * Add the callback to the back of the array.  This code
718	 * is the only place where the callback array is
719	 * manipulated, and we know that it will never be called
720	 * multiple times concurrently.
721	 */
722	cb = ps->callbacks + ps->callback_count++;
723	cb->callback = callback;
724	cb->context = callback_context;
725
726	/*
727	 * If there are exceptions in flight and we have not yet
728	 * filled this metadata area there's nothing more to do.
729	 */
730	if (!atomic_dec_and_test(&ps->pending_count) &&
731	    (ps->current_committed != ps->exceptions_per_area))
732		return;
733
734	/*
735	 * If we completely filled the current area, then wipe the next one.
736	 */
737	if ((ps->current_committed == ps->exceptions_per_area) &&
738	    zero_disk_area(ps, ps->current_area + 1))
739		ps->valid = 0;
740
741	/*
742	 * Commit exceptions to disk.
743	 */
744	if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
745		ps->valid = 0;
746
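	/*
	 * Editor's note: WRITE_FLUSH_FUA was the old block-layer
	 * shorthand for a write that both flushes the cache first and
	 * is forced to stable media; it carries the same ordering
	 * guarantee the v5.9 copy spells out as
	 * REQ_PREFLUSH | REQ_FUA | REQ_SYNC.
	 */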
747	/*
748	 * Advance to the next area if this one is full.
749	 */
750	if (ps->current_committed == ps->exceptions_per_area) {
751		ps->current_committed = 0;
752		ps->current_area++;
753		zero_memory_area(ps);
754	}
755
756	for (i = 0; i < ps->callback_count; i++) {
757		cb = ps->callbacks + i;
758		cb->callback(cb->context, ps->valid);
759	}
760
761	ps->callback_count = 0;
762}
763
764static int persistent_prepare_merge(struct dm_exception_store *store,
765				    chunk_t *last_old_chunk,
766				    chunk_t *last_new_chunk)
767{
768	struct pstore *ps = get_info(store);
769	struct core_exception ce;
770	int nr_consecutive;
771	int r;
772
773	/*
774	 * When current area is empty, move back to preceding area.
775	 */
776	if (!ps->current_committed) {
777		/*
778		 * Have we finished?
779		 */
780		if (!ps->current_area)
781			return 0;
782
783		ps->current_area--;
784		r = area_io(ps, READ);
785		if (r < 0)
786			return r;
787		ps->current_committed = ps->exceptions_per_area;
788	}
789
790	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
791	*last_old_chunk = ce.old_chunk;
792	*last_new_chunk = ce.new_chunk;
793
794	/*
795	 * Find number of consecutive chunks within the current area,
796	 * working backwards.
797	 */
798	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
799	     nr_consecutive++) {
800		read_exception(ps, ps->area,
801			       ps->current_committed - 1 - nr_consecutive, &ce);
802		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
803		    ce.new_chunk != *last_new_chunk - nr_consecutive)
804			break;
805	}
806
807	return nr_consecutive;
808}
809
810static int persistent_commit_merge(struct dm_exception_store *store,
811				   int nr_merged)
812{
813	int r, i;
814	struct pstore *ps = get_info(store);
815
816	BUG_ON(nr_merged > ps->current_committed);
817
818	for (i = 0; i < nr_merged; i++)
819		clear_exception(ps, ps->current_committed - 1 - i);
820
821	r = area_io(ps, WRITE_FLUSH_FUA);
822	if (r < 0)
823		return r;
824
825	ps->current_committed -= nr_merged;
826
827	/*
828	 * At this stage, only persistent_usage() uses ps->next_free, so
829	 * we make no attempt to keep ps->next_free strictly accurate
830	 * as exceptions may have been committed out-of-order originally.
831	 * Once a snapshot has become merging, we set it to the value it
832	 * would have held had all the exceptions been committed in order.
833	 *
834	 * ps->current_area does not get reduced by prepare_merge() until
835	 * after commit_merge() has removed the nr_merged previous exceptions.
836	 */
837	ps->next_free = area_location(ps, ps->current_area) +
838			ps->current_committed + 1;
839
840	return 0;
841}
842
843static void persistent_drop_snapshot(struct dm_exception_store *store)
844{
845	struct pstore *ps = get_info(store);
846
847	ps->valid = 0;
848	if (write_header(ps))
849		DMWARN("write header failed");
850}
851
852static int persistent_ctr(struct dm_exception_store *store,
853			  unsigned argc, char **argv)
854{
855	struct pstore *ps;
856
857	/* allocate the pstore */
858	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
859	if (!ps)
860		return -ENOMEM;
861
862	ps->store = store;
863	ps->valid = 1;
864	ps->version = SNAPSHOT_DISK_VERSION;
865	ps->area = NULL;
866	ps->zero_area = NULL;
867	ps->header_area = NULL;
868	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
869	ps->current_committed = 0;
870
871	ps->callback_count = 0;
872	atomic_set(&ps->pending_count, 0);
873	ps->callbacks = NULL;
874
875	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
876	if (!ps->metadata_wq) {
877		kfree(ps);
878		DMERR("couldn't start header metadata update thread");
879		return -ENOMEM;
880	}
881
882	store->context = ps;
883
884	return 0;
885}
886
887static unsigned persistent_status(struct dm_exception_store *store,
888				  status_type_t status, char *result,
889				  unsigned maxlen)
890{
891	unsigned sz = 0;
892
893	switch (status) {
894	case STATUSTYPE_INFO:
895		break;
896	case STATUSTYPE_TABLE:
897		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
898	}
899
900	return sz;
901}
902
903static struct dm_exception_store_type _persistent_type = {
904	.name = "persistent",
905	.module = THIS_MODULE,
906	.ctr = persistent_ctr,
907	.dtr = persistent_dtr,
908	.read_metadata = persistent_read_metadata,
909	.prepare_exception = persistent_prepare_exception,
910	.commit_exception = persistent_commit_exception,
911	.prepare_merge = persistent_prepare_merge,
912	.commit_merge = persistent_commit_merge,
913	.drop_snapshot = persistent_drop_snapshot,
914	.usage = persistent_usage,
915	.status = persistent_status,
916};
917
918static struct dm_exception_store_type _persistent_compat_type = {
919	.name = "P",
920	.module = THIS_MODULE,
921	.ctr = persistent_ctr,
922	.dtr = persistent_dtr,
923	.read_metadata = persistent_read_metadata,
924	.prepare_exception = persistent_prepare_exception,
925	.commit_exception = persistent_commit_exception,
926	.prepare_merge = persistent_prepare_merge,
927	.commit_merge = persistent_commit_merge,
928	.drop_snapshot = persistent_drop_snapshot,
929	.usage = persistent_usage,
930	.status = persistent_status,
931};
932
933int dm_persistent_snapshot_init(void)
934{
935	int r;
936
937	r = dm_exception_store_type_register(&_persistent_type);
938	if (r) {
939		DMERR("Unable to register persistent exception store type");
940		return r;
941	}
942
943	r = dm_exception_store_type_register(&_persistent_compat_type);
944	if (r) {
945		DMERR("Unable to register old-style persistent exception "
946		      "store type");
947		dm_exception_store_type_unregister(&_persistent_type);
948		return r;
949	}
950
951	return r;
952}
953
954void dm_persistent_snapshot_exit(void)
955{
956	dm_exception_store_type_unregister(&_persistent_type);
957	dm_exception_store_type_unregister(&_persistent_compat_type);
958}