drivers/md/dm-kcopyd.c (Linux v3.5.6)
/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
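
/*
 * Illustrative usage sketch (not part of the upstream file; the example_*
 * names are hypothetical).  A caller creates a client once, fills in
 * dm_io_region structures and submits an asynchronous copy, here made
 * synchronous with a completion:
 *
 *	static void example_copy_done(int read_err, unsigned long write_err,
 *				      void *context)
 *	{
 *		struct completion *done = context;
 *
 *		if (read_err || write_err)
 *			DMERR("example: copy failed");
 *		complete(done);
 *	}
 *
 *	static int example_copy(struct dm_kcopyd_client *kc,
 *				struct block_device *src,
 *				struct block_device *dst,
 *				sector_t sector, sector_t count)
 *	{
 *		DECLARE_COMPLETION_ONSTACK(done);
 *		struct dm_io_region from = {
 *			.bdev = src, .sector = sector, .count = count,
 *		};
 *		struct dm_io_region to = {
 *			.bdev = dst, .sector = sector, .count = count,
 *		};
 *
 *		dm_kcopyd_copy(kc, &from, 1, &to, 0, example_copy_done, &done);
 *		wait_for_completion(&done);
 *		return 0;
 *	}
 */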

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

#define SUB_JOB_SIZE	128
#define SPLIT_COUNT	8
#define MIN_JOBS	8
#define RESERVE_PAGES	(DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
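
/*
 * Note: SUB_JOB_SIZE is in 512-byte sectors, so one sub job covers
 * 128 << SECTOR_SHIFT = 64 KiB; with the common 4 KiB PAGE_SIZE,
 * RESERVE_PAGES works out to 16 pages per client.
 */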

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	struct page_list *pages;
	unsigned nr_reserved_pages;
	unsigned nr_free_pages;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;

	mempool_t *job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

/*
 * We maintain three lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that have completed.
 *
 * All three of these are protected by job_lock.
 */
	spinlock_t job_lock;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};
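
/*
 * Note: a job normally flows pages_jobs -> io_jobs (as a READ) -> io_jobs
 * again (as a WRITE) -> complete_jobs.  do_work() below drains
 * complete_jobs first so that finished jobs release their pages for jobs
 * still waiting on pages_jobs.
 */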

static struct page_list zero_page_list;

static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}

/*
 * Obtain one page for the use of kcopyd.
 */
static struct page_list *alloc_pl(gfp_t gfp)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), gfp);
	if (!pl)
		return NULL;

	pl->page = alloc_page(gfp);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}

/*
 * Add the provided pages to a client's free page list, releasing
 * back to the system any beyond the reserved_pages limit.
 */
static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *next;

	do {
		next = pl->next;

		if (kc->nr_free_pages >= kc->nr_reserved_pages)
			free_pl(pl);
		else {
			pl->next = kc->pages;
			kc->pages = pl;
			kc->nr_free_pages++;
		}

		pl = next;
	} while (pl);
}

static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	*pages = NULL;

	do {
		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
		if (unlikely(!pl)) {
			/* Use reserved pages */
			pl = kc->pages;
			if (unlikely(!pl))
				goto out_of_memory;
			kc->pages = pl->next;
			kc->nr_free_pages--;
		}
		pl->next = *pages;
		*pages = pl;
	} while (--nr);

	return 0;

out_of_memory:
	if (*pages)
		kcopyd_put_pages(kc, *pages);
	return -ENOMEM;
}
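
/*
 * Note: kcopyd_get_pages() allocates opportunistically with
 * __GFP_NOWARN | __GFP_NORETRY and falls back to the per-client reserve.
 * If both fail, the caller (run_pages_job) leaves the job queued on
 * pages_jobs; it is retried when completing jobs return pages.
 */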

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

/*
 * Allocate and reserve nr_pages for the use of a specific client.
 */
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
	unsigned i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr_pages; i++) {
		next = alloc_pl(GFP_KERNEL);
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kc->nr_reserved_pages += nr_pages;
	kcopyd_put_pages(kc, pl);

	return 0;
}

static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_reserved_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
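/*
 * Note: the deadlock being avoided is recursion into reclaim.  A kcopyd
 * client such as dm-mirror can sit in the writeback path, so an allocation
 * here that itself triggered writeback could end up waiting on kcopyd.
 * mempool_alloc(job_pool, GFP_NOIO) instead falls back to the MIN_JOBS
 * preallocated entries without ever issuing io.
 */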
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;

	struct kcopyd_job *master_job;
};
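
/*
 * Note: jobs are allocated as arrays of SPLIT_COUNT + 1 structures (see
 * the kmem_cache object size below), so a master job and its sub jobs come
 * from a single mempool allocation; master_job[1..SPLIT_COUNT] are the sub
 * jobs used by split_job().
 */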

static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)
{
	_job_cache = kmem_cache_create("kcopyd_job",
				sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
				__alignof__(struct kcopyd_job), 0, NULL);
	if (!_job_cache)
		return -ENOMEM;

	zero_page_list.next = &zero_page_list;
	zero_page_list.page = ZERO_PAGE(0);

	return 0;
}

void dm_kcopyd_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
}

/*
 * Functions to push a job onto the tail or head of a given job
 * list, and to pop one from its head.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}


static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	if (job->pages && job->pages != &zero_page_list)
		kcopyd_put_pages(kc, job->pages);
	/*
	 * If this is the master job, the sub jobs have already
	 * completed so we can free everything.
	 */
	if (job->master_job == job)
		mempool_free(job, kc->job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	if (error) {
		if (job->rw == WRITE)
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (job->rw == WRITE)
		push(&kc->complete_jobs, job);

	else {
		job->rw = WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
}

/*
 * Request io on as many pages as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = job->rw,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = 0,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
	int r;
	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);

	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}
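
/*
 * Note: PAGE_SIZE >> 9 is the number of 512-byte sectors per page (8 with
 * 4 KiB pages), so a full 128-sector sub job needs dm_div_up(128, 8) = 16
 * pages, which is exactly RESERVE_PAGES.
 */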

/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push_head(jobs, job);
			break;
		}

		count++;
	}

	return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);
	struct blk_plug plug;

	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	blk_start_plug(&plug);
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
	blk_finish_plug(&plug);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;
	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->complete_jobs, job);
	else if (job->pages == &zero_page_list)
		push(&kc->io_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}
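
/*
 * Note: zero jobs (job->pages == &zero_page_list) have nothing to read and
 * need no page allocation, so they bypass pages_jobs and go straight to
 * io_jobs as writes; see dm_kcopyd_zero() below.
 */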

static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
	struct kcopyd_job *job = sub_job->master_job;
	struct dm_kcopyd_client *kc = job->kc;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;

		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = sub_job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * Queue the completion callback to the kcopyd thread.
		 *
		 * Some callers assume that all the completions are called
		 * from a single thread and don't race with each other.
		 *
		 * We must not call the callback directly here because this
		 * code may not be executing in the thread.
		 */
		push(&kc->complete_jobs, job);
		wake(kc);
	}
}

/*
 * Create some sub jobs to share the work between them.
 */
static void split_job(struct kcopyd_job *master_job)
{
	int i;

	atomic_inc(&master_job->kc->nr_jobs);

	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++) {
		master_job[i + 1].master_job = master_job;
		segment_complete(0, 0u, &master_job[i + 1]);
	}
}
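
/*
 * Worked example: the SPLIT_COUNT = 8 sub jobs claim SUB_JOB_SIZE
 * (128-sector) chunks in turn, and any that finish keep claiming chunks
 * until source.count is exhausted.  A 1000-sector copy therefore runs as
 * 7 full 128-sector chunks plus a final 104-sector chunk, with at most 8
 * in flight.
 */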

int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		   unsigned int num_dests, struct dm_io_region *dests,
		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	/*
	 * Allocate an array of jobs consisting of one master job
	 * followed by SPLIT_COUNT sub jobs.
	 */
	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	/*
	 * Set up the job.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	if (from) {
		job->source = *from;
		job->pages = NULL;
		job->rw = READ;
	} else {
		memset(&job->source, 0, sizeof job->source);
		job->source.count = job->dests[0].count;
		job->pages = &zero_page_list;
		job->rw = WRITE;
	}

	job->fn = fn;
	job->context = context;
	job->master_job = job;

	if (job->source.count <= SUB_JOB_SIZE)
		dispatch_job(job);
	else {
		mutex_init(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);

int dm_kcopyd_zero(struct dm_kcopyd_client *kc,
		   unsigned num_dests, struct dm_io_region *dests,
		   unsigned flags, dm_kcopyd_notify_fn fn, void *context)
{
	return dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
EXPORT_SYMBOL(dm_kcopyd_zero);

void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
				 dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	memset(job, 0, sizeof(struct kcopyd_job));
	job->kc = kc;
	job->fn = fn;
	job->context = context;
	job->master_job = job;

	atomic_inc(&kc->nr_jobs);

	return job;
}
EXPORT_SYMBOL(dm_kcopyd_prepare_callback);

void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
{
	struct kcopyd_job *job = j;
	struct dm_kcopyd_client *kc = job->kc;

	job->read_err = read_err;
	job->write_err = write_err;

	push(&kc->complete_jobs, job);
	wake(kc);
}
EXPORT_SYMBOL(dm_kcopyd_do_callback);
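
/*
 * Illustrative sketch (hypothetical example_* names): the prepare/do pair
 * above lets a target report a completion through the kcopyd thread, so
 * the callback runs in process context and cannot race with kcopyd's own
 * completions:
 *
 *	in the submission path:
 *		void *j = dm_kcopyd_prepare_callback(kc, example_done, ctx);
 *
 *	later, even from interrupt context (job_lock is irq-safe):
 *		dm_kcopyd_do_callback(j, 0, 0);
 */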

/*
 * Cancels a kcopyd job, e.g. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif  /*  0  */

/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
struct dm_kcopyd_client *dm_kcopyd_client_create(void)
{
	int r = -ENOMEM;
	struct dm_kcopyd_client *kc;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);

	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!kc->job_pool)
		goto bad_slab;

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = alloc_workqueue("kcopyd",
					WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!kc->kcopyd_wq)
		goto bad_workqueue;

	kc->pages = NULL;
	kc->nr_reserved_pages = kc->nr_free_pages = 0;
	r = client_reserve_pages(kc, RESERVE_PAGES);
	if (r)
		goto bad_client_pages;

	kc->io_client = dm_io_client_create();
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		goto bad_io_client;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	return kc;

bad_io_client:
	client_free_pages(kc);
bad_client_pages:
	destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
	mempool_destroy(kc->job_pool);
bad_slab:
	kfree(kc);

	return ERR_PTR(r);
}
EXPORT_SYMBOL(dm_kcopyd_client_create);

void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	mempool_destroy(kc->job_pool);
	kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);