   1/*
   2 * Copyright (C) 2011 Red Hat UK.
   3 *
   4 * This file is released under the GPL.
   5 */
   6
   7#include "dm-thin-metadata.h"
   8
   9#include <linux/device-mapper.h>
  10#include <linux/dm-io.h>
  11#include <linux/dm-kcopyd.h>
  12#include <linux/list.h>
  13#include <linux/init.h>
  14#include <linux/module.h>
  15#include <linux/slab.h>
  16
  17#define	DM_MSG_PREFIX	"thin"
  18
  19/*
  20 * Tunable constants
  21 */
  22#define ENDIO_HOOK_POOL_SIZE 1024
  23#define DEFERRED_SET_SIZE 64
  24#define MAPPING_POOL_SIZE 1024
  25#define PRISON_CELLS 1024
  26#define COMMIT_PERIOD HZ
  27
  28/*
  29 * The block size of the device holding pool data must be
  30 * between 64KB and 1GB.
  31 */
  32#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
  33#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
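
/*
 * With 512-byte sectors (SECTOR_SHIFT == 9) these limits work out to 128
 * and 2097152 sectors respectively; pool_ctr() additionally requires the
 * block size to be a power of two.
 */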
  34
  35/*
  36 * Device id is restricted to 24 bits.
  37 */
  38#define MAX_DEV_ID ((1 << 24) - 1)
  39
  40/*
  41 * How do we handle breaking sharing of data blocks?
  42 * =================================================
  43 *
  44 * We use a standard copy-on-write btree to store the mappings for the
  45 * devices (note I'm talking about copy-on-write of the metadata here, not
  46 * the data).  When you take an internal snapshot you clone the root node
  47 * of the origin btree.  After this there is no concept of an origin or a
  48 * snapshot.  They are just two device trees that happen to point to the
  49 * same data blocks.
  50 *
  51 * When we get a write in we decide if it's to a shared data block using
  52 * some timestamp magic.  If it is, we have to break sharing.
  53 *
  54 * Let's say we write to a shared block in what was the origin.  The
  55 * steps are:
  56 *
  57 * i) plug any further io to this physical block. (see bio_prison code).
  58 *
  59 * ii) quiesce any read io to that shared data block.  Obviously
  60 * including all devices that share this block.  (see deferred_set code)
  61 *
  62 * iii) copy the data block to a newly allocated block.  This step can be
  63 * skipped if the io covers the whole block. (schedule_copy).
  64 *
  65 * iv) insert the new mapping into the origin's btree
  66 * (process_prepared_mapping).  This act of inserting breaks some
  67 * sharing of btree nodes between the two devices.  Breaking sharing only
  68 * affects the btree of that specific device.  Btrees for the other
  69 * devices that share the block never change.  The btree for the origin
  70 * device as it was after the last commit is untouched, ie. we're using
  71 * persistent data structures in the functional programming sense.
  72 *
  73 * v) unplug io to this physical block, including the io that triggered
  74 * the breaking of sharing.
  75 *
  76 * Steps (ii) and (iii) occur in parallel.
  77 *
  78 * The metadata _doesn't_ need to be committed before the io continues.  We
  79 * get away with this because the io is always written to a _new_ block.
  80 * If there's a crash, then:
  81 *
  82 * - The origin mapping will point to the old origin block (the shared
  83 * one).  This will contain the data as it was before the io that triggered
  84 * the breaking of sharing came in.
  85 *
  86 * - The snap mapping still points to the old block.  As it would after
  87 * the commit.
  88 *
  89 * The downside of this scheme is that the timestamp magic isn't perfect, and
  90 * will continue to think that the data block in the snapshot device is shared
  91 * even after the write to the origin has broken sharing.  I suspect data
  92 * blocks will typically be shared by many different devices, so we're
  93 * breaking sharing n + 1 times, rather than n, where n is the number of
  94 * devices that reference this data block.  At the moment I think the
  95 * benefits far, far outweigh the disadvantages.
  96 */
  97
  98/*----------------------------------------------------------------*/
  99
 100/*
 101 * Sometimes we can't deal with a bio straight away, so we put it in prison
 102 * where it can't cause any mischief.  Bios are put in a cell identified
 103 * by a key; multiple bios can be in the same cell.  When the cell is
 104 * subsequently unlocked the bios become available.
 105 */
 106struct bio_prison;
 107
 108struct cell_key {
 109	int virtual;
 110	dm_thin_id dev;
 111	dm_block_t block;
 112};
 113
 114struct dm_bio_prison_cell {
 115	struct hlist_node list;
 116	struct bio_prison *prison;
 117	struct cell_key key;
 118	struct bio *holder;
 119	struct bio_list bios;
 120};
 121
 122struct bio_prison {
 123	spinlock_t lock;
 124	mempool_t *cell_pool;
 125
 126	unsigned nr_buckets;
 127	unsigned hash_mask;
 128	struct hlist_head *cells;
 129};
 130
 131static uint32_t calc_nr_buckets(unsigned nr_cells)
 132{
 133	uint32_t n = 128;
 134
 135	nr_cells /= 4;
 136	nr_cells = min(nr_cells, 8192u);
 137
 138	while (n < nr_cells)
 139		n <<= 1;
 140
 141	return n;
 142}
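
/*
 * For example, the default PRISON_CELLS of 1024 gives 1024 / 4 = 256, so
 * 256 hash buckets are used.  The result is always a power of two between
 * 128 and 8192.
 */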
 143
 144static struct kmem_cache *_cell_cache;
 145
 146/*
 147 * @nr_cells should be the number of cells you want in use _concurrently_.
 148 * Don't confuse it with the number of distinct keys.
 149 */
 150static struct bio_prison *prison_create(unsigned nr_cells)
 151{
 152	unsigned i;
 153	uint32_t nr_buckets = calc_nr_buckets(nr_cells);
 154	size_t len = sizeof(struct bio_prison) +
 155		(sizeof(struct hlist_head) * nr_buckets);
 156	struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
 157
 158	if (!prison)
 159		return NULL;
 160
 161	spin_lock_init(&prison->lock);
 162	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
 163	if (!prison->cell_pool) {
 164		kfree(prison);
 165		return NULL;
 166	}
 167
 168	prison->nr_buckets = nr_buckets;
 169	prison->hash_mask = nr_buckets - 1;
 170	prison->cells = (struct hlist_head *) (prison + 1);
 171	for (i = 0; i < nr_buckets; i++)
 172		INIT_HLIST_HEAD(prison->cells + i);
 173
 174	return prison;
 175}
 176
 177static void prison_destroy(struct bio_prison *prison)
 178{
 179	mempool_destroy(prison->cell_pool);
 180	kfree(prison);
 181}
 182
 183static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
 184{
 185	const unsigned long BIG_PRIME = 4294967291UL;
 186	uint64_t hash = key->block * BIG_PRIME;
 187
 188	return (uint32_t) (hash & prison->hash_mask);
 189}
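
/*
 * nr_buckets is always a power of two (see calc_nr_buckets()), so masking
 * with hash_mask is equivalent to taking the hash modulo nr_buckets.
 */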
 190
 191static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
 192{
 193	return (lhs->virtual == rhs->virtual) &&
 194	       (lhs->dev == rhs->dev) &&
 195	       (lhs->block == rhs->block);
 196}
 197
 198static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
 199						  struct cell_key *key)
 200{
 201	struct dm_bio_prison_cell *cell;
 202	struct hlist_node *tmp;
 203
 204	hlist_for_each_entry(cell, tmp, bucket, list)
 205		if (keys_equal(&cell->key, key))
 206			return cell;
 207
 208	return NULL;
 209}
 210
 211/*
 212 * This may block if a new cell needs allocating.  You must ensure that
 213 * cells will be unlocked even if the calling thread is blocked.
 214 *
 215 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 216 */
 217static int bio_detain(struct bio_prison *prison, struct cell_key *key,
 218		      struct bio *inmate, struct dm_bio_prison_cell **ref)
 219{
 220	int r = 1;
 221	unsigned long flags;
 222	uint32_t hash = hash_key(prison, key);
 223	struct dm_bio_prison_cell *cell, *cell2;
 224
 225	BUG_ON(hash >= prison->nr_buckets);
 226
 227	spin_lock_irqsave(&prison->lock, flags);
 228
 229	cell = __search_bucket(prison->cells + hash, key);
 230	if (cell) {
 231		bio_list_add(&cell->bios, inmate);
 232		goto out;
 233	}
 234
 235	/*
 236	 * Allocate a new cell
 237	 */
 238	spin_unlock_irqrestore(&prison->lock, flags);
 239	cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
 240	spin_lock_irqsave(&prison->lock, flags);
 241
 242	/*
 243	 * We've been unlocked, so we have to double check that
 244	 * nobody else has inserted this cell in the meantime.
 245	 */
 246	cell = __search_bucket(prison->cells + hash, key);
 247	if (cell) {
 248		mempool_free(cell2, prison->cell_pool);
 249		bio_list_add(&cell->bios, inmate);
 250		goto out;
 251	}
 252
 253	/*
 254	 * Use new cell.
 255	 */
 256	cell = cell2;
 257
 258	cell->prison = prison;
 259	memcpy(&cell->key, key, sizeof(cell->key));
 260	cell->holder = inmate;
 261	bio_list_init(&cell->bios);
 262	hlist_add_head(&cell->list, prison->cells + hash);
 263
 264	r = 0;
 265
 266out:
 267	spin_unlock_irqrestore(&prison->lock, flags);
 268
 269	*ref = cell;
 270
 271	return r;
 272}
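
/*
 * Typical usage, as in process_bio() below:
 *
 *	build_virtual_key(tc->td, block, &key);
 *	if (bio_detain(tc->pool->prison, &key, bio, &cell))
 *		return;
 *
 * If somebody already held the cell, the bio is now queued behind the
 * holder and we are done.  Otherwise we own the cell and must eventually
 * hand its bios back with one of the cell_release variants, or fail them
 * with cell_error().
 */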
 273
 274/*
 275 * @inmates must have been initialised prior to this call
 276 */
 277static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 278{
 279	struct bio_prison *prison = cell->prison;
 280
 281	hlist_del(&cell->list);
 282
 283	if (inmates) {
 284		bio_list_add(inmates, cell->holder);
 285		bio_list_merge(inmates, &cell->bios);
 286	}
 287
 288	mempool_free(cell, prison->cell_pool);
 289}
 290
 291static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 292{
 293	unsigned long flags;
 294	struct bio_prison *prison = cell->prison;
 295
 296	spin_lock_irqsave(&prison->lock, flags);
 297	__cell_release(cell, bios);
 298	spin_unlock_irqrestore(&prison->lock, flags);
 299}
 300
 301/*
 302 * There are a couple of places where we put a bio into a cell briefly
 303 * before taking it out again.  In these situations we know that no other
 304 * bio may be in the cell.  This function releases the cell, and also does
 305 * a sanity check.
 306 */
 307static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 308{
 309	BUG_ON(cell->holder != bio);
 310	BUG_ON(!bio_list_empty(&cell->bios));
 311
 312	__cell_release(cell, NULL);
 313}
 314
 315static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 316{
 317	unsigned long flags;
 318	struct bio_prison *prison = cell->prison;
 319
 320	spin_lock_irqsave(&prison->lock, flags);
 321	__cell_release_singleton(cell, bio);
 322	spin_unlock_irqrestore(&prison->lock, flags);
 323}
 324
 325/*
 326 * Sometimes we don't want the holder, just the additional bios.
 327 */
 328static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
 329				     struct bio_list *inmates)
 330{
 331	struct bio_prison *prison = cell->prison;
 332
 333	hlist_del(&cell->list);
 334	bio_list_merge(inmates, &cell->bios);
 335
 336	mempool_free(cell, prison->cell_pool);
 337}
 338
 339static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
 340				   struct bio_list *inmates)
 341{
 342	unsigned long flags;
 343	struct bio_prison *prison = cell->prison;
 344
 345	spin_lock_irqsave(&prison->lock, flags);
 346	__cell_release_no_holder(cell, inmates);
 347	spin_unlock_irqrestore(&prison->lock, flags);
 348}
 349
 350static void cell_error(struct dm_bio_prison_cell *cell)
 351{
 352	struct bio_prison *prison = cell->prison;
 353	struct bio_list bios;
 354	struct bio *bio;
 355	unsigned long flags;
 356
 357	bio_list_init(&bios);
 358
 359	spin_lock_irqsave(&prison->lock, flags);
 360	__cell_release(cell, &bios);
 361	spin_unlock_irqrestore(&prison->lock, flags);
 362
 363	while ((bio = bio_list_pop(&bios)))
 364		bio_io_error(bio);
 365}
 366
 367/*----------------------------------------------------------------*/
 368
 369/*
 370 * We use the deferred set to keep track of pending reads to shared blocks.
 371 * We do this to ensure the new mapping caused by a write isn't performed
 372 * until these prior reads have completed.  Otherwise the insertion of the
 373 * new mapping could free the old block that the read bios are mapped to.
 374 */
 375
 376struct deferred_set;
 377struct deferred_entry {
 378	struct deferred_set *ds;
 379	unsigned count;
 380	struct list_head work_items;
 381};
 382
 383struct deferred_set {
 384	spinlock_t lock;
 385	unsigned current_entry;
 386	unsigned sweeper;
 387	struct deferred_entry entries[DEFERRED_SET_SIZE];
 388};
 389
 390static void ds_init(struct deferred_set *ds)
 391{
 392	int i;
 393
 394	spin_lock_init(&ds->lock);
 395	ds->current_entry = 0;
 396	ds->sweeper = 0;
 397	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
 398		ds->entries[i].ds = ds;
 399		ds->entries[i].count = 0;
 400		INIT_LIST_HEAD(&ds->entries[i].work_items);
 401	}
 402}
 403
 404static struct deferred_entry *ds_inc(struct deferred_set *ds)
 405{
 406	unsigned long flags;
 407	struct deferred_entry *entry;
 408
 409	spin_lock_irqsave(&ds->lock, flags);
 410	entry = ds->entries + ds->current_entry;
 411	entry->count++;
 412	spin_unlock_irqrestore(&ds->lock, flags);
 413
 414	return entry;
 415}
 416
 417static unsigned ds_next(unsigned index)
 418{
 419	return (index + 1) % DEFERRED_SET_SIZE;
 420}
 421
 422static void __sweep(struct deferred_set *ds, struct list_head *head)
 423{
 424	while ((ds->sweeper != ds->current_entry) &&
 425	       !ds->entries[ds->sweeper].count) {
 426		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
 427		ds->sweeper = ds_next(ds->sweeper);
 428	}
 429
 430	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
 431		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
 432}
 433
 434static void ds_dec(struct deferred_entry *entry, struct list_head *head)
 435{
 436	unsigned long flags;
 437
 438	spin_lock_irqsave(&entry->ds->lock, flags);
 439	BUG_ON(!entry->count);
 440	--entry->count;
 441	__sweep(entry->ds, head);
 442	spin_unlock_irqrestore(&entry->ds->lock, flags);
 443}
 444
 445/*
 446 * Returns 1 if the work was deferred, 0 if there were no pending items to delay it.
 447 */
 448static int ds_add_work(struct deferred_set *ds, struct list_head *work)
 449{
 450	int r = 1;
 451	unsigned long flags;
 452	unsigned next_entry;
 453
 454	spin_lock_irqsave(&ds->lock, flags);
 455	if ((ds->sweeper == ds->current_entry) &&
 456	    !ds->entries[ds->current_entry].count)
 457		r = 0;
 458	else {
 459		list_add(work, &ds->entries[ds->current_entry].work_items);
 460		next_entry = ds_next(ds->current_entry);
 461		if (!ds->entries[next_entry].count)
 462			ds->current_entry = next_entry;
 463	}
 464	spin_unlock_irqrestore(&ds->lock, flags);
 465
 466	return r;
 467}
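
/*
 * Usage for shared reads (see process_shared_bio() and schedule_copy()
 * below): each read of a shared block calls ds_inc() and stores the entry
 * in its endio hook so the count can be dropped with ds_dec() when the
 * read completes.  A copy that will insert a new mapping calls
 * ds_add_work(); if ds_add_work() returns 0 there are no outstanding
 * reads and the mapping can be quiesced immediately, otherwise the work
 * item is handed back by __sweep() once the older entries drain.
 */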
 468
 469/*----------------------------------------------------------------*/
 470
 471/*
 472 * Key building.
 473 */
 474static void build_data_key(struct dm_thin_device *td,
 475			   dm_block_t b, struct cell_key *key)
 476{
 477	key->virtual = 0;
 478	key->dev = dm_thin_dev_id(td);
 479	key->block = b;
 480}
 481
 482static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
 483			      struct cell_key *key)
 484{
 485	key->virtual = 1;
 486	key->dev = dm_thin_dev_id(td);
 487	key->block = b;
 488}
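
/*
 * Two key namespaces are used: a virtual key locks a block in a thin
 * device's logical address space (e.g. while deciding how to process a
 * bio), whereas a data key locks a physical pool block (e.g. while
 * sharing is being broken or a discard is in flight).
 */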
 489
 490/*----------------------------------------------------------------*/
 491
 492/*
 493 * A pool device ties together a metadata device and a data device.  It
 494 * also provides the interface for creating and destroying internal
 495 * devices.
 496 */
 497struct dm_thin_new_mapping;
 498
 499struct pool_features {
 500	unsigned zero_new_blocks:1;
 501	unsigned discard_enabled:1;
 502	unsigned discard_passdown:1;
 503};
 504
 505struct pool {
 506	struct list_head list;
 507	struct dm_target *ti;	/* Only set if a pool target is bound */
 508
 509	struct mapped_device *pool_md;
 510	struct block_device *md_dev;
 511	struct dm_pool_metadata *pmd;
 512
 513	uint32_t sectors_per_block;
 514	unsigned block_shift;
 515	dm_block_t offset_mask;
 516	dm_block_t low_water_blocks;
 517
 518	struct pool_features pf;
 519	unsigned low_water_triggered:1;	/* A dm event has been sent */
 520	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */
 521
 522	struct bio_prison *prison;
 523	struct dm_kcopyd_client *copier;
 524
 525	struct workqueue_struct *wq;
 526	struct work_struct worker;
 527	struct delayed_work waker;
 528
 529	unsigned ref_count;
 530	unsigned long last_commit_jiffies;
 531
 532	spinlock_t lock;
 533	struct bio_list deferred_bios;
 534	struct bio_list deferred_flush_bios;
 535	struct list_head prepared_mappings;
 536	struct list_head prepared_discards;
 537
 538	struct bio_list retry_on_resume_list;
 539
 540	struct deferred_set shared_read_ds;
 541	struct deferred_set all_io_ds;
 542
 543	struct dm_thin_new_mapping *next_mapping;
 544	mempool_t *mapping_pool;
 545	mempool_t *endio_hook_pool;
 546};
 547
 548/*
 549 * Target context for a pool.
 550 */
 551struct pool_c {
 552	struct dm_target *ti;
 553	struct pool *pool;
 554	struct dm_dev *data_dev;
 555	struct dm_dev *metadata_dev;
 556	struct dm_target_callbacks callbacks;
 557
 558	dm_block_t low_water_blocks;
 559	struct pool_features pf;
 560};
 561
 562/*
 563 * Target context for a thin.
 564 */
 565struct thin_c {
 566	struct dm_dev *pool_dev;
 567	struct dm_dev *origin_dev;
 568	dm_thin_id dev_id;
 569
 570	struct pool *pool;
 571	struct dm_thin_device *td;
 572};
 573
 574/*----------------------------------------------------------------*/
 575
 576/*
 577 * A global list of pools that uses a struct mapped_device as a key.
 578 */
 579static struct dm_thin_pool_table {
 580	struct mutex mutex;
 581	struct list_head pools;
 582} dm_thin_pool_table;
 583
 584static void pool_table_init(void)
 585{
 586	mutex_init(&dm_thin_pool_table.mutex);
 587	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
 588}
 589
 590static void __pool_table_insert(struct pool *pool)
 591{
 592	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 593	list_add(&pool->list, &dm_thin_pool_table.pools);
 594}
 595
 596static void __pool_table_remove(struct pool *pool)
 597{
 598	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 599	list_del(&pool->list);
 600}
 601
 602static struct pool *__pool_table_lookup(struct mapped_device *md)
 603{
 604	struct pool *pool = NULL, *tmp;
 605
 606	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 607
 608	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
 609		if (tmp->pool_md == md) {
 610			pool = tmp;
 611			break;
 612		}
 613	}
 614
 615	return pool;
 616}
 617
 618static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
 619{
 620	struct pool *pool = NULL, *tmp;
 621
 622	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 623
 624	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
 625		if (tmp->md_dev == md_dev) {
 626			pool = tmp;
 627			break;
 628		}
 629	}
 630
 631	return pool;
 632}
 633
 634/*----------------------------------------------------------------*/
 635
 636struct dm_thin_endio_hook {
 637	struct thin_c *tc;
 638	struct deferred_entry *shared_read_entry;
 639	struct deferred_entry *all_io_entry;
 640	struct dm_thin_new_mapping *overwrite_mapping;
 641};
 642
 643static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 644{
 645	struct bio *bio;
 646	struct bio_list bios;
 647
 648	bio_list_init(&bios);
 649	bio_list_merge(&bios, master);
 650	bio_list_init(master);
 651
 652	while ((bio = bio_list_pop(&bios))) {
 653		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 654
 655		if (h->tc == tc)
 656			bio_endio(bio, DM_ENDIO_REQUEUE);
 657		else
 658			bio_list_add(master, bio);
 659	}
 660}
 661
 662static void requeue_io(struct thin_c *tc)
 663{
 664	struct pool *pool = tc->pool;
 665	unsigned long flags;
 666
 667	spin_lock_irqsave(&pool->lock, flags);
 668	__requeue_bio_list(tc, &pool->deferred_bios);
 669	__requeue_bio_list(tc, &pool->retry_on_resume_list);
 670	spin_unlock_irqrestore(&pool->lock, flags);
 671}
 672
 673/*
 674 * This section of code contains the logic for processing a thin device's IO.
 675 * Much of the code depends on pool object resources (lists, workqueues, etc)
 676 * but most is exclusively called from the thin target rather than the thin-pool
 677 * target.
 678 */
 679
 680static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 681{
 682	return bio->bi_sector >> tc->pool->block_shift;
 683}
 684
 685static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 686{
 687	struct pool *pool = tc->pool;
 688
 689	bio->bi_bdev = tc->pool_dev->bdev;
 690	bio->bi_sector = (block << pool->block_shift) +
 691		(bio->bi_sector & pool->offset_mask);
 692}
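
/*
 * For example, with a 128-sector (64KB) block size, block_shift is 7 and
 * offset_mask is 127.  A bio at sector 300 is in virtual block 2 at
 * offset 44; if that block maps to data block 9, the bio is remapped to
 * sector 9 * 128 + 44 = 1196 of the pool's data device.
 */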
 693
 694static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 695{
 696	bio->bi_bdev = tc->origin_dev->bdev;
 697}
 698
 699static void issue(struct thin_c *tc, struct bio *bio)
 700{
 701	struct pool *pool = tc->pool;
 702	unsigned long flags;
 703
 704	/*
 705	 * Batch together any FUA/FLUSH bios we find and then issue
 706	 * a single commit for them in process_deferred_bios().
 707	 */
 708	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
 709		spin_lock_irqsave(&pool->lock, flags);
 710		bio_list_add(&pool->deferred_flush_bios, bio);
 711		spin_unlock_irqrestore(&pool->lock, flags);
 712	} else
 713		generic_make_request(bio);
 714}
 715
 716static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
 717{
 718	remap_to_origin(tc, bio);
 719	issue(tc, bio);
 720}
 721
 722static void remap_and_issue(struct thin_c *tc, struct bio *bio,
 723			    dm_block_t block)
 724{
 725	remap(tc, bio, block);
 726	issue(tc, bio);
 727}
 728
 729/*
 730 * wake_worker() is used when new work is queued and when pool_resume is
 731 * ready to continue deferred IO processing.
 732 */
 733static void wake_worker(struct pool *pool)
 734{
 735	queue_work(pool->wq, &pool->worker);
 736}
 737
 738/*----------------------------------------------------------------*/
 739
 740/*
 741 * Bio endio functions.
 742 */
 743struct dm_thin_new_mapping {
 744	struct list_head list;
 745
 746	unsigned quiesced:1;
 747	unsigned prepared:1;
 748	unsigned pass_discard:1;
 749
 750	struct thin_c *tc;
 751	dm_block_t virt_block;
 752	dm_block_t data_block;
 753	struct dm_bio_prison_cell *cell, *cell2;
 754	int err;
 755
 756	/*
 757	 * If the bio covers the whole area of a block then we can avoid
 758	 * zeroing or copying.  Instead this bio is hooked.  The bio will
 759	 * still be in the cell, so care has to be taken to avoid issuing
 760	 * the bio twice.
 761	 */
 762	struct bio *bio;
 763	bio_end_io_t *saved_bi_end_io;
 764};
 765
 766static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 767{
 768	struct pool *pool = m->tc->pool;
 769
 770	if (m->quiesced && m->prepared) {
 771		list_add(&m->list, &pool->prepared_mappings);
 772		wake_worker(pool);
 773	}
 774}
 775
 776static void copy_complete(int read_err, unsigned long write_err, void *context)
 777{
 778	unsigned long flags;
 779	struct dm_thin_new_mapping *m = context;
 780	struct pool *pool = m->tc->pool;
 781
 782	m->err = read_err || write_err ? -EIO : 0;
 783
 784	spin_lock_irqsave(&pool->lock, flags);
 785	m->prepared = 1;
 786	__maybe_add_mapping(m);
 787	spin_unlock_irqrestore(&pool->lock, flags);
 788}
 789
 790static void overwrite_endio(struct bio *bio, int err)
 791{
 792	unsigned long flags;
 793	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 794	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 795	struct pool *pool = m->tc->pool;
 796
 797	m->err = err;
 798
 799	spin_lock_irqsave(&pool->lock, flags);
 800	m->prepared = 1;
 801	__maybe_add_mapping(m);
 802	spin_unlock_irqrestore(&pool->lock, flags);
 803}
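
/*
 * copy_complete() and overwrite_endio() mark a mapping 'prepared'; the
 * deferred set path marks it 'quiesced' once any outstanding reads of the
 * shared block have drained.  __maybe_add_mapping() moves the mapping
 * onto prepared_mappings only when both flags are set, so whichever
 * condition completes last queues it for the worker.
 */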
 804
 805/*----------------------------------------------------------------*/
 806
 807/*
 808 * Workqueue.
 809 */
 810
 811/*
 812 * Prepared mapping jobs.
 813 */
 814
 815/*
 816 * This sends the bios in the cell back to the deferred_bios list.
 817 */
 818static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 819		       dm_block_t data_block)
 820{
 821	struct pool *pool = tc->pool;
 822	unsigned long flags;
 823
 824	spin_lock_irqsave(&pool->lock, flags);
 825	cell_release(cell, &pool->deferred_bios);
 826	spin_unlock_irqrestore(&tc->pool->lock, flags);
 827
 828	wake_worker(pool);
 829}
 830
 831/*
 832 * Same as cell_defer above, except it omits one particular detainee,
 833 * a write bio that covers the block and has already been processed.
 834 */
 835static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 836{
 837	struct bio_list bios;
 838	struct pool *pool = tc->pool;
 839	unsigned long flags;
 840
 841	bio_list_init(&bios);
 842
 843	spin_lock_irqsave(&pool->lock, flags);
 844	cell_release_no_holder(cell, &pool->deferred_bios);
 845	spin_unlock_irqrestore(&pool->lock, flags);
 846
 847	wake_worker(pool);
 848}
 849
 850static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 851{
 852	struct thin_c *tc = m->tc;
 853	struct bio *bio;
 854	int r;
 855
 856	bio = m->bio;
 857	if (bio)
 858		bio->bi_end_io = m->saved_bi_end_io;
 859
 860	if (m->err) {
 861		cell_error(m->cell);
 862		goto out;
 863	}
 864
 865	/*
 866	 * Commit the prepared block into the mapping btree.
 867	 * Any I/O for this block arriving after this point will get
 868	 * remapped to it directly.
 869	 */
 870	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
 871	if (r) {
 872		DMERR("dm_thin_insert_block() failed");
 873		cell_error(m->cell);
 874		goto out;
 875	}
 876
 877	/*
 878	 * Release any bios held while the block was being provisioned.
 879	 * If we are processing a write bio that completely covers the block,
 880	 * we already processed it so can ignore it now when processing
 881	 * the bios in the cell.
 882	 */
 883	if (bio) {
 884		cell_defer_except(tc, m->cell);
 885		bio_endio(bio, 0);
 886	} else
 887		cell_defer(tc, m->cell, m->data_block);
 888
 889out:
 890	list_del(&m->list);
 891	mempool_free(m, tc->pool->mapping_pool);
 892}
 893
 894static void process_prepared_discard(struct dm_thin_new_mapping *m)
 895{
 896	int r;
 897	struct thin_c *tc = m->tc;
 898
 899	r = dm_thin_remove_block(tc->td, m->virt_block);
 900	if (r)
 901		DMERR("dm_thin_remove_block() failed");
 902
 903	/*
 904	 * Pass the discard down to the underlying device?
 905	 */
 906	if (m->pass_discard)
 907		remap_and_issue(tc, m->bio, m->data_block);
 908	else
 909		bio_endio(m->bio, 0);
 910
 911	cell_defer_except(tc, m->cell);
 912	cell_defer_except(tc, m->cell2);
 913	mempool_free(m, tc->pool->mapping_pool);
 914}
 915
 916static void process_prepared(struct pool *pool, struct list_head *head,
 917			     void (*fn)(struct dm_thin_new_mapping *))
 918{
 919	unsigned long flags;
 920	struct list_head maps;
 921	struct dm_thin_new_mapping *m, *tmp;
 922
 923	INIT_LIST_HEAD(&maps);
 924	spin_lock_irqsave(&pool->lock, flags);
 925	list_splice_init(head, &maps);
 926	spin_unlock_irqrestore(&pool->lock, flags);
 927
 928	list_for_each_entry_safe(m, tmp, &maps, list)
 929		fn(m);
 930}
 931
 932/*
 933 * Deferred bio jobs.
 934 */
 935static int io_overlaps_block(struct pool *pool, struct bio *bio)
 936{
 937	return !(bio->bi_sector & pool->offset_mask) &&
 938		(bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
 939
 940}
 941
 942static int io_overwrites_block(struct pool *pool, struct bio *bio)
 943{
 944	return (bio_data_dir(bio) == WRITE) &&
 945		io_overlaps_block(pool, bio);
 946}
 947
 948static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
 949			       bio_end_io_t *fn)
 950{
 951	*save = bio->bi_end_io;
 952	bio->bi_end_io = fn;
 953}
 954
 955static int ensure_next_mapping(struct pool *pool)
 956{
 957	if (pool->next_mapping)
 958		return 0;
 959
 960	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
 961
 962	return pool->next_mapping ? 0 : -ENOMEM;
 963}
 964
 965static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 966{
 967	struct dm_thin_new_mapping *r = pool->next_mapping;
 968
 969	BUG_ON(!pool->next_mapping);
 970
 971	pool->next_mapping = NULL;
 972
 973	return r;
 974}
 975
 976static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 977			  struct dm_dev *origin, dm_block_t data_origin,
 978			  dm_block_t data_dest,
 979			  struct dm_bio_prison_cell *cell, struct bio *bio)
 980{
 981	int r;
 982	struct pool *pool = tc->pool;
 983	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 984
 985	INIT_LIST_HEAD(&m->list);
 986	m->quiesced = 0;
 987	m->prepared = 0;
 988	m->tc = tc;
 989	m->virt_block = virt_block;
 990	m->data_block = data_dest;
 991	m->cell = cell;
 992	m->err = 0;
 993	m->bio = NULL;
 994
 995	if (!ds_add_work(&pool->shared_read_ds, &m->list))
 996		m->quiesced = 1;
 997
 998	/*
 999	 * IO to pool_dev remaps to the pool target's data_dev.
1000	 *
1001	 * If the whole block of data is being overwritten, we can issue the
1002	 * bio immediately. Otherwise we use kcopyd to clone the data first.
1003	 */
1004	if (io_overwrites_block(pool, bio)) {
1005		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1006
1007		h->overwrite_mapping = m;
1008		m->bio = bio;
1009		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1010		remap_and_issue(tc, bio, data_dest);
1011	} else {
1012		struct dm_io_region from, to;
1013
1014		from.bdev = origin->bdev;
1015		from.sector = data_origin * pool->sectors_per_block;
1016		from.count = pool->sectors_per_block;
1017
1018		to.bdev = tc->pool_dev->bdev;
1019		to.sector = data_dest * pool->sectors_per_block;
1020		to.count = pool->sectors_per_block;
1021
1022		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
1023				   0, copy_complete, m);
1024		if (r < 0) {
1025			mempool_free(m, pool->mapping_pool);
1026			DMERR("dm_kcopyd_copy() failed");
1027			cell_error(cell);
1028		}
1029	}
1030}
1031
1032static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1033				   dm_block_t data_origin, dm_block_t data_dest,
1034				   struct dm_bio_prison_cell *cell, struct bio *bio)
1035{
1036	schedule_copy(tc, virt_block, tc->pool_dev,
1037		      data_origin, data_dest, cell, bio);
1038}
1039
1040static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1041				   dm_block_t data_dest,
1042				   struct dm_bio_prison_cell *cell, struct bio *bio)
1043{
1044	schedule_copy(tc, virt_block, tc->origin_dev,
1045		      virt_block, data_dest, cell, bio);
1046}
1047
1048static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1049			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
1050			  struct bio *bio)
1051{
1052	struct pool *pool = tc->pool;
1053	struct dm_thin_new_mapping *m = get_next_mapping(pool);
1054
1055	INIT_LIST_HEAD(&m->list);
1056	m->quiesced = 1;
1057	m->prepared = 0;
1058	m->tc = tc;
1059	m->virt_block = virt_block;
1060	m->data_block = data_block;
1061	m->cell = cell;
1062	m->err = 0;
1063	m->bio = NULL;
1064
1065	/*
1066	 * If the whole block of data is being overwritten or we are not
1067	 * zeroing pre-existing data, we can issue the bio immediately.
1068	 * Otherwise we use kcopyd to zero the data first.
1069	 */
1070	if (!pool->pf.zero_new_blocks)
1071		process_prepared_mapping(m);
1072
1073	else if (io_overwrites_block(pool, bio)) {
1074		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1075
1076		h->overwrite_mapping = m;
1077		m->bio = bio;
1078		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1079		remap_and_issue(tc, bio, data_block);
1080	} else {
1081		int r;
1082		struct dm_io_region to;
1083
1084		to.bdev = tc->pool_dev->bdev;
1085		to.sector = data_block * pool->sectors_per_block;
1086		to.count = pool->sectors_per_block;
1087
1088		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
1089		if (r < 0) {
1090			mempool_free(m, pool->mapping_pool);
1091			DMERR("dm_kcopyd_zero() failed");
1092			cell_error(cell);
1093		}
1094	}
1095}
1096
1097static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1098{
1099	int r;
1100	dm_block_t free_blocks;
1101	unsigned long flags;
1102	struct pool *pool = tc->pool;
1103
1104	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1105	if (r)
1106		return r;
1107
1108	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1109		DMWARN("%s: reached low water mark, sending event.",
1110		       dm_device_name(pool->pool_md));
1111		spin_lock_irqsave(&pool->lock, flags);
1112		pool->low_water_triggered = 1;
1113		spin_unlock_irqrestore(&pool->lock, flags);
1114		dm_table_event(pool->ti->table);
1115	}
1116
1117	if (!free_blocks) {
1118		if (pool->no_free_space)
1119			return -ENOSPC;
1120		else {
1121			/*
1122			 * Try to commit to see if that will free up some
1123			 * more space.
1124			 */
1125			r = dm_pool_commit_metadata(pool->pmd);
1126			if (r) {
1127				DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1128				      __func__, r);
1129				return r;
1130			}
1131
1132			r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1133			if (r)
1134				return r;
1135
1136			/*
1137			 * If we still have no space we set a flag to avoid
1138			 * doing all this checking and return -ENOSPC.
1139			 */
1140			if (!free_blocks) {
1141				DMWARN("%s: no free space available.",
1142				       dm_device_name(pool->pool_md));
1143				spin_lock_irqsave(&pool->lock, flags);
1144				pool->no_free_space = 1;
1145				spin_unlock_irqrestore(&pool->lock, flags);
1146				return -ENOSPC;
1147			}
1148		}
1149	}
1150
1151	r = dm_pool_alloc_data_block(pool->pmd, result);
1152	if (r)
1153		return r;
1154
1155	return 0;
1156}
1157
1158/*
1159 * If we have run out of space, queue bios until the device is
1160 * resumed, presumably after having been reloaded with more space.
1161 */
1162static void retry_on_resume(struct bio *bio)
1163{
1164	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1165	struct thin_c *tc = h->tc;
1166	struct pool *pool = tc->pool;
1167	unsigned long flags;
1168
1169	spin_lock_irqsave(&pool->lock, flags);
1170	bio_list_add(&pool->retry_on_resume_list, bio);
1171	spin_unlock_irqrestore(&pool->lock, flags);
1172}
1173
1174static void no_space(struct dm_bio_prison_cell *cell)
1175{
1176	struct bio *bio;
1177	struct bio_list bios;
1178
1179	bio_list_init(&bios);
1180	cell_release(cell, &bios);
1181
1182	while ((bio = bio_list_pop(&bios)))
1183		retry_on_resume(bio);
1184}
1185
1186static void process_discard(struct thin_c *tc, struct bio *bio)
1187{
1188	int r;
1189	unsigned long flags;
1190	struct pool *pool = tc->pool;
1191	struct dm_bio_prison_cell *cell, *cell2;
1192	struct cell_key key, key2;
1193	dm_block_t block = get_bio_block(tc, bio);
1194	struct dm_thin_lookup_result lookup_result;
1195	struct dm_thin_new_mapping *m;
1196
1197	build_virtual_key(tc->td, block, &key);
1198	if (bio_detain(tc->pool->prison, &key, bio, &cell))
1199		return;
1200
1201	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1202	switch (r) {
1203	case 0:
1204		/*
1205		 * Check nobody is fiddling with this pool block.  This can
1206		 * happen if someone's in the process of breaking sharing
1207		 * on this block.
1208		 */
1209		build_data_key(tc->td, lookup_result.block, &key2);
1210		if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
1211			cell_release_singleton(cell, bio);
1212			break;
1213		}
1214
1215		if (io_overlaps_block(pool, bio)) {
1216			/*
1217			 * IO may still be going to the destination block.  We must
1218			 * quiesce before we can do the removal.
1219			 */
1220			m = get_next_mapping(pool);
1221			m->tc = tc;
1222			m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
1223			m->virt_block = block;
1224			m->data_block = lookup_result.block;
1225			m->cell = cell;
1226			m->cell2 = cell2;
1227			m->err = 0;
1228			m->bio = bio;
1229
1230			if (!ds_add_work(&pool->all_io_ds, &m->list)) {
1231				spin_lock_irqsave(&pool->lock, flags);
1232				list_add(&m->list, &pool->prepared_discards);
1233				spin_unlock_irqrestore(&pool->lock, flags);
1234				wake_worker(pool);
1235			}
1236		} else {
1237			/*
1238			 * This path is hit if people are ignoring
1239			 * limits->discard_granularity.  It ignores any
1240			 * part of the discard that is in a subsequent
1241			 * block.
1242			 */
1243			sector_t offset = bio->bi_sector - (block << pool->block_shift);
1244			unsigned remaining = (pool->sectors_per_block - offset) << SECTOR_SHIFT;
1245			bio->bi_size = min(bio->bi_size, remaining);
1246
1247			cell_release_singleton(cell, bio);
1248			cell_release_singleton(cell2, bio);
1249			if ((!lookup_result.shared) && pool->pf.discard_passdown)
1250				remap_and_issue(tc, bio, lookup_result.block);
1251			else
1252				bio_endio(bio, 0);
1253		}
1254		break;
1255
1256	case -ENODATA:
1257		/*
1258		 * It isn't provisioned, just forget it.
1259		 */
1260		cell_release_singleton(cell, bio);
1261		bio_endio(bio, 0);
1262		break;
1263
1264	default:
1265		DMERR("discard: find block unexpectedly returned %d", r);
1266		cell_release_singleton(cell, bio);
1267		bio_io_error(bio);
1268		break;
1269	}
1270}
1271
1272static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1273			  struct cell_key *key,
1274			  struct dm_thin_lookup_result *lookup_result,
1275			  struct dm_bio_prison_cell *cell)
1276{
1277	int r;
1278	dm_block_t data_block;
1279
1280	r = alloc_data_block(tc, &data_block);
1281	switch (r) {
1282	case 0:
1283		schedule_internal_copy(tc, block, lookup_result->block,
1284				       data_block, cell, bio);
1285		break;
1286
1287	case -ENOSPC:
1288		no_space(cell);
1289		break;
1290
1291	default:
1292		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1293		cell_error(cell);
1294		break;
1295	}
1296}
1297
1298static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1299			       dm_block_t block,
1300			       struct dm_thin_lookup_result *lookup_result)
1301{
1302	struct dm_bio_prison_cell *cell;
1303	struct pool *pool = tc->pool;
1304	struct cell_key key;
1305
1306	/*
1307	 * If cell is already occupied, then sharing is already in the process
1308	 * of being broken so we have nothing further to do here.
1309	 */
1310	build_data_key(tc->td, lookup_result->block, &key);
1311	if (bio_detain(pool->prison, &key, bio, &cell))
1312		return;
1313
1314	if (bio_data_dir(bio) == WRITE)
1315		break_sharing(tc, bio, block, &key, lookup_result, cell);
1316	else {
1317		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1318
1319		h->shared_read_entry = ds_inc(&pool->shared_read_ds);
1320
1321		cell_release_singleton(cell, bio);
1322		remap_and_issue(tc, bio, lookup_result->block);
1323	}
1324}
1325
1326static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1327			    struct dm_bio_prison_cell *cell)
1328{
1329	int r;
1330	dm_block_t data_block;
1331
1332	/*
1333	 * Remap empty bios (flushes) immediately, without provisioning.
1334	 */
1335	if (!bio->bi_size) {
1336		cell_release_singleton(cell, bio);
1337		remap_and_issue(tc, bio, 0);
1338		return;
1339	}
1340
1341	/*
1342	 * Fill read bios with zeroes and complete them immediately.
1343	 */
1344	if (bio_data_dir(bio) == READ) {
1345		zero_fill_bio(bio);
1346		cell_release_singleton(cell, bio);
1347		bio_endio(bio, 0);
1348		return;
1349	}
1350
1351	r = alloc_data_block(tc, &data_block);
1352	switch (r) {
1353	case 0:
1354		if (tc->origin_dev)
1355			schedule_external_copy(tc, block, data_block, cell, bio);
1356		else
1357			schedule_zero(tc, block, data_block, cell, bio);
1358		break;
1359
1360	case -ENOSPC:
1361		no_space(cell);
1362		break;
1363
1364	default:
1365		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1366		cell_error(cell);
1367		break;
1368	}
1369}
1370
1371static void process_bio(struct thin_c *tc, struct bio *bio)
1372{
1373	int r;
1374	dm_block_t block = get_bio_block(tc, bio);
1375	struct dm_bio_prison_cell *cell;
1376	struct cell_key key;
1377	struct dm_thin_lookup_result lookup_result;
1378
1379	/*
1380	 * If cell is already occupied, then the block is already
1381	 * being provisioned so we have nothing further to do here.
1382	 */
1383	build_virtual_key(tc->td, block, &key);
1384	if (bio_detain(tc->pool->prison, &key, bio, &cell))
1385		return;
1386
1387	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1388	switch (r) {
1389	case 0:
1390		/*
1391		 * We can release this cell now.  This thread is the only
1392		 * one that puts bios into a cell, and we know there were
1393		 * no preceding bios.
1394		 */
1395		/*
1396		 * TODO: this will probably have to change when discard goes
1397		 * back in.
1398		 */
1399		cell_release_singleton(cell, bio);
1400
1401		if (lookup_result.shared)
1402			process_shared_bio(tc, bio, block, &lookup_result);
1403		else
1404			remap_and_issue(tc, bio, lookup_result.block);
1405		break;
1406
1407	case -ENODATA:
1408		if (bio_data_dir(bio) == READ && tc->origin_dev) {
1409			cell_release_singleton(cell, bio);
1410			remap_to_origin_and_issue(tc, bio);
1411		} else
1412			provision_block(tc, bio, block, cell);
1413		break;
1414
1415	default:
1416		DMERR("dm_thin_find_block() failed, error = %d", r);
1417		cell_release_singleton(cell, bio);
1418		bio_io_error(bio);
1419		break;
1420	}
1421}
1422
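/*
 * The first comparison copes with jiffies wrapping; otherwise we commit
 * if more than COMMIT_PERIOD (HZ, i.e. roughly one second) has elapsed
 * since the last commit.
 */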
1423static int need_commit_due_to_time(struct pool *pool)
1424{
1425	return jiffies < pool->last_commit_jiffies ||
1426	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1427}
1428
1429static void process_deferred_bios(struct pool *pool)
1430{
1431	unsigned long flags;
1432	struct bio *bio;
1433	struct bio_list bios;
1434	int r;
1435
1436	bio_list_init(&bios);
1437
1438	spin_lock_irqsave(&pool->lock, flags);
1439	bio_list_merge(&bios, &pool->deferred_bios);
1440	bio_list_init(&pool->deferred_bios);
1441	spin_unlock_irqrestore(&pool->lock, flags);
1442
1443	while ((bio = bio_list_pop(&bios))) {
1444		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1445		struct thin_c *tc = h->tc;
1446
1447		/*
1448		 * If we've got no free new_mapping structs, and processing
1449		 * this bio might require one, we pause until there are some
1450		 * prepared mappings to process.
1451		 */
1452		if (ensure_next_mapping(pool)) {
1453			spin_lock_irqsave(&pool->lock, flags);
1454			bio_list_merge(&pool->deferred_bios, &bios);
1455			spin_unlock_irqrestore(&pool->lock, flags);
1456
1457			break;
1458		}
1459
1460		if (bio->bi_rw & REQ_DISCARD)
1461			process_discard(tc, bio);
1462		else
1463			process_bio(tc, bio);
1464	}
1465
1466	/*
1467	 * If there are any deferred flush bios, we must commit
1468	 * the metadata before issuing them.
1469	 */
1470	bio_list_init(&bios);
1471	spin_lock_irqsave(&pool->lock, flags);
1472	bio_list_merge(&bios, &pool->deferred_flush_bios);
1473	bio_list_init(&pool->deferred_flush_bios);
1474	spin_unlock_irqrestore(&pool->lock, flags);
1475
1476	if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1477		return;
1478
1479	r = dm_pool_commit_metadata(pool->pmd);
1480	if (r) {
1481		DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1482		      __func__, r);
1483		while ((bio = bio_list_pop(&bios)))
1484			bio_io_error(bio);
1485		return;
1486	}
1487	pool->last_commit_jiffies = jiffies;
1488
1489	while ((bio = bio_list_pop(&bios)))
1490		generic_make_request(bio);
1491}
1492
1493static void do_worker(struct work_struct *ws)
1494{
1495	struct pool *pool = container_of(ws, struct pool, worker);
1496
1497	process_prepared(pool, &pool->prepared_mappings, process_prepared_mapping);
1498	process_prepared(pool, &pool->prepared_discards, process_prepared_discard);
1499	process_deferred_bios(pool);
1500}
1501
1502/*
1503 * We want to commit periodically so that not too much
1504 * unwritten data builds up.
1505 */
1506static void do_waker(struct work_struct *ws)
1507{
1508	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1509	wake_worker(pool);
1510	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1511}
1512
1513/*----------------------------------------------------------------*/
1514
1515/*
1516 * Mapping functions.
1517 */
1518
1519/*
1520 * Called only while mapping a thin bio to hand it over to the workqueue.
1521 */
1522static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1523{
1524	unsigned long flags;
1525	struct pool *pool = tc->pool;
1526
1527	spin_lock_irqsave(&pool->lock, flags);
1528	bio_list_add(&pool->deferred_bios, bio);
1529	spin_unlock_irqrestore(&pool->lock, flags);
1530
1531	wake_worker(pool);
1532}
1533
1534static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
1535{
1536	struct pool *pool = tc->pool;
1537	struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
1538
1539	h->tc = tc;
1540	h->shared_read_entry = NULL;
1541	h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds);
1542	h->overwrite_mapping = NULL;
1543
1544	return h;
1545}
1546
1547/*
1548 * Non-blocking function called from the thin target's map function.
1549 */
1550static int thin_bio_map(struct dm_target *ti, struct bio *bio,
1551			union map_info *map_context)
1552{
1553	int r;
1554	struct thin_c *tc = ti->private;
1555	dm_block_t block = get_bio_block(tc, bio);
1556	struct dm_thin_device *td = tc->td;
1557	struct dm_thin_lookup_result result;
1558
1559	map_context->ptr = thin_hook_bio(tc, bio);
1560	if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1561		thin_defer_bio(tc, bio);
1562		return DM_MAPIO_SUBMITTED;
1563	}
1564
1565	r = dm_thin_find_block(td, block, 0, &result);
1566
1567	/*
1568	 * Note that we defer readahead too.
1569	 */
1570	switch (r) {
1571	case 0:
1572		if (unlikely(result.shared)) {
1573			/*
1574			 * We have a race condition here between the
1575			 * result.shared value returned by the lookup and
1576			 * snapshot creation, which may cause new
1577			 * sharing.
1578			 *
1579			 * To avoid this always quiesce the origin before
1580			 * taking the snap.  You want to do this anyway to
1581			 * ensure a consistent application view
1582			 * (i.e. lockfs).
1583			 *
1584			 * More distant ancestors are irrelevant. The
1585			 * shared flag will be set in their case.
1586			 */
1587			thin_defer_bio(tc, bio);
1588			r = DM_MAPIO_SUBMITTED;
1589		} else {
1590			remap(tc, bio, result.block);
1591			r = DM_MAPIO_REMAPPED;
1592		}
1593		break;
1594
1595	case -ENODATA:
1596		/*
1597		 * In future, the failed dm_thin_find_block above could
1598		 * provide the hint to load the metadata into cache.
1599		 */
1600	case -EWOULDBLOCK:
1601		thin_defer_bio(tc, bio);
1602		r = DM_MAPIO_SUBMITTED;
1603		break;
1604	}
1605
1606	return r;
1607}
1608
1609static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1610{
1611	int r;
1612	unsigned long flags;
1613	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1614
1615	spin_lock_irqsave(&pt->pool->lock, flags);
1616	r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1617	spin_unlock_irqrestore(&pt->pool->lock, flags);
1618
1619	if (!r) {
1620		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1621		r = bdi_congested(&q->backing_dev_info, bdi_bits);
1622	}
1623
1624	return r;
1625}
1626
1627static void __requeue_bios(struct pool *pool)
1628{
1629	bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1630	bio_list_init(&pool->retry_on_resume_list);
1631}
1632
1633/*----------------------------------------------------------------
1634 * Binding of control targets to a pool object
1635 *--------------------------------------------------------------*/
1636static int bind_control_target(struct pool *pool, struct dm_target *ti)
1637{
1638	struct pool_c *pt = ti->private;
1639
1640	pool->ti = ti;
1641	pool->low_water_blocks = pt->low_water_blocks;
1642	pool->pf = pt->pf;
1643
1644	/*
1645	 * If discard_passdown was enabled verify that the data device
1646	 * supports discards.  Disable discard_passdown if not; otherwise
1647	 * -EOPNOTSUPP will be returned.
1648	 */
1649	if (pt->pf.discard_passdown) {
1650		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1651		if (!q || !blk_queue_discard(q)) {
1652			char buf[BDEVNAME_SIZE];
1653			DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
1654			       bdevname(pt->data_dev->bdev, buf));
1655			pool->pf.discard_passdown = 0;
1656		}
1657	}
1658
1659	return 0;
1660}
1661
1662static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1663{
1664	if (pool->ti == ti)
1665		pool->ti = NULL;
1666}
1667
1668/*----------------------------------------------------------------
1669 * Pool creation
1670 *--------------------------------------------------------------*/
1671/* Initialize pool features. */
1672static void pool_features_init(struct pool_features *pf)
1673{
1674	pf->zero_new_blocks = 1;
1675	pf->discard_enabled = 1;
1676	pf->discard_passdown = 1;
1677}
1678
1679static void __pool_destroy(struct pool *pool)
1680{
1681	__pool_table_remove(pool);
1682
1683	if (dm_pool_metadata_close(pool->pmd) < 0)
1684		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1685
1686	prison_destroy(pool->prison);
1687	dm_kcopyd_client_destroy(pool->copier);
1688
1689	if (pool->wq)
1690		destroy_workqueue(pool->wq);
1691
1692	if (pool->next_mapping)
1693		mempool_free(pool->next_mapping, pool->mapping_pool);
1694	mempool_destroy(pool->mapping_pool);
1695	mempool_destroy(pool->endio_hook_pool);
1696	kfree(pool);
1697}
1698
1699static struct kmem_cache *_new_mapping_cache;
1700static struct kmem_cache *_endio_hook_cache;
1701
1702static struct pool *pool_create(struct mapped_device *pool_md,
1703				struct block_device *metadata_dev,
1704				unsigned long block_size, char **error)
1705{
1706	int r;
1707	void *err_p;
1708	struct pool *pool;
1709	struct dm_pool_metadata *pmd;
1710
1711	pmd = dm_pool_metadata_open(metadata_dev, block_size);
1712	if (IS_ERR(pmd)) {
1713		*error = "Error creating metadata object";
1714		return (struct pool *)pmd;
1715	}
1716
1717	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1718	if (!pool) {
1719		*error = "Error allocating memory for pool";
1720		err_p = ERR_PTR(-ENOMEM);
1721		goto bad_pool;
1722	}
1723
1724	pool->pmd = pmd;
1725	pool->sectors_per_block = block_size;
1726	pool->block_shift = ffs(block_size) - 1;
1727	pool->offset_mask = block_size - 1;
1728	pool->low_water_blocks = 0;
1729	pool_features_init(&pool->pf);
1730	pool->prison = prison_create(PRISON_CELLS);
1731	if (!pool->prison) {
1732		*error = "Error creating pool's bio prison";
1733		err_p = ERR_PTR(-ENOMEM);
1734		goto bad_prison;
1735	}
1736
1737	pool->copier = dm_kcopyd_client_create();
1738	if (IS_ERR(pool->copier)) {
1739		r = PTR_ERR(pool->copier);
1740		*error = "Error creating pool's kcopyd client";
1741		err_p = ERR_PTR(r);
1742		goto bad_kcopyd_client;
1743	}
1744
1745	/*
1746	 * Create singlethreaded workqueue that will service all devices
1747	 * that use this metadata.
1748	 */
1749	pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1750	if (!pool->wq) {
1751		*error = "Error creating pool's workqueue";
1752		err_p = ERR_PTR(-ENOMEM);
1753		goto bad_wq;
1754	}
1755
1756	INIT_WORK(&pool->worker, do_worker);
1757	INIT_DELAYED_WORK(&pool->waker, do_waker);
1758	spin_lock_init(&pool->lock);
1759	bio_list_init(&pool->deferred_bios);
1760	bio_list_init(&pool->deferred_flush_bios);
1761	INIT_LIST_HEAD(&pool->prepared_mappings);
1762	INIT_LIST_HEAD(&pool->prepared_discards);
1763	pool->low_water_triggered = 0;
1764	pool->no_free_space = 0;
1765	bio_list_init(&pool->retry_on_resume_list);
1766	ds_init(&pool->shared_read_ds);
1767	ds_init(&pool->all_io_ds);
1768
1769	pool->next_mapping = NULL;
1770	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1771						      _new_mapping_cache);
1772	if (!pool->mapping_pool) {
1773		*error = "Error creating pool's mapping mempool";
1774		err_p = ERR_PTR(-ENOMEM);
1775		goto bad_mapping_pool;
1776	}
1777
1778	pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
1779							 _endio_hook_cache);
1780	if (!pool->endio_hook_pool) {
1781		*error = "Error creating pool's endio_hook mempool";
1782		err_p = ERR_PTR(-ENOMEM);
1783		goto bad_endio_hook_pool;
1784	}
1785	pool->ref_count = 1;
1786	pool->last_commit_jiffies = jiffies;
1787	pool->pool_md = pool_md;
1788	pool->md_dev = metadata_dev;
1789	__pool_table_insert(pool);
1790
1791	return pool;
1792
1793bad_endio_hook_pool:
1794	mempool_destroy(pool->mapping_pool);
1795bad_mapping_pool:
1796	destroy_workqueue(pool->wq);
1797bad_wq:
1798	dm_kcopyd_client_destroy(pool->copier);
1799bad_kcopyd_client:
1800	prison_destroy(pool->prison);
1801bad_prison:
1802	kfree(pool);
1803bad_pool:
1804	if (dm_pool_metadata_close(pmd))
1805		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1806
1807	return err_p;
1808}
1809
1810static void __pool_inc(struct pool *pool)
1811{
1812	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1813	pool->ref_count++;
1814}
1815
1816static void __pool_dec(struct pool *pool)
1817{
1818	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1819	BUG_ON(!pool->ref_count);
1820	if (!--pool->ref_count)
1821		__pool_destroy(pool);
1822}
1823
1824static struct pool *__pool_find(struct mapped_device *pool_md,
1825				struct block_device *metadata_dev,
1826				unsigned long block_size, char **error,
1827				int *created)
1828{
1829	struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1830
1831	if (pool) {
1832		if (pool->pool_md != pool_md)
1833			return ERR_PTR(-EBUSY);
1834		__pool_inc(pool);
1835
1836	} else {
1837		pool = __pool_table_lookup(pool_md);
1838		if (pool) {
1839			if (pool->md_dev != metadata_dev)
1840				return ERR_PTR(-EINVAL);
1841			__pool_inc(pool);
1842
1843		} else {
1844			pool = pool_create(pool_md, metadata_dev, block_size, error);
1845			*created = 1;
1846		}
1847	}
1848
1849	return pool;
1850}
1851
1852/*----------------------------------------------------------------
1853 * Pool target methods
1854 *--------------------------------------------------------------*/
1855static void pool_dtr(struct dm_target *ti)
1856{
1857	struct pool_c *pt = ti->private;
1858
1859	mutex_lock(&dm_thin_pool_table.mutex);
1860
1861	unbind_control_target(pt->pool, ti);
1862	__pool_dec(pt->pool);
1863	dm_put_device(ti, pt->metadata_dev);
1864	dm_put_device(ti, pt->data_dev);
1865	kfree(pt);
1866
1867	mutex_unlock(&dm_thin_pool_table.mutex);
1868}
1869
1870static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1871			       struct dm_target *ti)
1872{
1873	int r;
1874	unsigned argc;
1875	const char *arg_name;
1876
1877	static struct dm_arg _args[] = {
1878		{0, 3, "Invalid number of pool feature arguments"},
1879	};
1880
1881	/*
1882	 * No feature arguments supplied.
1883	 */
1884	if (!as->argc)
1885		return 0;
1886
1887	r = dm_read_arg_group(_args, as, &argc, &ti->error);
1888	if (r)
1889		return -EINVAL;
1890
1891	while (argc && !r) {
1892		arg_name = dm_shift_arg(as);
1893		argc--;
1894
1895		if (!strcasecmp(arg_name, "skip_block_zeroing")) {
1896			pf->zero_new_blocks = 0;
1897			continue;
1898		} else if (!strcasecmp(arg_name, "ignore_discard")) {
1899			pf->discard_enabled = 0;
1900			continue;
1901		} else if (!strcasecmp(arg_name, "no_discard_passdown")) {
1902			pf->discard_passdown = 0;
1903			continue;
1904		}
1905
1906		ti->error = "Unrecognised pool feature requested";
1907		r = -EINVAL;
1908	}
1909
1910	return r;
1911}
1912
1913/*
1914 * thin-pool <metadata dev> <data dev>
1915 *	     <data block size (sectors)>
1916 *	     <low water mark (blocks)>
1917 *	     [<#feature args> [<arg>]*]
1918 *
1919 * Optional feature arguments are:
1920 *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
1921 *	     ignore_discard: disable discard
1922 *	     no_discard_passdown: don't pass discards down to the data device
1923 */
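/*
 * For example, a pool using 64KB (128-sector) data blocks, a low water
 * mark of 1024 blocks and block zeroing disabled might be loaded with a
 * table line such as:
 *
 *   0 <data dev size in sectors> thin-pool <metadata dev> <data dev> 128 1024 1 skip_block_zeroing
 */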
1924static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1925{
1926	int r, pool_created = 0;
1927	struct pool_c *pt;
1928	struct pool *pool;
1929	struct pool_features pf;
1930	struct dm_arg_set as;
1931	struct dm_dev *data_dev;
1932	unsigned long block_size;
1933	dm_block_t low_water_blocks;
1934	struct dm_dev *metadata_dev;
1935	sector_t metadata_dev_size;
1936	char b[BDEVNAME_SIZE];
1937
1938	/*
1939	 * FIXME Remove validation from scope of lock.
1940	 */
1941	mutex_lock(&dm_thin_pool_table.mutex);
1942
1943	if (argc < 4) {
1944		ti->error = "Invalid argument count";
1945		r = -EINVAL;
1946		goto out_unlock;
1947	}
1948	as.argc = argc;
1949	as.argv = argv;
1950
1951	r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
1952	if (r) {
1953		ti->error = "Error opening metadata block device";
1954		goto out_unlock;
1955	}
1956
1957	metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
1958	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
1959		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1960		       bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
1961
1962	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
1963	if (r) {
1964		ti->error = "Error getting data device";
1965		goto out_metadata;
1966	}
1967
1968	if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
1969	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1970	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
1971	    !is_power_of_2(block_size)) {
1972		ti->error = "Invalid block size";
1973		r = -EINVAL;
1974		goto out;
1975	}
1976
1977	if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
1978		ti->error = "Invalid low water mark";
1979		r = -EINVAL;
1980		goto out;
1981	}
1982
1983	/*
1984	 * Set default pool features.
1985	 */
1986	pool_features_init(&pf);
1987
1988	dm_consume_args(&as, 4);
1989	r = parse_pool_features(&as, &pf, ti);
1990	if (r)
1991		goto out;
1992
1993	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
1994	if (!pt) {
1995		r = -ENOMEM;
1996		goto out;
1997	}
1998
1999	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
2000			   block_size, &ti->error, &pool_created);
2001	if (IS_ERR(pool)) {
2002		r = PTR_ERR(pool);
2003		goto out_free_pt;
2004	}
2005
2006	/*
2007	 * 'pool_created' reflects whether this is the first table load.
2008	 * Top-level discard support cannot be changed after the initial
2009	 * load: changing it would require a pool reload to propagate the
2010	 * change to the thin devices.
2011	 */
2012	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2013		ti->error = "Discard support cannot be disabled once enabled";
2014		r = -EINVAL;
2015		goto out_flags_changed;
2016	}
2017
2018	pt->pool = pool;
2019	pt->ti = ti;
2020	pt->metadata_dev = metadata_dev;
2021	pt->data_dev = data_dev;
2022	pt->low_water_blocks = low_water_blocks;
2023	pt->pf = pf;
2024	ti->num_flush_requests = 1;
2025	/*
2026	 * Only need to enable discards if the pool should pass
2027	 * them down to the data device.  The thin device's discard
2028	 * processing will cause mappings to be removed from the btree.
2029	 */
2030	if (pf.discard_enabled && pf.discard_passdown) {
2031		ti->num_discard_requests = 1;
2032		/*
2033		 * Setting 'discards_supported' circumvents the normal
2034		 * stacking of discard limits (this keeps the pool and
2035		 * thin devices' discard limits consistent).
2036		 */
2037		ti->discards_supported = 1;
2038	}
2039	ti->private = pt;
2040
2041	pt->callbacks.congested_fn = pool_is_congested;
2042	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2043
2044	mutex_unlock(&dm_thin_pool_table.mutex);
2045
2046	return 0;
2047
2048out_flags_changed:
2049	__pool_dec(pool);
2050out_free_pt:
2051	kfree(pt);
2052out:
2053	dm_put_device(ti, data_dev);
2054out_metadata:
2055	dm_put_device(ti, metadata_dev);
2056out_unlock:
2057	mutex_unlock(&dm_thin_pool_table.mutex);
2058
2059	return r;
2060}
2061
2062static int pool_map(struct dm_target *ti, struct bio *bio,
2063		    union map_info *map_context)
2064{
2065	int r;
2066	struct pool_c *pt = ti->private;
2067	struct pool *pool = pt->pool;
2068	unsigned long flags;
2069
2070	/*
2071	 * As this is a singleton target, ti->begin is always zero.
2072	 */
2073	spin_lock_irqsave(&pool->lock, flags);
2074	bio->bi_bdev = pt->data_dev->bdev;
2075	r = DM_MAPIO_REMAPPED;
2076	spin_unlock_irqrestore(&pool->lock, flags);
2077
2078	return r;
2079}
2080
2081/*
2082 * Reads the number of data blocks recorded in the superblock and
2083 * compares it with the current size of the data device, resizing
2084 * the data device (as recorded in the metadata) if it has grown.
2085 *
2086 * This copes both with a preallocated data device being opened in the
2087 * ctr and then resumed
2088 * -and-
2089 * with the resume method being called on its own after userspace has
2090 * grown the data device in response to a table event.
2091 */
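/*
 * A sketch of the userspace sequence this supports (the device name and
 * table contents below are assumptions for illustration): after the data
 * device has been grown, reloading the pool table with the new length and
 * resuming is enough to trigger the resize here:
 *
 *   dmsetup suspend my_pool
 *   dmsetup load my_pool --table "0 <new length> thin-pool <metadata dev> <data dev> <block size> <low water mark>"
 *   dmsetup resume my_pool
 */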
2092static int pool_preresume(struct dm_target *ti)
2093{
2094	int r;
2095	struct pool_c *pt = ti->private;
2096	struct pool *pool = pt->pool;
2097	dm_block_t data_size, sb_data_size;
2098
2099	/*
2100	 * Take control of the pool object.
2101	 */
2102	r = bind_control_target(pool, ti);
2103	if (r)
2104		return r;
2105
2106	data_size = ti->len >> pool->block_shift;
2107	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2108	if (r) {
2109		DMERR("failed to retrieve data device size");
2110		return r;
2111	}
2112
2113	if (data_size < sb_data_size) {
2114		DMERR("pool target too small, is %llu blocks (expected %llu)",
2115		      data_size, sb_data_size);
2116		return -EINVAL;
2117
2118	} else if (data_size > sb_data_size) {
2119		r = dm_pool_resize_data_dev(pool->pmd, data_size);
2120		if (r) {
2121			DMERR("failed to resize data device");
2122			return r;
2123		}
2124
2125		r = dm_pool_commit_metadata(pool->pmd);
2126		if (r) {
2127			DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
2128			      __func__, r);
2129			return r;
2130		}
2131	}
2132
2133	return 0;
2134}
2135
2136static void pool_resume(struct dm_target *ti)
2137{
2138	struct pool_c *pt = ti->private;
2139	struct pool *pool = pt->pool;
2140	unsigned long flags;
2141
2142	spin_lock_irqsave(&pool->lock, flags);
2143	pool->low_water_triggered = 0;
2144	pool->no_free_space = 0;
2145	__requeue_bios(pool);
2146	spin_unlock_irqrestore(&pool->lock, flags);
2147
2148	do_waker(&pool->waker.work);
2149}
2150
2151static void pool_postsuspend(struct dm_target *ti)
2152{
2153	int r;
2154	struct pool_c *pt = ti->private;
2155	struct pool *pool = pt->pool;
2156
2157	cancel_delayed_work(&pool->waker);
2158	flush_workqueue(pool->wq);
2159
2160	r = dm_pool_commit_metadata(pool->pmd);
2161	if (r < 0) {
2162		DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
2163		      __func__, r);
2164		/* FIXME: invalidate device? error the next FUA or FLUSH bio? */
2165	}
2166}
2167
2168static int check_arg_count(unsigned argc, unsigned args_required)
2169{
2170	if (argc != args_required) {
2171		DMWARN("Message received with %u arguments instead of %u.",
2172		       argc, args_required);
2173		return -EINVAL;
2174	}
2175
2176	return 0;
2177}
2178
2179static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2180{
2181	if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2182	    *dev_id <= MAX_DEV_ID)
2183		return 0;
2184
2185	if (warning)
2186		DMWARN("Message received with invalid device id: %s", arg);
2187
2188	return -EINVAL;
2189}
2190
2191static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2192{
2193	dm_thin_id dev_id;
2194	int r;
2195
2196	r = check_arg_count(argc, 2);
2197	if (r)
2198		return r;
2199
2200	r = read_dev_id(argv[1], &dev_id, 1);
2201	if (r)
2202		return r;
2203
2204	r = dm_pool_create_thin(pool->pmd, dev_id);
2205	if (r) {
2206		DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2207		       argv[1]);
2208		return r;
2209	}
2210
2211	return 0;
2212}
2213
2214static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2215{
2216	dm_thin_id dev_id;
2217	dm_thin_id origin_dev_id;
2218	int r;
2219
2220	r = check_arg_count(argc, 3);
2221	if (r)
2222		return r;
2223
2224	r = read_dev_id(argv[1], &dev_id, 1);
2225	if (r)
2226		return r;
2227
2228	r = read_dev_id(argv[2], &origin_dev_id, 1);
2229	if (r)
2230		return r;
2231
2232	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2233	if (r) {
2234		DMWARN("Creation of new snapshot %s of device %s failed.",
2235		       argv[1], argv[2]);
2236		return r;
2237	}
2238
2239	return 0;
2240}
2241
2242static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2243{
2244	dm_thin_id dev_id;
2245	int r;
2246
2247	r = check_arg_count(argc, 2);
2248	if (r)
2249		return r;
2250
2251	r = read_dev_id(argv[1], &dev_id, 1);
2252	if (r)
2253		return r;
2254
2255	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2256	if (r)
2257		DMWARN("Deletion of thin device %s failed.", argv[1]);
2258
2259	return r;
2260}
2261
2262static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2263{
2264	dm_thin_id old_id, new_id;
2265	int r;
2266
2267	r = check_arg_count(argc, 3);
2268	if (r)
2269		return r;
2270
2271	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2272		DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2273		return -EINVAL;
2274	}
2275
2276	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2277		DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2278		return -EINVAL;
2279	}
2280
2281	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2282	if (r) {
2283		DMWARN("Failed to change transaction id from %s to %s.",
2284		       argv[1], argv[2]);
2285		return r;
2286	}
2287
2288	return 0;
2289}
2290
2291static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2292{
2293	int r;
2294
2295	r = check_arg_count(argc, 1);
2296	if (r)
2297		return r;
2298
2299	r = dm_pool_commit_metadata(pool->pmd);
2300	if (r) {
2301		DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
2302		      __func__, r);
2303		return r;
2304	}
2305
2306	r = dm_pool_reserve_metadata_snap(pool->pmd);
2307	if (r)
2308		DMWARN("reserve_metadata_snap message failed.");
2309
2310	return r;
2311}
2312
2313static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2314{
2315	int r;
2316
2317	r = check_arg_count(argc, 1);
2318	if (r)
2319		return r;
2320
2321	r = dm_pool_release_metadata_snap(pool->pmd);
2322	if (r)
2323		DMWARN("release_metadata_snap message failed.");
2324
2325	return r;
2326}
2327
2328/*
2329 * Messages supported:
2330 *   create_thin	<dev_id>
2331 *   create_snap	<dev_id> <origin_id>
2332 *   delete		<dev_id>
2334 *   set_transaction_id <current_trans_id> <new_trans_id>
2335 *   reserve_metadata_snap
2336 *   release_metadata_snap
2337 */
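/*
 * Illustrative usage from userspace (the pool device name is an
 * assumption): messages are sent to sector 0 of the pool device with
 * dmsetup, e.g.
 *
 *   dmsetup message my_pool 0 create_thin 0
 *   dmsetup message my_pool 0 create_snap 1 0
 *   dmsetup message my_pool 0 delete 1
 */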
2338static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2339{
2340	int r = -EINVAL;
2341	struct pool_c *pt = ti->private;
2342	struct pool *pool = pt->pool;
2343
2344	if (!strcasecmp(argv[0], "create_thin"))
2345		r = process_create_thin_mesg(argc, argv, pool);
2346
2347	else if (!strcasecmp(argv[0], "create_snap"))
2348		r = process_create_snap_mesg(argc, argv, pool);
2349
2350	else if (!strcasecmp(argv[0], "delete"))
2351		r = process_delete_mesg(argc, argv, pool);
2352
2353	else if (!strcasecmp(argv[0], "set_transaction_id"))
2354		r = process_set_transaction_id_mesg(argc, argv, pool);
2355
2356	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2357		r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2358
2359	else if (!strcasecmp(argv[0], "release_metadata_snap"))
2360		r = process_release_metadata_snap_mesg(argc, argv, pool);
2361
2362	else
2363		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2364
2365	if (!r) {
2366		r = dm_pool_commit_metadata(pool->pmd);
2367		if (r)
2368			DMERR("%s message: dm_pool_commit_metadata() failed, error = %d",
2369			      argv[0], r);
2370	}
2371
2372	return r;
2373}
2374
2375/*
2376 * Status line is:
2377 *    <transaction id> <used metadata blocks>/<total metadata blocks>
2378 *    <used data blocks>/<total data blocks> <held metadata root>
2379 */
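/*
 * An assumed, illustrative example: a pool at transaction id 1 with 128 of
 * 4096 metadata blocks used, 1024 of 204800 data blocks used and no held
 * metadata root would report
 *
 *   1 128/4096 1024/204800 -
 *
 * for STATUSTYPE_INFO, while STATUSTYPE_TABLE echoes the construction
 * parameters, e.g. "253:0 253:1 128 16384 1 skip_block_zeroing".
 */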
2380static int pool_status(struct dm_target *ti, status_type_t type,
2381		       char *result, unsigned maxlen)
2382{
2383	int r, count;
2384	unsigned sz = 0;
2385	uint64_t transaction_id;
2386	dm_block_t nr_free_blocks_data;
2387	dm_block_t nr_free_blocks_metadata;
2388	dm_block_t nr_blocks_data;
2389	dm_block_t nr_blocks_metadata;
2390	dm_block_t held_root;
2391	char buf[BDEVNAME_SIZE];
2392	char buf2[BDEVNAME_SIZE];
2393	struct pool_c *pt = ti->private;
2394	struct pool *pool = pt->pool;
2395
2396	switch (type) {
2397	case STATUSTYPE_INFO:
2398		r = dm_pool_get_metadata_transaction_id(pool->pmd,
2399							&transaction_id);
2400		if (r)
2401			return r;
2402
2403		r = dm_pool_get_free_metadata_block_count(pool->pmd,
2404							  &nr_free_blocks_metadata);
2405		if (r)
2406			return r;
2407
2408		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2409		if (r)
2410			return r;
2411
2412		r = dm_pool_get_free_block_count(pool->pmd,
2413						 &nr_free_blocks_data);
2414		if (r)
2415			return r;
2416
2417		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2418		if (r)
2419			return r;
2420
2421		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2422		if (r)
2423			return r;
2424
2425		DMEMIT("%llu %llu/%llu %llu/%llu ",
2426		       (unsigned long long)transaction_id,
2427		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2428		       (unsigned long long)nr_blocks_metadata,
2429		       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2430		       (unsigned long long)nr_blocks_data);
2431
2432		if (held_root)
2433			DMEMIT("%llu", held_root);
2434		else
2435			DMEMIT("-");
2436
2437		break;
2438
2439	case STATUSTYPE_TABLE:
2440		DMEMIT("%s %s %lu %llu ",
2441		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2442		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2443		       (unsigned long)pool->sectors_per_block,
2444		       (unsigned long long)pt->low_water_blocks);
2445
2446		count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
2447			!pt->pf.discard_passdown;
2448		DMEMIT("%u ", count);
2449
2450		if (!pool->pf.zero_new_blocks)
2451			DMEMIT("skip_block_zeroing ");
2452
2453		if (!pool->pf.discard_enabled)
2454			DMEMIT("ignore_discard ");
2455
2456		if (!pt->pf.discard_passdown)
2457			DMEMIT("no_discard_passdown ");
2458
2459		break;
2460	}
2461
2462	return 0;
2463}
2464
2465static int pool_iterate_devices(struct dm_target *ti,
2466				iterate_devices_callout_fn fn, void *data)
2467{
2468	struct pool_c *pt = ti->private;
2469
2470	return fn(ti, pt->data_dev, 0, ti->len, data);
2471}
2472
2473static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2474		      struct bio_vec *biovec, int max_size)
2475{
2476	struct pool_c *pt = ti->private;
2477	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2478
2479	if (!q->merge_bvec_fn)
2480		return max_size;
2481
2482	bvm->bi_bdev = pt->data_dev->bdev;
2483
2484	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2485}
2486
2487static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
2488{
2489	/*
2490	 * FIXME: these limits may be incompatible with the pool's data device
2491	 */
2492	limits->max_discard_sectors = pool->sectors_per_block;
2493
2494	/*
2495	 * This is just a hint, and not enforced.  We have to cope with
2496	 * bios that span two blocks.
2497	 */
2498	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
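	/*
	 * For example (illustrative figures): with 64KiB data blocks
	 * sectors_per_block is 128, so the granularity set above is
	 * 128 << SECTOR_SHIFT = 65536 bytes.
	 */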
2499	limits->discard_zeroes_data = pool->pf.zero_new_blocks;
2500}
2501
2502static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2503{
2504	struct pool_c *pt = ti->private;
2505	struct pool *pool = pt->pool;
2506
2507	blk_limits_io_min(limits, 0);
2508	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2509	if (pool->pf.discard_enabled)
2510		set_discard_limits(pool, limits);
2511}
2512
2513static struct target_type pool_target = {
2514	.name = "thin-pool",
2515	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2516		    DM_TARGET_IMMUTABLE,
2517	.version = {1, 2, 0},
2518	.module = THIS_MODULE,
2519	.ctr = pool_ctr,
2520	.dtr = pool_dtr,
2521	.map = pool_map,
2522	.postsuspend = pool_postsuspend,
2523	.preresume = pool_preresume,
2524	.resume = pool_resume,
2525	.message = pool_message,
2526	.status = pool_status,
2527	.merge = pool_merge,
2528	.iterate_devices = pool_iterate_devices,
2529	.io_hints = pool_io_hints,
2530};
2531
2532/*----------------------------------------------------------------
2533 * Thin target methods
2534 *--------------------------------------------------------------*/
2535static void thin_dtr(struct dm_target *ti)
2536{
2537	struct thin_c *tc = ti->private;
2538
2539	mutex_lock(&dm_thin_pool_table.mutex);
2540
2541	__pool_dec(tc->pool);
2542	dm_pool_close_thin_device(tc->td);
2543	dm_put_device(ti, tc->pool_dev);
2544	if (tc->origin_dev)
2545		dm_put_device(ti, tc->origin_dev);
2546	kfree(tc);
2547
2548	mutex_unlock(&dm_thin_pool_table.mutex);
2549}
2550
2551/*
2552 * Thin target parameters:
2553 *
2554 * <pool_dev> <dev_id> [origin_dev]
2555 *
2556 * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
2557 * dev_id: the internal device identifier
2558 * origin_dev: a device external to the pool that should act as the origin
2559 *
2560 * If the pool device has discards disabled, they get disabled for the thin
2561 * device as well.
2562 */
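/*
 * Illustrative examples (device names and sizes are assumptions): a 1GiB
 * thin volume using device id 0 from a pool named "my_pool", without and
 * with an external read-only origin, could be loaded as:
 *
 *   0 2097152 thin /dev/mapper/my_pool 0
 *   0 2097152 thin /dev/mapper/my_pool 0 /dev/mapper/external_origin
 */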
2563static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2564{
2565	int r;
2566	struct thin_c *tc;
2567	struct dm_dev *pool_dev, *origin_dev;
2568	struct mapped_device *pool_md;
2569
2570	mutex_lock(&dm_thin_pool_table.mutex);
2571
2572	if (argc != 2 && argc != 3) {
2573		ti->error = "Invalid argument count";
2574		r = -EINVAL;
2575		goto out_unlock;
2576	}
2577
2578	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2579	if (!tc) {
2580		ti->error = "Out of memory";
2581		r = -ENOMEM;
2582		goto out_unlock;
2583	}
2584
2585	if (argc == 3) {
2586		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
2587		if (r) {
2588			ti->error = "Error opening origin device";
2589			goto bad_origin_dev;
2590		}
2591		tc->origin_dev = origin_dev;
2592	}
2593
2594	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2595	if (r) {
2596		ti->error = "Error opening pool device";
2597		goto bad_pool_dev;
2598	}
2599	tc->pool_dev = pool_dev;
2600
2601	if (read_dev_id(argv[1], &tc->dev_id, 0)) {
2602		ti->error = "Invalid device id";
2603		r = -EINVAL;
2604		goto bad_common;
2605	}
2606
2607	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2608	if (!pool_md) {
2609		ti->error = "Couldn't get pool mapped device";
2610		r = -EINVAL;
2611		goto bad_common;
2612	}
2613
2614	tc->pool = __pool_table_lookup(pool_md);
2615	if (!tc->pool) {
2616		ti->error = "Couldn't find pool object";
2617		r = -EINVAL;
2618		goto bad_pool_lookup;
2619	}
2620	__pool_inc(tc->pool);
2621
2622	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2623	if (r) {
2624		ti->error = "Couldn't open thin internal device";
2625		goto bad_thin_open;
2626	}
2627
2628	ti->split_io = tc->pool->sectors_per_block;
2629	ti->num_flush_requests = 1;
2630
2631	/* If the pool supports discards, pass them on. */
2632	if (tc->pool->pf.discard_enabled) {
2633		ti->discards_supported = 1;
2634		ti->num_discard_requests = 1;
2635		ti->discard_zeroes_data_unsupported = 1;
2636	}
2637
2638	dm_put(pool_md);
2639
2640	mutex_unlock(&dm_thin_pool_table.mutex);
2641
2642	return 0;
2643
2644bad_thin_open:
2645	__pool_dec(tc->pool);
2646bad_pool_lookup:
2647	dm_put(pool_md);
2648bad_common:
2649	dm_put_device(ti, tc->pool_dev);
2650bad_pool_dev:
2651	if (tc->origin_dev)
2652		dm_put_device(ti, tc->origin_dev);
2653bad_origin_dev:
2654	kfree(tc);
2655out_unlock:
2656	mutex_unlock(&dm_thin_pool_table.mutex);
2657
2658	return r;
2659}
2660
2661static int thin_map(struct dm_target *ti, struct bio *bio,
2662		    union map_info *map_context)
2663{
2664	bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
2665
2666	return thin_bio_map(ti, bio, map_context);
2667}
2668
2669static int thin_endio(struct dm_target *ti,
2670		      struct bio *bio, int err,
2671		      union map_info *map_context)
2672{
2673	unsigned long flags;
2674	struct dm_thin_endio_hook *h = map_context->ptr;
2675	struct list_head work;
2676	struct dm_thin_new_mapping *m, *tmp;
2677	struct pool *pool = h->tc->pool;
2678
2679	if (h->shared_read_entry) {
2680		INIT_LIST_HEAD(&work);
2681		ds_dec(h->shared_read_entry, &work);
2682
2683		spin_lock_irqsave(&pool->lock, flags);
2684		list_for_each_entry_safe(m, tmp, &work, list) {
2685			list_del(&m->list);
2686			m->quiesced = 1;
2687			__maybe_add_mapping(m);
2688		}
2689		spin_unlock_irqrestore(&pool->lock, flags);
2690	}
2691
2692	if (h->all_io_entry) {
2693		INIT_LIST_HEAD(&work);
2694		ds_dec(h->all_io_entry, &work);
2695		spin_lock_irqsave(&pool->lock, flags);
2696		list_for_each_entry_safe(m, tmp, &work, list)
2697			list_add(&m->list, &pool->prepared_discards);
2698		spin_unlock_irqrestore(&pool->lock, flags);
2699	}
2700
2701	mempool_free(h, pool->endio_hook_pool);
2702
2703	return 0;
2704}
2705
2706static void thin_postsuspend(struct dm_target *ti)
2707{
2708	if (dm_noflush_suspending(ti))
2709		requeue_io((struct thin_c *)ti->private);
2710}
2711
2712/*
2713 * <nr mapped sectors> <highest mapped sector>
2714 */
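/*
 * e.g. "1048576 2097151" for a device with 1048576 sectors mapped whose
 * highest mapped block ends at sector 2097151 (an assumed, illustrative
 * example).
 */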
2715static int thin_status(struct dm_target *ti, status_type_t type,
2716		       char *result, unsigned maxlen)
2717{
2718	int r;
2719	ssize_t sz = 0;
2720	dm_block_t mapped, highest;
2721	char buf[BDEVNAME_SIZE];
2722	struct thin_c *tc = ti->private;
2723
2724	if (!tc->td)
2725		DMEMIT("-");
2726	else {
2727		switch (type) {
2728		case STATUSTYPE_INFO:
2729			r = dm_thin_get_mapped_count(tc->td, &mapped);
2730			if (r)
2731				return r;
2732
2733			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
2734			if (r < 0)
2735				return r;
2736
2737			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
2738			if (r)
2739				DMEMIT("%llu", ((highest + 1) *
2740						tc->pool->sectors_per_block) - 1);
2741			else
2742				DMEMIT("-");
2743			break;
2744
2745		case STATUSTYPE_TABLE:
2746			DMEMIT("%s %lu",
2747			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
2748			       (unsigned long) tc->dev_id);
2749			if (tc->origin_dev)
2750				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
2751			break;
2752		}
2753	}
2754
2755	return 0;
2756}
2757
2758static int thin_iterate_devices(struct dm_target *ti,
2759				iterate_devices_callout_fn fn, void *data)
2760{
2761	dm_block_t blocks;
2762	struct thin_c *tc = ti->private;
2763
2764	/*
2765	 * We can't call dm_pool_get_data_dev_size() since that blocks.  So
2766	 * we follow a more convoluted path through to the pool's target.
2767	 */
2768	if (!tc->pool->ti)
2769		return 0;	/* nothing is bound */
2770
2771	blocks = tc->pool->ti->len >> tc->pool->block_shift;
2772	if (blocks)
2773		return fn(ti, tc->pool_dev, 0, tc->pool->sectors_per_block * blocks, data);
2774
2775	return 0;
2776}
2777
2778static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
2779{
2780	struct thin_c *tc = ti->private;
2781	struct pool *pool = tc->pool;
2782
2783	blk_limits_io_min(limits, 0);
2784	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2785	set_discard_limits(pool, limits);
2786}
2787
2788static struct target_type thin_target = {
2789	.name = "thin",
2790	.version = {1, 1, 0},
2791	.module	= THIS_MODULE,
2792	.ctr = thin_ctr,
2793	.dtr = thin_dtr,
2794	.map = thin_map,
2795	.end_io = thin_endio,
2796	.postsuspend = thin_postsuspend,
2797	.status = thin_status,
2798	.iterate_devices = thin_iterate_devices,
2799	.io_hints = thin_io_hints,
2800};
2801
2802/*----------------------------------------------------------------*/
2803
2804static int __init dm_thin_init(void)
2805{
2806	int r;
2807
2808	pool_table_init();
2809
2810	r = dm_register_target(&thin_target);
2811	if (r)
2812		return r;
2813
2814	r = dm_register_target(&pool_target);
2815	if (r)
2816		goto bad_pool_target;
2817
2818	r = -ENOMEM;
2819
2820	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
2821	if (!_cell_cache)
2822		goto bad_cell_cache;
2823
2824	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
2825	if (!_new_mapping_cache)
2826		goto bad_new_mapping_cache;
2827
2828	_endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
2829	if (!_endio_hook_cache)
2830		goto bad_endio_hook_cache;
2831
2832	return 0;
2833
2834bad_endio_hook_cache:
2835	kmem_cache_destroy(_new_mapping_cache);
2836bad_new_mapping_cache:
2837	kmem_cache_destroy(_cell_cache);
2838bad_cell_cache:
2839	dm_unregister_target(&pool_target);
2840bad_pool_target:
2841	dm_unregister_target(&thin_target);
2842
2843	return r;
2844}
2845
2846static void dm_thin_exit(void)
2847{
2848	dm_unregister_target(&thin_target);
2849	dm_unregister_target(&pool_target);
2850
2851	kmem_cache_destroy(_cell_cache);
2852	kmem_cache_destroy(_new_mapping_cache);
2853	kmem_cache_destroy(_endio_hook_cache);
2854}
2855
2856module_init(dm_thin_init);
2857module_exit(dm_thin_exit);
2858
2859MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
2860MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2861MODULE_LICENSE("GPL");