   1/*
   2 * Copyright (C) 2011-2012 Red Hat UK.
   3 *
   4 * This file is released under the GPL.
   5 */
   6
   7#include "dm-thin-metadata.h"
   8#include "dm-bio-prison-v1.h"
   9#include "dm.h"
  10
  11#include <linux/device-mapper.h>
  12#include <linux/dm-io.h>
  13#include <linux/dm-kcopyd.h>
  14#include <linux/jiffies.h>
  15#include <linux/log2.h>
  16#include <linux/list.h>
  17#include <linux/rculist.h>
  18#include <linux/init.h>
  19#include <linux/module.h>
  20#include <linux/slab.h>
  21#include <linux/vmalloc.h>
  22#include <linux/sort.h>
  23#include <linux/rbtree.h>
  24
  25#define	DM_MSG_PREFIX	"thin"
  26
  27/*
  28 * Tunable constants
  29 */
  30#define ENDIO_HOOK_POOL_SIZE 1024
  31#define MAPPING_POOL_SIZE 1024
  32#define COMMIT_PERIOD HZ
  33#define NO_SPACE_TIMEOUT_SECS 60
  34
  35static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
  36
  37DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
  38		"A percentage of time allocated for copy on write");
  39
  40/*
  41 * The block size of the device holding pool data must be
  42 * between 64KB and 1GB.
  43 */
  44#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
  45#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
  46
  47/*
  48 * Device id is restricted to 24 bits.
  49 */
  50#define MAX_DEV_ID ((1 << 24) - 1)
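
/*
 * For orientation: a pool and a thin volume are typically assembled from
 * userspace roughly as follows (an illustrative sketch only; the device
 * names, table sizes and the 128-sector (64KB) data block size are made
 * up for this example):
 *
 *   dmsetup create pool \
 *     --table "0 20971520 thin-pool /dev/meta /dev/data 128 32768"
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 */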
  51
  52/*
  53 * How do we handle breaking sharing of data blocks?
  54 * =================================================
  55 *
  56 * We use a standard copy-on-write btree to store the mappings for the
  57 * devices (note I'm talking about copy-on-write of the metadata here, not
  58 * the data).  When you take an internal snapshot you clone the root node
  59 * of the origin btree.  After this there is no concept of an origin or a
  60 * snapshot.  They are just two device trees that happen to point to the
  61 * same data blocks.
  62 *
  63 * When we get a write in we decide if it's to a shared data block using
  64 * some timestamp magic.  If it is, we have to break sharing.
  65 *
  66 * Let's say we write to a shared block in what was the origin.  The
  67 * steps are:
  68 *
  69 * i) plug further io to this physical block (see bio_prison code).
  70 *
  71 * ii) quiesce any read io to that shared data block, obviously
  72 * including all devices that share this block (see dm_deferred_set code).
  73 *
  74 * iii) copy the data block to a newly allocated block.  This step can be
  75 * skipped if the io covers the whole block (schedule_copy).
  76 *
  77 * iv) insert the new mapping into the origin's btree
  78 * (process_prepared_mapping).  This act of inserting breaks some
  79 * sharing of btree nodes between the two devices.  Breaking sharing only
  80 * affects the btree of that specific device.  Btrees for the other
  81 * devices that share the block never change.  The btree for the origin
  82 * device as it was after the last commit is untouched, i.e. we're using
  83 * persistent data structures in the functional programming sense.
  84 *
  85 * v) unplug io to this physical block, including the io that triggered
  86 * the breaking of sharing.
  87 *
  88 * Steps (ii) and (iii) occur in parallel.
  89 *
  90 * The metadata _doesn't_ need to be committed before the io continues.  We
  91 * get away with this because the io is always written to a _new_ block.
  92 * If there's a crash, then:
  93 *
  94 * - The origin mapping will point to the old origin block (the shared
  95 * one).  This will contain the data as it was before the io that triggered
  96 * the breaking of sharing came in.
  97 *
  98 * - The snap mapping still points to the old block, exactly as it
  99 * would after the commit.
 100 *
 101 * The downside of this scheme is the timestamp magic isn't perfect, and
 102 * will continue to think that the data block in the snapshot device is shared
 103 * even after the write to the origin has broken sharing.  I suspect data
 104 * blocks will typically be shared by many different devices, so we're
 105 * breaking sharing n + 1 times, rather than n, where n is the number of
 106 * devices that reference this data block.  At the moment I think the
 107 * benefits far, far outweigh the disadvantages.
 108 */
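
/*
 * In terms of the code below, the steps above correspond roughly to the
 * following (a sketch of the call chain, not an exact trace):
 *
 *   bio_detain(pool, &key, bio, &cell);            // (i) plug the block
 *   dm_deferred_set_add_work(shared_read_ds, ...); // (ii) quiesce readers
 *   schedule_copy(tc, ...);                        // (iii) copy the data
 *   process_prepared_mapping(m);                   // (iv) insert mapping
 *   inc_remap_and_issue_cell(tc, cell, block);     // (v) unplug, reissue
 */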
 109
 110/*----------------------------------------------------------------*/
 111
 112/*
 113 * Key building.
 114 */
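
/*
 * Cells lock a range of blocks in either the VIRTUAL (thin device) or
 * the PHYSICAL (pool data device) block address space; key->virtual
 * keeps the two spaces from ever colliding in the prison.
 */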
 115enum lock_space {
 116	VIRTUAL,
 117	PHYSICAL
 118};
 119
 120static void build_key(struct dm_thin_device *td, enum lock_space ls,
 121		      dm_block_t b, dm_block_t e, struct dm_cell_key *key)
 122{
 123	key->virtual = (ls == VIRTUAL);
 124	key->dev = dm_thin_dev_id(td);
 125	key->block_begin = b;
 126	key->block_end = e;
 127}
 128
 129static void build_data_key(struct dm_thin_device *td, dm_block_t b,
 130			   struct dm_cell_key *key)
 131{
 132	build_key(td, PHYSICAL, b, b + 1llu, key);
 133}
 134
 135static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
 136			      struct dm_cell_key *key)
 137{
 138	build_key(td, VIRTUAL, b, b + 1llu, key);
 139}
 140
 141/*----------------------------------------------------------------*/
 142
 143#define THROTTLE_THRESHOLD (1 * HZ)
 144
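/*
 * If the worker has been busy for longer than THROTTLE_THRESHOLD it takes
 * this rw_semaphore for write, making throttle_lock() callers (io
 * submitters, who take it for read) wait until the work completes.  The
 * intended usage from a work function is roughly (a sketch):
 *
 *   throttle_work_start(&pool->throttle);
 *   ... do some work, calling throttle_work_update() periodically ...
 *   throttle_work_complete(&pool->throttle);
 */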
 145struct throttle {
 146	struct rw_semaphore lock;
 147	unsigned long threshold;
 148	bool throttle_applied;
 149};
 150
 151static void throttle_init(struct throttle *t)
 152{
 153	init_rwsem(&t->lock);
 154	t->throttle_applied = false;
 155}
 156
 157static void throttle_work_start(struct throttle *t)
 158{
 159	t->threshold = jiffies + THROTTLE_THRESHOLD;
 160}
 161
 162static void throttle_work_update(struct throttle *t)
 163{
 164	if (!t->throttle_applied && jiffies > t->threshold) {
 165		down_write(&t->lock);
 166		t->throttle_applied = true;
 167	}
 168}
 169
 170static void throttle_work_complete(struct throttle *t)
 171{
 172	if (t->throttle_applied) {
 173		t->throttle_applied = false;
 174		up_write(&t->lock);
 175	}
 176}
 177
 178static void throttle_lock(struct throttle *t)
 179{
 180	down_read(&t->lock);
 181}
 182
 183static void throttle_unlock(struct throttle *t)
 184{
 185	up_read(&t->lock);
 186}
 187
 188/*----------------------------------------------------------------*/
 189
 190/*
 191 * A pool device ties together a metadata device and a data device.  It
 192 * also provides the interface for creating and destroying internal
 193 * devices.
 194 */
 195struct dm_thin_new_mapping;
 196
 197/*
 198 * The pool runs in various modes.  Modes are ordered by increasing degradation for comparisons.
 199 */
 200enum pool_mode {
 201	PM_WRITE,		/* metadata may be changed */
 202	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
 203
 204	/*
 205	 * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
 206	 */
 207	PM_OUT_OF_METADATA_SPACE,
 208	PM_READ_ONLY,		/* metadata may not be changed */
 209
 210	PM_FAIL,		/* all I/O fails */
 211};
 212
 213struct pool_features {
 214	enum pool_mode mode;
 215
 216	bool zero_new_blocks:1;
 217	bool discard_enabled:1;
 218	bool discard_passdown:1;
 219	bool error_if_no_space:1;
 220};
 221
 222struct thin_c;
 223typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
 224typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
 225typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
 226
 227#define CELL_SORT_ARRAY_SIZE 8192
 228
 229struct pool {
 230	struct list_head list;
 231	struct dm_target *ti;	/* Only set if a pool target is bound */
 232
 233	struct mapped_device *pool_md;
 234	struct block_device *md_dev;
 235	struct dm_pool_metadata *pmd;
 236
 237	dm_block_t low_water_blocks;
 238	uint32_t sectors_per_block;
 239	int sectors_per_block_shift;
 240
 241	struct pool_features pf;
 242	bool low_water_triggered:1;	/* A dm event has been sent */
 243	bool suspended:1;
 244	bool out_of_data_space:1;
 245
 246	struct dm_bio_prison *prison;
 247	struct dm_kcopyd_client *copier;
 248
 249	struct work_struct worker;
 250	struct workqueue_struct *wq;
 251	struct throttle throttle;
 252	struct delayed_work waker;
 253	struct delayed_work no_space_timeout;
 254
 255	unsigned long last_commit_jiffies;
 256	unsigned ref_count;
 257
 258	spinlock_t lock;
 259	struct bio_list deferred_flush_bios;
 260	struct bio_list deferred_flush_completions;
 261	struct list_head prepared_mappings;
 262	struct list_head prepared_discards;
 263	struct list_head prepared_discards_pt2;
 264	struct list_head active_thins;
 265
 266	struct dm_deferred_set *shared_read_ds;
 267	struct dm_deferred_set *all_io_ds;
 268
 269	struct dm_thin_new_mapping *next_mapping;
 270
 271	process_bio_fn process_bio;
 272	process_bio_fn process_discard;
 273
 274	process_cell_fn process_cell;
 275	process_cell_fn process_discard_cell;
 276
 277	process_mapping_fn process_prepared_mapping;
 278	process_mapping_fn process_prepared_discard;
 279	process_mapping_fn process_prepared_discard_pt2;
 280
 281	struct dm_bio_prison_cell **cell_sort_array;
 282
 283	mempool_t mapping_pool;
 284};
 285
 286static void metadata_operation_failed(struct pool *pool, const char *op, int r);
 287
 288static enum pool_mode get_pool_mode(struct pool *pool)
 289{
 290	return pool->pf.mode;
 291}
 292
 293static void notify_of_pool_mode_change(struct pool *pool)
 294{
 295	const char *descs[] = {
 296		"write",
 297		"out-of-data-space",
 298		"read-only",
 299		"read-only",
 300		"fail"
 301	};
 302	const char *extra_desc = NULL;
 303	enum pool_mode mode = get_pool_mode(pool);
 304
 305	if (mode == PM_OUT_OF_DATA_SPACE) {
 306		if (!pool->pf.error_if_no_space)
 307			extra_desc = " (queue IO)";
 308		else
 309			extra_desc = " (error IO)";
 310	}
 311
 312	dm_table_event(pool->ti->table);
 313	DMINFO("%s: switching pool to %s%s mode",
 314	       dm_device_name(pool->pool_md),
 315	       descs[(int)mode], extra_desc ? : "");
 316}
 317
 318/*
 319 * Target context for a pool.
 320 */
 321struct pool_c {
 322	struct dm_target *ti;
 323	struct pool *pool;
 324	struct dm_dev *data_dev;
 325	struct dm_dev *metadata_dev;
 326	struct dm_target_callbacks callbacks;
 327
 328	dm_block_t low_water_blocks;
 329	struct pool_features requested_pf; /* Features requested during table load */
 330	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
 331};
 332
 333/*
 334 * Target context for a thin.
 335 */
 336struct thin_c {
 337	struct list_head list;
 338	struct dm_dev *pool_dev;
 339	struct dm_dev *origin_dev;
 340	sector_t origin_size;
 341	dm_thin_id dev_id;
 342
 343	struct pool *pool;
 344	struct dm_thin_device *td;
 345	struct mapped_device *thin_md;
 346
 347	bool requeue_mode:1;
 348	spinlock_t lock;
 349	struct list_head deferred_cells;
 350	struct bio_list deferred_bio_list;
 351	struct bio_list retry_on_resume_list;
 352	struct rb_root sort_bio_list; /* sorted list of deferred bios */
 353
 354	/*
 355	 * Ensures the thin is not destroyed until the worker has finished
 356	 * iterating the active_thins list.
 357	 */
 358	refcount_t refcount;
 359	struct completion can_destroy;
 360};
 361
 362/*----------------------------------------------------------------*/
 363
 364static bool block_size_is_power_of_two(struct pool *pool)
 365{
 366	return pool->sectors_per_block_shift >= 0;
 367}
 368
 369static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
 370{
 371	return block_size_is_power_of_two(pool) ?
 372		(b << pool->sectors_per_block_shift) :
 373		(b * pool->sectors_per_block);
 374}
 375
 376/*----------------------------------------------------------------*/
 377
 378struct discard_op {
 379	struct thin_c *tc;
 380	struct blk_plug plug;
 381	struct bio *parent_bio;
 382	struct bio *bio;
 383};
 384
 385static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
 386{
 387	BUG_ON(!parent);
 388
 389	op->tc = tc;
 390	blk_start_plug(&op->plug);
 391	op->parent_bio = parent;
 392	op->bio = NULL;
 393}
 394
 395static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
 396{
 397	struct thin_c *tc = op->tc;
 398	sector_t s = block_to_sectors(tc->pool, data_b);
 399	sector_t len = block_to_sectors(tc->pool, data_e - data_b);
 400
 401	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
 402				      GFP_NOWAIT, 0, &op->bio);
 403}
 404
 405static void end_discard(struct discard_op *op, int r)
 406{
 407	if (op->bio) {
 408		/*
 409		 * Even if one of the calls to issue_discard failed, we
 410		 * need to wait for the chain to complete.
 411		 */
 412		bio_chain(op->bio, op->parent_bio);
 413		bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
 414		submit_bio(op->bio);
 415	}
 416
 417	blk_finish_plug(&op->plug);
 418
 419	/*
 420	 * Even if r is set, there could be sub discards in flight that we
 421	 * need to wait for.
 422	 */
 423	if (r && !op->parent_bio->bi_status)
 424		op->parent_bio->bi_status = errno_to_blk_status(r);
 425	bio_endio(op->parent_bio);
 426}
 427
 428/*----------------------------------------------------------------*/
 429
 430/*
 431 * wake_worker() is used when new work is queued and when pool_resume is
 432 * ready to continue deferred IO processing.
 433 */
 434static void wake_worker(struct pool *pool)
 435{
 436	queue_work(pool->wq, &pool->worker);
 437}
 438
 439/*----------------------------------------------------------------*/
 440
 441static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
 442		      struct dm_bio_prison_cell **cell_result)
 443{
 444	int r;
 445	struct dm_bio_prison_cell *cell_prealloc;
 446
 447	/*
 448	 * Allocate a cell from the prison's mempool.
 449	 * This might block but it can't fail.
 450	 */
 451	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
 452
 453	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
 454	if (r)
 455		/*
 456		 * We reused an old cell; we can get rid of
 457		 * the new one.
 458		 */
 459		dm_bio_prison_free_cell(pool->prison, cell_prealloc);
 460
 461	return r;
 462}
 463
 464static void cell_release(struct pool *pool,
 465			 struct dm_bio_prison_cell *cell,
 466			 struct bio_list *bios)
 467{
 468	dm_cell_release(pool->prison, cell, bios);
 469	dm_bio_prison_free_cell(pool->prison, cell);
 470}
 471
 472static void cell_visit_release(struct pool *pool,
 473			       void (*fn)(void *, struct dm_bio_prison_cell *),
 474			       void *context,
 475			       struct dm_bio_prison_cell *cell)
 476{
 477	dm_cell_visit_release(pool->prison, fn, context, cell);
 478	dm_bio_prison_free_cell(pool->prison, cell);
 479}
 480
 481static void cell_release_no_holder(struct pool *pool,
 482				   struct dm_bio_prison_cell *cell,
 483				   struct bio_list *bios)
 484{
 485	dm_cell_release_no_holder(pool->prison, cell, bios);
 486	dm_bio_prison_free_cell(pool->prison, cell);
 487}
 488
 489static void cell_error_with_code(struct pool *pool,
 490		struct dm_bio_prison_cell *cell, blk_status_t error_code)
 491{
 492	dm_cell_error(pool->prison, cell, error_code);
 493	dm_bio_prison_free_cell(pool->prison, cell);
 494}
 495
 496static blk_status_t get_pool_io_error_code(struct pool *pool)
 497{
 498	return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
 499}
 500
 501static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
 502{
 503	cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
 504}
 505
 506static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
 507{
 508	cell_error_with_code(pool, cell, 0);
 509}
 510
 511static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
 512{
 513	cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
 514}
 515
 516/*----------------------------------------------------------------*/
 517
 518/*
 519 * A global list of pools that uses a struct mapped_device as a key.
 520 */
 521static struct dm_thin_pool_table {
 522	struct mutex mutex;
 523	struct list_head pools;
 524} dm_thin_pool_table;
 525
 526static void pool_table_init(void)
 527{
 528	mutex_init(&dm_thin_pool_table.mutex);
 529	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
 530}
 531
 532static void pool_table_exit(void)
 533{
 534	mutex_destroy(&dm_thin_pool_table.mutex);
 535}
 536
 537static void __pool_table_insert(struct pool *pool)
 538{
 539	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 540	list_add(&pool->list, &dm_thin_pool_table.pools);
 541}
 542
 543static void __pool_table_remove(struct pool *pool)
 544{
 545	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 546	list_del(&pool->list);
 547}
 548
 549static struct pool *__pool_table_lookup(struct mapped_device *md)
 550{
 551	struct pool *pool = NULL, *tmp;
 552
 553	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 554
 555	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
 556		if (tmp->pool_md == md) {
 557			pool = tmp;
 558			break;
 559		}
 560	}
 561
 562	return pool;
 563}
 564
 565static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
 566{
 567	struct pool *pool = NULL, *tmp;
 568
 569	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 570
 571	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
 572		if (tmp->md_dev == md_dev) {
 573			pool = tmp;
 574			break;
 575		}
 576	}
 577
 578	return pool;
 579}
 580
 581/*----------------------------------------------------------------*/
 582
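/*
 * Per-bio state, stored via dm_per_bio_data(): which thin device the bio
 * belongs to, any deferred-set entries it holds, and (for overwrites and
 * passdown discards) the mapping or prison cell it is associated with.
 */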
 583struct dm_thin_endio_hook {
 584	struct thin_c *tc;
 585	struct dm_deferred_entry *shared_read_entry;
 586	struct dm_deferred_entry *all_io_entry;
 587	struct dm_thin_new_mapping *overwrite_mapping;
 588	struct rb_node rb_node;
 589	struct dm_bio_prison_cell *cell;
 590};
 591
 592static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
 593{
 594	bio_list_merge(bios, master);
 595	bio_list_init(master);
 596}
 597
 598static void error_bio_list(struct bio_list *bios, blk_status_t error)
 599{
 600	struct bio *bio;
 601
 602	while ((bio = bio_list_pop(bios))) {
 603		bio->bi_status = error;
 604		bio_endio(bio);
 605	}
 606}
 607
 608static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
 609		blk_status_t error)
 610{
 611	struct bio_list bios;
 612	unsigned long flags;
 613
 614	bio_list_init(&bios);
 615
 616	spin_lock_irqsave(&tc->lock, flags);
 617	__merge_bio_list(&bios, master);
 618	spin_unlock_irqrestore(&tc->lock, flags);
 619
 620	error_bio_list(&bios, error);
 621}
 622
 623static void requeue_deferred_cells(struct thin_c *tc)
 624{
 625	struct pool *pool = tc->pool;
 626	unsigned long flags;
 627	struct list_head cells;
 628	struct dm_bio_prison_cell *cell, *tmp;
 629
 630	INIT_LIST_HEAD(&cells);
 631
 632	spin_lock_irqsave(&tc->lock, flags);
 633	list_splice_init(&tc->deferred_cells, &cells);
 634	spin_unlock_irqrestore(&tc->lock, flags);
 635
 636	list_for_each_entry_safe(cell, tmp, &cells, user_list)
 637		cell_requeue(pool, cell);
 638}
 639
 640static void requeue_io(struct thin_c *tc)
 641{
 642	struct bio_list bios;
 643	unsigned long flags;
 644
 645	bio_list_init(&bios);
 646
 647	spin_lock_irqsave(&tc->lock, flags);
 648	__merge_bio_list(&bios, &tc->deferred_bio_list);
 649	__merge_bio_list(&bios, &tc->retry_on_resume_list);
 650	spin_unlock_irqrestore(&tc->lock, flags);
 651
 652	error_bio_list(&bios, BLK_STS_DM_REQUEUE);
 653	requeue_deferred_cells(tc);
 654}
 655
 656static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
 657{
 658	struct thin_c *tc;
 659
 660	rcu_read_lock();
 661	list_for_each_entry_rcu(tc, &pool->active_thins, list)
 662		error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
 663	rcu_read_unlock();
 664}
 665
 666static void error_retry_list(struct pool *pool)
 667{
 668	error_retry_list_with_code(pool, get_pool_io_error_code(pool));
 669}
 670
 671/*
 672 * This section of code contains the logic for processing a thin device's IO.
 673 * Much of the code depends on pool object resources (lists, workqueues, etc)
 674 * but most is exclusively called from the thin target rather than the thin-pool
 675 * target.
 676 */
 677
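/*
 * Map a bio's starting sector to the corresponding block in the thin
 * device's virtual address space.
 */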
 678static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 679{
 680	struct pool *pool = tc->pool;
 681	sector_t block_nr = bio->bi_iter.bi_sector;
 682
 683	if (block_size_is_power_of_two(pool))
 684		block_nr >>= pool->sectors_per_block_shift;
 685	else
 686		(void) sector_div(block_nr, pool->sectors_per_block);
 687
 688	return block_nr;
 689}
 690
 691/*
 692 * Returns the _complete_ blocks that this bio covers.
 693 */
 694static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
 695				dm_block_t *begin, dm_block_t *end)
 696{
 697	struct pool *pool = tc->pool;
 698	sector_t b = bio->bi_iter.bi_sector;
 699	sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
 700
 701	b += pool->sectors_per_block - 1ull; /* so we round up */
 702
 703	if (block_size_is_power_of_two(pool)) {
 704		b >>= pool->sectors_per_block_shift;
 705		e >>= pool->sectors_per_block_shift;
 706	} else {
 707		(void) sector_div(b, pool->sectors_per_block);
 708		(void) sector_div(e, pool->sectors_per_block);
 709	}
 710
 711	if (e < b)
 712		/* Can happen if the bio is within a single block. */
 713		e = b;
 714
 715	*begin = b;
 716	*end = e;
 717}
 718
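/*
 * Remap a bio to the pool's data device, preserving its offset within
 * the data block.
 */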
 719static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 720{
 721	struct pool *pool = tc->pool;
 722	sector_t bi_sector = bio->bi_iter.bi_sector;
 723
 724	bio_set_dev(bio, tc->pool_dev->bdev);
 725	if (block_size_is_power_of_two(pool))
 726		bio->bi_iter.bi_sector =
 727			(block << pool->sectors_per_block_shift) |
 728			(bi_sector & (pool->sectors_per_block - 1));
 729	else
 730		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
 731				 sector_div(bi_sector, pool->sectors_per_block);
 732}
 733
 734static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 735{
 736	bio_set_dev(bio, tc->origin_dev->bdev);
 737}
 738
 739static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 740{
 741	return op_is_flush(bio->bi_opf) &&
 742		dm_thin_changed_this_transaction(tc->td);
 743}
 744
 745static void inc_all_io_entry(struct pool *pool, struct bio *bio)
 746{
 747	struct dm_thin_endio_hook *h;
 748
 749	if (bio_op(bio) == REQ_OP_DISCARD)
 750		return;
 751
 752	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 753	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
 754}
 755
 756static void issue(struct thin_c *tc, struct bio *bio)
 757{
 758	struct pool *pool = tc->pool;
 759	unsigned long flags;
 760
 761	if (!bio_triggers_commit(tc, bio)) {
 762		generic_make_request(bio);
 763		return;
 764	}
 765
 766	/*
 767	 * Complete bio with an error if earlier I/O caused changes to
 768 * the metadata that can't be committed, e.g. due to I/O errors
 769	 * on the metadata device.
 770	 */
 771	if (dm_thin_aborted_changes(tc->td)) {
 772		bio_io_error(bio);
 773		return;
 774	}
 775
 776	/*
 777	 * Batch together any bios that trigger commits and then issue a
 778	 * single commit for them in process_deferred_bios().
 779	 */
 780	spin_lock_irqsave(&pool->lock, flags);
 781	bio_list_add(&pool->deferred_flush_bios, bio);
 782	spin_unlock_irqrestore(&pool->lock, flags);
 783}
 784
 785static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
 786{
 787	remap_to_origin(tc, bio);
 788	issue(tc, bio);
 789}
 790
 791static void remap_and_issue(struct thin_c *tc, struct bio *bio,
 792			    dm_block_t block)
 793{
 794	remap(tc, bio, block);
 795	issue(tc, bio);
 796}
 797
 798/*----------------------------------------------------------------*/
 799
 800/*
 801 * Bio endio functions.
 802 */
 803struct dm_thin_new_mapping {
 804	struct list_head list;
 805
 806	bool pass_discard:1;
 807	bool maybe_shared:1;
 808
 809	/*
 810	 * Track quiescing, copying and zeroing preparation actions.  When this
 811	 * counter hits zero the block is prepared and can be inserted into the
 812	 * btree.
 813	 */
 814	atomic_t prepare_actions;
 815
 816	blk_status_t status;
 817	struct thin_c *tc;
 818	dm_block_t virt_begin, virt_end;
 819	dm_block_t data_block;
 820	struct dm_bio_prison_cell *cell;
 821
 822	/*
 823	 * If the bio covers the whole area of a block then we can avoid
 824	 * zeroing or copying.  Instead this bio is hooked.  The bio will
 825	 * still be in the cell, so care has to be taken to avoid issuing
 826	 * the bio twice.
 827	 */
 828	struct bio *bio;
 829	bio_end_io_t *saved_bi_end_io;
 830};
 831
 832static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
 833{
 834	struct pool *pool = m->tc->pool;
 835
 836	if (atomic_dec_and_test(&m->prepare_actions)) {
 837		list_add_tail(&m->list, &pool->prepared_mappings);
 838		wake_worker(pool);
 839	}
 840}
 841
 842static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
 843{
 844	unsigned long flags;
 845	struct pool *pool = m->tc->pool;
 846
 847	spin_lock_irqsave(&pool->lock, flags);
 848	__complete_mapping_preparation(m);
 849	spin_unlock_irqrestore(&pool->lock, flags);
 850}
 851
 852static void copy_complete(int read_err, unsigned long write_err, void *context)
 853{
 854	struct dm_thin_new_mapping *m = context;
 855
 856	m->status = read_err || write_err ? BLK_STS_IOERR : 0;
 857	complete_mapping_preparation(m);
 858}
 859
 860static void overwrite_endio(struct bio *bio)
 861{
 862	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 863	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 864
 865	bio->bi_end_io = m->saved_bi_end_io;
 866
 867	m->status = bio->bi_status;
 868	complete_mapping_preparation(m);
 869}
 870
 871/*----------------------------------------------------------------*/
 872
 873/*
 874 * Workqueue.
 875 */
 876
 877/*
 878 * Prepared mapping jobs.
 879 */
 880
 881/*
 882 * This sends the bios in the cell, except the original holder, back
 883 * to the deferred_bios list.
 884 */
 885static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 886{
 887	struct pool *pool = tc->pool;
 888	unsigned long flags;
 889
 890	spin_lock_irqsave(&tc->lock, flags);
 891	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
 892	spin_unlock_irqrestore(&tc->lock, flags);
 893
 894	wake_worker(pool);
 895}
 896
 897static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
 898
 899struct remap_info {
 900	struct thin_c *tc;
 901	struct bio_list defer_bios;
 902	struct bio_list issue_bios;
 903};
 904
 905static void __inc_remap_and_issue_cell(void *context,
 906				       struct dm_bio_prison_cell *cell)
 907{
 908	struct remap_info *info = context;
 909	struct bio *bio;
 910
 911	while ((bio = bio_list_pop(&cell->bios))) {
 912		if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
 913			bio_list_add(&info->defer_bios, bio);
 914		else {
 915			inc_all_io_entry(info->tc->pool, bio);
 916
 917			/*
 918			 * We can't issue the bios with the bio prison lock
 919			 * held, so we add them to a list to issue on
 920			 * return from this function.
 921			 */
 922			bio_list_add(&info->issue_bios, bio);
 923		}
 924	}
 925}
 926
 927static void inc_remap_and_issue_cell(struct thin_c *tc,
 928				     struct dm_bio_prison_cell *cell,
 929				     dm_block_t block)
 930{
 931	struct bio *bio;
 932	struct remap_info info;
 933
 934	info.tc = tc;
 935	bio_list_init(&info.defer_bios);
 936	bio_list_init(&info.issue_bios);
 937
 938	/*
 939	 * We have to be careful to inc any bios we're about to issue
 940	 * before the cell is released, and avoid a race with new bios
 941	 * being added to the cell.
 942	 */
 943	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
 944			   &info, cell);
 945
 946	while ((bio = bio_list_pop(&info.defer_bios)))
 947		thin_defer_bio(tc, bio);
 948
 949	while ((bio = bio_list_pop(&info.issue_bios)))
 950		remap_and_issue(info.tc, bio, block);
 951}
 952
 953static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 954{
 955	cell_error(m->tc->pool, m->cell);
 956	list_del(&m->list);
 957	mempool_free(m, &m->tc->pool->mapping_pool);
 958}
 959
 960static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
 961{
 962	struct pool *pool = tc->pool;
 963	unsigned long flags;
 964
 965	/*
 966	 * If the bio has the REQ_FUA flag set we must commit the metadata
 967	 * before signaling its completion.
 968	 */
 969	if (!bio_triggers_commit(tc, bio)) {
 970		bio_endio(bio);
 971		return;
 972	}
 973
 974	/*
 975	 * Complete bio with an error if earlier I/O caused changes to the
 976 * metadata that can't be committed, e.g. due to I/O errors on the
 977	 * metadata device.
 978	 */
 979	if (dm_thin_aborted_changes(tc->td)) {
 980		bio_io_error(bio);
 981		return;
 982	}
 983
 984	/*
 985	 * Batch together any bios that trigger commits and then issue a
 986	 * single commit for them in process_deferred_bios().
 987	 */
 988	spin_lock_irqsave(&pool->lock, flags);
 989	bio_list_add(&pool->deferred_flush_completions, bio);
 990	spin_unlock_irqrestore(&pool->lock, flags);
 991}
 992
 993static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 994{
 995	struct thin_c *tc = m->tc;
 996	struct pool *pool = tc->pool;
 997	struct bio *bio = m->bio;
 998	int r;
 999
1000	if (m->status) {
1001		cell_error(pool, m->cell);
1002		goto out;
1003	}
1004
1005	/*
1006	 * Commit the prepared block into the mapping btree.
1007	 * Any I/O for this block arriving after this point will get
1008	 * remapped to it directly.
1009	 */
1010	r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
1011	if (r) {
1012		metadata_operation_failed(pool, "dm_thin_insert_block", r);
1013		cell_error(pool, m->cell);
1014		goto out;
1015	}
1016
1017	/*
1018	 * Release any bios held while the block was being provisioned.
1019	 * If we are processing a write bio that completely covers the block,
1020	 * we already processed it so can ignore it now when processing
1021	 * the bios in the cell.
1022	 */
1023	if (bio) {
1024		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
1025		complete_overwrite_bio(tc, bio);
1026	} else {
1027		inc_all_io_entry(tc->pool, m->cell->holder);
1028		remap_and_issue(tc, m->cell->holder, m->data_block);
1029		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
1030	}
1031
1032out:
1033	list_del(&m->list);
1034	mempool_free(m, &pool->mapping_pool);
1035}
1036
1037/*----------------------------------------------------------------*/
1038
1039static void free_discard_mapping(struct dm_thin_new_mapping *m)
1040{
1041	struct thin_c *tc = m->tc;
1042	if (m->cell)
1043		cell_defer_no_holder(tc, m->cell);
1044	mempool_free(m, &tc->pool->mapping_pool);
1045}
1046
1047static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
1048{
1049	bio_io_error(m->bio);
1050	free_discard_mapping(m);
1051}
1052
1053static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
1054{
1055	bio_endio(m->bio);
1056	free_discard_mapping(m);
1057}
1058
1059static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
1060{
1061	int r;
1062	struct thin_c *tc = m->tc;
1063
1064	r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
1065	if (r) {
1066		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
1067		bio_io_error(m->bio);
1068	} else
1069		bio_endio(m->bio);
1070
1071	cell_defer_no_holder(tc, m->cell);
1072	mempool_free(m, &tc->pool->mapping_pool);
1073}
1074
1075/*----------------------------------------------------------------*/
1076
1077static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m,
1078						   struct bio *discard_parent)
1079{
1080	/*
1081	 * We've already unmapped this range of blocks, but before we
1082	 * passdown we have to check that these blocks are now unused.
1083	 */
1084	int r = 0;
1085	bool shared = true;
1086	struct thin_c *tc = m->tc;
1087	struct pool *pool = tc->pool;
1088	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
1089	struct discard_op op;
1090
1091	begin_discard(&op, tc, discard_parent);
1092	while (b != end) {
1093		/* find start of unmapped run */
1094		for (; b < end; b++) {
1095			r = dm_pool_block_is_shared(pool->pmd, b, &shared);
1096			if (r)
1097				goto out;
1098
1099			if (!shared)
1100				break;
1101		}
1102
1103		if (b == end)
1104			break;
1105
1106		/* find end of run */
1107		for (e = b + 1; e != end; e++) {
1108			r = dm_pool_block_is_shared(pool->pmd, e, &shared);
1109			if (r)
1110				goto out;
1111
1112			if (shared)
1113				break;
1114		}
1115
1116		r = issue_discard(&op, b, e);
1117		if (r)
1118			goto out;
1119
1120		b = e;
1121	}
1122out:
1123	end_discard(&op, r);
1124}
1125
1126static void queue_passdown_pt2(struct dm_thin_new_mapping *m)
1127{
1128	unsigned long flags;
1129	struct pool *pool = m->tc->pool;
1130
1131	spin_lock_irqsave(&pool->lock, flags);
1132	list_add_tail(&m->list, &pool->prepared_discards_pt2);
1133	spin_unlock_irqrestore(&pool->lock, flags);
1134	wake_worker(pool);
1135}
1136
1137static void passdown_endio(struct bio *bio)
1138{
1139	/*
1140	 * It doesn't matter if the passdown discard failed; we still want
1141	 * to unmap (we ignore err).
1142	 */
1143	queue_passdown_pt2(bio->bi_private);
1144	bio_put(bio);
1145}
1146
1147static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
1148{
1149	int r;
1150	struct thin_c *tc = m->tc;
1151	struct pool *pool = tc->pool;
1152	struct bio *discard_parent;
1153	dm_block_t data_end = m->data_block + (m->virt_end - m->virt_begin);
1154
1155	/*
1156	 * Only this thread allocates blocks, so we can be sure that the
1157	 * newly unmapped blocks will not be allocated before the end of
1158	 * the function.
1159	 */
1160	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
1161	if (r) {
1162		metadata_operation_failed(pool, "dm_thin_remove_range", r);
1163		bio_io_error(m->bio);
1164		cell_defer_no_holder(tc, m->cell);
1165		mempool_free(m, &pool->mapping_pool);
1166		return;
1167	}
1168
1169	/*
1170	 * Increment the refcounts on the unmapped blocks.  This prevents a
1171	 * race between the passdown io and reallocation of freed blocks.
1172	 */
1173	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
1174	if (r) {
1175		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
1176		bio_io_error(m->bio);
1177		cell_defer_no_holder(tc, m->cell);
1178		mempool_free(m, &pool->mapping_pool);
1179		return;
1180	}
1181
1182	discard_parent = bio_alloc(GFP_NOIO, 1);
1183	if (!discard_parent) {
1184		DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
1185		       dm_device_name(tc->pool->pool_md));
1186		queue_passdown_pt2(m);
1187
1188	} else {
1189		discard_parent->bi_end_io = passdown_endio;
1190		discard_parent->bi_private = m;
1191
1192		if (m->maybe_shared)
1193			passdown_double_checking_shared_status(m, discard_parent);
1194		else {
1195			struct discard_op op;
1196
1197			begin_discard(&op, tc, discard_parent);
1198			r = issue_discard(&op, m->data_block, data_end);
1199			end_discard(&op, r);
1200		}
1201	}
1202}
1203
1204static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
1205{
1206	int r;
1207	struct thin_c *tc = m->tc;
1208	struct pool *pool = tc->pool;
1209
1210	/*
1211	 * The passdown has completed, so now we can decrement all those
1212	 * unmapped blocks.
1213	 */
1214	r = dm_pool_dec_data_range(pool->pmd, m->data_block,
1215				   m->data_block + (m->virt_end - m->virt_begin));
1216	if (r) {
1217		metadata_operation_failed(pool, "dm_pool_dec_data_range", r);
1218		bio_io_error(m->bio);
1219	} else
1220		bio_endio(m->bio);
1221
1222	cell_defer_no_holder(tc, m->cell);
1223	mempool_free(m, &pool->mapping_pool);
1224}
1225
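/*
 * Splice a batch of prepared mappings off the given list under the pool
 * lock, then run the handler on each entry outside the lock.
 */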
1226static void process_prepared(struct pool *pool, struct list_head *head,
1227			     process_mapping_fn *fn)
1228{
1229	unsigned long flags;
1230	struct list_head maps;
1231	struct dm_thin_new_mapping *m, *tmp;
1232
1233	INIT_LIST_HEAD(&maps);
1234	spin_lock_irqsave(&pool->lock, flags);
1235	list_splice_init(head, &maps);
1236	spin_unlock_irqrestore(&pool->lock, flags);
1237
1238	list_for_each_entry_safe(m, tmp, &maps, list)
1239		(*fn)(m);
1240}
1241
1242/*
1243 * Deferred bio jobs.
1244 */
1245static int io_overlaps_block(struct pool *pool, struct bio *bio)
1246{
1247	return bio->bi_iter.bi_size ==
1248		(pool->sectors_per_block << SECTOR_SHIFT);
1249}
1250
1251static int io_overwrites_block(struct pool *pool, struct bio *bio)
1252{
1253	return (bio_data_dir(bio) == WRITE) &&
1254		io_overlaps_block(pool, bio);
1255}
1256
1257static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
1258			       bio_end_io_t *fn)
1259{
1260	*save = bio->bi_end_io;
1261	bio->bi_end_io = fn;
1262}
1263
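/*
 * The pool keeps one mapping struct preallocated in next_mapping so that
 * callers can report -ENOMEM up front; get_next_mapping() then consumes
 * the preallocation and cannot fail.
 */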
1264static int ensure_next_mapping(struct pool *pool)
1265{
1266	if (pool->next_mapping)
1267		return 0;
1268
1269	pool->next_mapping = mempool_alloc(&pool->mapping_pool, GFP_ATOMIC);
1270
1271	return pool->next_mapping ? 0 : -ENOMEM;
1272}
1273
1274static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
1275{
1276	struct dm_thin_new_mapping *m = pool->next_mapping;
1277
1278	BUG_ON(!pool->next_mapping);
1279
1280	memset(m, 0, sizeof(struct dm_thin_new_mapping));
1281	INIT_LIST_HEAD(&m->list);
1282	m->bio = NULL;
1283
1284	pool->next_mapping = NULL;
1285
1286	return m;
1287}
1288
1289static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
1290		    sector_t begin, sector_t end)
1291{
1292	struct dm_io_region to;
1293
1294	to.bdev = tc->pool_dev->bdev;
1295	to.sector = begin;
1296	to.count = end - begin;
1297
1298	dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
1299}
1300
1301static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
1302				      dm_block_t data_begin,
1303				      struct dm_thin_new_mapping *m)
1304{
1305	struct pool *pool = tc->pool;
1306	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1307
1308	h->overwrite_mapping = m;
1309	m->bio = bio;
1310	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1311	inc_all_io_entry(pool, bio);
1312	remap_and_issue(tc, bio, data_begin);
1313}
1314
1315/*
1316 * A partial copy also needs to zero the uncopied region.
1317 */
1318static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1319			  struct dm_dev *origin, dm_block_t data_origin,
1320			  dm_block_t data_dest,
1321			  struct dm_bio_prison_cell *cell, struct bio *bio,
1322			  sector_t len)
1323{
1324	struct pool *pool = tc->pool;
1325	struct dm_thin_new_mapping *m = get_next_mapping(pool);
1326
1327	m->tc = tc;
1328	m->virt_begin = virt_block;
1329	m->virt_end = virt_block + 1u;
1330	m->data_block = data_dest;
1331	m->cell = cell;
1332
1333	/*
1334	 * quiesce action + copy action + an extra reference held for the
1335	 * duration of this function (we may need to inc later for a
1336	 * partial zero).
1337	 */
1338	atomic_set(&m->prepare_actions, 3);
1339
1340	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
1341		complete_mapping_preparation(m); /* already quiesced */
1342
1343	/*
1344	 * IO to pool_dev remaps to the pool target's data_dev.
1345	 *
1346	 * If the whole block of data is being overwritten, we can issue the
1347	 * bio immediately. Otherwise we use kcopyd to clone the data first.
1348	 */
1349	if (io_overwrites_block(pool, bio))
1350		remap_and_issue_overwrite(tc, bio, data_dest, m);
1351	else {
1352		struct dm_io_region from, to;
1353
1354		from.bdev = origin->bdev;
1355		from.sector = data_origin * pool->sectors_per_block;
1356		from.count = len;
1357
1358		to.bdev = tc->pool_dev->bdev;
1359		to.sector = data_dest * pool->sectors_per_block;
1360		to.count = len;
1361
1362		dm_kcopyd_copy(pool->copier, &from, 1, &to,
1363			       0, copy_complete, m);
1364
1365		/*
1366		 * Do we need to zero a tail region?
1367		 */
1368		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
1369			atomic_inc(&m->prepare_actions);
1370			ll_zero(tc, m,
1371				data_dest * pool->sectors_per_block + len,
1372				(data_dest + 1) * pool->sectors_per_block);
1373		}
1374	}
1375
1376	complete_mapping_preparation(m); /* drop our ref */
1377}
1378
1379static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1380				   dm_block_t data_origin, dm_block_t data_dest,
1381				   struct dm_bio_prison_cell *cell, struct bio *bio)
1382{
1383	schedule_copy(tc, virt_block, tc->pool_dev,
1384		      data_origin, data_dest, cell, bio,
1385		      tc->pool->sectors_per_block);
1386}
1387
1388static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1389			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
1390			  struct bio *bio)
1391{
1392	struct pool *pool = tc->pool;
1393	struct dm_thin_new_mapping *m = get_next_mapping(pool);
1394
1395	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
1396	m->tc = tc;
1397	m->virt_begin = virt_block;
1398	m->virt_end = virt_block + 1u;
1399	m->data_block = data_block;
1400	m->cell = cell;
1401
1402	/*
1403	 * If the whole block of data is being overwritten or we are not
1404	 * zeroing pre-existing data, we can issue the bio immediately.
1405	 * Otherwise we use kcopyd to zero the data first.
1406	 */
1407	if (pool->pf.zero_new_blocks) {
1408		if (io_overwrites_block(pool, bio))
1409			remap_and_issue_overwrite(tc, bio, data_block, m);
1410		else
1411			ll_zero(tc, m, data_block * pool->sectors_per_block,
1412				(data_block + 1) * pool->sectors_per_block);
1413	} else
1414		process_prepared_mapping(m);
1415}
1416
1417static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1418				   dm_block_t data_dest,
1419				   struct dm_bio_prison_cell *cell, struct bio *bio)
1420{
1421	struct pool *pool = tc->pool;
1422	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
1423	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
1424
1425	if (virt_block_end <= tc->origin_size)
1426		schedule_copy(tc, virt_block, tc->origin_dev,
1427			      virt_block, data_dest, cell, bio,
1428			      pool->sectors_per_block);
1429
1430	else if (virt_block_begin < tc->origin_size)
1431		schedule_copy(tc, virt_block, tc->origin_dev,
1432			      virt_block, data_dest, cell, bio,
1433			      tc->origin_size - virt_block_begin);
1434
1435	else
1436		schedule_zero(tc, virt_block, data_dest, cell, bio);
1437}
1438
1439static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1440
1441static void requeue_bios(struct pool *pool);
1442
1443static bool is_read_only_pool_mode(enum pool_mode mode)
1444{
1445	return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
1446}
1447
1448static bool is_read_only(struct pool *pool)
1449{
1450	return is_read_only_pool_mode(get_pool_mode(pool));
1451}
1452
1453static void check_for_metadata_space(struct pool *pool)
1454{
1455	int r;
1456	const char *ooms_reason = NULL;
1457	dm_block_t nr_free;
1458
1459	r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
1460	if (r)
1461		ooms_reason = "Could not get free metadata blocks";
1462	else if (!nr_free)
1463		ooms_reason = "No free metadata blocks";
1464
1465	if (ooms_reason && !is_read_only(pool)) {
1466		DMERR("%s", ooms_reason);
1467		set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
1468	}
1469}
1470
1471static void check_for_data_space(struct pool *pool)
1472{
1473	int r;
1474	dm_block_t nr_free;
1475
1476	if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
1477		return;
1478
1479	r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
1480	if (r)
1481		return;
1482
1483	if (nr_free) {
1484		set_pool_mode(pool, PM_WRITE);
1485		requeue_bios(pool);
1486	}
1487}
1488
1489/*
1490 * A non-zero return indicates read_only or fail_io mode.
1491 * Many callers don't care about the return value.
1492 */
1493static int commit(struct pool *pool)
1494{
1495	int r;
1496
1497	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
1498		return -EINVAL;
1499
1500	r = dm_pool_commit_metadata(pool->pmd);
1501	if (r)
1502		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
1503	else {
1504		check_for_metadata_space(pool);
1505		check_for_data_space(pool);
1506	}
1507
1508	return r;
1509}
1510
1511static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
1512{
1513	unsigned long flags;
1514
1515	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1516		DMWARN("%s: reached low water mark for data device: sending event.",
1517		       dm_device_name(pool->pool_md));
1518		spin_lock_irqsave(&pool->lock, flags);
1519		pool->low_water_triggered = true;
1520		spin_unlock_irqrestore(&pool->lock, flags);
1521		dm_table_event(pool->ti->table);
1522	}
1523}
1524
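/*
 * Allocate a new data block for a thin device.  Commits the metadata if
 * the free count looks exhausted (a commit can release freed blocks) and
 * degrades the pool to PM_OUT_OF_DATA_SPACE, returning -ENOSPC, if space
 * really has run out.
 */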
1525static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1526{
1527	int r;
1528	dm_block_t free_blocks;
1529	struct pool *pool = tc->pool;
1530
1531	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
1532		return -EINVAL;
1533
1534	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1535	if (r) {
1536		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
1537		return r;
1538	}
1539
1540	check_low_water_mark(pool, free_blocks);
1541
1542	if (!free_blocks) {
1543		/*
1544		 * Try to commit to see if that will free up some
1545		 * more space.
1546		 */
1547		r = commit(pool);
1548		if (r)
1549			return r;
1550
1551		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1552		if (r) {
1553			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
1554			return r;
1555		}
1556
1557		if (!free_blocks) {
1558			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
1559			return -ENOSPC;
1560		}
1561	}
1562
1563	r = dm_pool_alloc_data_block(pool->pmd, result);
1564	if (r) {
1565		if (r == -ENOSPC)
1566			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
1567		else
1568			metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
1569		return r;
1570	}
1571
1572	r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
1573	if (r) {
1574		metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
1575		return r;
1576	}
1577
1578	if (!free_blocks) {
1579		/* Let's commit before we use up the metadata reserve. */
1580		r = commit(pool);
1581		if (r)
1582			return r;
1583	}
1584
1585	return 0;
1586}
1587
1588/*
1589 * If we have run out of space, queue bios until the device is
1590 * resumed, presumably after having been reloaded with more space.
1591 */
1592static void retry_on_resume(struct bio *bio)
1593{
1594	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1595	struct thin_c *tc = h->tc;
1596	unsigned long flags;
1597
1598	spin_lock_irqsave(&tc->lock, flags);
1599	bio_list_add(&tc->retry_on_resume_list, bio);
1600	spin_unlock_irqrestore(&tc->lock, flags);
1601}
1602
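/*
 * Decide what to do with a bio the pool cannot service right now: return
 * the status to fail it with, or 0 to have it queued for retry on resume.
 */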
1603static blk_status_t should_error_unserviceable_bio(struct pool *pool)
1604{
1605	enum pool_mode m = get_pool_mode(pool);
1606
1607	switch (m) {
1608	case PM_WRITE:
1609		/* Shouldn't get here */
1610		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
1611		return BLK_STS_IOERR;
1612
1613	case PM_OUT_OF_DATA_SPACE:
1614		return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
1615
1616	case PM_OUT_OF_METADATA_SPACE:
1617	case PM_READ_ONLY:
1618	case PM_FAIL:
1619		return BLK_STS_IOERR;
1620	default:
1621		/* Shouldn't get here */
1622		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
1623		return BLK_STS_IOERR;
1624	}
1625}
1626
1627static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
1628{
1629	blk_status_t error = should_error_unserviceable_bio(pool);
1630
1631	if (error) {
1632		bio->bi_status = error;
1633		bio_endio(bio);
1634	} else
1635		retry_on_resume(bio);
1636}
1637
1638static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
1639{
1640	struct bio *bio;
1641	struct bio_list bios;
1642	blk_status_t error;
1643
1644	error = should_error_unserviceable_bio(pool);
1645	if (error) {
1646		cell_error_with_code(pool, cell, error);
1647		return;
1648	}
1649
1650	bio_list_init(&bios);
1651	cell_release(pool, cell, &bios);
1652
1653	while ((bio = bio_list_pop(&bios)))
1654		retry_on_resume(bio);
1655}
1656
1657static void process_discard_cell_no_passdown(struct thin_c *tc,
1658					     struct dm_bio_prison_cell *virt_cell)
1659{
1660	struct pool *pool = tc->pool;
1661	struct dm_thin_new_mapping *m = get_next_mapping(pool);
1662
1663	/*
1664	 * We don't need to lock the data blocks, since there's no
1665	 * passdown.  We only lock data blocks for allocation and breaking sharing.
1666	 */
1667	m->tc = tc;
1668	m->virt_begin = virt_cell->key.block_begin;
1669	m->virt_end = virt_cell->key.block_end;
1670	m->cell = virt_cell;
1671	m->bio = virt_cell->holder;
1672
1673	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
1674		pool->process_prepared_discard(m);
1675}
1676
1677static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
1678				 struct bio *bio)
1679{
1680	struct pool *pool = tc->pool;
1681
1682	int r;
1683	bool maybe_shared;
1684	struct dm_cell_key data_key;
1685	struct dm_bio_prison_cell *data_cell;
1686	struct dm_thin_new_mapping *m;
1687	dm_block_t virt_begin, virt_end, data_begin;
1688
1689	while (begin != end) {
1690		r = ensure_next_mapping(pool);
1691		if (r)
1692			/* we did our best */
1693			return;
1694
1695		r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
1696					      &data_begin, &maybe_shared);
1697		if (r)
1698			/*
1699			 * Silently fail, letting any mappings we've
1700			 * created complete.
1701			 */
1702			break;
1703
1704		build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
1705		if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
1706			/* contention, we'll give up with this range */
1707			begin = virt_end;
1708			continue;
1709		}
1710
1711		/*
1712		 * IO may still be going to the destination block.  We must
1713		 * quiesce before we can do the removal.
1714		 */
1715		m = get_next_mapping(pool);
1716		m->tc = tc;
1717		m->maybe_shared = maybe_shared;
1718		m->virt_begin = virt_begin;
1719		m->virt_end = virt_end;
1720		m->data_block = data_begin;
1721		m->cell = data_cell;
1722		m->bio = bio;
1723
1724		/*
1725		 * The parent bio must not complete before sub discard bios are
1726		 * chained to it (see end_discard's bio_chain)!
1727		 *
1728		 * This per-mapping bi_remaining increment is paired with
1729		 * the implicit decrement that occurs via bio_endio() in
1730		 * end_discard().
1731		 */
1732		bio_inc_remaining(bio);
1733		if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
1734			pool->process_prepared_discard(m);
1735
1736		begin = virt_end;
1737	}
1738}
1739
1740static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
1741{
1742	struct bio *bio = virt_cell->holder;
1743	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1744
1745	/*
1746	 * The virt_cell will only get freed once the origin bio completes.
1747	 * This means it will remain locked while all the individual
1748	 * passdown bios are in flight.
1749	 */
1750	h->cell = virt_cell;
1751	break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
1752
1753	/*
1754	 * We complete the bio now, knowing that the bi_remaining field
1755	 * will prevent completion until the sub range discards have
1756	 * completed.
1757	 */
1758	bio_endio(bio);
1759}
1760
1761static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1762{
1763	dm_block_t begin, end;
1764	struct dm_cell_key virt_key;
1765	struct dm_bio_prison_cell *virt_cell;
1766
1767	get_bio_block_range(tc, bio, &begin, &end);
1768	if (begin == end) {
1769		/*
1770		 * The discard covers less than a block.
1771		 */
1772		bio_endio(bio);
1773		return;
1774	}
1775
1776	build_key(tc->td, VIRTUAL, begin, end, &virt_key);
1777	if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
1778		/*
1779		 * Potential starvation issue: We're relying on the
1780		 * fs/application being well behaved, and not trying to
1781		 * send IO to a region at the same time as discarding it.
1782		 * If they do this persistently then it's possible this
1783		 * cell will never be granted.
1784		 */
1785		return;
1786
1787	tc->pool->process_discard_cell(tc, virt_cell);
1788}
1789
1790static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1791			  struct dm_cell_key *key,
1792			  struct dm_thin_lookup_result *lookup_result,
1793			  struct dm_bio_prison_cell *cell)
1794{
1795	int r;
1796	dm_block_t data_block;
1797	struct pool *pool = tc->pool;
1798
1799	r = alloc_data_block(tc, &data_block);
1800	switch (r) {
1801	case 0:
1802		schedule_internal_copy(tc, block, lookup_result->block,
1803				       data_block, cell, bio);
1804		break;
1805
1806	case -ENOSPC:
1807		retry_bios_on_resume(pool, cell);
1808		break;
1809
1810	default:
1811		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1812			    __func__, r);
1813		cell_error(pool, cell);
1814		break;
1815	}
1816}
1817
1818static void __remap_and_issue_shared_cell(void *context,
1819					  struct dm_bio_prison_cell *cell)
1820{
1821	struct remap_info *info = context;
1822	struct bio *bio;
1823
1824	while ((bio = bio_list_pop(&cell->bios))) {
1825		if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
1826		    bio_op(bio) == REQ_OP_DISCARD)
1827			bio_list_add(&info->defer_bios, bio);
1828		else {
1829			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1830
1831			h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
1832			inc_all_io_entry(info->tc->pool, bio);
1833			bio_list_add(&info->issue_bios, bio);
1834		}
1835	}
1836}
1837
1838static void remap_and_issue_shared_cell(struct thin_c *tc,
1839					struct dm_bio_prison_cell *cell,
1840					dm_block_t block)
1841{
1842	struct bio *bio;
1843	struct remap_info info;
1844
1845	info.tc = tc;
1846	bio_list_init(&info.defer_bios);
1847	bio_list_init(&info.issue_bios);
1848
1849	cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
1850			   &info, cell);
1851
1852	while ((bio = bio_list_pop(&info.defer_bios)))
1853		thin_defer_bio(tc, bio);
1854
1855	while ((bio = bio_list_pop(&info.issue_bios)))
1856		remap_and_issue(tc, bio, block);
1857}
1858
1859static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1860			       dm_block_t block,
1861			       struct dm_thin_lookup_result *lookup_result,
1862			       struct dm_bio_prison_cell *virt_cell)
1863{
1864	struct dm_bio_prison_cell *data_cell;
1865	struct pool *pool = tc->pool;
1866	struct dm_cell_key key;
1867
1868	/*
1869	 * If cell is already occupied, then sharing is already in the process
1870	 * of being broken so we have nothing further to do here.
1871	 */
1872	build_data_key(tc->td, lookup_result->block, &key);
1873	if (bio_detain(pool, &key, bio, &data_cell)) {
1874		cell_defer_no_holder(tc, virt_cell);
1875		return;
1876	}
1877
1878	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
1879		break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1880		cell_defer_no_holder(tc, virt_cell);
1881	} else {
1882		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1883
1884		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1885		inc_all_io_entry(pool, bio);
1886		remap_and_issue(tc, bio, lookup_result->block);
1887
1888		remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
1889		remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
1890	}
1891}
1892
1893static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1894			    struct dm_bio_prison_cell *cell)
1895{
1896	int r;
1897	dm_block_t data_block;
1898	struct pool *pool = tc->pool;
1899
1900	/*
1901	 * Remap empty bios (flushes) immediately, without provisioning.
1902	 */
1903	if (!bio->bi_iter.bi_size) {
1904		inc_all_io_entry(pool, bio);
1905		cell_defer_no_holder(tc, cell);
1906
1907		remap_and_issue(tc, bio, 0);
1908		return;
1909	}
1910
1911	/*
1912	 * Fill read bios with zeroes and complete them immediately.
1913	 */
1914	if (bio_data_dir(bio) == READ) {
1915		zero_fill_bio(bio);
1916		cell_defer_no_holder(tc, cell);
1917		bio_endio(bio);
1918		return;
1919	}
1920
1921	r = alloc_data_block(tc, &data_block);
1922	switch (r) {
1923	case 0:
1924		if (tc->origin_dev)
1925			schedule_external_copy(tc, block, data_block, cell, bio);
1926		else
1927			schedule_zero(tc, block, data_block, cell, bio);
1928		break;
1929
1930	case -ENOSPC:
1931		retry_bios_on_resume(pool, cell);
1932		break;
1933
1934	default:
1935		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1936			    __func__, r);
1937		cell_error(pool, cell);
1938		break;
1939	}
1940}
1941
1942static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1943{
1944	int r;
1945	struct pool *pool = tc->pool;
1946	struct bio *bio = cell->holder;
1947	dm_block_t block = get_bio_block(tc, bio);
1948	struct dm_thin_lookup_result lookup_result;
1949
1950	if (tc->requeue_mode) {
1951		cell_requeue(pool, cell);
1952		return;
1953	}
1954
1955	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1956	switch (r) {
1957	case 0:
1958		if (lookup_result.shared)
1959			process_shared_bio(tc, bio, block, &lookup_result, cell);
1960		else {
1961			inc_all_io_entry(pool, bio);
1962			remap_and_issue(tc, bio, lookup_result.block);
1963			inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1964		}
1965		break;
1966
1967	case -ENODATA:
1968		if (bio_data_dir(bio) == READ && tc->origin_dev) {
1969			inc_all_io_entry(pool, bio);
1970			cell_defer_no_holder(tc, cell);
1971
1972			if (bio_end_sector(bio) <= tc->origin_size)
1973				remap_to_origin_and_issue(tc, bio);
1974
1975			else if (bio->bi_iter.bi_sector < tc->origin_size) {
1976				zero_fill_bio(bio);
1977				bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1978				remap_to_origin_and_issue(tc, bio);
1979
1980			} else {
1981				zero_fill_bio(bio);
1982				bio_endio(bio);
1983			}
1984		} else
1985			provision_block(tc, bio, block, cell);
1986		break;
1987
1988	default:
1989		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1990			    __func__, r);
1991		cell_defer_no_holder(tc, cell);
1992		bio_io_error(bio);
1993		break;
1994	}
1995}
1996
1997static void process_bio(struct thin_c *tc, struct bio *bio)
1998{
1999	struct pool *pool = tc->pool;
2000	dm_block_t block = get_bio_block(tc, bio);
2001	struct dm_bio_prison_cell *cell;
2002	struct dm_cell_key key;
2003
2004	/*
2005	 * If the cell is already occupied, then the block is already
2006	 * being provisioned so we have nothing further to do here.
2007	 */
2008	build_virtual_key(tc->td, block, &key);
2009	if (bio_detain(pool, &key, bio, &cell))
2010		return;
2011
2012	process_cell(tc, cell);
2013}
2014
2015static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
2016				    struct dm_bio_prison_cell *cell)
2017{
2018	int r;
2019	int rw = bio_data_dir(bio);
2020	dm_block_t block = get_bio_block(tc, bio);
2021	struct dm_thin_lookup_result lookup_result;
2022
2023	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
2024	switch (r) {
2025	case 0:
2026		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
2027			handle_unserviceable_bio(tc->pool, bio);
2028			if (cell)
2029				cell_defer_no_holder(tc, cell);
2030		} else {
2031			inc_all_io_entry(tc->pool, bio);
2032			remap_and_issue(tc, bio, lookup_result.block);
2033			if (cell)
2034				inc_remap_and_issue_cell(tc, cell, lookup_result.block);
2035		}
2036		break;
2037
2038	case -ENODATA:
2039		if (cell)
2040			cell_defer_no_holder(tc, cell);
2041		if (rw != READ) {
2042			handle_unserviceable_bio(tc->pool, bio);
2043			break;
2044		}
2045
2046		if (tc->origin_dev) {
2047			inc_all_io_entry(tc->pool, bio);
2048			remap_to_origin_and_issue(tc, bio);
2049			break;
2050		}
2051
2052		zero_fill_bio(bio);
2053		bio_endio(bio);
2054		break;
2055
2056	default:
2057		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
2058			    __func__, r);
2059		if (cell)
2060			cell_defer_no_holder(tc, cell);
2061		bio_io_error(bio);
2062		break;
2063	}
2064}
2065
2066static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
2067{
2068	__process_bio_read_only(tc, bio, NULL);
2069}
2070
2071static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2072{
2073	__process_bio_read_only(tc, cell->holder, cell);
2074}
2075
2076static void process_bio_success(struct thin_c *tc, struct bio *bio)
2077{
2078	bio_endio(bio);
2079}
2080
2081static void process_bio_fail(struct thin_c *tc, struct bio *bio)
2082{
2083	bio_io_error(bio);
2084}
2085
2086static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2087{
2088	cell_success(tc->pool, cell);
2089}
2090
2091static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2092{
2093	cell_error(tc->pool, cell);
2094}
2095
2096/*
2097 * FIXME: should we also commit due to size of transaction, measured in
2098 * metadata blocks?
2099 */
2100static int need_commit_due_to_time(struct pool *pool)
2101{
2102	return !time_in_range(jiffies, pool->last_commit_jiffies,
2103			      pool->last_commit_jiffies + COMMIT_PERIOD);
2104}
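
/*
 * An illustrative sketch of the size-based commit check the FIXME above
 * asks about.  Both dm_pool_dirty_metadata_blocks() and
 * METADATA_COMMIT_THRESHOLD_BLOCKS are hypothetical names, not part of
 * the dm-thin-metadata API; a real implementation would need the
 * metadata layer to report how large the open transaction has grown.
 */
#if 0
static int need_commit_due_to_size(struct pool *pool)
{
	dm_block_t dirty;

	/* Hypothetical query: how many metadata blocks are dirty? */
	if (dm_pool_dirty_metadata_blocks(pool->pmd, &dirty))
		return 0;	/* on error, fall back to the time-based check */

	return dirty >= METADATA_COMMIT_THRESHOLD_BLOCKS;
}
#endif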
2105
2106#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
2107#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
2108
2109static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
2110{
2111	struct rb_node **rbp, *parent;
2112	struct dm_thin_endio_hook *pbd;
2113	sector_t bi_sector = bio->bi_iter.bi_sector;
2114
2115	rbp = &tc->sort_bio_list.rb_node;
2116	parent = NULL;
2117	while (*rbp) {
2118		parent = *rbp;
2119		pbd = thin_pbd(parent);
2120
2121		if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
2122			rbp = &(*rbp)->rb_left;
2123		else
2124			rbp = &(*rbp)->rb_right;
2125	}
2126
2127	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2128	rb_link_node(&pbd->rb_node, parent, rbp);
2129	rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
2130}
2131
2132static void __extract_sorted_bios(struct thin_c *tc)
2133{
2134	struct rb_node *node;
2135	struct dm_thin_endio_hook *pbd;
2136	struct bio *bio;
2137
2138	for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
2139		pbd = thin_pbd(node);
2140		bio = thin_bio(pbd);
2141
2142		bio_list_add(&tc->deferred_bio_list, bio);
2143		rb_erase(&pbd->rb_node, &tc->sort_bio_list);
2144	}
2145
2146	WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
2147}
2148
2149static void __sort_thin_deferred_bios(struct thin_c *tc)
2150{
2151	struct bio *bio;
2152	struct bio_list bios;
2153
2154	bio_list_init(&bios);
2155	bio_list_merge(&bios, &tc->deferred_bio_list);
2156	bio_list_init(&tc->deferred_bio_list);
2157
2158	/* Sort deferred_bio_list using an rb-tree */
2159	while ((bio = bio_list_pop(&bios)))
2160		__thin_bio_rb_add(tc, bio);
2161
2162	/*
2163	 * Transfer the sorted bios in sort_bio_list back to
2164	 * deferred_bio_list to allow lockless submission of
2165	 * all bios.
2166	 */
2167	__extract_sorted_bios(tc);
2168}
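
/*
 * Sorting the deferred bios by sector before submission presents the
 * data device with a largely sequential stream, which helps request
 * merging when many thin devices interleave IO on the same pool.
 */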
2169
2170static void process_thin_deferred_bios(struct thin_c *tc)
2171{
2172	struct pool *pool = tc->pool;
2173	unsigned long flags;
2174	struct bio *bio;
2175	struct bio_list bios;
2176	struct blk_plug plug;
2177	unsigned count = 0;
2178
2179	if (tc->requeue_mode) {
2180		error_thin_bio_list(tc, &tc->deferred_bio_list,
2181				BLK_STS_DM_REQUEUE);
2182		return;
2183	}
2184
2185	bio_list_init(&bios);
2186
2187	spin_lock_irqsave(&tc->lock, flags);
2188
2189	if (bio_list_empty(&tc->deferred_bio_list)) {
2190		spin_unlock_irqrestore(&tc->lock, flags);
2191		return;
2192	}
2193
2194	__sort_thin_deferred_bios(tc);
2195
2196	bio_list_merge(&bios, &tc->deferred_bio_list);
2197	bio_list_init(&tc->deferred_bio_list);
2198
2199	spin_unlock_irqrestore(&tc->lock, flags);
2200
2201	blk_start_plug(&plug);
2202	while ((bio = bio_list_pop(&bios))) {
2203		/*
2204		 * If we've got no free new_mapping structs, and processing
2205		 * this bio might require one, we pause until there are some
2206		 * prepared mappings to process.
2207		 */
2208		if (ensure_next_mapping(pool)) {
2209			spin_lock_irqsave(&tc->lock, flags);
2210			bio_list_add(&tc->deferred_bio_list, bio);
2211			bio_list_merge(&tc->deferred_bio_list, &bios);
2212			spin_unlock_irqrestore(&tc->lock, flags);
2213			break;
2214		}
2215
2216		if (bio_op(bio) == REQ_OP_DISCARD)
2217			pool->process_discard(tc, bio);
2218		else
2219			pool->process_bio(tc, bio);
2220
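		/*
		 * Periodically (every 128 bios) refresh the throttle and
		 * issue metadata prefetches.
		 */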
2221		if ((count++ & 127) == 0) {
2222			throttle_work_update(&pool->throttle);
2223			dm_pool_issue_prefetches(pool->pmd);
2224		}
2225	}
2226	blk_finish_plug(&plug);
2227}
2228
2229static int cmp_cells(const void *lhs, const void *rhs)
2230{
2231	struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
2232	struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
2233
2234	BUG_ON(!lhs_cell->holder);
2235	BUG_ON(!rhs_cell->holder);
2236
2237	if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
2238		return -1;
2239
2240	if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
2241		return 1;
2242
2243	return 0;
2244}
2245
2246static unsigned sort_cells(struct pool *pool, struct list_head *cells)
2247{
2248	unsigned count = 0;
2249	struct dm_bio_prison_cell *cell, *tmp;
2250
2251	list_for_each_entry_safe(cell, tmp, cells, user_list) {
2252		if (count >= CELL_SORT_ARRAY_SIZE)
2253			break;
2254
2255		pool->cell_sort_array[count++] = cell;
2256		list_del(&cell->user_list);
2257	}
2258
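	/*
	 * cell_sort_array holds pointers, so the element size passed to
	 * sort() is sizeof(struct dm_bio_prison_cell *), not the size of
	 * a cell.
	 */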
2259	sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
2260
2261	return count;
2262}
2263
2264static void process_thin_deferred_cells(struct thin_c *tc)
2265{
2266	struct pool *pool = tc->pool;
2267	unsigned long flags;
2268	struct list_head cells;
2269	struct dm_bio_prison_cell *cell;
2270	unsigned i, j, count;
2271
2272	INIT_LIST_HEAD(&cells);
2273
2274	spin_lock_irqsave(&tc->lock, flags);
2275	list_splice_init(&tc->deferred_cells, &cells);
2276	spin_unlock_irqrestore(&tc->lock, flags);
2277
2278	if (list_empty(&cells))
2279		return;
2280
2281	do {
2282		count = sort_cells(tc->pool, &cells);
2283
2284		for (i = 0; i < count; i++) {
2285			cell = pool->cell_sort_array[i];
2286			BUG_ON(!cell->holder);
2287
2288			/*
2289			 * If we've got no free new_mapping structs, and processing
2290			 * this bio might require one, we pause until there are some
2291			 * prepared mappings to process.
2292			 */
2293			if (ensure_next_mapping(pool)) {
2294				for (j = i; j < count; j++)
2295					list_add(&pool->cell_sort_array[j]->user_list, &cells);
2296
2297				spin_lock_irqsave(&tc->lock, flags);
2298				list_splice(&cells, &tc->deferred_cells);
2299				spin_unlock_irqrestore(&tc->lock, flags);
2300				return;
2301			}
2302
2303			if (bio_op(cell->holder) == REQ_OP_DISCARD)
2304				pool->process_discard_cell(tc, cell);
2305			else
2306				pool->process_cell(tc, cell);
2307		}
2308	} while (!list_empty(&cells));
2309}
2310
2311static void thin_get(struct thin_c *tc);
2312static void thin_put(struct thin_c *tc);
2313
2314/*
2315 * We can't hold rcu_read_lock() around code that can block.  So we
2316 * find a thin with the rcu lock held; bump a refcount; then drop
2317 * the lock.
2318 */
2319static struct thin_c *get_first_thin(struct pool *pool)
2320{
2321	struct thin_c *tc = NULL;
2322
2323	rcu_read_lock();
2324	if (!list_empty(&pool->active_thins)) {
2325		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
2326		thin_get(tc);
2327	}
2328	rcu_read_unlock();
2329
2330	return tc;
2331}
2332
2333static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
2334{
2335	struct thin_c *old_tc = tc;
2336
2337	rcu_read_lock();
2338	list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
2339		thin_get(tc);
2340		thin_put(old_tc);
2341		rcu_read_unlock();
2342		return tc;
2343	}
2344	thin_put(old_tc);
2345	rcu_read_unlock();
2346
2347	return NULL;
2348}
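
/*
 * get_next_thin() always consumes the caller's reference on old_tc, so a
 * traversal can simply loop until NULL is returned:
 *
 *	tc = get_first_thin(pool);
 *	while (tc) {
 *		... process tc ...
 *		tc = get_next_thin(pool, tc);
 *	}
 *
 * This is exactly the pattern process_deferred_bios() below uses.
 */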
2349
2350static void process_deferred_bios(struct pool *pool)
2351{
2352	unsigned long flags;
2353	struct bio *bio;
2354	struct bio_list bios, bio_completions;
2355	struct thin_c *tc;
2356
2357	tc = get_first_thin(pool);
2358	while (tc) {
2359		process_thin_deferred_cells(tc);
2360		process_thin_deferred_bios(tc);
2361		tc = get_next_thin(pool, tc);
2362	}
2363
2364	/*
2365	 * If there are any deferred flush bios, we must commit the metadata
2366	 * before issuing them or signaling their completion.
2367	 */
2368	bio_list_init(&bios);
2369	bio_list_init(&bio_completions);
2370
2371	spin_lock_irqsave(&pool->lock, flags);
2372	bio_list_merge(&bios, &pool->deferred_flush_bios);
2373	bio_list_init(&pool->deferred_flush_bios);
2374
2375	bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
2376	bio_list_init(&pool->deferred_flush_completions);
2377	spin_unlock_irqrestore(&pool->lock, flags);
2378
2379	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
2380	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
2381		return;
2382
2383	if (commit(pool)) {
2384		bio_list_merge(&bios, &bio_completions);
2385
2386		while ((bio = bio_list_pop(&bios)))
2387			bio_io_error(bio);
2388		return;
2389	}
2390	pool->last_commit_jiffies = jiffies;
2391
2392	while ((bio = bio_list_pop(&bio_completions)))
2393		bio_endio(bio);
2394
2395	while ((bio = bio_list_pop(&bios)))
2396		generic_make_request(bio);
2397}
2398
2399static void do_worker(struct work_struct *ws)
2400{
2401	struct pool *pool = container_of(ws, struct pool, worker);
2402
2403	throttle_work_start(&pool->throttle);
2404	dm_pool_issue_prefetches(pool->pmd);
2405	throttle_work_update(&pool->throttle);
2406	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
2407	throttle_work_update(&pool->throttle);
2408	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
2409	throttle_work_update(&pool->throttle);
2410	process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2);
2411	throttle_work_update(&pool->throttle);
2412	process_deferred_bios(pool);
2413	throttle_work_complete(&pool->throttle);
2414}
2415
2416/*
2417 * We want to commit periodically so that not too much
2418 * unwritten data builds up.
2419 */
2420static void do_waker(struct work_struct *ws)
2421{
2422	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
2423	wake_worker(pool);
2424	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
2425}
2426
2427/*
2428 * We're holding onto IO to allow userland time to react.  After the
2429 * timeout either the pool will have been resized (and thus back in
2430 * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE with error_if_no_space set.
2431 */
2432static void do_no_space_timeout(struct work_struct *ws)
2433{
2434	struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2435					 no_space_timeout);
2436
2437	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
2438		pool->pf.error_if_no_space = true;
2439		notify_of_pool_mode_change(pool);
2440		error_retry_list_with_code(pool, BLK_STS_NOSPC);
2441	}
2442}
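
/*
 * no_space_timeout_secs is exposed as the no_space_timeout module
 * parameter, so the hold-off can be tuned at runtime, e.g. (sysfs path
 * assuming the usual dm_thin_pool module name):
 *
 *	echo 0 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 *
 * A value of 0 disables the timeout, queueing IO indefinitely.
 */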
2443
2444/*----------------------------------------------------------------*/
2445
2446struct pool_work {
2447	struct work_struct worker;
2448	struct completion complete;
2449};
2450
2451static struct pool_work *to_pool_work(struct work_struct *ws)
2452{
2453	return container_of(ws, struct pool_work, worker);
2454}
2455
2456static void pool_work_complete(struct pool_work *pw)
2457{
2458	complete(&pw->complete);
2459}
2460
2461static void pool_work_wait(struct pool_work *pw, struct pool *pool,
2462			   void (*fn)(struct work_struct *))
2463{
2464	INIT_WORK_ONSTACK(&pw->worker, fn);
2465	init_completion(&pw->complete);
2466	queue_work(pool->wq, &pw->worker);
2467	wait_for_completion(&pw->complete);
2468}
2469
2470/*----------------------------------------------------------------*/
2471
2472struct noflush_work {
2473	struct pool_work pw;
2474	struct thin_c *tc;
2475};
2476
2477static struct noflush_work *to_noflush(struct work_struct *ws)
2478{
2479	return container_of(to_pool_work(ws), struct noflush_work, pw);
2480}
2481
2482static void do_noflush_start(struct work_struct *ws)
2483{
2484	struct noflush_work *w = to_noflush(ws);
2485	w->tc->requeue_mode = true;
2486	requeue_io(w->tc);
2487	pool_work_complete(&w->pw);
2488}
2489
2490static void do_noflush_stop(struct work_struct *ws)
2491{
2492	struct noflush_work *w = to_noflush(ws);
2493	w->tc->requeue_mode = false;
2494	pool_work_complete(&w->pw);
2495}
2496
2497static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2498{
2499	struct noflush_work w;
2500
2501	w.tc = tc;
2502	pool_work_wait(&w.pw, tc->pool, fn);
2503}
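
/*
 * Usage sketch: the thin target's suspend hooks drive the requeue_mode
 * flag through the pool's workqueue so it cannot race with the worker,
 * e.g. (mirroring the upstream thin_presuspend()):
 *
 *	if (dm_noflush_suspending(ti))
 *		noflush_work(tc, do_noflush_start);
 */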
2504
2505/*----------------------------------------------------------------*/
2506
2507static bool passdown_enabled(struct pool_c *pt)
2508{
2509	return pt->adjusted_pf.discard_passdown;
2510}
2511
2512static void set_discard_callbacks(struct pool *pool)
2513{
2514	struct pool_c *pt = pool->ti->private;
2515
2516	if (passdown_enabled(pt)) {
2517		pool->process_discard_cell = process_discard_cell_passdown;
2518		pool->process_prepared_discard = process_prepared_discard_passdown_pt1;
2519		pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2;
2520	} else {
2521		pool->process_discard_cell = process_discard_cell_no_passdown;
2522		pool->process_prepared_discard = process_prepared_discard_no_passdown;
2523	}
2524}
2525
2526static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2527{
2528	struct pool_c *pt = pool->ti->private;
2529	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
2530	enum pool_mode old_mode = get_pool_mode(pool);
2531	unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ;
2532
2533	/*
2534	 * Never allow the pool to transition to PM_WRITE mode if user
2535	 * intervention is required to verify metadata and data consistency.
2536	 */
2537	if (new_mode == PM_WRITE && needs_check) {
2538		DMERR("%s: unable to switch pool to write mode until repaired.",
2539		      dm_device_name(pool->pool_md));
2540		if (old_mode != new_mode)
2541			new_mode = old_mode;
2542		else
2543			new_mode = PM_READ_ONLY;
2544	}
2545	/*
2546	 * If we were in PM_FAIL mode, rollback of metadata failed.  We're
2547	 * not going to recover without a thin_repair.  So we never let the
2548	 * pool move out of the old mode.
2549	 */
2550	if (old_mode == PM_FAIL)
2551		new_mode = old_mode;
2552
2553	switch (new_mode) {
2554	case PM_FAIL:
2555		dm_pool_metadata_read_only(pool->pmd);
2556		pool->process_bio = process_bio_fail;
2557		pool->process_discard = process_bio_fail;
2558		pool->process_cell = process_cell_fail;
2559		pool->process_discard_cell = process_cell_fail;
2560		pool->process_prepared_mapping = process_prepared_mapping_fail;
2561		pool->process_prepared_discard = process_prepared_discard_fail;
2562
2563		error_retry_list(pool);
2564		break;
2565
2566	case PM_OUT_OF_METADATA_SPACE:
2567	case PM_READ_ONLY:
2568		dm_pool_metadata_read_only(pool->pmd);
2569		pool->process_bio = process_bio_read_only;
2570		pool->process_discard = process_bio_success;
2571		pool->process_cell = process_cell_read_only;
2572		pool->process_discard_cell = process_cell_success;
2573		pool->process_prepared_mapping = process_prepared_mapping_fail;
2574		pool->process_prepared_discard = process_prepared_discard_success;
2575
2576		error_retry_list(pool);
2577		break;
2578
2579	case PM_OUT_OF_DATA_SPACE:
2580		/*
2581		 * Ideally we'd never hit this state; the low water mark
2582		 * would trigger userland to extend the pool before we
2583		 * completely run out of data space.  However, many small
2584		 * IOs to unprovisioned space can consume data space at an
2585		 * alarming rate.  Adjust your low water mark if you're
2586		 * frequently seeing this mode.
2587		 */
2588		pool->out_of_data_space = true;
2589		pool->process_bio = process_bio_read_only;
2590		pool->process_discard = process_discard_bio;
2591		pool->process_cell = process_cell_read_only;
2592		pool->process_prepared_mapping = process_prepared_mapping;
2593		set_discard_callbacks(pool);
2594
2595		if (!pool->pf.error_if_no_space && no_space_timeout)
2596			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
2597		break;
2598
2599	case PM_WRITE:
2600		if (old_mode == PM_OUT_OF_DATA_SPACE)
2601			cancel_delayed_work_sync(&pool->no_space_timeout);
2602		pool->out_of_data_space = false;
2603		pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
2604		dm_pool_metadata_read_write(pool->pmd);
2605		pool->process_bio = process_bio;
2606		pool->process_discard = process_discard_bio;
2607		pool->process_cell = process_cell;
2608		pool->process_prepared_mapping = process_prepared_mapping;
2609		set_discard_callbacks(pool);
2610		break;
2611	}
2612
2613	pool->pf.mode = new_mode;
2614	/*
2615	 * The pool mode may have changed, sync it so bind_control_target()
2616	 * doesn't cause an unexpected mode transition on resume.
2617	 */
2618	pt->adjusted_pf.mode = new_mode;
2619
2620	if (old_mode != new_mode)
2621		notify_of_pool_mode_change(pool);
2622}
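
/*
 * Summary of the processing callbacks installed above:
 *
 *   mode                      bios       discards          prepared mappings
 *   PM_WRITE                  normal     normal            normal
 *   PM_OUT_OF_DATA_SPACE      read-only  normal            normal
 *   PM_OUT_OF_METADATA_SPACE
 *   / PM_READ_ONLY            read-only  succeed (no-op)   fail
 *   PM_FAIL                   fail       fail              fail
 */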
2623
2624static void abort_transaction(struct pool *pool)
2625{
2626	const char *dev_name = dm_device_name(pool->pool_md);
2627
2628	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
2629	if (dm_pool_abort_metadata(pool->pmd)) {
2630		DMERR("%s: failed to abort metadata transaction", dev_name);
2631		set_pool_mode(pool, PM_FAIL);
2632	}
2633
2634	if (dm_pool_metadata_set_needs_check(pool->pmd)) {
2635		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
2636		set_pool_mode(pool, PM_FAIL);
2637	}
2638}
2639
2640static void metadata_operation_failed(struct pool *pool, const char *op, int r)
2641{
2642	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
2643		    dm_device_name(pool->pool_md), op, r);
2644
2645	abort_transaction(pool);
2646	set_pool_mode(pool, PM_READ_ONLY);
2647}
2648
2649/*----------------------------------------------------------------*/
2650
2651/*
2652 * Mapping functions.
2653 */
2654
2655/*
2656 * Called only while mapping a thin bio to hand it over to the workqueue.
2657 */
2658static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2659{
2660	unsigned long flags;
2661	struct pool *pool = tc->pool;
2662
2663	spin_lock_irqsave(&tc->lock, flags);
2664	bio_list_add(&tc->deferred_bio_list, bio);
2665	spin_unlock_irqrestore(&tc->lock, flags);
2666
2667	wake_worker(pool);
2668}
2669
2670static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2671{
2672	struct pool *pool = tc->pool;
2673
2674	throttle_lock(&pool->throttle);
2675	thin_defer_bio(tc, bio);
2676	throttle_unlock(&pool->throttle);
2677}
2678
2679static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2680{
2681	unsigned long flags;
2682	struct pool *pool = tc->pool;
2683
2684	throttle_lock(&pool->throttle);
2685	spin_lock_irqsave(&tc->lock, flags);
2686	list_add_tail(&cell->user_list, &tc->deferred_cells);
2687	spin_unlock_irqrestore(&tc->lock, flags);
2688	throttle_unlock(&pool->throttle);
2689
2690	wake_worker(pool);
2691}
2692
2693static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
2694{
2695	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2696
2697	h->tc = tc;
2698	h->shared_read_entry = NULL;
2699	h->all_io_entry = NULL;
2700	h->overwrite_mapping = NULL;
2701	h->cell = NULL;
2702}
2703
2704/*
2705 * Non-blocking function called from the thin target's map function.
2706 */
2707static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2708{
2709	int r;
2710	struct thin_c *tc = ti->private;
2711	dm_block_t block = get_bio_block(tc, bio);
2712	struct dm_thin_device *td = tc->td;
2713	struct dm_thin_lookup_result result;
2714	struct dm_bio_prison_cell *virt_cell, *data_cell;
2715	struct dm_cell_key key;
2716
2717	thin_hook_bio(tc, bio);
2718
2719	if (tc->requeue_mode) {
2720		bio->bi_status = BLK_STS_DM_REQUEUE;
2721		bio_endio(bio);
2722		return DM_MAPIO_SUBMITTED;
2723	}
2724
2725	if (get_pool_mode(tc->pool) == PM_FAIL) {
2726		bio_io_error(bio);
2727		return DM_MAPIO_SUBMITTED;
2728	}
2729
2730	if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
2731		thin_defer_bio_with_throttle(tc, bio);
2732		return DM_MAPIO_SUBMITTED;
2733	}
2734
2735	/*
2736	 * We must hold the virtual cell before doing the lookup, otherwise
2737	 * there's a race with discard.
2738	 */
2739	build_virtual_key(tc->td, block, &key);
2740	if (bio_detain(tc->pool, &key, bio, &virt_cell))
2741		return DM_MAPIO_SUBMITTED;
2742
2743	r = dm_thin_find_block(td, block, 0, &result);
2744
2745	/*
2746	 * Note that we defer readahead too.
2747	 */
2748	switch (r) {
2749	case 0:
2750		if (unlikely(result.shared)) {
2751			/*
2752			 * We have a race condition here between the
2753			 * result.shared value returned by the lookup and
2754			 * snapshot creation, which may cause new
2755			 * sharing.
2756			 *
2757			 * To avoid this, always quiesce the origin before
2758			 * taking the snap.  You want to do this anyway to
2759			 * ensure a consistent application view
2760			 * (i.e. lockfs).
2761			 *
2762			 * More distant ancestors are irrelevant. The
2763			 * shared flag will be set in their case.
2764			 */
2765			thin_defer_cell(tc, virt_cell);
2766			return DM_MAPIO_SUBMITTED;
2767		}
2768
2769		build_data_key(tc->td, result.block, &key);
2770		if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2771			cell_defer_no_holder(tc, virt_cell);
2772			return DM_MAPIO_SUBMITTED;
2773		}
2774
2775		inc_all_io_entry(tc->pool, bio);
2776		cell_defer_no_holder(tc, data_cell);
2777		cell_defer_no_holder(tc, virt_cell);
2778
2779		remap(tc, bio, result.block);
2780		return DM_MAPIO_REMAPPED;
2781
2782	case -ENODATA:
2783	case -EWOULDBLOCK:
2784		thin_defer_cell(tc, virt_cell);
2785		return DM_MAPIO_SUBMITTED;
2786
2787	default:
2788		/*
2789		 * Must always call bio_io_error on failure.
2790		 * dm_thin_find_block can fail with -EINVAL if the
2791		 * pool is switched to fail-io mode.
2792		 */
2793		bio_io_error(bio);
2794		cell_defer_no_holder(tc, virt_cell);
2795		return DM_MAPIO_SUBMITTED;
2796	}
2797}
2798
2799static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2800{
2801	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
2802	struct request_queue *q;
2803
2804	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
2805		return 1;
2806
2807	q = bdev_get_queue(pt->data_dev->bdev);
2808	return bdi_congested(q->backing_dev_info, bdi_bits);
2809}
2810
2811static void requeue_bios(struct pool *pool)
2812{
2813	unsigned long flags;
2814	struct thin_c *tc;
2815
2816	rcu_read_lock();
2817	list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2818		spin_lock_irqsave(&tc->lock, flags);
2819		bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2820		bio_list_init(&tc->retry_on_resume_list);
2821		spin_unlock_irqrestore(&tc->lock, flags);
2822	}
2823	rcu_read_unlock();
2824}
2825
2826/*----------------------------------------------------------------
2827 * Binding of control targets to a pool object
2828 *--------------------------------------------------------------*/
2829static bool data_dev_supports_discard(struct pool_c *pt)
2830{
2831	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2832
2833	return q && blk_queue_discard(q);
2834}
2835
2836static bool is_factor(sector_t block_size, uint32_t n)
2837{
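	/* sector_div() divides block_size in place and returns the remainder. */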
2838	return !sector_div(block_size, n);
2839}
2840
2841/*
2842 * If discard_passdown was enabled, verify that the data device
2843 * supports discards.  Disable discard_passdown if not.
2844 */
2845static void disable_passdown_if_not_supported(struct pool_c *pt)
2846{
2847	struct pool *pool = pt->pool;
2848	struct block_device *data_bdev = pt->data_dev->bdev;
2849	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
2850	const char *reason = NULL;
2851	char buf[BDEVNAME_SIZE];
2852
2853	if (!pt->adjusted_pf.discard_passdown)
2854		return;
2855
2856	if (!data_dev_supports_discard(pt))
2857		reason = "discard unsupported";
2858
2859	else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2860		reason = "max discard sectors smaller than a block";
2861
2862	if (reason) {
2863		DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
2864		pt->adjusted_pf.discard_passdown = false;
2865	}
2866}
2867
2868static int bind_control_target(struct pool *pool, struct dm_target *ti)
2869{
2870	struct pool_c *pt = ti->private;
2871
2872	/*
2873	 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
2874	 */
2875	enum pool_mode old_mode = get_pool_mode(pool);
2876	enum pool_mode new_mode = pt->adjusted_pf.mode;
2877
2878	/*
2879	 * Don't change the pool's mode until set_pool_mode() below.
2880	 * Otherwise the pool's process_* function pointers may
2881	 * not match the desired pool mode.
2882	 */
2883	pt->adjusted_pf.mode = old_mode;
2884
2885	pool->ti = ti;
2886	pool->pf = pt->adjusted_pf;
2887	pool->low_water_blocks = pt->low_water_blocks;
2888
2889	set_pool_mode(pool, new_mode);
2890
2891	return 0;
2892}
2893
2894static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2895{
2896	if (pool->ti == ti)
2897		pool->ti = NULL;
2898}
2899
2900/*----------------------------------------------------------------
2901 * Pool creation
2902 *--------------------------------------------------------------*/
2903/* Initialize pool features. */
2904static void pool_features_init(struct pool_features *pf)
2905{
2906	pf->mode = PM_WRITE;
2907	pf->zero_new_blocks = true;
2908	pf->discard_enabled = true;
2909	pf->discard_passdown = true;
2910	pf->error_if_no_space = false;
2911}
2912
2913static void __pool_destroy(struct pool *pool)
2914{
2915	__pool_table_remove(pool);
2916
2917	vfree(pool->cell_sort_array);
2918	if (dm_pool_metadata_close(pool->pmd) < 0)
2919		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2920
2921	dm_bio_prison_destroy(pool->prison);
2922	dm_kcopyd_client_destroy(pool->copier);
2923
2924	if (pool->wq)
2925		destroy_workqueue(pool->wq);
2926
2927	if (pool->next_mapping)
2928		mempool_free(pool->next_mapping, &pool->mapping_pool);
2929	mempool_exit(&pool->mapping_pool);
2930	dm_deferred_set_destroy(pool->shared_read_ds);
2931	dm_deferred_set_destroy(pool->all_io_ds);
2932	kfree(pool);
2933}
2934
2935static struct kmem_cache *_new_mapping_cache;
2936
2937static struct pool *pool_create(struct mapped_device *pool_md,
2938				struct block_device *metadata_dev,
2939				unsigned long block_size,
2940				int read_only, char **error)
2941{
2942	int r;
2943	void *err_p;
2944	struct pool *pool;
2945	struct dm_pool_metadata *pmd;
2946	bool format_device = read_only ? false : true;
2947
2948	pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
2949	if (IS_ERR(pmd)) {
2950		*error = "Error creating metadata object";
2951		return (struct pool *)pmd;
2952	}
2953
2954	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
2955	if (!pool) {
2956		*error = "Error allocating memory for pool";
2957		err_p = ERR_PTR(-ENOMEM);
2958		goto bad_pool;
2959	}
2960
2961	pool->pmd = pmd;
2962	pool->sectors_per_block = block_size;
2963	if (block_size & (block_size - 1))
2964		pool->sectors_per_block_shift = -1;
2965	else
2966		pool->sectors_per_block_shift = __ffs(block_size);
2967	pool->low_water_blocks = 0;
2968	pool_features_init(&pool->pf);
2969	pool->prison = dm_bio_prison_create();
2970	if (!pool->prison) {
2971		*error = "Error creating pool's bio prison";
2972		err_p = ERR_PTR(-ENOMEM);
2973		goto bad_prison;
2974	}
2975
2976	pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2977	if (IS_ERR(pool->copier)) {
2978		r = PTR_ERR(pool->copier);
2979		*error = "Error creating pool's kcopyd client";
2980		err_p = ERR_PTR(r);
2981		goto bad_kcopyd_client;
2982	}
2983
2984	/*
2985	 * Create a single-threaded workqueue that will service all devices
2986	 * that use this metadata.
2987	 */
2988	pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2989	if (!pool->wq) {
2990		*error = "Error creating pool's workqueue";
2991		err_p = ERR_PTR(-ENOMEM);
2992		goto bad_wq;
2993	}
2994
2995	throttle_init(&pool->throttle);
2996	INIT_WORK(&pool->worker, do_worker);
2997	INIT_DELAYED_WORK(&pool->waker, do_waker);
2998	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
2999	spin_lock_init(&pool->lock);
3000	bio_list_init(&pool->deferred_flush_bios);
3001	bio_list_init(&pool->deferred_flush_completions);
3002	INIT_LIST_HEAD(&pool->prepared_mappings);
3003	INIT_LIST_HEAD(&pool->prepared_discards);
3004	INIT_LIST_HEAD(&pool->prepared_discards_pt2);
3005	INIT_LIST_HEAD(&pool->active_thins);
3006	pool->low_water_triggered = false;
3007	pool->suspended = true;
3008	pool->out_of_data_space = false;
3009
3010	pool->shared_read_ds = dm_deferred_set_create();
3011	if (!pool->shared_read_ds) {
3012		*error = "Error creating pool's shared read deferred set";
3013		err_p = ERR_PTR(-ENOMEM);
3014		goto bad_shared_read_ds;
3015	}
3016
3017	pool->all_io_ds = dm_deferred_set_create();
3018	if (!pool->all_io_ds) {
3019		*error = "Error creating pool's all io deferred set";
3020		err_p = ERR_PTR(-ENOMEM);
3021		goto bad_all_io_ds;
3022	}
3023
3024	pool->next_mapping = NULL;
3025	r = mempool_init_slab_pool(&pool->mapping_pool, MAPPING_POOL_SIZE,
3026				   _new_mapping_cache);
3027	if (r) {
3028		*error = "Error creating pool's mapping mempool";
3029		err_p = ERR_PTR(r);
3030		goto bad_mapping_pool;
3031	}
3032
3033	pool->cell_sort_array =
3034		vmalloc(array_size(CELL_SORT_ARRAY_SIZE,
3035				   sizeof(*pool->cell_sort_array)));
3036	if (!pool->cell_sort_array) {
3037		*error = "Error allocating cell sort array";
3038		err_p = ERR_PTR(-ENOMEM);
3039		goto bad_sort_array;
3040	}
3041
3042	pool->ref_count = 1;
3043	pool->last_commit_jiffies = jiffies;
3044	pool->pool_md = pool_md;
3045	pool->md_dev = metadata_dev;
3046	__pool_table_insert(pool);
3047
3048	return pool;
3049
3050bad_sort_array:
3051	mempool_exit(&pool->mapping_pool);
3052bad_mapping_pool:
3053	dm_deferred_set_destroy(pool->all_io_ds);
3054bad_all_io_ds:
3055	dm_deferred_set_destroy(pool->shared_read_ds);
3056bad_shared_read_ds:
3057	destroy_workqueue(pool->wq);
3058bad_wq:
3059	dm_kcopyd_client_destroy(pool->copier);
3060bad_kcopyd_client:
3061	dm_bio_prison_destroy(pool->prison);
3062bad_prison:
3063	kfree(pool);
3064bad_pool:
3065	if (dm_pool_metadata_close(pmd))
3066		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
3067
3068	return err_p;
3069}
3070
3071static void __pool_inc(struct pool *pool)
3072{
3073	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
3074	pool->ref_count++;
3075}
3076
3077static void __pool_dec(struct pool *pool)
3078{
3079	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
3080	BUG_ON(!pool->ref_count);
3081	if (!--pool->ref_count)
3082		__pool_destroy(pool);
3083}
3084
3085static struct pool *__pool_find(struct mapped_device *pool_md,
3086				struct block_device *metadata_dev,
3087				unsigned long block_size, int read_only,
3088				char **error, int *created)
3089{
3090	struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
3091
3092	if (pool) {
3093		if (pool->pool_md != pool_md) {
3094			*error = "metadata device already in use by a pool";
3095			return ERR_PTR(-EBUSY);
3096		}
3097		__pool_inc(pool);
3098
3099	} else {
3100		pool = __pool_table_lookup(pool_md);
3101		if (pool) {
3102			if (pool->md_dev != metadata_dev) {
3103				*error = "different pool cannot replace a pool";
3104				return ERR_PTR(-EINVAL);
3105			}
3106			__pool_inc(pool);
3107
3108		} else {
3109			pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
3110			*created = 1;
3111		}
3112	}
3113
3114	return pool;
3115}
3116
3117/*----------------------------------------------------------------
3118 * Pool target methods
3119 *--------------------------------------------------------------*/
3120static void pool_dtr(struct dm_target *ti)
3121{
3122	struct pool_c *pt = ti->private;
3123
3124	mutex_lock(&dm_thin_pool_table.mutex);
3125
3126	unbind_control_target(pt->pool, ti);
3127	__pool_dec(pt->pool);
3128	dm_put_device(ti, pt->metadata_dev);
3129	dm_put_device(ti, pt->data_dev);
3130	kfree(pt);
3131
3132	mutex_unlock(&dm_thin_pool_table.mutex);
3133}
3134
3135static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
3136			       struct dm_target *ti)
3137{
3138	int r;
3139	unsigned argc;
3140	const char *arg_name;
3141
3142	static const struct dm_arg _args[] = {
3143		{0, 4, "Invalid number of pool feature arguments"},
3144	};
3145
3146	/*
3147	 * No feature arguments supplied.
3148	 */
3149	if (!as->argc)
3150		return 0;
3151
3152	r = dm_read_arg_group(_args, as, &argc, &ti->error);
3153	if (r)
3154		return -EINVAL;
3155
3156	while (argc && !r) {
3157		arg_name = dm_shift_arg(as);
3158		argc--;
3159
3160		if (!strcasecmp(arg_name, "skip_block_zeroing"))
3161			pf->zero_new_blocks = false;
3162
3163		else if (!strcasecmp(arg_name, "ignore_discard"))
3164			pf->discard_enabled = false;
3165
3166		else if (!strcasecmp(arg_name, "no_discard_passdown"))
3167			pf->discard_passdown = false;
3168
3169		else if (!strcasecmp(arg_name, "read_only"))
3170			pf->mode = PM_READ_ONLY;
3171
3172		else if (!strcasecmp(arg_name, "error_if_no_space"))
3173			pf->error_if_no_space = true;
3174
3175		else {
3176			ti->error = "Unrecognised pool feature requested";
3177			r = -EINVAL;
3178			break;
3179		}
3180	}
3181
3182	return r;
3183}
3184
3185static void metadata_low_callback(void *context)
3186{
3187	struct pool *pool = context;
3188
3189	DMWARN("%s: reached low water mark for metadata device: sending event.",
3190	       dm_device_name(pool->pool_md));
3191
3192	dm_table_event(pool->ti->table);
3193}
3194
3195static sector_t get_dev_size(struct block_device *bdev)
3196{
3197	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
3198}
3199
3200static void warn_if_metadata_device_too_big(struct block_device *bdev)
3201{
3202	sector_t metadata_dev_size = get_dev_size(bdev);
3203	char buffer[BDEVNAME_SIZE];
3204
3205	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
3206		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
3207		       bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
3208}
3209
3210static sector_t get_metadata_dev_size(struct block_device *bdev)
3211{
3212	sector_t metadata_dev_size = get_dev_size(bdev);
3213
3214	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
3215		metadata_dev_size = THIN_METADATA_MAX_SECTORS;
3216
3217	return metadata_dev_size;
3218}
3219
3220static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
3221{
3222	sector_t metadata_dev_size = get_metadata_dev_size(bdev);
3223
3224	sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
3225
3226	return metadata_dev_size;
3227}
3228
3229/*
3230 * When a metadata threshold is crossed a dm event is triggered, and
3231 * userland should respond by growing the metadata device.  We could let
3232 * userland set the threshold, like we do with the data threshold, but I'm
3233 * not sure they know enough to do this well.
3234 */
3235static dm_block_t calc_metadata_threshold(struct pool_c *pt)
3236{
3237	/*
3238	 * 4M is ample for all ops with the possible exception of thin
3239	 * device deletion, which is harmless if it fails (just retry the
3240	 * delete after you've grown the device).
3241	 */
3242	dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
3243	return min((dm_block_t)1024ULL /* 4M */, quarter);
3244}
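
/*
 * Worked example: a 16MB metadata device is 32768 sectors, i.e. 4096
 * metadata blocks of THIN_METADATA_BLOCK_SIZE (4KB) each.  A quarter of
 * that is 1024 blocks, so the threshold is min(1024, 1024) = 1024 blocks
 * = 4M.  Larger metadata devices are capped at the same 4M threshold.
 */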
3245
3246/*
3247 * thin-pool <metadata dev> <data dev>
3248 *	     <data block size (sectors)>
3249 *	     <low water mark (blocks)>
3250 *	     [<#feature args> [<arg>]*]
3251 *
3252 * Optional feature arguments are:
3253 *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
3254 *	     ignore_discard: disable discard
3255 *	     no_discard_passdown: don't pass discards down to the data device
3256 *	     read_only: Don't allow any changes to be made to the pool metadata.
3257 *	     error_if_no_space: error IOs, instead of queueing, if no space.
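 *
 * An example table line (values illustrative), loaded with dmsetup:
 *	dmsetup create pool --table "0 20971520 thin-pool
 *		/dev/mapper/pool_metadata /dev/mapper/pool_data
 *		128 32768 1 skip_block_zeroing"
 * maps 10GiB, uses 64KB (128-sector) data blocks, raises an event when
 * free space drops below 32768 blocks, and skips zeroing of new blocks.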
3258 */
3259static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
3260{
3261	int r, pool_created = 0;
3262	struct pool_c *pt;
3263	struct pool *pool;
3264	struct pool_features pf;
3265	struct dm_arg_set as;
3266	struct dm_dev *data_dev;
3267	unsigned long block_size;
3268	dm_block_t low_water_blocks;
3269	struct dm_dev *metadata_dev;
3270	fmode_t metadata_mode;
3271
3272	/*
3273	 * FIXME Remove validation from scope of lock.
3274	 */
3275	mutex_lock(&dm_thin_pool_table.mutex);
3276
3277	if (argc < 4) {
3278		ti->error = "Invalid argument count";
3279		r = -EINVAL;
3280		goto out_unlock;
3281	}
3282
3283	as.argc = argc;
3284	as.argv = argv;
3285
3286	/* make sure metadata and data are different devices */
3287	if (!strcmp(argv[0], argv[1])) {
3288		ti->error = "Error setting metadata or data device";
3289		r = -EINVAL;
3290		goto out_unlock;
3291	}
3292
3293	/*
3294	 * Set default pool features.
3295	 */
3296	pool_features_init(&pf);
3297
3298	dm_consume_args(&as, 4);
3299	r = parse_pool_features(&as, &pf, ti);
3300	if (r)
3301		goto out_unlock;
3302
3303	metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
3304	r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
3305	if (r) {
3306		ti->error = "Error opening metadata block device";
3307		goto out_unlock;
3308	}
3309	warn_if_metadata_device_too_big(metadata_dev->bdev);
3310
3311	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
3312	if (r) {
3313		ti->error = "Error getting data device";
3314		goto out_metadata;
3315	}
3316
3317	if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
3318	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
3319	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
3320	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
3321		ti->error = "Invalid block size";
3322		r = -EINVAL;
3323		goto out;
3324	}
3325
3326	if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
3327		ti->error = "Invalid low water mark";
3328		r = -EINVAL;
3329		goto out;
3330	}
3331
3332	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
3333	if (!pt) {
3334		r = -ENOMEM;
3335		goto out;
3336	}
3337
3338	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
3339			   block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
3340	if (IS_ERR(pool)) {
3341		r = PTR_ERR(pool);
3342		goto out_free_pt;
3343	}
3344
3345	/*
3346	 * 'pool_created' reflects whether this is the first table load.
3347	 * Top level discard support is not allowed to be changed after
3348	 * initial load.  This would require a pool reload to trigger thin
3349	 * device changes.
3350	 */
3351	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
3352		ti->error = "Discard support cannot be disabled once enabled";
3353		r = -EINVAL;
3354		goto out_flags_changed;
3355	}
3356
3357	pt->pool = pool;
3358	pt->ti = ti;
3359	pt->metadata_dev = metadata_dev;
3360	pt->data_dev = data_dev;
3361	pt->low_water_blocks = low_water_blocks;
3362	pt->adjusted_pf = pt->requested_pf = pf;
3363	ti->num_flush_bios = 1;
3364
3365	/*
3366	 * Only need to enable discards if the pool should pass
3367	 * them down to the data device.  The thin device's discard
3368	 * processing will cause mappings to be removed from the btree.
3369	 */
3370	if (pf.discard_enabled && pf.discard_passdown) {
3371		ti->num_discard_bios = 1;
3372
3373		/*
3374		 * Setting 'discards_supported' circumvents the normal
3375		 * stacking of discard limits (this keeps the pool and
3376		 * thin devices' discard limits consistent).
3377		 */
3378		ti->discards_supported = true;
3379	}
3380	ti->private = pt;
3381
3382	r = dm_pool_register_metadata_threshold(pt->pool->pmd,
3383						calc_metadata_threshold(pt),
3384						metadata_low_callback,
3385						pool);
3386	if (r)
3387		goto out_flags_changed;
3388
3389	pt->callbacks.congested_fn = pool_is_congested;
3390	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
3391
3392	mutex_unlock(&dm_thin_pool_table.mutex);
3393
3394	return 0;
3395
3396out_flags_changed:
3397	__pool_dec(pool);
3398out_free_pt:
3399	kfree(pt);
3400out:
3401	dm_put_device(ti, data_dev);
3402out_metadata:
3403	dm_put_device(ti, metadata_dev);
3404out_unlock:
3405	mutex_unlock(&dm_thin_pool_table.mutex);
3406
3407	return r;
3408}
3409
3410static int pool_map(struct dm_target *ti, struct bio *bio)
3411{
3412	int r;
3413	struct pool_c *pt = ti->private;
3414	struct pool *pool = pt->pool;
3415	unsigned long flags;
3416
3417	/*
3418	 * As this is a singleton target, ti->begin is always zero.
3419	 */
3420	spin_lock_irqsave(&pool->lock, flags);
3421	bio_set_dev(bio, pt->data_dev->bdev);
3422	r = DM_MAPIO_REMAPPED;
3423	spin_unlock_irqrestore(&pool->lock, flags);
3424
3425	return r;
3426}
3427
3428static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
3429{
3430	int r;
3431	struct pool_c *pt = ti->private;
3432	struct pool *pool = pt->pool;
3433	sector_t data_size = ti->len;
3434	dm_block_t sb_data_size;
3435
3436	*need_commit = false;
3437
3438	(void) sector_div(data_size, pool->sectors_per_block);
3439
3440	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
3441	if (r) {
3442		DMERR("%s: failed to retrieve data device size",
3443		      dm_device_name(pool->pool_md));
3444		return r;
3445	}
3446
3447	if (data_size < sb_data_size) {
3448		DMERR("%s: pool target (%llu blocks) too small: expected %llu",
3449		      dm_device_name(pool->pool_md),
3450		      (unsigned long long)data_size, sb_data_size);
3451		return -EINVAL;
3452
3453	} else if (data_size > sb_data_size) {
3454		if (dm_pool_metadata_needs_check(pool->pmd)) {
3455			DMERR("%s: unable to grow the data device until repaired.",
3456			      dm_device_name(pool->pool_md));
3457			return 0;
3458		}
3459
3460		if (sb_data_size)
3461			DMINFO("%s: growing the data device from %llu to %llu blocks",
3462			       dm_device_name(pool->pool_md),
3463			       sb_data_size, (unsigned long long)data_size);
3464		r = dm_pool_resize_data_dev(pool->pmd, data_size);
3465		if (r) {
3466			metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
3467			return r;
3468		}
3469
3470		*need_commit = true;
3471	}
3472
3473	return 0;
3474}
3475
3476static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3477{
3478	int r;
3479	struct pool_c *pt = ti->private;
3480	struct pool *pool = pt->pool;
3481	dm_block_t metadata_dev_size, sb_metadata_dev_size;
3482
3483	*need_commit = false;
3484
3485	metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
3486
3487	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
3488	if (r) {
3489		DMERR("%s: failed to retrieve metadata device size",
3490		      dm_device_name(pool->pool_md));
3491		return r;
3492	}
3493
3494	if (metadata_dev_size < sb_metadata_dev_size) {
3495		DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
3496		      dm_device_name(pool->pool_md),
3497		      metadata_dev_size, sb_metadata_dev_size);
3498		return -EINVAL;
3499
3500	} else if (metadata_dev_size > sb_metadata_dev_size) {
3501		if (dm_pool_metadata_needs_check(pool->pmd)) {
3502			DMERR("%s: unable to grow the metadata device until repaired.",
3503			      dm_device_name(pool->pool_md));
3504			return 0;
3505		}
3506
3507		warn_if_metadata_device_too_big(pool->md_dev);
3508		DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3509		       dm_device_name(pool->pool_md),
3510		       sb_metadata_dev_size, metadata_dev_size);
3511
3512		if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
3513			set_pool_mode(pool, PM_WRITE);
3514
3515		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3516		if (r) {
3517			metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
3518			return r;
3519		}
3520
3521		*need_commit = true;
3522	}
3523
3524	return 0;
3525}
3526
3527/*
3528 * Retrieves the number of blocks of the data device from
3529 * the superblock and compares it to the actual device size,
3530 * thus resizing the data device in case it has grown.
3531 *
3532 * This copes both with the ctr opening a preallocated data device
3533 * that is then resumed,
3534 * -and-
3535 * with the resume method being called on its own after userspace
3536 * has grown the data device in reaction to a table event.
3537 */
3538static int pool_preresume(struct dm_target *ti)
3539{
3540	int r;
3541	bool need_commit1, need_commit2;
3542	struct pool_c *pt = ti->private;
3543	struct pool *pool = pt->pool;
3544
3545	/*
3546	 * Take control of the pool object.
3547	 */
3548	r = bind_control_target(pool, ti);
3549	if (r)
3550		return r;
3551
3552	r = maybe_resize_data_dev(ti, &need_commit1);
3553	if (r)
3554		return r;
3555
3556	r = maybe_resize_metadata_dev(ti, &need_commit2);
3557	if (r)
3558		return r;
3559
3560	if (need_commit1 || need_commit2)
3561		(void) commit(pool);
3562
3563	return 0;
3564}
3565
3566static void pool_suspend_active_thins(struct pool *pool)
3567{
3568	struct thin_c *tc;
3569
3570	/* Suspend all active thin devices */
3571	tc = get_first_thin(pool);
3572	while (tc) {
3573		dm_internal_suspend_noflush(tc->thin_md);
3574		tc = get_next_thin(pool, tc);
3575	}
3576}
3577
3578static void pool_resume_active_thins(struct pool *pool)
3579{
3580	struct thin_c *tc;
3581
3582	/* Resume all active thin devices */
3583	tc = get_first_thin(pool);
3584	while (tc) {
3585		dm_internal_resume(tc->thin_md);
3586		tc = get_next_thin(pool, tc);
3587	}
3588}
3589
3590static void pool_resume(struct dm_target *ti)
3591{
3592	struct pool_c *pt = ti->private;
3593	struct pool *pool = pt->pool;
3594	unsigned long flags;
3595
3596	/*
3597	 * Must requeue active_thins' bios and then resume
3598	 * active_thins _before_ clearing 'suspend' flag.
3599	 */
3600	requeue_bios(pool);
3601	pool_resume_active_thins(pool);
3602
3603	spin_lock_irqsave(&pool->lock, flags);
3604	pool->low_water_triggered = false;
3605	pool->suspended = false;
3606	spin_unlock_irqrestore(&pool->lock, flags);
3607
3608	do_waker(&pool->waker.work);
3609}
3610
3611static void pool_presuspend(struct dm_target *ti)
3612{
3613	struct pool_c *pt = ti->private;
3614	struct pool *pool = pt->pool;
3615	unsigned long flags;
3616
3617	spin_lock_irqsave(&pool->lock, flags);
3618	pool->suspended = true;
3619	spin_unlock_irqrestore(&pool->lock, flags);
3620
3621	pool_suspend_active_thins(pool);
3622}
3623
3624static void pool_presuspend_undo(struct dm_target *ti)
3625{
3626	struct pool_c *pt = ti->private;
3627	struct pool *pool = pt->pool;
3628	unsigned long flags;
3629
3630	pool_resume_active_thins(pool);
3631
3632	spin_lock_irqsave(&pool->lock, flags);
3633	pool->suspended = false;
3634	spin_unlock_irqrestore(&pool->lock, flags);
3635}
3636
3637static void pool_postsuspend(struct dm_target *ti)
3638{
3639	struct pool_c *pt = ti->private;
3640	struct pool *pool = pt->pool;
3641
3642	cancel_delayed_work_sync(&pool->waker);
3643	cancel_delayed_work_sync(&pool->no_space_timeout);
3644	flush_workqueue(pool->wq);
3645	(void) commit(pool);
3646}
3647
3648static int check_arg_count(unsigned argc, unsigned args_required)
3649{
3650	if (argc != args_required) {
3651		DMWARN("Message received with %u arguments instead of %u.",
3652		       argc, args_required);
3653		return -EINVAL;
3654	}
3655
3656	return 0;
3657}
3658
3659static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
3660{
3661	if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
3662	    *dev_id <= MAX_DEV_ID)
3663		return 0;
3664
3665	if (warning)
3666		DMWARN("Message received with invalid device id: %s", arg);
3667
3668	return -EINVAL;
3669}
3670
3671static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
3672{
3673	dm_thin_id dev_id;
3674	int r;
3675
3676	r = check_arg_count(argc, 2);
3677	if (r)
3678		return r;
3679
3680	r = read_dev_id(argv[1], &dev_id, 1);
3681	if (r)
3682		return r;
3683
3684	r = dm_pool_create_thin(pool->pmd, dev_id);
3685	if (r) {
3686		DMWARN("Creation of new thinly-provisioned device with id %s failed.",
3687		       argv[1]);
3688		return r;
3689	}
3690
3691	return 0;
3692}
3693
3694static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3695{
3696	dm_thin_id dev_id;
3697	dm_thin_id origin_dev_id;
3698	int r;
3699
3700	r = check_arg_count(argc, 3);
3701	if (r)
3702		return r;
3703
3704	r = read_dev_id(argv[1], &dev_id, 1);
3705	if (r)
3706		return r;
3707
3708	r = read_dev_id(argv[2], &origin_dev_id, 1);
3709	if (r)
3710		return r;
3711
3712	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
3713	if (r) {
3714		DMWARN("Creation of new snapshot %s of device %s failed.",
3715		       argv[1], argv[2]);
3716		return r;
3717	}
3718
3719	return 0;
3720}
3721
3722static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
3723{
3724	dm_thin_id dev_id;
3725	int r;
3726
3727	r = check_arg_count(argc, 2);
3728	if (r)
3729		return r;
3730
3731	r = read_dev_id(argv[1], &dev_id, 1);
3732	if (r)
3733		return r;
3734
3735	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
3736	if (r)
3737		DMWARN("Deletion of thin device %s failed.", argv[1]);
3738
3739	return r;
3740}
3741
3742static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
3743{
3744	dm_thin_id old_id, new_id;
3745	int r;
3746
3747	r = check_arg_count(argc, 3);
3748	if (r)
3749		return r;
3750
3751	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
3752		DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
3753		return -EINVAL;
3754	}
3755
3756	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
3757		DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
3758		return -EINVAL;
3759	}
3760
3761	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
3762	if (r) {
3763		DMWARN("Failed to change transaction id from %s to %s.",
3764		       argv[1], argv[2]);
3765		return r;
3766	}
3767
3768	return 0;
3769}
3770
3771static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3772{
3773	int r;
3774
3775	r = check_arg_count(argc, 1);
3776	if (r)
3777		return r;
3778
3779	(void) commit(pool);
3780
3781	r = dm_pool_reserve_metadata_snap(pool->pmd);
3782	if (r)
3783		DMWARN("reserve_metadata_snap message failed.");
3784
3785	return r;
3786}
3787
3788static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3789{
3790	int r;
3791
3792	r = check_arg_count(argc, 1);
3793	if (r)
3794		return r;
3795
3796	r = dm_pool_release_metadata_snap(pool->pmd);
3797	if (r)
3798		DMWARN("release_metadata_snap message failed.");
3799
3800	return r;
3801}
3802
3803/*
3804 * Messages supported:
3805 *   create_thin	<dev_id>
3806 *   create_snap	<dev_id> <origin_id>
3807 *   delete		<dev_id>
3808 *   set_transaction_id <current_trans_id> <new_trans_id>
3809 *   reserve_metadata_snap
3810 *   release_metadata_snap
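 *
 * Messages are sent via the standard dm message interface, e.g.:
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"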
3811 */
3812static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
3813			char *result, unsigned maxlen)
3814{
3815	int r = -EINVAL;
3816	struct pool_c *pt = ti->private;
3817	struct pool *pool = pt->pool;
3818
3819	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
3820		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
3821		      dm_device_name(pool->pool_md));
3822		return -EOPNOTSUPP;
3823	}
3824
3825	if (!strcasecmp(argv[0], "create_thin"))
3826		r = process_create_thin_mesg(argc, argv, pool);
3827
3828	else if (!strcasecmp(argv[0], "create_snap"))
3829		r = process_create_snap_mesg(argc, argv, pool);
3830
3831	else if (!strcasecmp(argv[0], "delete"))
3832		r = process_delete_mesg(argc, argv, pool);
3833
3834	else if (!strcasecmp(argv[0], "set_transaction_id"))
3835		r = process_set_transaction_id_mesg(argc, argv, pool);
3836
3837	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
3838		r = process_reserve_metadata_snap_mesg(argc, argv, pool);
3839
3840	else if (!strcasecmp(argv[0], "release_metadata_snap"))
3841		r = process_release_metadata_snap_mesg(argc, argv, pool);
3842
3843	else
3844		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
3845
3846	if (!r)
3847		(void) commit(pool);
3848
3849	return r;
3850}

static void emit_flags(struct pool_features *pf, char *result,
		       unsigned sz, unsigned maxlen)
{
	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
		!pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
		pf->error_if_no_space;
	DMEMIT("%u ", count);

	if (!pf->zero_new_blocks)
		DMEMIT("skip_block_zeroing ");

	if (!pf->discard_enabled)
		DMEMIT("ignore_discard ");

	if (!pf->discard_passdown)
		DMEMIT("no_discard_passdown ");

	if (pf->mode == PM_READ_ONLY)
		DMEMIT("read_only ");

	if (pf->error_if_no_space)
		DMEMIT("error_if_no_space ");
}
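
/*
 * For example (illustrative): a pool created with skip_block_zeroing and
 * no_discard_passdown would emit the feature-args string
 *
 *   2 skip_block_zeroing no_discard_passdown
 */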

/*
 * Status line is:
 *    <transaction id> <used metadata blocks>/<total metadata blocks>
 *    <used data blocks>/<total data blocks> <held metadata root>
 *    <pool mode> <discard config> <no space config> <needs_check>
 *    <metadata low watermark>
 */
static void pool_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	unsigned sz = 0;
	uint64_t transaction_id;
	dm_block_t nr_free_blocks_data;
	dm_block_t nr_free_blocks_metadata;
	dm_block_t nr_blocks_data;
	dm_block_t nr_blocks_metadata;
	dm_block_t held_root;
	enum pool_mode mode;
	char buf[BDEVNAME_SIZE];
	char buf2[BDEVNAME_SIZE];
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_pool_mode(pool) == PM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit(pool);

		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
		if (r) {
			DMERR("%s: dm_pool_get_free_block_count returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
		if (r) {
			DMERR("%s: dm_pool_get_data_dev_size returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_snap returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		DMEMIT("%llu %llu/%llu %llu/%llu ",
		       (unsigned long long)transaction_id,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
		       (unsigned long long)nr_blocks_data);

		if (held_root)
			DMEMIT("%llu ", held_root);
		else
			DMEMIT("- ");

		mode = get_pool_mode(pool);
		if (mode == PM_OUT_OF_DATA_SPACE)
			DMEMIT("out_of_data_space ");
		else if (is_read_only_pool_mode(mode))
			DMEMIT("ro ");
		else
			DMEMIT("rw ");

		if (!pool->pf.discard_enabled)
			DMEMIT("ignore_discard ");
		else if (pool->pf.discard_passdown)
			DMEMIT("discard_passdown ");
		else
			DMEMIT("no_discard_passdown ");

		if (pool->pf.error_if_no_space)
			DMEMIT("error_if_no_space ");
		else
			DMEMIT("queue_if_no_space ");

		if (dm_pool_metadata_needs_check(pool->pmd))
			DMEMIT("needs_check ");
		else
			DMEMIT("- ");

		DMEMIT("%llu ", (unsigned long long)calc_metadata_threshold(pt));

		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %s %lu %llu ",
		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
		       (unsigned long)pool->sectors_per_block,
		       (unsigned long long)pt->low_water_blocks);
		emit_flags(&pt->requested_pf, result, sz, maxlen);
		break;
	}
	return;

err:
	DMEMIT("Error");
}
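
/*
 * A healthy pool might report an INFO status line like (values are
 * illustrative only):
 *
 *   2 265/4096 1350/65536 - rw discard_passdown queue_if_no_space - 1024
 *
 * i.e. transaction id 2, 265 of 4096 metadata blocks used, 1350 of 65536
 * data blocks used, no held metadata root, read-write, passing discards
 * down, queueing IO when out of data space, no metadata check needed, and
 * a metadata low watermark of 1024 blocks.
 */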

static int pool_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct pool_c *pt = ti->private;

	return fn(ti, pt->data_dev, 0, ti->len, data);
}

static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If max_sectors is smaller than pool->sectors_per_block adjust it
	 * to the highest possible power-of-2 factor of pool->sectors_per_block.
	 * This is especially beneficial when the pool's data device is a RAID
	 * device that has a full stripe width that matches pool->sectors_per_block
	 * -- because even though partial RAID stripe-sized IOs will be issued
	 *    to a single RAID stripe, when aggregated they will end on a full
	 *    RAID stripe boundary, which avoids additional partial RAID stripe
	 *    writes cascading.
	 */
	if (limits->max_sectors < pool->sectors_per_block) {
		while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
			if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
				limits->max_sectors--;
			limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
		}
	}
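
	/*
	 * Worked example (illustrative): with sectors_per_block == 1024 and
	 * a stacked max_sectors of 520, the loop rounds 520 down to 512,
	 * which is a power-of-2 factor of 1024, so the adjustment stops
	 * there.  The decrement handles the case where max_sectors is
	 * already a power of two but still not a factor.
	 */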

	/*
	 * If the system-determined stacked limits are compatible with the
	 * pool's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < pool->sectors_per_block ||
	    !is_factor(io_opt_sectors, pool->sectors_per_block)) {
		if (is_factor(pool->sectors_per_block, limits->max_sectors))
			blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
		else
			blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
	}

	/*
	 * pt->adjusted_pf is a staging area for the actual features to use.
	 * They get transferred to the live pool in bind_control_target()
	 * called from pool_preresume().
	 */
	if (!pt->adjusted_pf.discard_enabled) {
		/*
		 * Must explicitly disallow stacking discard limits otherwise the
		 * block layer will stack them if pool's data device has support.
		 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
		 * user to see that, so make sure to set all discard limits to 0.
		 */
		limits->discard_granularity = 0;
		return;
	}

	disable_passdown_if_not_supported(pt);

	/*
	 * The pool uses the same discard limits as the underlying data
	 * device.  DM core has already set this up.
	 */
}

static struct target_type pool_target = {
	.name = "thin-pool",
	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
		    DM_TARGET_IMMUTABLE,
	.version = {1, 21, 0},
	.module = THIS_MODULE,
	.ctr = pool_ctr,
	.dtr = pool_dtr,
	.map = pool_map,
	.presuspend = pool_presuspend,
	.presuspend_undo = pool_presuspend_undo,
	.postsuspend = pool_postsuspend,
	.preresume = pool_preresume,
	.resume = pool_resume,
	.message = pool_message,
	.status = pool_status,
	.iterate_devices = pool_iterate_devices,
	.io_hints = pool_io_hints,
};

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
static void thin_get(struct thin_c *tc)
{
	refcount_inc(&tc->refcount);
}

static void thin_put(struct thin_c *tc)
{
	if (refcount_dec_and_test(&tc->refcount))
		complete(&tc->can_destroy);
}

static void thin_dtr(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&tc->pool->lock, flags);
	list_del_rcu(&tc->list);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	synchronize_rcu();

	thin_put(tc);
	wait_for_completion(&tc->can_destroy);

	mutex_lock(&dm_thin_pool_table.mutex);

	__pool_dec(tc->pool);
	dm_pool_close_thin_device(tc->td);
	dm_put_device(ti, tc->pool_dev);
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
	kfree(tc);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
 */
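/*
 * Illustrative example (names and sizes are hypothetical): after a
 * "create_thin 0" message to the pool, a 1GiB thin device can be
 * activated with
 *
 *   dmsetup create thin0 --table "0 2097152 thin /dev/mapper/pool 0"
 */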
static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	struct thin_c *tc;
	struct dm_dev *pool_dev, *origin_dev;
	struct mapped_device *pool_md;
	unsigned long flags;

	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc != 2 && argc != 3) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc) {
		ti->error = "Out of memory";
		r = -ENOMEM;
		goto out_unlock;
	}
	tc->thin_md = dm_table_get_md(ti->table);
	spin_lock_init(&tc->lock);
	INIT_LIST_HEAD(&tc->deferred_cells);
	bio_list_init(&tc->deferred_bio_list);
	bio_list_init(&tc->retry_on_resume_list);
	tc->sort_bio_list = RB_ROOT;

	if (argc == 3) {
		if (!strcmp(argv[0], argv[2])) {
			ti->error = "Error setting origin device";
			r = -EINVAL;
			goto bad_origin_dev;
		}

		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
		if (r) {
			ti->error = "Error opening origin device";
			goto bad_origin_dev;
		}
		tc->origin_dev = origin_dev;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
	if (r) {
		ti->error = "Error opening pool device";
		goto bad_pool_dev;
	}
	tc->pool_dev = pool_dev;

	if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
		ti->error = "Invalid device id";
		r = -EINVAL;
		goto bad_common;
	}

	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
	if (!pool_md) {
		ti->error = "Couldn't get pool mapped device";
		r = -EINVAL;
		goto bad_common;
	}

	tc->pool = __pool_table_lookup(pool_md);
	if (!tc->pool) {
		ti->error = "Couldn't find pool object";
		r = -EINVAL;
		goto bad_pool_lookup;
	}
	__pool_inc(tc->pool);

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		ti->error = "Couldn't open thin device, pool is in fail mode";
		r = -EINVAL;
		goto bad_pool;
	}

	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
	if (r) {
		ti->error = "Couldn't open thin internal device";
		goto bad_pool;
	}

	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
	if (r)
		goto bad;

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);

	/* In case the pool supports discards, pass them on. */
	if (tc->pool->pf.discard_enabled) {
		ti->discards_supported = true;
		ti->num_discard_bios = 1;
	}

	mutex_unlock(&dm_thin_pool_table.mutex);

	spin_lock_irqsave(&tc->pool->lock, flags);
	if (tc->pool->suspended) {
		spin_unlock_irqrestore(&tc->pool->lock, flags);
		mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
		ti->error = "Unable to activate thin device while pool is suspended";
		r = -EINVAL;
		goto bad;
	}
	refcount_set(&tc->refcount, 1);
	init_completion(&tc->can_destroy);
	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
	/*
	 * This synchronize_rcu() call is needed here otherwise we risk a
	 * wake_worker() call finding no bios to process (because the newly
	 * added tc isn't yet visible).  So this reduces latency since we
	 * aren't then dependent on the periodic commit to wake_worker().
	 */
	synchronize_rcu();

	dm_put(pool_md);

	return 0;

bad:
	dm_pool_close_thin_device(tc->td);
bad_pool:
	__pool_dec(tc->pool);
bad_pool_lookup:
	dm_put(pool_md);
bad_common:
	dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
	kfree(tc);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int thin_map(struct dm_target *ti, struct bio *bio)
{
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	return thin_bio_map(ti, bio);
}

static int thin_endio(struct dm_target *ti, struct bio *bio,
		blk_status_t *err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct list_head work;
	struct dm_thin_new_mapping *m, *tmp;
	struct pool *pool = h->tc->pool;

	if (h->shared_read_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->shared_read_entry, &work);

		spin_lock_irqsave(&pool->lock, flags);
		list_for_each_entry_safe(m, tmp, &work, list) {
			list_del(&m->list);
			__complete_mapping_preparation(m);
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	if (h->all_io_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->all_io_entry, &work);
		if (!list_empty(&work)) {
			spin_lock_irqsave(&pool->lock, flags);
			list_for_each_entry_safe(m, tmp, &work, list)
				list_add_tail(&m->list, &pool->prepared_discards);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_worker(pool);
		}
	}

	if (h->cell)
		cell_defer_no_holder(h->tc, h->cell);

	return DM_ENDIO_DONE;
}

static void thin_presuspend(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	if (dm_noflush_suspending(ti))
		noflush_work(tc, do_noflush_start);
}

static void thin_postsuspend(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	/*
	 * The dm_noflush_suspending flag has been cleared by now, so
	 * unfortunately we must always run this.
	 */
	noflush_work(tc, do_noflush_stop);
}

static int thin_preresume(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	if (tc->origin_dev)
		tc->origin_size = get_dev_size(tc->origin_dev->bdev);

	return 0;
}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
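/*
 * For example (illustrative): a thin device with 1024 mapped blocks of
 * 128 sectors each, whose highest mapped block is block 1023, would
 * report
 *
 *   131072 131071
 */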
static void thin_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	ssize_t sz = 0;
	dm_block_t mapped, highest;
	char buf[BDEVNAME_SIZE];
	struct thin_c *tc = ti->private;

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		DMEMIT("Fail");
		return;
	}

	if (!tc->td)
		DMEMIT("-");
	else {
		switch (type) {
		case STATUSTYPE_INFO:
			r = dm_thin_get_mapped_count(tc->td, &mapped);
			if (r) {
				DMERR("dm_thin_get_mapped_count returned %d", r);
				goto err;
			}

			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
			if (r < 0) {
				DMERR("dm_thin_get_highest_mapped_block returned %d", r);
				goto err;
			}

			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
			if (r)
				DMEMIT("%llu", ((highest + 1) *
						tc->pool->sectors_per_block) - 1);
			else
				DMEMIT("-");
			break;

		case STATUSTYPE_TABLE:
			DMEMIT("%s %lu",
			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
			       (unsigned long) tc->dev_id);
			if (tc->origin_dev)
				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
			break;
		}
	}

	return;

err:
	DMEMIT("Error");
}

static int thin_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	sector_t blocks;
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	/*
	 * We can't call dm_pool_get_data_dev_size() since that blocks.  So
	 * we follow a more convoluted path through to the pool's target.
	 */
	if (!pool->ti)
		return 0;	/* nothing is bound */

	blocks = pool->ti->len;
	(void) sector_div(blocks, pool->sectors_per_block);
	if (blocks)
		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);

	return 0;
}

static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	if (!pool->pf.discard_enabled)
		return;

	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
	limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
}

static struct target_type thin_target = {
	.name = "thin",
	.version = {1, 21, 0},
	.module	= THIS_MODULE,
	.ctr = thin_ctr,
	.dtr = thin_dtr,
	.map = thin_map,
	.end_io = thin_endio,
	.preresume = thin_preresume,
	.presuspend = thin_presuspend,
	.postsuspend = thin_postsuspend,
	.status = thin_status,
	.iterate_devices = thin_iterate_devices,
	.io_hints = thin_io_hints,
};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{
	int r = -ENOMEM;

	pool_table_init();

	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
	if (!_new_mapping_cache)
		return r;

	r = dm_register_target(&thin_target);
	if (r)
		goto bad_new_mapping_cache;

	r = dm_register_target(&pool_target);
	if (r)
		goto bad_thin_target;

	return 0;

bad_thin_target:
	dm_unregister_target(&thin_target);
bad_new_mapping_cache:
	kmem_cache_destroy(_new_mapping_cache);

	return r;
}

static void dm_thin_exit(void)
{
	dm_unregister_target(&thin_target);
	dm_unregister_target(&pool_target);

	kmem_cache_destroy(_new_mapping_cache);

	pool_table_exit();
}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
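
/*
 * The timeout can be adjusted at runtime via sysfs, e.g. (illustrative;
 * the path assumes the module is loaded as dm_thin_pool):
 *
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 */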

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");