/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
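
/*
 * For example, with 512-byte sectors (SECTOR_SHIFT == 9) these work out
 * to 128 and 2097152 sectors respectively.
 */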

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug further io to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * skipped if the io covers the whole block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, i.e. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is that the timestamp magic isn't perfect,
 * and will continue to think that the data block in the snapshot device is
 * shared even after the write to the origin has broken sharing.  I suspect
 * data blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */
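
/*
 * A worked example of the steps above (illustrative numbers only):
 * suppose devices A (origin) and B (snapshot) both map virtual block 17
 * to shared data block 100.  A write to block 17 of A triggers:
 *
 *   i)   further io to data block 100 is plugged;
 *   ii)  in-flight reads of block 100 (from A or B) are quiesced;
 *   iii) block 100 is copied to a fresh block, say 205 (skipped if the
 *        write covers the whole block);
 *   iv)  A's btree gains the mapping 17 -> 205, while B's btree still
 *        maps 17 -> 100;
 *   v)   io to the block is unplugged, including the triggering write.
 */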

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
enum lock_space {
	VIRTUAL,
	PHYSICAL
};

static void build_key(struct dm_thin_device *td, enum lock_space ls,
		      dm_block_t b, dm_block_t e, struct dm_cell_key *key)
{
	key->virtual = (ls == VIRTUAL);
	key->dev = dm_thin_dev_id(td);
	key->block_begin = b;
	key->block_end = e;
}

static void build_data_key(struct dm_thin_device *td, dm_block_t b,
			   struct dm_cell_key *key)
{
	build_key(td, PHYSICAL, b, b + 1llu, key);
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	build_key(td, VIRTUAL, b, b + 1llu, key);
}

/*----------------------------------------------------------------*/

#define THROTTLE_THRESHOLD (1 * HZ)

struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}

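/*
 * A sketch of how the throttle above is meant to be used (the actual
 * callers appear further down this file):
 *
 *	worker thread				io submitters
 *	-------------				-------------
 *	throttle_work_start(t);
 *						throttle_lock(t);
 *	... do work, periodically		... submit io ...
 *	calling throttle_work_update(t);	throttle_unlock(t);
 *	throttle_work_complete(t);
 *
 * If a pass of the worker takes longer than THROTTLE_THRESHOLD,
 * throttle_work_update() takes t->lock for writing, stalling new
 * submitters (who take it for reading) until the worker finishes and
 * throttle_work_complete() drops the lock again.
 */
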
/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes.  Ordered in order of increasing degradation,
 * for comparisons.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */
	bool suspended:1;
	bool out_of_data_space:1;

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct throttle throttle;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;

	struct dm_bio_prison_cell **cell_sort_array;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	struct mapped_device *thin_md;

	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/**
 * __blkdev_issue_discard_async - queue a discard with async completion
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 * @parent_bio: parent discard bio that all sub discards get chained to
 *
 * Description:
 *    Asynchronously issue a discard request for the sectors in question.
 */
static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
					sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
					struct bio *parent_bio)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio *bio;

	if (!q || !nr_sects)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	/*
	 * Required bio_put occurs in bio_endio thanks to bio_chain below
	 */
	bio = bio_alloc(gfp_mask, 1);
	if (!bio)
		return -ENOMEM;

	bio_chain(bio, parent_bio);

	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_size = nr_sects << 9;

	submit_bio(type, bio);

	return 0;
}

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
{
	return block_size_is_power_of_two(pool) ?
		(b << pool->sectors_per_block_shift) :
		(b * pool->sectors_per_block);
}

static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
			 struct bio *parent_bio)
{
	sector_t s = block_to_sectors(tc->pool, data_b);
	sector_t len = block_to_sectors(tc->pool, data_e - data_b);

	return __blkdev_issue_discard_async(tc->pool_dev->bdev, s, len,
					    GFP_NOWAIT, 0, parent_bio);
}

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
				 struct dm_bio_prison_cell *cell, int error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static int get_pool_io_error_code(struct pool *pool)
{
	return pool->out_of_data_space ? -ENOSPC : -EIO;
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	int error = get_pool_io_error_code(pool);

	cell_error_with_code(pool, cell, error);
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
	struct dm_bio_prison_cell *cell;
};

static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
{
	bio_list_merge(bios, master);
	bio_list_init(master);
}

static void error_bio_list(struct bio_list *bios, int error)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_error = error;
		bio_endio(bio);
	}
}

static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, master);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, error);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

static void requeue_io(struct thin_c *tc)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, &tc->deferred_bio_list);
	__merge_bio_list(&bios, &tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, DM_ENDIO_REQUEUE);
	requeue_deferred_cells(tc);
}

static void error_retry_list_with_code(struct pool *pool, int error)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
	rcu_read_unlock();
}

static void error_retry_list(struct pool *pool)
{
	int error = get_pool_io_error_code(pool);

	return error_retry_list_with_code(pool, error);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

/*
 * Returns the _complete_ blocks that this bio covers.
 */
static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
				dm_block_t *begin, dm_block_t *end)
{
	struct pool *pool = tc->pool;
	sector_t b = bio->bi_iter.bi_sector;
	sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);

	b += pool->sectors_per_block - 1ull; /* so we round up */

	if (block_size_is_power_of_two(pool)) {
		b >>= pool->sectors_per_block_shift;
		e >>= pool->sectors_per_block_shift;
	} else {
		(void) sector_div(b, pool->sectors_per_block);
		(void) sector_div(e, pool->sectors_per_block);
	}

	if (e < b)
		/* Can happen if the bio is within a single block. */
		e = b;

	*begin = b;
	*end = e;
}
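
/*
 * Example: with 128-sector blocks, a bio covering sectors [100, 500)
 * gives b = (100 + 127) >> 7 = 1 and e = 500 >> 7 = 3, i.e. only
 * blocks 1 and 2 (sectors [128, 384)) are completely covered.
 */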

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}
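
/*
 * Example: with 128-sector blocks, a bio at virtual sector 300
 * (offset 300 & 127 = 44 within its block) remapped to data block 9
 * ends up at pool sector 9 * 128 + 44 = 1196.
 */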

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed e.g, due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool maybe_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions.  When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	int err;
	struct thin_c *tc;
	dm_block_t virt_begin, virt_end;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->err = read_err || write_err ? -EIO : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	bio->bi_end_io = m->saved_bi_end_io;

	m->err = bio->bi_error;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio = m->bio;
	int r;

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

/*----------------------------------------------------------------*/

static void free_discard_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	if (m->cell)
		cell_defer_no_holder(tc, m->cell);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	bio_io_error(m->bio);
	free_discard_mapping(m);
}

static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
{
	bio_endio(m->bio);
	free_discard_mapping(m);
}

static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
	if (r) {
		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
		bio_io_error(m->bio);
	} else
		bio_endio(m->bio);

	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, tc->pool->mapping_pool);
}

static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
{
	/*
	 * We've already unmapped this range of blocks, but before we
	 * passdown we have to check that these blocks are now unused.
	 */
	int r;
	bool used = true;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;

	while (b != end) {
		/* find start of unmapped run */
		for (; b < end; b++) {
			r = dm_pool_block_is_used(pool->pmd, b, &used);
			if (r)
				return r;

			if (!used)
				break;
		}

		if (b == end)
			break;

		/* find end of run */
		for (e = b + 1; e != end; e++) {
			r = dm_pool_block_is_used(pool->pmd, e, &used);
			if (r)
				return r;

			if (used)
				break;
		}

		r = issue_discard(tc, b, e, m->bio);
		if (r)
			return r;

		b = e;
	}

	return 0;
}
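
/*
 * Example: if the five blocks in [b, end) above have the usage pattern
 * used, free, free, used, free, the loop issues two discards, one for
 * the run [b + 1, b + 3) and one for [b + 4, b + 5).
 */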

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;

	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
	if (r)
		metadata_operation_failed(pool, "dm_thin_remove_range", r);

	else if (m->maybe_shared)
		r = passdown_double_checking_shared_status(m);
	else
		r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);

	/*
	 * Even if r is set, there could be sub discards in flight that we
	 * need to wait for.
	 */
	m->bio->bi_error = r;
	bio_endio(m->bio);
	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	int r;
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
	if (r < 0) {
		DMERR_LIMIT("dm_kcopyd_zero() failed");
		copy_complete(1, 1, m);
	}
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_begin,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_begin);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			copy_complete(1, 1, m);

			/*
			 * We allow the zero to be issued, to simplify the
			 * error path.  Otherwise we'd need to start
			 * worrying about decrementing the prepare_actions
			 * counter.
			 */
		}

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}
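
/*
 * To trace the prepare_actions counter above: it starts at 3 and is
 * decremented once when the shared_read_ds quiesce completes (or
 * immediately, if nothing needed quiescing), once by copy_complete()
 * when kcopyd finishes (or via overwrite_endio for the overwrite case),
 * and once at the end of schedule_copy() itself.  A partial-block zero
 * (the len < sectors_per_block case) adds a fourth decrement to wait
 * for.  Only when the counter reaches zero is the mapping moved to
 * prepared_mappings.
 */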

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (pool->pf.zero_new_blocks) {
		if (io_overwrites_block(pool, bio))
			remap_and_issue_overwrite(tc, bio, data_block, m);
		else
			ll_zero(tc, m, data_block * pool->sectors_per_block,
				(data_block + 1) * pool->sectors_per_block);
	} else
		process_prepared_mapping(m);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static void check_for_space(struct pool *pool)
{
	int r;
	dm_block_t nr_free;

	if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
		return;

	r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
	if (r)
		return;

	if (nr_free)
		set_pool_mode(pool, PM_WRITE);
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
	else
		check_for_space(pool);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static int should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return -EIO;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space ? -ENOSPC : 0;

	case PM_READ_ONLY:
	case PM_FAIL:
		return -EIO;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return -EIO;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	int error = should_error_unserviceable_bio(pool);

	if (error) {
		bio->bi_error = error;
		bio_endio(bio);
	} else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;
	int error;

	error = should_error_unserviceable_bio(pool);
	if (error) {
		cell_error_with_code(pool, cell, error);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}

static void process_discard_cell_no_passdown(struct thin_c *tc,
					     struct dm_bio_prison_cell *virt_cell)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	/*
	 * We don't need to lock the data blocks, since there's no
	 * passdown.  We only lock data blocks for allocation and breaking sharing.
	 */
	m->tc = tc;
	m->virt_begin = virt_cell->key.block_begin;
	m->virt_end = virt_cell->key.block_end;
	m->cell = virt_cell;
	m->bio = virt_cell->holder;

	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
		pool->process_prepared_discard(m);
}

/*
 * __bio_inc_remaining() is used to defer the parent bio's end_io until
 * we _know_ all chained sub range discard bios have completed.
 */
static inline void __bio_inc_remaining(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
				 struct bio *bio)
{
	struct pool *pool = tc->pool;

	int r;
	bool maybe_shared;
	struct dm_cell_key data_key;
	struct dm_bio_prison_cell *data_cell;
	struct dm_thin_new_mapping *m;
	dm_block_t virt_begin, virt_end, data_begin;

	while (begin != end) {
		r = ensure_next_mapping(pool);
		if (r)
			/* we did our best */
			return;

		r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
					      &data_begin, &maybe_shared);
		if (r)
			/*
			 * Silently fail, letting any mappings we've
			 * created complete.
			 */
			break;

		build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
		if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
			/* contention, we'll give up with this range */
			begin = virt_end;
			continue;
		}

		/*
		 * IO may still be going to the destination block.  We must
		 * quiesce before we can do the removal.
		 */
		m = get_next_mapping(pool);
		m->tc = tc;
		m->maybe_shared = maybe_shared;
		m->virt_begin = virt_begin;
		m->virt_end = virt_end;
		m->data_block = data_begin;
		m->cell = data_cell;
		m->bio = bio;

		/*
		 * The parent bio must not complete before sub discard bios are
		 * chained to it (see __blkdev_issue_discard_async's bio_chain)!
		 *
		 * This per-mapping bi_remaining increment is paired with
		 * the implicit decrement that occurs via bio_endio() in
		 * process_prepared_discard_{passdown,no_passdown}.
		 */
		__bio_inc_remaining(bio);
		if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
			pool->process_prepared_discard(m);

		begin = virt_end;
	}
}

static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
{
	struct bio *bio = virt_cell->holder;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	/*
	 * The virt_cell will only get freed once the origin bio completes.
	 * This means it will remain locked while all the individual
	 * passdown bios are in flight.
	 */
	h->cell = virt_cell;
	break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);

	/*
	 * We complete the bio now, knowing that the bi_remaining field
	 * will prevent completion until the sub range discards have
	 * completed.
	 */
	bio_endio(bio);
}

static void process_discard_bio(struct thin_c *tc, struct bio *bio)
{
	dm_block_t begin, end;
	struct dm_cell_key virt_key;
	struct dm_bio_prison_cell *virt_cell;

	get_bio_block_range(tc, bio, &begin, &end);
	if (begin == end) {
		/*
		 * The discard covers less than a block.
		 */
		bio_endio(bio);
		return;
	}

	build_key(tc->td, VIRTUAL, begin, end, &virt_key);
	if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
		/*
		 * Potential starvation issue: We're relying on the
		 * fs/application being well behaved, and not trying to
		 * send IO to a region at the same time as discarding it.
		 * If they do this persistently then it's possible this
		 * cell will never be granted.
		 */
		return;

	tc->pool->process_discard_cell(tc, virt_cell);
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void __remap_and_issue_shared_cell(void *context,
					  struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if ((bio_data_dir(bio) == WRITE) ||
		    (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
			bio_list_add(&info->defer_bios, bio);
		else {
			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1661
1662			h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
1663			inc_all_io_entry(info->tc->pool, bio);
1664			bio_list_add(&info->issue_bios, bio);
1665		}
1666	}
1667}
1668
1669static void remap_and_issue_shared_cell(struct thin_c *tc,
1670					struct dm_bio_prison_cell *cell,
1671					dm_block_t block)
1672{
1673	struct bio *bio;
1674	struct remap_info info;
1675
1676	info.tc = tc;
1677	bio_list_init(&info.defer_bios);
1678	bio_list_init(&info.issue_bios);
1679
1680	cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
1681			   &info, cell);
1682
1683	while ((bio = bio_list_pop(&info.defer_bios)))
1684		thin_defer_bio(tc, bio);
1685
1686	while ((bio = bio_list_pop(&info.issue_bios)))
1687		remap_and_issue(tc, bio, block);
1688}
1689
1690static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1691			       dm_block_t block,
1692			       struct dm_thin_lookup_result *lookup_result,
1693			       struct dm_bio_prison_cell *virt_cell)
1694{
1695	struct dm_bio_prison_cell *data_cell;
1696	struct pool *pool = tc->pool;
1697	struct dm_cell_key key;
1698
1699	/*
1700	 * If cell is already occupied, then sharing is already in the process
1701	 * of being broken so we have nothing further to do here.
1702	 */
1703	build_data_key(tc->td, lookup_result->block, &key);
1704	if (bio_detain(pool, &key, bio, &data_cell)) {
1705		cell_defer_no_holder(tc, virt_cell);
1706		return;
1707	}
1708
1709	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
1710		break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1711		cell_defer_no_holder(tc, virt_cell);
1712	} else {
1713		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1714
1715		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1716		inc_all_io_entry(pool, bio);
1717		remap_and_issue(tc, bio, lookup_result->block);
1718
1719		remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
1720		remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
1721	}
1722}
1723
1724static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1725			    struct dm_bio_prison_cell *cell)
1726{
1727	int r;
1728	dm_block_t data_block;
1729	struct pool *pool = tc->pool;
1730
1731	/*
1732	 * Remap empty bios (flushes) immediately, without provisioning.
1733	 */
1734	if (!bio->bi_iter.bi_size) {
1735		inc_all_io_entry(pool, bio);
1736		cell_defer_no_holder(tc, cell);
1737
1738		remap_and_issue(tc, bio, 0);
1739		return;
1740	}
1741
1742	/*
1743	 * Fill read bios with zeroes and complete them immediately.
1744	 */
1745	if (bio_data_dir(bio) == READ) {
1746		zero_fill_bio(bio);
1747		cell_defer_no_holder(tc, cell);
1748		bio_endio(bio);
1749		return;
1750	}
1751
1752	r = alloc_data_block(tc, &data_block);
1753	switch (r) {
1754	case 0:
1755		if (tc->origin_dev)
1756			schedule_external_copy(tc, block, data_block, cell, bio);
1757		else
1758			schedule_zero(tc, block, data_block, cell, bio);
1759		break;
1760
1761	case -ENOSPC:
1762		retry_bios_on_resume(pool, cell);
1763		break;
1764
1765	default:
1766		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1767			    __func__, r);
1768		cell_error(pool, cell);
1769		break;
1770	}
1771}
1772
1773static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1774{
1775	int r;
1776	struct pool *pool = tc->pool;
1777	struct bio *bio = cell->holder;
1778	dm_block_t block = get_bio_block(tc, bio);
1779	struct dm_thin_lookup_result lookup_result;
1780
1781	if (tc->requeue_mode) {
1782		cell_requeue(pool, cell);
1783		return;
1784	}
1785
1786	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1787	switch (r) {
1788	case 0:
1789		if (lookup_result.shared)
1790			process_shared_bio(tc, bio, block, &lookup_result, cell);
1791		else {
1792			inc_all_io_entry(pool, bio);
1793			remap_and_issue(tc, bio, lookup_result.block);
1794			inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1795		}
1796		break;
1797
1798	case -ENODATA:
1799		if (bio_data_dir(bio) == READ && tc->origin_dev) {
1800			inc_all_io_entry(pool, bio);
1801			cell_defer_no_holder(tc, cell);
1802
1803			if (bio_end_sector(bio) <= tc->origin_size)
1804				remap_to_origin_and_issue(tc, bio);
1805
1806			else if (bio->bi_iter.bi_sector < tc->origin_size) {
1807				zero_fill_bio(bio);
1808				bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1809				remap_to_origin_and_issue(tc, bio);
1810
1811			} else {
1812				zero_fill_bio(bio);
1813				bio_endio(bio);
1814			}
1815		} else
1816			provision_block(tc, bio, block, cell);
1817		break;
1818
1819	default:
1820		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1821			    __func__, r);
1822		cell_defer_no_holder(tc, cell);
1823		bio_io_error(bio);
1824		break;
1825	}
1826}
1827
1828static void process_bio(struct thin_c *tc, struct bio *bio)
1829{
1830	struct pool *pool = tc->pool;
1831	dm_block_t block = get_bio_block(tc, bio);
1832	struct dm_bio_prison_cell *cell;
1833	struct dm_cell_key key;
 
1834
1835	/*
1836	 * If cell is already occupied, then the block is already
1837	 * being provisioned so we have nothing further to do here.
1838	 */
1839	build_virtual_key(tc->td, block, &key);
1840	if (bio_detain(pool, &key, bio, &cell))
1841		return;
1842
1843	process_cell(tc, cell);
1844}
1845
1846static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
1847				    struct dm_bio_prison_cell *cell)
1848{
1849	int r;
1850	int rw = bio_data_dir(bio);
1851	dm_block_t block = get_bio_block(tc, bio);
1852	struct dm_thin_lookup_result lookup_result;
1853
1854	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1855	switch (r) {
1856	case 0:
1857		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
1858			handle_unserviceable_bio(tc->pool, bio);
1859			if (cell)
1860				cell_defer_no_holder(tc, cell);
1861		} else {
1862			inc_all_io_entry(tc->pool, bio);
 
 
 
 
 
 
 
 
1863			remap_and_issue(tc, bio, lookup_result.block);
1864			if (cell)
1865				inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1866		}
1867		break;
1868
1869	case -ENODATA:
1870		if (cell)
1871			cell_defer_no_holder(tc, cell);
1872		if (rw != READ) {
1873			handle_unserviceable_bio(tc->pool, bio);
1874			break;
1875		}
1876
1877		if (tc->origin_dev) {
1878			inc_all_io_entry(tc->pool, bio);
1879			remap_to_origin_and_issue(tc, bio);
1880			break;
1881		}
1882
1883		zero_fill_bio(bio);
1884		bio_endio(bio);
1885		break;
1886
1887	default:
1888		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1889			    __func__, r);
1890		if (cell)
1891			cell_defer_no_holder(tc, cell);
1892		bio_io_error(bio);
1893		break;
1894	}
1895}
1896
1897static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1898{
1899	__process_bio_read_only(tc, bio, NULL);
1900}
1901
1902static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1903{
1904	__process_bio_read_only(tc, cell->holder, cell);
1905}
1906
1907static void process_bio_success(struct thin_c *tc, struct bio *bio)
1908{
1909	bio_endio(bio);
1910}
1911
1912static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1913{
1914	bio_io_error(bio);
1915}
1916
1917static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1918{
1919	cell_success(tc->pool, cell);
1920}
1921
1922static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1923{
1924	cell_error(tc->pool, cell);
1925}
1926
1927/*
1928 * FIXME: should we also commit due to size of transaction, measured in
1929 * metadata blocks?
1930 */
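/*
 * Returns non-zero once more than COMMIT_PERIOD jiffies have elapsed
 * since the last commit: time_in_range() is true only while jiffies is
 * still within [last_commit_jiffies, last_commit_jiffies + COMMIT_PERIOD].
 */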
1931static int need_commit_due_to_time(struct pool *pool)
1932{
1933	return !time_in_range(jiffies, pool->last_commit_jiffies,
1934			      pool->last_commit_jiffies + COMMIT_PERIOD);
1935}
1936
1937#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
1938#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
1939
1940static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
1941{
1942	struct rb_node **rbp, *parent;
1943	struct dm_thin_endio_hook *pbd;
1944	sector_t bi_sector = bio->bi_iter.bi_sector;
1945
1946	rbp = &tc->sort_bio_list.rb_node;
1947	parent = NULL;
1948	while (*rbp) {
1949		parent = *rbp;
1950		pbd = thin_pbd(parent);
1951
1952		if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
1953			rbp = &(*rbp)->rb_left;
1954		else
1955			rbp = &(*rbp)->rb_right;
1956	}
1957
1958	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1959	rb_link_node(&pbd->rb_node, parent, rbp);
1960	rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
1961}
1962
1963static void __extract_sorted_bios(struct thin_c *tc)
1964{
1965	struct rb_node *node;
1966	struct dm_thin_endio_hook *pbd;
1967	struct bio *bio;
1968
1969	for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
1970		pbd = thin_pbd(node);
1971		bio = thin_bio(pbd);
1972
1973		bio_list_add(&tc->deferred_bio_list, bio);
1974		rb_erase(&pbd->rb_node, &tc->sort_bio_list);
1975	}
1976
1977	WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
1978}
1979
1980static void __sort_thin_deferred_bios(struct thin_c *tc)
1981{
1982	struct bio *bio;
1983	struct bio_list bios;
1984
1985	bio_list_init(&bios);
1986	bio_list_merge(&bios, &tc->deferred_bio_list);
1987	bio_list_init(&tc->deferred_bio_list);
1988
1989	/* Sort deferred_bio_list using rb-tree */
1990	while ((bio = bio_list_pop(&bios)))
1991		__thin_bio_rb_add(tc, bio);
1992
1993	/*
1994	 * Transfer the sorted bios in sort_bio_list back to
1995	 * deferred_bio_list to allow lockless submission of
1996	 * all bios.
1997	 */
1998	__extract_sorted_bios(tc);
1999}
2000
2001static void process_thin_deferred_bios(struct thin_c *tc)
2002{
2003	struct pool *pool = tc->pool;
2004	unsigned long flags;
2005	struct bio *bio;
2006	struct bio_list bios;
2007	struct blk_plug plug;
2008	unsigned count = 0;
2009
2010	if (tc->requeue_mode) {
2011		error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
2012		return;
2013	}
2014
2015	bio_list_init(&bios);
2016
2017	spin_lock_irqsave(&tc->lock, flags);
2018
2019	if (bio_list_empty(&tc->deferred_bio_list)) {
2020		spin_unlock_irqrestore(&tc->lock, flags);
2021		return;
2022	}
2023
2024	__sort_thin_deferred_bios(tc);
2025
2026	bio_list_merge(&bios, &tc->deferred_bio_list);
2027	bio_list_init(&tc->deferred_bio_list);
2028
2029	spin_unlock_irqrestore(&tc->lock, flags);
2030
2031	blk_start_plug(&plug);
2032	while ((bio = bio_list_pop(&bios))) {
2033		/*
2034		 * If we've got no free new_mapping structs, and processing
2035		 * this bio might require one, we pause until there are some
2036		 * prepared mappings to process.
2037		 */
2038		if (ensure_next_mapping(pool)) {
2039			spin_lock_irqsave(&tc->lock, flags);
2040			bio_list_add(&tc->deferred_bio_list, bio);
2041			bio_list_merge(&tc->deferred_bio_list, &bios);
2042			spin_unlock_irqrestore(&tc->lock, flags);
2043			break;
2044		}
2045
2046		if (bio->bi_rw & REQ_DISCARD)
2047			pool->process_discard(tc, bio);
2048		else
2049			pool->process_bio(tc, bio);
2050
2051		if ((count++ & 127) == 0) {
2052			throttle_work_update(&pool->throttle);
2053			dm_pool_issue_prefetches(pool->pmd);
2054		}
2055	}
2056	blk_finish_plug(&plug);
2057}
2058
2059static int cmp_cells(const void *lhs, const void *rhs)
2060{
2061	struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
2062	struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
2063
2064	BUG_ON(!lhs_cell->holder);
2065	BUG_ON(!rhs_cell->holder);
2066
2067	if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
2068		return -1;
2069
2070	if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
2071		return 1;
2072
2073	return 0;
2074}
2075
2076static unsigned sort_cells(struct pool *pool, struct list_head *cells)
2077{
2078	unsigned count = 0;
2079	struct dm_bio_prison_cell *cell, *tmp;
2080
2081	list_for_each_entry_safe(cell, tmp, cells, user_list) {
2082		if (count >= CELL_SORT_ARRAY_SIZE)
2083			break;
2084
2085		pool->cell_sort_array[count++] = cell;
2086		list_del(&cell->user_list);
2087	}
2088
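	/*
	 * Note: sizeof(cell) below is sizeof(struct dm_bio_prison_cell *);
	 * the array being sorted holds pointers, so that is the element size.
	 */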
2089	sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
2090
2091	return count;
2092}
2093
2094static void process_thin_deferred_cells(struct thin_c *tc)
2095{
2096	struct pool *pool = tc->pool;
2097	unsigned long flags;
2098	struct list_head cells;
2099	struct dm_bio_prison_cell *cell;
2100	unsigned i, j, count;
2101
2102	INIT_LIST_HEAD(&cells);
2103
2104	spin_lock_irqsave(&tc->lock, flags);
2105	list_splice_init(&tc->deferred_cells, &cells);
2106	spin_unlock_irqrestore(&tc->lock, flags);
2107
2108	if (list_empty(&cells))
2109		return;
2110
2111	do {
2112		count = sort_cells(tc->pool, &cells);
2113
2114		for (i = 0; i < count; i++) {
2115			cell = pool->cell_sort_array[i];
2116			BUG_ON(!cell->holder);
2117
2118			/*
2119			 * If we've got no free new_mapping structs, and processing
2120			 * this bio might require one, we pause until there are some
2121			 * prepared mappings to process.
2122			 */
2123			if (ensure_next_mapping(pool)) {
2124				for (j = i; j < count; j++)
2125					list_add(&pool->cell_sort_array[j]->user_list, &cells);
2126
2127				spin_lock_irqsave(&tc->lock, flags);
2128				list_splice(&cells, &tc->deferred_cells);
2129				spin_unlock_irqrestore(&tc->lock, flags);
2130				return;
2131			}
2132
2133			if (cell->holder->bi_rw & REQ_DISCARD)
2134				pool->process_discard_cell(tc, cell);
2135			else
2136				pool->process_cell(tc, cell);
2137		}
2138	} while (!list_empty(&cells));
2139}
2140
2141static void thin_get(struct thin_c *tc);
2142static void thin_put(struct thin_c *tc);
2143
2144/*
2145 * We can't hold rcu_read_lock() around code that can block.  So we
2146 * find a thin with the rcu lock held; bump a refcount; then drop
2147 * the lock.
2148 */
2149static struct thin_c *get_first_thin(struct pool *pool)
2150{
2151	struct thin_c *tc = NULL;
2152
2153	rcu_read_lock();
2154	if (!list_empty(&pool->active_thins)) {
2155		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
2156		thin_get(tc);
2157	}
2158	rcu_read_unlock();
2159
2160	return tc;
2161}
2162
2163static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
2164{
2165	struct thin_c *old_tc = tc;
2166
2167	rcu_read_lock();
2168	list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
2169		thin_get(tc);
2170		thin_put(old_tc);
2171		rcu_read_unlock();
2172		return tc;
2173	}
2174	thin_put(old_tc);
2175	rcu_read_unlock();
2176
2177	return NULL;
2178}
2179
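/*
 * A minimal sketch of how the iterator pair above is used (the real
 * loops in this file, e.g. process_deferred_bios() below, follow this
 * shape; do_something() is a hypothetical stand-in):
 *
 *	struct thin_c *tc = get_first_thin(pool);
 *	while (tc) {
 *		do_something(tc);
 *		tc = get_next_thin(pool, tc);	// drops tc's reference
 *	}
 */
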
2180static void process_deferred_bios(struct pool *pool)
2181{
2182	unsigned long flags;
2183	struct bio *bio;
2184	struct bio_list bios;
2185	struct thin_c *tc;
2186
2187	tc = get_first_thin(pool);
2188	while (tc) {
2189		process_thin_deferred_cells(tc);
2190		process_thin_deferred_bios(tc);
2191		tc = get_next_thin(pool, tc);
2192	}
2193
2194	/*
2195	 * If there are any deferred flush bios, we must commit
2196	 * the metadata before issuing them.
2197	 */
2198	bio_list_init(&bios);
2199	spin_lock_irqsave(&pool->lock, flags);
2200	bio_list_merge(&bios, &pool->deferred_flush_bios);
2201	bio_list_init(&pool->deferred_flush_bios);
2202	spin_unlock_irqrestore(&pool->lock, flags);
2203
2204	if (bio_list_empty(&bios) &&
2205	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
2206		return;
2207
2208	if (commit(pool)) {
2209		while ((bio = bio_list_pop(&bios)))
2210			bio_io_error(bio);
2211		return;
2212	}
2213	pool->last_commit_jiffies = jiffies;
2214
2215	while ((bio = bio_list_pop(&bios)))
2216		generic_make_request(bio);
2217}
2218
2219static void do_worker(struct work_struct *ws)
2220{
2221	struct pool *pool = container_of(ws, struct pool, worker);
2222
2223	throttle_work_start(&pool->throttle);
2224	dm_pool_issue_prefetches(pool->pmd);
2225	throttle_work_update(&pool->throttle);
2226	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
2227	throttle_work_update(&pool->throttle);
2228	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
2229	throttle_work_update(&pool->throttle);
2230	process_deferred_bios(pool);
2231	throttle_work_complete(&pool->throttle);
2232}
2233
2234/*
2235 * We want to commit periodically so that not too much
2236 * unwritten data builds up.
2237 */
2238static void do_waker(struct work_struct *ws)
2239{
2240	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
2241	wake_worker(pool);
2242	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
2243}
2244
2245static void notify_of_pool_mode_change_to_oods(struct pool *pool);
2246
2247/*
2248 * We're holding onto IO to allow userland time to react.  After the
2249 * timeout either the pool will have been resized (and thus back in
2250 * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
2251 */
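/*
 * The hold-off defaults to NO_SPACE_TIMEOUT_SECS (60s).  Assuming the
 * module is loaded as dm_thin_pool, it can be tuned at runtime via its
 * 'no_space_timeout' module parameter, e.g.:
 *
 *	echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 */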
2252static void do_no_space_timeout(struct work_struct *ws)
2253{
2254	struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2255					 no_space_timeout);
2256
2257	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
2258		pool->pf.error_if_no_space = true;
2259		notify_of_pool_mode_change_to_oods(pool);
2260		error_retry_list_with_code(pool, -ENOSPC);
2261	}
2262}
2263
2264/*----------------------------------------------------------------*/
2265
2266struct pool_work {
2267	struct work_struct worker;
2268	struct completion complete;
2269};
2270
2271static struct pool_work *to_pool_work(struct work_struct *ws)
2272{
2273	return container_of(ws, struct pool_work, worker);
2274}
2275
2276static void pool_work_complete(struct pool_work *pw)
2277{
2278	complete(&pw->complete);
2279}
2280
2281static void pool_work_wait(struct pool_work *pw, struct pool *pool,
2282			   void (*fn)(struct work_struct *))
2283{
2284	INIT_WORK_ONSTACK(&pw->worker, fn);
2285	init_completion(&pw->complete);
2286	queue_work(pool->wq, &pw->worker);
2287	wait_for_completion(&pw->complete);
2288}
2289
2290/*----------------------------------------------------------------*/
2291
2292struct noflush_work {
2293	struct pool_work pw;
2294	struct thin_c *tc;
2295};
2296
2297static struct noflush_work *to_noflush(struct work_struct *ws)
2298{
2299	return container_of(to_pool_work(ws), struct noflush_work, pw);
2300}
2301
2302static void do_noflush_start(struct work_struct *ws)
2303{
2304	struct noflush_work *w = to_noflush(ws);
2305	w->tc->requeue_mode = true;
2306	requeue_io(w->tc);
2307	pool_work_complete(&w->pw);
2308}
2309
2310static void do_noflush_stop(struct work_struct *ws)
2311{
2312	struct noflush_work *w = to_noflush(ws);
2313	w->tc->requeue_mode = false;
2314	pool_work_complete(&w->pw);
2315}
2316
2317static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2318{
2319	struct noflush_work w;
2320
2321	w.tc = tc;
2322	pool_work_wait(&w.pw, tc->pool, fn);
2323}
2324
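/*
 * Callers in the thin target's suspend path (later in this file)
 * bracket a noflush suspend like so:
 *
 *	noflush_work(tc, do_noflush_start);
 *	...
 *	noflush_work(tc, do_noflush_stop);
 */
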
2325/*----------------------------------------------------------------*/
2326
2327static enum pool_mode get_pool_mode(struct pool *pool)
2328{
2329	return pool->pf.mode;
2330}
2331
2332static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
2333{
2334	dm_table_event(pool->ti->table);
2335	DMINFO("%s: switching pool to %s mode",
2336	       dm_device_name(pool->pool_md), new_mode);
2337}
2338
2339static void notify_of_pool_mode_change_to_oods(struct pool *pool)
2340{
2341	if (!pool->pf.error_if_no_space)
2342		notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
2343	else
2344		notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
2345}
2346
2347static bool passdown_enabled(struct pool_c *pt)
2348{
2349	return pt->adjusted_pf.discard_passdown;
2350}
2351
2352static void set_discard_callbacks(struct pool *pool)
2353{
2354	struct pool_c *pt = pool->ti->private;
2355
2356	if (passdown_enabled(pt)) {
2357		pool->process_discard_cell = process_discard_cell_passdown;
2358		pool->process_prepared_discard = process_prepared_discard_passdown;
2359	} else {
2360		pool->process_discard_cell = process_discard_cell_no_passdown;
2361		pool->process_prepared_discard = process_prepared_discard_no_passdown;
2362	}
2363}
2364
2365static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2366{
2367	struct pool_c *pt = pool->ti->private;
2368	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
2369	enum pool_mode old_mode = get_pool_mode(pool);
2370	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
2371
2372	/*
2373	 * Never allow the pool to transition to PM_WRITE mode if user
2374	 * intervention is required to verify metadata and data consistency.
2375	 */
2376	if (new_mode == PM_WRITE && needs_check) {
2377		DMERR("%s: unable to switch pool to write mode until repaired.",
2378		      dm_device_name(pool->pool_md));
2379		if (old_mode != new_mode)
2380			new_mode = old_mode;
2381		else
2382			new_mode = PM_READ_ONLY;
2383	}
2384	/*
2385	 * If we were in PM_FAIL mode, rollback of metadata failed.  We're
2386	 * not going to recover without a thin_repair.  So we never let the
2387	 * pool move out of the old mode.
2388	 */
2389	if (old_mode == PM_FAIL)
2390		new_mode = old_mode;
2391
2392	switch (new_mode) {
2393	case PM_FAIL:
2394		if (old_mode != new_mode)
2395			notify_of_pool_mode_change(pool, "failure");
2396		dm_pool_metadata_read_only(pool->pmd);
2397		pool->process_bio = process_bio_fail;
2398		pool->process_discard = process_bio_fail;
2399		pool->process_cell = process_cell_fail;
2400		pool->process_discard_cell = process_cell_fail;
2401		pool->process_prepared_mapping = process_prepared_mapping_fail;
2402		pool->process_prepared_discard = process_prepared_discard_fail;
2403
2404		error_retry_list(pool);
2405		break;
2406
2407	case PM_READ_ONLY:
2408		if (old_mode != new_mode)
2409			notify_of_pool_mode_change(pool, "read-only");
2410		dm_pool_metadata_read_only(pool->pmd);
2411		pool->process_bio = process_bio_read_only;
2412		pool->process_discard = process_bio_success;
2413		pool->process_cell = process_cell_read_only;
2414		pool->process_discard_cell = process_cell_success;
2415		pool->process_prepared_mapping = process_prepared_mapping_fail;
2416		pool->process_prepared_discard = process_prepared_discard_success;
2417
2418		error_retry_list(pool);
2419		break;
2420
2421	case PM_OUT_OF_DATA_SPACE:
2422		/*
2423		 * Ideally we'd never hit this state; the low water mark
2424		 * would trigger userland to extend the pool before we
2425		 * completely run out of data space.  However, many small
2426		 * IOs to unprovisioned space can consume data space at an
2427		 * alarming rate.  Adjust your low water mark if you're
2428		 * frequently seeing this mode.
2429		 */
2430		if (old_mode != new_mode)
2431			notify_of_pool_mode_change_to_oods(pool);
2432		pool->out_of_data_space = true;
2433		pool->process_bio = process_bio_read_only;
2434		pool->process_discard = process_discard_bio;
2435		pool->process_cell = process_cell_read_only;
2436		pool->process_prepared_mapping = process_prepared_mapping;
2437		set_discard_callbacks(pool);
2438
2439		if (!pool->pf.error_if_no_space && no_space_timeout)
2440			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
2441		break;
2442
2443	case PM_WRITE:
2444		if (old_mode != new_mode)
2445			notify_of_pool_mode_change(pool, "write");
2446		pool->out_of_data_space = false;
2447		pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
2448		dm_pool_metadata_read_write(pool->pmd);
2449		pool->process_bio = process_bio;
2450		pool->process_discard = process_discard_bio;
2451		pool->process_cell = process_cell;
2452		pool->process_prepared_mapping = process_prepared_mapping;
2453		set_discard_callbacks(pool);
2454		break;
2455	}
2456
2457	pool->pf.mode = new_mode;
2458	/*
2459	 * The pool mode may have changed, sync it so bind_control_target()
2460	 * doesn't cause an unexpected mode transition on resume.
2461	 */
2462	pt->adjusted_pf.mode = new_mode;
2463}
2464
2465static void abort_transaction(struct pool *pool)
2466{
2467	const char *dev_name = dm_device_name(pool->pool_md);
2468
2469	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
2470	if (dm_pool_abort_metadata(pool->pmd)) {
2471		DMERR("%s: failed to abort metadata transaction", dev_name);
2472		set_pool_mode(pool, PM_FAIL);
2473	}
2474
2475	if (dm_pool_metadata_set_needs_check(pool->pmd)) {
2476		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
2477		set_pool_mode(pool, PM_FAIL);
2478	}
2479}
2480
2481static void metadata_operation_failed(struct pool *pool, const char *op, int r)
2482{
2483	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
2484		    dm_device_name(pool->pool_md), op, r);
2485
2486	abort_transaction(pool);
2487	set_pool_mode(pool, PM_READ_ONLY);
2488}
2489
2490/*----------------------------------------------------------------*/
2491
2492/*
2493 * Mapping functions.
2494 */
2495
2496/*
2497 * Called only while mapping a thin bio to hand it over to the workqueue.
2498 */
2499static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2500{
2501	unsigned long flags;
2502	struct pool *pool = tc->pool;
2503
2504	spin_lock_irqsave(&tc->lock, flags);
2505	bio_list_add(&tc->deferred_bio_list, bio);
2506	spin_unlock_irqrestore(&tc->lock, flags);
2507
2508	wake_worker(pool);
2509}
2510
2511static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2512{
2513	struct pool *pool = tc->pool;
2514
2515	throttle_lock(&pool->throttle);
2516	thin_defer_bio(tc, bio);
2517	throttle_unlock(&pool->throttle);
2518}
2519
2520static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2521{
2522	unsigned long flags;
2523	struct pool *pool = tc->pool;
2524
2525	throttle_lock(&pool->throttle);
2526	spin_lock_irqsave(&tc->lock, flags);
2527	list_add_tail(&cell->user_list, &tc->deferred_cells);
2528	spin_unlock_irqrestore(&tc->lock, flags);
2529	throttle_unlock(&pool->throttle);
2530
2531	wake_worker(pool);
2532}
2533
2534static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
2535{
2536	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2537
2538	h->tc = tc;
2539	h->shared_read_entry = NULL;
2540	h->all_io_entry = NULL;
2541	h->overwrite_mapping = NULL;
2542	h->cell = NULL;
2543}
2544
2545/*
2546 * Non-blocking function called from the thin target's map function.
2547 */
2548static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2549{
2550	int r;
2551	struct thin_c *tc = ti->private;
2552	dm_block_t block = get_bio_block(tc, bio);
2553	struct dm_thin_device *td = tc->td;
2554	struct dm_thin_lookup_result result;
2555	struct dm_bio_prison_cell *virt_cell, *data_cell;
2556	struct dm_cell_key key;
2557
2558	thin_hook_bio(tc, bio);
2559
2560	if (tc->requeue_mode) {
2561		bio->bi_error = DM_ENDIO_REQUEUE;
2562		bio_endio(bio);
2563		return DM_MAPIO_SUBMITTED;
2564	}
2565
2566	if (get_pool_mode(tc->pool) == PM_FAIL) {
2567		bio_io_error(bio);
2568		return DM_MAPIO_SUBMITTED;
2569	}
2570
2571	if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
2572		thin_defer_bio_with_throttle(tc, bio);
2573		return DM_MAPIO_SUBMITTED;
2574	}
2575
2576	/*
2577	 * We must hold the virtual cell before doing the lookup, otherwise
2578	 * there's a race with discard.
2579	 */
2580	build_virtual_key(tc->td, block, &key);
2581	if (bio_detain(tc->pool, &key, bio, &virt_cell))
2582		return DM_MAPIO_SUBMITTED;
2583
2584	r = dm_thin_find_block(td, block, 0, &result);
2585
2586	/*
2587	 * Note that we defer readahead too.
2588	 */
2589	switch (r) {
2590	case 0:
2591		if (unlikely(result.shared)) {
2592			/*
2593			 * We have a race condition here between the
2594			 * result.shared value returned by the lookup and
2595			 * snapshot creation, which may cause new
2596			 * sharing.
2597			 *
2598			 * To avoid this always quiesce the origin before
2599			 * taking the snap.  You want to do this anyway to
2600			 * ensure a consistent application view
2601			 * (i.e. lockfs).
2602			 *
2603			 * More distant ancestors are irrelevant. The
2604			 * shared flag will be set in their case.
2605			 */
2606			thin_defer_cell(tc, virt_cell);
2607			return DM_MAPIO_SUBMITTED;
2608		}
2609
2610		build_data_key(tc->td, result.block, &key);
2611		if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2612			cell_defer_no_holder(tc, virt_cell);
2613			return DM_MAPIO_SUBMITTED;
2614		}
2615
2616		inc_all_io_entry(tc->pool, bio);
2617		cell_defer_no_holder(tc, data_cell);
2618		cell_defer_no_holder(tc, virt_cell);
2619
2620		remap(tc, bio, result.block);
2621		return DM_MAPIO_REMAPPED;
2622
2623	case -ENODATA:
2624	case -EWOULDBLOCK:
2625		thin_defer_cell(tc, virt_cell);
2626		return DM_MAPIO_SUBMITTED;
2627
2628	default:
2629		/*
2630		 * Must always call bio_io_error on failure.
2631		 * dm_thin_find_block can fail with -EINVAL if the
2632		 * pool is switched to fail-io mode.
2633		 */
2634		bio_io_error(bio);
2635		cell_defer_no_holder(tc, virt_cell);
2636		return DM_MAPIO_SUBMITTED;
2637	}
2638}
2639
2640static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2641{
2642	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
2643	struct request_queue *q;
2644
2645	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
2646		return 1;
2647
2648	q = bdev_get_queue(pt->data_dev->bdev);
2649	return bdi_congested(&q->backing_dev_info, bdi_bits);
2650}
2651
2652static void requeue_bios(struct pool *pool)
2653{
2654	unsigned long flags;
2655	struct thin_c *tc;
2656
2657	rcu_read_lock();
2658	list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2659		spin_lock_irqsave(&tc->lock, flags);
2660		bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2661		bio_list_init(&tc->retry_on_resume_list);
2662		spin_unlock_irqrestore(&tc->lock, flags);
2663	}
2664	rcu_read_unlock();
2665}
2666
2667/*----------------------------------------------------------------
2668 * Binding of control targets to a pool object
2669 *--------------------------------------------------------------*/
2670static bool data_dev_supports_discard(struct pool_c *pt)
2671{
2672	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2673
2674	return q && blk_queue_discard(q);
2675}
2676
2677static bool is_factor(sector_t block_size, uint32_t n)
2678{
2679	return !sector_div(block_size, n);
2680}
2681
2682/*
2683 * If discard_passdown was enabled verify that the data device
2684 * supports discards.  Disable discard_passdown if not.
2685 */
2686static void disable_passdown_if_not_supported(struct pool_c *pt)
2687{
2688	struct pool *pool = pt->pool;
2689	struct block_device *data_bdev = pt->data_dev->bdev;
2690	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
2691	const char *reason = NULL;
2692	char buf[BDEVNAME_SIZE];
2693
2694	if (!pt->adjusted_pf.discard_passdown)
2695		return;
2696
2697	if (!data_dev_supports_discard(pt))
2698		reason = "discard unsupported";
2699
2700	else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2701		reason = "max discard sectors smaller than a block";
2702
2703	if (reason) {
2704		DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
2705		pt->adjusted_pf.discard_passdown = false;
2706	}
2707}
2708
2709static int bind_control_target(struct pool *pool, struct dm_target *ti)
2710{
2711	struct pool_c *pt = ti->private;
2712
2713	/*
2714	 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
2715	 */
2716	enum pool_mode old_mode = get_pool_mode(pool);
2717	enum pool_mode new_mode = pt->adjusted_pf.mode;
2718
2719	/*
2720	 * Don't change the pool's mode until set_pool_mode() below.
2721	 * Otherwise the pool's process_* function pointers may
2722	 * not match the desired pool mode.
2723	 */
2724	pt->adjusted_pf.mode = old_mode;
2725
2726	pool->ti = ti;
2727	pool->pf = pt->adjusted_pf;
2728	pool->low_water_blocks = pt->low_water_blocks;
2729
2730	set_pool_mode(pool, new_mode);
2731
2732	return 0;
2733}
2734
2735static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2736{
2737	if (pool->ti == ti)
2738		pool->ti = NULL;
2739}
2740
2741/*----------------------------------------------------------------
2742 * Pool creation
2743 *--------------------------------------------------------------*/
2744/* Initialize pool features. */
2745static void pool_features_init(struct pool_features *pf)
2746{
2747	pf->mode = PM_WRITE;
2748	pf->zero_new_blocks = true;
2749	pf->discard_enabled = true;
2750	pf->discard_passdown = true;
2751	pf->error_if_no_space = false;
2752}
2753
2754static void __pool_destroy(struct pool *pool)
2755{
2756	__pool_table_remove(pool);
2757
2758	vfree(pool->cell_sort_array);
2759	if (dm_pool_metadata_close(pool->pmd) < 0)
2760		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2761
2762	dm_bio_prison_destroy(pool->prison);
2763	dm_kcopyd_client_destroy(pool->copier);
2764
2765	if (pool->wq)
2766		destroy_workqueue(pool->wq);
2767
2768	if (pool->next_mapping)
2769		mempool_free(pool->next_mapping, pool->mapping_pool);
2770	mempool_destroy(pool->mapping_pool);
2771	dm_deferred_set_destroy(pool->shared_read_ds);
2772	dm_deferred_set_destroy(pool->all_io_ds);
2773	kfree(pool);
2774}
2775
2776static struct kmem_cache *_new_mapping_cache;
2777
2778static struct pool *pool_create(struct mapped_device *pool_md,
2779				struct block_device *metadata_dev,
2780				unsigned long block_size,
2781				int read_only, char **error)
2782{
2783	int r;
2784	void *err_p;
2785	struct pool *pool;
2786	struct dm_pool_metadata *pmd;
2787	bool format_device = read_only ? false : true;
2788
2789	pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
2790	if (IS_ERR(pmd)) {
2791		*error = "Error creating metadata object";
2792		return (struct pool *)pmd;
2793	}
2794
2795	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2796	if (!pool) {
2797		*error = "Error allocating memory for pool";
2798		err_p = ERR_PTR(-ENOMEM);
2799		goto bad_pool;
2800	}
2801
2802	pool->pmd = pmd;
2803	pool->sectors_per_block = block_size;
2804	if (block_size & (block_size - 1))
2805		pool->sectors_per_block_shift = -1;
2806	else
2807		pool->sectors_per_block_shift = __ffs(block_size);
2808	pool->low_water_blocks = 0;
2809	pool_features_init(&pool->pf);
2810	pool->prison = dm_bio_prison_create();
2811	if (!pool->prison) {
2812		*error = "Error creating pool's bio prison";
2813		err_p = ERR_PTR(-ENOMEM);
2814		goto bad_prison;
2815	}
2816
2817	pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2818	if (IS_ERR(pool->copier)) {
2819		r = PTR_ERR(pool->copier);
2820		*error = "Error creating pool's kcopyd client";
2821		err_p = ERR_PTR(r);
2822		goto bad_kcopyd_client;
2823	}
2824
2825	/*
2826	 * Create a single-threaded workqueue that will service all devices
2827	 * that use this metadata.
2828	 */
2829	pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2830	if (!pool->wq) {
2831		*error = "Error creating pool's workqueue";
2832		err_p = ERR_PTR(-ENOMEM);
2833		goto bad_wq;
2834	}
2835
2836	throttle_init(&pool->throttle);
2837	INIT_WORK(&pool->worker, do_worker);
2838	INIT_DELAYED_WORK(&pool->waker, do_waker);
2839	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
2840	spin_lock_init(&pool->lock);
2841	bio_list_init(&pool->deferred_flush_bios);
2842	INIT_LIST_HEAD(&pool->prepared_mappings);
2843	INIT_LIST_HEAD(&pool->prepared_discards);
2844	INIT_LIST_HEAD(&pool->active_thins);
2845	pool->low_water_triggered = false;
2846	pool->suspended = true;
2847	pool->out_of_data_space = false;
2848
2849	pool->shared_read_ds = dm_deferred_set_create();
2850	if (!pool->shared_read_ds) {
2851		*error = "Error creating pool's shared read deferred set";
2852		err_p = ERR_PTR(-ENOMEM);
2853		goto bad_shared_read_ds;
2854	}
2855
2856	pool->all_io_ds = dm_deferred_set_create();
2857	if (!pool->all_io_ds) {
2858		*error = "Error creating pool's all io deferred set";
2859		err_p = ERR_PTR(-ENOMEM);
2860		goto bad_all_io_ds;
2861	}
2862
2863	pool->next_mapping = NULL;
2864	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2865						      _new_mapping_cache);
2866	if (!pool->mapping_pool) {
2867		*error = "Error creating pool's mapping mempool";
2868		err_p = ERR_PTR(-ENOMEM);
2869		goto bad_mapping_pool;
2870	}
2871
2872	pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
2873	if (!pool->cell_sort_array) {
2874		*error = "Error allocating cell sort array";
2875		err_p = ERR_PTR(-ENOMEM);
2876		goto bad_sort_array;
2877	}
2878
2879	pool->ref_count = 1;
2880	pool->last_commit_jiffies = jiffies;
2881	pool->pool_md = pool_md;
2882	pool->md_dev = metadata_dev;
2883	__pool_table_insert(pool);
2884
2885	return pool;
2886
2887bad_sort_array:
2888	mempool_destroy(pool->mapping_pool);
2889bad_mapping_pool:
2890	dm_deferred_set_destroy(pool->all_io_ds);
2891bad_all_io_ds:
2892	dm_deferred_set_destroy(pool->shared_read_ds);
2893bad_shared_read_ds:
2894	destroy_workqueue(pool->wq);
2895bad_wq:
2896	dm_kcopyd_client_destroy(pool->copier);
2897bad_kcopyd_client:
2898	dm_bio_prison_destroy(pool->prison);
2899bad_prison:
2900	kfree(pool);
2901bad_pool:
2902	if (dm_pool_metadata_close(pmd))
2903		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2904
2905	return err_p;
2906}
2907
2908static void __pool_inc(struct pool *pool)
2909{
2910	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2911	pool->ref_count++;
2912}
2913
2914static void __pool_dec(struct pool *pool)
2915{
2916	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2917	BUG_ON(!pool->ref_count);
2918	if (!--pool->ref_count)
2919		__pool_destroy(pool);
2920}
2921
2922static struct pool *__pool_find(struct mapped_device *pool_md,
2923				struct block_device *metadata_dev,
2924				unsigned long block_size, int read_only,
2925				char **error, int *created)
2926{
2927	struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2928
2929	if (pool) {
2930		if (pool->pool_md != pool_md) {
2931			*error = "metadata device already in use by a pool";
2932			return ERR_PTR(-EBUSY);
2933		}
2934		__pool_inc(pool);
2935
2936	} else {
2937		pool = __pool_table_lookup(pool_md);
2938		if (pool) {
2939			if (pool->md_dev != metadata_dev) {
2940				*error = "different pool cannot replace a pool";
2941				return ERR_PTR(-EINVAL);
2942			}
2943			__pool_inc(pool);
2944
2945		} else {
2946			pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
2947			*created = 1;
2948		}
2949	}
2950
2951	return pool;
2952}
2953
2954/*----------------------------------------------------------------
2955 * Pool target methods
2956 *--------------------------------------------------------------*/
2957static void pool_dtr(struct dm_target *ti)
2958{
2959	struct pool_c *pt = ti->private;
2960
2961	mutex_lock(&dm_thin_pool_table.mutex);
2962
2963	unbind_control_target(pt->pool, ti);
2964	__pool_dec(pt->pool);
2965	dm_put_device(ti, pt->metadata_dev);
2966	dm_put_device(ti, pt->data_dev);
2967	kfree(pt);
2968
2969	mutex_unlock(&dm_thin_pool_table.mutex);
2970}
2971
2972static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2973			       struct dm_target *ti)
2974{
2975	int r;
2976	unsigned argc;
2977	const char *arg_name;
2978
2979	static struct dm_arg _args[] = {
2980		{0, 4, "Invalid number of pool feature arguments"},
2981	};
2982
2983	/*
2984	 * No feature arguments supplied.
2985	 */
2986	if (!as->argc)
2987		return 0;
2988
2989	r = dm_read_arg_group(_args, as, &argc, &ti->error);
2990	if (r)
2991		return -EINVAL;
2992
2993	while (argc && !r) {
2994		arg_name = dm_shift_arg(as);
2995		argc--;
2996
2997		if (!strcasecmp(arg_name, "skip_block_zeroing"))
2998			pf->zero_new_blocks = false;
2999
3000		else if (!strcasecmp(arg_name, "ignore_discard"))
3001			pf->discard_enabled = false;
3002
3003		else if (!strcasecmp(arg_name, "no_discard_passdown"))
3004			pf->discard_passdown = false;
3005
3006		else if (!strcasecmp(arg_name, "read_only"))
3007			pf->mode = PM_READ_ONLY;
3008
3009		else if (!strcasecmp(arg_name, "error_if_no_space"))
3010			pf->error_if_no_space = true;
3011
3012		else {
3013			ti->error = "Unrecognised pool feature requested";
3014			r = -EINVAL;
3015			break;
3016		}
3017	}
3018
3019	return r;
3020}
3021
3022static void metadata_low_callback(void *context)
3023{
3024	struct pool *pool = context;
3025
3026	DMWARN("%s: reached low water mark for metadata device: sending event.",
3027	       dm_device_name(pool->pool_md));
3028
3029	dm_table_event(pool->ti->table);
3030}
3031
3032static sector_t get_dev_size(struct block_device *bdev)
3033{
3034	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
3035}
3036
3037static void warn_if_metadata_device_too_big(struct block_device *bdev)
3038{
3039	sector_t metadata_dev_size = get_dev_size(bdev);
3040	char buffer[BDEVNAME_SIZE];
3041
3042	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
3043		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
3044		       bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
3045}
3046
3047static sector_t get_metadata_dev_size(struct block_device *bdev)
3048{
3049	sector_t metadata_dev_size = get_dev_size(bdev);
3050
3051	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
3052		metadata_dev_size = THIN_METADATA_MAX_SECTORS;
3053
3054	return metadata_dev_size;
3055}
3056
3057static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
3058{
3059	sector_t metadata_dev_size = get_metadata_dev_size(bdev);
3060
3061	sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
3062
3063	return metadata_dev_size;
3064}
3065
3066/*
3067 * When a metadata threshold is crossed a dm event is triggered, and
3068 * userland should respond by growing the metadata device.  We could let
3069 * userland set the threshold, like we do with the data threshold, but I'm
3070 * not sure they know enough to do this well.
3071 */
3072static dm_block_t calc_metadata_threshold(struct pool_c *pt)
3073{
3074	/*
3075	 * 4M is ample for all ops with the possible exception of thin
3076	 * device deletion which is harmless if it fails (just retry the
3077	 * delete after you've grown the device).
3078	 */
3079	dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
3080	return min((dm_block_t)1024ULL /* 4M */, quarter);
3081}
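
/*
 * e.g. (illustrative): a 16MiB metadata device holds 4096 4KiB blocks,
 * so quarter = 1024 and the threshold stays at the 1024-block (4M) cap;
 * a 2MiB device gives quarter = 128, so the threshold drops to 128 blocks.
 */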
3082
3083/*
3084 * thin-pool <metadata dev> <data dev>
3085 *	     <data block size (sectors)>
3086 *	     <low water mark (blocks)>
3087 *	     [<#feature args> [<arg>]*]
3088 *
3089 * Optional feature arguments are:
3090 *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
3091 *	     ignore_discard: disable discard
3092 *	     no_discard_passdown: don't pass discards down to the data device
3093 *	     read_only: Don't allow any changes to be made to the pool metadata.
3094 *	     error_if_no_space: error IOs, instead of queueing, if no space.
3095 */
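/*
 * An illustrative table line (device names and sizes are hypothetical):
 * a 10GiB (20971520-sector) pool with 64KiB (128-sector) blocks, a
 * 100-block low water mark and no feature arguments:
 *
 *	dmsetup create pool --table \
 *	  "0 20971520 thin-pool /dev/mapper/meta /dev/mapper/data 128 100 0"
 */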
3096static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
3097{
3098	int r, pool_created = 0;
3099	struct pool_c *pt;
3100	struct pool *pool;
3101	struct pool_features pf;
3102	struct dm_arg_set as;
3103	struct dm_dev *data_dev;
3104	unsigned long block_size;
3105	dm_block_t low_water_blocks;
3106	struct dm_dev *metadata_dev;
3107	fmode_t metadata_mode;
3108
3109	/*
3110	 * FIXME Remove validation from scope of lock.
3111	 */
3112	mutex_lock(&dm_thin_pool_table.mutex);
3113
3114	if (argc < 4) {
3115		ti->error = "Invalid argument count";
3116		r = -EINVAL;
3117		goto out_unlock;
3118	}
3119
3120	as.argc = argc;
3121	as.argv = argv;
3122
3123	/*
3124	 * Set default pool features.
3125	 */
3126	pool_features_init(&pf);
3127
3128	dm_consume_args(&as, 4);
3129	r = parse_pool_features(&as, &pf, ti);
3130	if (r)
3131		goto out_unlock;
3132
3133	metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
3134	r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
3135	if (r) {
3136		ti->error = "Error opening metadata block device";
3137		goto out_unlock;
3138	}
3139	warn_if_metadata_device_too_big(metadata_dev->bdev);
3140
3141	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
3142	if (r) {
3143		ti->error = "Error getting data device";
3144		goto out_metadata;
3145	}
3146
3147	if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
3148	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
3149	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
3150	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
3151		ti->error = "Invalid block size";
3152		r = -EINVAL;
3153		goto out;
3154	}
3155
3156	if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
3157		ti->error = "Invalid low water mark";
3158		r = -EINVAL;
3159		goto out;
3160	}
3161
3162	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
3163	if (!pt) {
3164		r = -ENOMEM;
3165		goto out;
3166	}
3167
3168	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
3169			   block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
3170	if (IS_ERR(pool)) {
3171		r = PTR_ERR(pool);
3172		goto out_free_pt;
3173	}
3174
3175	/*
3176	 * 'pool_created' reflects whether this is the first table load.
3177	 * Top level discard support is not allowed to be changed after
3178	 * initial load.  This would require a pool reload to trigger thin
3179	 * device changes.
3180	 */
3181	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
3182		ti->error = "Discard support cannot be disabled once enabled";
3183		r = -EINVAL;
3184		goto out_flags_changed;
3185	}
3186
3187	pt->pool = pool;
3188	pt->ti = ti;
3189	pt->metadata_dev = metadata_dev;
3190	pt->data_dev = data_dev;
3191	pt->low_water_blocks = low_water_blocks;
3192	pt->adjusted_pf = pt->requested_pf = pf;
3193	ti->num_flush_bios = 1;
3194
3195	/*
3196	 * Only need to enable discards if the pool should pass
3197	 * them down to the data device.  The thin device's discard
3198	 * processing will cause mappings to be removed from the btree.
3199	 */
3200	ti->discard_zeroes_data_unsupported = true;
3201	if (pf.discard_enabled && pf.discard_passdown) {
3202		ti->num_discard_bios = 1;
3203
3204		/*
3205		 * Setting 'discards_supported' circumvents the normal
3206		 * stacking of discard limits (this keeps the pool and
3207		 * thin devices' discard limits consistent).
3208		 */
3209		ti->discards_supported = true;
3210	}
3211	ti->private = pt;
3212
3213	r = dm_pool_register_metadata_threshold(pt->pool->pmd,
3214						calc_metadata_threshold(pt),
3215						metadata_low_callback,
3216						pool);
3217	if (r)
3218		goto out_flags_changed;
3219
3220	pt->callbacks.congested_fn = pool_is_congested;
3221	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
3222
3223	mutex_unlock(&dm_thin_pool_table.mutex);
3224
3225	return 0;
3226
3227out_flags_changed:
3228	__pool_dec(pool);
3229out_free_pt:
3230	kfree(pt);
3231out:
3232	dm_put_device(ti, data_dev);
3233out_metadata:
3234	dm_put_device(ti, metadata_dev);
3235out_unlock:
3236	mutex_unlock(&dm_thin_pool_table.mutex);
3237
3238	return r;
3239}
3240
3241static int pool_map(struct dm_target *ti, struct bio *bio)
3242{
3243	int r;
3244	struct pool_c *pt = ti->private;
3245	struct pool *pool = pt->pool;
3246	unsigned long flags;
3247
3248	/*
3249	 * As this is a singleton target, ti->begin is always zero.
3250	 */
3251	spin_lock_irqsave(&pool->lock, flags);
3252	bio->bi_bdev = pt->data_dev->bdev;
3253	r = DM_MAPIO_REMAPPED;
3254	spin_unlock_irqrestore(&pool->lock, flags);
3255
3256	return r;
3257}
3258
3259static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
3260{
3261	int r;
3262	struct pool_c *pt = ti->private;
3263	struct pool *pool = pt->pool;
3264	sector_t data_size = ti->len;
3265	dm_block_t sb_data_size;
3266
3267	*need_commit = false;
3268
3269	(void) sector_div(data_size, pool->sectors_per_block);
3270
3271	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
3272	if (r) {
3273		DMERR("%s: failed to retrieve data device size",
3274		      dm_device_name(pool->pool_md));
3275		return r;
3276	}
3277
3278	if (data_size < sb_data_size) {
3279		DMERR("%s: pool target (%llu blocks) too small: expected %llu",
3280		      dm_device_name(pool->pool_md),
3281		      (unsigned long long)data_size, sb_data_size);
3282		return -EINVAL;
3283
3284	} else if (data_size > sb_data_size) {
3285		if (dm_pool_metadata_needs_check(pool->pmd)) {
3286			DMERR("%s: unable to grow the data device until repaired.",
3287			      dm_device_name(pool->pool_md));
3288			return 0;
3289		}
3290
3291		if (sb_data_size)
3292			DMINFO("%s: growing the data device from %llu to %llu blocks",
3293			       dm_device_name(pool->pool_md),
3294			       sb_data_size, (unsigned long long)data_size);
3295		r = dm_pool_resize_data_dev(pool->pmd, data_size);
3296		if (r) {
3297			metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
3298			return r;
3299		}
3300
3301		*need_commit = true;
3302	}
3303
3304	return 0;
3305}
3306
3307static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3308{
3309	int r;
3310	struct pool_c *pt = ti->private;
3311	struct pool *pool = pt->pool;
3312	dm_block_t metadata_dev_size, sb_metadata_dev_size;
3313
3314	*need_commit = false;
3315
3316	metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
3317
3318	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
3319	if (r) {
3320		DMERR("%s: failed to retrieve metadata device size",
3321		      dm_device_name(pool->pool_md));
3322		return r;
3323	}
3324
3325	if (metadata_dev_size < sb_metadata_dev_size) {
3326		DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
3327		      dm_device_name(pool->pool_md),
3328		      metadata_dev_size, sb_metadata_dev_size);
3329		return -EINVAL;
3330
3331	} else if (metadata_dev_size > sb_metadata_dev_size) {
3332		if (dm_pool_metadata_needs_check(pool->pmd)) {
3333			DMERR("%s: unable to grow the metadata device until repaired.",
3334			      dm_device_name(pool->pool_md));
3335			return 0;
3336		}
3337
3338		warn_if_metadata_device_too_big(pool->md_dev);
3339		DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3340		       dm_device_name(pool->pool_md),
3341		       sb_metadata_dev_size, metadata_dev_size);
3342		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3343		if (r) {
3344			metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
3345			return r;
3346		}
3347
3348		*need_commit = true;
3349	}
3350
3351	return 0;
3352}
3353
3354/*
3355 * Retrieves the number of blocks of the data device from
3356 * the superblock and compares it to the actual device size,
3357 * resizing the data device if it has grown.
3358 *
3359 * This copes both with opening preallocated data devices in the ctr
3360 * being followed by a resume,
3361 * -and-
3362 * with the resume method being called on its own after userspace has
3363 * grown the data device in reaction to a table event.
3364 */
3365static int pool_preresume(struct dm_target *ti)
3366{
3367	int r;
3368	bool need_commit1, need_commit2;
3369	struct pool_c *pt = ti->private;
3370	struct pool *pool = pt->pool;
3371
3372	/*
3373	 * Take control of the pool object.
3374	 */
3375	r = bind_control_target(pool, ti);
3376	if (r)
3377		return r;
3378
3379	r = maybe_resize_data_dev(ti, &need_commit1);
3380	if (r)
3381		return r;
3382
3383	r = maybe_resize_metadata_dev(ti, &need_commit2);
3384	if (r)
3385		return r;
3386
3387	if (need_commit1 || need_commit2)
3388		(void) commit(pool);
3389
3390	return 0;
3391}
3392
3393static void pool_suspend_active_thins(struct pool *pool)
3394{
3395	struct thin_c *tc;
3396
3397	/* Suspend all active thin devices */
3398	tc = get_first_thin(pool);
3399	while (tc) {
3400		dm_internal_suspend_noflush(tc->thin_md);
3401		tc = get_next_thin(pool, tc);
3402	}
3403}
3404
3405static void pool_resume_active_thins(struct pool *pool)
3406{
3407	struct thin_c *tc;
3408
3409	/* Resume all active thin devices */
3410	tc = get_first_thin(pool);
3411	while (tc) {
3412		dm_internal_resume(tc->thin_md);
3413		tc = get_next_thin(pool, tc);
3414	}
3415}
3416
3417static void pool_resume(struct dm_target *ti)
3418{
3419	struct pool_c *pt = ti->private;
3420	struct pool *pool = pt->pool;
3421	unsigned long flags;
3422
3423	/*
3424	 * Must requeue active_thins' bios and then resume
3425	 * active_thins _before_ clearing 'suspend' flag.
3426	 */
3427	requeue_bios(pool);
3428	pool_resume_active_thins(pool);
3429
3430	spin_lock_irqsave(&pool->lock, flags);
3431	pool->low_water_triggered = false;
3432	pool->suspended = false;
3433	spin_unlock_irqrestore(&pool->lock, flags);
3434
3435	do_waker(&pool->waker.work);
3436}
3437
3438static void pool_presuspend(struct dm_target *ti)
3439{
3440	struct pool_c *pt = ti->private;
3441	struct pool *pool = pt->pool;
3442	unsigned long flags;
3443
3444	spin_lock_irqsave(&pool->lock, flags);
3445	pool->suspended = true;
3446	spin_unlock_irqrestore(&pool->lock, flags);
3447
3448	pool_suspend_active_thins(pool);
3449}
3450
3451static void pool_presuspend_undo(struct dm_target *ti)
3452{
3453	struct pool_c *pt = ti->private;
3454	struct pool *pool = pt->pool;
3455	unsigned long flags;
3456
3457	pool_resume_active_thins(pool);
3458
3459	spin_lock_irqsave(&pool->lock, flags);
3460	pool->suspended = false;
3461	spin_unlock_irqrestore(&pool->lock, flags);
3462}
3463
3464static void pool_postsuspend(struct dm_target *ti)
3465{
3466	struct pool_c *pt = ti->private;
3467	struct pool *pool = pt->pool;
3468
3469	cancel_delayed_work_sync(&pool->waker);
3470	cancel_delayed_work_sync(&pool->no_space_timeout);
3471	flush_workqueue(pool->wq);
3472	(void) commit(pool);
3473}
3474
3475static int check_arg_count(unsigned argc, unsigned args_required)
3476{
3477	if (argc != args_required) {
3478		DMWARN("Message received with %u arguments instead of %u.",
3479		       argc, args_required);
3480		return -EINVAL;
3481	}
3482
3483	return 0;
3484}
3485
3486static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
3487{
3488	if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
3489	    *dev_id <= MAX_DEV_ID)
3490		return 0;
3491
3492	if (warning)
3493		DMWARN("Message received with invalid device id: %s", arg);
3494
3495	return -EINVAL;
3496}
3497
3498static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
3499{
3500	dm_thin_id dev_id;
3501	int r;
3502
3503	r = check_arg_count(argc, 2);
3504	if (r)
3505		return r;
3506
3507	r = read_dev_id(argv[1], &dev_id, 1);
3508	if (r)
3509		return r;
3510
3511	r = dm_pool_create_thin(pool->pmd, dev_id);
3512	if (r) {
3513		DMWARN("Creation of new thinly-provisioned device with id %s failed.",
3514		       argv[1]);
3515		return r;
3516	}
3517
3518	return 0;
3519}
3520
3521static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3522{
3523	dm_thin_id dev_id;
3524	dm_thin_id origin_dev_id;
3525	int r;
3526
3527	r = check_arg_count(argc, 3);
3528	if (r)
3529		return r;
3530
3531	r = read_dev_id(argv[1], &dev_id, 1);
3532	if (r)
3533		return r;
3534
3535	r = read_dev_id(argv[2], &origin_dev_id, 1);
3536	if (r)
3537		return r;
3538
3539	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
3540	if (r) {
3541		DMWARN("Creation of new snapshot %s of device %s failed.",
3542		       argv[1], argv[2]);
3543		return r;
3544	}
3545
3546	return 0;
3547}
3548
3549static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
3550{
3551	dm_thin_id dev_id;
3552	int r;
3553
3554	r = check_arg_count(argc, 2);
3555	if (r)
3556		return r;
3557
3558	r = read_dev_id(argv[1], &dev_id, 1);
3559	if (r)
3560		return r;
3561
3562	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
3563	if (r)
3564		DMWARN("Deletion of thin device %s failed.", argv[1]);
3565
3566	return r;
3567}
3568
3569static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
3570{
3571	dm_thin_id old_id, new_id;
3572	int r;
3573
3574	r = check_arg_count(argc, 3);
3575	if (r)
3576		return r;
3577
3578	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
3579		DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
3580		return -EINVAL;
3581	}
3582
3583	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
3584		DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
3585		return -EINVAL;
3586	}
3587
3588	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
3589	if (r) {
3590		DMWARN("Failed to change transaction id from %s to %s.",
3591		       argv[1], argv[2]);
3592		return r;
3593	}
3594
3595	return 0;
3596}
3597
3598static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3599{
3600	int r;
3601
3602	r = check_arg_count(argc, 1);
3603	if (r)
3604		return r;
3605
3606	(void) commit(pool);
3607
3608	r = dm_pool_reserve_metadata_snap(pool->pmd);
3609	if (r)
3610		DMWARN("reserve_metadata_snap message failed.");
3611
3612	return r;
3613}
3614
3615static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3616{
3617	int r;
3618
3619	r = check_arg_count(argc, 1);
3620	if (r)
3621		return r;
3622
3623	r = dm_pool_release_metadata_snap(pool->pmd);
3624	if (r)
3625		DMWARN("release_metadata_snap message failed.");
3626
3627	return r;
3628}
3629
3630/*
3631 * Messages supported:
3632 *   create_thin	<dev_id>
3633 *   create_snap	<dev_id> <origin_id>
3634 *   delete		<dev_id>
3635 *   set_transaction_id <current_trans_id> <new_trans_id>
3636 *   reserve_metadata_snap
3637 *   release_metadata_snap
3638 */
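/*
 * For example (pool device name and ids are hypothetical):
 *
 *	dmsetup message /dev/mapper/pool 0 create_thin 0
 *	dmsetup message /dev/mapper/pool 0 create_snap 1 0
 *	dmsetup message /dev/mapper/pool 0 delete 1
 */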
3639static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
3640{
3641	int r = -EINVAL;
3642	struct pool_c *pt = ti->private;
3643	struct pool *pool = pt->pool;
3644
3645	if (get_pool_mode(pool) >= PM_READ_ONLY) {
3646		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
3647		      dm_device_name(pool->pool_md));
3648		return -EOPNOTSUPP;
3649	}
3650
3651	if (!strcasecmp(argv[0], "create_thin"))
3652		r = process_create_thin_mesg(argc, argv, pool);
3653
3654	else if (!strcasecmp(argv[0], "create_snap"))
3655		r = process_create_snap_mesg(argc, argv, pool);
3656
3657	else if (!strcasecmp(argv[0], "delete"))
3658		r = process_delete_mesg(argc, argv, pool);
3659
3660	else if (!strcasecmp(argv[0], "set_transaction_id"))
3661		r = process_set_transaction_id_mesg(argc, argv, pool);
3662
3663	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
3664		r = process_reserve_metadata_snap_mesg(argc, argv, pool);
3665
3666	else if (!strcasecmp(argv[0], "release_metadata_snap"))
3667		r = process_release_metadata_snap_mesg(argc, argv, pool);
3668
3669	else
3670		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
3671
3672	if (!r)
3673		(void) commit(pool);
3674
3675	return r;
3676}
3677
3678static void emit_flags(struct pool_features *pf, char *result,
3679		       unsigned sz, unsigned maxlen)
3680{
3681	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
3682		!pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
3683		pf->error_if_no_space;
3684	DMEMIT("%u ", count);
3685
3686	if (!pf->zero_new_blocks)
3687		DMEMIT("skip_block_zeroing ");
3688
3689	if (!pf->discard_enabled)
3690		DMEMIT("ignore_discard ");
3691
3692	if (!pf->discard_passdown)
3693		DMEMIT("no_discard_passdown ");
3694
3695	if (pf->mode == PM_READ_ONLY)
3696		DMEMIT("read_only ");
3697
3698	if (pf->error_if_no_space)
3699		DMEMIT("error_if_no_space ");
3700}
3701
3702/*
3703 * Status line is:
3704 *    <transaction id> <used metadata sectors>/<total metadata sectors>
3705 *    <used data sectors>/<total data sectors> <held metadata root>
3706 *    <pool mode> <discard config> <no space config> <needs_check>
3707 */
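/*
 * e.g. (illustrative values only):
 *
 *	0 141/4161600 10672/1310720 - rw discard_passdown queue_if_no_space -
 */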
3708static void pool_status(struct dm_target *ti, status_type_t type,
3709			unsigned status_flags, char *result, unsigned maxlen)
3710{
3711	int r;
3712	unsigned sz = 0;
3713	uint64_t transaction_id;
3714	dm_block_t nr_free_blocks_data;
3715	dm_block_t nr_free_blocks_metadata;
3716	dm_block_t nr_blocks_data;
3717	dm_block_t nr_blocks_metadata;
3718	dm_block_t held_root;
3719	char buf[BDEVNAME_SIZE];
3720	char buf2[BDEVNAME_SIZE];
3721	struct pool_c *pt = ti->private;
3722	struct pool *pool = pt->pool;
3723
3724	switch (type) {
3725	case STATUSTYPE_INFO:
3726		if (get_pool_mode(pool) == PM_FAIL) {
3727			DMEMIT("Fail");
3728			break;
3729		}
3730
3731		/* Commit to ensure statistics aren't out-of-date */
3732		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
3733			(void) commit(pool);
3734
3735		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
3736		if (r) {
3737			DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
3738			      dm_device_name(pool->pool_md), r);
3739			goto err;
3740		}
3741
3742		r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
3743		if (r) {
3744			DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
3745			      dm_device_name(pool->pool_md), r);
3746			goto err;
3747		}
3748
3749		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
3750		if (r) {
3751			DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
3752			      dm_device_name(pool->pool_md), r);
3753			goto err;
3754		}
3755
3756		r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
3757		if (r) {
3758			DMERR("%s: dm_pool_get_free_block_count returned %d",
3759			      dm_device_name(pool->pool_md), r);
3760			goto err;
3761		}
3762
3763		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
3764		if (r) {
3765			DMERR("%s: dm_pool_get_data_dev_size returned %d",
3766			      dm_device_name(pool->pool_md), r);
3767			goto err;
3768		}
3769
3770		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
3771		if (r) {
3772			DMERR("%s: dm_pool_get_metadata_snap returned %d",
3773			      dm_device_name(pool->pool_md), r);
3774			goto err;
3775		}
3776
3777		DMEMIT("%llu %llu/%llu %llu/%llu ",
3778		       (unsigned long long)transaction_id,
3779		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3780		       (unsigned long long)nr_blocks_metadata,
3781		       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
3782		       (unsigned long long)nr_blocks_data);
3783
3784		if (held_root)
3785			DMEMIT("%llu ", held_root);
3786		else
3787			DMEMIT("- ");
3788
3789		if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
3790			DMEMIT("out_of_data_space ");
3791		else if (pool->pf.mode == PM_READ_ONLY)
3792			DMEMIT("ro ");
3793		else
3794			DMEMIT("rw ");
3795
3796		if (!pool->pf.discard_enabled)
3797			DMEMIT("ignore_discard ");
3798		else if (pool->pf.discard_passdown)
3799			DMEMIT("discard_passdown ");
3800		else
3801			DMEMIT("no_discard_passdown ");
3802
3803		if (pool->pf.error_if_no_space)
3804			DMEMIT("error_if_no_space ");
3805		else
3806			DMEMIT("queue_if_no_space ");
3807
3808		if (dm_pool_metadata_needs_check(pool->pmd))
3809			DMEMIT("needs_check ");
3810		else
3811			DMEMIT("- ");
3812
3813		break;
3814
3815	case STATUSTYPE_TABLE:
3816		DMEMIT("%s %s %lu %llu ",
3817		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
3818		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
3819		       (unsigned long)pool->sectors_per_block,
3820		       (unsigned long long)pt->low_water_blocks);
3821		emit_flags(&pt->requested_pf, result, sz, maxlen);
3822		break;
3823	}
3824	return;
3825
3826err:
3827	DMEMIT("Error");
3828}
3829
3830static int pool_iterate_devices(struct dm_target *ti,
3831				iterate_devices_callout_fn fn, void *data)
3832{
3833	struct pool_c *pt = ti->private;
3834
3835	return fn(ti, pt->data_dev, 0, ti->len, data);
3836}
3837
3838static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
3839{
3840	struct pool_c *pt = ti->private;
3841	struct pool *pool = pt->pool;
3842	sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3843
3844	/*
3845	 * If max_sectors is smaller than pool->sectors_per_block adjust it
3846	 * to the highest possible power-of-2 factor of pool->sectors_per_block.
3847	 * This is especially beneficial when the pool's data device is a RAID
3848	 * device whose full stripe width matches pool->sectors_per_block:
3849	 * even though partial RAID stripe-sized IOs will be issued to a
3850	 * single RAID stripe, when aggregated they will end on a full RAID
3851	 * stripe boundary, which avoids cascading further partial stripe writes.
3852	 */
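	/*
	 * Worked example with hypothetical numbers: sectors_per_block = 768,
	 * stacked max_sectors = 512.  512 does not divide 768 and is already
	 * a power of 2, so it is decremented to 511 and rounded down to 256,
	 * which does divide 768 (768 = 3 * 256), ending the loop below.
	 */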
3853	if (limits->max_sectors < pool->sectors_per_block) {
3854		while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
3855			if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
3856				limits->max_sectors--;
3857			limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
3858		}
3859	}
3860
3861	/*
3862	 * If the system-determined stacked limits are compatible with the
3863	 * pool's blocksize (io_opt is a factor) do not override them.
3864	 */
3865	if (io_opt_sectors < pool->sectors_per_block ||
3866	    !is_factor(io_opt_sectors, pool->sectors_per_block)) {
3867		if (is_factor(pool->sectors_per_block, limits->max_sectors))
3868			blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
3869		else
3870			blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
3871		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3872	}
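	/*
	 * For example (hypothetical numbers): with a 512KB pool block
	 * (sectors_per_block = 1024), a stacked io_opt of 2048 sectors is
	 * left alone, whereas an io_opt of 768 sectors is overridden
	 * because it is not a multiple of the block size.
	 */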
3873
3874	/*
3875	 * pt->adjusted_pf is a staging area for the actual features to use.
3876	 * They get transferred to the live pool in bind_control_target()
3877	 * called from pool_preresume().
3878	 */
3879	if (!pt->adjusted_pf.discard_enabled) {
3880		/*
3881		 * Must explicitly disallow stacking discard limits otherwise the
3882		 * block layer will stack them if pool's data device has support.
3883		 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
3884		 * user to see that, so make sure to set all discard limits to 0.
3885		 */
3886		limits->discard_granularity = 0;
3887		return;
3888	}
3889
3890	disable_passdown_if_not_supported(pt);
3891
3892	/*
3893	 * The pool uses the same discard limits as the underlying data
3894	 * device.  DM core has already set this up.
3895	 */
3896}
3897
3898static struct target_type pool_target = {
3899	.name = "thin-pool",
3900	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3901		    DM_TARGET_IMMUTABLE,
3902	.version = {1, 18, 0},
3903	.module = THIS_MODULE,
3904	.ctr = pool_ctr,
3905	.dtr = pool_dtr,
3906	.map = pool_map,
3907	.presuspend = pool_presuspend,
3908	.presuspend_undo = pool_presuspend_undo,
3909	.postsuspend = pool_postsuspend,
3910	.preresume = pool_preresume,
3911	.resume = pool_resume,
3912	.message = pool_message,
3913	.status = pool_status,
3914	.iterate_devices = pool_iterate_devices,
3915	.io_hints = pool_io_hints,
3916};
3917
3918/*----------------------------------------------------------------
3919 * Thin target methods
3920 *--------------------------------------------------------------*/
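/*
 * thin_get()/thin_put() reference count a thin_c: thin_dtr() drops the
 * initial reference and then waits on can_destroy, so teardown blocks
 * until every in-flight user that called thin_get() has done thin_put().
 */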
3921static void thin_get(struct thin_c *tc)
3922{
3923	atomic_inc(&tc->refcount);
3924}
3925
3926static void thin_put(struct thin_c *tc)
3927{
3928	if (atomic_dec_and_test(&tc->refcount))
3929		complete(&tc->can_destroy);
3930}
3931
3932static void thin_dtr(struct dm_target *ti)
3933{
3934	struct thin_c *tc = ti->private;
3935	unsigned long flags;
3936
3937	spin_lock_irqsave(&tc->pool->lock, flags);
3938	list_del_rcu(&tc->list);
3939	spin_unlock_irqrestore(&tc->pool->lock, flags);
3940	synchronize_rcu();
3941
3942	thin_put(tc);
3943	wait_for_completion(&tc->can_destroy);
3944
3945	mutex_lock(&dm_thin_pool_table.mutex);
3946
3947	__pool_dec(tc->pool);
3948	dm_pool_close_thin_device(tc->td);
3949	dm_put_device(ti, tc->pool_dev);
3950	if (tc->origin_dev)
3951		dm_put_device(ti, tc->origin_dev);
3952	kfree(tc);
3953
3954	mutex_unlock(&dm_thin_pool_table.mutex);
3955}
3956
3957/*
3958 * Thin target parameters:
3959 *
3960 * <pool_dev> <dev_id> [origin_dev]
3961 *
3962 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
3963 * dev_id: the internal device identifier
3964 * origin_dev: a device external to the pool that should act as the origin
3965 *
3966 * If the pool device has discards disabled, they get disabled for the thin
3967 * device as well.
3968 */
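/*
 * Illustrative construction, with made-up names and sizes, assuming
 * device id 0 was first created via the pool's create_thin message:
 *
 *   dmsetup create thin0 --table "0 2097152 thin /dev/mapper/my_pool 0"
 *
 * maps a 1GiB (2097152 sector) thin device onto internal device 0.
 */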
3969static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3970{
3971	int r;
3972	struct thin_c *tc;
3973	struct dm_dev *pool_dev, *origin_dev;
3974	struct mapped_device *pool_md;
3975	unsigned long flags;
3976
3977	mutex_lock(&dm_thin_pool_table.mutex);
3978
3979	if (argc != 2 && argc != 3) {
3980		ti->error = "Invalid argument count";
3981		r = -EINVAL;
3982		goto out_unlock;
3983	}
3984
3985	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
3986	if (!tc) {
3987		ti->error = "Out of memory";
3988		r = -ENOMEM;
3989		goto out_unlock;
3990	}
3991	tc->thin_md = dm_table_get_md(ti->table);
3992	spin_lock_init(&tc->lock);
3993	INIT_LIST_HEAD(&tc->deferred_cells);
3994	bio_list_init(&tc->deferred_bio_list);
3995	bio_list_init(&tc->retry_on_resume_list);
3996	tc->sort_bio_list = RB_ROOT;
3997
3998	if (argc == 3) {
3999		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
4000		if (r) {
4001			ti->error = "Error opening origin device";
4002			goto bad_origin_dev;
4003		}
4004		tc->origin_dev = origin_dev;
4005	}
4006
4007	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
4008	if (r) {
4009		ti->error = "Error opening pool device";
4010		goto bad_pool_dev;
4011	}
4012	tc->pool_dev = pool_dev;
4013
4014	if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
4015		ti->error = "Invalid device id";
4016		r = -EINVAL;
4017		goto bad_common;
4018	}
4019
4020	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
4021	if (!pool_md) {
4022		ti->error = "Couldn't get pool mapped device";
4023		r = -EINVAL;
4024		goto bad_common;
4025	}
4026
4027	tc->pool = __pool_table_lookup(pool_md);
4028	if (!tc->pool) {
4029		ti->error = "Couldn't find pool object";
4030		r = -EINVAL;
4031		goto bad_pool_lookup;
4032	}
4033	__pool_inc(tc->pool);
4034
4035	if (get_pool_mode(tc->pool) == PM_FAIL) {
4036		ti->error = "Couldn't open thin device, Pool is in fail mode";
4037		r = -EINVAL;
4038		goto bad_pool;
4039	}
4040
4041	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
4042	if (r) {
4043		ti->error = "Couldn't open thin internal device";
4044		goto bad_pool;
4045	}
4046
4047	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
4048	if (r)
4049		goto bad;
4050
4051	ti->num_flush_bios = 1;
4052	ti->flush_supported = true;
4053	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
4054
4055	/* In case the pool supports discards, pass them on. */
4056	ti->discard_zeroes_data_unsupported = true;
4057	if (tc->pool->pf.discard_enabled) {
4058		ti->discards_supported = true;
4059		ti->num_discard_bios = 1;
4060		ti->split_discard_bios = false;
4061	}
4062
4063	mutex_unlock(&dm_thin_pool_table.mutex);
4064
4065	spin_lock_irqsave(&tc->pool->lock, flags);
4066	if (tc->pool->suspended) {
4067		spin_unlock_irqrestore(&tc->pool->lock, flags);
4068		mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
4069		ti->error = "Unable to activate thin device while pool is suspended";
4070		r = -EINVAL;
4071		goto bad;
4072	}
4073	atomic_set(&tc->refcount, 1);
4074	init_completion(&tc->can_destroy);
4075	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
4076	spin_unlock_irqrestore(&tc->pool->lock, flags);
4077	/*
4078	 * This synchronize_rcu() call is needed here, otherwise we risk a
4079	 * wake_worker() call finding no bios to process (because the newly
4080	 * added tc isn't yet visible).  This reduces latency, since we
4081	 * aren't then dependent on the periodic commit to wake_worker().
4082	 */
4083	synchronize_rcu();
4084
4085	dm_put(pool_md);
4086
4087	return 0;
4088
4089bad:
4090	dm_pool_close_thin_device(tc->td);
4091bad_pool:
4092	__pool_dec(tc->pool);
4093bad_pool_lookup:
4094	dm_put(pool_md);
4095bad_common:
4096	dm_put_device(ti, tc->pool_dev);
4097bad_pool_dev:
4098	if (tc->origin_dev)
4099		dm_put_device(ti, tc->origin_dev);
4100bad_origin_dev:
4101	kfree(tc);
4102out_unlock:
4103	mutex_unlock(&dm_thin_pool_table.mutex);
4104
4105	return r;
4106}
4107
4108static int thin_map(struct dm_target *ti, struct bio *bio)
4109{
4110	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
4111
4112	return thin_bio_map(ti, bio);
4113}
4114
4115static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
4116{
4117	unsigned long flags;
4118	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
4119	struct list_head work;
4120	struct dm_thin_new_mapping *m, *tmp;
4121	struct pool *pool = h->tc->pool;
4122
4123	if (h->shared_read_entry) {
4124		INIT_LIST_HEAD(&work);
4125		dm_deferred_entry_dec(h->shared_read_entry, &work);
4126
4127		spin_lock_irqsave(&pool->lock, flags);
4128		list_for_each_entry_safe(m, tmp, &work, list) {
4129			list_del(&m->list);
4130			__complete_mapping_preparation(m);
4131		}
4132		spin_unlock_irqrestore(&pool->lock, flags);
4133	}
4134
4135	if (h->all_io_entry) {
4136		INIT_LIST_HEAD(&work);
4137		dm_deferred_entry_dec(h->all_io_entry, &work);
4138		if (!list_empty(&work)) {
4139			spin_lock_irqsave(&pool->lock, flags);
4140			list_for_each_entry_safe(m, tmp, &work, list)
4141				list_add_tail(&m->list, &pool->prepared_discards);
4142			spin_unlock_irqrestore(&pool->lock, flags);
4143			wake_worker(pool);
4144		}
4145	}
4146
4147	if (h->cell)
4148		cell_defer_no_holder(h->tc, h->cell);
4149
4150	return 0;
4151}
4152
4153static void thin_presuspend(struct dm_target *ti)
4154{
4155	struct thin_c *tc = ti->private;
4156
4157	if (dm_noflush_suspending(ti))
4158		noflush_work(tc, do_noflush_start);
4159}
4160
4161static void thin_postsuspend(struct dm_target *ti)
4162{
4163	struct thin_c *tc = ti->private;
4164
4165	/*
4166	 * The dm_noflush_suspending flag has been cleared by now, so
4167	 * unfortunately we must always run this.
4168	 */
4169	noflush_work(tc, do_noflush_stop);
4170}
4171
4172static int thin_preresume(struct dm_target *ti)
4173{
4174	struct thin_c *tc = ti->private;
4175
4176	if (tc->origin_dev)
4177		tc->origin_size = get_dev_size(tc->origin_dev->bdev);
4178
4179	return 0;
4180}
4181
4182/*
4183 * <nr mapped sectors> <highest mapped sector>
4184 */
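/*
 * e.g. (hypothetical values) with 128-sector blocks and blocks 0-9
 * mapped, INFO reports "1280 1279": 10 * 128 mapped sectors, and the
 * highest mapped sector is (9 + 1) * 128 - 1.
 */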
4185static void thin_status(struct dm_target *ti, status_type_t type,
4186			unsigned status_flags, char *result, unsigned maxlen)
4187{
4188	int r;
4189	ssize_t sz = 0;
4190	dm_block_t mapped, highest;
4191	char buf[BDEVNAME_SIZE];
4192	struct thin_c *tc = ti->private;
4193
4194	if (get_pool_mode(tc->pool) == PM_FAIL) {
4195		DMEMIT("Fail");
4196		return;
4197	}
4198
4199	if (!tc->td)
4200		DMEMIT("-");
4201	else {
4202		switch (type) {
4203		case STATUSTYPE_INFO:
4204			r = dm_thin_get_mapped_count(tc->td, &mapped);
4205			if (r) {
4206				DMERR("dm_thin_get_mapped_count returned %d", r);
4207				goto err;
4208			}
4209
4210			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
4211			if (r < 0) {
4212				DMERR("dm_thin_get_highest_mapped_block returned %d", r);
4213				goto err;
4214			}
4215
4216			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
4217			if (r)
4218				DMEMIT("%llu", ((highest + 1) *
4219						tc->pool->sectors_per_block) - 1);
4220			else
4221				DMEMIT("-");
4222			break;
4223
4224		case STATUSTYPE_TABLE:
4225			DMEMIT("%s %lu",
4226			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
4227			       (unsigned long) tc->dev_id);
4228			if (tc->origin_dev)
4229				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
4230			break;
4231		}
4232	}
4233
4234	return;
4235
4236err:
4237	DMEMIT("Error");
4238}
4239
4240static int thin_iterate_devices(struct dm_target *ti,
4241				iterate_devices_callout_fn fn, void *data)
4242{
4243	sector_t blocks;
4244	struct thin_c *tc = ti->private;
4245	struct pool *pool = tc->pool;
4246
4247	/*
4248	 * We can't call dm_pool_get_data_dev_size() since that blocks.  So
4249	 * we follow a more convoluted path through to the pool's target.
4250	 */
4251	if (!pool->ti)
4252		return 0;	/* nothing is bound */
4253
4254	blocks = pool->ti->len;
4255	(void) sector_div(blocks, pool->sectors_per_block);
4256	if (blocks)
4257		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
4258
4259	return 0;
4260}
4261
4262static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4263{
4264	struct thin_c *tc = ti->private;
4265	struct pool *pool = tc->pool;
4266
4267	if (!pool->pf.discard_enabled)
4268		return;
4269
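	/*
	 * Discards are advertised at whole-pool-block granularity; anything
	 * finer cannot free a block, so it isn't useful to the pool.
	 */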
4270	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
4271	limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
4272}
4273
4274static struct target_type thin_target = {
4275	.name = "thin",
4276	.version = {1, 18, 0},
4277	.module	= THIS_MODULE,
4278	.ctr = thin_ctr,
4279	.dtr = thin_dtr,
4280	.map = thin_map,
4281	.end_io = thin_endio,
4282	.preresume = thin_preresume,
4283	.presuspend = thin_presuspend,
4284	.postsuspend = thin_postsuspend,
4285	.status = thin_status,
4286	.iterate_devices = thin_iterate_devices,
4287	.io_hints = thin_io_hints,
4288};
4289
4290/*----------------------------------------------------------------*/
4291
4292static int __init dm_thin_init(void)
4293{
4294	int r;
4295
4296	pool_table_init();
4297
4298	r = dm_register_target(&thin_target);
4299	if (r)
4300		return r;
4301
4302	r = dm_register_target(&pool_target);
4303	if (r)
4304		goto bad_pool_target;
4305
4306	r = -ENOMEM;
4307
4308	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4309	if (!_new_mapping_cache)
4310		goto bad_new_mapping_cache;
4311
4312	return 0;
4313
4314bad_new_mapping_cache:
4315	dm_unregister_target(&pool_target);
4316bad_pool_target:
4317	dm_unregister_target(&thin_target);
4318
4319	return r;
4320}
4321
4322static void dm_thin_exit(void)
4323{
4324	dm_unregister_target(&thin_target);
4325	dm_unregister_target(&pool_target);
4326
4327	kmem_cache_destroy(_new_mapping_cache);
4328}
4329
4330module_init(dm_thin_init);
4331module_exit(dm_thin_exit);
4332
4333module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
4334MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
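/*
 * Since the parameter is S_IRUGO | S_IWUSR it can be tuned at runtime,
 * e.g. (path assumes this target is built as the dm_thin_pool module):
 *
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 */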
4335
4336MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
4337MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
4338MODULE_LICENSE("GPL");
v3.5.6
   1/*
   2 * Copyright (C) 2011 Red Hat UK.
   3 *
   4 * This file is released under the GPL.
   5 */
   6
   7#include "dm-thin-metadata.h"
 
 
   8
   9#include <linux/device-mapper.h>
  10#include <linux/dm-io.h>
  11#include <linux/dm-kcopyd.h>
 
 
  12#include <linux/list.h>
 
  13#include <linux/init.h>
  14#include <linux/module.h>
  15#include <linux/slab.h>
 
 
 
  16
  17#define	DM_MSG_PREFIX	"thin"
  18
  19/*
  20 * Tunable constants
  21 */
  22#define ENDIO_HOOK_POOL_SIZE 1024
  23#define DEFERRED_SET_SIZE 64
  24#define MAPPING_POOL_SIZE 1024
  25#define PRISON_CELLS 1024
  26#define COMMIT_PERIOD HZ
 
 
 
 
 
 
  27
  28/*
  29 * The block size of the device holding pool data must be
  30 * between 64KB and 1GB.
  31 */
  32#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
  33#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
  34
  35/*
  36 * Device id is restricted to 24 bits.
  37 */
  38#define MAX_DEV_ID ((1 << 24) - 1)
  39
  40/*
  41 * How do we handle breaking sharing of data blocks?
  42 * =================================================
  43 *
  44 * We use a standard copy-on-write btree to store the mappings for the
  45 * devices (note I'm talking about copy-on-write of the metadata here, not
  46 * the data).  When you take an internal snapshot you clone the root node
  47 * of the origin btree.  After this there is no concept of an origin or a
  48 * snapshot.  They are just two device trees that happen to point to the
  49 * same data blocks.
  50 *
  51 * When we get a write in we decide if it's to a shared data block using
  52 * some timestamp magic.  If it is, we have to break sharing.
  53 *
  54 * Let's say we write to a shared block in what was the origin.  The
  55 * steps are:
  56 *
  57 * i) plug io further to this physical block. (see bio_prison code).
  58 *
  59 * ii) quiesce any read io to that shared data block.  Obviously
  60 * including all devices that share this block.  (see deferred_set code)
  61 *
  62 * iii) copy the data block to a newly allocate block.  This step can be
  63 * missed out if the io covers the block. (schedule_copy).
  64 *
  65 * iv) insert the new mapping into the origin's btree
  66 * (process_prepared_mapping).  This act of inserting breaks some
  67 * sharing of btree nodes between the two devices.  Breaking sharing only
  68 * effects the btree of that specific device.  Btrees for the other
  69 * devices that share the block never change.  The btree for the origin
  70 * device as it was after the last commit is untouched, ie. we're using
  71 * persistent data structures in the functional programming sense.
  72 *
  73 * v) unplug io to this physical block, including the io that triggered
  74 * the breaking of sharing.
  75 *
  76 * Steps (ii) and (iii) occur in parallel.
  77 *
  78 * The metadata _doesn't_ need to be committed before the io continues.  We
  79 * get away with this because the io is always written to a _new_ block.
  80 * If there's a crash, then:
  81 *
  82 * - The origin mapping will point to the old origin block (the shared
  83 * one).  This will contain the data as it was before the io that triggered
  84 * the breaking of sharing came in.
  85 *
  86 * - The snap mapping still points to the old block.  As it would after
  87 * the commit.
  88 *
  89 * The downside of this scheme is the timestamp magic isn't perfect, and
  90 * will continue to think that data block in the snapshot device is shared
  91 * even after the write to the origin has broken sharing.  I suspect data
  92 * blocks will typically be shared by many different devices, so we're
  93 * breaking sharing n + 1 times, rather than n, where n is the number of
  94 * devices that reference this data block.  At the moment I think the
  95 * benefits far, far outweigh the disadvantages.
  96 */
  97
  98/*----------------------------------------------------------------*/
  99
 100/*
 101 * Sometimes we can't deal with a bio straight away.  We put them in prison
 102 * where they can't cause any mischief.  Bios are put in a cell identified
 103 * by a key, multiple bios can be in the same cell.  When the cell is
 104 * subsequently unlocked the bios become available.
 105 */
 106struct bio_prison;
 107
 108struct cell_key {
 109	int virtual;
 110	dm_thin_id dev;
 111	dm_block_t block;
 112};
 113
 114struct dm_bio_prison_cell {
 115	struct hlist_node list;
 116	struct bio_prison *prison;
 117	struct cell_key key;
 118	struct bio *holder;
 119	struct bio_list bios;
 120};
 121
 122struct bio_prison {
 123	spinlock_t lock;
 124	mempool_t *cell_pool;
 125
 126	unsigned nr_buckets;
 127	unsigned hash_mask;
 128	struct hlist_head *cells;
 129};
 130
 131static uint32_t calc_nr_buckets(unsigned nr_cells)
 
 132{
 133	uint32_t n = 128;
 134
 135	nr_cells /= 4;
 136	nr_cells = min(nr_cells, 8192u);
 137
 138	while (n < nr_cells)
 139		n <<= 1;
 140
 141	return n;
 142}
 143
 144static struct kmem_cache *_cell_cache;
 145
 146/*
 147 * @nr_cells should be the number of cells you want in use _concurrently_.
 148 * Don't confuse it with the number of distinct keys.
 149 */
 150static struct bio_prison *prison_create(unsigned nr_cells)
 151{
 152	unsigned i;
 153	uint32_t nr_buckets = calc_nr_buckets(nr_cells);
 154	size_t len = sizeof(struct bio_prison) +
 155		(sizeof(struct hlist_head) * nr_buckets);
 156	struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
 157
 158	if (!prison)
 159		return NULL;
 160
 161	spin_lock_init(&prison->lock);
 162	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
 163	if (!prison->cell_pool) {
 164		kfree(prison);
 165		return NULL;
 166	}
 167
 168	prison->nr_buckets = nr_buckets;
 169	prison->hash_mask = nr_buckets - 1;
 170	prison->cells = (struct hlist_head *) (prison + 1);
 171	for (i = 0; i < nr_buckets; i++)
 172		INIT_HLIST_HEAD(prison->cells + i);
 173
 174	return prison;
 175}
 176
 177static void prison_destroy(struct bio_prison *prison)
 178{
 179	mempool_destroy(prison->cell_pool);
 180	kfree(prison);
 181}
 182
 183static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
 184{
 185	const unsigned long BIG_PRIME = 4294967291UL;
 186	uint64_t hash = key->block * BIG_PRIME;
 187
 188	return (uint32_t) (hash & prison->hash_mask);
 189}
 190
 191static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
 192{
 193	       return (lhs->virtual == rhs->virtual) &&
 194		       (lhs->dev == rhs->dev) &&
 195		       (lhs->block == rhs->block);
 196}
 197
 198static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
 199						  struct cell_key *key)
 200{
 201	struct dm_bio_prison_cell *cell;
 202	struct hlist_node *tmp;
 203
 204	hlist_for_each_entry(cell, tmp, bucket, list)
 205		if (keys_equal(&cell->key, key))
 206			return cell;
 207
 208	return NULL;
 209}
 210
 211/*
 212 * This may block if a new cell needs allocating.  You must ensure that
 213 * cells will be unlocked even if the calling thread is blocked.
 214 *
 215 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 216 */
 217static int bio_detain(struct bio_prison *prison, struct cell_key *key,
 218		      struct bio *inmate, struct dm_bio_prison_cell **ref)
 219{
 220	int r = 1;
 221	unsigned long flags;
 222	uint32_t hash = hash_key(prison, key);
 223	struct dm_bio_prison_cell *cell, *cell2;
 224
 225	BUG_ON(hash > prison->nr_buckets);
 226
 227	spin_lock_irqsave(&prison->lock, flags);
 228
 229	cell = __search_bucket(prison->cells + hash, key);
 230	if (cell) {
 231		bio_list_add(&cell->bios, inmate);
 232		goto out;
 233	}
 234
 235	/*
 236	 * Allocate a new cell
 237	 */
 238	spin_unlock_irqrestore(&prison->lock, flags);
 239	cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
 240	spin_lock_irqsave(&prison->lock, flags);
 241
 242	/*
 243	 * We've been unlocked, so we have to double check that
 244	 * nobody else has inserted this cell in the meantime.
 245	 */
 246	cell = __search_bucket(prison->cells + hash, key);
 247	if (cell) {
 248		mempool_free(cell2, prison->cell_pool);
 249		bio_list_add(&cell->bios, inmate);
 250		goto out;
 251	}
 252
 253	/*
 254	 * Use new cell.
 255	 */
 256	cell = cell2;
 257
 258	cell->prison = prison;
 259	memcpy(&cell->key, key, sizeof(cell->key));
 260	cell->holder = inmate;
 261	bio_list_init(&cell->bios);
 262	hlist_add_head(&cell->list, prison->cells + hash);
 263
 264	r = 0;
 265
 266out:
 267	spin_unlock_irqrestore(&prison->lock, flags);
 268
 269	*ref = cell;
 270
 271	return r;
 272}
 273
 274/*
 275 * @inmates must have been initialised prior to this call
 276 */
 277static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 278{
 279	struct bio_prison *prison = cell->prison;
 280
 281	hlist_del(&cell->list);
 282
 283	if (inmates) {
 284		bio_list_add(inmates, cell->holder);
 285		bio_list_merge(inmates, &cell->bios);
 286	}
 287
 288	mempool_free(cell, prison->cell_pool);
 289}
 290
 291static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 292{
 293	unsigned long flags;
 294	struct bio_prison *prison = cell->prison;
 295
 296	spin_lock_irqsave(&prison->lock, flags);
 297	__cell_release(cell, bios);
 298	spin_unlock_irqrestore(&prison->lock, flags);
 299}
 300
 301/*
 302 * There are a couple of places where we put a bio into a cell briefly
 303 * before taking it out again.  In these situations we know that no other
 304 * bio may be in the cell.  This function releases the cell, and also does
 305 * a sanity check.
 306 */
 307static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 308{
 309	BUG_ON(cell->holder != bio);
 310	BUG_ON(!bio_list_empty(&cell->bios));
 311
 312	__cell_release(cell, NULL);
 313}
 314
 315static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 316{
 317	unsigned long flags;
 318	struct bio_prison *prison = cell->prison;
 319
 320	spin_lock_irqsave(&prison->lock, flags);
 321	__cell_release_singleton(cell, bio);
 322	spin_unlock_irqrestore(&prison->lock, flags);
 323}
 324
 325/*
 326 * Sometimes we don't want the holder, just the additional bios.
 327 */
 328static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
 329				     struct bio_list *inmates)
 330{
 331	struct bio_prison *prison = cell->prison;
 332
 333	hlist_del(&cell->list);
 334	bio_list_merge(inmates, &cell->bios);
 335
 336	mempool_free(cell, prison->cell_pool);
 337}
 338
 339static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
 340				   struct bio_list *inmates)
 341{
 342	unsigned long flags;
 343	struct bio_prison *prison = cell->prison;
 344
 345	spin_lock_irqsave(&prison->lock, flags);
 346	__cell_release_no_holder(cell, inmates);
 347	spin_unlock_irqrestore(&prison->lock, flags);
 348}
 349
 350static void cell_error(struct dm_bio_prison_cell *cell)
 351{
 352	struct bio_prison *prison = cell->prison;
 353	struct bio_list bios;
 354	struct bio *bio;
 355	unsigned long flags;
 356
 357	bio_list_init(&bios);
 358
 359	spin_lock_irqsave(&prison->lock, flags);
 360	__cell_release(cell, &bios);
 361	spin_unlock_irqrestore(&prison->lock, flags);
 362
 363	while ((bio = bio_list_pop(&bios)))
 364		bio_io_error(bio);
 365}
 366
 367/*----------------------------------------------------------------*/
 368
 369/*
 370 * We use the deferred set to keep track of pending reads to shared blocks.
 371 * We do this to ensure the new mapping caused by a write isn't performed
 372 * until these prior reads have completed.  Otherwise the insertion of the
 373 * new mapping could free the old block that the read bios are mapped to.
 374 */
 375
 376struct deferred_set;
 377struct deferred_entry {
 378	struct deferred_set *ds;
 379	unsigned count;
 380	struct list_head work_items;
 381};
 382
 383struct deferred_set {
 384	spinlock_t lock;
 385	unsigned current_entry;
 386	unsigned sweeper;
 387	struct deferred_entry entries[DEFERRED_SET_SIZE];
 388};
 389
 390static void ds_init(struct deferred_set *ds)
 391{
 392	int i;
 393
 394	spin_lock_init(&ds->lock);
 395	ds->current_entry = 0;
 396	ds->sweeper = 0;
 397	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
 398		ds->entries[i].ds = ds;
 399		ds->entries[i].count = 0;
 400		INIT_LIST_HEAD(&ds->entries[i].work_items);
 401	}
 402}
 403
 404static struct deferred_entry *ds_inc(struct deferred_set *ds)
 405{
 406	unsigned long flags;
 407	struct deferred_entry *entry;
 408
 409	spin_lock_irqsave(&ds->lock, flags);
 410	entry = ds->entries + ds->current_entry;
 411	entry->count++;
 412	spin_unlock_irqrestore(&ds->lock, flags);
 413
 414	return entry;
 415}
 416
 417static unsigned ds_next(unsigned index)
 418{
 419	return (index + 1) % DEFERRED_SET_SIZE;
 420}
 421
 422static void __sweep(struct deferred_set *ds, struct list_head *head)
 423{
 424	while ((ds->sweeper != ds->current_entry) &&
 425	       !ds->entries[ds->sweeper].count) {
 426		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
 427		ds->sweeper = ds_next(ds->sweeper);
 428	}
 429
 430	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
 431		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
 432}
 433
 434static void ds_dec(struct deferred_entry *entry, struct list_head *head)
 435{
 436	unsigned long flags;
 437
 438	spin_lock_irqsave(&entry->ds->lock, flags);
 439	BUG_ON(!entry->count);
 440	--entry->count;
 441	__sweep(entry->ds, head);
 442	spin_unlock_irqrestore(&entry->ds->lock, flags);
 443}
 444
 445/*
 446 * Returns 1 if deferred or 0 if no pending items to delay job.
 447 */
 448static int ds_add_work(struct deferred_set *ds, struct list_head *work)
 449{
 450	int r = 1;
 451	unsigned long flags;
 452	unsigned next_entry;
 453
 454	spin_lock_irqsave(&ds->lock, flags);
 455	if ((ds->sweeper == ds->current_entry) &&
 456	    !ds->entries[ds->current_entry].count)
 457		r = 0;
 458	else {
 459		list_add(work, &ds->entries[ds->current_entry].work_items);
 460		next_entry = ds_next(ds->current_entry);
 461		if (!ds->entries[next_entry].count)
 462			ds->current_entry = next_entry;
 463	}
 464	spin_unlock_irqrestore(&ds->lock, flags);
 465
 466	return r;
 467}
 468
 469/*----------------------------------------------------------------*/
 470
 471/*
 472 * Key building.
 473 */
 474static void build_data_key(struct dm_thin_device *td,
 475			   dm_block_t b, struct cell_key *key)
 476{
 477	key->virtual = 0;
 478	key->dev = dm_thin_dev_id(td);
 479	key->block = b;
 480}
 481
 482static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
 483			      struct cell_key *key)
 484{
 485	key->virtual = 1;
 486	key->dev = dm_thin_dev_id(td);
 487	key->block = b;
 488}
 489
 490/*----------------------------------------------------------------*/
 491
 492/*
 493 * A pool device ties together a metadata device and a data device.  It
 494 * also provides the interface for creating and destroying internal
 495 * devices.
 496 */
 497struct dm_thin_new_mapping;
 498
 
 
 
 
 
 
 
 
 
 
 499struct pool_features {
 500	unsigned zero_new_blocks:1;
 501	unsigned discard_enabled:1;
 502	unsigned discard_passdown:1;
 
 
 
 503};
 504
 
 
 
 
 
 
 
 505struct pool {
 506	struct list_head list;
 507	struct dm_target *ti;	/* Only set if a pool target is bound */
 508
 509	struct mapped_device *pool_md;
 510	struct block_device *md_dev;
 511	struct dm_pool_metadata *pmd;
 512
 
 513	uint32_t sectors_per_block;
 514	unsigned block_shift;
 515	dm_block_t offset_mask;
 516	dm_block_t low_water_blocks;
 517
 518	struct pool_features pf;
 519	unsigned low_water_triggered:1;	/* A dm event has been sent */
 520	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */
 
 521
 522	struct bio_prison *prison;
 523	struct dm_kcopyd_client *copier;
 524
 525	struct workqueue_struct *wq;
 
 526	struct work_struct worker;
 527	struct delayed_work waker;
 
 528
 
 529	unsigned ref_count;
 530	unsigned long last_commit_jiffies;
 531
 532	spinlock_t lock;
 533	struct bio_list deferred_bios;
 534	struct bio_list deferred_flush_bios;
 535	struct list_head prepared_mappings;
 536	struct list_head prepared_discards;
 
 537
 538	struct bio_list retry_on_resume_list;
 539
 540	struct deferred_set shared_read_ds;
 541	struct deferred_set all_io_ds;
 542
 543	struct dm_thin_new_mapping *next_mapping;
 544	mempool_t *mapping_pool;
 545	mempool_t *endio_hook_pool;
 
 
 
 
 
 
 
 
 
 
 546};
 547
 
 
 
 548/*
 549 * Target context for a pool.
 550 */
 551struct pool_c {
 552	struct dm_target *ti;
 553	struct pool *pool;
 554	struct dm_dev *data_dev;
 555	struct dm_dev *metadata_dev;
 556	struct dm_target_callbacks callbacks;
 557
 558	dm_block_t low_water_blocks;
 559	struct pool_features pf;
 
 560};
 561
 562/*
 563 * Target context for a thin.
 564 */
 565struct thin_c {
 
 566	struct dm_dev *pool_dev;
 567	struct dm_dev *origin_dev;
 
 568	dm_thin_id dev_id;
 569
 570	struct pool *pool;
 571	struct dm_thin_device *td;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 572};
 573
 574/*----------------------------------------------------------------*/
 575
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 576/*
 577 * A global list of pools that uses a struct mapped_device as a key.
 578 */
 579static struct dm_thin_pool_table {
 580	struct mutex mutex;
 581	struct list_head pools;
 582} dm_thin_pool_table;
 583
 584static void pool_table_init(void)
 585{
 586	mutex_init(&dm_thin_pool_table.mutex);
 587	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
 588}
 589
 590static void __pool_table_insert(struct pool *pool)
 591{
 592	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 593	list_add(&pool->list, &dm_thin_pool_table.pools);
 594}
 595
 596static void __pool_table_remove(struct pool *pool)
 597{
 598	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 599	list_del(&pool->list);
 600}
 601
 602static struct pool *__pool_table_lookup(struct mapped_device *md)
 603{
 604	struct pool *pool = NULL, *tmp;
 605
 606	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 607
 608	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
 609		if (tmp->pool_md == md) {
 610			pool = tmp;
 611			break;
 612		}
 613	}
 614
 615	return pool;
 616}
 617
 618static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
 619{
 620	struct pool *pool = NULL, *tmp;
 621
 622	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
 623
 624	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
 625		if (tmp->md_dev == md_dev) {
 626			pool = tmp;
 627			break;
 628		}
 629	}
 630
 631	return pool;
 632}
 633
 634/*----------------------------------------------------------------*/
 635
 636struct dm_thin_endio_hook {
 637	struct thin_c *tc;
 638	struct deferred_entry *shared_read_entry;
 639	struct deferred_entry *all_io_entry;
 640	struct dm_thin_new_mapping *overwrite_mapping;
 
 
 641};
 642
 643static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 
 
 
 
 
 
 644{
 645	struct bio *bio;
 
 
 
 
 
 
 
 
 
 646	struct bio_list bios;
 
 647
 648	bio_list_init(&bios);
 649	bio_list_merge(&bios, master);
 650	bio_list_init(master);
 651
 652	while ((bio = bio_list_pop(&bios))) {
 653		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 654
 655		if (h->tc == tc)
 656			bio_endio(bio, DM_ENDIO_REQUEUE);
 657		else
 658			bio_list_add(master, bio);
 659	}
 660}
 661
 662static void requeue_io(struct thin_c *tc)
 663{
 664	struct pool *pool = tc->pool;
 665	unsigned long flags;
 666
 667	spin_lock_irqsave(&pool->lock, flags);
 668	__requeue_bio_list(tc, &pool->deferred_bios);
 669	__requeue_bio_list(tc, &pool->retry_on_resume_list);
 670	spin_unlock_irqrestore(&pool->lock, flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 671}
 672
 673/*
 674 * This section of code contains the logic for processing a thin device's IO.
 675 * Much of the code depends on pool object resources (lists, workqueues, etc)
 676 * but most is exclusively called from the thin target rather than the thin-pool
 677 * target.
 678 */
 679
 680static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 681{
 682	return bio->bi_sector >> tc->pool->block_shift;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 683}
 684
 685static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 686{
 687	struct pool *pool = tc->pool;
 
 688
 689	bio->bi_bdev = tc->pool_dev->bdev;
 690	bio->bi_sector = (block << pool->block_shift) +
 691		(bio->bi_sector & pool->offset_mask);
 
 
 
 
 
 692}
 693
 694static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 695{
 696	bio->bi_bdev = tc->origin_dev->bdev;
 697}
 698
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 699static void issue(struct thin_c *tc, struct bio *bio)
 700{
 701	struct pool *pool = tc->pool;
 702	unsigned long flags;
 703
 
 
 
 
 
 704	/*
 705	 * Batch together any FUA/FLUSH bios we find and then issue
 706	 * a single commit for them in process_deferred_bios().
 
 707	 */
 708	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
 709		spin_lock_irqsave(&pool->lock, flags);
 710		bio_list_add(&pool->deferred_flush_bios, bio);
 711		spin_unlock_irqrestore(&pool->lock, flags);
 712	} else
 713		generic_make_request(bio);
 
 
 
 
 
 
 714}
 715
 716static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
 717{
 718	remap_to_origin(tc, bio);
 719	issue(tc, bio);
 720}
 721
 722static void remap_and_issue(struct thin_c *tc, struct bio *bio,
 723			    dm_block_t block)
 724{
 725	remap(tc, bio, block);
 726	issue(tc, bio);
 727}
 728
 729/*
 730 * wake_worker() is used when new work is queued and when pool_resume is
 731 * ready to continue deferred IO processing.
 732 */
 733static void wake_worker(struct pool *pool)
 734{
 735	queue_work(pool->wq, &pool->worker);
 736}
 737
 738/*----------------------------------------------------------------*/
 739
 740/*
 741 * Bio endio functions.
 742 */
 743struct dm_thin_new_mapping {
 744	struct list_head list;
 745
 746	unsigned quiesced:1;
 747	unsigned prepared:1;
 748	unsigned pass_discard:1;
 
 
 
 
 
 
 749
 
 750	struct thin_c *tc;
 751	dm_block_t virt_block;
 752	dm_block_t data_block;
 753	struct dm_bio_prison_cell *cell, *cell2;
 754	int err;
 755
 756	/*
 757	 * If the bio covers the whole area of a block then we can avoid
 758	 * zeroing or copying.  Instead this bio is hooked.  The bio will
 759	 * still be in the cell, so care has to be taken to avoid issuing
 760	 * the bio twice.
 761	 */
 762	struct bio *bio;
 763	bio_end_io_t *saved_bi_end_io;
 764};
 765
 766static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 767{
 768	struct pool *pool = m->tc->pool;
 769
 770	if (m->quiesced && m->prepared) {
 771		list_add(&m->list, &pool->prepared_mappings);
 772		wake_worker(pool);
 773	}
 774}
 775
 776static void copy_complete(int read_err, unsigned long write_err, void *context)
 777{
 778	unsigned long flags;
 779	struct dm_thin_new_mapping *m = context;
 780	struct pool *pool = m->tc->pool;
 781
 782	m->err = read_err || write_err ? -EIO : 0;
 783
 784	spin_lock_irqsave(&pool->lock, flags);
 785	m->prepared = 1;
 786	__maybe_add_mapping(m);
 787	spin_unlock_irqrestore(&pool->lock, flags);
 788}
 789
 790static void overwrite_endio(struct bio *bio, int err)
 
 
 
 
 
 
 
 
 791{
 792	unsigned long flags;
 793	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 794	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 795	struct pool *pool = m->tc->pool;
 796
 797	m->err = err;
 798
 799	spin_lock_irqsave(&pool->lock, flags);
 800	m->prepared = 1;
 801	__maybe_add_mapping(m);
 802	spin_unlock_irqrestore(&pool->lock, flags);
 803}
 804
 805/*----------------------------------------------------------------*/
 806
 807/*
 808 * Workqueue.
 809 */
 810
 811/*
 812 * Prepared mapping jobs.
 813 */
 814
 815/*
 816 * This sends the bios in the cell back to the deferred_bios list.
 
 817 */
 818static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 819		       dm_block_t data_block)
 820{
 821	struct pool *pool = tc->pool;
 822	unsigned long flags;
 823
 824	spin_lock_irqsave(&pool->lock, flags);
 825	cell_release(cell, &pool->deferred_bios);
 826	spin_unlock_irqrestore(&tc->pool->lock, flags);
 827
 828	wake_worker(pool);
 829}
 830
 831/*
 832 * Same as cell_defer above, except it omits one particular detainee,
 833 * a write bio that covers the block and has already been processed.
 834 */
 835static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 836{
 837	struct bio_list bios;
 838	struct pool *pool = tc->pool;
 839	unsigned long flags;
 
 
 
 
 
 
 
 
 
 
 
 840
 841	bio_list_init(&bios);
 
 842
 843	spin_lock_irqsave(&pool->lock, flags);
 844	cell_release_no_holder(cell, &pool->deferred_bios);
 845	spin_unlock_irqrestore(&pool->lock, flags);
 846
 847	wake_worker(pool);
 
 
 
 
 848}
 849
 850static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 851{
 852	struct thin_c *tc = m->tc;
 853	struct bio *bio;
 
 854	int r;
 855
 856	bio = m->bio;
 857	if (bio)
 858		bio->bi_end_io = m->saved_bi_end_io;
 859
 860	if (m->err) {
 861		cell_error(m->cell);
 862		goto out;
 863	}
 864
 865	/*
 866	 * Commit the prepared block into the mapping btree.
 867	 * Any I/O for this block arriving after this point will get
 868	 * remapped to it directly.
 869	 */
 870	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
 871	if (r) {
 872		DMERR("dm_thin_insert_block() failed");
 873		cell_error(m->cell);
 874		goto out;
 875	}
 876
 877	/*
 878	 * Release any bios held while the block was being provisioned.
 879	 * If we are processing a write bio that completely covers the block,
 880	 * we already processed it so can ignore it now when processing
 881	 * the bios in the cell.
 882	 */
 883	if (bio) {
 884		cell_defer_except(tc, m->cell);
 885		bio_endio(bio, 0);
 886	} else
 887		cell_defer(tc, m->cell, m->data_block);
 
 
 
 888
 889out:
 890	list_del(&m->list);
 
 
 
 
 
 
 
 
 
 
 891	mempool_free(m, tc->pool->mapping_pool);
 892}
 893
 894static void process_prepared_discard(struct dm_thin_new_mapping *m)
 
 
 
 
 
 
 
 
 
 
 
 
 895{
 896	int r;
 897	struct thin_c *tc = m->tc;
 898
 899	r = dm_thin_remove_block(tc->td, m->virt_block);
 900	if (r)
 901		DMERR("dm_thin_remove_block() failed");
 
 
 
 
 
 
 
 902
 
 
 903	/*
 904	 * Pass the discard down to the underlying device?
 
 905	 */
 906	if (m->pass_discard)
 907		remap_and_issue(tc, m->bio, m->data_block);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 908	else
 909		bio_endio(m->bio, 0);
 910
 911	cell_defer_except(tc, m->cell);
 912	cell_defer_except(tc, m->cell2);
 913	mempool_free(m, tc->pool->mapping_pool);
 
 
 
 
 
 914}
 915
 916static void process_prepared(struct pool *pool, struct list_head *head,
 917			     void (*fn)(struct dm_thin_new_mapping *))
 918{
 919	unsigned long flags;
 920	struct list_head maps;
 921	struct dm_thin_new_mapping *m, *tmp;
 922
 923	INIT_LIST_HEAD(&maps);
 924	spin_lock_irqsave(&pool->lock, flags);
 925	list_splice_init(head, &maps);
 926	spin_unlock_irqrestore(&pool->lock, flags);
 927
 928	list_for_each_entry_safe(m, tmp, &maps, list)
 929		fn(m);
 930}
 931
 932/*
 933 * Deferred bio jobs.
 934 */
 935static int io_overlaps_block(struct pool *pool, struct bio *bio)
 936{
 937	return !(bio->bi_sector & pool->offset_mask) &&
 938		(bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
 939
 940}
 941
 942static int io_overwrites_block(struct pool *pool, struct bio *bio)
 943{
 944	return (bio_data_dir(bio) == WRITE) &&
 945		io_overlaps_block(pool, bio);
 946}
 947
 948static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
 949			       bio_end_io_t *fn)
 950{
 951	*save = bio->bi_end_io;
 952	bio->bi_end_io = fn;
 953}
 954
 955static int ensure_next_mapping(struct pool *pool)
 956{
 957	if (pool->next_mapping)
 958		return 0;
 959
 960	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
 961
 962	return pool->next_mapping ? 0 : -ENOMEM;
 963}
 964
 965static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 966{
 967	struct dm_thin_new_mapping *r = pool->next_mapping;
 968
 969	BUG_ON(!pool->next_mapping);
 970
 
 
 
 
 971	pool->next_mapping = NULL;
 972
 973	return r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 974}
 975
 
 
 
 976static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 977			  struct dm_dev *origin, dm_block_t data_origin,
 978			  dm_block_t data_dest,
 979			  struct dm_bio_prison_cell *cell, struct bio *bio)
 
 980{
 981	int r;
 982	struct pool *pool = tc->pool;
 983	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 984
 985	INIT_LIST_HEAD(&m->list);
 986	m->quiesced = 0;
 987	m->prepared = 0;
 988	m->tc = tc;
 989	m->virt_block = virt_block;
 
 990	m->data_block = data_dest;
 991	m->cell = cell;
 992	m->err = 0;
 993	m->bio = NULL;
 994
 995	if (!ds_add_work(&pool->shared_read_ds, &m->list))
 996		m->quiesced = 1;
 
 
 
 
 
 
 
 997
 998	/*
 999	 * IO to pool_dev remaps to the pool target's data_dev.
1000	 *
1001	 * If the whole block of data is being overwritten, we can issue the
1002	 * bio immediately. Otherwise we use kcopyd to clone the data first.
1003	 */
1004	if (io_overwrites_block(pool, bio)) {
1005		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1006
1007		h->overwrite_mapping = m;
1008		m->bio = bio;
1009		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1010		remap_and_issue(tc, bio, data_dest);
1011	} else {
1012		struct dm_io_region from, to;
1013
1014		from.bdev = origin->bdev;
1015		from.sector = data_origin * pool->sectors_per_block;
1016		from.count = pool->sectors_per_block;
1017
1018		to.bdev = tc->pool_dev->bdev;
1019		to.sector = data_dest * pool->sectors_per_block;
1020		to.count = pool->sectors_per_block;
1021
1022		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
1023				   0, copy_complete, m);
1024		if (r < 0) {
1025			mempool_free(m, pool->mapping_pool);
1026			DMERR("dm_kcopyd_copy() failed");
1027			cell_error(cell);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1028		}
1029	}
 
 
1030}
1031
1032static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1033				   dm_block_t data_origin, dm_block_t data_dest,
1034				   struct dm_bio_prison_cell *cell, struct bio *bio)
1035{
1036	schedule_copy(tc, virt_block, tc->pool_dev,
1037		      data_origin, data_dest, cell, bio);
1038}
1039
1040static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1041				   dm_block_t data_dest,
1042				   struct dm_bio_prison_cell *cell, struct bio *bio)
1043{
1044	schedule_copy(tc, virt_block, tc->origin_dev,
1045		      virt_block, data_dest, cell, bio);
1046}
1047
1048static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1049			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
1050			  struct bio *bio)
1051{
1052	struct pool *pool = tc->pool;
1053	struct dm_thin_new_mapping *m = get_next_mapping(pool);
1054
1055	INIT_LIST_HEAD(&m->list);
1056	m->quiesced = 1;
1057	m->prepared = 0;
1058	m->tc = tc;
1059	m->virt_block = virt_block;
 
1060	m->data_block = data_block;
1061	m->cell = cell;
1062	m->err = 0;
1063	m->bio = NULL;
1064
1065	/*
1066	 * If the whole block of data is being overwritten or we are not
1067	 * zeroing pre-existing data, we can issue the bio immediately.
1068	 * Otherwise we use kcopyd to zero the data first.
1069	 */
1070	if (!pool->pf.zero_new_blocks)
 
 
 
 
 
 
1071		process_prepared_mapping(m);
 
1072
1073	else if (io_overwrites_block(pool, bio)) {
1074		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
 
 
 
 
1075
1076		h->overwrite_mapping = m;
1077		m->bio = bio;
1078		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1079		remap_and_issue(tc, bio, data_block);
1080	} else {
1081		int r;
1082		struct dm_io_region to;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1083
1084		to.bdev = tc->pool_dev->bdev;
1085		to.sector = data_block * pool->sectors_per_block;
1086		to.count = pool->sectors_per_block;
1087
1088		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
1089		if (r < 0) {
1090			mempool_free(m, pool->mapping_pool);
1091			DMERR("dm_kcopyd_zero() failed");
1092			cell_error(cell);
1093		}
1094	}
1095}
1096
1097static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 
 
 
 
1098{
1099	int r;
1100	dm_block_t free_blocks;
1101	unsigned long flags;
1102	struct pool *pool = tc->pool;
1103
1104	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
 
 
 
1105	if (r)
1106		return r;
 
 
 
 
 
 
 
 
 
1107
1108	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1109		DMWARN("%s: reached low water mark, sending event.",
1110		       dm_device_name(pool->pool_md));
1111		spin_lock_irqsave(&pool->lock, flags);
1112		pool->low_water_triggered = 1;
1113		spin_unlock_irqrestore(&pool->lock, flags);
1114		dm_table_event(pool->ti->table);
1115	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1116
1117	if (!free_blocks) {
1118		if (pool->no_free_space)
1119			return -ENOSPC;
1120		else {
1121			/*
1122			 * Try to commit to see if that will free up some
1123			 * more space.
1124			 */
1125			r = dm_pool_commit_metadata(pool->pmd);
1126			if (r) {
1127				DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1128				      __func__, r);
1129				return r;
1130			}
1131
1132			r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1133			if (r)
1134				return r;
 
 
1135
1136			/*
1137			 * If we still have no space we set a flag to avoid
1138			 * doing all this checking and return -ENOSPC.
1139			 */
1140			if (!free_blocks) {
1141				DMWARN("%s: no free space available.",
1142				       dm_device_name(pool->pool_md));
1143				spin_lock_irqsave(&pool->lock, flags);
1144				pool->no_free_space = 1;
1145				spin_unlock_irqrestore(&pool->lock, flags);
1146				return -ENOSPC;
1147			}
1148		}
1149	}
1150
1151	r = dm_pool_alloc_data_block(pool->pmd, result);
1152	if (r)
 
1153		return r;
 
1154
1155	return 0;
1156}
1157
1158/*
1159 * If we have run out of space, queue bios until the device is
1160 * resumed, presumably after having been reloaded with more space.
1161 */
1162static void retry_on_resume(struct bio *bio)
1163{
1164	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1165	struct thin_c *tc = h->tc;
1166	struct pool *pool = tc->pool;
1167	unsigned long flags;
1168
1169	spin_lock_irqsave(&pool->lock, flags);
1170	bio_list_add(&pool->retry_on_resume_list, bio);
1171	spin_unlock_irqrestore(&pool->lock, flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1172}
1173
1174static void no_space(struct dm_bio_prison_cell *cell)
1175{
1176	struct bio *bio;
1177	struct bio_list bios;
 
 
 
 
 
 
 
1178
1179	bio_list_init(&bios);
1180	cell_release(cell, &bios);
1181
1182	while ((bio = bio_list_pop(&bios)))
1183		retry_on_resume(bio);
1184}
1185
1186static void process_discard(struct thin_c *tc, struct bio *bio)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1187{
 
 
1188	int r;
1189	unsigned long flags;
1190	struct pool *pool = tc->pool;
1191	struct dm_bio_prison_cell *cell, *cell2;
1192	struct cell_key key, key2;
1193	dm_block_t block = get_bio_block(tc, bio);
1194	struct dm_thin_lookup_result lookup_result;
1195	struct dm_thin_new_mapping *m;
 
1196
1197	build_virtual_key(tc->td, block, &key);
1198	if (bio_detain(tc->pool->prison, &key, bio, &cell))
1199		return;
 
 
1200
1201	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1202	switch (r) {
1203	case 0:
1204		/*
1205		 * Check nobody is fiddling with this pool block.  This can
1206		 * happen if someone's in the process of breaking sharing
1207		 * on this block.
1208		 */
1209		build_data_key(tc->td, lookup_result.block, &key2);
1210		if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
1211			cell_release_singleton(cell, bio);
1212			break;
 
 
 
 
 
 
1213		}
1214
1215		if (io_overlaps_block(pool, bio)) {
1216			/*
1217			 * IO may still be going to the destination block.  We must
1218			 * quiesce before we can do the removal.
1219			 */
1220			m = get_next_mapping(pool);
1221			m->tc = tc;
1222			m->pass_discard = (!lookup_result.shared) & pool->pf.discard_passdown;
1223			m->virt_block = block;
1224			m->data_block = lookup_result.block;
1225			m->cell = cell;
1226			m->cell2 = cell2;
1227			m->err = 0;
1228			m->bio = bio;
1229
1230			if (!ds_add_work(&pool->all_io_ds, &m->list)) {
1231				spin_lock_irqsave(&pool->lock, flags);
1232				list_add(&m->list, &pool->prepared_discards);
1233				spin_unlock_irqrestore(&pool->lock, flags);
1234				wake_worker(pool);
1235			}
1236		} else {
1237			/*
1238			 * This path is hit if people are ignoring
1239			 * limits->discard_granularity.  It ignores any
1240			 * part of the discard that is in a subsequent
1241			 * block.
1242			 */
1243			sector_t offset = bio->bi_sector - (block << pool->block_shift);
1244			unsigned remaining = (pool->sectors_per_block - offset) << 9;
1245			bio->bi_size = min(bio->bi_size, remaining);
1246
1247			cell_release_singleton(cell, bio);
1248			cell_release_singleton(cell2, bio);
1249			if ((!lookup_result.shared) && pool->pf.discard_passdown)
1250				remap_and_issue(tc, bio, lookup_result.block);
1251			else
1252				bio_endio(bio, 0);
1253		}
1254		break;
1255
1256	case -ENODATA:
1257		/*
1258		 * It isn't provisioned, just forget it.
 
 
 
 
 
1259		 */
1260		cell_release_singleton(cell, bio);
1261		bio_endio(bio, 0);
1262		break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1263
1264	default:
1265		DMERR("discard: find block unexpectedly returned %d", r);
1266		cell_release_singleton(cell, bio);
1267		bio_io_error(bio);
1268		break;
 
 
1269	}
 
 
 
 
 
 
 
 
 
 
 
 
 
1270}
1271
1272static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1273			  struct cell_key *key,
1274			  struct dm_thin_lookup_result *lookup_result,
1275			  struct dm_bio_prison_cell *cell)
1276{
1277	int r;
1278	dm_block_t data_block;
 
1279
1280	r = alloc_data_block(tc, &data_block);
1281	switch (r) {
1282	case 0:
1283		schedule_internal_copy(tc, block, lookup_result->block,
1284				       data_block, cell, bio);
1285		break;
1286
1287	case -ENOSPC:
1288		no_space(cell);
1289		break;
1290
1291	default:
1292		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1293		cell_error(cell);
 
1294		break;
1295	}
1296}
1297
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1298static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1299			       dm_block_t block,
1300			       struct dm_thin_lookup_result *lookup_result)
 
1301{
1302	struct dm_bio_prison_cell *cell;
1303	struct pool *pool = tc->pool;
1304	struct cell_key key;
1305
1306	/*
1307	 * If cell is already occupied, then sharing is already in the process
1308	 * of being broken so we have nothing further to do here.
1309	 */
1310	build_data_key(tc->td, lookup_result->block, &key);
1311	if (bio_detain(pool->prison, &key, bio, &cell))
 
1312		return;
 
1313
1314	if (bio_data_dir(bio) == WRITE)
1315		break_sharing(tc, bio, block, &key, lookup_result, cell);
1316	else {
1317		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
1318
1319		h->shared_read_entry = ds_inc(&pool->shared_read_ds);
 
 
1320
1321		cell_release_singleton(cell, bio);
1322		remap_and_issue(tc, bio, lookup_result->block);
1323	}
1324}
1325
1326static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1327			    struct dm_bio_prison_cell *cell)
1328{
1329	int r;
1330	dm_block_t data_block;
 
1331
1332	/*
1333	 * Remap empty bios (flushes) immediately, without provisioning.
1334	 */
1335	if (!bio->bi_size) {
1336		cell_release_singleton(cell, bio);
 
 
1337		remap_and_issue(tc, bio, 0);
1338		return;
1339	}
1340
1341	/*
1342	 * Fill read bios with zeroes and complete them immediately.
1343	 */
1344	if (bio_data_dir(bio) == READ) {
1345		zero_fill_bio(bio);
1346		cell_release_singleton(cell, bio);
1347		bio_endio(bio, 0);
1348		return;
1349	}
1350
1351	r = alloc_data_block(tc, &data_block);
1352	switch (r) {
1353	case 0:
1354		if (tc->origin_dev)
1355			schedule_external_copy(tc, block, data_block, cell, bio);
1356		else
1357			schedule_zero(tc, block, data_block, cell, bio);
1358		break;
1359
1360	case -ENOSPC:
1361		no_space(cell);
1362		break;
1363
1364	default:
1365		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1366		cell_error(cell);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1367		break;
1368	}
1369}
1370
1371static void process_bio(struct thin_c *tc, struct bio *bio)
1372{
1373	int r;
1374	dm_block_t block = get_bio_block(tc, bio);
1375	struct dm_bio_prison_cell *cell;
1376	struct cell_key key;
1377	struct dm_thin_lookup_result lookup_result;
1378
1379	/*
1380	 * If cell is already occupied, then the block is already
1381	 * being provisioned so we have nothing further to do here.
1382	 */
1383	build_virtual_key(tc->td, block, &key);
1384	if (bio_detain(tc->pool->prison, &key, bio, &cell))
1385		return;
1386
 
 
 
 
 
 
 
 
 
 
 
1387	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1388	switch (r) {
1389	case 0:
1390		/*
1391		 * We can release this cell now.  This thread is the only
1392		 * one that puts bios into a cell, and we know there were
1393		 * no preceding bios.
1394		 */
1395		/*
1396		 * TODO: this will probably have to change when discard goes
1397		 * back in.
1398		 */
1399		cell_release_singleton(cell, bio);
1400
1401		if (lookup_result.shared)
1402			process_shared_bio(tc, bio, block, &lookup_result);
1403		else
1404			remap_and_issue(tc, bio, lookup_result.block);
1405		break;
1406
1407	case -ENODATA:
1408		if (bio_data_dir(bio) == READ && tc->origin_dev) {
1409			cell_release_singleton(cell, bio);
1410			remap_to_origin_and_issue(tc, bio);
1411		} else
1412			provision_block(tc, bio, block, cell);
1413		break;
1414
1415	default:
1416		DMERR("dm_thin_find_block() failed, error = %d", r);
1417		cell_release_singleton(cell, bio);
1418		bio_io_error(bio);
1419		break;
1420	}
1421}
1422
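/*
 * Returns non-zero once COMMIT_PERIOD jiffies have passed since the last
 * commit.  The first comparison also forces a commit if jiffies has
 * wrapped past last_commit_jiffies.
 */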
1423static int need_commit_due_to_time(struct pool *pool)
1424{
1425	return jiffies < pool->last_commit_jiffies ||
1426	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1427}
1428
1429static void process_deferred_bios(struct pool *pool)
1430{
1431	unsigned long flags;
1432	struct bio *bio;
1433	struct bio_list bios;
1434	int r;
1435
1436	bio_list_init(&bios);
1437
1438	spin_lock_irqsave(&pool->lock, flags);
1439	bio_list_merge(&bios, &pool->deferred_bios);
1440	bio_list_init(&pool->deferred_bios);
1441	spin_unlock_irqrestore(&pool->lock, flags);
1442
1443	while ((bio = bio_list_pop(&bios))) {
1444		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1445		struct thin_c *tc = h->tc;
1446
1447		/*
1448		 * If we've got no free new_mapping structs, and processing
1449		 * this bio might require one, we pause until there are some
1450		 * prepared mappings to process.
1451		 */
1452		if (ensure_next_mapping(pool)) {
1453			spin_lock_irqsave(&pool->lock, flags);
1454			bio_list_merge(&pool->deferred_bios, &bios);
1455			spin_unlock_irqrestore(&pool->lock, flags);
1456
1457			break;
1458		}
1459
1460		if (bio->bi_rw & REQ_DISCARD)
1461			process_discard(tc, bio);
1462		else
1463			process_bio(tc, bio);
1464	}
1465
1466	/*
1467	 * If there are any deferred flush bios, we must commit
1468	 * the metadata before issuing them.
1469	 */
1470	bio_list_init(&bios);
1471	spin_lock_irqsave(&pool->lock, flags);
1472	bio_list_merge(&bios, &pool->deferred_flush_bios);
1473	bio_list_init(&pool->deferred_flush_bios);
1474	spin_unlock_irqrestore(&pool->lock, flags);
1475
1476	if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1477		return;
1478
1479	r = dm_pool_commit_metadata(pool->pmd);
1480	if (r) {
1481		DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1482		      __func__, r);
1483		while ((bio = bio_list_pop(&bios)))
1484			bio_io_error(bio);
1485		return;
1486	}
1487	pool->last_commit_jiffies = jiffies;
1488
1489	while ((bio = bio_list_pop(&bios)))
1490		generic_make_request(bio);
1491}
1492
1493static void do_worker(struct work_struct *ws)
1494{
1495	struct pool *pool = container_of(ws, struct pool, worker);
1496
1497	process_prepared(pool, &pool->prepared_mappings, process_prepared_mapping);
1498	process_prepared(pool, &pool->prepared_discards, process_prepared_discard);
1499	process_deferred_bios(pool);
1500}
1501
1502/*
1503 * We want to commit periodically so that not too much
1504 * unwritten data builds up.
1505 */
1506static void do_waker(struct work_struct *ws)
1507{
1508	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1509	wake_worker(pool);
1510	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1511}
1512
1513/*----------------------------------------------------------------*/
1514
1515/*
1516 * Mapping functions.
1517 */
1518
1519/*
1520 * Called only while mapping a thin bio to hand it over to the workqueue.
1521 */
1522static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1523{
1524	unsigned long flags;
1525	struct pool *pool = tc->pool;
1526
1527	spin_lock_irqsave(&pool->lock, flags);
1528	bio_list_add(&pool->deferred_bios, bio);
1529	spin_unlock_irqrestore(&pool->lock, flags);
1530
1531	wake_worker(pool);
1532}
1533
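/*
 * Attach the per-bio context that thin_endio() later uses to release the
 * deferred set entries taken out here.
 */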
1534static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
1535{
1536	struct pool *pool = tc->pool;
1537	struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
1538
1539	h->tc = tc;
1540	h->shared_read_entry = NULL;
1541	h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds);
1542	h->overwrite_mapping = NULL;
1543
1544	return h;
1545}
1546
1547/*
1548 * Non-blocking function called from the thin target's map function.
1549 */
1550static int thin_bio_map(struct dm_target *ti, struct bio *bio,
1551			union map_info *map_context)
1552{
1553	int r;
1554	struct thin_c *tc = ti->private;
1555	dm_block_t block = get_bio_block(tc, bio);
1556	struct dm_thin_device *td = tc->td;
1557	struct dm_thin_lookup_result result;
1558
1559	map_context->ptr = thin_hook_bio(tc, bio);
1560	if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1561		thin_defer_bio(tc, bio);
1562		return DM_MAPIO_SUBMITTED;
1563	}
1564
1565	r = dm_thin_find_block(td, block, 0, &result);
1566
1567	/*
1568	 * Note that we defer readahead too.
1569	 */
1570	switch (r) {
1571	case 0:
1572		if (unlikely(result.shared)) {
1573			/*
1574			 * We have a race condition here between the
1575			 * result.shared value returned by the lookup and
1576			 * snapshot creation, which may cause new
1577			 * sharing.
1578			 *
1579			 * To avoid this, always quiesce the origin before
1580			 * taking the snap.  You want to do this anyway to
1581			 * ensure a consistent application view
1582			 * (i.e. lockfs).
1583			 *
1584			 * More distant ancestors are irrelevant. The
1585			 * shared flag will be set in their case.
1586			 */
1587			thin_defer_bio(tc, bio);
1588			r = DM_MAPIO_SUBMITTED;
1589		} else {
1590			remap(tc, bio, result.block);
1591			r = DM_MAPIO_REMAPPED;
1592		}
1593		break;
1594
1595	case -ENODATA:
1596		/*
1597		 * In future, the failed dm_thin_find_block above could
1598		 * provide the hint to load the metadata into cache.
1599		 */
1600	case -EWOULDBLOCK:
1601		thin_defer_bio(tc, bio);
1602		r = DM_MAPIO_SUBMITTED;
1603		break;
1604	}
1605
1606	return r;
1607}
1608
1609static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1610{
1611	int r;
1612	unsigned long flags;
1613	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1614
1615	spin_lock_irqsave(&pt->pool->lock, flags);
1616	r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1617	spin_unlock_irqrestore(&pt->pool->lock, flags);
1618
1619	if (!r) {
1620		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1621		r = bdi_congested(&q->backing_dev_info, bdi_bits);
1622	}
1623
1624	return r;
1625}
1626
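/* Caller must hold pool->lock; see pool_resume(). */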
1627static void __requeue_bios(struct pool *pool)
1628{
1629	bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1630	bio_list_init(&pool->retry_on_resume_list);
1631}
1632
1633/*----------------------------------------------------------------
1634 * Binding of control targets to a pool object
1635 *--------------------------------------------------------------*/
1636static int bind_control_target(struct pool *pool, struct dm_target *ti)
1637{
1638	struct pool_c *pt = ti->private;
1639
1640	pool->ti = ti;
1641	pool->low_water_blocks = pt->low_water_blocks;
1642	pool->pf = pt->pf;
1643
1644	/*
1645	 * If discard_passdown was enabled verify that the data device
1646	 * supports discards.  Disable discard_passdown if not; otherwise
1647	 * -EOPNOTSUPP will be returned.
1648	 */
1649	if (pt->pf.discard_passdown) {
1650		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1651		if (!q || !blk_queue_discard(q)) {
1652			char buf[BDEVNAME_SIZE];
1653			DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
1654			       bdevname(pt->data_dev->bdev, buf));
1655			pool->pf.discard_passdown = 0;
1656		}
1657	}
1658
1659	return 0;
1660}
1661
1662static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1663{
1664	if (pool->ti == ti)
1665		pool->ti = NULL;
1666}
1667
1668/*----------------------------------------------------------------
1669 * Pool creation
1670 *--------------------------------------------------------------*/
1671/* Initialize pool features. */
1672static void pool_features_init(struct pool_features *pf)
1673{
1674	pf->zero_new_blocks = 1;
1675	pf->discard_enabled = 1;
1676	pf->discard_passdown = 1;
1677}
1678
1679static void __pool_destroy(struct pool *pool)
1680{
1681	__pool_table_remove(pool);
1682
1683	if (dm_pool_metadata_close(pool->pmd) < 0)
1684		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1685
1686	prison_destroy(pool->prison);
1687	dm_kcopyd_client_destroy(pool->copier);
1688
1689	if (pool->wq)
1690		destroy_workqueue(pool->wq);
1691
1692	if (pool->next_mapping)
1693		mempool_free(pool->next_mapping, pool->mapping_pool);
1694	mempool_destroy(pool->mapping_pool);
1695	mempool_destroy(pool->endio_hook_pool);
1696	kfree(pool);
1697}
1698
1699static struct kmem_cache *_new_mapping_cache;
1700static struct kmem_cache *_endio_hook_cache;
1701
1702static struct pool *pool_create(struct mapped_device *pool_md,
1703				struct block_device *metadata_dev,
1704				unsigned long block_size, char **error)
1705{
1706	int r;
1707	void *err_p;
1708	struct pool *pool;
1709	struct dm_pool_metadata *pmd;
1710
1711	pmd = dm_pool_metadata_open(metadata_dev, block_size);
1712	if (IS_ERR(pmd)) {
1713		*error = "Error creating metadata object";
1714		return (struct pool *)pmd;
1715	}
1716
1717	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1718	if (!pool) {
1719		*error = "Error allocating memory for pool";
1720		err_p = ERR_PTR(-ENOMEM);
1721		goto bad_pool;
1722	}
1723
1724	pool->pmd = pmd;
1725	pool->sectors_per_block = block_size;
1726	pool->block_shift = ffs(block_size) - 1;
1727	pool->offset_mask = block_size - 1;
1728	pool->low_water_blocks = 0;
1729	pool_features_init(&pool->pf);
1730	pool->prison = prison_create(PRISON_CELLS);
1731	if (!pool->prison) {
1732		*error = "Error creating pool's bio prison";
1733		err_p = ERR_PTR(-ENOMEM);
1734		goto bad_prison;
1735	}
1736
1737	pool->copier = dm_kcopyd_client_create();
1738	if (IS_ERR(pool->copier)) {
1739		r = PTR_ERR(pool->copier);
1740		*error = "Error creating pool's kcopyd client";
1741		err_p = ERR_PTR(r);
1742		goto bad_kcopyd_client;
1743	}
1744
1745	/*
1746	 * Create singlethreaded workqueue that will service all devices
1747	 * that use this metadata.
1748	 */
1749	pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1750	if (!pool->wq) {
1751		*error = "Error creating pool's workqueue";
1752		err_p = ERR_PTR(-ENOMEM);
1753		goto bad_wq;
1754	}
1755
1756	INIT_WORK(&pool->worker, do_worker);
1757	INIT_DELAYED_WORK(&pool->waker, do_waker);
1758	spin_lock_init(&pool->lock);
1759	bio_list_init(&pool->deferred_bios);
1760	bio_list_init(&pool->deferred_flush_bios);
1761	INIT_LIST_HEAD(&pool->prepared_mappings);
1762	INIT_LIST_HEAD(&pool->prepared_discards);
1763	pool->low_water_triggered = 0;
1764	pool->no_free_space = 0;
1765	bio_list_init(&pool->retry_on_resume_list);
1766	ds_init(&pool->shared_read_ds);
1767	ds_init(&pool->all_io_ds);
1768
1769	pool->next_mapping = NULL;
1770	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1771						      _new_mapping_cache);
1772	if (!pool->mapping_pool) {
1773		*error = "Error creating pool's mapping mempool";
1774		err_p = ERR_PTR(-ENOMEM);
1775		goto bad_mapping_pool;
1776	}
1777
1778	pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
1779							 _endio_hook_cache);
1780	if (!pool->endio_hook_pool) {
1781		*error = "Error creating pool's endio_hook mempool";
1782		err_p = ERR_PTR(-ENOMEM);
1783		goto bad_endio_hook_pool;
1784	}
1785	pool->ref_count = 1;
1786	pool->last_commit_jiffies = jiffies;
1787	pool->pool_md = pool_md;
1788	pool->md_dev = metadata_dev;
1789	__pool_table_insert(pool);
1790
1791	return pool;
1792
1793bad_endio_hook_pool:
1794	mempool_destroy(pool->mapping_pool);
1795bad_mapping_pool:
1796	destroy_workqueue(pool->wq);
1797bad_wq:
1798	dm_kcopyd_client_destroy(pool->copier);
1799bad_kcopyd_client:
1800	prison_destroy(pool->prison);
1801bad_prison:
1802	kfree(pool);
1803bad_pool:
1804	if (dm_pool_metadata_close(pmd))
1805		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1806
1807	return err_p;
1808}
1809
1810static void __pool_inc(struct pool *pool)
1811{
1812	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1813	pool->ref_count++;
1814}
1815
1816static void __pool_dec(struct pool *pool)
1817{
1818	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1819	BUG_ON(!pool->ref_count);
1820	if (!--pool->ref_count)
1821		__pool_destroy(pool);
1822}
1823
1824static struct pool *__pool_find(struct mapped_device *pool_md,
1825				struct block_device *metadata_dev,
1826				unsigned long block_size, char **error,
1827				int *created)
1828{
1829	struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1830
1831	if (pool) {
1832		if (pool->pool_md != pool_md)
1833			return ERR_PTR(-EBUSY);
1834		__pool_inc(pool);
1835
1836	} else {
1837		pool = __pool_table_lookup(pool_md);
1838		if (pool) {
1839			if (pool->md_dev != metadata_dev)
1840				return ERR_PTR(-EINVAL);
1841			__pool_inc(pool);
1842
1843		} else {
1844			pool = pool_create(pool_md, metadata_dev, block_size, error);
1845			*created = 1;
1846		}
1847	}
1848
1849	return pool;
1850}
1851
1852/*----------------------------------------------------------------
1853 * Pool target methods
1854 *--------------------------------------------------------------*/
1855static void pool_dtr(struct dm_target *ti)
1856{
1857	struct pool_c *pt = ti->private;
1858
1859	mutex_lock(&dm_thin_pool_table.mutex);
1860
1861	unbind_control_target(pt->pool, ti);
1862	__pool_dec(pt->pool);
1863	dm_put_device(ti, pt->metadata_dev);
1864	dm_put_device(ti, pt->data_dev);
1865	kfree(pt);
1866
1867	mutex_unlock(&dm_thin_pool_table.mutex);
1868}
1869
1870static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1871			       struct dm_target *ti)
1872{
1873	int r;
1874	unsigned argc;
1875	const char *arg_name;
1876
1877	static struct dm_arg _args[] = {
1878		{0, 3, "Invalid number of pool feature arguments"},
1879	};
1880
1881	/*
1882	 * No feature arguments supplied.
1883	 */
1884	if (!as->argc)
1885		return 0;
1886
1887	r = dm_read_arg_group(_args, as, &argc, &ti->error);
1888	if (r)
1889		return -EINVAL;
1890
1891	while (argc && !r) {
1892		arg_name = dm_shift_arg(as);
1893		argc--;
1894
1895		if (!strcasecmp(arg_name, "skip_block_zeroing")) {
1896			pf->zero_new_blocks = 0;
1897			continue;
1898		} else if (!strcasecmp(arg_name, "ignore_discard")) {
1899			pf->discard_enabled = 0;
1900			continue;
1901		} else if (!strcasecmp(arg_name, "no_discard_passdown")) {
1902			pf->discard_passdown = 0;
1903			continue;
1904		}
1905
1906		ti->error = "Unrecognised pool feature requested";
1907		r = -EINVAL;
1908	}
1909
1910	return r;
1911}
1912
1913/*
1914 * thin-pool <metadata dev> <data dev>
1915 *	     <data block size (sectors)>
1916 *	     <low water mark (blocks)>
1917 *	     [<#feature args> [<arg>]*]
1918 *
1919 * Optional feature arguments are:
1920 *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
1921 *	     ignore_discard: disable discard
1922 *	     no_discard_passdown: don't pass discards down to the data device
1923 */
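/*
 * For example (an illustrative sketch only; device names, sizes and the
 * feature argument are assumptions):
 *
 *   dmsetup create pool \
 *	--table "0 20971520 thin-pool /dev/sdb1 /dev/sdb2 128 32768 \
 *		 1 skip_block_zeroing"
 *
 * i.e. a pool over 20971520 sectors (10GiB) with 64KiB (128-sector) data
 * blocks, a low water mark of 32768 blocks and one feature argument.
 */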
1924static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1925{
1926	int r, pool_created = 0;
1927	struct pool_c *pt;
1928	struct pool *pool;
1929	struct pool_features pf;
1930	struct dm_arg_set as;
1931	struct dm_dev *data_dev;
1932	unsigned long block_size;
1933	dm_block_t low_water_blocks;
1934	struct dm_dev *metadata_dev;
1935	sector_t metadata_dev_size;
1936	char b[BDEVNAME_SIZE];
1937
1938	/*
1939	 * FIXME Remove validation from scope of lock.
1940	 */
1941	mutex_lock(&dm_thin_pool_table.mutex);
1942
1943	if (argc < 4) {
1944		ti->error = "Invalid argument count";
1945		r = -EINVAL;
1946		goto out_unlock;
1947	}
1948	as.argc = argc;
1949	as.argv = argv;
1950
1951	r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
1952	if (r) {
1953		ti->error = "Error opening metadata block device";
1954		goto out_unlock;
1955	}
1956
1957	metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
1958	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
1959		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1960		       bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
1961
1962	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
1963	if (r) {
1964		ti->error = "Error getting data device";
1965		goto out_metadata;
1966	}
1967
1968	if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
1969	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1970	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
1971	    !is_power_of_2(block_size)) {
1972		ti->error = "Invalid block size";
1973		r = -EINVAL;
1974		goto out;
1975	}
1976
1977	if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
1978		ti->error = "Invalid low water mark";
1979		r = -EINVAL;
1980		goto out;
1981	}
1982
1983	/*
1984	 * Set default pool features.
1985	 */
1986	pool_features_init(&pf);
1987
1988	dm_consume_args(&as, 4);
1989	r = parse_pool_features(&as, &pf, ti);
1990	if (r)
1991		goto out;
1992
1993	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
1994	if (!pt) {
1995		r = -ENOMEM;
1996		goto out;
1997	}
1998
1999	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
2000			   block_size, &ti->error, &pool_created);
2001	if (IS_ERR(pool)) {
2002		r = PTR_ERR(pool);
2003		goto out_free_pt;
2004	}
2005
2006	/*
2007	 * 'pool_created' reflects whether this is the first table load.
2008	 * Top level discard support is not allowed to be changed after
2009	 * initial load.  This would require a pool reload to trigger thin
2010	 * device changes.
2011	 */
2012	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2013		ti->error = "Discard support cannot be disabled once enabled";
2014		r = -EINVAL;
2015		goto out_flags_changed;
2016	}
2017
2018	pt->pool = pool;
2019	pt->ti = ti;
2020	pt->metadata_dev = metadata_dev;
2021	pt->data_dev = data_dev;
2022	pt->low_water_blocks = low_water_blocks;
2023	pt->pf = pf;
2024	ti->num_flush_requests = 1;
2025	/*
2026	 * Only need to enable discards if the pool should pass
2027	 * them down to the data device.  The thin device's discard
2028	 * processing will cause mappings to be removed from the btree.
2029	 */
2030	if (pf.discard_enabled && pf.discard_passdown) {
2031		ti->num_discard_requests = 1;
2032		/*
2033		 * Setting 'discards_supported' circumvents the normal
2034		 * stacking of discard limits (this keeps the pool and
2035		 * thin devices' discard limits consistent).
2036		 */
2037		ti->discards_supported = 1;
2038	}
2039	ti->private = pt;
2040
2041	pt->callbacks.congested_fn = pool_is_congested;
2042	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2043
2044	mutex_unlock(&dm_thin_pool_table.mutex);
2045
2046	return 0;
2047
2048out_flags_changed:
2049	__pool_dec(pool);
2050out_free_pt:
2051	kfree(pt);
2052out:
2053	dm_put_device(ti, data_dev);
2054out_metadata:
2055	dm_put_device(ti, metadata_dev);
2056out_unlock:
2057	mutex_unlock(&dm_thin_pool_table.mutex);
2058
2059	return r;
2060}
2061
2062static int pool_map(struct dm_target *ti, struct bio *bio,
2063		    union map_info *map_context)
2064{
2065	int r;
2066	struct pool_c *pt = ti->private;
2067	struct pool *pool = pt->pool;
2068	unsigned long flags;
2069
2070	/*
2071	 * As this is a singleton target, ti->begin is always zero.
2072	 */
2073	spin_lock_irqsave(&pool->lock, flags);
2074	bio->bi_bdev = pt->data_dev->bdev;
2075	r = DM_MAPIO_REMAPPED;
2076	spin_unlock_irqrestore(&pool->lock, flags);
2077
2078	return r;
2079}
2080
2081/*
2082 * Retrieves the number of blocks of the data device from
2083 * the superblock and compares it to the actual device size,
2084 * thus resizing the data device in case it has grown.
2085 *
2086 * This copes both with opening a preallocated data device in the ctr,
2087 * followed by a resume,
2088 * -and-
2089 * with the resume method being called on its own after userspace has
2090 * grown the data device in reaction to a table event.
2091 */
2092static int pool_preresume(struct dm_target *ti)
2093{
2094	int r;
2095	struct pool_c *pt = ti->private;
2096	struct pool *pool = pt->pool;
2097	dm_block_t data_size, sb_data_size;
2098
2099	/*
2100	 * Take control of the pool object.
2101	 */
2102	r = bind_control_target(pool, ti);
2103	if (r)
2104		return r;
2105
2106	data_size = ti->len >> pool->block_shift;
2107	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2108	if (r) {
2109		DMERR("failed to retrieve data device size");
2110		return r;
2111	}
2112
2113	if (data_size < sb_data_size) {
2114		DMERR("pool target too small, is %llu blocks (expected %llu)",
2115		      data_size, sb_data_size);
2116		return -EINVAL;
2117
2118	} else if (data_size > sb_data_size) {
2119		r = dm_pool_resize_data_dev(pool->pmd, data_size);
2120		if (r) {
2121			DMERR("failed to resize data device");
2122			return r;
2123		}
2124
2125		r = dm_pool_commit_metadata(pool->pmd);
2126		if (r) {
2127			DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
2128			      __func__, r);
2129			return r;
2130		}
2131	}
2132
2133	return 0;
2134}
2135
2136static void pool_resume(struct dm_target *ti)
2137{
2138	struct pool_c *pt = ti->private;
2139	struct pool *pool = pt->pool;
2140	unsigned long flags;
2141
2142	spin_lock_irqsave(&pool->lock, flags);
2143	pool->low_water_triggered = 0;
2144	pool->no_free_space = 0;
2145	__requeue_bios(pool);
2146	spin_unlock_irqrestore(&pool->lock, flags);
2147
2148	do_waker(&pool->waker.work);
2149}
2150
2151static void pool_postsuspend(struct dm_target *ti)
2152{
2153	int r;
2154	struct pool_c *pt = ti->private;
2155	struct pool *pool = pt->pool;
2156
2157	cancel_delayed_work(&pool->waker);
2158	flush_workqueue(pool->wq);
2159
2160	r = dm_pool_commit_metadata(pool->pmd);
2161	if (r < 0) {
2162		DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
2163		      __func__, r);
2164		/* FIXME: invalidate device? error the next FUA or FLUSH bio? */
2165	}
2166}
2167
2168static int check_arg_count(unsigned argc, unsigned args_required)
2169{
2170	if (argc != args_required) {
2171		DMWARN("Message received with %u arguments instead of %u.",
2172		       argc, args_required);
2173		return -EINVAL;
2174	}
2175
2176	return 0;
2177}
2178
2179static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2180{
2181	if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2182	    *dev_id <= MAX_DEV_ID)
2183		return 0;
2184
2185	if (warning)
2186		DMWARN("Message received with invalid device id: %s", arg);
2187
2188	return -EINVAL;
2189}
2190
2191static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2192{
2193	dm_thin_id dev_id;
2194	int r;
2195
2196	r = check_arg_count(argc, 2);
2197	if (r)
2198		return r;
2199
2200	r = read_dev_id(argv[1], &dev_id, 1);
2201	if (r)
2202		return r;
2203
2204	r = dm_pool_create_thin(pool->pmd, dev_id);
2205	if (r) {
2206		DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2207		       argv[1]);
2208		return r;
2209	}
2210
2211	return 0;
2212}
2213
2214static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2215{
2216	dm_thin_id dev_id;
2217	dm_thin_id origin_dev_id;
2218	int r;
2219
2220	r = check_arg_count(argc, 3);
2221	if (r)
2222		return r;
2223
2224	r = read_dev_id(argv[1], &dev_id, 1);
2225	if (r)
2226		return r;
2227
2228	r = read_dev_id(argv[2], &origin_dev_id, 1);
2229	if (r)
2230		return r;
2231
2232	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2233	if (r) {
2234		DMWARN("Creation of new snapshot %s of device %s failed.",
2235		       argv[1], argv[2]);
2236		return r;
2237	}
2238
2239	return 0;
2240}
2241
2242static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2243{
2244	dm_thin_id dev_id;
2245	int r;
2246
2247	r = check_arg_count(argc, 2);
2248	if (r)
2249		return r;
2250
2251	r = read_dev_id(argv[1], &dev_id, 1);
2252	if (r)
2253		return r;
2254
2255	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2256	if (r)
2257		DMWARN("Deletion of thin device %s failed.", argv[1]);
2258
2259	return r;
2260}
2261
2262static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2263{
2264	dm_thin_id old_id, new_id;
2265	int r;
2266
2267	r = check_arg_count(argc, 3);
2268	if (r)
2269		return r;
2270
2271	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2272		DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2273		return -EINVAL;
2274	}
2275
2276	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2277		DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2278		return -EINVAL;
2279	}
2280
2281	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2282	if (r) {
2283		DMWARN("Failed to change transaction id from %s to %s.",
2284		       argv[1], argv[2]);
2285		return r;
2286	}
2287
2288	return 0;
2289}
2290
2291static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2292{
2293	int r;
2294
2295	r = check_arg_count(argc, 1);
2296	if (r)
2297		return r;
2298
2299	r = dm_pool_commit_metadata(pool->pmd);
2300	if (r) {
2301		DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
2302		      __func__, r);
2303		return r;
2304	}
2305
2306	r = dm_pool_reserve_metadata_snap(pool->pmd);
2307	if (r)
2308		DMWARN("reserve_metadata_snap message failed.");
2309
2310	return r;
2311}
2312
2313static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2314{
2315	int r;
2316
2317	r = check_arg_count(argc, 1);
2318	if (r)
2319		return r;
2320
2321	r = dm_pool_release_metadata_snap(pool->pmd);
2322	if (r)
2323		DMWARN("release_metadata_snap message failed.");
2324
2325	return r;
2326}
2327
2328/*
2329 * Messages supported:
2330 *   create_thin	<dev_id>
2331 *   create_snap	<dev_id> <origin_id>
2332 *   delete		<dev_id>
2334 *   set_transaction_id <current_trans_id> <new_trans_id>
2335 *   reserve_metadata_snap
2336 *   release_metadata_snap
2337 */
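/*
 * For example, from userspace (illustrative device name):
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 */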
2338static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2339{
2340	int r = -EINVAL;
2341	struct pool_c *pt = ti->private;
2342	struct pool *pool = pt->pool;
2343
2344	if (!strcasecmp(argv[0], "create_thin"))
2345		r = process_create_thin_mesg(argc, argv, pool);
2346
2347	else if (!strcasecmp(argv[0], "create_snap"))
2348		r = process_create_snap_mesg(argc, argv, pool);
2349
2350	else if (!strcasecmp(argv[0], "delete"))
2351		r = process_delete_mesg(argc, argv, pool);
2352
2353	else if (!strcasecmp(argv[0], "set_transaction_id"))
2354		r = process_set_transaction_id_mesg(argc, argv, pool);
2355
2356	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2357		r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2358
2359	else if (!strcasecmp(argv[0], "release_metadata_snap"))
2360		r = process_release_metadata_snap_mesg(argc, argv, pool);
2361
2362	else
2363		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2364
2365	if (!r) {
2366		r = dm_pool_commit_metadata(pool->pmd);
2367		if (r)
2368			DMERR("%s message: dm_pool_commit_metadata() failed, error = %d",
2369			      argv[0], r);
2370	}
2371
2372	return r;
2373}
2374
2375/*
2376 * Status line is:
2377 *    <transaction id> <used metadata blocks>/<total metadata blocks>
2378 *    <used data blocks>/<total data blocks> <held metadata root>
2379 */
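/*
 * e.g. "1 345/1310720 324/1258291 -" (figures are illustrative):
 * transaction id 1, 345 of 1310720 metadata blocks used, 324 of 1258291
 * data blocks used, and no held metadata root.
 */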
2380static int pool_status(struct dm_target *ti, status_type_t type,
2381		       char *result, unsigned maxlen)
2382{
2383	int r, count;
2384	unsigned sz = 0;
2385	uint64_t transaction_id;
2386	dm_block_t nr_free_blocks_data;
2387	dm_block_t nr_free_blocks_metadata;
2388	dm_block_t nr_blocks_data;
2389	dm_block_t nr_blocks_metadata;
2390	dm_block_t held_root;
2391	char buf[BDEVNAME_SIZE];
2392	char buf2[BDEVNAME_SIZE];
2393	struct pool_c *pt = ti->private;
2394	struct pool *pool = pt->pool;
2395
2396	switch (type) {
2397	case STATUSTYPE_INFO:
2398		r = dm_pool_get_metadata_transaction_id(pool->pmd,
2399							&transaction_id);
2400		if (r)
2401			return r;
2402
2403		r = dm_pool_get_free_metadata_block_count(pool->pmd,
2404							  &nr_free_blocks_metadata);
2405		if (r)
2406			return r;
2407
2408		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2409		if (r)
2410			return r;
2411
2412		r = dm_pool_get_free_block_count(pool->pmd,
2413						 &nr_free_blocks_data);
2414		if (r)
2415			return r;
2416
2417		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2418		if (r)
2419			return r;
2420
2421		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2422		if (r)
2423			return r;
2424
2425		DMEMIT("%llu %llu/%llu %llu/%llu ",
2426		       (unsigned long long)transaction_id,
2427		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2428		       (unsigned long long)nr_blocks_metadata,
2429		       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2430		       (unsigned long long)nr_blocks_data);
2431
2432		if (held_root)
2433			DMEMIT("%llu", held_root);
2434		else
2435			DMEMIT("-");
2436
2437		break;
2438
2439	case STATUSTYPE_TABLE:
2440		DMEMIT("%s %s %lu %llu ",
2441		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2442		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2443		       (unsigned long)pool->sectors_per_block,
2444		       (unsigned long long)pt->low_water_blocks);
2445
2446		count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
2447			!pt->pf.discard_passdown;
2448		DMEMIT("%u ", count);
2449
2450		if (!pool->pf.zero_new_blocks)
2451			DMEMIT("skip_block_zeroing ");
2452
2453		if (!pool->pf.discard_enabled)
2454			DMEMIT("ignore_discard ");
2455
2456		if (!pt->pf.discard_passdown)
2457			DMEMIT("no_discard_passdown ");
2458
2459		break;
2460	}
2461
2462	return 0;
2463}
2464
2465static int pool_iterate_devices(struct dm_target *ti,
2466				iterate_devices_callout_fn fn, void *data)
2467{
2468	struct pool_c *pt = ti->private;
2469
2470	return fn(ti, pt->data_dev, 0, ti->len, data);
2471}
2472
2473static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2474		      struct bio_vec *biovec, int max_size)
2475{
2476	struct pool_c *pt = ti->private;
2477	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2478
2479	if (!q->merge_bvec_fn)
2480		return max_size;
2481
2482	bvm->bi_bdev = pt->data_dev->bdev;
2483
2484	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2485}
2486
2487static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
2488{
2489	/*
2490	 * FIXME: these limits may be incompatible with the pool's data device
2491	 */
2492	limits->max_discard_sectors = pool->sectors_per_block;
2493
2494	/*
2495	 * This is just a hint, and not enforced.  We have to cope with
2496	 * bios that overlap 2 blocks.
2497	 */
2498	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2499	limits->discard_zeroes_data = pool->pf.zero_new_blocks;
2500}
2501
2502static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2503{
2504	struct pool_c *pt = ti->private;
2505	struct pool *pool = pt->pool;
2506
2507	blk_limits_io_min(limits, 0);
2508	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2509	if (pool->pf.discard_enabled)
2510		set_discard_limits(pool, limits);
2511}
2512
2513static struct target_type pool_target = {
2514	.name = "thin-pool",
2515	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2516		    DM_TARGET_IMMUTABLE,
2517	.version = {1, 2, 0},
2518	.module = THIS_MODULE,
2519	.ctr = pool_ctr,
2520	.dtr = pool_dtr,
2521	.map = pool_map,
2522	.postsuspend = pool_postsuspend,
2523	.preresume = pool_preresume,
2524	.resume = pool_resume,
2525	.message = pool_message,
2526	.status = pool_status,
2527	.merge = pool_merge,
2528	.iterate_devices = pool_iterate_devices,
2529	.io_hints = pool_io_hints,
2530};
2531
2532/*----------------------------------------------------------------
2533 * Thin target methods
2534 *--------------------------------------------------------------*/
2535static void thin_dtr(struct dm_target *ti)
2536{
2537	struct thin_c *tc = ti->private;
2538
2539	mutex_lock(&dm_thin_pool_table.mutex);
2540
2541	__pool_dec(tc->pool);
2542	dm_pool_close_thin_device(tc->td);
2543	dm_put_device(ti, tc->pool_dev);
2544	if (tc->origin_dev)
2545		dm_put_device(ti, tc->origin_dev);
2546	kfree(tc);
2547
2548	mutex_unlock(&dm_thin_pool_table.mutex);
2549}
2550
2551/*
2552 * Thin target parameters:
2553 *
2554 * <pool_dev> <dev_id> [origin_dev]
2555 *
2556 * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
2557 * dev_id: the internal device identifier
2558 * origin_dev: a device external to the pool that should act as the origin
2559 *
2560 * If the pool device has discards disabled, they get disabled for the thin
2561 * device as well.
2562 */
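/*
 * For example (illustrative; the pool name and size are assumptions):
 *
 *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 *
 * i.e. a 1GiB (2097152-sector) thin device using device id 0 within the
 * pool.
 */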
2563static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2564{
2565	int r;
2566	struct thin_c *tc;
2567	struct dm_dev *pool_dev, *origin_dev;
2568	struct mapped_device *pool_md;
2569
2570	mutex_lock(&dm_thin_pool_table.mutex);
2571
2572	if (argc != 2 && argc != 3) {
2573		ti->error = "Invalid argument count";
2574		r = -EINVAL;
2575		goto out_unlock;
2576	}
2577
2578	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2579	if (!tc) {
2580		ti->error = "Out of memory";
2581		r = -ENOMEM;
2582		goto out_unlock;
2583	}
2584
2585	if (argc == 3) {
2586		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
2587		if (r) {
2588			ti->error = "Error opening origin device";
2589			goto bad_origin_dev;
2590		}
2591		tc->origin_dev = origin_dev;
2592	}
2593
2594	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2595	if (r) {
2596		ti->error = "Error opening pool device";
2597		goto bad_pool_dev;
2598	}
2599	tc->pool_dev = pool_dev;
2600
2601	if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
2602		ti->error = "Invalid device id";
2603		r = -EINVAL;
2604		goto bad_common;
2605	}
2606
2607	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2608	if (!pool_md) {
2609		ti->error = "Couldn't get pool mapped device";
2610		r = -EINVAL;
2611		goto bad_common;
2612	}
2613
2614	tc->pool = __pool_table_lookup(pool_md);
2615	if (!tc->pool) {
2616		ti->error = "Couldn't find pool object";
2617		r = -EINVAL;
2618		goto bad_pool_lookup;
2619	}
2620	__pool_inc(tc->pool);
2621
2622	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2623	if (r) {
2624		ti->error = "Couldn't open thin internal device";
2625		goto bad_thin_open;
2626	}
2627
2628	ti->split_io = tc->pool->sectors_per_block;
2629	ti->num_flush_requests = 1;
2630
2631	/* If the pool supports discards, pass them on. */
2632	if (tc->pool->pf.discard_enabled) {
2633		ti->discards_supported = 1;
2634		ti->num_discard_requests = 1;
2635		ti->discard_zeroes_data_unsupported = 1;
2636	}
2637
2638	dm_put(pool_md);
2639
2640	mutex_unlock(&dm_thin_pool_table.mutex);
2641
2642	return 0;
2643
2644bad_thin_open:
2645	__pool_dec(tc->pool);
2646bad_pool_lookup:
2647	dm_put(pool_md);
2648bad_common:
2649	dm_put_device(ti, tc->pool_dev);
2650bad_pool_dev:
2651	if (tc->origin_dev)
2652		dm_put_device(ti, tc->origin_dev);
2653bad_origin_dev:
2654	kfree(tc);
2655out_unlock:
2656	mutex_unlock(&dm_thin_pool_table.mutex);
2657
2658	return r;
2659}
2660
2661static int thin_map(struct dm_target *ti, struct bio *bio,
2662		    union map_info *map_context)
2663{
2664	bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
2665
2666	return thin_bio_map(ti, bio, map_context);
2667}
2668
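/*
 * Releases the deferred set entries taken in thin_hook_bio(), handing any
 * mappings or discards they were holding back to the pool's lists.
 */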
2669static int thin_endio(struct dm_target *ti,
2670		      struct bio *bio, int err,
2671		      union map_info *map_context)
2672{
2673	unsigned long flags;
2674	struct dm_thin_endio_hook *h = map_context->ptr;
2675	struct list_head work;
2676	struct dm_thin_new_mapping *m, *tmp;
2677	struct pool *pool = h->tc->pool;
2678
2679	if (h->shared_read_entry) {
2680		INIT_LIST_HEAD(&work);
2681		ds_dec(h->shared_read_entry, &work);
2682
2683		spin_lock_irqsave(&pool->lock, flags);
2684		list_for_each_entry_safe(m, tmp, &work, list) {
2685			list_del(&m->list);
2686			m->quiesced = 1;
2687			__maybe_add_mapping(m);
2688		}
2689		spin_unlock_irqrestore(&pool->lock, flags);
2690	}
2691
2692	if (h->all_io_entry) {
2693		INIT_LIST_HEAD(&work);
2694		ds_dec(h->all_io_entry, &work);
2695		spin_lock_irqsave(&pool->lock, flags);
2696		list_for_each_entry_safe(m, tmp, &work, list)
2697			list_add(&m->list, &pool->prepared_discards);
2698		spin_unlock_irqrestore(&pool->lock, flags);
2699	}
2700
2701	mempool_free(h, pool->endio_hook_pool);
2702
2703	return 0;
2704}
2705
2706static void thin_postsuspend(struct dm_target *ti)
2707{
2708	if (dm_noflush_suspending(ti))
2709		requeue_io((struct thin_c *)ti->private);
2710}
2711
2712/*
2713 * <nr mapped sectors> <highest mapped sector>
2714 */
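/*
 * e.g. "1024000 2047999" (figures are illustrative): 1024000 sectors
 * mapped, highest mapped sector 2047999.
 */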
2715static int thin_status(struct dm_target *ti, status_type_t type,
2716		       char *result, unsigned maxlen)
2717{
2718	int r;
2719	ssize_t sz = 0;
2720	dm_block_t mapped, highest;
2721	char buf[BDEVNAME_SIZE];
2722	struct thin_c *tc = ti->private;
2723
2724	if (!tc->td)
2725		DMEMIT("-");
2726	else {
2727		switch (type) {
2728		case STATUSTYPE_INFO:
2729			r = dm_thin_get_mapped_count(tc->td, &mapped);
2730			if (r)
2731				return r;
2732
2733			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
2734			if (r < 0)
2735				return r;
2736
2737			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
2738			if (r)
2739				DMEMIT("%llu", ((highest + 1) *
2740						tc->pool->sectors_per_block) - 1);
2741			else
2742				DMEMIT("-");
2743			break;
2744
2745		case STATUSTYPE_TABLE:
2746			DMEMIT("%s %lu",
2747			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
2748			       (unsigned long) tc->dev_id);
2749			if (tc->origin_dev)
2750				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
2751			break;
2752		}
2753	}
2754
2755	return 0;
2756}
2757
2758static int thin_iterate_devices(struct dm_target *ti,
2759				iterate_devices_callout_fn fn, void *data)
2760{
2761	dm_block_t blocks;
2762	struct thin_c *tc = ti->private;
2763
2764	/*
2765	 * We can't call dm_pool_get_data_dev_size() since that blocks.  So
2766	 * we follow a more convoluted path through to the pool's target.
2767	 */
2768	if (!tc->pool->ti)
2769		return 0;	/* nothing is bound */
2770
2771	blocks = tc->pool->ti->len >> tc->pool->block_shift;
2772	if (blocks)
2773		return fn(ti, tc->pool_dev, 0, tc->pool->sectors_per_block * blocks, data);
2774
2775	return 0;
2776}
2777
2778static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
2779{
2780	struct thin_c *tc = ti->private;
2781	struct pool *pool = tc->pool;
2782
2783	blk_limits_io_min(limits, 0);
2784	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2785	set_discard_limits(pool, limits);
2786}
2787
2788static struct target_type thin_target = {
2789	.name = "thin",
2790	.version = {1, 1, 0},
2791	.module	= THIS_MODULE,
2792	.ctr = thin_ctr,
2793	.dtr = thin_dtr,
2794	.map = thin_map,
2795	.end_io = thin_endio,
2796	.postsuspend = thin_postsuspend,
2797	.status = thin_status,
2798	.iterate_devices = thin_iterate_devices,
2799	.io_hints = thin_io_hints,
2800};
2801
2802/*----------------------------------------------------------------*/
2803
2804static int __init dm_thin_init(void)
2805{
2806	int r;
2807
2808	pool_table_init();
2809
2810	r = dm_register_target(&thin_target);
2811	if (r)
2812		return r;
2813
2814	r = dm_register_target(&pool_target);
2815	if (r)
2816		goto bad_pool_target;
2817
2818	r = -ENOMEM;
2819
2820	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
2821	if (!_cell_cache)
2822		goto bad_cell_cache;
2823
2824	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
2825	if (!_new_mapping_cache)
2826		goto bad_new_mapping_cache;
2827
2828	_endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
2829	if (!_endio_hook_cache)
2830		goto bad_endio_hook_cache;
2831
2832	return 0;
2833
2834bad_endio_hook_cache:
2835	kmem_cache_destroy(_new_mapping_cache);
2836bad_new_mapping_cache:
2837	kmem_cache_destroy(_cell_cache);
2838bad_cell_cache:
2839	dm_unregister_target(&pool_target);
2840bad_pool_target:
2841	dm_unregister_target(&thin_target);
2842
2843	return r;
2844}
2845
2846static void dm_thin_exit(void)
2847{
2848	dm_unregister_target(&thin_target);
2849	dm_unregister_target(&pool_target);
2850
2851	kmem_cache_destroy(_cell_cache);
2852	kmem_cache_destroy(_new_mapping_cache);
2853	kmem_cache_destroy(_endio_hook_cache);
2854}
2855
2856module_init(dm_thin_init);
2857module_exit(dm_thin_exit);
2858
2859MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
2860MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2861MODULE_LICENSE("GPL");