   1/*
   2 * Copyright (C) 2012 Red Hat. All rights reserved.
   3 *
   4 * This file is released under the GPL.
   5 */
   6
   7#include "dm.h"
   8#include "dm-bio-prison-v2.h"
   9#include "dm-bio-record.h"
  10#include "dm-cache-metadata.h"
  11#include "dm-io-tracker.h"
  12
  13#include <linux/dm-io.h>
  14#include <linux/dm-kcopyd.h>
  15#include <linux/jiffies.h>
  16#include <linux/init.h>
  17#include <linux/mempool.h>
  18#include <linux/module.h>
  19#include <linux/rwsem.h>
  20#include <linux/slab.h>
  21#include <linux/vmalloc.h>
  22
  23#define DM_MSG_PREFIX "cache"
  24
  25DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
  26	"A percentage of time allocated for copying to and/or from cache");
  27
  28/*----------------------------------------------------------------*/
  29
  30/*
  31 * Glossary:
  32 *
  33 * oblock: index of an origin block
  34 * cblock: index of a cache block
  35 * promotion: movement of a block from origin to cache
  36 * demotion: movement of a block from cache to origin
  37 * migration: movement of a block between the origin and cache device,
  38 *	      either direction
  39 */
  40
  41/*----------------------------------------------------------------*/
  42
  43/*
  44 * Represents a chunk of future work.  'input' allows continuations to pass
  45 * values between themselves, typically error values.
  46 */
  47struct continuation {
  48	struct work_struct ws;
  49	blk_status_t input;
  50};
  51
  52static inline void init_continuation(struct continuation *k,
  53				     void (*fn)(struct work_struct *))
  54{
  55	INIT_WORK(&k->ws, fn);
  56	k->input = 0;
  57}
  58
  59static inline void queue_continuation(struct workqueue_struct *wq,
  60				      struct continuation *k)
  61{
  62	queue_work(wq, &k->ws);
  63}
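/*
 * Typical usage: embed a continuation in a larger struct, call
 * init_continuation() with the function to run next, then either hand
 * k->ws to whatever must finish first (eg, dm_cell_quiesce_v2() below)
 * or queue it directly with queue_continuation().
 */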
  64
  65/*----------------------------------------------------------------*/
  66
  67/*
  68 * The batcher collects together pieces of work that need a particular
  69 * operation to occur before they can proceed (typically a commit).
  70 */
  71struct batcher {
  72	/*
  73	 * The operation that everyone is waiting for.
  74	 */
  75	blk_status_t (*commit_op)(void *context);
  76	void *commit_context;
  77
  78	/*
  79	 * This is how bios should be issued once the commit op is complete
  80	 * (accounted_request).
  81	 */
  82	void (*issue_op)(struct bio *bio, void *context);
  83	void *issue_context;
  84
  85	/*
  86	 * Queued work gets put on here after commit.
  87	 */
  88	struct workqueue_struct *wq;
  89
  90	spinlock_t lock;
  91	struct list_head work_items;
  92	struct bio_list bios;
  93	struct work_struct commit_work;
  94
  95	bool commit_scheduled;
  96};
  97
  98static void __commit(struct work_struct *_ws)
  99{
 100	struct batcher *b = container_of(_ws, struct batcher, commit_work);
 101	blk_status_t r;
 102	struct list_head work_items;
 103	struct work_struct *ws, *tmp;
 104	struct continuation *k;
 105	struct bio *bio;
 106	struct bio_list bios;
 107
 108	INIT_LIST_HEAD(&work_items);
 109	bio_list_init(&bios);
 110
 111	/*
 112	 * We have to grab these before the commit_op to avoid a race
 113	 * condition.
 114	 */
 115	spin_lock_irq(&b->lock);
 116	list_splice_init(&b->work_items, &work_items);
 117	bio_list_merge(&bios, &b->bios);
 118	bio_list_init(&b->bios);
 119	b->commit_scheduled = false;
 120	spin_unlock_irq(&b->lock);
 121
 122	r = b->commit_op(b->commit_context);
 123
 124	list_for_each_entry_safe(ws, tmp, &work_items, entry) {
 125		k = container_of(ws, struct continuation, ws);
 126		k->input = r;
 127		INIT_LIST_HEAD(&ws->entry); /* to avoid a WARN_ON */
 128		queue_work(b->wq, ws);
 129	}
 130
 131	while ((bio = bio_list_pop(&bios))) {
 132		if (r) {
 133			bio->bi_status = r;
 134			bio_endio(bio);
 135		} else
 136			b->issue_op(bio, b->issue_context);
 137	}
 138}
 139
 140static void batcher_init(struct batcher *b,
 141			 blk_status_t (*commit_op)(void *),
 142			 void *commit_context,
 143			 void (*issue_op)(struct bio *bio, void *),
 144			 void *issue_context,
 145			 struct workqueue_struct *wq)
 146{
 147	b->commit_op = commit_op;
 148	b->commit_context = commit_context;
 149	b->issue_op = issue_op;
 150	b->issue_context = issue_context;
 151	b->wq = wq;
 152
 153	spin_lock_init(&b->lock);
 154	INIT_LIST_HEAD(&b->work_items);
 155	bio_list_init(&b->bios);
 156	INIT_WORK(&b->commit_work, __commit);
 157	b->commit_scheduled = false;
 158}
 159
 160static void async_commit(struct batcher *b)
 161{
 162	queue_work(b->wq, &b->commit_work);
 163}
 164
 165static void continue_after_commit(struct batcher *b, struct continuation *k)
 166{
 167	bool commit_scheduled;
 168
 169	spin_lock_irq(&b->lock);
 170	commit_scheduled = b->commit_scheduled;
 171	list_add_tail(&k->ws.entry, &b->work_items);
 172	spin_unlock_irq(&b->lock);
 173
 174	if (commit_scheduled)
 175		async_commit(b);
 176}
 177
 178/*
 179 * Bios are errored if commit failed.
 180 */
 181static void issue_after_commit(struct batcher *b, struct bio *bio)
 182{
  183	bool commit_scheduled;
  184
  185	spin_lock_irq(&b->lock);
  186	commit_scheduled = b->commit_scheduled;
  187	bio_list_add(&b->bios, bio);
  188	spin_unlock_irq(&b->lock);
  189
  190	if (commit_scheduled)
  191		async_commit(b);
 192}
 193
 194/*
 195 * Call this if some urgent work is waiting for the commit to complete.
 196 */
 197static void schedule_commit(struct batcher *b)
 198{
 199	bool immediate;
 200
 201	spin_lock_irq(&b->lock);
 202	immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
 203	b->commit_scheduled = true;
 204	spin_unlock_irq(&b->lock);
 205
 206	if (immediate)
 207		async_commit(b);
 208}
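/*
 * To summarise the batcher handshake: continue_after_commit() and
 * issue_after_commit() park work and bios on the batcher, kicking the
 * commit immediately if one had already been scheduled;
 * schedule_commit() marks a commit as scheduled and kicks it at once
 * if anything is waiting; __commit() atomically takes the parked
 * items, runs commit_op, then requeues or errors them according to
 * the result.
 */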
 209
 210/*
 211 * There are a couple of places where we let a bio run, but want to do some
 212 * work before calling its endio function.  We do this by temporarily
 213 * changing the endio fn.
 214 */
 215struct dm_hook_info {
 216	bio_end_io_t *bi_end_io;
 217};
 218
 219static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
 220			bio_end_io_t *bi_end_io, void *bi_private)
 221{
 222	h->bi_end_io = bio->bi_end_io;
 223
 224	bio->bi_end_io = bi_end_io;
 225	bio->bi_private = bi_private;
 226}
 227
 228static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
 229{
 230	bio->bi_end_io = h->bi_end_io;
 231}
 232
 233/*----------------------------------------------------------------*/
 234
 235#define MIGRATION_POOL_SIZE 128
 236#define COMMIT_PERIOD HZ
 237#define MIGRATION_COUNT_WINDOW 10
 238
 239/*
 240 * The block size of the device holding cache data must be
 241 * between 32KB and 1GB.
 242 */
 243#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
 244#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
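/*
 * ie, with 512 byte sectors: 64 sectors (32KB) minimum and 2097152
 * sectors (1GB) maximum.
 */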
 245
 246enum cache_metadata_mode {
 247	CM_WRITE,		/* metadata may be changed */
 248	CM_READ_ONLY,		/* metadata may not be changed */
 249	CM_FAIL
 250};
 251
 252enum cache_io_mode {
 253	/*
 254	 * Data is written to cached blocks only.  These blocks are marked
 255	 * dirty.  If you lose the cache device you will lose data.
 256	 * Potential performance increase for both reads and writes.
 257	 */
 258	CM_IO_WRITEBACK,
 259
 260	/*
 261	 * Data is written to both cache and origin.  Blocks are never
  262	 * dirty.  Potential performance benefit for reads only.
 263	 */
 264	CM_IO_WRITETHROUGH,
 265
 266	/*
 267	 * A degraded mode useful for various cache coherency situations
 268	 * (eg, rolling back snapshots).  Reads and writes always go to the
 269	 * origin.  If a write goes to a cached oblock, then the cache
 270	 * block is invalidated.
 271	 */
 272	CM_IO_PASSTHROUGH
 273};
 274
 275struct cache_features {
 276	enum cache_metadata_mode mode;
 277	enum cache_io_mode io_mode;
 278	unsigned metadata_version;
 279	bool discard_passdown:1;
 280};
 281
 282struct cache_stats {
 283	atomic_t read_hit;
 284	atomic_t read_miss;
 285	atomic_t write_hit;
 286	atomic_t write_miss;
 287	atomic_t demotion;
 288	atomic_t promotion;
 289	atomic_t writeback;
 290	atomic_t copies_avoided;
 291	atomic_t cache_cell_clash;
 292	atomic_t commit_count;
 293	atomic_t discard_count;
 294};
 295
 296struct cache {
 297	struct dm_target *ti;
 298	spinlock_t lock;
 299
 300	/*
 301	 * Fields for converting from sectors to blocks.
 302	 */
 303	int sectors_per_block_shift;
 304	sector_t sectors_per_block;
 305
 306	struct dm_cache_metadata *cmd;
 307
 308	/*
 309	 * Metadata is written to this device.
 310	 */
 311	struct dm_dev *metadata_dev;
 312
 313	/*
 314	 * The slower of the two data devices.  Typically a spindle.
 315	 */
 316	struct dm_dev *origin_dev;
 317
 318	/*
 319	 * The faster of the two data devices.  Typically an SSD.
 320	 */
 321	struct dm_dev *cache_dev;
 322
 323	/*
 324	 * Size of the origin device in _complete_ blocks and native sectors.
 325	 */
 326	dm_oblock_t origin_blocks;
 327	sector_t origin_sectors;
 328
 329	/*
 330	 * Size of the cache device in blocks.
 331	 */
 332	dm_cblock_t cache_size;
 333
 334	/*
 335	 * Invalidation fields.
 336	 */
 337	spinlock_t invalidation_lock;
 338	struct list_head invalidation_requests;
 339
 340	sector_t migration_threshold;
 341	wait_queue_head_t migration_wait;
 342	atomic_t nr_allocated_migrations;
 343
 344	/*
 345	 * The number of in flight migrations that are performing
 346	 * background io. eg, promotion, writeback.
 347	 */
 348	atomic_t nr_io_migrations;
 349
 350	struct bio_list deferred_bios;
 351
 352	struct rw_semaphore quiesce_lock;
 353
 354	/*
 355	 * origin_blocks entries, discarded if set.
 356	 */
 357	dm_dblock_t discard_nr_blocks;
 358	unsigned long *discard_bitset;
 359	uint32_t discard_block_size; /* a power of 2 times sectors per block */
 360
 361	/*
 362	 * Rather than reconstructing the table line for the status we just
 363	 * save it and regurgitate.
 364	 */
 365	unsigned nr_ctr_args;
 366	const char **ctr_args;
 367
 368	struct dm_kcopyd_client *copier;
 369	struct work_struct deferred_bio_worker;
 370	struct work_struct migration_worker;
 371	struct workqueue_struct *wq;
 372	struct delayed_work waker;
 373	struct dm_bio_prison_v2 *prison;
 374
 375	/*
 376	 * cache_size entries, dirty if set
 377	 */
 378	unsigned long *dirty_bitset;
 379	atomic_t nr_dirty;
 380
 381	unsigned policy_nr_args;
 382	struct dm_cache_policy *policy;
 383
 384	/*
 385	 * Cache features such as write-through.
 386	 */
 387	struct cache_features features;
 388
 389	struct cache_stats stats;
 390
 391	bool need_tick_bio:1;
 392	bool sized:1;
 393	bool invalidate:1;
 394	bool commit_requested:1;
 395	bool loaded_mappings:1;
 396	bool loaded_discards:1;
 397
 398	struct rw_semaphore background_work_lock;
 399
 400	struct batcher committer;
 401	struct work_struct commit_ws;
 402
 403	struct dm_io_tracker tracker;
 404
 405	mempool_t migration_pool;
 406
 407	struct bio_set bs;
 408};
 409
 410struct per_bio_data {
 411	bool tick:1;
 412	unsigned req_nr:2;
 413	struct dm_bio_prison_cell_v2 *cell;
 414	struct dm_hook_info hook_info;
 415	sector_t len;
 416};
 417
 418struct dm_cache_migration {
 419	struct continuation k;
 420	struct cache *cache;
 421
 422	struct policy_work *op;
 423	struct bio *overwrite_bio;
 424	struct dm_bio_prison_cell_v2 *cell;
 425
 426	dm_cblock_t invalidate_cblock;
 427	dm_oblock_t invalidate_oblock;
 428};
 429
 430/*----------------------------------------------------------------*/
 431
 432static bool writethrough_mode(struct cache *cache)
 433{
 434	return cache->features.io_mode == CM_IO_WRITETHROUGH;
 435}
 436
 437static bool writeback_mode(struct cache *cache)
 438{
 439	return cache->features.io_mode == CM_IO_WRITEBACK;
 440}
 441
 442static inline bool passthrough_mode(struct cache *cache)
 443{
 444	return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
 445}
 446
 447/*----------------------------------------------------------------*/
 448
 449static void wake_deferred_bio_worker(struct cache *cache)
 450{
 451	queue_work(cache->wq, &cache->deferred_bio_worker);
 452}
 453
 454static void wake_migration_worker(struct cache *cache)
 455{
 456	if (passthrough_mode(cache))
 457		return;
 458
 459	queue_work(cache->wq, &cache->migration_worker);
 460}
 461
 462/*----------------------------------------------------------------*/
 463
 464static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
 465{
 466	return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
 467}
 468
 469static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
 470{
 471	dm_bio_prison_free_cell_v2(cache->prison, cell);
 472}
 473
 474static struct dm_cache_migration *alloc_migration(struct cache *cache)
 475{
 476	struct dm_cache_migration *mg;
 477
 478	mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
 479
 480	memset(mg, 0, sizeof(*mg));
 481
 482	mg->cache = cache;
 483	atomic_inc(&cache->nr_allocated_migrations);
 484
 485	return mg;
 486}
 487
 488static void free_migration(struct dm_cache_migration *mg)
 489{
 490	struct cache *cache = mg->cache;
 491
 492	if (atomic_dec_and_test(&cache->nr_allocated_migrations))
 493		wake_up(&cache->migration_wait);
 494
 495	mempool_free(mg, &cache->migration_pool);
 496}
 497
 498/*----------------------------------------------------------------*/
 499
 500static inline dm_oblock_t oblock_succ(dm_oblock_t b)
 501{
 502	return to_oblock(from_oblock(b) + 1ull);
 503}
 504
 505static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2 *key)
 506{
 507	key->virtual = 0;
 508	key->dev = 0;
 509	key->block_begin = from_oblock(begin);
 510	key->block_end = from_oblock(end);
 511}
 512
 513/*
 514 * We have two lock levels.  Level 0, which is used to prevent WRITEs, and
 515 * level 1 which prevents *both* READs and WRITEs.
 516 */
 517#define WRITE_LOCK_LEVEL 0
 518#define READ_WRITE_LOCK_LEVEL 1
 519
 520static unsigned lock_level(struct bio *bio)
 521{
 522	return bio_data_dir(bio) == WRITE ?
 523		WRITE_LOCK_LEVEL :
 524		READ_WRITE_LOCK_LEVEL;
 525}
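/*
 * ie, a WRITE takes its shared lock at level 0 and so is blocked by
 * any exclusive lock, whereas a READ locks at level 1 and only blocks
 * once a migration upgrades to READ_WRITE_LOCK_LEVEL.
 */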
 526
 527/*----------------------------------------------------------------
 528 * Per bio data
 529 *--------------------------------------------------------------*/
 530
 531static struct per_bio_data *get_per_bio_data(struct bio *bio)
 532{
 533	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
 534	BUG_ON(!pb);
 535	return pb;
 536}
 537
 538static struct per_bio_data *init_per_bio_data(struct bio *bio)
 539{
 540	struct per_bio_data *pb = get_per_bio_data(bio);
 541
 542	pb->tick = false;
 543	pb->req_nr = dm_bio_get_target_bio_nr(bio);
 544	pb->cell = NULL;
 545	pb->len = 0;
 546
 547	return pb;
 548}
 549
 550/*----------------------------------------------------------------*/
 551
 552static void defer_bio(struct cache *cache, struct bio *bio)
 553{
 554	spin_lock_irq(&cache->lock);
 555	bio_list_add(&cache->deferred_bios, bio);
 556	spin_unlock_irq(&cache->lock);
 557
 558	wake_deferred_bio_worker(cache);
 559}
 560
 561static void defer_bios(struct cache *cache, struct bio_list *bios)
 562{
 563	spin_lock_irq(&cache->lock);
 564	bio_list_merge(&cache->deferred_bios, bios);
 565	bio_list_init(bios);
 566	spin_unlock_irq(&cache->lock);
 567
 568	wake_deferred_bio_worker(cache);
 569}
 570
 571/*----------------------------------------------------------------*/
 572
 573static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
 574{
 575	bool r;
 576	struct per_bio_data *pb;
 577	struct dm_cell_key_v2 key;
 578	dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
 579	struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
 580
 581	cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
 582
 583	build_key(oblock, end, &key);
 584	r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
 585	if (!r) {
 586		/*
 587		 * Failed to get the lock.
 588		 */
 589		free_prison_cell(cache, cell_prealloc);
 590		return r;
 591	}
 592
 593	if (cell != cell_prealloc)
 594		free_prison_cell(cache, cell_prealloc);
 595
 596	pb = get_per_bio_data(bio);
 597	pb->cell = cell;
 598
 599	return r;
 600}
 601
 602/*----------------------------------------------------------------*/
 603
 604static bool is_dirty(struct cache *cache, dm_cblock_t b)
 605{
 606	return test_bit(from_cblock(b), cache->dirty_bitset);
 607}
 608
 609static void set_dirty(struct cache *cache, dm_cblock_t cblock)
 610{
 611	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
 612		atomic_inc(&cache->nr_dirty);
 613		policy_set_dirty(cache->policy, cblock);
 614	}
 615}
 616
 617/*
  618 * These two are called after migrations to force the policy
 619 * and dirty bitset to be in sync.
 620 */
 621static void force_set_dirty(struct cache *cache, dm_cblock_t cblock)
 622{
 623	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset))
 624		atomic_inc(&cache->nr_dirty);
 625	policy_set_dirty(cache->policy, cblock);
 626}
 627
 628static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock)
 629{
 630	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
 631		if (atomic_dec_return(&cache->nr_dirty) == 0)
 632			dm_table_event(cache->ti->table);
 633	}
 634
 635	policy_clear_dirty(cache->policy, cblock);
 636}
 637
 638/*----------------------------------------------------------------*/
 639
 640static bool block_size_is_power_of_two(struct cache *cache)
 641{
 642	return cache->sectors_per_block_shift >= 0;
 643}
 644
 645static dm_block_t block_div(dm_block_t b, uint32_t n)
 646{
 647	do_div(b, n);
 648
 649	return b;
 650}
 651
 652static dm_block_t oblocks_per_dblock(struct cache *cache)
 653{
 654	dm_block_t oblocks = cache->discard_block_size;
 655
 656	if (block_size_is_power_of_two(cache))
 657		oblocks >>= cache->sectors_per_block_shift;
 658	else
 659		oblocks = block_div(oblocks, cache->sectors_per_block);
 660
 661	return oblocks;
 662}
 663
 664static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
 665{
 666	return to_dblock(block_div(from_oblock(oblock),
 667				   oblocks_per_dblock(cache)));
 668}
 669
 670static void set_discard(struct cache *cache, dm_dblock_t b)
 671{
 672	BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
 673	atomic_inc(&cache->stats.discard_count);
 674
 675	spin_lock_irq(&cache->lock);
 676	set_bit(from_dblock(b), cache->discard_bitset);
 677	spin_unlock_irq(&cache->lock);
 678}
 679
 680static void clear_discard(struct cache *cache, dm_dblock_t b)
 681{
 682	spin_lock_irq(&cache->lock);
 683	clear_bit(from_dblock(b), cache->discard_bitset);
 684	spin_unlock_irq(&cache->lock);
 685}
 686
 687static bool is_discarded(struct cache *cache, dm_dblock_t b)
 688{
 689	int r;
 690	spin_lock_irq(&cache->lock);
 691	r = test_bit(from_dblock(b), cache->discard_bitset);
 692	spin_unlock_irq(&cache->lock);
 693
 694	return r;
 695}
 696
 697static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
 698{
 699	int r;
 700	spin_lock_irq(&cache->lock);
 701	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
 702		     cache->discard_bitset);
 703	spin_unlock_irq(&cache->lock);
 704
 705	return r;
 706}
 707
 708/*----------------------------------------------------------------
 709 * Remapping
 710 *--------------------------------------------------------------*/
 711static void remap_to_origin(struct cache *cache, struct bio *bio)
 712{
 713	bio_set_dev(bio, cache->origin_dev->bdev);
 714}
 715
 716static void remap_to_cache(struct cache *cache, struct bio *bio,
 717			   dm_cblock_t cblock)
 718{
 719	sector_t bi_sector = bio->bi_iter.bi_sector;
 720	sector_t block = from_cblock(cblock);
 721
 722	bio_set_dev(bio, cache->cache_dev->bdev);
 723	if (!block_size_is_power_of_two(cache))
 724		bio->bi_iter.bi_sector =
 725			(block * cache->sectors_per_block) +
 726			sector_div(bi_sector, cache->sectors_per_block);
 727	else
 728		bio->bi_iter.bi_sector =
 729			(block << cache->sectors_per_block_shift) |
 730			(bi_sector & (cache->sectors_per_block - 1));
 731}
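/*
 * eg, with sectors_per_block == 64 (shift 6), a bio at sector 70
 * mapping to cblock 3 is remapped to (3 << 6) | (70 & 63) == sector
 * 198 of the cache device.
 */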
 732
 733static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 734{
 735	struct per_bio_data *pb;
 736
 737	spin_lock_irq(&cache->lock);
 738	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
 739	    bio_op(bio) != REQ_OP_DISCARD) {
 740		pb = get_per_bio_data(bio);
 741		pb->tick = true;
 742		cache->need_tick_bio = false;
 743	}
 744	spin_unlock_irq(&cache->lock);
 745}
 746
 747static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
 748					  dm_oblock_t oblock)
 749{
 750	// FIXME: check_if_tick_bio_needed() is called way too much through this interface
 751	check_if_tick_bio_needed(cache, bio);
 752	remap_to_origin(cache, bio);
 753	if (bio_data_dir(bio) == WRITE)
 754		clear_discard(cache, oblock_to_dblock(cache, oblock));
 755}
 756
 757static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 758				 dm_oblock_t oblock, dm_cblock_t cblock)
 759{
 760	check_if_tick_bio_needed(cache, bio);
 761	remap_to_cache(cache, bio, cblock);
 762	if (bio_data_dir(bio) == WRITE) {
 763		set_dirty(cache, cblock);
 764		clear_discard(cache, oblock_to_dblock(cache, oblock));
 765	}
 766}
 767
 768static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 769{
 770	sector_t block_nr = bio->bi_iter.bi_sector;
 771
 772	if (!block_size_is_power_of_two(cache))
 773		(void) sector_div(block_nr, cache->sectors_per_block);
 774	else
 775		block_nr >>= cache->sectors_per_block_shift;
 776
 777	return to_oblock(block_nr);
 778}
 779
 780static bool accountable_bio(struct cache *cache, struct bio *bio)
 781{
 782	return bio_op(bio) != REQ_OP_DISCARD;
 783}
 784
 785static void accounted_begin(struct cache *cache, struct bio *bio)
 786{
 787	struct per_bio_data *pb;
 788
 789	if (accountable_bio(cache, bio)) {
 790		pb = get_per_bio_data(bio);
 791		pb->len = bio_sectors(bio);
 792		dm_iot_io_begin(&cache->tracker, pb->len);
 793	}
 794}
 795
 796static void accounted_complete(struct cache *cache, struct bio *bio)
 797{
 798	struct per_bio_data *pb = get_per_bio_data(bio);
 799
 800	dm_iot_io_end(&cache->tracker, pb->len);
 801}
 802
 803static void accounted_request(struct cache *cache, struct bio *bio)
 804{
 805	accounted_begin(cache, bio);
 806	dm_submit_bio_remap(bio, NULL);
 807}
 808
 809static void issue_op(struct bio *bio, void *context)
 810{
 811	struct cache *cache = context;
 812	accounted_request(cache, bio);
 813}
 814
 815/*
 816 * When running in writethrough mode we need to send writes to clean blocks
 817 * to both the cache and origin devices.  Clone the bio and send them in parallel.
 818 */
 819static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
 820				      dm_oblock_t oblock, dm_cblock_t cblock)
 821{
 822	struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio,
 823						 GFP_NOIO, &cache->bs);
 824
 825	BUG_ON(!origin_bio);
 826
 827	bio_chain(origin_bio, bio);
 828
 829	if (bio_data_dir(origin_bio) == WRITE)
 830		clear_discard(cache, oblock_to_dblock(cache, oblock));
 831	submit_bio(origin_bio);
 832
 833	remap_to_cache(cache, bio, cblock);
 834}
 835
 836/*----------------------------------------------------------------
 837 * Failure modes
 838 *--------------------------------------------------------------*/
 839static enum cache_metadata_mode get_cache_mode(struct cache *cache)
 840{
 841	return cache->features.mode;
 842}
 843
 844static const char *cache_device_name(struct cache *cache)
 845{
 846	return dm_table_device_name(cache->ti->table);
 847}
 848
 849static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
 850{
 851	const char *descs[] = {
 852		"write",
 853		"read-only",
 854		"fail"
 855	};
 856
 857	dm_table_event(cache->ti->table);
 858	DMINFO("%s: switching cache to %s mode",
 859	       cache_device_name(cache), descs[(int)mode]);
 860}
 861
 862static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
 863{
 864	bool needs_check;
 865	enum cache_metadata_mode old_mode = get_cache_mode(cache);
 866
 867	if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
 868		DMERR("%s: unable to read needs_check flag, setting failure mode.",
 869		      cache_device_name(cache));
 870		new_mode = CM_FAIL;
 871	}
 872
 873	if (new_mode == CM_WRITE && needs_check) {
 874		DMERR("%s: unable to switch cache to write mode until repaired.",
 875		      cache_device_name(cache));
 876		if (old_mode != new_mode)
 877			new_mode = old_mode;
 878		else
 879			new_mode = CM_READ_ONLY;
 880	}
 881
 882	/* Never move out of fail mode */
 883	if (old_mode == CM_FAIL)
 884		new_mode = CM_FAIL;
 885
 886	switch (new_mode) {
 887	case CM_FAIL:
 888	case CM_READ_ONLY:
 889		dm_cache_metadata_set_read_only(cache->cmd);
 890		break;
 891
 892	case CM_WRITE:
 893		dm_cache_metadata_set_read_write(cache->cmd);
 894		break;
 895	}
 896
 897	cache->features.mode = new_mode;
 898
 899	if (new_mode != old_mode)
 900		notify_mode_switch(cache, new_mode);
 901}
 902
 903static void abort_transaction(struct cache *cache)
 904{
 905	const char *dev_name = cache_device_name(cache);
 906
 907	if (get_cache_mode(cache) >= CM_READ_ONLY)
 908		return;
 909
 910	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
 911	if (dm_cache_metadata_abort(cache->cmd)) {
 912		DMERR("%s: failed to abort metadata transaction", dev_name);
 913		set_cache_mode(cache, CM_FAIL);
 914	}
 915
 916	if (dm_cache_metadata_set_needs_check(cache->cmd)) {
 917		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
 918		set_cache_mode(cache, CM_FAIL);
 919	}
 920}
 921
 922static void metadata_operation_failed(struct cache *cache, const char *op, int r)
 923{
 924	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
 925		    cache_device_name(cache), op, r);
 926	abort_transaction(cache);
 927	set_cache_mode(cache, CM_READ_ONLY);
 928}
 929
 930/*----------------------------------------------------------------*/
 931
 932static void load_stats(struct cache *cache)
 933{
 934	struct dm_cache_statistics stats;
 935
 936	dm_cache_metadata_get_stats(cache->cmd, &stats);
 937	atomic_set(&cache->stats.read_hit, stats.read_hits);
 938	atomic_set(&cache->stats.read_miss, stats.read_misses);
 939	atomic_set(&cache->stats.write_hit, stats.write_hits);
 940	atomic_set(&cache->stats.write_miss, stats.write_misses);
 941}
 942
 943static void save_stats(struct cache *cache)
 944{
 945	struct dm_cache_statistics stats;
 946
 947	if (get_cache_mode(cache) >= CM_READ_ONLY)
 948		return;
 949
 950	stats.read_hits = atomic_read(&cache->stats.read_hit);
 951	stats.read_misses = atomic_read(&cache->stats.read_miss);
 952	stats.write_hits = atomic_read(&cache->stats.write_hit);
 953	stats.write_misses = atomic_read(&cache->stats.write_miss);
 954
 955	dm_cache_metadata_set_stats(cache->cmd, &stats);
 956}
 957
 958static void update_stats(struct cache_stats *stats, enum policy_operation op)
 959{
 960	switch (op) {
 961	case POLICY_PROMOTE:
 962		atomic_inc(&stats->promotion);
 963		break;
 964
 965	case POLICY_DEMOTE:
 966		atomic_inc(&stats->demotion);
 967		break;
 968
 969	case POLICY_WRITEBACK:
 970		atomic_inc(&stats->writeback);
 971		break;
 972	}
 973}
 974
 975/*----------------------------------------------------------------
 976 * Migration processing
 977 *
 978 * Migration covers moving data from the origin device to the cache, or
 979 * vice versa.
 980 *--------------------------------------------------------------*/
 981
 982static void inc_io_migrations(struct cache *cache)
 983{
 984	atomic_inc(&cache->nr_io_migrations);
 985}
 986
 987static void dec_io_migrations(struct cache *cache)
 988{
 989	atomic_dec(&cache->nr_io_migrations);
 990}
 991
 992static bool discard_or_flush(struct bio *bio)
 993{
 994	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
 995}
 996
 997static void calc_discard_block_range(struct cache *cache, struct bio *bio,
 998				     dm_dblock_t *b, dm_dblock_t *e)
 999{
1000	sector_t sb = bio->bi_iter.bi_sector;
1001	sector_t se = bio_end_sector(bio);
1002
1003	*b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));
1004
1005	if (se - sb < cache->discard_block_size)
1006		*e = *b;
1007	else
1008		*e = to_dblock(block_div(se, cache->discard_block_size));
1009}
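/*
 * Both ends are rounded inwards, so only whole discard blocks are
 * covered.  eg, with a discard_block_size of 128 sectors, a discard
 * of sectors [100, 400) gives b == 1 and e == 3, marking just dblocks
 * 1 and 2.
 */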
1010
1011/*----------------------------------------------------------------*/
1012
1013static void prevent_background_work(struct cache *cache)
1014{
1015	lockdep_off();
1016	down_write(&cache->background_work_lock);
1017	lockdep_on();
1018}
1019
1020static void allow_background_work(struct cache *cache)
1021{
1022	lockdep_off();
1023	up_write(&cache->background_work_lock);
1024	lockdep_on();
1025}
1026
1027static bool background_work_begin(struct cache *cache)
1028{
1029	bool r;
1030
1031	lockdep_off();
1032	r = down_read_trylock(&cache->background_work_lock);
1033	lockdep_on();
1034
1035	return r;
1036}
1037
1038static void background_work_end(struct cache *cache)
1039{
1040	lockdep_off();
1041	up_read(&cache->background_work_lock);
1042	lockdep_on();
1043}
1044
1045/*----------------------------------------------------------------*/
1046
1047static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1048{
1049	return (bio_data_dir(bio) == WRITE) &&
1050		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1051}
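/*
 * bi_size is in bytes, hence the shift: with 64 sector blocks a bio
 * must carry 64 << SECTOR_SHIFT == 32768 bytes to cover a whole
 * block.
 */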
1052
1053static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
1054{
1055	return writeback_mode(cache) &&
1056		(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
1057}
1058
1059static void quiesce(struct dm_cache_migration *mg,
1060		    void (*continuation)(struct work_struct *))
1061{
1062	init_continuation(&mg->k, continuation);
1063	dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
1064}
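/*
 * ie, init the continuation first, then have the prison queue it once
 * the cell has quiesced (existing holders have dropped their locks).
 */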
1065
1066static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
1067{
1068	struct continuation *k = container_of(ws, struct continuation, ws);
1069	return container_of(k, struct dm_cache_migration, k);
1070}
1071
1072static void copy_complete(int read_err, unsigned long write_err, void *context)
1073{
1074	struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
1075
1076	if (read_err || write_err)
1077		mg->k.input = BLK_STS_IOERR;
1078
1079	queue_continuation(mg->cache->wq, &mg->k);
1080}
1081
1082static void copy(struct dm_cache_migration *mg, bool promote)
1083{
1084	struct dm_io_region o_region, c_region;
1085	struct cache *cache = mg->cache;
1086
1087	o_region.bdev = cache->origin_dev->bdev;
1088	o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
1089	o_region.count = cache->sectors_per_block;
1090
1091	c_region.bdev = cache->cache_dev->bdev;
1092	c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
1093	c_region.count = cache->sectors_per_block;
1094
1095	if (promote)
1096		dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
1097	else
1098		dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
1099}
1100
1101static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
1102{
1103	struct per_bio_data *pb = get_per_bio_data(bio);
1104
1105	if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
1106		free_prison_cell(cache, pb->cell);
1107	pb->cell = NULL;
1108}
1109
1110static void overwrite_endio(struct bio *bio)
1111{
1112	struct dm_cache_migration *mg = bio->bi_private;
1113	struct cache *cache = mg->cache;
1114	struct per_bio_data *pb = get_per_bio_data(bio);
1115
1116	dm_unhook_bio(&pb->hook_info, bio);
1117
1118	if (bio->bi_status)
1119		mg->k.input = bio->bi_status;
1120
1121	queue_continuation(cache->wq, &mg->k);
1122}
1123
1124static void overwrite(struct dm_cache_migration *mg,
1125		      void (*continuation)(struct work_struct *))
1126{
1127	struct bio *bio = mg->overwrite_bio;
1128	struct per_bio_data *pb = get_per_bio_data(bio);
1129
1130	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1131
1132	/*
1133	 * The overwrite bio is part of the copy operation, as such it does
1134	 * not set/clear discard or dirty flags.
1135	 */
1136	if (mg->op->op == POLICY_PROMOTE)
1137		remap_to_cache(mg->cache, bio, mg->op->cblock);
1138	else
1139		remap_to_origin(mg->cache, bio);
1140
1141	init_continuation(&mg->k, continuation);
1142	accounted_request(mg->cache, bio);
1143}
1144
1145/*
1146 * Migration steps:
1147 *
1148 * 1) exclusive lock preventing WRITEs
1149 * 2) quiesce
1150 * 3) copy or issue overwrite bio
1151 * 4) upgrade to exclusive lock preventing READs and WRITEs
1152 * 5) quiesce
1153 * 6) update metadata and commit
1154 * 7) unlock
1155 */
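/*
 * Roughly: mg_start() performs step 1 via mg_lock_writes(), mg_copy()
 * and overwrite() cover step 3, mg_upgrade_lock() handles steps 4 and
 * 5, and mg_update_metadata() plus mg_complete() finish steps 6 and 7.
 */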
1156static void mg_complete(struct dm_cache_migration *mg, bool success)
1157{
1158	struct bio_list bios;
1159	struct cache *cache = mg->cache;
1160	struct policy_work *op = mg->op;
1161	dm_cblock_t cblock = op->cblock;
1162
1163	if (success)
1164		update_stats(&cache->stats, op->op);
1165
1166	switch (op->op) {
1167	case POLICY_PROMOTE:
1168		clear_discard(cache, oblock_to_dblock(cache, op->oblock));
1169		policy_complete_background_work(cache->policy, op, success);
1170
1171		if (mg->overwrite_bio) {
1172			if (success)
1173				force_set_dirty(cache, cblock);
1174			else if (mg->k.input)
1175				mg->overwrite_bio->bi_status = mg->k.input;
1176			else
1177				mg->overwrite_bio->bi_status = BLK_STS_IOERR;
1178			bio_endio(mg->overwrite_bio);
1179		} else {
1180			if (success)
1181				force_clear_dirty(cache, cblock);
1182			dec_io_migrations(cache);
1183		}
1184		break;
1185
1186	case POLICY_DEMOTE:
1187		/*
1188		 * We clear dirty here to update the nr_dirty counter.
1189		 */
1190		if (success)
1191			force_clear_dirty(cache, cblock);
1192		policy_complete_background_work(cache->policy, op, success);
1193		dec_io_migrations(cache);
1194		break;
1195
1196	case POLICY_WRITEBACK:
1197		if (success)
1198			force_clear_dirty(cache, cblock);
1199		policy_complete_background_work(cache->policy, op, success);
1200		dec_io_migrations(cache);
1201		break;
1202	}
1203
1204	bio_list_init(&bios);
1205	if (mg->cell) {
1206		if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1207			free_prison_cell(cache, mg->cell);
1208	}
1209
1210	free_migration(mg);
1211	defer_bios(cache, &bios);
1212	wake_migration_worker(cache);
1213
1214	background_work_end(cache);
1215}
1216
1217static void mg_success(struct work_struct *ws)
1218{
1219	struct dm_cache_migration *mg = ws_to_mg(ws);
1220	mg_complete(mg, mg->k.input == 0);
1221}
1222
1223static void mg_update_metadata(struct work_struct *ws)
1224{
1225	int r;
1226	struct dm_cache_migration *mg = ws_to_mg(ws);
1227	struct cache *cache = mg->cache;
1228	struct policy_work *op = mg->op;
1229
1230	switch (op->op) {
1231	case POLICY_PROMOTE:
1232		r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock);
1233		if (r) {
1234			DMERR_LIMIT("%s: migration failed; couldn't insert mapping",
1235				    cache_device_name(cache));
1236			metadata_operation_failed(cache, "dm_cache_insert_mapping", r);
1237
1238			mg_complete(mg, false);
1239			return;
1240		}
1241		mg_complete(mg, true);
1242		break;
1243
1244	case POLICY_DEMOTE:
1245		r = dm_cache_remove_mapping(cache->cmd, op->cblock);
1246		if (r) {
1247			DMERR_LIMIT("%s: migration failed; couldn't update on disk metadata",
1248				    cache_device_name(cache));
1249			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
1250
1251			mg_complete(mg, false);
1252			return;
1253		}
1254
1255		/*
 1256		 * It would be nice if we only had to commit when a REQ_PREFLUSH
1257		 * comes through.  But there's one scenario that we have to
1258		 * look out for:
1259		 *
1260		 * - vblock x in a cache block
 1261		 * - demotion occurs
 1262		 * - cache block gets reallocated and overwritten
1263		 * - crash
1264		 *
1265		 * When we recover, because there was no commit the cache will
1266		 * rollback to having the data for vblock x in the cache block.
1267		 * But the cache block has since been overwritten, so it'll end
1268		 * up pointing to data that was never in 'x' during the history
1269		 * of the device.
1270		 *
1271		 * To avoid this issue we require a commit as part of the
1272		 * demotion operation.
1273		 */
1274		init_continuation(&mg->k, mg_success);
1275		continue_after_commit(&cache->committer, &mg->k);
1276		schedule_commit(&cache->committer);
1277		break;
1278
1279	case POLICY_WRITEBACK:
1280		mg_complete(mg, true);
1281		break;
1282	}
1283}
1284
1285static void mg_update_metadata_after_copy(struct work_struct *ws)
1286{
1287	struct dm_cache_migration *mg = ws_to_mg(ws);
1288
1289	/*
1290	 * Did the copy succeed?
1291	 */
1292	if (mg->k.input)
1293		mg_complete(mg, false);
1294	else
1295		mg_update_metadata(ws);
1296}
1297
1298static void mg_upgrade_lock(struct work_struct *ws)
1299{
1300	int r;
1301	struct dm_cache_migration *mg = ws_to_mg(ws);
1302
1303	/*
1304	 * Did the copy succeed?
1305	 */
1306	if (mg->k.input)
1307		mg_complete(mg, false);
1308
1309	else {
1310		/*
1311		 * Now we want the lock to prevent both reads and writes.
1312		 */
1313		r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
1314					    READ_WRITE_LOCK_LEVEL);
1315		if (r < 0)
1316			mg_complete(mg, false);
1317
1318		else if (r)
1319			quiesce(mg, mg_update_metadata);
1320
1321		else
1322			mg_update_metadata(ws);
1323	}
1324}
1325
1326static void mg_full_copy(struct work_struct *ws)
1327{
1328	struct dm_cache_migration *mg = ws_to_mg(ws);
1329	struct cache *cache = mg->cache;
1330	struct policy_work *op = mg->op;
1331	bool is_policy_promote = (op->op == POLICY_PROMOTE);
1332
1333	if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
1334	    is_discarded_oblock(cache, op->oblock)) {
1335		mg_upgrade_lock(ws);
1336		return;
1337	}
1338
1339	init_continuation(&mg->k, mg_upgrade_lock);
1340	copy(mg, is_policy_promote);
1341}
1342
1343static void mg_copy(struct work_struct *ws)
1344{
1345	struct dm_cache_migration *mg = ws_to_mg(ws);
1346
1347	if (mg->overwrite_bio) {
1348		/*
1349		 * No exclusive lock was held when we last checked if the bio
1350		 * was optimisable.  So we have to check again in case things
1351		 * have changed (eg, the block may no longer be discarded).
1352		 */
1353		if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
1354			/*
1355			 * Fallback to a real full copy after doing some tidying up.
1356			 */
1357			bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
 1358			BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
1359			mg->overwrite_bio = NULL;
1360			inc_io_migrations(mg->cache);
1361			mg_full_copy(ws);
1362			return;
1363		}
1364
1365		/*
1366		 * It's safe to do this here, even though it's new data
1367		 * because all IO has been locked out of the block.
1368		 *
 1369		 * mg_lock_writes() already took READ_WRITE_LOCK_LEVEL
 1370		 * so we're _not_ using mg_upgrade_lock() as the continuation.
1371		 */
1372		overwrite(mg, mg_update_metadata_after_copy);
1373
1374	} else
1375		mg_full_copy(ws);
1376}
1377
1378static int mg_lock_writes(struct dm_cache_migration *mg)
1379{
1380	int r;
1381	struct dm_cell_key_v2 key;
1382	struct cache *cache = mg->cache;
1383	struct dm_bio_prison_cell_v2 *prealloc;
1384
1385	prealloc = alloc_prison_cell(cache);
1386
1387	/*
1388	 * Prevent writes to the block, but allow reads to continue.
1389	 * Unless we're using an overwrite bio, in which case we lock
1390	 * everything.
1391	 */
1392	build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
1393	r = dm_cell_lock_v2(cache->prison, &key,
1394			    mg->overwrite_bio ?  READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
1395			    prealloc, &mg->cell);
1396	if (r < 0) {
1397		free_prison_cell(cache, prealloc);
1398		mg_complete(mg, false);
1399		return r;
1400	}
1401
1402	if (mg->cell != prealloc)
1403		free_prison_cell(cache, prealloc);
1404
1405	if (r == 0)
1406		mg_copy(&mg->k.ws);
1407	else
1408		quiesce(mg, mg_copy);
1409
1410	return 0;
1411}
1412
1413static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
1414{
1415	struct dm_cache_migration *mg;
1416
1417	if (!background_work_begin(cache)) {
1418		policy_complete_background_work(cache->policy, op, false);
1419		return -EPERM;
1420	}
1421
1422	mg = alloc_migration(cache);
1423
1424	mg->op = op;
1425	mg->overwrite_bio = bio;
1426
1427	if (!bio)
1428		inc_io_migrations(cache);
1429
1430	return mg_lock_writes(mg);
1431}
1432
1433/*----------------------------------------------------------------
1434 * invalidation processing
1435 *--------------------------------------------------------------*/
1436
1437static void invalidate_complete(struct dm_cache_migration *mg, bool success)
1438{
1439	struct bio_list bios;
1440	struct cache *cache = mg->cache;
1441
1442	bio_list_init(&bios);
1443	if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1444		free_prison_cell(cache, mg->cell);
1445
1446	if (!success && mg->overwrite_bio)
1447		bio_io_error(mg->overwrite_bio);
1448
1449	free_migration(mg);
1450	defer_bios(cache, &bios);
1451
1452	background_work_end(cache);
1453}
1454
1455static void invalidate_completed(struct work_struct *ws)
1456{
1457	struct dm_cache_migration *mg = ws_to_mg(ws);
1458	invalidate_complete(mg, !mg->k.input);
1459}
1460
1461static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
1462{
1463	int r = policy_invalidate_mapping(cache->policy, cblock);
1464	if (!r) {
1465		r = dm_cache_remove_mapping(cache->cmd, cblock);
1466		if (r) {
1467			DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
1468				    cache_device_name(cache));
1469			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
1470		}
1471
1472	} else if (r == -ENODATA) {
1473		/*
1474		 * Harmless, already unmapped.
1475		 */
1476		r = 0;
1477
1478	} else
1479		DMERR("%s: policy_invalidate_mapping failed", cache_device_name(cache));
1480
1481	return r;
1482}
1483
1484static void invalidate_remove(struct work_struct *ws)
1485{
1486	int r;
1487	struct dm_cache_migration *mg = ws_to_mg(ws);
1488	struct cache *cache = mg->cache;
1489
1490	r = invalidate_cblock(cache, mg->invalidate_cblock);
1491	if (r) {
1492		invalidate_complete(mg, false);
1493		return;
1494	}
1495
1496	init_continuation(&mg->k, invalidate_completed);
1497	continue_after_commit(&cache->committer, &mg->k);
1498	remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
1499	mg->overwrite_bio = NULL;
1500	schedule_commit(&cache->committer);
1501}
1502
1503static int invalidate_lock(struct dm_cache_migration *mg)
1504{
1505	int r;
1506	struct dm_cell_key_v2 key;
1507	struct cache *cache = mg->cache;
1508	struct dm_bio_prison_cell_v2 *prealloc;
1509
1510	prealloc = alloc_prison_cell(cache);
1511
1512	build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
1513	r = dm_cell_lock_v2(cache->prison, &key,
1514			    READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
1515	if (r < 0) {
1516		free_prison_cell(cache, prealloc);
1517		invalidate_complete(mg, false);
1518		return r;
1519	}
1520
1521	if (mg->cell != prealloc)
1522		free_prison_cell(cache, prealloc);
1523
1524	if (r)
1525		quiesce(mg, invalidate_remove);
1526
1527	else {
1528		/*
1529		 * We can't call invalidate_remove() directly here because we
1530		 * might still be in request context.
1531		 */
1532		init_continuation(&mg->k, invalidate_remove);
1533		queue_work(cache->wq, &mg->k.ws);
1534	}
1535
1536	return 0;
1537}
1538
1539static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
1540			    dm_oblock_t oblock, struct bio *bio)
1541{
1542	struct dm_cache_migration *mg;
1543
1544	if (!background_work_begin(cache))
1545		return -EPERM;
1546
1547	mg = alloc_migration(cache);
1548
1549	mg->overwrite_bio = bio;
1550	mg->invalidate_cblock = cblock;
1551	mg->invalidate_oblock = oblock;
1552
1553	return invalidate_lock(mg);
1554}
1555
1556/*----------------------------------------------------------------
1557 * bio processing
1558 *--------------------------------------------------------------*/
1559
1560enum busy {
1561	IDLE,
1562	BUSY
1563};
1564
1565static enum busy spare_migration_bandwidth(struct cache *cache)
1566{
1567	bool idle = dm_iot_idle_for(&cache->tracker, HZ);
1568	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
1569		cache->sectors_per_block;
1570
1571	if (idle && current_volume <= cache->migration_threshold)
1572		return IDLE;
1573	else
1574		return BUSY;
1575}
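/*
 * eg, with 64 sector blocks and a migration_threshold of 2048
 * sectors, up to 31 migrations ((31 + 1) * 64 == 2048) may be in
 * flight before we report BUSY, provided the device has also been
 * idle for HZ jiffies.
 */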
1576
1577static void inc_hit_counter(struct cache *cache, struct bio *bio)
1578{
1579	atomic_inc(bio_data_dir(bio) == READ ?
1580		   &cache->stats.read_hit : &cache->stats.write_hit);
1581}
1582
1583static void inc_miss_counter(struct cache *cache, struct bio *bio)
1584{
1585	atomic_inc(bio_data_dir(bio) == READ ?
1586		   &cache->stats.read_miss : &cache->stats.write_miss);
1587}
1588
1589/*----------------------------------------------------------------*/
1590
1591static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
1592		   bool *commit_needed)
1593{
1594	int r, data_dir;
1595	bool rb, background_queued;
1596	dm_cblock_t cblock;
1597
1598	*commit_needed = false;
1599
1600	rb = bio_detain_shared(cache, block, bio);
1601	if (!rb) {
1602		/*
1603		 * An exclusive lock is held for this block, so we have to
1604		 * wait.  We set the commit_needed flag so the current
1605		 * transaction will be committed asap, allowing this lock
1606		 * to be dropped.
1607		 */
1608		*commit_needed = true;
1609		return DM_MAPIO_SUBMITTED;
1610	}
1611
1612	data_dir = bio_data_dir(bio);
1613
1614	if (optimisable_bio(cache, bio, block)) {
1615		struct policy_work *op = NULL;
1616
1617		r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op);
1618		if (unlikely(r && r != -ENOENT)) {
1619			DMERR_LIMIT("%s: policy_lookup_with_work() failed with r = %d",
1620				    cache_device_name(cache), r);
1621			bio_io_error(bio);
1622			return DM_MAPIO_SUBMITTED;
1623		}
1624
1625		if (r == -ENOENT && op) {
1626			bio_drop_shared_lock(cache, bio);
1627			BUG_ON(op->op != POLICY_PROMOTE);
1628			mg_start(cache, op, bio);
1629			return DM_MAPIO_SUBMITTED;
1630		}
1631	} else {
1632		r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued);
1633		if (unlikely(r && r != -ENOENT)) {
1634			DMERR_LIMIT("%s: policy_lookup() failed with r = %d",
1635				    cache_device_name(cache), r);
1636			bio_io_error(bio);
1637			return DM_MAPIO_SUBMITTED;
1638		}
1639
1640		if (background_queued)
1641			wake_migration_worker(cache);
1642	}
1643
1644	if (r == -ENOENT) {
1645		struct per_bio_data *pb = get_per_bio_data(bio);
1646
1647		/*
1648		 * Miss.
1649		 */
1650		inc_miss_counter(cache, bio);
1651		if (pb->req_nr == 0) {
1652			accounted_begin(cache, bio);
1653			remap_to_origin_clear_discard(cache, bio, block);
1654		} else {
1655			/*
1656			 * This is a duplicate writethrough io that is no
1657			 * longer needed because the block has been demoted.
1658			 */
1659			bio_endio(bio);
1660			return DM_MAPIO_SUBMITTED;
1661		}
1662	} else {
1663		/*
1664		 * Hit.
1665		 */
1666		inc_hit_counter(cache, bio);
1667
1668		/*
1669		 * Passthrough always maps to the origin, invalidating any
1670		 * cache blocks that are written to.
1671		 */
1672		if (passthrough_mode(cache)) {
1673			if (bio_data_dir(bio) == WRITE) {
1674				bio_drop_shared_lock(cache, bio);
1675				atomic_inc(&cache->stats.demotion);
1676				invalidate_start(cache, cblock, block, bio);
1677			} else
1678				remap_to_origin_clear_discard(cache, bio, block);
1679		} else {
1680			if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
1681			    !is_dirty(cache, cblock)) {
1682				remap_to_origin_and_cache(cache, bio, block, cblock);
1683				accounted_begin(cache, bio);
1684			} else
1685				remap_to_cache_dirty(cache, bio, block, cblock);
1686		}
1687	}
1688
1689	/*
1690	 * dm core turns FUA requests into a separate payload and FLUSH req.
1691	 */
1692	if (bio->bi_opf & REQ_FUA) {
1693		/*
1694		 * issue_after_commit will call accounted_begin a second time.  So
1695		 * we call accounted_complete() to avoid double accounting.
1696		 */
1697		accounted_complete(cache, bio);
1698		issue_after_commit(&cache->committer, bio);
1699		*commit_needed = true;
1700		return DM_MAPIO_SUBMITTED;
1701	}
1702
1703	return DM_MAPIO_REMAPPED;
1704}
1705
1706static bool process_bio(struct cache *cache, struct bio *bio)
1707{
1708	bool commit_needed;
1709
1710	if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
1711		dm_submit_bio_remap(bio, NULL);
1712
1713	return commit_needed;
1714}
1715
1716/*
1717 * A non-zero return indicates read_only or fail_io mode.
1718 */
1719static int commit(struct cache *cache, bool clean_shutdown)
1720{
1721	int r;
1722
1723	if (get_cache_mode(cache) >= CM_READ_ONLY)
1724		return -EINVAL;
1725
1726	atomic_inc(&cache->stats.commit_count);
1727	r = dm_cache_commit(cache->cmd, clean_shutdown);
1728	if (r)
1729		metadata_operation_failed(cache, "dm_cache_commit", r);
1730
1731	return r;
1732}
1733
1734/*
1735 * Used by the batcher.
1736 */
1737static blk_status_t commit_op(void *context)
1738{
1739	struct cache *cache = context;
1740
1741	if (dm_cache_changed_this_transaction(cache->cmd))
1742		return errno_to_blk_status(commit(cache, false));
1743
1744	return 0;
1745}
1746
1747/*----------------------------------------------------------------*/
1748
1749static bool process_flush_bio(struct cache *cache, struct bio *bio)
1750{
1751	struct per_bio_data *pb = get_per_bio_data(bio);
1752
1753	if (!pb->req_nr)
1754		remap_to_origin(cache, bio);
1755	else
1756		remap_to_cache(cache, bio, 0);
1757
1758	issue_after_commit(&cache->committer, bio);
1759	return true;
1760}
1761
1762static bool process_discard_bio(struct cache *cache, struct bio *bio)
1763{
1764	dm_dblock_t b, e;
1765
 1766	// FIXME: do we need to lock the region?  Or can we just assume the
 1767	// user won't be so foolish as to issue discard concurrently with
 1768	// other IO?
1769	calc_discard_block_range(cache, bio, &b, &e);
1770	while (b != e) {
1771		set_discard(cache, b);
1772		b = to_dblock(from_dblock(b) + 1);
1773	}
1774
1775	if (cache->features.discard_passdown) {
1776		remap_to_origin(cache, bio);
1777		dm_submit_bio_remap(bio, NULL);
1778	} else
1779		bio_endio(bio);
1780
1781	return false;
1782}
1783
1784static void process_deferred_bios(struct work_struct *ws)
1785{
1786	struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
1787
1788	bool commit_needed = false;
1789	struct bio_list bios;
1790	struct bio *bio;
1791
1792	bio_list_init(&bios);
1793
1794	spin_lock_irq(&cache->lock);
1795	bio_list_merge(&bios, &cache->deferred_bios);
1796	bio_list_init(&cache->deferred_bios);
1797	spin_unlock_irq(&cache->lock);
1798
1799	while ((bio = bio_list_pop(&bios))) {
1800		if (bio->bi_opf & REQ_PREFLUSH)
1801			commit_needed = process_flush_bio(cache, bio) || commit_needed;
1802
1803		else if (bio_op(bio) == REQ_OP_DISCARD)
1804			commit_needed = process_discard_bio(cache, bio) || commit_needed;
1805
1806		else
1807			commit_needed = process_bio(cache, bio) || commit_needed;
1808	}
1809
1810	if (commit_needed)
1811		schedule_commit(&cache->committer);
1812}
1813
1814/*----------------------------------------------------------------
1815 * Main worker loop
1816 *--------------------------------------------------------------*/
1817
1818static void requeue_deferred_bios(struct cache *cache)
1819{
1820	struct bio *bio;
1821	struct bio_list bios;
1822
1823	bio_list_init(&bios);
1824	bio_list_merge(&bios, &cache->deferred_bios);
1825	bio_list_init(&cache->deferred_bios);
1826
1827	while ((bio = bio_list_pop(&bios))) {
1828		bio->bi_status = BLK_STS_DM_REQUEUE;
1829		bio_endio(bio);
1830	}
1831}
1832
1833/*
1834 * We want to commit periodically so that not too much
1835 * unwritten metadata builds up.
1836 */
1837static void do_waker(struct work_struct *ws)
1838{
1839	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1840
1841	policy_tick(cache->policy, true);
1842	wake_migration_worker(cache);
1843	schedule_commit(&cache->committer);
1844	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1845}
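/*
 * COMMIT_PERIOD is HZ jiffies, so the waker reschedules itself
 * roughly once a second.
 */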
1846
1847static void check_migrations(struct work_struct *ws)
1848{
1849	int r;
1850	struct policy_work *op;
1851	struct cache *cache = container_of(ws, struct cache, migration_worker);
1852	enum busy b;
1853
1854	for (;;) {
1855		b = spare_migration_bandwidth(cache);
1856
1857		r = policy_get_background_work(cache->policy, b == IDLE, &op);
1858		if (r == -ENODATA)
1859			break;
1860
1861		if (r) {
1862			DMERR_LIMIT("%s: policy_background_work failed",
1863				    cache_device_name(cache));
1864			break;
1865		}
1866
1867		r = mg_start(cache, op, NULL);
1868		if (r)
1869			break;
1870	}
1871}
1872
1873/*----------------------------------------------------------------
1874 * Target methods
1875 *--------------------------------------------------------------*/
1876
1877/*
1878 * This function gets called on the error paths of the constructor, so we
1879 * have to cope with a partially initialised struct.
1880 */
1881static void destroy(struct cache *cache)
1882{
1883	unsigned i;
1884
1885	mempool_exit(&cache->migration_pool);
1886
1887	if (cache->prison)
1888		dm_bio_prison_destroy_v2(cache->prison);
1889
1890	cancel_delayed_work_sync(&cache->waker);
1891	if (cache->wq)
1892		destroy_workqueue(cache->wq);
1893
1894	if (cache->dirty_bitset)
1895		free_bitset(cache->dirty_bitset);
1896
1897	if (cache->discard_bitset)
1898		free_bitset(cache->discard_bitset);
1899
1900	if (cache->copier)
1901		dm_kcopyd_client_destroy(cache->copier);
1902
1903	if (cache->cmd)
1904		dm_cache_metadata_close(cache->cmd);
1905
1906	if (cache->metadata_dev)
1907		dm_put_device(cache->ti, cache->metadata_dev);
1908
1909	if (cache->origin_dev)
1910		dm_put_device(cache->ti, cache->origin_dev);
1911
1912	if (cache->cache_dev)
1913		dm_put_device(cache->ti, cache->cache_dev);
1914
1915	if (cache->policy)
1916		dm_cache_policy_destroy(cache->policy);
1917
1918	for (i = 0; i < cache->nr_ctr_args ; i++)
1919		kfree(cache->ctr_args[i]);
1920	kfree(cache->ctr_args);
1921
1922	bioset_exit(&cache->bs);
1923
1924	kfree(cache);
1925}
1926
1927static void cache_dtr(struct dm_target *ti)
1928{
1929	struct cache *cache = ti->private;
1930
1931	destroy(cache);
1932}
1933
1934static sector_t get_dev_size(struct dm_dev *dev)
1935{
1936	return bdev_nr_sectors(dev->bdev);
1937}
1938
1939/*----------------------------------------------------------------*/
1940
1941/*
1942 * Construct a cache device mapping.
1943 *
1944 * cache <metadata dev> <cache dev> <origin dev> <block size>
1945 *       <#feature args> [<feature arg>]*
1946 *       <policy> <#policy args> [<policy arg>]*
1947 *
1948 * metadata dev    : fast device holding the persistent metadata
1949 * cache dev	   : fast device holding cached data blocks
1950 * origin dev	   : slow device holding original data blocks
1951 * block size	   : cache unit size in sectors
1952 *
1953 * #feature args   : number of feature arguments passed
1954 * feature args    : writethrough.  (The default is writeback.)
1955 *
1956 * policy	   : the replacement policy to use
1957 * #policy args    : an even number of policy arguments corresponding
1958 *		     to key/value pairs passed to the policy
1959 * policy args	   : key/value pairs passed to the policy
1960 *		     E.g. 'sequential_threshold 1024'
1961 *		     See cache-policies.txt for details.
1962 *
1963 * Optional feature arguments are:
1964 *   writethrough  : write through caching that prohibits cache block
1965 *		     content from being different from origin block content.
1966 *		     Without this argument, the default behaviour is to write
1967 *		     back cache block contents later for performance reasons,
1968 *		     so they may differ from the corresponding origin blocks.
 *   passthrough   : a degraded mode in which reads and writes go
 *		     straight to the origin; a write to a cached block
 *		     invalidates it.  Only valid when every block in the
 *		     cache is clean.
 *   metadata2     : use version 2 of the on-disk metadata format.
 *   no_discard_passdown : don't pass discards down to the origin device.
1969 */
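/*
 * As a sketch, a table line for a hypothetical 20GiB origin with
 * 256KiB (512-sector) cache blocks, writethrough mode and the default
 * policy might look like this (device names are illustrative only):
 *
 *   dmsetup create my-cache --table \
 *     '0 41943040 cache /dev/mapper/fast-meta /dev/mapper/fast-data \
 *      /dev/slow 512 1 writethrough default 0'
 */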
1970struct cache_args {
1971	struct dm_target *ti;
1972
1973	struct dm_dev *metadata_dev;
1974
1975	struct dm_dev *cache_dev;
1976	sector_t cache_sectors;
1977
1978	struct dm_dev *origin_dev;
1979	sector_t origin_sectors;
1980
1981	uint32_t block_size;
1982
1983	const char *policy_name;
1984	int policy_argc;
1985	const char **policy_argv;
1986
1987	struct cache_features features;
1988};
1989
1990static void destroy_cache_args(struct cache_args *ca)
1991{
1992	if (ca->metadata_dev)
1993		dm_put_device(ca->ti, ca->metadata_dev);
1994
1995	if (ca->cache_dev)
1996		dm_put_device(ca->ti, ca->cache_dev);
1997
1998	if (ca->origin_dev)
1999		dm_put_device(ca->ti, ca->origin_dev);
2000
2001	kfree(ca);
2002}
2003
2004static bool at_least_one_arg(struct dm_arg_set *as, char **error)
2005{
2006	if (!as->argc) {
2007		*error = "Insufficient args";
2008		return false;
2009	}
2010
2011	return true;
2012}
2013
2014static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
2015			      char **error)
2016{
2017	int r;
2018	sector_t metadata_dev_size;
2019
2020	if (!at_least_one_arg(as, error))
2021		return -EINVAL;
2022
2023	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2024			  &ca->metadata_dev);
2025	if (r) {
2026		*error = "Error opening metadata device";
2027		return r;
2028	}
2029
2030	metadata_dev_size = get_dev_size(ca->metadata_dev);
2031	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
2032		DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
2033		       ca->metadata_dev->bdev, DM_CACHE_METADATA_MAX_SECTORS_WARNING);
2034
2035	return 0;
2036}
2037
2038static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
2039			   char **error)
2040{
2041	int r;
2042
2043	if (!at_least_one_arg(as, error))
2044		return -EINVAL;
2045
2046	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2047			  &ca->cache_dev);
2048	if (r) {
2049		*error = "Error opening cache device";
2050		return r;
2051	}
2052	ca->cache_sectors = get_dev_size(ca->cache_dev);
2053
2054	return 0;
2055}
2056
2057static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
2058			    char **error)
2059{
2060	int r;
2061
2062	if (!at_least_one_arg(as, error))
2063		return -EINVAL;
2064
2065	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2066			  &ca->origin_dev);
2067	if (r) {
2068		*error = "Error opening origin device";
2069		return r;
2070	}
2071
2072	ca->origin_sectors = get_dev_size(ca->origin_dev);
2073	if (ca->ti->len > ca->origin_sectors) {
2074		*error = "Device size larger than cached device";
2075		return -EINVAL;
2076	}
2077
2078	return 0;
2079}
2080
2081static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
2082			    char **error)
2083{
2084	unsigned long block_size;
2085
2086	if (!at_least_one_arg(as, error))
2087		return -EINVAL;
2088
2089	if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
2090	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2091	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2092	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2093		*error = "Invalid data block size";
2094		return -EINVAL;
2095	}
2096
2097	if (block_size > ca->cache_sectors) {
2098		*error = "Data block size is larger than the cache device";
2099		return -EINVAL;
2100	}
2101
2102	ca->block_size = block_size;
2103
2104	return 0;
2105}
2106
2107static void init_features(struct cache_features *cf)
2108{
2109	cf->mode = CM_WRITE;
2110	cf->io_mode = CM_IO_WRITEBACK;
2111	cf->metadata_version = 1;
2112	cf->discard_passdown = true;
2113}
2114
2115static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2116			  char **error)
2117{
2118	static const struct dm_arg _args[] = {
2119		{0, 3, "Invalid number of cache feature arguments"},
2120	};
2121
2122	int r, mode_ctr = 0;
2123	unsigned argc;
2124	const char *arg;
2125	struct cache_features *cf = &ca->features;
2126
2127	init_features(cf);
2128
2129	r = dm_read_arg_group(_args, as, &argc, error);
2130	if (r)
2131		return -EINVAL;
2132
2133	while (argc--) {
2134		arg = dm_shift_arg(as);
2135
2136		if (!strcasecmp(arg, "writeback")) {
2137			cf->io_mode = CM_IO_WRITEBACK;
2138			mode_ctr++;
2139		}
2140
2141		else if (!strcasecmp(arg, "writethrough")) {
2142			cf->io_mode = CM_IO_WRITETHROUGH;
2143			mode_ctr++;
2144		}
2145
2146		else if (!strcasecmp(arg, "passthrough")) {
2147			cf->io_mode = CM_IO_PASSTHROUGH;
2148			mode_ctr++;
2149		}
2150
2151		else if (!strcasecmp(arg, "metadata2"))
2152			cf->metadata_version = 2;
2153
2154		else if (!strcasecmp(arg, "no_discard_passdown"))
2155			cf->discard_passdown = false;
2156
2157		else {
2158			*error = "Unrecognised cache feature requested";
2159			return -EINVAL;
2160		}
2161	}
2162
2163	if (mode_ctr > 1) {
2164		*error = "Duplicate cache io_mode features requested";
2165		return -EINVAL;
2166	}
2167
2168	return 0;
2169}
2170
2171static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
2172			char **error)
2173{
2174	static const struct dm_arg _args[] = {
2175		{0, 1024, "Invalid number of policy arguments"},
2176	};
2177
2178	int r;
2179
2180	if (!at_least_one_arg(as, error))
2181		return -EINVAL;
2182
2183	ca->policy_name = dm_shift_arg(as);
2184
2185	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
2186	if (r)
2187		return -EINVAL;
2188
2189	ca->policy_argv = (const char **)as->argv;
2190	dm_consume_args(as, ca->policy_argc);
2191
2192	return 0;
2193}
2194
2195static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
2196			    char **error)
2197{
2198	int r;
2199	struct dm_arg_set as;
2200
2201	as.argc = argc;
2202	as.argv = argv;
2203
2204	r = parse_metadata_dev(ca, &as, error);
2205	if (r)
2206		return r;
2207
2208	r = parse_cache_dev(ca, &as, error);
2209	if (r)
2210		return r;
2211
2212	r = parse_origin_dev(ca, &as, error);
2213	if (r)
2214		return r;
2215
2216	r = parse_block_size(ca, &as, error);
2217	if (r)
2218		return r;
2219
2220	r = parse_features(ca, &as, error);
2221	if (r)
2222		return r;
2223
2224	r = parse_policy(ca, &as, error);
2225	if (r)
2226		return r;
2227
2228	return 0;
2229}
2230
2231/*----------------------------------------------------------------*/
2232
2233static struct kmem_cache *migration_cache;
2234
2235#define NOT_CORE_OPTION 1
2236
2237static int process_config_option(struct cache *cache, const char *key, const char *value)
2238{
2239	unsigned long tmp;
2240
2241	if (!strcasecmp(key, "migration_threshold")) {
2242		if (kstrtoul(value, 10, &tmp))
2243			return -EINVAL;
2244
2245		cache->migration_threshold = tmp;
2246		return 0;
2247	}
2248
2249	return NOT_CORE_OPTION;
2250}
2251
2252static int set_config_value(struct cache *cache, const char *key, const char *value)
2253{
2254	int r = process_config_option(cache, key, value);
2255
2256	if (r == NOT_CORE_OPTION)
2257		r = policy_set_config_value(cache->policy, key, value);
2258
2259	if (r)
2260		DMWARN("bad config value for %s: %s", key, value);
2261
2262	return r;
2263}
2264
2265static int set_config_values(struct cache *cache, int argc, const char **argv)
2266{
2267	int r = 0;
2268
2269	if (argc & 1) {
2270		DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
2271		return -EINVAL;
2272	}
2273
2274	while (argc) {
2275		r = set_config_value(cache, argv[0], argv[1]);
2276		if (r)
2277			break;
2278
2279		argc -= 2;
2280		argv += 2;
2281	}
2282
2283	return r;
2284}
2285
2286static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2287			       char **error)
2288{
2289	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
2290							   cache->cache_size,
2291							   cache->origin_sectors,
2292							   cache->sectors_per_block);
2293	if (IS_ERR(p)) {
2294		*error = "Error creating cache's policy";
2295		return PTR_ERR(p);
2296	}
2297	cache->policy = p;
2298	BUG_ON(!cache->policy);
2299
2300	return 0;
2301}
2302
2303/*
2304 * We want the discard block size to be at least the size of the cache
2305 * block size and have no more than 2^14 discard blocks across the origin.
2306 */
2307#define MAX_DISCARD_BLOCKS (1 << 14)
2308
2309static bool too_many_discard_blocks(sector_t discard_block_size,
2310				    sector_t origin_size)
2311{
2312	(void) sector_div(origin_size, discard_block_size);
2313
2314	return origin_size > MAX_DISCARD_BLOCKS;
2315}
2316
2317static sector_t calculate_discard_block_size(sector_t cache_block_size,
2318					     sector_t origin_size)
2319{
2320	sector_t discard_block_size = cache_block_size;
2321
2322	if (origin_size)
2323		while (too_many_discard_blocks(discard_block_size, origin_size))
2324			discard_block_size *= 2;
2325
2326	return discard_block_size;
2327}
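/*
 * E.g. a hypothetical 1TiB origin (2^31 sectors) with 512-sector cache
 * blocks would need 2^22 discard blocks, so the size doubles (512, 1024,
 * ...) until 2^17-sector discard blocks bring the count down to 2^14.
 */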
2328
2329static void set_cache_size(struct cache *cache, dm_cblock_t size)
2330{
2331	dm_block_t nr_blocks = from_cblock(size);
2332
2333	if (nr_blocks > (1 << 20) && cache->cache_size != size)
2334		DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
2335			     "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
2336			     "Please consider increasing the cache block size to reduce the overall cache block count.",
2337			     (unsigned long long) nr_blocks);
2338
2339	cache->cache_size = size;
2340}
2341
2342#define DEFAULT_MIGRATION_THRESHOLD 2048
2343
2344static int cache_create(struct cache_args *ca, struct cache **result)
2345{
2346	int r = 0;
2347	char **error = &ca->ti->error;
2348	struct cache *cache;
2349	struct dm_target *ti = ca->ti;
2350	dm_block_t origin_blocks;
2351	struct dm_cache_metadata *cmd;
2352	bool may_format = ca->features.mode == CM_WRITE;
2353
2354	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2355	if (!cache)
2356		return -ENOMEM;
2357
2358	cache->ti = ca->ti;
2359	ti->private = cache;
2360	ti->accounts_remapped_io = true;
2361	ti->num_flush_bios = 2;
2362	ti->flush_supported = true;
2363
2364	ti->num_discard_bios = 1;
2365	ti->discards_supported = true;
2366
2367	ti->per_io_data_size = sizeof(struct per_bio_data);
2368
2369	cache->features = ca->features;
2370	if (writethrough_mode(cache)) {
2371		/* Create bioset for writethrough bios issued to origin */
2372		r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
2373		if (r)
2374			goto bad;
2375	}
2376
2377	cache->metadata_dev = ca->metadata_dev;
2378	cache->origin_dev = ca->origin_dev;
2379	cache->cache_dev = ca->cache_dev;
2380
2381	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2382
2383	origin_blocks = cache->origin_sectors = ca->origin_sectors;
2384	origin_blocks = block_div(origin_blocks, ca->block_size);
2385	cache->origin_blocks = to_oblock(origin_blocks);
2386
2387	cache->sectors_per_block = ca->block_size;
2388	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2389		r = -EINVAL;
2390		goto bad;
2391	}
2392
2393	if (ca->block_size & (ca->block_size - 1)) {
2394		dm_block_t cache_size = ca->cache_sectors;
2395
2396		cache->sectors_per_block_shift = -1;
2397		cache_size = block_div(cache_size, ca->block_size);
2398		set_cache_size(cache, to_cblock(cache_size));
2399	} else {
2400		cache->sectors_per_block_shift = __ffs(ca->block_size);
2401		set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
2402	}
2403
2404	r = create_cache_policy(cache, ca, error);
2405	if (r)
2406		goto bad;
2407
2408	cache->policy_nr_args = ca->policy_argc;
2409	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2410
2411	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2412	if (r) {
2413		*error = "Error setting cache policy's config values";
2414		goto bad;
2415	}
2416
2417	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2418				     ca->block_size, may_format,
2419				     dm_cache_policy_get_hint_size(cache->policy),
2420				     ca->features.metadata_version);
2421	if (IS_ERR(cmd)) {
2422		*error = "Error creating metadata object";
2423		r = PTR_ERR(cmd);
2424		goto bad;
2425	}
2426	cache->cmd = cmd;
2427	set_cache_mode(cache, CM_WRITE);
2428	if (get_cache_mode(cache) != CM_WRITE) {
2429		*error = "Unable to get write access to metadata, please check/repair metadata.";
2430		r = -EINVAL;
2431		goto bad;
2432	}
2433
2434	if (passthrough_mode(cache)) {
2435		bool all_clean;
2436
2437		r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2438		if (r) {
2439			*error = "dm_cache_metadata_all_clean() failed";
2440			goto bad;
2441		}
2442
2443		if (!all_clean) {
2444			*error = "Cannot enter passthrough mode unless all blocks are clean";
2445			r = -EINVAL;
2446			goto bad;
2447		}
2448
2449		policy_allow_migrations(cache->policy, false);
2450	}
2451
2452	spin_lock_init(&cache->lock);
2453	bio_list_init(&cache->deferred_bios);
2454	atomic_set(&cache->nr_allocated_migrations, 0);
2455	atomic_set(&cache->nr_io_migrations, 0);
2456	init_waitqueue_head(&cache->migration_wait);
2457
2458	r = -ENOMEM;
2459	atomic_set(&cache->nr_dirty, 0);
2460	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2461	if (!cache->dirty_bitset) {
2462		*error = "could not allocate dirty bitset";
2463		goto bad;
2464	}
2465	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2466
2467	cache->discard_block_size =
2468		calculate_discard_block_size(cache->sectors_per_block,
2469					     cache->origin_sectors);
2470	cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
2471							      cache->discard_block_size));
2472	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2473	if (!cache->discard_bitset) {
2474		*error = "could not allocate discard bitset";
2475		goto bad;
2476	}
2477	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2478
2479	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2480	if (IS_ERR(cache->copier)) {
2481		*error = "could not create kcopyd client";
2482		r = PTR_ERR(cache->copier);
2483		goto bad;
2484	}
2485
2486	cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
2487	if (!cache->wq) {
2488		*error = "could not create workqueue for metadata object";
2489		goto bad;
2490	}
2491	INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
2492	INIT_WORK(&cache->migration_worker, check_migrations);
2493	INIT_DELAYED_WORK(&cache->waker, do_waker);
2494
2495	cache->prison = dm_bio_prison_create_v2(cache->wq);
2496	if (!cache->prison) {
2497		*error = "could not create bio prison";
2498		goto bad;
2499	}
2500
2501	r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
2502				   migration_cache);
2503	if (r) {
2504		*error = "Error creating cache's migration mempool";
2505		goto bad;
2506	}
2507
2508	cache->need_tick_bio = true;
2509	cache->sized = false;
2510	cache->invalidate = false;
2511	cache->commit_requested = false;
2512	cache->loaded_mappings = false;
2513	cache->loaded_discards = false;
2514
2515	load_stats(cache);
2516
2517	atomic_set(&cache->stats.demotion, 0);
2518	atomic_set(&cache->stats.promotion, 0);
2519	atomic_set(&cache->stats.copies_avoided, 0);
2520	atomic_set(&cache->stats.cache_cell_clash, 0);
2521	atomic_set(&cache->stats.commit_count, 0);
2522	atomic_set(&cache->stats.discard_count, 0);
2523
2524	spin_lock_init(&cache->invalidation_lock);
2525	INIT_LIST_HEAD(&cache->invalidation_requests);
2526
2527	batcher_init(&cache->committer, commit_op, cache,
2528		     issue_op, cache, cache->wq);
2529	dm_iot_init(&cache->tracker);
2530
2531	init_rwsem(&cache->background_work_lock);
2532	prevent_background_work(cache);
2533
2534	*result = cache;
2535	return 0;
2536bad:
2537	destroy(cache);
2538	return r;
2539}
2540
2541static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2542{
2543	unsigned i;
2544	const char **copy;
2545
2546	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2547	if (!copy)
2548		return -ENOMEM;
2549	for (i = 0; i < argc; i++) {
2550		copy[i] = kstrdup(argv[i], GFP_KERNEL);
2551		if (!copy[i]) {
2552			while (i--)
2553				kfree(copy[i]);
2554			kfree(copy);
2555			return -ENOMEM;
2556		}
2557	}
2558
2559	cache->nr_ctr_args = argc;
2560	cache->ctr_args = copy;
2561
2562	return 0;
2563}
2564
2565static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2566{
2567	int r = -EINVAL;
2568	struct cache_args *ca;
2569	struct cache *cache = NULL;
2570
2571	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2572	if (!ca) {
2573		ti->error = "Error allocating memory for cache";
2574		return -ENOMEM;
2575	}
2576	ca->ti = ti;
2577
2578	r = parse_cache_args(ca, argc, argv, &ti->error);
2579	if (r)
2580		goto out;
2581
2582	r = cache_create(ca, &cache);
2583	if (r)
2584		goto out;
2585
2586	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2587	if (r) {
2588		destroy(cache);
2589		goto out;
2590	}
2591
2592	ti->private = cache;
2593out:
2594	destroy_cache_args(ca);
2595	return r;
2596}
2597
2598/*----------------------------------------------------------------*/
2599
2600static int cache_map(struct dm_target *ti, struct bio *bio)
2601{
2602	struct cache *cache = ti->private;
2603
2604	int r;
2605	bool commit_needed;
2606	dm_oblock_t block = get_bio_block(cache, bio);
2607
2608	init_per_bio_data(bio);
2609	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2610		/*
2611		 * This can only occur if the io goes to a partial block at
2612		 * the end of the origin device.  We don't cache these.
2613		 * Just remap to the origin and carry on.
2614		 */
2615		remap_to_origin(cache, bio);
2616		accounted_begin(cache, bio);
2617		return DM_MAPIO_REMAPPED;
2618	}
2619
2620	if (discard_or_flush(bio)) {
2621		defer_bio(cache, bio);
2622		return DM_MAPIO_SUBMITTED;
2623	}
2624
2625	r = map_bio(cache, bio, block, &commit_needed);
2626	if (commit_needed)
2627		schedule_commit(&cache->committer);
2628
2629	return r;
2630}
2631
2632static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
2633{
2634	struct cache *cache = ti->private;
2635	unsigned long flags;
2636	struct per_bio_data *pb = get_per_bio_data(bio);
2637
2638	if (pb->tick) {
2639		policy_tick(cache->policy, false);
2640
2641		spin_lock_irqsave(&cache->lock, flags);
2642		cache->need_tick_bio = true;
2643		spin_unlock_irqrestore(&cache->lock, flags);
2644	}
2645
2646	bio_drop_shared_lock(cache, bio);
2647	accounted_complete(cache, bio);
2648
2649	return DM_ENDIO_DONE;
2650}
2651
2652static int write_dirty_bitset(struct cache *cache)
2653{
2654	int r;
2655
2656	if (get_cache_mode(cache) >= CM_READ_ONLY)
2657		return -EINVAL;
2658
2659	r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
2660	if (r)
2661		metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);
2662
2663	return r;
2664}
2665
2666static int write_discard_bitset(struct cache *cache)
2667{
2668	unsigned i;
	int r;
2669
2670	if (get_cache_mode(cache) >= CM_READ_ONLY)
2671		return -EINVAL;
2672
2673	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2674					   cache->discard_nr_blocks);
2675	if (r) {
2676		DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
2677		metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
2678		return r;
2679	}
2680
2681	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2682		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2683					 is_discarded(cache, to_dblock(i)));
2684		if (r) {
2685			metadata_operation_failed(cache, "dm_cache_set_discard", r);
2686			return r;
2687		}
2688	}
2689
2690	return 0;
2691}
2692
2693static int write_hints(struct cache *cache)
2694{
2695	int r;
2696
2697	if (get_cache_mode(cache) >= CM_READ_ONLY)
2698		return -EINVAL;
2699
2700	r = dm_cache_write_hints(cache->cmd, cache->policy);
2701	if (r) {
2702		metadata_operation_failed(cache, "dm_cache_write_hints", r);
2703		return r;
2704	}
2705
2706	return 0;
2707}
2708
2709/*
2710 * returns true on success
2711 */
2712static bool sync_metadata(struct cache *cache)
2713{
2714	int r1, r2, r3, r4;
2715
2716	r1 = write_dirty_bitset(cache);
2717	if (r1)
2718		DMERR("%s: could not write dirty bitset", cache_device_name(cache));
2719
2720	r2 = write_discard_bitset(cache);
2721	if (r2)
2722		DMERR("%s: could not write discard bitset", cache_device_name(cache));
2723
2724	save_stats(cache);
2725
2726	r3 = write_hints(cache);
2727	if (r3)
2728		DMERR("%s: could not write hints", cache_device_name(cache));
2729
2730	/*
2731	 * If writing the above metadata failed, we still commit, but don't
2732	 * set the clean shutdown flag.  This will effectively force every
2733	 * dirty bit to be set on reload.
2734	 */
2735	r4 = commit(cache, !r1 && !r2 && !r3);
2736	if (r4)
2737		DMERR("%s: could not write cache metadata", cache_device_name(cache));
2738
2739	return !r1 && !r2 && !r3 && !r4;
2740}
2741
2742static void cache_postsuspend(struct dm_target *ti)
2743{
2744	struct cache *cache = ti->private;
2745
2746	prevent_background_work(cache);
2747	BUG_ON(atomic_read(&cache->nr_io_migrations));
2748
2749	cancel_delayed_work_sync(&cache->waker);
2750	drain_workqueue(cache->wq);
2751	WARN_ON(cache->tracker.in_flight);
2752
2753	/*
2754	 * If it's a flush suspend there won't be any deferred bios, so this
2755	 * call is harmless.
2756	 */
2757	requeue_deferred_bios(cache);
2758
2759	if (get_cache_mode(cache) == CM_WRITE)
2760		(void) sync_metadata(cache);
2761}
2762
2763static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2764			bool dirty, uint32_t hint, bool hint_valid)
2765{
 
2766	struct cache *cache = context;
2767
2768	if (dirty) {
2769		set_bit(from_cblock(cblock), cache->dirty_bitset);
2770		atomic_inc(&cache->nr_dirty);
2771	} else
2772		clear_bit(from_cblock(cblock), cache->dirty_bitset);
2773
2774	return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
2775}
2776
2777/*
2778 * The discard block size in the on disk metadata is not
2779 * necessarily the same as we're currently using.  So we have to
2780 * be careful to only set the discarded attribute if we know it
2781 * covers a complete block of the new size.
2782 */
2783struct discard_load_info {
2784	struct cache *cache;
2785
2786	/*
2787	 * These blocks are sized using the on disk dblock size, rather
2788	 * than the current one.
2789	 */
2790	dm_block_t block_size;
2791	dm_block_t discard_begin, discard_end;
2792};
2793
2794static void discard_load_info_init(struct cache *cache,
2795				   struct discard_load_info *li)
2796{
2797	li->cache = cache;
2798	li->discard_begin = li->discard_end = 0;
2799}
2800
2801static void set_discard_range(struct discard_load_info *li)
2802{
2803	sector_t b, e;
2804
2805	if (li->discard_begin == li->discard_end)
2806		return;
2807
2808	/*
2809	 * Convert to sectors.
2810	 */
2811	b = li->discard_begin * li->block_size;
2812	e = li->discard_end * li->block_size;
2813
2814	/*
2815	 * Then convert back to the current dblock size.
2816	 */
2817	b = dm_sector_div_up(b, li->cache->discard_block_size);
2818	sector_div(e, li->cache->discard_block_size);
2819
2820	/*
2821	 * The origin may have shrunk, so we need to check we're still in
2822	 * bounds.
2823	 */
2824	if (e > from_dblock(li->cache->discard_nr_blocks))
2825		e = from_dblock(li->cache->discard_nr_blocks);
2826
2827	for (; b < e; b++)
2828		set_discard(li->cache, to_dblock(b));
2829}
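/*
 * E.g. with a hypothetical on-disk dblock size of 128 sectors and a
 * current size of 512, the on-disk range [4, 8) covers sectors
 * [512, 1024) and rounds inwards to current dblocks [1, 2), so only
 * dblock 1 is set.
 */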
2830
2831static int load_discard(void *context, sector_t discard_block_size,
2832			dm_dblock_t dblock, bool discard)
2833{
2834	struct discard_load_info *li = context;
2835
2836	li->block_size = discard_block_size;
2837
2838	if (discard) {
2839		if (from_dblock(dblock) == li->discard_end)
2840			/*
2841			 * We're already in a discard range, just extend it.
2842			 */
2843			li->discard_end = li->discard_end + 1ULL;
2844
2845		else {
2846			/*
2847			 * Emit the old range and start a new one.
2848			 */
2849			set_discard_range(li);
2850			li->discard_begin = from_dblock(dblock);
2851			li->discard_end = li->discard_begin + 1ULL;
2852		}
2853	} else {
2854		set_discard_range(li);
2855		li->discard_begin = li->discard_end = 0;
2856	}
2857
2858	return 0;
2859}
2860
2861static dm_cblock_t get_cache_dev_size(struct cache *cache)
2862{
2863	sector_t size = get_dev_size(cache->cache_dev);
2864	(void) sector_div(size, cache->sectors_per_block);
2865	return to_cblock(size);
2866}
2867
2868static bool can_resize(struct cache *cache, dm_cblock_t new_size)
2869{
2870	if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
2871		if (cache->sized) {
2872			DMERR("%s: unable to extend cache due to missing cache table reload",
2873			      cache_device_name(cache));
2874			return false;
2875		}
2876	}
2877
2878	/*
2879	 * We can't drop a dirty block when shrinking the cache.
2880	 */
2881	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
2882		if (is_dirty(cache, new_size)) {
2883			DMERR("%s: unable to shrink cache; cache block %llu is dirty",
2884			      cache_device_name(cache),
2885			      (unsigned long long) from_cblock(new_size));
2886			return false;
2887		}
2888		new_size = to_cblock(from_cblock(new_size) + 1);
2889	}
2890
2891	return true;
2892}
2893
2894static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2895{
2896	int r;
2897
2898	r = dm_cache_resize(cache->cmd, new_size);
2899	if (r) {
2900		DMERR("%s: could not resize cache metadata", cache_device_name(cache));
2901		metadata_operation_failed(cache, "dm_cache_resize", r);
2902		return r;
2903	}
2904
2905	set_cache_size(cache, new_size);
2906
2907	return 0;
2908}
2909
2910static int cache_preresume(struct dm_target *ti)
2911{
2912	int r = 0;
2913	struct cache *cache = ti->private;
2914	dm_cblock_t csize = get_cache_dev_size(cache);
2915
2916	/*
2917	 * Check to see if the cache has resized.
2918	 */
2919	if (!cache->sized) {
2920		r = resize_cache_dev(cache, csize);
2921		if (r)
2922			return r;
2923
2924		cache->sized = true;
2925
2926	} else if (csize != cache->cache_size) {
2927		if (!can_resize(cache, csize))
2928			return -EINVAL;
2929
2930		r = resize_cache_dev(cache, csize);
2931		if (r)
2932			return r;
2933	}
2934
2935	if (!cache->loaded_mappings) {
2936		r = dm_cache_load_mappings(cache->cmd, cache->policy,
2937					   load_mapping, cache);
2938		if (r) {
2939			DMERR("%s: could not load cache mappings", cache_device_name(cache));
2940			metadata_operation_failed(cache, "dm_cache_load_mappings", r);
2941			return r;
2942		}
2943
2944		cache->loaded_mappings = true;
2945	}
2946
2947	if (!cache->loaded_discards) {
2948		struct discard_load_info li;
2949
2950		/*
2951		 * The discard bitset could have been resized, or the
2952		 * discard block size changed.  To be safe we start by
2953		 * setting every dblock to not discarded.
2954		 */
2955		clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2956
2957		discard_load_info_init(cache, &li);
2958		r = dm_cache_load_discards(cache->cmd, load_discard, &li);
2959		if (r) {
2960			DMERR("%s: could not load origin discards", cache_device_name(cache));
2961			metadata_operation_failed(cache, "dm_cache_load_discards", r);
2962			return r;
2963		}
2964		set_discard_range(&li);
2965
2966		cache->loaded_discards = true;
2967	}
2968
2969	return r;
2970}
2971
2972static void cache_resume(struct dm_target *ti)
2973{
2974	struct cache *cache = ti->private;
2975
2976	cache->need_tick_bio = true;
2977	allow_background_work(cache);
2978	do_waker(&cache->waker.work);
2979}
2980
2981static void emit_flags(struct cache *cache, char *result,
2982		       unsigned maxlen, ssize_t *sz_ptr)
2983{
2984	ssize_t sz = *sz_ptr;
2985	struct cache_features *cf = &cache->features;
2986	unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
2987
2988	DMEMIT("%u ", count);
2989
2990	if (cf->metadata_version == 2)
2991		DMEMIT("metadata2 ");
2992
2993	if (writethrough_mode(cache))
2994		DMEMIT("writethrough ");
2995
2996	else if (passthrough_mode(cache))
2997		DMEMIT("passthrough ");
2998
2999	else if (writeback_mode(cache))
3000		DMEMIT("writeback ");
3001
3002	else {
3003		DMEMIT("unknown ");
3004		DMERR("%s: internal error: unknown io mode: %d",
3005		      cache_device_name(cache), (int) cf->io_mode);
3006	}
3007
3008	if (!cf->discard_passdown)
3009		DMEMIT("no_discard_passdown ");
3010
3011	*sz_ptr = sz;
3012}
3013
3014/*
3015 * Status format:
3016 *
3017 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
3018 * <cache block size> <#used cache blocks>/<#total cache blocks>
3019 * <#read hits> <#read misses> <#write hits> <#write misses>
3020 * <#demotions> <#promotions> <#dirty>
3021 * <#features> <features>*
3022 * <#core args> <core args>
3023 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
3024 */
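/*
 * E.g. a line of the above form, with purely illustrative counts and
 * assuming a policy (such as smq) that emits zero policy args:
 *
 *   8 72/8192 512 140/65536 1456 34 657 21 13 68 9 \
 *     1 writethrough 2 migration_threshold 2048 smq 0 rw -
 */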
3025static void cache_status(struct dm_target *ti, status_type_t type,
3026			 unsigned status_flags, char *result, unsigned maxlen)
3027{
3028	int r = 0;
3029	unsigned i;
3030	ssize_t sz = 0;
3031	dm_block_t nr_free_blocks_metadata = 0;
3032	dm_block_t nr_blocks_metadata = 0;
3033	char buf[BDEVNAME_SIZE];
3034	struct cache *cache = ti->private;
3035	dm_cblock_t residency;
3036	bool needs_check;
3037
3038	switch (type) {
3039	case STATUSTYPE_INFO:
3040		if (get_cache_mode(cache) == CM_FAIL) {
3041			DMEMIT("Fail");
3042			break;
3043		}
3044
3045		/* Commit to ensure statistics aren't out-of-date */
3046		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
3047			(void) commit(cache, false);
3048
3049		r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
3050		if (r) {
3051			DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
3052			      cache_device_name(cache), r);
3053			goto err;
3054		}
3055
3056		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
3057		if (r) {
3058			DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
3059			      cache_device_name(cache), r);
3060			goto err;
3061		}
3062
3063		residency = policy_residency(cache->policy);
3064
3065		DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
3066		       (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
3067		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3068		       (unsigned long long)nr_blocks_metadata,
3069		       (unsigned long long)cache->sectors_per_block,
3070		       (unsigned long long) from_cblock(residency),
3071		       (unsigned long long) from_cblock(cache->cache_size),
3072		       (unsigned) atomic_read(&cache->stats.read_hit),
3073		       (unsigned) atomic_read(&cache->stats.read_miss),
3074		       (unsigned) atomic_read(&cache->stats.write_hit),
3075		       (unsigned) atomic_read(&cache->stats.write_miss),
3076		       (unsigned) atomic_read(&cache->stats.demotion),
3077		       (unsigned) atomic_read(&cache->stats.promotion),
3078		       (unsigned long) atomic_read(&cache->nr_dirty));
3079
3080		emit_flags(cache, result, maxlen, &sz);
3081
3082		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
3083
3084		DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
3085		if (sz < maxlen) {
3086			r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
3087			if (r)
3088				DMERR("%s: policy_emit_config_values returned %d",
3089				      cache_device_name(cache), r);
3090		}
3091
3092		if (get_cache_mode(cache) == CM_READ_ONLY)
3093			DMEMIT("ro ");
3094		else
3095			DMEMIT("rw ");
3096
3097		r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
3098
3099		if (r || needs_check)
3100			DMEMIT("needs_check ");
3101		else
3102			DMEMIT("- ");
3103
3104		break;
3105
3106	case STATUSTYPE_TABLE:
3107		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
3108		DMEMIT("%s ", buf);
3109		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
3110		DMEMIT("%s ", buf);
3111		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
3112		DMEMIT("%s", buf);
3113
3114		for (i = 0; i < cache->nr_ctr_args - 1; i++)
3115			DMEMIT(" %s", cache->ctr_args[i]);
3116		if (cache->nr_ctr_args)
3117			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
3118		break;
3119
3120	case STATUSTYPE_IMA:
3121		DMEMIT_TARGET_NAME_VERSION(ti->type);
3122		if (get_cache_mode(cache) == CM_FAIL)
3123			DMEMIT(",metadata_mode=fail");
3124		else if (get_cache_mode(cache) == CM_READ_ONLY)
3125			DMEMIT(",metadata_mode=ro");
3126		else
3127			DMEMIT(",metadata_mode=rw");
3128
3129		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
3130		DMEMIT(",cache_metadata_device=%s", buf);
3131		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
3132		DMEMIT(",cache_device=%s", buf);
3133		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
3134		DMEMIT(",cache_origin_device=%s", buf);
3135		DMEMIT(",writethrough=%c", writethrough_mode(cache) ? 'y' : 'n');
3136		DMEMIT(",writeback=%c", writeback_mode(cache) ? 'y' : 'n');
3137		DMEMIT(",passthrough=%c", passthrough_mode(cache) ? 'y' : 'n');
3138		DMEMIT(",metadata2=%c", cache->features.metadata_version == 2 ? 'y' : 'n');
3139		DMEMIT(",no_discard_passdown=%c", cache->features.discard_passdown ? 'n' : 'y');
3140		DMEMIT(";");
3141		break;
3142	}
3143
3144	return;
3145
3146err:
3147	DMEMIT("Error");
3148}
3149
3150/*
3151 * Defines a range of cblocks, begin to (end - 1) are in the range.  end is
3152 * the one-past-the-end value.
3153 */
3154struct cblock_range {
3155	dm_cblock_t begin;
3156	dm_cblock_t end;
3157};
3158
3159/*
3160 * A cache block range can take two forms:
3161 *
3162 * i) A single cblock, eg. '3456'
3163 * ii) A begin and end cblock with a dash between, eg. 123-234
3164 */
3165static int parse_cblock_range(struct cache *cache, const char *str,
3166			      struct cblock_range *result)
3167{
3168	char dummy;
3169	uint64_t b, e;
3170	int r;
3171
3172	/*
3173	 * Try and parse form (ii) first.
3174	 */
3175	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
3176	if (r < 0)
3177		return r;
3178
3179	if (r == 2) {
3180		result->begin = to_cblock(b);
3181		result->end = to_cblock(e);
3182		return 0;
3183	}
3184
3185	/*
3186	 * That didn't work, try form (i).
3187	 */
3188	r = sscanf(str, "%llu%c", &b, &dummy);
3189	if (r < 0)
3190		return r;
3191
3192	if (r == 1) {
3193		result->begin = to_cblock(b);
3194		result->end = to_cblock(from_cblock(result->begin) + 1u);
3195		return 0;
3196	}
3197
3198	DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
3199	return -EINVAL;
3200}
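/*
 * E.g. "123-234" scans as r == 2 and yields [123, 234); "3456" falls
 * through to form (i) and yields [3456, 3457); "123-234x" scans as
 * r == 3 (the trailing %c catches the junk), fails both forms and so
 * returns -EINVAL.
 */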
3201
3202static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
3203{
3204	uint64_t b = from_cblock(range->begin);
3205	uint64_t e = from_cblock(range->end);
3206	uint64_t n = from_cblock(cache->cache_size);
3207
3208	if (b >= n) {
3209		DMERR("%s: begin cblock out of range: %llu >= %llu",
3210		      cache_device_name(cache), b, n);
3211		return -EINVAL;
3212	}
3213
3214	if (e > n) {
3215		DMERR("%s: end cblock out of range: %llu > %llu",
3216		      cache_device_name(cache), e, n);
3217		return -EINVAL;
3218	}
3219
3220	if (b >= e) {
3221		DMERR("%s: invalid cblock range: %llu >= %llu",
3222		      cache_device_name(cache), b, e);
3223		return -EINVAL;
3224	}
3225
3226	return 0;
3227}
3228
3229static inline dm_cblock_t cblock_succ(dm_cblock_t b)
3230{
3231	return to_cblock(from_cblock(b) + 1);
3232}
3233
3234static int request_invalidation(struct cache *cache, struct cblock_range *range)
3235{
3236	int r = 0;
3237
3238	/*
3239	 * We don't need to do any locking here because we know we're in
3240	 * passthrough mode.  There is potential for a race between an
3241	 * invalidation triggered by an io and an invalidation message.  This
3242	 * is harmless, so we needn't worry if the policy call fails.
3243	 */
3244	while (range->begin != range->end) {
3245		r = invalidate_cblock(cache, range->begin);
3246		if (r)
3247			return r;
3248
3249		range->begin = cblock_succ(range->begin);
3250	}
3251
3252	cache->commit_requested = true;
3253	return r;
3254}
3255
3256static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
3257					      const char **cblock_ranges)
3258{
3259	int r = 0;
3260	unsigned i;
3261	struct cblock_range range;
3262
3263	if (!passthrough_mode(cache)) {
3264		DMERR("%s: cache has to be in passthrough mode for invalidation",
3265		      cache_device_name(cache));
3266		return -EPERM;
3267	}
3268
3269	for (i = 0; i < count; i++) {
3270		r = parse_cblock_range(cache, cblock_ranges[i], &range);
3271		if (r)
3272			break;
3273
3274		r = validate_cblock_range(cache, &range);
3275		if (r)
3276			break;
3277
3278		/*
3279		 * Pass begin and end origin blocks to the worker and wake it.
3280		 */
3281		r = request_invalidation(cache, &range);
3282		if (r)
3283			break;
3284	}
3285
3286	return r;
3287}
3288
3289/*
3290 * Supports
3291 *	"<key> <value>"
3292 * and
3293 *     "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
3294 *
3295 * The key migration_threshold is supported by the cache target core.
3296 */
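/*
 * E.g. via dmsetup (device name hypothetical; invalidation additionally
 * requires passthrough mode):
 *
 *   dmsetup message my-cache 0 migration_threshold 4096
 *   dmsetup message my-cache 0 invalidate_cblocks 3456 7890-7899
 */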
3297static int cache_message(struct dm_target *ti, unsigned argc, char **argv,
3298			 char *result, unsigned maxlen)
3299{
3300	struct cache *cache = ti->private;
3301
3302	if (!argc)
3303		return -EINVAL;
3304
3305	if (get_cache_mode(cache) >= CM_READ_ONLY) {
3306		DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
3307		      cache_device_name(cache));
3308		return -EOPNOTSUPP;
3309	}
3310
3311	if (!strcasecmp(argv[0], "invalidate_cblocks"))
3312		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3313
3314	if (argc != 2)
3315		return -EINVAL;
3316
3317	return set_config_value(cache, argv[0], argv[1]);
3318}
3319
3320static int cache_iterate_devices(struct dm_target *ti,
3321				 iterate_devices_callout_fn fn, void *data)
3322{
3323	int r = 0;
3324	struct cache *cache = ti->private;
3325
3326	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
3327	if (!r)
3328		r = fn(ti, cache->origin_dev, 0, ti->len, data);
3329
3330	return r;
3331}
3332
3333/*
3334 * If discard_passdown was enabled verify that the origin device
3335 * supports discards.  Disable discard_passdown if not.
3336 */
3337static void disable_passdown_if_not_supported(struct cache *cache)
3338{
3339	struct block_device *origin_bdev = cache->origin_dev->bdev;
3340	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
3341	const char *reason = NULL;
3342
3343	if (!cache->features.discard_passdown)
3344		return;
3345
3346	if (!bdev_max_discard_sectors(origin_bdev))
3347		reason = "discard unsupported";
3348
3349	else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
3350		reason = "max discard sectors smaller than a block";
3351
3352	if (reason) {
3353		DMWARN("Origin device (%pg) %s: Disabling discard passdown.",
3354		       origin_bdev, reason);
3355		cache->features.discard_passdown = false;
3356	}
3357}
3358
3359static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
3360{
3361	struct block_device *origin_bdev = cache->origin_dev->bdev;
3362	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
3363
3364	if (!cache->features.discard_passdown) {
3365		/* No passdown is done so setting own virtual limits */
3366		limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
3367						    cache->origin_sectors);
3368		limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
3369		return;
3370	}
3371
3372	/*
3373	 * cache_iterate_devices() is stacking both origin and fast device limits
3374	 * but discards aren't passed to fast device, so inherit origin's limits.
3375	 */
3376	limits->max_discard_sectors = origin_limits->max_discard_sectors;
3377	limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
3378	limits->discard_granularity = origin_limits->discard_granularity;
3379	limits->discard_alignment = origin_limits->discard_alignment;
3380	limits->discard_misaligned = origin_limits->discard_misaligned;
3381}
3382
3383static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3384{
3385	struct cache *cache = ti->private;
3386	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3387
3388	/*
3389	 * If the system-determined stacked limits are compatible with the
3390	 * cache's blocksize (io_opt is a factor) do not override them.
3391	 */
3392	if (io_opt_sectors < cache->sectors_per_block ||
3393	    do_div(io_opt_sectors, cache->sectors_per_block)) {
3394		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
3395		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
3396	}
3397
3398	disable_passdown_if_not_supported(cache);
3399	set_discard_limits(cache, limits);
3400}
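/*
 * E.g. with hypothetical 512-sector cache blocks, a stacked io_opt of
 * 1024 sectors divides evenly and is kept, whereas 768 sectors would be
 * overridden with the 512-sector block size.
 */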
3401
3402/*----------------------------------------------------------------*/
3403
3404static struct target_type cache_target = {
3405	.name = "cache",
3406	.version = {2, 2, 0},
3407	.module = THIS_MODULE,
3408	.ctr = cache_ctr,
3409	.dtr = cache_dtr,
3410	.map = cache_map,
3411	.end_io = cache_end_io,
3412	.postsuspend = cache_postsuspend,
3413	.preresume = cache_preresume,
3414	.resume = cache_resume,
3415	.status = cache_status,
3416	.message = cache_message,
3417	.iterate_devices = cache_iterate_devices,
3418	.io_hints = cache_io_hints,
3419};
3420
3421static int __init dm_cache_init(void)
3422{
3423	int r;
3424
3425	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3426	if (!migration_cache)
3427		return -ENOMEM;
3428
3429	r = dm_register_target(&cache_target);
3430	if (r) {
3431		DMERR("cache target registration failed: %d", r);
3432		kmem_cache_destroy(migration_cache);
3433		return r;
3434	}
3435
3436	return 0;
3437}
3438
3439static void __exit dm_cache_exit(void)
3440{
3441	dm_unregister_target(&cache_target);
3442	kmem_cache_destroy(migration_cache);
3443}
3444
3445module_init(dm_cache_init);
3446module_exit(dm_cache_exit);
3447
3448MODULE_DESCRIPTION(DM_NAME " cache target");
3449MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
3450MODULE_LICENSE("GPL");
v3.15
   1/*
   2 * Copyright (C) 2012 Red Hat. All rights reserved.
   3 *
   4 * This file is released under the GPL.
   5 */
   6
   7#include "dm.h"
   8#include "dm-bio-prison.h"
   9#include "dm-bio-record.h"
  10#include "dm-cache-metadata.h"
 
  11
  12#include <linux/dm-io.h>
  13#include <linux/dm-kcopyd.h>
 
  14#include <linux/init.h>
  15#include <linux/mempool.h>
  16#include <linux/module.h>
 
  17#include <linux/slab.h>
  18#include <linux/vmalloc.h>
  19
  20#define DM_MSG_PREFIX "cache"
  21
  22DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
  23	"A percentage of time allocated for copying to and/or from cache");
  24
  25/*----------------------------------------------------------------*/
  26
  27/*
  28 * Glossary:
  29 *
  30 * oblock: index of an origin block
  31 * cblock: index of a cache block
  32 * promotion: movement of a block from origin to cache
  33 * demotion: movement of a block from cache to origin
  34 * migration: movement of a block between the origin and cache device,
  35 *	      either direction
  36 */
  37
  38/*----------------------------------------------------------------*/
  39
  40static size_t bitset_size_in_bytes(unsigned nr_entries)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  41{
  42	return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
 
 
 
 
 
 
 
 
 
 
  43}
  44
  45static unsigned long *alloc_bitset(unsigned nr_entries)
  46{
  47	size_t s = bitset_size_in_bytes(nr_entries);
  48	return vzalloc(s);
  49}
  50
  51static void clear_bitset(void *bitset, unsigned nr_entries)
  52{
  53	size_t s = bitset_size_in_bytes(nr_entries);
  54	memset(bitset, 0, s);
 
 
 
 
 
 
 
  55}
  56
  57static void free_bitset(unsigned long *bits)
 
 
 
  58{
  59	vfree(bits);
 
 
 
 
 
 
 
 
  60}
  61
  62/*----------------------------------------------------------------*/
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  63
  64/*
  65 * There are a couple of places where we let a bio run, but want to do some
  66 * work before calling its endio function.  We do this by temporarily
  67 * changing the endio fn.
  68 */
  69struct dm_hook_info {
  70	bio_end_io_t *bi_end_io;
  71	void *bi_private;
  72};
  73
  74static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
  75			bio_end_io_t *bi_end_io, void *bi_private)
  76{
  77	h->bi_end_io = bio->bi_end_io;
  78	h->bi_private = bio->bi_private;
  79
  80	bio->bi_end_io = bi_end_io;
  81	bio->bi_private = bi_private;
  82}
  83
  84static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
  85{
  86	bio->bi_end_io = h->bi_end_io;
  87	bio->bi_private = h->bi_private;
  88
  89	/*
  90	 * Must bump bi_remaining to allow bio to complete with
  91	 * restored bi_end_io.
  92	 */
  93	atomic_inc(&bio->bi_remaining);
  94}
  95
  96/*----------------------------------------------------------------*/
  97
  98#define PRISON_CELLS 1024
  99#define MIGRATION_POOL_SIZE 128
 100#define COMMIT_PERIOD HZ
 101#define MIGRATION_COUNT_WINDOW 10
 102
 103/*
 104 * The block size of the device holding cache data must be
 105 * between 32KB and 1GB.
 106 */
 107#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
 108#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
 109
 110/*
 111 * FIXME: the cache is read/write for the time being.
 112 */
 113enum cache_metadata_mode {
 114	CM_WRITE,		/* metadata may be changed */
 115	CM_READ_ONLY,		/* metadata may not be changed */
 
 116};
 117
 118enum cache_io_mode {
 119	/*
 120	 * Data is written to cached blocks only.  These blocks are marked
 121	 * dirty.  If you lose the cache device you will lose data.
 122	 * Potential performance increase for both reads and writes.
 123	 */
 124	CM_IO_WRITEBACK,
 125
 126	/*
 127	 * Data is written to both cache and origin.  Blocks are never
 128	 * dirty.  Potential performance benfit for reads only.
 129	 */
 130	CM_IO_WRITETHROUGH,
 131
 132	/*
 133	 * A degraded mode useful for various cache coherency situations
 134	 * (eg, rolling back snapshots).  Reads and writes always go to the
 135	 * origin.  If a write goes to a cached oblock, then the cache
 136	 * block is invalidated.
 137	 */
 138	CM_IO_PASSTHROUGH
 139};
 140
 141struct cache_features {
 142	enum cache_metadata_mode mode;
 143	enum cache_io_mode io_mode;
 
 
 144};
 145
 146struct cache_stats {
 147	atomic_t read_hit;
 148	atomic_t read_miss;
 149	atomic_t write_hit;
 150	atomic_t write_miss;
 151	atomic_t demotion;
 152	atomic_t promotion;
 
 153	atomic_t copies_avoided;
 154	atomic_t cache_cell_clash;
 155	atomic_t commit_count;
 156	atomic_t discard_count;
 157};
 158
 159/*
 160 * Defines a range of cblocks, begin to (end - 1) are in the range.  end is
 161 * the one-past-the-end value.
 162 */
 163struct cblock_range {
 164	dm_cblock_t begin;
 165	dm_cblock_t end;
 166};
 167
 168struct invalidation_request {
 169	struct list_head list;
 170	struct cblock_range *cblocks;
 171
 172	atomic_t complete;
 173	int err;
 174
 175	wait_queue_head_t result_wait;
 176};
 177
 178struct cache {
 179	struct dm_target *ti;
 180	struct dm_target_callbacks callbacks;
 
 
 
 
 
 
 181
 182	struct dm_cache_metadata *cmd;
 183
 184	/*
 185	 * Metadata is written to this device.
 186	 */
 187	struct dm_dev *metadata_dev;
 188
 189	/*
 190	 * The slower of the two data devices.  Typically a spindle.
 191	 */
 192	struct dm_dev *origin_dev;
 193
 194	/*
 195	 * The faster of the two data devices.  Typically an SSD.
 196	 */
 197	struct dm_dev *cache_dev;
 198
 199	/*
 200	 * Size of the origin device in _complete_ blocks and native sectors.
 201	 */
 202	dm_oblock_t origin_blocks;
 203	sector_t origin_sectors;
 204
 205	/*
 206	 * Size of the cache device in blocks.
 207	 */
 208	dm_cblock_t cache_size;
 209
 210	/*
 211	 * Fields for converting from sectors to blocks.
 212	 */
 213	uint32_t sectors_per_block;
 214	int sectors_per_block_shift;
 215
 216	spinlock_t lock;
 217	struct bio_list deferred_bios;
 218	struct bio_list deferred_flush_bios;
 219	struct bio_list deferred_writethrough_bios;
 220	struct list_head quiesced_migrations;
 221	struct list_head completed_migrations;
 222	struct list_head need_commit_migrations;
 223	sector_t migration_threshold;
 224	wait_queue_head_t migration_wait;
 225	atomic_t nr_migrations;
 226
 227	wait_queue_head_t quiescing_wait;
 228	atomic_t quiescing;
 229	atomic_t quiescing_ack;
 230
 231	/*
 232	 * cache_size entries, dirty if set
 
 233	 */
 234	dm_cblock_t nr_dirty;
 235	unsigned long *dirty_bitset;
 
 
 
 236
 237	/*
 238	 * origin_blocks entries, discarded if set.
 239	 */
 240	dm_oblock_t discard_nr_blocks;
 241	unsigned long *discard_bitset;
 
 242
 243	/*
 244	 * Rather than reconstructing the table line for the status we just
 245	 * save it and regurgitate.
 246	 */
 247	unsigned nr_ctr_args;
 248	const char **ctr_args;
 249
 250	struct dm_kcopyd_client *copier;
 
 
 251	struct workqueue_struct *wq;
 252	struct work_struct worker;
 
 253
 254	struct delayed_work waker;
 255	unsigned long last_commit_jiffies;
 
 
 
 256
 257	struct dm_bio_prison *prison;
 258	struct dm_deferred_set *all_io_ds;
 259
 260	mempool_t *migration_pool;
 261	struct dm_cache_migration *next_migration;
 
 
 262
 263	struct dm_cache_policy *policy;
 264	unsigned policy_nr_args;
 265
 266	bool need_tick_bio:1;
 267	bool sized:1;
 268	bool invalidate:1;
 269	bool commit_requested:1;
 270	bool loaded_mappings:1;
 271	bool loaded_discards:1;
 272
 273	/*
 274	 * Cache features such as write-through.
 275	 */
 276	struct cache_features features;
 
 
 277
 278	struct cache_stats stats;
 279
 280	/*
 281	 * Invalidation fields.
 282	 */
 283	spinlock_t invalidation_lock;
 284	struct list_head invalidation_requests;
 285};
 286
 287struct per_bio_data {
 288	bool tick:1;
 289	unsigned req_nr:2;
 290	struct dm_deferred_entry *all_io_entry;
 291	struct dm_hook_info hook_info;
 
 
 292
 293	/*
 294	 * writethrough fields.  These MUST remain at the end of this
 295	 * structure and the 'cache' member must be the first as it
 296	 * is used to determine the offset of the writethrough fields.
 297	 */
 298	struct cache *cache;
 299	dm_cblock_t cblock;
 300	struct dm_bio_details bio_details;
 
 
 
 
 
 301};
 302
 303struct dm_cache_migration {
 304	struct list_head list;
 305	struct cache *cache;
 
 
 
 306
 307	unsigned long start_jiffies;
 308	dm_oblock_t old_oblock;
 309	dm_oblock_t new_oblock;
 310	dm_cblock_t cblock;
 311
 312	bool err:1;
 313	bool writeback:1;
 314	bool demote:1;
 315	bool promote:1;
 316	bool requeue_holder:1;
 317	bool invalidate:1;
 318
 319	struct dm_bio_prison_cell *old_ocell;
 320	struct dm_bio_prison_cell *new_ocell;
 321};
 322
 323/*
 324 * Processing a bio in the worker thread may require these memory
 325 * allocations.  We prealloc to avoid deadlocks (the same worker thread
 326 * frees them back to the mempool).
 327 */
 328struct prealloc {
 329	struct dm_cache_migration *mg;
 330	struct dm_bio_prison_cell *cell1;
 331	struct dm_bio_prison_cell *cell2;
 332};
 333
 334static void wake_worker(struct cache *cache)
 335{
 336	queue_work(cache->wq, &cache->worker);
 
 
 
 337}
 338
 339/*----------------------------------------------------------------*/
 340
 341static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
 342{
 343	/* FIXME: change to use a local slab. */
 344	return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
 345}
 346
 347static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
 348{
 349	dm_bio_prison_free_cell(cache->prison, cell);
 350}
 351
 352static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
 353{
 354	if (!p->mg) {
 355		p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
 356		if (!p->mg)
 357			return -ENOMEM;
 358	}
 359
 360	if (!p->cell1) {
 361		p->cell1 = alloc_prison_cell(cache);
 362		if (!p->cell1)
 363			return -ENOMEM;
 364	}
 365
 366	if (!p->cell2) {
 367		p->cell2 = alloc_prison_cell(cache);
 368		if (!p->cell2)
 369			return -ENOMEM;
 370	}
 371
 372	return 0;
 373}
 374
 375static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
 376{
 377	if (p->cell2)
 378		free_prison_cell(cache, p->cell2);
 379
 380	if (p->cell1)
 381		free_prison_cell(cache, p->cell1);
 382
 383	if (p->mg)
 384		mempool_free(p->mg, cache->migration_pool);
 385}
 386
 387static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
 
 
 388{
 389	struct dm_cache_migration *mg = p->mg;
 
 390
 391	BUG_ON(!mg);
 392	p->mg = NULL;
 393
 394	return mg;
 
 
 395}
 396
 397/*
 398 * You must have a cell within the prealloc struct to return.  If not this
 399 * function will BUG() rather than returning NULL.
 400 */
 401static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
 
 
 
 402{
 403	struct dm_bio_prison_cell *r = NULL;
 
 
 
 404
 405	if (p->cell1) {
 406		r = p->cell1;
 407		p->cell1 = NULL;
 408
 409	} else if (p->cell2) {
 410		r = p->cell2;
 411		p->cell2 = NULL;
 412	} else
 413		BUG();
 414
 415	return r;
 416}
 417
 418/*
 419 * You can't have more than two cells in a prealloc struct.  BUG() will be
 420 * called if you try and overfill.
 421 */
 422static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
 423{
 424	if (!p->cell2)
 425		p->cell2 = cell;
 426
 427	else if (!p->cell1)
 428		p->cell1 = cell;
 
 
 429
 430	else
 431		BUG();
 432}
 433
 434/*----------------------------------------------------------------*/
 435
 436static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
 437{
 438	key->virtual = 0;
 439	key->dev = 0;
 440	key->block = from_oblock(oblock);
 
 
 441}
 442
 443/*
 444 * The caller hands in a preallocated cell, and a free function for it.
 445 * The cell will be freed if there's an error, or if it wasn't used because
 446 * a cell with that key already exists.
 447 */
 448typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
 449
 450static int bio_detain(struct cache *cache, dm_oblock_t oblock,
 451		      struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
 452		      cell_free_fn free_fn, void *free_context,
 453		      struct dm_bio_prison_cell **cell_result)
 454{
 455	int r;
 456	struct dm_cell_key key;
 
 
 457
 458	build_key(oblock, &key);
 459	r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
 460	if (r)
 461		free_fn(free_context, cell_prealloc);
 462
 463	return r;
 464}
 465
 466static int get_cell(struct cache *cache,
 467		    dm_oblock_t oblock,
 468		    struct prealloc *structs,
 469		    struct dm_bio_prison_cell **cell_result)
 470{
 471	int r;
 472	struct dm_cell_key key;
 473	struct dm_bio_prison_cell *cell_prealloc;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 474
 475	cell_prealloc = prealloc_get_cell(structs);
 
 476
 477	build_key(oblock, &key);
 478	r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
 479	if (r)
 480		prealloc_put_cell(structs, cell_prealloc);
 481
 482	return r;
 483}
 484
 485/*----------------------------------------------------------------*/
 486
 487static bool is_dirty(struct cache *cache, dm_cblock_t b)
 488{
 489	return test_bit(from_cblock(b), cache->dirty_bitset);
 490}
 491
 492static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
 493{
 494	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
 495		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
 496		policy_set_dirty(cache->policy, oblock);
 497	}
 498}
 499
 500static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
 
 
 
 
 
 
 
 
 
 
 
 501{
 502	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
 503		policy_clear_dirty(cache->policy, oblock);
 504		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
 505		if (!from_cblock(cache->nr_dirty))
 506			dm_table_event(cache->ti->table);
 507	}
 508}
 509
 510/*----------------------------------------------------------------*/
 511
 512static bool block_size_is_power_of_two(struct cache *cache)
 513{
 514	return cache->sectors_per_block_shift >= 0;
 515}
 516
 517/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
 518#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
 519__always_inline
 520#endif
 521static dm_block_t block_div(dm_block_t b, uint32_t n)
 522{
 523	do_div(b, n);
 524
 525	return b;
 526}
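/*
 * Illustrative note: do_div() divides the 64-bit dividend in place and
 * returns the remainder, so this wrapper hands back the quotient.  For
 * example, with an assumed 64-sector block size:
 *
 *	dm_block_t b = block_div(1000, 64);	/* b == 15 */
 */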
 527
 528static void set_discard(struct cache *cache, dm_oblock_t b)
 529{
 530	unsigned long flags;
 531
 532	atomic_inc(&cache->stats.discard_count);
 533
 534	spin_lock_irqsave(&cache->lock, flags);
 535	set_bit(from_oblock(b), cache->discard_bitset);
 536	spin_unlock_irqrestore(&cache->lock, flags);
 537}
 538
 539static void clear_discard(struct cache *cache, dm_oblock_t b)
 540{
 541	unsigned long flags;
 542
 543	spin_lock_irqsave(&cache->lock, flags);
 544	clear_bit(from_oblock(b), cache->discard_bitset);
 545	spin_unlock_irqrestore(&cache->lock, flags);
 546}
 547
 548static bool is_discarded(struct cache *cache, dm_oblock_t b)
 549{
 550	int r;
 551	unsigned long flags;
 552
 553	spin_lock_irqsave(&cache->lock, flags);
 554	r = test_bit(from_oblock(b), cache->discard_bitset);
 555	spin_unlock_irqrestore(&cache->lock, flags);
 556
 557	return r;
 558}
 559
 560static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
 561{
 562	int r;
 563	unsigned long flags;
 564
 565	spin_lock_irqsave(&cache->lock, flags);
 566	r = test_bit(from_oblock(b), cache->discard_bitset);
 567	spin_unlock_irqrestore(&cache->lock, flags);
 568
 569	return r;
 570}
 571
 572/*----------------------------------------------------------------*/
 573
 574static void load_stats(struct cache *cache)
 575{
 576	struct dm_cache_statistics stats;
 577
 578	dm_cache_metadata_get_stats(cache->cmd, &stats);
 579	atomic_set(&cache->stats.read_hit, stats.read_hits);
 580	atomic_set(&cache->stats.read_miss, stats.read_misses);
 581	atomic_set(&cache->stats.write_hit, stats.write_hits);
 582	atomic_set(&cache->stats.write_miss, stats.write_misses);
 583}
 584
 585static void save_stats(struct cache *cache)
 586{
 587	struct dm_cache_statistics stats;
 588
 589	stats.read_hits = atomic_read(&cache->stats.read_hit);
 590	stats.read_misses = atomic_read(&cache->stats.read_miss);
 591	stats.write_hits = atomic_read(&cache->stats.write_hit);
 592	stats.write_misses = atomic_read(&cache->stats.write_miss);
 593
 594	dm_cache_metadata_set_stats(cache->cmd, &stats);
 595}
 596
 597/*----------------------------------------------------------------
 598 * Per bio data
 599 *--------------------------------------------------------------*/
 600
 601/*
 602 * If using writeback, leave out struct per_bio_data's writethrough fields.
 603 */
 604#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
 605#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
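/*
 * Illustrative sketch (field order assumed from the offsetof() above):
 * the writethrough-only members sit at the end of struct per_bio_data,
 * with 'cache' first among them, so writeback mode can ask dm for a
 * shorter per-bio allocation that stops just before them:
 *
 *	struct per_bio_data {
 *		bool tick;
 *		unsigned req_nr;
 *		struct dm_deferred_entry *all_io_entry;
 *
 *		(writethrough fields; must remain last, 'cache' first)
 *		struct cache *cache;
 *		dm_cblock_t cblock;
 *		struct dm_hook_info hook_info;
 *		struct dm_bio_details bio_details;
 *	};
 */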
 606
 607static bool writethrough_mode(struct cache_features *f)
 608{
 609	return f->io_mode == CM_IO_WRITETHROUGH;
 610}
 611
 612static bool writeback_mode(struct cache_features *f)
 613{
 614	return f->io_mode == CM_IO_WRITEBACK;
 615}
 616
 617static bool passthrough_mode(struct cache_features *f)
 618{
 619	return f->io_mode == CM_IO_PASSTHROUGH;
 620}
 621
 622static size_t get_per_bio_data_size(struct cache *cache)
 623{
 624	return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
 625}
 626
 627static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
 628{
 629	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
 630	BUG_ON(!pb);
 631	return pb;
 632}
 633
 634static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
 635{
 636	struct per_bio_data *pb = get_per_bio_data(bio, data_size);
 637
 638	pb->tick = false;
 639	pb->req_nr = dm_bio_get_target_bio_nr(bio);
 640	pb->all_io_entry = NULL;
 641
 642	return pb;
 643}
 644
 645/*----------------------------------------------------------------
 646 * Remapping
 647 *--------------------------------------------------------------*/
 648static void remap_to_origin(struct cache *cache, struct bio *bio)
 649{
 650	bio->bi_bdev = cache->origin_dev->bdev;
 651}
 652
 653static void remap_to_cache(struct cache *cache, struct bio *bio,
 654			   dm_cblock_t cblock)
 655{
 656	sector_t bi_sector = bio->bi_iter.bi_sector;
 657	sector_t block = from_cblock(cblock);
 658
 659	bio->bi_bdev = cache->cache_dev->bdev;
 660	if (!block_size_is_power_of_two(cache))
 661		bio->bi_iter.bi_sector =
 662			(block * cache->sectors_per_block) +
 663			sector_div(bi_sector, cache->sectors_per_block);
 664	else
 665		bio->bi_iter.bi_sector =
 666			(block << cache->sectors_per_block_shift) |
 667			(bi_sector & (cache->sectors_per_block - 1));
 668}
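/*
 * Worked example (values assumed): with a power-of-two block size of 64
 * sectors (sectors_per_block_shift == 6), cblock 3 and bi_sector 130
 * remap to (3 << 6) | (130 & 63) == 192 | 2 == sector 194 on the cache
 * device.  With a non-power-of-two size of 48 sectors, the sector_div()
 * path computes 3 * 48 + (130 % 48) == 144 + 34 == sector 178.
 */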
 669
 670static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 671{
 672	unsigned long flags;
 673	size_t pb_data_size = get_per_bio_data_size(cache);
 674	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 675
 676	spin_lock_irqsave(&cache->lock, flags);
 677	if (cache->need_tick_bio &&
 678	    !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
 679		pb->tick = true;
 680		cache->need_tick_bio = false;
 681	}
 682	spin_unlock_irqrestore(&cache->lock, flags);
 683}
 684
 685static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
 686				  dm_oblock_t oblock)
 687{
 688	check_if_tick_bio_needed(cache, bio);
 689	remap_to_origin(cache, bio);
 690	if (bio_data_dir(bio) == WRITE)
 691		clear_discard(cache, oblock);
 692}
 693
 694static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 695				 dm_oblock_t oblock, dm_cblock_t cblock)
 696{
 697	check_if_tick_bio_needed(cache, bio);
 698	remap_to_cache(cache, bio, cblock);
 699	if (bio_data_dir(bio) == WRITE) {
 700		set_dirty(cache, oblock, cblock);
 701		clear_discard(cache, oblock);
 702	}
 703}
 704
 705static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 706{
 707	sector_t block_nr = bio->bi_iter.bi_sector;
 708
 709	if (!block_size_is_power_of_two(cache))
 710		(void) sector_div(block_nr, cache->sectors_per_block);
 711	else
 712		block_nr >>= cache->sectors_per_block_shift;
 713
 714	return to_oblock(block_nr);
 715}
 716
 717static int bio_triggers_commit(struct cache *cache, struct bio *bio)
 718{
 719	return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
 720}
 721
 722static void issue(struct cache *cache, struct bio *bio)
 723{
 724	unsigned long flags;
 725
 726	if (!bio_triggers_commit(cache, bio)) {
 727		generic_make_request(bio);
 728		return;
 729	}
 730
 731	/*
 732	 * Batch together any bios that trigger commits and then issue a
 733	 * single commit for them in do_worker().
 734	 */
 735	spin_lock_irqsave(&cache->lock, flags);
 736	cache->commit_requested = true;
 737	bio_list_add(&cache->deferred_flush_bios, bio);
 738	spin_unlock_irqrestore(&cache->lock, flags);
 739}
 740
 741static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
 742{
 743	unsigned long flags;
 744
 745	spin_lock_irqsave(&cache->lock, flags);
 746	bio_list_add(&cache->deferred_writethrough_bios, bio);
 747	spin_unlock_irqrestore(&cache->lock, flags);
 748
 749	wake_worker(cache);
 750}
 751
 752static void writethrough_endio(struct bio *bio, int err)
 753{
 754	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
 755
 756	dm_unhook_bio(&pb->hook_info, bio);
 757
 758	if (err) {
 759		bio_endio(bio, err);
 760		return;
 761	}
 762
 763	dm_bio_restore(&pb->bio_details, bio);
 764	remap_to_cache(pb->cache, bio, pb->cblock);
 765
 766	/*
 767	 * We can't issue this bio directly, since we're in interrupt
 768	 * context.  So it gets put on a bio list for processing by the
 769	 * worker thread.
 770	 */
 771	defer_writethrough_bio(pb->cache, bio);
 772}
 773
 774/*
 775 * When running in writethrough mode we need to send writes to clean blocks
 776 * to both the cache and origin devices.  In the future we'd like to clone
 777 * the bio and issue the copies in parallel, but for now we issue them in
 778 * series, as this is simpler.
 779 */
 780static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
 781				       dm_oblock_t oblock, dm_cblock_t cblock)
 782{
 783	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
 784
 785	pb->cache = cache;
 786	pb->cblock = cblock;
 787	dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
 788	dm_bio_record(&pb->bio_details, bio);
 789
 790	remap_to_origin_clear_discard(pb->cache, bio, oblock);
 791}
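/*
 * Illustrative flow for a writethrough write to a clean cached block:
 * the bio is hooked and remapped to the origin; when that write
 * completes, writethrough_endio() restores the bio, remaps it to the
 * cache and defers it; the worker then reissues it.  The caller's endio
 * only runs after both writes finish, so the cache never holds newer
 * data than the origin.
 */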
 792
 793/*----------------------------------------------------------------
 794 * Migration processing
 795 *
 796 * Migration covers moving data from the origin device to the cache, or
 797 * vice versa.
 798 *--------------------------------------------------------------*/
 799static void free_migration(struct dm_cache_migration *mg)
 800{
 801	mempool_free(mg, mg->cache->migration_pool);
 802}
 803
 804static void inc_nr_migrations(struct cache *cache)
 805{
 806	atomic_inc(&cache->nr_migrations);
 807}
 808
 809static void dec_nr_migrations(struct cache *cache)
 810{
 811	atomic_dec(&cache->nr_migrations);
 812
 813	/*
 814	 * Wake the worker in case we're suspending the target.
 815	 */
 816	wake_up(&cache->migration_wait);
 817}
 818
 819static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
 820			 bool holder)
 821{
 822	(holder ? dm_cell_release : dm_cell_release_no_holder)
 823		(cache->prison, cell, &cache->deferred_bios);
 824	free_prison_cell(cache, cell);
 825}
 826
 827static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
 828		       bool holder)
 829{
 830	unsigned long flags;
 831
 832	spin_lock_irqsave(&cache->lock, flags);
 833	__cell_defer(cache, cell, holder);
 834	spin_unlock_irqrestore(&cache->lock, flags);
 835
 836	wake_worker(cache);
 837}
 838
 839static void cleanup_migration(struct dm_cache_migration *mg)
 840{
 841	struct cache *cache = mg->cache;
 842	free_migration(mg);
 843	dec_nr_migrations(cache);
 844}
 845
 846static void migration_failure(struct dm_cache_migration *mg)
 847{
 848	struct cache *cache = mg->cache;
 849
 850	if (mg->writeback) {
 851		DMWARN_LIMIT("writeback failed; couldn't copy block");
 852		set_dirty(cache, mg->old_oblock, mg->cblock);
 853		cell_defer(cache, mg->old_ocell, false);
 854
 855	} else if (mg->demote) {
 856		DMWARN_LIMIT("demotion failed; couldn't copy block");
 857		policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
 858
 859		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
 860		if (mg->promote)
 861			cell_defer(cache, mg->new_ocell, true);
 862	} else {
 863		DMWARN_LIMIT("promotion failed; couldn't copy block");
 864		policy_remove_mapping(cache->policy, mg->new_oblock);
 865		cell_defer(cache, mg->new_ocell, true);
 866	}
 867
 868	cleanup_migration(mg);
 869}
 870
 871static void migration_success_pre_commit(struct dm_cache_migration *mg)
 872{
 873	unsigned long flags;
 874	struct cache *cache = mg->cache;
 875
 876	if (mg->writeback) {
 877		cell_defer(cache, mg->old_ocell, false);
 878		clear_dirty(cache, mg->old_oblock, mg->cblock);
 879		cleanup_migration(mg);
 880		return;
 881
 882	} else if (mg->demote) {
 883		if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
 884			DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
 885			policy_force_mapping(cache->policy, mg->new_oblock,
 886					     mg->old_oblock);
 887			if (mg->promote)
 888				cell_defer(cache, mg->new_ocell, true);
 889			cleanup_migration(mg);
 890			return;
 891		}
 892	} else {
 893		if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
 894			DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
 895			policy_remove_mapping(cache->policy, mg->new_oblock);
 896			cleanup_migration(mg);
 897			return;
 898		}
 899	}
 900
 901	spin_lock_irqsave(&cache->lock, flags);
 902	list_add_tail(&mg->list, &cache->need_commit_migrations);
 903	cache->commit_requested = true;
 904	spin_unlock_irqrestore(&cache->lock, flags);
 905}
 906
 907static void migration_success_post_commit(struct dm_cache_migration *mg)
 908{
 909	unsigned long flags;
 910	struct cache *cache = mg->cache;
 911
 912	if (mg->writeback) {
 913		DMWARN("writeback unexpectedly triggered commit");
 914		return;
 915
 916	} else if (mg->demote) {
 917		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
 918
 919		if (mg->promote) {
 920			mg->demote = false;
 921
 922			spin_lock_irqsave(&cache->lock, flags);
 923			list_add_tail(&mg->list, &cache->quiesced_migrations);
 924			spin_unlock_irqrestore(&cache->lock, flags);
 925
 926		} else {
 927			if (mg->invalidate)
 928				policy_remove_mapping(cache->policy, mg->old_oblock);
 929			cleanup_migration(mg);
 930		}
 931
 932	} else {
 933		if (mg->requeue_holder)
 934			cell_defer(cache, mg->new_ocell, true);
 935		else {
 936			bio_endio(mg->new_ocell->holder, 0);
 937			cell_defer(cache, mg->new_ocell, false);
 938		}
 939		clear_dirty(cache, mg->new_oblock, mg->cblock);
 940		cleanup_migration(mg);
 941	}
 942}
 943
 944static void copy_complete(int read_err, unsigned long write_err, void *context)
 945{
 946	unsigned long flags;
 947	struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
 948	struct cache *cache = mg->cache;
 949
 950	if (read_err || write_err)
 951		mg->err = true;
 952
 953	spin_lock_irqsave(&cache->lock, flags);
 954	list_add_tail(&mg->list, &cache->completed_migrations);
 955	spin_unlock_irqrestore(&cache->lock, flags);
 956
 957	wake_worker(cache);
 958}
 959
 960static void issue_copy_real(struct dm_cache_migration *mg)
 961{
 962	int r;
 963	struct dm_io_region o_region, c_region;
 964	struct cache *cache = mg->cache;
 965	sector_t cblock = from_cblock(mg->cblock);
 966
 967	o_region.bdev = cache->origin_dev->bdev;
 968	o_region.count = cache->sectors_per_block;
 969
 970	c_region.bdev = cache->cache_dev->bdev;
 971	c_region.sector = cblock * cache->sectors_per_block;
 972	c_region.count = cache->sectors_per_block;
 973
 974	if (mg->writeback || mg->demote) {
 975		/* demote */
 976		o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
 977		r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
 978	} else {
 979		/* promote */
 980		o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
 981		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
 982	}
 983
 984	if (r < 0) {
 985		DMERR_LIMIT("issuing migration failed");
 986		migration_failure(mg);
 987	}
 988}
 989
 990static void overwrite_endio(struct bio *bio, int err)
 991{
 992	struct dm_cache_migration *mg = bio->bi_private;
 993	struct cache *cache = mg->cache;
 994	size_t pb_data_size = get_per_bio_data_size(cache);
 995	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 996	unsigned long flags;
 997
 998	dm_unhook_bio(&pb->hook_info, bio);
 999
1000	if (err)
1001		mg->err = true;
1002
1003	mg->requeue_holder = false;
1004
1005	spin_lock_irqsave(&cache->lock, flags);
1006	list_add_tail(&mg->list, &cache->completed_migrations);
1007	spin_unlock_irqrestore(&cache->lock, flags);
1008
1009	wake_worker(cache);
1010}
1011
1012static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
1013{
1014	size_t pb_data_size = get_per_bio_data_size(mg->cache);
1015	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1016
1017	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1018	remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
1019	generic_make_request(bio);
1020}
1021
1022static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1023{
1024	return (bio_data_dir(bio) == WRITE) &&
1025		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1026}
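/*
 * Example (assumed 64-sector block size): only a WRITE of exactly
 * 64 << SECTOR_SHIFT == 32768 bytes qualifies, letting issue_copy()
 * take the issue_overwrite() shortcut instead of a promotion copy.
 */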
1027
1028static void avoid_copy(struct dm_cache_migration *mg)
1029{
1030	atomic_inc(&mg->cache->stats.copies_avoided);
1031	migration_success_pre_commit(mg);
1032}
1033
1034static void issue_copy(struct dm_cache_migration *mg)
1035{
1036	bool avoid;
1037	struct cache *cache = mg->cache;
1038
1039	if (mg->writeback || mg->demote)
1040		avoid = !is_dirty(cache, mg->cblock) ||
1041			is_discarded_oblock(cache, mg->old_oblock);
1042	else {
1043		struct bio *bio = mg->new_ocell->holder;
1044
1045		avoid = is_discarded_oblock(cache, mg->new_oblock);
1046
1047		if (!avoid && bio_writes_complete_block(cache, bio)) {
1048			issue_overwrite(mg, bio);
1049			return;
1050		}
1051	}
1052
1053	avoid ? avoid_copy(mg) : issue_copy_real(mg);
1054}
1055
1056static void complete_migration(struct dm_cache_migration *mg)
1057{
1058	if (mg->err)
1059		migration_failure(mg);
1060	else
1061		migration_success_pre_commit(mg);
1062}
1063
1064static void process_migrations(struct cache *cache, struct list_head *head,
1065			       void (*fn)(struct dm_cache_migration *))
1066{
1067	unsigned long flags;
1068	struct list_head list;
1069	struct dm_cache_migration *mg, *tmp;
1070
1071	INIT_LIST_HEAD(&list);
1072	spin_lock_irqsave(&cache->lock, flags);
1073	list_splice_init(head, &list);
1074	spin_unlock_irqrestore(&cache->lock, flags);
1075
1076	list_for_each_entry_safe(mg, tmp, &list, list)
1077		fn(mg);
1078}
1079
1080static void __queue_quiesced_migration(struct dm_cache_migration *mg)
1081{
1082	list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
1083}
1084
1085static void queue_quiesced_migration(struct dm_cache_migration *mg)
1086{
1087	unsigned long flags;
1088	struct cache *cache = mg->cache;
1089
1090	spin_lock_irqsave(&cache->lock, flags);
1091	__queue_quiesced_migration(mg);
1092	spin_unlock_irqrestore(&cache->lock, flags);
1093
1094	wake_worker(cache);
1095}
1096
1097static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
1098{
1099	unsigned long flags;
1100	struct dm_cache_migration *mg, *tmp;
1101
1102	spin_lock_irqsave(&cache->lock, flags);
1103	list_for_each_entry_safe(mg, tmp, work, list)
1104		__queue_quiesced_migration(mg);
1105	spin_unlock_irqrestore(&cache->lock, flags);
1106
1107	wake_worker(cache);
1108}
1109
1110static void check_for_quiesced_migrations(struct cache *cache,
1111					  struct per_bio_data *pb)
1112{
1113	struct list_head work;
1114
1115	if (!pb->all_io_entry)
1116		return;
1117
1118	INIT_LIST_HEAD(&work);
1119	if (pb->all_io_entry)
1120		dm_deferred_entry_dec(pb->all_io_entry, &work);
1121
1122	if (!list_empty(&work))
1123		queue_quiesced_migrations(cache, &work);
1124}
1125
1126static void quiesce_migration(struct dm_cache_migration *mg)
1127{
1128	if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
1129		queue_quiesced_migration(mg);
1130}
1131
1132static void promote(struct cache *cache, struct prealloc *structs,
1133		    dm_oblock_t oblock, dm_cblock_t cblock,
1134		    struct dm_bio_prison_cell *cell)
1135{
1136	struct dm_cache_migration *mg = prealloc_get_migration(structs);
1137
1138	mg->err = false;
1139	mg->writeback = false;
1140	mg->demote = false;
1141	mg->promote = true;
1142	mg->requeue_holder = true;
1143	mg->invalidate = false;
1144	mg->cache = cache;
1145	mg->new_oblock = oblock;
1146	mg->cblock = cblock;
1147	mg->old_ocell = NULL;
1148	mg->new_ocell = cell;
1149	mg->start_jiffies = jiffies;
1150
1151	inc_nr_migrations(cache);
1152	quiesce_migration(mg);
1153}
1154
1155static void writeback(struct cache *cache, struct prealloc *structs,
1156		      dm_oblock_t oblock, dm_cblock_t cblock,
1157		      struct dm_bio_prison_cell *cell)
1158{
1159	struct dm_cache_migration *mg = prealloc_get_migration(structs);
1160
1161	mg->err = false;
1162	mg->writeback = true;
1163	mg->demote = false;
1164	mg->promote = false;
1165	mg->requeue_holder = true;
1166	mg->invalidate = false;
1167	mg->cache = cache;
1168	mg->old_oblock = oblock;
1169	mg->cblock = cblock;
1170	mg->old_ocell = cell;
1171	mg->new_ocell = NULL;
1172	mg->start_jiffies = jiffies;
1173
1174	inc_nr_migrations(cache);
1175	quiesce_migration(mg);
1176}
1177
1178static void demote_then_promote(struct cache *cache, struct prealloc *structs,
1179				dm_oblock_t old_oblock, dm_oblock_t new_oblock,
1180				dm_cblock_t cblock,
1181				struct dm_bio_prison_cell *old_ocell,
1182				struct dm_bio_prison_cell *new_ocell)
1183{
1184	struct dm_cache_migration *mg = prealloc_get_migration(structs);
1185
1186	mg->err = false;
1187	mg->writeback = false;
1188	mg->demote = true;
1189	mg->promote = true;
1190	mg->requeue_holder = true;
1191	mg->invalidate = false;
1192	mg->cache = cache;
1193	mg->old_oblock = old_oblock;
1194	mg->new_oblock = new_oblock;
1195	mg->cblock = cblock;
1196	mg->old_ocell = old_ocell;
1197	mg->new_ocell = new_ocell;
1198	mg->start_jiffies = jiffies;
1199
1200	inc_nr_migrations(cache);
1201	quiesce_migration(mg);
1202}
1203
1204/*
1205 * Invalidate a cache entry.  No writeback occurs; any changes in the cache
1206 * block are thrown away.
1207 */
1208static void invalidate(struct cache *cache, struct prealloc *structs,
1209		       dm_oblock_t oblock, dm_cblock_t cblock,
1210		       struct dm_bio_prison_cell *cell)
1211{
1212	struct dm_cache_migration *mg = prealloc_get_migration(structs);
1213
1214	mg->err = false;
1215	mg->writeback = false;
1216	mg->demote = true;
1217	mg->promote = false;
1218	mg->requeue_holder = true;
1219	mg->invalidate = true;
1220	mg->cache = cache;
1221	mg->old_oblock = oblock;
1222	mg->cblock = cblock;
1223	mg->old_ocell = cell;
1224	mg->new_ocell = NULL;
1225	mg->start_jiffies = jiffies;
1226
1227	inc_nr_migrations(cache);
1228	quiesce_migration(mg);
1229}
1230
1231/*----------------------------------------------------------------
1232 * bio processing
1233 *--------------------------------------------------------------*/
1234static void defer_bio(struct cache *cache, struct bio *bio)
1235{
1236	unsigned long flags;
1237
1238	spin_lock_irqsave(&cache->lock, flags);
1239	bio_list_add(&cache->deferred_bios, bio);
1240	spin_unlock_irqrestore(&cache->lock, flags);
1241
1242	wake_worker(cache);
1243}
1244
1245static void process_flush_bio(struct cache *cache, struct bio *bio)
1246{
1247	size_t pb_data_size = get_per_bio_data_size(cache);
1248	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1249
1250	BUG_ON(bio->bi_iter.bi_size);
1251	if (!pb->req_nr)
1252		remap_to_origin(cache, bio);
1253	else
1254		remap_to_cache(cache, bio, 0);
1255
1256	issue(cache, bio);
1257}
1258
1259/*
1260 * People generally discard large parts of a device, e.g., the whole device
1261 * when formatting.  Splitting these large discards up into cache-block-sized
1262 * ios and then quiescing (always necessary for discard) takes too
1263 * long.
1264 *
1265 * We keep it simple, and allow any size of discard to come in, and just
1266 * mark off blocks on the discard bitset.  No passdown occurs!
1267 *
1268 * To implement passdown we need to change the bio_prison such that a cell
1269 * can have a key that spans many blocks.
1270 */
1271static void process_discard_bio(struct cache *cache, struct bio *bio)
1272{
1273	dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
1274						  cache->sectors_per_block);
1275	dm_block_t end_block = bio_end_sector(bio);
1276	dm_block_t b;
1277
1278	end_block = block_div(end_block, cache->sectors_per_block);
1279
1280	for (b = start_block; b < end_block; b++)
1281		set_discard(cache, to_oblock(b));
1282
1283	bio_endio(bio, 0);
1284}
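/*
 * Worked example (assumed 64-sector block size): a discard covering
 * sectors 100..499 has bi_sector == 100 and bio_end_sector() == 500,
 * giving start_block == dm_sector_div_up(100, 64) == 2 and
 * end_block == block_div(500, 64) == 7, so only the fully covered
 * blocks 2..6 are marked; partial blocks at either edge are skipped.
 */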
1285
1286static bool spare_migration_bandwidth(struct cache *cache)
1287{
1288	sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
1289		cache->sectors_per_block;
1290	return current_volume < cache->migration_threshold;
1291}
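/*
 * Example: with the default migration_threshold of 2048 sectors and an
 * assumed 64-sector block size, (nr_migrations + 1) * 64 < 2048 holds
 * while nr_migrations <= 30, so at most 31 migrations are in flight
 * before new ones are throttled.
 */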
1292
1293static void inc_hit_counter(struct cache *cache, struct bio *bio)
1294{
1295	atomic_inc(bio_data_dir(bio) == READ ?
1296		   &cache->stats.read_hit : &cache->stats.write_hit);
1297}
1298
1299static void inc_miss_counter(struct cache *cache, struct bio *bio)
1300{
1301	atomic_inc(bio_data_dir(bio) == READ ?
1302		   &cache->stats.read_miss : &cache->stats.write_miss);
1303}
1304
1305static void issue_cache_bio(struct cache *cache, struct bio *bio,
1306			    struct per_bio_data *pb,
1307			    dm_oblock_t oblock, dm_cblock_t cblock)
1308{
1309	pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1310	remap_to_cache_dirty(cache, bio, oblock, cblock);
1311	issue(cache, bio);
1312}
1313
1314static void process_bio(struct cache *cache, struct prealloc *structs,
1315			struct bio *bio)
1316{
1317	int r;
1318	bool release_cell = true;
1319	dm_oblock_t block = get_bio_block(cache, bio);
1320	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
1321	struct policy_result lookup_result;
1322	size_t pb_data_size = get_per_bio_data_size(cache);
1323	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1324	bool discarded_block = is_discarded_oblock(cache, block);
1325	bool passthrough = passthrough_mode(&cache->features);
1326	bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
1327
1328	/*
1329	 * Check to see if that block is currently migrating.
1330	 */
1331	cell_prealloc = prealloc_get_cell(structs);
1332	r = bio_detain(cache, block, bio, cell_prealloc,
1333		       (cell_free_fn) prealloc_put_cell,
1334		       structs, &new_ocell);
1335	if (r > 0)
1336		return;
1337
1338	r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
1339		       bio, &lookup_result);
1340
1341	if (r == -EWOULDBLOCK)
1342		/* migration has been denied */
1343		lookup_result.op = POLICY_MISS;
1344
1345	switch (lookup_result.op) {
1346	case POLICY_HIT:
1347		if (passthrough) {
1348			inc_miss_counter(cache, bio);
1349
1350			/*
1351			 * Passthrough always maps to the origin,
1352			 * invalidating any cache blocks that are written
1353			 * to.
1354			 */
1355
1356			if (bio_data_dir(bio) == WRITE) {
1357				atomic_inc(&cache->stats.demotion);
1358				invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
1359				release_cell = false;
1360
1361			} else {
1362				/* FIXME: factor out issue_origin() */
1363				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1364				remap_to_origin_clear_discard(cache, bio, block);
1365				issue(cache, bio);
1366			}
1367		} else {
1368			inc_hit_counter(cache, bio);
1369
1370			if (bio_data_dir(bio) == WRITE &&
1371			    writethrough_mode(&cache->features) &&
1372			    !is_dirty(cache, lookup_result.cblock)) {
1373				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1374				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
1375				issue(cache, bio);
1376			} else
1377				issue_cache_bio(cache, bio, pb, block, lookup_result.cblock);
1378		}
1379
1380		break;
1381
1382	case POLICY_MISS:
1383		inc_miss_counter(cache, bio);
1384		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1385		remap_to_origin_clear_discard(cache, bio, block);
1386		issue(cache, bio);
1387		break;
1388
1389	case POLICY_NEW:
1390		atomic_inc(&cache->stats.promotion);
1391		promote(cache, structs, block, lookup_result.cblock, new_ocell);
1392		release_cell = false;
1393		break;
1394
1395	case POLICY_REPLACE:
1396		cell_prealloc = prealloc_get_cell(structs);
1397		r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
1398			       (cell_free_fn) prealloc_put_cell,
1399			       structs, &old_ocell);
1400		if (r > 0) {
1401			/*
1402			 * We have to be careful to avoid lock inversion of
1403			 * the cells.  So we back off, and wait for the
1404			 * old_ocell to become free.
1405			 */
1406			policy_force_mapping(cache->policy, block,
1407					     lookup_result.old_oblock);
1408			atomic_inc(&cache->stats.cache_cell_clash);
1409			break;
1410		}
1411		atomic_inc(&cache->stats.demotion);
1412		atomic_inc(&cache->stats.promotion);
1413
1414		demote_then_promote(cache, structs, lookup_result.old_oblock,
1415				    block, lookup_result.cblock,
1416				    old_ocell, new_ocell);
1417		release_cell = false;
1418		break;
1419
1420	default:
1421		DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
1422			    (unsigned) lookup_result.op);
1423		bio_io_error(bio);
1424	}
1425
1426	if (release_cell)
1427		cell_defer(cache, new_ocell, false);
1428}
1429
1430static int need_commit_due_to_time(struct cache *cache)
1431{
1432	return jiffies < cache->last_commit_jiffies ||
1433	       jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
1434}
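/*
 * Illustrative: the first clause handles jiffies wrap-around.  If
 * last_commit_jiffies was taken just before the counter wrapped,
 * jiffies < last_commit_jiffies triggers a commit immediately instead
 * of waiting nearly a full wrap for the second clause to fire.
 */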
1435
1436static int commit_if_needed(struct cache *cache)
1437{
1438	int r = 0;
1439
1440	if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
1441	    dm_cache_changed_this_transaction(cache->cmd)) {
1442		atomic_inc(&cache->stats.commit_count);
1443		cache->commit_requested = false;
1444		r = dm_cache_commit(cache->cmd, false);
1445		cache->last_commit_jiffies = jiffies;
1446	}
1447
1448	return r;
1449}
1450
1451static void process_deferred_bios(struct cache *cache)
1452{
1453	unsigned long flags;
1454	struct bio_list bios;
1455	struct bio *bio;
1456	struct prealloc structs;
1457
1458	memset(&structs, 0, sizeof(structs));
1459	bio_list_init(&bios);
1460
1461	spin_lock_irqsave(&cache->lock, flags);
1462	bio_list_merge(&bios, &cache->deferred_bios);
1463	bio_list_init(&cache->deferred_bios);
1464	spin_unlock_irqrestore(&cache->lock, flags);
1465
1466	while (!bio_list_empty(&bios)) {
1467		/*
1468		 * If we've got no free migration structs, and processing
1469		 * this bio might require one, we pause until there are some
1470		 * prepared mappings to process.
1471		 */
1472		if (prealloc_data_structs(cache, &structs)) {
1473			spin_lock_irqsave(&cache->lock, flags);
1474			bio_list_merge(&cache->deferred_bios, &bios);
1475			spin_unlock_irqrestore(&cache->lock, flags);
1476			break;
1477		}
1478
1479		bio = bio_list_pop(&bios);
1480
1481		if (bio->bi_rw & REQ_FLUSH)
1482			process_flush_bio(cache, bio);
1483		else if (bio->bi_rw & REQ_DISCARD)
1484			process_discard_bio(cache, bio);
1485		else
1486			process_bio(cache, &structs, bio);
1487	}
1488
1489	prealloc_free_structs(cache, &structs);
1490}
1491
1492static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
1493{
1494	unsigned long flags;
1495	struct bio_list bios;
1496	struct bio *bio;
1497
1498	bio_list_init(&bios);
1499
1500	spin_lock_irqsave(&cache->lock, flags);
1501	bio_list_merge(&bios, &cache->deferred_flush_bios);
1502	bio_list_init(&cache->deferred_flush_bios);
1503	spin_unlock_irqrestore(&cache->lock, flags);
1504
1505	while ((bio = bio_list_pop(&bios)))
1506		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
1507}
1508
1509static void process_deferred_writethrough_bios(struct cache *cache)
1510{
1511	unsigned long flags;
1512	struct bio_list bios;
1513	struct bio *bio;
1514
1515	bio_list_init(&bios);
1516
1517	spin_lock_irqsave(&cache->lock, flags);
1518	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
1519	bio_list_init(&cache->deferred_writethrough_bios);
1520	spin_unlock_irqrestore(&cache->lock, flags);
1521
1522	while ((bio = bio_list_pop(&bios)))
1523		generic_make_request(bio);
1524}
1525
1526static void writeback_some_dirty_blocks(struct cache *cache)
1527{
1528	int r = 0;
1529	dm_oblock_t oblock;
1530	dm_cblock_t cblock;
1531	struct prealloc structs;
1532	struct dm_bio_prison_cell *old_ocell;
1533
1534	memset(&structs, 0, sizeof(structs));
1535
1536	while (spare_migration_bandwidth(cache)) {
1537		if (prealloc_data_structs(cache, &structs))
1538			break;
1539
1540		r = policy_writeback_work(cache->policy, &oblock, &cblock);
1541		if (r)
1542			break;
1543
1544		r = get_cell(cache, oblock, &structs, &old_ocell);
1545		if (r) {
1546			policy_set_dirty(cache->policy, oblock);
1547			break;
1548		}
1549
1550		writeback(cache, &structs, oblock, cblock, old_ocell);
1551	}
1552
1553	prealloc_free_structs(cache, &structs);
1554}
1555
1556/*----------------------------------------------------------------
1557 * Invalidations.
1558 * Dropping something from the cache *without* writing back.
1559 *--------------------------------------------------------------*/
1560
1561static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
1562{
1563	int r = 0;
1564	uint64_t begin = from_cblock(req->cblocks->begin);
1565	uint64_t end = from_cblock(req->cblocks->end);
1566
1567	while (begin != end) {
1568		r = policy_remove_cblock(cache->policy, to_cblock(begin));
1569		if (!r) {
1570			r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
1571			if (r)
1572				break;
1573
1574		} else if (r == -ENODATA) {
1575			/* harmless, already unmapped */
1576			r = 0;
1577
1578		} else {
1579			DMERR("policy_remove_cblock failed");
1580			break;
1581		}
1582
1583		begin++;
1584	}
1585
1586	cache->commit_requested = true;
1587
1588	req->err = r;
1589	atomic_set(&req->complete, 1);
1590
1591	wake_up(&req->result_wait);
1592}
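/*
 * Note: the request range is treated as half-open, [begin, end), so
 * begin == 0, end == 100 invalidates cblocks 0..99.  -ENODATA from the
 * policy just means a block was already unmapped and is skipped.
 */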
1593
1594static void process_invalidation_requests(struct cache *cache)
1595{
1596	struct list_head list;
1597	struct invalidation_request *req, *tmp;
1598
1599	INIT_LIST_HEAD(&list);
1600	spin_lock(&cache->invalidation_lock);
1601	list_splice_init(&cache->invalidation_requests, &list);
1602	spin_unlock(&cache->invalidation_lock);
1603
1604	list_for_each_entry_safe (req, tmp, &list, list)
1605		process_invalidation_request(cache, req);
1606}
1607
1608/*----------------------------------------------------------------
1609 * Main worker loop
1610 *--------------------------------------------------------------*/
1611static bool is_quiescing(struct cache *cache)
1612{
1613	return atomic_read(&cache->quiescing);
1614}
1615
1616static void ack_quiescing(struct cache *cache)
1617{
1618	if (is_quiescing(cache)) {
1619		atomic_inc(&cache->quiescing_ack);
1620		wake_up(&cache->quiescing_wait);
1621	}
1622}
1623
1624static void wait_for_quiescing_ack(struct cache *cache)
1625{
1626	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
1627}
1628
1629static void start_quiescing(struct cache *cache)
1630{
1631	atomic_inc(&cache->quiescing);
1632	wait_for_quiescing_ack(cache);
1633}
1634
1635static void stop_quiescing(struct cache *cache)
1636{
1637	atomic_set(&cache->quiescing, 0);
1638	atomic_set(&cache->quiescing_ack, 0);
1639}
1640
1641static void wait_for_migrations(struct cache *cache)
1642{
1643	wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
1644}
1645
1646static void stop_worker(struct cache *cache)
1647{
1648	cancel_delayed_work(&cache->waker);
1649	flush_workqueue(cache->wq);
1650}
1651
1652static void requeue_deferred_io(struct cache *cache)
1653{
1654	struct bio *bio;
1655	struct bio_list bios;
1656
1657	bio_list_init(&bios);
1658	bio_list_merge(&bios, &cache->deferred_bios);
1659	bio_list_init(&cache->deferred_bios);
1660
1661	while ((bio = bio_list_pop(&bios)))
1662		bio_endio(bio, DM_ENDIO_REQUEUE);
1663}
1664
1665static int more_work(struct cache *cache)
1666{
1667	if (is_quiescing(cache))
1668		return !list_empty(&cache->quiesced_migrations) ||
1669			!list_empty(&cache->completed_migrations) ||
1670			!list_empty(&cache->need_commit_migrations);
1671	else
1672		return !bio_list_empty(&cache->deferred_bios) ||
1673			!bio_list_empty(&cache->deferred_flush_bios) ||
1674			!bio_list_empty(&cache->deferred_writethrough_bios) ||
1675			!list_empty(&cache->quiesced_migrations) ||
1676			!list_empty(&cache->completed_migrations) ||
1677			!list_empty(&cache->need_commit_migrations) ||
1678			cache->invalidate;
1679}
1680
1681static void do_worker(struct work_struct *ws)
1682{
1683	struct cache *cache = container_of(ws, struct cache, worker);
1684
1685	do {
1686		if (!is_quiescing(cache)) {
1687			writeback_some_dirty_blocks(cache);
1688			process_deferred_writethrough_bios(cache);
1689			process_deferred_bios(cache);
1690			process_invalidation_requests(cache);
1691		}
1692
1693		process_migrations(cache, &cache->quiesced_migrations, issue_copy);
1694		process_migrations(cache, &cache->completed_migrations, complete_migration);
1695
1696		if (commit_if_needed(cache)) {
1697			process_deferred_flush_bios(cache, false);
1698
1699			/*
1700			 * FIXME: rollback metadata or just go into a
1701			 * failure mode and error everything
1702			 */
1703		} else {
1704			process_deferred_flush_bios(cache, true);
1705			process_migrations(cache, &cache->need_commit_migrations,
1706					   migration_success_post_commit);
1707		}
1708
1709		ack_quiescing(cache);
1710
1711	} while (more_work(cache));
1712}
1713
1714/*
1715 * We want to commit periodically so that not too much
1716 * unwritten metadata builds up.
1717 */
1718static void do_waker(struct work_struct *ws)
1719{
1720	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1721	policy_tick(cache->policy);
1722	wake_worker(cache);
1723	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1724}
1725
1726/*----------------------------------------------------------------*/
1727
1728static int is_congested(struct dm_dev *dev, int bdi_bits)
1729{
1730	struct request_queue *q = bdev_get_queue(dev->bdev);
1731	return bdi_congested(&q->backing_dev_info, bdi_bits);
1732}
1733
1734static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1735{
1736	struct cache *cache = container_of(cb, struct cache, callbacks);
1737
1738	return is_congested(cache->origin_dev, bdi_bits) ||
1739		is_congested(cache->cache_dev, bdi_bits);
1740}
1741
1742/*----------------------------------------------------------------
1743 * Target methods
1744 *--------------------------------------------------------------*/
1745
1746/*
1747 * This function gets called on the error paths of the constructor, so we
1748 * have to cope with a partially initialised struct.
1749 */
1750static void destroy(struct cache *cache)
1751{
1752	unsigned i;
1753
1754	if (cache->next_migration)
1755		mempool_free(cache->next_migration, cache->migration_pool);
1756
1757	if (cache->migration_pool)
1758		mempool_destroy(cache->migration_pool);
1759
1760	if (cache->all_io_ds)
1761		dm_deferred_set_destroy(cache->all_io_ds);
1762
1763	if (cache->prison)
1764		dm_bio_prison_destroy(cache->prison);
1765
1766	if (cache->wq)
1767		destroy_workqueue(cache->wq);
1768
1769	if (cache->dirty_bitset)
1770		free_bitset(cache->dirty_bitset);
1771
1772	if (cache->discard_bitset)
1773		free_bitset(cache->discard_bitset);
1774
1775	if (cache->copier)
1776		dm_kcopyd_client_destroy(cache->copier);
1777
1778	if (cache->cmd)
1779		dm_cache_metadata_close(cache->cmd);
1780
1781	if (cache->metadata_dev)
1782		dm_put_device(cache->ti, cache->metadata_dev);
1783
1784	if (cache->origin_dev)
1785		dm_put_device(cache->ti, cache->origin_dev);
1786
1787	if (cache->cache_dev)
1788		dm_put_device(cache->ti, cache->cache_dev);
1789
1790	if (cache->policy)
1791		dm_cache_policy_destroy(cache->policy);
1792
1793	for (i = 0; i < cache->nr_ctr_args ; i++)
1794		kfree(cache->ctr_args[i]);
1795	kfree(cache->ctr_args);
1796
1797	kfree(cache);
1798}
1799
1800static void cache_dtr(struct dm_target *ti)
1801{
1802	struct cache *cache = ti->private;
1803
1804	destroy(cache);
1805}
1806
1807static sector_t get_dev_size(struct dm_dev *dev)
1808{
1809	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
1810}
1811
1812/*----------------------------------------------------------------*/
1813
1814/*
1815 * Construct a cache device mapping.
1816 *
1817 * cache <metadata dev> <cache dev> <origin dev> <block size>
1818 *       <#feature args> [<feature arg>]*
1819 *       <policy> <#policy args> [<policy arg>]*
1820 *
1821 * metadata dev    : fast device holding the persistent metadata
1822 * cache dev	   : fast device holding cached data blocks
1823 * origin dev	   : slow device holding original data blocks
1824 * block size	   : cache unit size in sectors
1825 *
1826 * #feature args   : number of feature arguments passed
1827 * feature args    : writethrough.  (The default is writeback.)
1828 *
1829 * policy	   : the replacement policy to use
1830 * #policy args    : an even number of policy arguments corresponding
1831 *		     to key/value pairs passed to the policy
1832 * policy args	   : key/value pairs passed to the policy
1833 *		     E.g. 'sequential_threshold 1024'
1834 *		     See cache-policies.txt for details.
1835 *
1836 * Optional feature arguments are:
1837 *   writethrough  : write through caching that prohibits cache block
1838 *		     content from being different from origin block content.
1839 *		     Without this argument, the default behaviour is to write
1840 *		     back cache block contents later for performance reasons,
1841 *		     so they may differ from the corresponding origin blocks.
1842 */
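/*
 * A hypothetical table line (device names assumed), using a 512-sector
 * block size, writethrough mode and the mq policy with one key/value
 * pair:
 *
 *	0 409600 cache /dev/fast_meta /dev/fast /dev/slow 512
 *		1 writethrough mq 2 sequential_threshold 1024
 */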
1843struct cache_args {
1844	struct dm_target *ti;
1845
1846	struct dm_dev *metadata_dev;
1847
1848	struct dm_dev *cache_dev;
1849	sector_t cache_sectors;
1850
1851	struct dm_dev *origin_dev;
1852	sector_t origin_sectors;
1853
1854	uint32_t block_size;
1855
1856	const char *policy_name;
1857	int policy_argc;
1858	const char **policy_argv;
1859
1860	struct cache_features features;
1861};
1862
1863static void destroy_cache_args(struct cache_args *ca)
1864{
1865	if (ca->metadata_dev)
1866		dm_put_device(ca->ti, ca->metadata_dev);
1867
1868	if (ca->cache_dev)
1869		dm_put_device(ca->ti, ca->cache_dev);
1870
1871	if (ca->origin_dev)
1872		dm_put_device(ca->ti, ca->origin_dev);
1873
1874	kfree(ca);
1875}
1876
1877static bool at_least_one_arg(struct dm_arg_set *as, char **error)
1878{
1879	if (!as->argc) {
1880		*error = "Insufficient args";
1881		return false;
1882	}
1883
1884	return true;
1885}
1886
1887static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
1888			      char **error)
1889{
1890	int r;
1891	sector_t metadata_dev_size;
1892	char b[BDEVNAME_SIZE];
1893
1894	if (!at_least_one_arg(as, error))
1895		return -EINVAL;
1896
1897	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1898			  &ca->metadata_dev);
1899	if (r) {
1900		*error = "Error opening metadata device";
1901		return r;
1902	}
1903
1904	metadata_dev_size = get_dev_size(ca->metadata_dev);
1905	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
1906		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1907		       bdevname(ca->metadata_dev->bdev, b), (unsigned) DM_CACHE_METADATA_MAX_SECTORS_WARNING);
1908
1909	return 0;
1910}
1911
1912static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
1913			   char **error)
1914{
1915	int r;
1916
1917	if (!at_least_one_arg(as, error))
1918		return -EINVAL;
1919
1920	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1921			  &ca->cache_dev);
1922	if (r) {
1923		*error = "Error opening cache device";
1924		return r;
1925	}
1926	ca->cache_sectors = get_dev_size(ca->cache_dev);
1927
1928	return 0;
1929}
1930
1931static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
1932			    char **error)
1933{
1934	int r;
1935
1936	if (!at_least_one_arg(as, error))
1937		return -EINVAL;
1938
1939	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1940			  &ca->origin_dev);
1941	if (r) {
1942		*error = "Error opening origin device";
1943		return r;
1944	}
1945
1946	ca->origin_sectors = get_dev_size(ca->origin_dev);
1947	if (ca->ti->len > ca->origin_sectors) {
1948		*error = "Device size larger than cached device";
1949		return -EINVAL;
1950	}
1951
1952	return 0;
1953}
1954
1955static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
1956			    char **error)
1957{
1958	unsigned long block_size;
1959
1960	if (!at_least_one_arg(as, error))
1961		return -EINVAL;
1962
1963	if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
1964	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1965	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
1966	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
1967		*error = "Invalid data block size";
1968		return -EINVAL;
1969	}
1970
1971	if (block_size > ca->cache_sectors) {
1972		*error = "Data block size is larger than the cache device";
1973		return -EINVAL;
1974	}
1975
1976	ca->block_size = block_size;
1977
1978	return 0;
1979}
1980
1981static void init_features(struct cache_features *cf)
1982{
1983	cf->mode = CM_WRITE;
1984	cf->io_mode = CM_IO_WRITEBACK;
1985}
1986
1987static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
1988			  char **error)
1989{
1990	static struct dm_arg _args[] = {
1991		{0, 1, "Invalid number of cache feature arguments"},
1992	};
1993
1994	int r;
1995	unsigned argc;
1996	const char *arg;
1997	struct cache_features *cf = &ca->features;
1998
1999	init_features(cf);
2000
2001	r = dm_read_arg_group(_args, as, &argc, error);
2002	if (r)
2003		return -EINVAL;
2004
2005	while (argc--) {
2006		arg = dm_shift_arg(as);
2007
2008		if (!strcasecmp(arg, "writeback"))
2009			cf->io_mode = CM_IO_WRITEBACK;
2010
2011		else if (!strcasecmp(arg, "writethrough"))
2012			cf->io_mode = CM_IO_WRITETHROUGH;
2013
2014		else if (!strcasecmp(arg, "passthrough"))
2015			cf->io_mode = CM_IO_PASSTHROUGH;
2016
2017		else {
2018			*error = "Unrecognised cache feature requested";
2019			return -EINVAL;
2020		}
2021	}
2022
2023	return 0;
2024}
2025
2026static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
2027			char **error)
2028{
2029	static struct dm_arg _args[] = {
2030		{0, 1024, "Invalid number of policy arguments"},
2031	};
2032
2033	int r;
2034
2035	if (!at_least_one_arg(as, error))
2036		return -EINVAL;
2037
2038	ca->policy_name = dm_shift_arg(as);
2039
2040	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
2041	if (r)
2042		return -EINVAL;
2043
2044	ca->policy_argv = (const char **)as->argv;
2045	dm_consume_args(as, ca->policy_argc);
2046
2047	return 0;
2048}
2049
2050static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
2051			    char **error)
2052{
2053	int r;
2054	struct dm_arg_set as;
2055
2056	as.argc = argc;
2057	as.argv = argv;
2058
2059	r = parse_metadata_dev(ca, &as, error);
2060	if (r)
2061		return r;
2062
2063	r = parse_cache_dev(ca, &as, error);
2064	if (r)
2065		return r;
2066
2067	r = parse_origin_dev(ca, &as, error);
2068	if (r)
2069		return r;
2070
2071	r = parse_block_size(ca, &as, error);
2072	if (r)
2073		return r;
2074
2075	r = parse_features(ca, &as, error);
2076	if (r)
2077		return r;
2078
2079	r = parse_policy(ca, &as, error);
2080	if (r)
2081		return r;
2082
2083	return 0;
2084}
2085
2086/*----------------------------------------------------------------*/
2087
2088static struct kmem_cache *migration_cache;
2089
2090#define NOT_CORE_OPTION 1
2091
2092static int process_config_option(struct cache *cache, const char *key, const char *value)
2093{
2094	unsigned long tmp;
2095
2096	if (!strcasecmp(key, "migration_threshold")) {
2097		if (kstrtoul(value, 10, &tmp))
2098			return -EINVAL;
2099
2100		cache->migration_threshold = tmp;
2101		return 0;
2102	}
2103
2104	return NOT_CORE_OPTION;
2105}
2106
2107static int set_config_value(struct cache *cache, const char *key, const char *value)
2108{
2109	int r = process_config_option(cache, key, value);
2110
2111	if (r == NOT_CORE_OPTION)
2112		r = policy_set_config_value(cache->policy, key, value);
2113
2114	if (r)
2115		DMWARN("bad config value for %s: %s", key, value);
2116
2117	return r;
2118}
2119
2120static int set_config_values(struct cache *cache, int argc, const char **argv)
2121{
2122	int r = 0;
2123
2124	if (argc & 1) {
2125		DMWARN("Odd number of policy arguments given; they should be <key> <value> pairs.");
2126		return -EINVAL;
2127	}
2128
2129	while (argc) {
2130		r = set_config_value(cache, argv[0], argv[1]);
2131		if (r)
2132			break;
2133
2134		argc -= 2;
2135		argv += 2;
2136	}
2137
2138	return r;
2139}
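/*
 * Example: for the policy fragment "mq 4 sequential_threshold 1024
 * random_threshold 8" (key names illustrative), parse_policy() leaves
 * argc == 4 and argv == {"sequential_threshold", "1024",
 * "random_threshold", "8"}; the loop above applies the pairs in order.
 */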
2140
2141static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2142			       char **error)
2143{
2144	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
2145							   cache->cache_size,
2146							   cache->origin_sectors,
2147							   cache->sectors_per_block);
2148	if (IS_ERR(p)) {
2149		*error = "Error creating cache's policy";
2150		return PTR_ERR(p);
2151	}
2152	cache->policy = p;
2153
2154	return 0;
2155}
2156
2157#define DEFAULT_MIGRATION_THRESHOLD 2048
2158
2159static int cache_create(struct cache_args *ca, struct cache **result)
2160{
2161	int r = 0;
2162	char **error = &ca->ti->error;
2163	struct cache *cache;
2164	struct dm_target *ti = ca->ti;
2165	dm_block_t origin_blocks;
2166	struct dm_cache_metadata *cmd;
2167	bool may_format = ca->features.mode == CM_WRITE;
2168
2169	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2170	if (!cache)
2171		return -ENOMEM;
2172
2173	cache->ti = ca->ti;
2174	ti->private = cache;
2175	ti->num_flush_bios = 2;
2176	ti->flush_supported = true;
2177
2178	ti->num_discard_bios = 1;
2179	ti->discards_supported = true;
2180	ti->discard_zeroes_data_unsupported = true;
2181	/* Discard bios must be split on a block boundary */
2182	ti->split_discard_bios = true;
2183
2184	cache->features = ca->features;
2185	ti->per_bio_data_size = get_per_bio_data_size(cache);
2186
2187	cache->callbacks.congested_fn = cache_is_congested;
2188	dm_table_add_target_callbacks(ti->table, &cache->callbacks);
2189
2190	cache->metadata_dev = ca->metadata_dev;
2191	cache->origin_dev = ca->origin_dev;
2192	cache->cache_dev = ca->cache_dev;
2193
2194	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2195
2196	/* FIXME: factor out this whole section */
2197	origin_blocks = cache->origin_sectors = ca->origin_sectors;
2198	origin_blocks = block_div(origin_blocks, ca->block_size);
2199	cache->origin_blocks = to_oblock(origin_blocks);
2200
2201	cache->sectors_per_block = ca->block_size;
2202	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2203		r = -EINVAL;
2204		goto bad;
2205	}
2206
2207	if (ca->block_size & (ca->block_size - 1)) {
2208		dm_block_t cache_size = ca->cache_sectors;
2209
2210		cache->sectors_per_block_shift = -1;
2211		cache_size = block_div(cache_size, ca->block_size);
2212		cache->cache_size = to_cblock(cache_size);
2213	} else {
2214		cache->sectors_per_block_shift = __ffs(ca->block_size);
2215		cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
2216	}
2217
2218	r = create_cache_policy(cache, ca, error);
2219	if (r)
2220		goto bad;
2221
2222	cache->policy_nr_args = ca->policy_argc;
2223	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2224
2225	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2226	if (r) {
2227		*error = "Error setting cache policy's config values";
2228		goto bad;
2229	}
2230
2231	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2232				     ca->block_size, may_format,
2233				     dm_cache_policy_get_hint_size(cache->policy));
2234	if (IS_ERR(cmd)) {
2235		*error = "Error creating metadata object";
2236		r = PTR_ERR(cmd);
2237		goto bad;
2238	}
2239	cache->cmd = cmd;
2240
2241	if (passthrough_mode(&cache->features)) {
2242		bool all_clean;
2243
2244		r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2245		if (r) {
2246			*error = "dm_cache_metadata_all_clean() failed";
2247			goto bad;
2248		}
2249
2250		if (!all_clean) {
2251			*error = "Cannot enter passthrough mode unless all blocks are clean";
2252			r = -EINVAL;
2253			goto bad;
2254		}
2255	}
2256
2257	spin_lock_init(&cache->lock);
2258	bio_list_init(&cache->deferred_bios);
2259	bio_list_init(&cache->deferred_flush_bios);
2260	bio_list_init(&cache->deferred_writethrough_bios);
2261	INIT_LIST_HEAD(&cache->quiesced_migrations);
2262	INIT_LIST_HEAD(&cache->completed_migrations);
2263	INIT_LIST_HEAD(&cache->need_commit_migrations);
2264	atomic_set(&cache->nr_migrations, 0);
2265	init_waitqueue_head(&cache->migration_wait);
2266
2267	init_waitqueue_head(&cache->quiescing_wait);
2268	atomic_set(&cache->quiescing, 0);
2269	atomic_set(&cache->quiescing_ack, 0);
2270
2271	r = -ENOMEM;
2272	cache->nr_dirty = 0;
2273	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2274	if (!cache->dirty_bitset) {
2275		*error = "could not allocate dirty bitset";
2276		goto bad;
2277	}
2278	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2279
2280	cache->discard_nr_blocks = cache->origin_blocks;
2281	cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks));
2282	if (!cache->discard_bitset) {
2283		*error = "could not allocate discard bitset";
2284		goto bad;
2285	}
2286	clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks));
2287
2288	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2289	if (IS_ERR(cache->copier)) {
2290		*error = "could not create kcopyd client";
2291		r = PTR_ERR(cache->copier);
2292		goto bad;
2293	}
2294
2295	cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2296	if (!cache->wq) {
2297		*error = "could not create workqueue for metadata object";
2298		goto bad;
2299	}
2300	INIT_WORK(&cache->worker, do_worker);
2301	INIT_DELAYED_WORK(&cache->waker, do_waker);
2302	cache->last_commit_jiffies = jiffies;
2303
2304	cache->prison = dm_bio_prison_create(PRISON_CELLS);
2305	if (!cache->prison) {
2306		*error = "could not create bio prison";
2307		goto bad;
2308	}
2309
2310	cache->all_io_ds = dm_deferred_set_create();
2311	if (!cache->all_io_ds) {
2312		*error = "could not create all_io deferred set";
2313		goto bad;
2314	}
2315
2316	cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
2317							 migration_cache);
2318	if (!cache->migration_pool) {
2319		*error = "Error creating cache's migration mempool";
2320		goto bad;
2321	}
2322
2323	cache->next_migration = NULL;
2324
2325	cache->need_tick_bio = true;
2326	cache->sized = false;
2327	cache->invalidate = false;
2328	cache->commit_requested = false;
2329	cache->loaded_mappings = false;
2330	cache->loaded_discards = false;
2331
2332	load_stats(cache);
2333
2334	atomic_set(&cache->stats.demotion, 0);
2335	atomic_set(&cache->stats.promotion, 0);
2336	atomic_set(&cache->stats.copies_avoided, 0);
2337	atomic_set(&cache->stats.cache_cell_clash, 0);
2338	atomic_set(&cache->stats.commit_count, 0);
2339	atomic_set(&cache->stats.discard_count, 0);
2340
2341	spin_lock_init(&cache->invalidation_lock);
2342	INIT_LIST_HEAD(&cache->invalidation_requests);
2343
2344	*result = cache;
2345	return 0;
2346
2347bad:
2348	destroy(cache);
2349	return r;
2350}
2351
2352static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2353{
2354	unsigned i;
2355	const char **copy;
2356
2357	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2358	if (!copy)
2359		return -ENOMEM;
2360	for (i = 0; i < argc; i++) {
2361		copy[i] = kstrdup(argv[i], GFP_KERNEL);
2362		if (!copy[i]) {
2363			while (i--)
2364				kfree(copy[i]);
2365			kfree(copy);
2366			return -ENOMEM;
2367		}
2368	}
2369
2370	cache->nr_ctr_args = argc;
2371	cache->ctr_args = copy;
2372
2373	return 0;
2374}
2375
2376static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2377{
2378	int r = -EINVAL;
2379	struct cache_args *ca;
2380	struct cache *cache = NULL;
2381
2382	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2383	if (!ca) {
2384		ti->error = "Error allocating memory for cache";
2385		return -ENOMEM;
2386	}
2387	ca->ti = ti;
2388
2389	r = parse_cache_args(ca, argc, argv, &ti->error);
2390	if (r)
2391		goto out;
2392
2393	r = cache_create(ca, &cache);
2394	if (r)
2395		goto out;
2396
2397	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2398	if (r) {
2399		destroy(cache);
2400		goto out;
2401	}
2402
2403	ti->private = cache;
2404
2405out:
2406	destroy_cache_args(ca);
2407	return r;
2408}
2409
2410static int cache_map(struct dm_target *ti, struct bio *bio)
2411{
2412	struct cache *cache = ti->private;
2413
2414	int r;
2415	dm_oblock_t block = get_bio_block(cache, bio);
2416	size_t pb_data_size = get_per_bio_data_size(cache);
2417	bool can_migrate = false;
2418	bool discarded_block;
2419	struct dm_bio_prison_cell *cell;
2420	struct policy_result lookup_result;
2421	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
2422
2423	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2424		/*
2425		 * This can only occur if the io goes to a partial block at
2426		 * the end of the origin device.  We don't cache these.
2427		 * Just remap to the origin and carry on.
2428		 */
2429		remap_to_origin(cache, bio);
2430		return DM_MAPIO_REMAPPED;
2431	}
2432
2433	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
2434		defer_bio(cache, bio);
2435		return DM_MAPIO_SUBMITTED;
2436	}
2437
2438	/*
2439	 * Check to see if that block is currently migrating.
2440	 */
2441	cell = alloc_prison_cell(cache);
2442	if (!cell) {
2443		defer_bio(cache, bio);
2444		return DM_MAPIO_SUBMITTED;
2445	}
2446
2447	r = bio_detain(cache, block, bio, cell,
2448		       (cell_free_fn) free_prison_cell,
2449		       cache, &cell);
2450	if (r) {
2451		if (r < 0)
2452			defer_bio(cache, bio);
2453
2454		return DM_MAPIO_SUBMITTED;
2455	}
2456
2457	discarded_block = is_discarded_oblock(cache, block);
2458
2459	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
2460		       bio, &lookup_result);
2461	if (r == -EWOULDBLOCK) {
2462		cell_defer(cache, cell, true);
2463		return DM_MAPIO_SUBMITTED;
2464
2465	} else if (r) {
2466		DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
2467		bio_io_error(bio);
2468		return DM_MAPIO_SUBMITTED;
2469	}
2470
2471	r = DM_MAPIO_REMAPPED;
2472	switch (lookup_result.op) {
2473	case POLICY_HIT:
2474		if (passthrough_mode(&cache->features)) {
2475			if (bio_data_dir(bio) == WRITE) {
2476				/*
2477				 * We need to invalidate this block, so
2478				 * defer for the worker thread.
2479				 */
2480				cell_defer(cache, cell, true);
2481				r = DM_MAPIO_SUBMITTED;
2482
2483			} else {
2484				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2485				inc_miss_counter(cache, bio);
2486				remap_to_origin_clear_discard(cache, bio, block);
2487
2488				cell_defer(cache, cell, false);
2489			}
2490
2491		} else {
2492			inc_hit_counter(cache, bio);
2493			pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2494
2495			if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
2496			    !is_dirty(cache, lookup_result.cblock))
2497				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
2498			else
2499				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
2500
2501			cell_defer(cache, cell, false);
2502		}
2503		break;
2504
2505	case POLICY_MISS:
2506		inc_miss_counter(cache, bio);
2507		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2508
2509		if (pb->req_nr != 0) {
2510			/*
2511			 * This is a duplicate writethrough io that is no
2512			 * longer needed because the block has been demoted.
2513			 */
2514			bio_endio(bio, 0);
2515			cell_defer(cache, cell, false);
2516			return DM_MAPIO_SUBMITTED;
2517		} else {
2518			remap_to_origin_clear_discard(cache, bio, block);
2519			cell_defer(cache, cell, false);
2520		}
2521		break;
2522
2523	default:
2524		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
2525			    (unsigned) lookup_result.op);
2526		bio_io_error(bio);
2527		r = DM_MAPIO_SUBMITTED;
2528	}
2529
2530	return r;
2531}
2532
2533static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
2534{
2535	struct cache *cache = ti->private;
2536	unsigned long flags;
2537	size_t pb_data_size = get_per_bio_data_size(cache);
2538	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
2539
2540	if (pb->tick) {
2541		policy_tick(cache->policy);
2542
2543		spin_lock_irqsave(&cache->lock, flags);
2544		cache->need_tick_bio = true;
2545		spin_unlock_irqrestore(&cache->lock, flags);
2546	}
2547
2548	check_for_quiesced_migrations(cache, pb);
2549
2550	return 0;
2551}
2552
2553static int write_dirty_bitset(struct cache *cache)
2554{
2555	int r;
	unsigned i;
2556
2557	for (i = 0; i < from_cblock(cache->cache_size); i++) {
2558		r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
2559				       is_dirty(cache, to_cblock(i)));
2560		if (r)
2561			return r;
2562	}
2563
2564	return 0;
2565}
2566
2567static int write_discard_bitset(struct cache *cache)
2568{
2569	int r;
	unsigned i;
2570
2571	r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block,
2572					   cache->origin_blocks);
2573	if (r) {
2574		DMERR("could not resize on-disk discard bitset");
2575		return r;
2576	}
2577
2578	for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) {
2579		r = dm_cache_set_discard(cache->cmd, to_oblock(i),
2580					 is_discarded(cache, to_oblock(i)));
2581		if (r)
2582			return r;
2583	}
2584
2585	return 0;
2586}
2587
2588/*
2589 * returns true on success
2590 */
2591static bool sync_metadata(struct cache *cache)
2592{
2593	int r1, r2, r3, r4;
2594
2595	r1 = write_dirty_bitset(cache);
2596	if (r1)
2597		DMERR("could not write dirty bitset");
2598
2599	r2 = write_discard_bitset(cache);
2600	if (r2)
2601		DMERR("could not write discard bitset");
2602
2603	save_stats(cache);
2604
2605	r3 = dm_cache_write_hints(cache->cmd, cache->policy);
2606	if (r3)
2607		DMERR("could not write hints");
2608
2609	/*
2610	 * If writing the above metadata failed, we still commit, but don't
2611	 * set the clean shutdown flag.  This will effectively force every
2612	 * dirty bit to be set on reload.
2613	 */
2614	r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
2615	if (r4)
2616		DMERR("could not write cache metadata.  Data loss may occur.");
2617
2618	return !r1 && !r2 && !r3 && !r4;
2619}
2620
2621static void cache_postsuspend(struct dm_target *ti)
2622{
2623	struct cache *cache = ti->private;
2624
2625	start_quiescing(cache);
2626	wait_for_migrations(cache);
2627	stop_worker(cache);
2628	requeue_deferred_io(cache);
2629	stop_quiescing(cache);
2630
2631	(void) sync_metadata(cache);
2632}
2633
2634static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2635			bool dirty, uint32_t hint, bool hint_valid)
2636{
2637	int r;
2638	struct cache *cache = context;
2639
2640	r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
2641	if (r)
2642		return r;
2643
2644	if (dirty)
2645		set_dirty(cache, oblock, cblock);
2646	else
2647		clear_dirty(cache, oblock, cblock);
2648
2649	return 0;
2650}
2651
2652static int load_discard(void *context, sector_t discard_block_size,
2653			dm_oblock_t oblock, bool discard)
2654{
2655	struct cache *cache = context;
2656
2657	if (discard)
2658		set_discard(cache, oblock);
2659	else
2660		clear_discard(cache, oblock);
2661
2662	return 0;
2663}
2664
2665static dm_cblock_t get_cache_dev_size(struct cache *cache)
2666{
2667	sector_t size = get_dev_size(cache->cache_dev);
2668	(void) sector_div(size, cache->sectors_per_block);
2669	return to_cblock(size);
2670}
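
/*
 * E.g. a 64 GiB cache device is 134217728 512-byte sectors; with
 * sectors_per_block == 1024 (512 KiB cache blocks) sector_div() leaves
 * size == 131072 cache blocks, silently ignoring any partial block at
 * the end of the device.
 */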
2671
2672static bool can_resize(struct cache *cache, dm_cblock_t new_size)
2673{
2674	if (from_cblock(new_size) > from_cblock(cache->cache_size))
2675		return true;
2676
2677	/*
2678	 * We can't drop a dirty block when shrinking the cache.
2679	 */
2680	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
2681		if (is_dirty(cache, new_size)) {
2682			DMERR("unable to shrink cache; cache block %llu is dirty",
2683			      (unsigned long long) from_cblock(new_size));
2684			return false;
2685		}
2686		new_size = to_cblock(from_cblock(new_size) + 1);
2687	}
2688
2689	return true;
2690}
2691
2692static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2693{
2694	int r;
2695
2696	r = dm_cache_resize(cache->cmd, new_size);
2697	if (r) {
2698		DMERR("could not resize cache metadata");
2699		return r;
2700	}
2701
2702	cache->cache_size = new_size;
2703
2704	return 0;
2705}
2706
2707static int cache_preresume(struct dm_target *ti)
2708{
2709	int r = 0;
2710	struct cache *cache = ti->private;
2711	dm_cblock_t csize = get_cache_dev_size(cache);
2712
2713	/*
2714	 * Check to see if the cache device has been resized.
2715	 */
2716	if (!cache->sized) {
2717		r = resize_cache_dev(cache, csize);
2718		if (r)
2719			return r;
2720
2721		cache->sized = true;
2722
2723	} else if (csize != cache->cache_size) {
2724		if (!can_resize(cache, csize))
2725			return -EINVAL;
2726
2727		r = resize_cache_dev(cache, csize);
2728		if (r)
2729			return r;
2730	}
2731
2732	if (!cache->loaded_mappings) {
2733		r = dm_cache_load_mappings(cache->cmd, cache->policy,
2734					   load_mapping, cache);
2735		if (r) {
2736			DMERR("could not load cache mappings");
2737			return r;
2738		}
2739
2740		cache->loaded_mappings = true;
2741	}
2742
2743	if (!cache->loaded_discards) {
2744		r = dm_cache_load_discards(cache->cmd, load_discard, cache);
2745		if (r) {
2746			DMERR("could not load origin discards");
2747			return r;
2748		}
2749
2750		cache->loaded_discards = true;
2751	}
2752
2753	return r;
2754}
2755
2756static void cache_resume(struct dm_target *ti)
2757{
2758	struct cache *cache = ti->private;
2759
2760	cache->need_tick_bio = true;
2761	do_waker(&cache->waker.work);
2762}
2763
2764/*
2765 * Status format:
2766 *
2767 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
2768 * <cache block size> <#used cache blocks>/<#total cache blocks>
2769 * <#read hits> <#read misses> <#write hits> <#write misses>
2770 * <#demotions> <#promotions> <#dirty>
2771 * <#features> <features>*
2772 * <#core args> <core args>
2773 * <policy name> <#policy args> <policy args>*
2774 */
2775static void cache_status(struct dm_target *ti, status_type_t type,
2776			 unsigned status_flags, char *result, unsigned maxlen)
2777{
2778	int r = 0;
2779	unsigned i;
2780	ssize_t sz = 0;
2781	dm_block_t nr_free_blocks_metadata = 0;
2782	dm_block_t nr_blocks_metadata = 0;
2783	char buf[BDEVNAME_SIZE];
2784	struct cache *cache = ti->private;
2785	dm_cblock_t residency;
2786
2787	switch (type) {
2788	case STATUSTYPE_INFO:
2789		/* Commit to ensure statistics aren't out-of-date */
2790		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
2791			r = dm_cache_commit(cache->cmd, false);
2792			if (r)
2793				DMERR("could not commit metadata for accurate status");
2794		}
2795
2796		r = dm_cache_get_free_metadata_block_count(cache->cmd,
2797							   &nr_free_blocks_metadata);
2798		if (r) {
2799			DMERR("could not get metadata free block count");
2800			goto err;
2801		}
2802
2803		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
2804		if (r) {
2805			DMERR("could not get metadata device size");
2806			goto err;
2807		}
2808
2809		residency = policy_residency(cache->policy);
2810
2811		DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
2812		       (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
2813		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2814		       (unsigned long long)nr_blocks_metadata,
2815		       (unsigned) cache->sectors_per_block,
2816		       (unsigned long long) from_cblock(residency),
2817		       (unsigned long long) from_cblock(cache->cache_size),
2818		       (unsigned) atomic_read(&cache->stats.read_hit),
2819		       (unsigned) atomic_read(&cache->stats.read_miss),
2820		       (unsigned) atomic_read(&cache->stats.write_hit),
2821		       (unsigned) atomic_read(&cache->stats.write_miss),
2822		       (unsigned) atomic_read(&cache->stats.demotion),
2823		       (unsigned) atomic_read(&cache->stats.promotion),
2824		       (unsigned long long) from_cblock(cache->nr_dirty));
2825
2826		if (writethrough_mode(&cache->features))
2827			DMEMIT("1 writethrough ");
2828
2829		else if (passthrough_mode(&cache->features))
2830			DMEMIT("1 passthrough ");
2831
2832		else if (writeback_mode(&cache->features))
2833			DMEMIT("1 writeback ");
2834
2835		else {
2836			DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
2837			goto err;
2838		}
2839
2840		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
2841
2842		DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
2843		if (sz < maxlen) {
2844			r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
2845			if (r)
2846				DMERR("policy_emit_config_values returned %d", r);
2847		}
2848
2849		break;
2850
2851	case STATUSTYPE_TABLE:
2852		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
2853		DMEMIT("%s ", buf);
2854		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
2855		DMEMIT("%s ", buf);
2856		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
2857		DMEMIT("%s", buf);
2858
2859		for (i = 0; i < cache->nr_ctr_args - 1; i++)
2860			DMEMIT(" %s", cache->ctr_args[i]);
2861		if (cache->nr_ctr_args)
2862			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
2863	}
2864
2865	return;
2866
2867err:
2868	DMEMIT("Error");
2869}
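
/*
 * An illustrative STATUSTYPE_INFO line (all values invented; the policy
 * name and its key/value pairs come from policy_emit_config_values()):
 *
 *   8 72/8192 1024 43571/131072 153462 6213 331780 12441 25 431 96
 *   1 writeback 2 migration_threshold 204800 mq 4 random_threshold 4
 *   sequential_threshold 512
 */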
2870
2871/*
2872 * A cache block range can take two forms:
2873 *
2874	 * i) A single cblock, e.g. '3456'
2875	 * ii) A begin and end cblock with a dash between, e.g. 123-234
2876 */
2877static int parse_cblock_range(struct cache *cache, const char *str,
2878			      struct cblock_range *result)
2879{
2880	char dummy;
2881	uint64_t b, e;
2882	int r;
2883
2884	/*
2885	 * Try to parse form (ii) first.
2886	 */
2887	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
2888	if (r < 0)
2889		return r;
2890
2891	if (r == 2) {
2892		result->begin = to_cblock(b);
2893		result->end = to_cblock(e);
2894		return 0;
2895	}
2896
2897	/*
2898	 * That didn't work, try form (i).
2899	 */
2900	r = sscanf(str, "%llu%c", &b, &dummy);
2901	if (r < 0)
2902		return r;
2903
2904	if (r == 1) {
2905		result->begin = to_cblock(b);
2906		result->end = to_cblock(from_cblock(result->begin) + 1u);
2907		return 0;
2908	}
2909
2910	DMERR("invalid cblock range '%s'", str);
2911	return -EINVAL;
2912}
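
/*
 * Note the resulting range is half-open: '123-234' covers cblocks
 * 123..233, and the single-cblock form is parsed as begin == b,
 * end == b + 1.  The trailing '%c' probe makes sscanf() return one more
 * conversion than expected for trailing garbage (e.g. '123-234x' yields
 * 3), so such strings fall through to the -EINVAL path above.
 */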
2913
2914static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
2915{
2916	uint64_t b = from_cblock(range->begin);
2917	uint64_t e = from_cblock(range->end);
2918	uint64_t n = from_cblock(cache->cache_size);
2919
2920	if (b >= n) {
2921		DMERR("begin cblock out of range: %llu >= %llu", b, n);
2922		return -EINVAL;
2923	}
2924
2925	if (e > n) {
2926		DMERR("end cblock out of range: %llu > %llu", e, n);
2927		return -EINVAL;
2928	}
2929
2930	if (b >= e) {
2931		DMERR("invalid cblock range: %llu >= %llu", b, e);
2932		return -EINVAL;
2933	}
2934
2935	return 0;
2936}
2937
2938static int request_invalidation(struct cache *cache, struct cblock_range *range)
2939{
2940	struct invalidation_request req;
2941
2942	INIT_LIST_HEAD(&req.list);
2943	req.cblocks = range;
2944	atomic_set(&req.complete, 0);
2945	req.err = 0;
2946	init_waitqueue_head(&req.result_wait);
2947
2948	spin_lock(&cache->invalidation_lock);
2949	list_add(&req.list, &cache->invalidation_requests);
2950	spin_unlock(&cache->invalidation_lock);
2951	wake_worker(cache);
2952
2953	wait_event(req.result_wait, atomic_read(&req.complete));
2954	return req.err;
2955}
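
/*
 * The request lives on this function's stack, which is safe only because
 * we block in wait_event() until the worker has set req.complete; the
 * worker must not touch the request after waking result_wait.
 */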
2956
2957static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
2958					      const char **cblock_ranges)
2959{
2960	int r = 0;
2961	unsigned i;
2962	struct cblock_range range;
2963
2964	if (!passthrough_mode(&cache->features)) {
2965		DMERR("cache has to be in passthrough mode for invalidation");
2966		return -EPERM;
2967	}
2968
2969	for (i = 0; i < count; i++) {
2970		r = parse_cblock_range(cache, cblock_ranges[i], &range);
2971		if (r)
2972			break;
2973
2974		r = validate_cblock_range(cache, &range);
2975		if (r)
2976			break;
2977
2978		/*
2979		 * Pass the begin and end cache blocks to the worker and wake it.
2980		 */
2981		r = request_invalidation(cache, &range);
2982		if (r)
2983			break;
2984	}
2985
2986	return r;
2987}
2988
2989/*
2990 * Supports
2991 *	"<key> <value>"
2992 * and
2993 *	"invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
2994 *
2995 * The key migration_threshold is supported by the cache target core.
2996 */
2997static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
2998{
2999	struct cache *cache = ti->private;
3000
3001	if (!argc)
3002		return -EINVAL;
3003
3004	if (!strcasecmp(argv[0], "invalidate_cblocks"))
3005		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3006
3007	if (argc != 2)
3008		return -EINVAL;
3009
3010	return set_config_value(cache, argv[0], argv[1]);
3011}
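
/*
 * Example messages (the device name is hypothetical):
 *
 *   dmsetup message my_cache 0 migration_threshold 204800
 *   dmsetup message my_cache 0 invalidate_cblocks 2345 3456-7890
 *
 * The latter is only accepted in passthrough mode (see
 * process_invalidate_cblocks_message() above).
 */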
3012
3013static int cache_iterate_devices(struct dm_target *ti,
3014				 iterate_devices_callout_fn fn, void *data)
3015{
3016	int r = 0;
3017	struct cache *cache = ti->private;
3018
3019	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
3020	if (!r)
3021		r = fn(ti, cache->origin_dev, 0, ti->len, data);
3022
3023	return r;
3024}
3025
3026/*
3027 * We assume I/O is going to the origin (which is the volume
3028 * more likely to have restrictions e.g. by being striped).
3029 * (Looking up the exact location of the data would be expensive
3030 * and could always be out of date by the time the bio is submitted.)
3031 */
3032static int cache_bvec_merge(struct dm_target *ti,
3033			    struct bvec_merge_data *bvm,
3034			    struct bio_vec *biovec, int max_size)
3035{
3036	struct cache *cache = ti->private;
3037	struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
3038
3039	if (!q->merge_bvec_fn)
3040		return max_size;
3041
3042	bvm->bi_bdev = cache->origin_dev->bdev;
3043	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3044}
3045
3046static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
3047{
3048	/*
3049	 * FIXME: these limits may be incompatible with the cache device
3050	 */
3051	limits->max_discard_sectors = cache->sectors_per_block;
3052	limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT;
3053}
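
/*
 * E.g. with sectors_per_block == 1024, discards are capped at one cache
 * block: max_discard_sectors == 1024 (512 KiB) and discard_granularity
 * == 1024 << SECTOR_SHIFT == 524288 bytes.
 */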
3054
3055static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3056{
3057	struct cache *cache = ti->private;
3058	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3059
3060	/*
3061	 * If the system-determined stacked limits are compatible with the
3062	 * cache's blocksize (io_opt is a factor) do not override them.
3063	 */
3064	if (io_opt_sectors < cache->sectors_per_block ||
3065	    do_div(io_opt_sectors, cache->sectors_per_block)) {
3066		blk_limits_io_min(limits, 0);
3067		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
3068	}
3069	set_discard_limits(cache, limits);
3070}
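
/*
 * E.g. with 512 KiB cache blocks (1024 sectors): a stacked io_opt of
 * 1 MiB (2048 sectors) is a multiple and is left alone, whereas 768 KiB
 * (1536 sectors) is not, so io_min/io_opt are overridden to 512 KiB.
 */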
3071
3072/*----------------------------------------------------------------*/
3073
3074static struct target_type cache_target = {
3075	.name = "cache",
3076	.version = {1, 4, 0},
3077	.module = THIS_MODULE,
3078	.ctr = cache_ctr,
3079	.dtr = cache_dtr,
3080	.map = cache_map,
3081	.end_io = cache_end_io,
3082	.postsuspend = cache_postsuspend,
3083	.preresume = cache_preresume,
3084	.resume = cache_resume,
3085	.status = cache_status,
3086	.message = cache_message,
3087	.iterate_devices = cache_iterate_devices,
3088	.merge = cache_bvec_merge,
3089	.io_hints = cache_io_hints,
3090};
3091
3092static int __init dm_cache_init(void)
3093{
3094	int r;
3095
3096	r = dm_register_target(&cache_target);
3097	if (r) {
3098		DMERR("cache target registration failed: %d", r);
3099		return r;
3100	}
3101
3102	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3103	if (!migration_cache) {
3104		dm_unregister_target(&cache_target);
3105		return -ENOMEM;
3106	}
3107
3108	return 0;
3109}
3110
3111static void __exit dm_cache_exit(void)
3112{
3113	dm_unregister_target(&cache_target);
3114	kmem_cache_destroy(migration_cache);
3115}
3116
3117module_init(dm_cache_init);
3118module_exit(dm_cache_exit);
3119
3120MODULE_DESCRIPTION(DM_NAME " cache target");
3121MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
3122MODULE_LICENSE("GPL");