Linux kernel source: drivers/md/dm-cache-metadata.c (v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2012 Red Hat, Inc.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm-cache-metadata.h"
   9
  10#include "persistent-data/dm-array.h"
  11#include "persistent-data/dm-bitset.h"
  12#include "persistent-data/dm-space-map.h"
  13#include "persistent-data/dm-space-map-disk.h"
  14#include "persistent-data/dm-transaction-manager.h"
  15
  16#include <linux/device-mapper.h>
  17#include <linux/refcount.h>
  18
  19/*----------------------------------------------------------------*/
  20
  21#define DM_MSG_PREFIX   "cache metadata"
  22
  23#define CACHE_SUPERBLOCK_MAGIC 06142003
  24#define CACHE_SUPERBLOCK_LOCATION 0
  25
  26/*
  27 * defines a range of metadata versions that this module can handle.
  28 */
  29#define MIN_CACHE_VERSION 1
  30#define MAX_CACHE_VERSION 2
 
 
  31
  32/*
  33 *  3 for btree insert +
  34 *  2 for btree lookup used within space map
  35 */
  36#define CACHE_MAX_CONCURRENT_LOCKS 5
  37#define SPACE_MAP_ROOT_SIZE 128
  38
  39enum superblock_flag_bits {
  40	/* for spotting crashes that would invalidate the dirty bitset */
  41	CLEAN_SHUTDOWN,
  42	/* metadata must be checked using the tools */
  43	NEEDS_CHECK,
  44};
  45
  46/*
  47 * Each mapping from cache block -> origin block carries a set of flags.
  48 */
  49enum mapping_bits {
  50	/*
  51	 * A valid mapping.  Because we're using an array we clear this
  52	 * flag for an non existant mapping.
  53	 */
  54	M_VALID = 1,
  55
  56	/*
  57	 * The data on the cache is different from that on the origin.
  58	 * This flag is only used by metadata format 1.
  59	 */
  60	M_DIRTY = 2
  61};
  62
  63struct cache_disk_superblock {
  64	__le32 csum;
  65	__le32 flags;
  66	__le64 blocknr;
  67
  68	__u8 uuid[16];
  69	__le64 magic;
  70	__le32 version;
  71
  72	__u8 policy_name[CACHE_POLICY_NAME_SIZE];
  73	__le32 policy_hint_size;
  74
  75	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
  76	__le64 mapping_root;
  77	__le64 hint_root;
  78
  79	__le64 discard_root;
  80	__le64 discard_block_size;
  81	__le64 discard_nr_blocks;
  82
  83	__le32 data_block_size;
  84	__le32 metadata_block_size;
  85	__le32 cache_blocks;
  86
  87	__le32 compat_flags;
  88	__le32 compat_ro_flags;
  89	__le32 incompat_flags;
  90
  91	__le32 read_hits;
  92	__le32 read_misses;
  93	__le32 write_hits;
  94	__le32 write_misses;
  95
  96	__le32 policy_version[CACHE_POLICY_VERSION_SIZE];
  97
  98	/*
  99	 * Metadata format 2 fields.
 100	 */
 101	__le64 dirty_root;
 102} __packed;
 103
 104struct dm_cache_metadata {
 105	refcount_t ref_count;
 106	struct list_head list;
 107
 108	unsigned int version;
 109	struct block_device *bdev;
 110	struct dm_block_manager *bm;
 111	struct dm_space_map *metadata_sm;
 112	struct dm_transaction_manager *tm;
 113
 114	struct dm_array_info info;
 115	struct dm_array_info hint_info;
 116	struct dm_disk_bitset discard_info;
 117
 118	struct rw_semaphore root_lock;
 119	unsigned long flags;
 120	dm_block_t root;
 121	dm_block_t hint_root;
 122	dm_block_t discard_root;
 123
 124	sector_t discard_block_size;
 125	dm_dblock_t discard_nr_blocks;
 126
 127	sector_t data_block_size;
 128	dm_cblock_t cache_blocks;
 129	bool changed:1;
 130	bool clean_when_opened:1;
 131
 132	char policy_name[CACHE_POLICY_NAME_SIZE];
 133	unsigned int policy_version[CACHE_POLICY_VERSION_SIZE];
 134	size_t policy_hint_size;
 135	struct dm_cache_statistics stats;
 136
 137	/*
 138	 * Reading the space map root can fail, so we read it into this
 139	 * buffer before the superblock is locked and updated.
 140	 */
 141	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
 142
 143	/*
 144	 * Set if a transaction has to be aborted but the attempt to roll
 145	 * back to the previous (good) transaction failed.  The only
 146	 * metadata operation permissible in this state is the closing of
 147	 * the device.
 148	 */
 149	bool fail_io:1;
 150
 151	/*
 152	 * Metadata format 2 fields.
 153	 */
 154	dm_block_t dirty_root;
 155	struct dm_disk_bitset dirty_info;
 156
 157	/*
 158	 * These structures are used when loading metadata.  They're too
 159	 * big to put on the stack.
 160	 */
 161	struct dm_array_cursor mapping_cursor;
 162	struct dm_array_cursor hint_cursor;
 163	struct dm_bitset_cursor dirty_cursor;
 164};
 165
 166/*
 167 *-----------------------------------------------------------------
 168 * superblock validator
 169 *-----------------------------------------------------------------
 170 */
 171#define SUPERBLOCK_CSUM_XOR 9031977
 172
/*
 * Stamp the superblock with its own location and checksum just before it
 * hits the disk.
 */
static void sb_prepare_for_write(const struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t sb_block_size)
{
	struct cache_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	/*
	 * The csum field itself is excluded: the checksum covers everything
	 * from ->flags onwards, hence the sizeof(__le32) adjustment.
	 */
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      sb_block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}
 184
 185static int check_metadata_version(struct cache_disk_superblock *disk_super)
 186{
 187	uint32_t metadata_version = le32_to_cpu(disk_super->version);
 188
 189	if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
 190		DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
 191		      metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
 192		return -EINVAL;
 193	}
 194
 195	return 0;
 196}
 197
/*
 * Validate a superblock read from disk: location, magic, checksum and
 * metadata version, in that order.  Each failure mode returns a distinct
 * errno so callers/logs can tell them apart.
 */
static int sb_check(const struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t sb_block_size)
{
	struct cache_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	/* The block must claim to live where we actually read it from. */
	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("%s failed: blocknr %llu: wanted %llu",
		      __func__, le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
		DMERR("%s failed: magic %llu: wanted %llu",
		      __func__, le64_to_cpu(disk_super->magic),
		      (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	/* Checksum covers ->flags onwards; see sb_prepare_for_write(). */
	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     sb_block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("%s failed: csum %u: wanted %u",
		      __func__, le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return check_metadata_version(disk_super);
}
 230
/* Validator passed to the block manager for all superblock reads/writes. */
static const struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
 236
 237/*----------------------------------------------------------------*/
 238
/* Take a validated read lock on the superblock. */
static int superblock_read_lock(struct dm_cache_metadata *cmd,
				struct dm_block **sblock)
{
	return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
			       &sb_validator, sblock);
}
 245
/*
 * Write-lock the superblock, zeroing its contents first.  Used when
 * formatting a fresh device.
 */
static int superblock_lock_zero(struct dm_cache_metadata *cmd,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}
 252
/* Take a validated write lock on the superblock. */
static int superblock_lock(struct dm_cache_metadata *cmd,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}
 259
 260/*----------------------------------------------------------------*/
 261
 262static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
 263{
 264	int r;
 265	unsigned int i;
 266	struct dm_block *b;
 267	__le64 *data_le, zero = cpu_to_le64(0);
 268	unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
 269
 270	/*
 271	 * We can't use a validator here - it may be all zeroes.
 272	 */
 273	r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
 274	if (r)
 275		return r;
 276
 277	data_le = dm_block_data(b);
 278	*result = true;
 279	for (i = 0; i < sb_block_size; i++) {
 280		if (data_le[i] != zero) {
 281			*result = false;
 282			break;
 283		}
 284	}
 285
 286	dm_bm_unlock(b);
 287
 288	return 0;
 289}
 290
 291static void __setup_mapping_info(struct dm_cache_metadata *cmd)
 292{
 293	struct dm_btree_value_type vt;
 294
 295	vt.context = NULL;
 296	vt.size = sizeof(__le64);
 297	vt.inc = NULL;
 298	vt.dec = NULL;
 299	vt.equal = NULL;
 300	dm_array_info_init(&cmd->info, cmd->tm, &vt);
 301
 302	if (cmd->policy_hint_size) {
 303		vt.size = sizeof(__le32);
 304		dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
 305	}
 306}
 307
/*
 * Snapshot the metadata space map root into cmd->metadata_space_map_root.
 * Done before locking the superblock because dm_sm_copy_root() can fail.
 */
static int __save_sm_root(struct dm_cache_metadata *cmd)
{
	int r;
	size_t metadata_len;

	r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
	if (r < 0)
		return r;

	return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
			       metadata_len);
}
 320
/* Copy the previously saved space map root into the on-disk superblock. */
static void __copy_sm_root(struct dm_cache_metadata *cmd,
			   struct cache_disk_superblock *disk_super)
{
	memcpy(&disk_super->metadata_space_map_root,
	       &cmd->metadata_space_map_root,
	       sizeof(cmd->metadata_space_map_root));
}
 328
/*
 * Metadata format 2 stores dirty bits in a separate on-disk bitset
 * rather than packing them into the mapping values.
 */
static bool separate_dirty_bits(struct dm_cache_metadata *cmd)
{
	return cmd->version >= 2;
}
 333
/*
 * Write the very first superblock for a freshly formatted device and
 * commit the transaction.  All fallible steps (pre-commit, space map
 * root copy) happen before the superblock is locked, so the lock is
 * only taken once success is guaranteed up to the commit itself.
 */
static int __write_initial_superblock(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;
	sector_t bdev_size = bdev_nr_sectors(cmd->bdev);

	/* FIXME: see if we can lose the max sectors limit */
	if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
		bdev_size = DM_CACHE_METADATA_MAX_SECTORS;

	r = dm_tm_pre_commit(cmd->tm);
	if (r < 0)
		return r;

	/*
	 * dm_sm_copy_root() can fail.  So we need to do it before we start
	 * updating the superblock.
	 */
	r = __save_sm_root(cmd);
	if (r)
		return r;

	r = superblock_lock_zero(cmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->flags = 0;
	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
	disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(cmd->version);
	/* Policy name/version/hint size are filled in later by the target. */
	memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
	memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
	disk_super->policy_hint_size = cpu_to_le32(0);

	__copy_sm_root(cmd, disk_super);

	disk_super->mapping_root = cpu_to_le64(cmd->root);
	disk_super->hint_root = cpu_to_le64(cmd->hint_root);
	disk_super->discard_root = cpu_to_le64(cmd->discard_root);
	disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
	disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
	disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE);
	disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
	disk_super->cache_blocks = cpu_to_le32(0);

	disk_super->read_hits = cpu_to_le32(0);
	disk_super->read_misses = cpu_to_le32(0);
	disk_super->write_hits = cpu_to_le32(0);
	disk_super->write_misses = cpu_to_le32(0);

	/* Format 2 only: root of the separate dirty bitset. */
	if (separate_dirty_bits(cmd))
		disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);

	/* dm_tm_commit() also unlocks sblock. */
	return dm_tm_commit(cmd->tm, sblock);
}
 391
/*
 * Format a brand new metadata device: create the transaction manager and
 * space map, empty mapping array, (format 2) dirty bitset and discard
 * bitset, then write the initial superblock.  On any failure the tm/sm
 * are torn down again.
 */
static int __format_metadata(struct dm_cache_metadata *cmd)
{
	int r;

	r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
				 &cmd->tm, &cmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_create_with_sm failed");
		return r;
	}

	__setup_mapping_info(cmd);

	r = dm_array_empty(&cmd->info, &cmd->root);
	if (r < 0)
		goto bad;

	if (separate_dirty_bits(cmd)) {
		dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
		r = dm_bitset_empty(&cmd->dirty_info, &cmd->dirty_root);
		if (r < 0)
			goto bad;
	}

	dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
	r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
	if (r < 0)
		goto bad;

	/* Discard geometry is configured later via dm_cache_discard_bitset_resize(). */
	cmd->discard_block_size = 0;
	cmd->discard_nr_blocks = 0;

	r = __write_initial_superblock(cmd);
	if (r)
		goto bad;

	/* A freshly formatted device is trivially clean. */
	cmd->clean_when_opened = true;
	return 0;

bad:
	dm_tm_destroy(cmd->tm);
	dm_sm_destroy(cmd->metadata_sm);

	return r;
}
 437
/*
 * Refuse to use metadata carrying feature flags we don't understand.
 * Incompat flags block any access; compat_ro flags only block
 * read-write access, so they're skipped for read-only devices.
 */
static int __check_incompat_features(struct cache_disk_superblock *disk_super,
				     struct dm_cache_metadata *cmd)
{
	uint32_t incompat_flags, features;

	incompat_flags = le32_to_cpu(disk_super->incompat_flags);
	features = incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
	if (features) {
		DMERR("could not access metadata due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	/*
	 * Check for read-only metadata to skip the following RDWR checks.
	 */
	if (bdev_read_only(cmd->bdev))
		return 0;

	features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
	if (features) {
		DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	return 0;
}
 466
/*
 * Open previously formatted metadata: validate the superblock against
 * the caller's parameters and feature support, then bring up the
 * transaction manager/space map from the stored root.
 */
static int __open_metadata(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;
	unsigned long sb_flags;

	r = superblock_read_lock(cmd, &sblock);
	if (r < 0) {
		DMERR("couldn't read lock superblock");
		return r;
	}

	disk_super = dm_block_data(sblock);

	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported",
		      le32_to_cpu(disk_super->data_block_size),
		      (unsigned long long)cmd->data_block_size);
		r = -EINVAL;
		goto bad;
	}

	r = __check_incompat_features(disk_super, cmd);
	if (r < 0)
		goto bad;

	r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
			       disk_super->metadata_space_map_root,
			       sizeof(disk_super->metadata_space_map_root),
			       &cmd->tm, &cmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_open_with_sm failed");
		goto bad;
	}

	__setup_mapping_info(cmd);
	dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
	dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
	/* Remember whether the last shutdown was clean (dirty bitset valid). */
	sb_flags = le32_to_cpu(disk_super->flags);
	cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
	dm_bm_unlock(sblock);

	return 0;

bad:
	dm_bm_unlock(sblock);
	return r;
}
 517
 518static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
 519				     bool format_device)
 520{
 521	int r;
 522	bool unformatted = false;
 523
 524	r = __superblock_all_zeroes(cmd->bm, &unformatted);
 525	if (r)
 526		return r;
 527
 528	if (unformatted)
 529		return format_device ? __format_metadata(cmd) : -EPERM;
 530
 531	return __open_metadata(cmd);
 532}
 533
/*
 * Create the block manager and open/format the metadata beneath it.
 * On failure the block manager is destroyed again and cmd->bm left
 * NULL so callers can't use a stale pointer.
 */
static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
					    bool may_format_device)
{
	int r;

	cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					  CACHE_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(cmd->bm)) {
		DMERR("could not create block manager");
		r = PTR_ERR(cmd->bm);
		cmd->bm = NULL;
		return r;
	}

	r = __open_or_format_metadata(cmd, may_format_device);
	if (r) {
		dm_block_manager_destroy(cmd->bm);
		cmd->bm = NULL;
	}

	return r;
}
 556
/*
 * Tear down space map and transaction manager.  The block manager is
 * optionally kept alive (destroy_bm == false) for callers that want to
 * rebuild the tm/sm on top of it, e.g. after an abort.
 */
static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd,
					      bool destroy_bm)
{
	dm_sm_destroy(cmd->metadata_sm);
	dm_tm_destroy(cmd->tm);
	if (destroy_bm)
		dm_block_manager_destroy(cmd->bm);
}
 565
 566typedef unsigned long (*flags_mutator)(unsigned long);
 567
 568static void update_flags(struct cache_disk_superblock *disk_super,
 569			 flags_mutator mutator)
 570{
 571	uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
 572
 573	disk_super->flags = cpu_to_le32(sb_flags);
 574}
 575
/* flags_mutator: mark the metadata as having been shut down cleanly. */
static unsigned long set_clean_shutdown(unsigned long flags)
{
	set_bit(CLEAN_SHUTDOWN, &flags);
	return flags;
}
 581
/* flags_mutator: clear the clean-shutdown flag (set while device is live). */
static unsigned long clear_clean_shutdown(unsigned long flags)
{
	clear_bit(CLEAN_SHUTDOWN, &flags);
	return flags;
}
 587
/*
 * Populate the in-core metadata state from an on-disk superblock,
 * converting endianness and wrapping raw values in their typed forms.
 * Clears cmd->changed since in-core now matches disk.
 */
static void read_superblock_fields(struct dm_cache_metadata *cmd,
				   struct cache_disk_superblock *disk_super)
{
	cmd->version = le32_to_cpu(disk_super->version);
	cmd->flags = le32_to_cpu(disk_super->flags);
	cmd->root = le64_to_cpu(disk_super->mapping_root);
	cmd->hint_root = le64_to_cpu(disk_super->hint_root);
	cmd->discard_root = le64_to_cpu(disk_super->discard_root);
	cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
	cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
	cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
	cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
	strscpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
	cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
	cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
	cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
	cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);

	cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
	cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
	cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
	cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);

	/* dirty_root only exists in metadata format 2. */
	if (separate_dirty_bits(cmd))
		cmd->dirty_root = le64_to_cpu(disk_super->dirty_root);

	cmd->changed = false;
}
 616
 617/*
 618 * The mutator updates the superblock flags.
 619 */
/*
 * Begin a transaction while also mutating the superblock flags (e.g.
 * clearing CLEAN_SHUTDOWN on open).  The flag change is flushed to disk
 * immediately via dm_bm_flush().
 */
static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
				     flags_mutator mutator)
{
	int r;
	struct cache_disk_superblock *disk_super;
	struct dm_block *sblock;

	r = superblock_lock(cmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	update_flags(disk_super, mutator);
	/* Re-read fields after mutation so in-core flags match disk. */
	read_superblock_fields(cmd, disk_super);
	dm_bm_unlock(sblock);

	return dm_bm_flush(cmd->bm);
}
 638
/*
 * Begin a new transaction by re-reading the current superblock into the
 * in-core state.
 */
static int __begin_transaction(struct dm_cache_metadata *cmd)
{
	int r;
	struct cache_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
	r = superblock_read_lock(cmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	read_superblock_fields(cmd, disk_super);
	dm_bm_unlock(sblock);

	return 0;
}
 659
/*
 * Commit the current transaction: flush the dirty (format 2) and
 * discard bitsets, pre-commit the tm, snapshot the space map root, then
 * rewrite the superblock from in-core state and commit.  An optional
 * mutator adjusts the flags (e.g. setting CLEAN_SHUTDOWN on close).
 */
static int __commit_transaction(struct dm_cache_metadata *cmd,
				flags_mutator mutator)
{
	int r;
	struct cache_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We need to know if the cache_disk_superblock exceeds a 512-byte sector.
	 */
	BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);

	if (separate_dirty_bits(cmd)) {
		r = dm_bitset_flush(&cmd->dirty_info, cmd->dirty_root,
				    &cmd->dirty_root);
		if (r)
			return r;
	}

	r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
			    &cmd->discard_root);
	if (r)
		return r;

	r = dm_tm_pre_commit(cmd->tm);
	if (r < 0)
		return r;

	/* Fallible; must happen before the superblock is locked. */
	r = __save_sm_root(cmd);
	if (r)
		return r;

	r = superblock_lock(cmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);

	disk_super->flags = cpu_to_le32(cmd->flags);
	if (mutator)
		update_flags(disk_super, mutator);

	disk_super->mapping_root = cpu_to_le64(cmd->root);
	if (separate_dirty_bits(cmd))
		disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
	disk_super->hint_root = cpu_to_le64(cmd->hint_root);
	disk_super->discard_root = cpu_to_le64(cmd->discard_root);
	disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
	disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
	disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
	strscpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
	disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
	disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
	disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
	disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);

	disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
	disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
	disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
	disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
	__copy_sm_root(cmd, disk_super);

	/* dm_tm_commit() also unlocks sblock. */
	return dm_tm_commit(cmd->tm, sblock);
}
 724
 725/*----------------------------------------------------------------*/
 726
 727/*
 728 * The mappings are held in a dm-array that has 64-bit values stored in
 729 * little-endian format.  The index is the cblock, the high 48bits of the
 730 * value are the oblock and the low 16 bit the flags.
 731 */
 732#define FLAGS_MASK ((1 << 16) - 1)
 733
 734static __le64 pack_value(dm_oblock_t block, unsigned int flags)
 735{
 736	uint64_t value = from_oblock(block);
 737
 738	value <<= 16;
 739	value = value | (flags & FLAGS_MASK);
 740	return cpu_to_le64(value);
 741}
 742
 743static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flags)
 744{
 745	uint64_t value = le64_to_cpu(value_le);
 746	uint64_t b = value >> 16;
 747
 748	*block = to_oblock(b);
 749	*flags = value & FLAGS_MASK;
 750}
 751
 752/*----------------------------------------------------------------*/
 753
/*
 * Allocate and fully open a dm_cache_metadata object: create the
 * persistent-data objects and begin the first transaction (clearing the
 * clean-shutdown flag so a crash is detectable).  Returns an ERR_PTR on
 * failure.
 */
static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
					       bool may_format_device,
					       size_t policy_hint_size,
					       unsigned int metadata_version)
{
	int r;
	struct dm_cache_metadata *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		DMERR("could not allocate metadata struct");
		return ERR_PTR(-ENOMEM);
	}

	cmd->version = metadata_version;
	refcount_set(&cmd->ref_count, 1);
	init_rwsem(&cmd->root_lock);
	cmd->bdev = bdev;
	cmd->data_block_size = data_block_size;
	cmd->cache_blocks = 0;
	cmd->policy_hint_size = policy_hint_size;
	cmd->changed = true;
	cmd->fail_io = false;

	r = __create_persistent_data_objects(cmd, may_format_device);
	if (r) {
		kfree(cmd);
		return ERR_PTR(r);
	}

	/* Clear CLEAN_SHUTDOWN while the device is live. */
	r = __begin_transaction_flags(cmd, clear_clean_shutdown);
	if (r < 0) {
		/* ref_count is 1, so close() tears everything down. */
		dm_cache_metadata_close(cmd);
		return ERR_PTR(r);
	}

	return cmd;
}
 793
 794/*
 795 * We keep a little list of ref counted metadata objects to prevent two
 796 * different target instances creating separate bufio instances.  This is
 797 * an issue if a table is reloaded before the suspend.
 798 */
 799static DEFINE_MUTEX(table_lock);
 800static LIST_HEAD(table);
 801
/*
 * Find an already-open metadata object for @bdev, taking a reference.
 * Caller must hold table_lock.
 */
static struct dm_cache_metadata *lookup(struct block_device *bdev)
{
	struct dm_cache_metadata *cmd;

	list_for_each_entry(cmd, &table, list)
		if (cmd->bdev == bdev) {
			refcount_inc(&cmd->ref_count);
			return cmd;
		}

	return NULL;
}
 814
/*
 * Return the shared metadata object for @bdev, opening it if necessary.
 * Because metadata_open() is done without table_lock held, another
 * opener may race us; if a second lookup finds an entry after our open,
 * we discard our freshly opened instance and use theirs.
 */
static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
						sector_t data_block_size,
						bool may_format_device,
						size_t policy_hint_size,
						unsigned int metadata_version)
{
	struct dm_cache_metadata *cmd, *cmd2;

	mutex_lock(&table_lock);
	cmd = lookup(bdev);
	mutex_unlock(&table_lock);

	if (cmd)
		return cmd;

	cmd = metadata_open(bdev, data_block_size, may_format_device,
			    policy_hint_size, metadata_version);
	if (!IS_ERR(cmd)) {
		mutex_lock(&table_lock);
		cmd2 = lookup(bdev);
		if (cmd2) {
			/* Lost the race: drop our instance, use the winner's. */
			mutex_unlock(&table_lock);
			__destroy_persistent_data_objects(cmd, true);
			kfree(cmd);
			return cmd2;
		}
		list_add(&cmd->list, &table);
		mutex_unlock(&table_lock);
	}

	return cmd;
}
 847
 848static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
 849{
 850	if (cmd->data_block_size != data_block_size) {
 851		DMERR("data_block_size (%llu) different from that in metadata (%llu)",
 852		      (unsigned long long) data_block_size,
 853		      (unsigned long long) cmd->data_block_size);
 854		return false;
 855	}
 856
 857	return true;
 858}
 859
/*
 * Public entry point: open (or share an existing) metadata object for
 * @bdev.  Fails with -EINVAL if a shared instance was opened with a
 * different data block size.  Returns ERR_PTR on failure.
 */
struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
						 sector_t data_block_size,
						 bool may_format_device,
						 size_t policy_hint_size,
						 unsigned int metadata_version)
{
	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
						       policy_hint_size, metadata_version);

	if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
		dm_cache_metadata_close(cmd);
		return ERR_PTR(-EINVAL);
	}

	return cmd;
}
 876
/*
 * Drop a reference; the last reference removes the object from the
 * shared table and frees it.  After a failed abort (fail_io) the
 * persistent-data objects were already torn down, so skip them.
 */
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
	if (refcount_dec_and_test(&cmd->ref_count)) {
		mutex_lock(&table_lock);
		list_del(&cmd->list);
		mutex_unlock(&table_lock);

		if (!cmd->fail_io)
			__destroy_persistent_data_objects(cmd, true);
		kfree(cmd);
	}
}
 889
 890/*
 891 * Checks that the given cache block is either unmapped or clean.
 892 */
/*
 * Checks that the given cache block is either unmapped or clean
 * (metadata format 1, where the dirty bit lives in the mapping value).
 */
static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t b,
				      bool *result)
{
	int r;
	__le64 value;
	dm_oblock_t ob;
	unsigned int flags;

	r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
	if (r)
		return r;

	unpack_value(value, &ob, &flags);
	/* Dirty means "mapped AND dirty"; anything else counts as clean. */
	*result = !((flags & M_VALID) && (flags & M_DIRTY));

	return 0;
}
 910
 911static int blocks_are_clean_combined_dirty(struct dm_cache_metadata *cmd,
 912					   dm_cblock_t begin, dm_cblock_t end,
 913					   bool *result)
 914{
 915	int r;
 916	*result = true;
 917
 918	while (begin != end) {
 919		r = block_clean_combined_dirty(cmd, begin, result);
 920		if (r) {
 921			DMERR("block_clean_combined_dirty failed");
 922			return r;
 923		}
 924
 925		if (!*result) {
 926			DMERR("cache block %llu is dirty",
 927			      (unsigned long long) from_cblock(begin));
 928			return 0;
 929		}
 930
 931		begin = to_cblock(from_cblock(begin) + 1);
 932	}
 933
 934	return 0;
 935}
 936
/*
 * Format 2: walk the separate dirty bitset over [begin, end) and report
 * (via *result) whether every block is clean.  The shared cursor in cmd
 * is used because it's too big for the stack; it is always ended before
 * returning, on both success and error paths.
 */
static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
					   dm_cblock_t begin, dm_cblock_t end,
					   bool *result)
{
	int r;
	bool dirty_flag;
	*result = true;

	if (from_cblock(cmd->cache_blocks) == 0)
		/* Nothing to do */
		return 0;

	r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
				   from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
	if (r) {
		DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
		return r;
	}

	/* Fast-forward the cursor to the first block of interest. */
	r = dm_bitset_cursor_skip(&cmd->dirty_cursor, from_cblock(begin));
	if (r) {
		DMERR("%s: dm_bitset_cursor_skip for dirty failed", __func__);
		dm_bitset_cursor_end(&cmd->dirty_cursor);
		return r;
	}

	while (begin != end) {
		/*
		 * We assume that unmapped blocks have their dirty bit
		 * cleared.
		 */
		dirty_flag = dm_bitset_cursor_get_value(&cmd->dirty_cursor);
		if (dirty_flag) {
			DMERR("%s: cache block %llu is dirty", __func__,
			      (unsigned long long) from_cblock(begin));
			dm_bitset_cursor_end(&cmd->dirty_cursor);
			*result = false;
			return 0;
		}

		/* Don't step the cursor past the last block we need. */
		begin = to_cblock(from_cblock(begin) + 1);
		if (begin == end)
			break;

		r = dm_bitset_cursor_next(&cmd->dirty_cursor);
		if (r) {
			DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
			dm_bitset_cursor_end(&cmd->dirty_cursor);
			return r;
		}
	}

	dm_bitset_cursor_end(&cmd->dirty_cursor);

	return 0;
}
 993
 994static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
 995					dm_cblock_t begin, dm_cblock_t end,
 996					bool *result)
 997{
 998	if (separate_dirty_bits(cmd))
 999		return blocks_are_clean_separate_dirty(cmd, begin, end, result);
1000	else
1001		return blocks_are_clean_combined_dirty(cmd, begin, end, result);
1002}
1003
/*
 * Take root_lock for writing.  Returns false (with the lock released)
 * if the metadata has failed or the block manager is read-only, i.e.
 * no mutation is permitted.
 */
static bool cmd_write_lock(struct dm_cache_metadata *cmd)
{
	down_write(&cmd->root_lock);
	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
		up_write(&cmd->root_lock);
		return false;
	}
	return true;
}
1013
/* Acquire the write lock or bail out of the calling function with -EINVAL. */
#define WRITE_LOCK(cmd)				\
	do {					\
		if (!cmd_write_lock((cmd)))	\
			return -EINVAL;		\
	} while (0)

/* As WRITE_LOCK, but for void-returning callers. */
#define WRITE_LOCK_VOID(cmd)			\
	do {					\
		if (!cmd_write_lock((cmd)))	\
			return;			\
	} while (0)

#define WRITE_UNLOCK(cmd) \
	up_write(&(cmd)->root_lock)
1028
/*
 * Take root_lock for reading.  Returns false (with the lock released)
 * if the metadata has failed; reads are still allowed on a read-only
 * block manager, unlike writes.
 */
static bool cmd_read_lock(struct dm_cache_metadata *cmd)
{
	down_read(&cmd->root_lock);
	if (cmd->fail_io) {
		up_read(&cmd->root_lock);
		return false;
	}
	return true;
}
1038
/* Acquire the read lock or bail out of the calling function with -EINVAL. */
#define READ_LOCK(cmd)				\
	do {					\
		if (!cmd_read_lock((cmd)))	\
			return -EINVAL;		\
	} while (0)

/* As READ_LOCK, but for void-returning callers. */
#define READ_LOCK_VOID(cmd)			\
	do {					\
		if (!cmd_read_lock((cmd)))	\
			return;			\
	} while (0)

#define READ_UNLOCK(cmd) \
	up_read(&(cmd)->root_lock)
1053
/*
 * Resize the cache to new_cache_size blocks.  Growing fills new entries
 * with the null (unmapped) mapping; shrinking is only allowed if every
 * block being dropped is unmapped or clean.  The null mapping is
 * "blessed" for disk up front and must be unblessed on every path that
 * doesn't hand it to dm_array_resize().
 */
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
	int r;
	bool clean;
	__le64 null_mapping = pack_value(0, 0);

	WRITE_LOCK(cmd);
	__dm_bless_for_disk(&null_mapping);

	if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
		/* Shrinking: refuse if any block being discarded is dirty. */
		r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
		if (r) {
			__dm_unbless_for_disk(&null_mapping);
			goto out;
		}

		if (!clean) {
			DMERR("unable to shrink cache due to dirty blocks");
			r = -EINVAL;
			__dm_unbless_for_disk(&null_mapping);
			goto out;
		}
	}

	r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
			    from_cblock(new_cache_size),
			    &null_mapping, &cmd->root);
	if (r)
		goto out;

	/* Format 2: keep the separate dirty bitset the same size. */
	if (separate_dirty_bits(cmd)) {
		r = dm_bitset_resize(&cmd->dirty_info, cmd->dirty_root,
				     from_cblock(cmd->cache_blocks), from_cblock(new_cache_size),
				     false, &cmd->dirty_root);
		if (r)
			goto out;
	}

	cmd->cache_blocks = new_cache_size;
	cmd->changed = true;

out:
	WRITE_UNLOCK(cmd);

	return r;
}
1100
/*
 * Resize the on-disk discard bitset to @new_nr_entries bits and record
 * the new discard block size.  Newly added bits start out clear.
 */
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
				   sector_t discard_block_size,
				   dm_dblock_t new_nr_entries)
{
	int r;

	WRITE_LOCK(cmd);
	r = dm_bitset_resize(&cmd->discard_info,
			     cmd->discard_root,
			     from_dblock(cmd->discard_nr_blocks),
			     from_dblock(new_nr_entries),
			     false, &cmd->discard_root);
	if (!r) {
		cmd->discard_block_size = discard_block_size;
		cmd->discard_nr_blocks = new_nr_entries;
	}

	/*
	 * NOTE(review): 'changed' is set even when the resize failed;
	 * presumably a deliberate, conservative "force a commit" - confirm.
	 */
	cmd->changed = true;
	WRITE_UNLOCK(cmd);

	return r;
}
1123
/* Set the discard bit for discard block @b.  Caller holds root_lock. */
static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
{
	return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
				 from_dblock(b), &cmd->discard_root);
}

/* Clear the discard bit for discard block @b.  Caller holds root_lock. */
static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
{
	return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
				   from_dblock(b), &cmd->discard_root);
}
1135
 
 
 
 
 
 
 
 
1136static int __discard(struct dm_cache_metadata *cmd,
1137		     dm_dblock_t dblock, bool discard)
1138{
1139	int r;
1140
1141	r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
1142	if (r)
1143		return r;
1144
1145	cmd->changed = true;
1146	return 0;
1147}
1148
1149int dm_cache_set_discard(struct dm_cache_metadata *cmd,
1150			 dm_dblock_t dblock, bool discard)
1151{
1152	int r;
1153
1154	WRITE_LOCK(cmd);
1155	r = __discard(cmd, dblock, discard);
1156	WRITE_UNLOCK(cmd);
1157
1158	return r;
1159}
1160
/*
 * Present every discard block's state to @fn.  If the last shutdown was
 * clean the on-disk bitset is walked with a cursor; otherwise the bits
 * cannot be trusted and every block is reported as not discarded.
 */
static int __load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context)
{
	int r = 0;
	uint32_t b;
	struct dm_bitset_cursor c;

	if (from_dblock(cmd->discard_nr_blocks) == 0)
		/* nothing to do */
		return 0;

	if (cmd->clean_when_opened) {
		/* make sure any buffered bits hit disk before the cursor walk */
		r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root, &cmd->discard_root);
		if (r)
			return r;

		r = dm_bitset_cursor_begin(&cmd->discard_info, cmd->discard_root,
					   from_dblock(cmd->discard_nr_blocks), &c);
		if (r)
			return r;

		for (b = 0; ; b++) {
			r = fn(context, cmd->discard_block_size, to_dblock(b),
			       dm_bitset_cursor_get_value(&c));
			if (r)
				break;

			/* must break before advancing past the last entry */
			if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
				break;

			r = dm_bitset_cursor_next(&c);
			if (r)
				break;
		}

		dm_bitset_cursor_end(&c);

	} else {
		/* unclean shutdown: discard state is unreliable, report all clear */
		for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
			r = fn(context, cmd->discard_block_size, to_dblock(b), false);
			if (r)
				return r;
		}
	}

	return r;
}
1208
/* Locked wrapper around __load_discards(). */
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context)
{
	int r;

	READ_LOCK(cmd);
	r = __load_discards(cmd, fn, context);
	READ_UNLOCK(cmd);

	return r;
}
1220
 
 
 
 
 
 
 
 
 
/*
 * Invalidate the mapping for @cblock by writing the null value (no
 * M_VALID flag).  Caller holds root_lock for writing.
 */
static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
{
	int r;
	__le64 value = pack_value(0, 0);

	__dm_bless_for_disk(&value);
	r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
			       &value, &cmd->root);
	if (r)
		return r;

	cmd->changed = true;
	return 0;
}
1235
1236int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1237{
1238	int r;
1239
1240	WRITE_LOCK(cmd);
1241	r = __remove(cmd, cblock);
1242	WRITE_UNLOCK(cmd);
1243
1244	return r;
1245}
1246
/*
 * Record a mapping cache block @cblock -> origin block @oblock.  The
 * entry is written with M_VALID set (and, for format 1, M_DIRTY clear).
 * Caller holds root_lock for writing.
 */
static int __insert(struct dm_cache_metadata *cmd,
		    dm_cblock_t cblock, dm_oblock_t oblock)
{
	int r;
	__le64 value = pack_value(oblock, M_VALID);

	__dm_bless_for_disk(&value);

	r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
			       &value, &cmd->root);
	if (r)
		return r;

	cmd->changed = true;
	return 0;
}
1263
/* Locked wrapper around __insert(). */
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
			    dm_cblock_t cblock, dm_oblock_t oblock)
{
	int r;

	WRITE_LOCK(cmd);
	r = __insert(cmd, cblock, oblock);
	WRITE_UNLOCK(cmd);

	return r;
}
1275
 
 
 
 
 
 
 
 
 
1276static bool policy_unchanged(struct dm_cache_metadata *cmd,
1277			     struct dm_cache_policy *policy)
1278{
1279	const char *policy_name = dm_cache_policy_get_name(policy);
1280	const unsigned int *policy_version = dm_cache_policy_get_version(policy);
1281	size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
1282
1283	/*
1284	 * Ensure policy names match.
1285	 */
1286	if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
1287		return false;
1288
1289	/*
1290	 * Ensure policy major versions match.
1291	 */
1292	if (cmd->policy_version[0] != policy_version[0])
1293		return false;
1294
1295	/*
1296	 * Ensure policy hint sizes match.
1297	 */
1298	if (cmd->policy_hint_size != policy_hint_size)
1299		return false;
1300
1301	return true;
1302}
1303
/* True once a hints array exists on disk and the policy uses hints. */
static bool hints_array_initialized(struct dm_cache_metadata *cmd)
{
	return cmd->hint_root && cmd->policy_hint_size;
}

/*
 * Stored hints may only be reused after a clean shutdown, with an
 * unchanged policy, and only if a hints array was actually written.
 */
static bool hints_array_available(struct dm_cache_metadata *cmd,
				  struct dm_cache_policy *policy)
{
	return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
		hints_array_initialized(cmd);
}
1315
/*
 * Feed one format-1 mapping (dirty bit packed into the mapping flags)
 * to the policy callback @fn.  Entries without M_VALID are skipped.
 */
static int __load_mapping_v1(struct dm_cache_metadata *cmd,
			     uint64_t cb, bool hints_valid,
			     struct dm_array_cursor *mapping_cursor,
			     struct dm_array_cursor *hint_cursor,
			     load_mapping_fn fn, void *context)
{
	int r = 0;

	__le64 mapping;
	__le32 hint = 0;

	__le64 *mapping_value_le;
	__le32 *hint_value_le;

	dm_oblock_t oblock;
	unsigned int flags;
	/* after an unclean shutdown the dirty bits are untrustworthy: assume dirty */
	bool dirty = true;

	dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
	memcpy(&mapping, mapping_value_le, sizeof(mapping));
	unpack_value(mapping, &oblock, &flags);

	if (flags & M_VALID) {
		if (hints_valid) {
			dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
			memcpy(&hint, hint_value_le, sizeof(hint));
		}
		if (cmd->clean_when_opened)
			dirty = flags & M_DIRTY;

		r = fn(context, oblock, to_cblock(cb), dirty,
		       le32_to_cpu(hint), hints_valid);
		if (r) {
			DMERR("policy couldn't load cache block %llu",
			      (unsigned long long) from_cblock(to_cblock(cb)));
		}
	}

	return r;
}
1356
/*
 * Feed one format-2 mapping to the policy callback @fn.  Format 2 keeps
 * the dirty state in a separate bitset, read via @dirty_cursor.
 */
static int __load_mapping_v2(struct dm_cache_metadata *cmd,
			     uint64_t cb, bool hints_valid,
			     struct dm_array_cursor *mapping_cursor,
			     struct dm_array_cursor *hint_cursor,
			     struct dm_bitset_cursor *dirty_cursor,
			     load_mapping_fn fn, void *context)
{
	int r = 0;

	__le64 mapping;
	__le32 hint = 0;

	__le64 *mapping_value_le;
	__le32 *hint_value_le;

	dm_oblock_t oblock;
	unsigned int flags;
	/* after an unclean shutdown the dirty bits are untrustworthy: assume dirty */
	bool dirty = true;

	dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
	memcpy(&mapping, mapping_value_le, sizeof(mapping));
	unpack_value(mapping, &oblock, &flags);

	if (flags & M_VALID) {
		if (hints_valid) {
			dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
			memcpy(&hint, hint_value_le, sizeof(hint));
		}
		if (cmd->clean_when_opened)
			dirty = dm_bitset_cursor_get_value(dirty_cursor);

		r = fn(context, oblock, to_cblock(cb), dirty,
		       le32_to_cpu(hint), hints_valid);
		if (r) {
			DMERR("policy couldn't load cache block %llu",
			      (unsigned long long) from_cblock(to_cblock(cb)));
		}
	}

	return r;
}
1398
1399static int __load_mappings(struct dm_cache_metadata *cmd,
1400			   struct dm_cache_policy *policy,
1401			   load_mapping_fn fn, void *context)
1402{
1403	int r;
1404	uint64_t cb;
1405
1406	bool hints_valid = hints_array_available(cmd, policy);
1407
1408	if (from_cblock(cmd->cache_blocks) == 0)
1409		/* Nothing to do */
1410		return 0;
1411
1412	r = dm_array_cursor_begin(&cmd->info, cmd->root, &cmd->mapping_cursor);
1413	if (r)
1414		return r;
1415
1416	if (hints_valid) {
1417		r = dm_array_cursor_begin(&cmd->hint_info, cmd->hint_root, &cmd->hint_cursor);
1418		if (r) {
1419			dm_array_cursor_end(&cmd->mapping_cursor);
1420			return r;
1421		}
1422	}
1423
1424	if (separate_dirty_bits(cmd)) {
1425		r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
1426					   from_cblock(cmd->cache_blocks),
1427					   &cmd->dirty_cursor);
1428		if (r) {
1429			dm_array_cursor_end(&cmd->hint_cursor);
1430			dm_array_cursor_end(&cmd->mapping_cursor);
1431			return r;
1432		}
1433	}
1434
1435	for (cb = 0; ; cb++) {
1436		if (separate_dirty_bits(cmd))
1437			r = __load_mapping_v2(cmd, cb, hints_valid,
1438					      &cmd->mapping_cursor,
1439					      &cmd->hint_cursor,
1440					      &cmd->dirty_cursor,
1441					      fn, context);
1442		else
1443			r = __load_mapping_v1(cmd, cb, hints_valid,
1444					      &cmd->mapping_cursor, &cmd->hint_cursor,
1445					      fn, context);
1446		if (r)
1447			goto out;
1448
1449		/*
1450		 * We need to break out before we move the cursors.
1451		 */
1452		if (cb >= (from_cblock(cmd->cache_blocks) - 1))
1453			break;
1454
1455		r = dm_array_cursor_next(&cmd->mapping_cursor);
1456		if (r) {
1457			DMERR("dm_array_cursor_next for mapping failed");
1458			goto out;
1459		}
1460
1461		if (hints_valid) {
1462			r = dm_array_cursor_next(&cmd->hint_cursor);
1463			if (r) {
1464				dm_array_cursor_end(&cmd->hint_cursor);
1465				hints_valid = false;
1466			}
1467		}
1468
1469		if (separate_dirty_bits(cmd)) {
1470			r = dm_bitset_cursor_next(&cmd->dirty_cursor);
1471			if (r) {
1472				DMERR("dm_bitset_cursor_next for dirty failed");
1473				goto out;
1474			}
1475		}
1476	}
1477out:
1478	dm_array_cursor_end(&cmd->mapping_cursor);
1479	if (hints_valid)
1480		dm_array_cursor_end(&cmd->hint_cursor);
1481
1482	if (separate_dirty_bits(cmd))
1483		dm_bitset_cursor_end(&cmd->dirty_cursor);
1484
1485	return r;
1486}
1487
/* Locked wrapper around __load_mappings(). */
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn, void *context)
{
	int r;

	READ_LOCK(cmd);
	r = __load_mappings(cmd, policy, fn, context);
	READ_UNLOCK(cmd);

	return r;
}
1500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Returns non-zero if the current transaction has uncommitted changes. */
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
{
	int r;

	READ_LOCK(cmd);
	r = cmd->changed;
	READ_UNLOCK(cmd);

	return r;
}
1511
1512static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
1513{
1514	int r;
1515	unsigned int flags;
1516	dm_oblock_t oblock;
1517	__le64 value;
1518
1519	r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
1520	if (r)
1521		return r;
1522
1523	unpack_value(value, &oblock, &flags);
1524
1525	if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
1526		/* nothing to be done */
1527		return 0;
1528
1529	value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
1530	__dm_bless_for_disk(&value);
1531
1532	r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1533			       &value, &cmd->root);
1534	if (r)
1535		return r;
1536
1537	cmd->changed = true;
1538	return 0;
1539
1540}
1541
/*
 * Format 1: push the in-core dirty bitmap into the packed mapping
 * flags, one array entry at a time.
 */
static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
	int r;
	unsigned int i;

	for (i = 0; i < nr_bits; i++) {
		r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
		if (r)
			return r;
	}

	return 0;
}
1555
1556static int is_dirty_callback(uint32_t index, bool *value, void *context)
1557{
1558	unsigned long *bits = context;
1559	*value = test_bit(index, bits);
1560	return 0;
1561}
1562
/*
 * Format 2: throw away the old on-disk dirty bitset and rebuild it
 * from the in-core bitmap via dm_bitset_new()'s value callback.
 */
static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
	int r = 0;

	/* nr_bits is really just a sanity check */
	if (nr_bits != from_cblock(cmd->cache_blocks)) {
		DMERR("dirty bitset is wrong size");
		return -EINVAL;
	}

	r = dm_bitset_del(&cmd->dirty_info, cmd->dirty_root);
	if (r)
		return r;

	cmd->changed = true;
	return dm_bitset_new(&cmd->dirty_info, &cmd->dirty_root, nr_bits, is_dirty_callback, bits);
}
1580
/*
 * Persist the in-core dirty bitmap, dispatching on the metadata format.
 */
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
			    unsigned int nr_bits,
			    unsigned long *bits)
{
	int r;

	WRITE_LOCK(cmd);
	if (separate_dirty_bits(cmd))
		r = __set_dirty_bits_v2(cmd, nr_bits, bits);
	else
		r = __set_dirty_bits_v1(cmd, nr_bits, bits);
	WRITE_UNLOCK(cmd);

	return r;
}
1596
/* Copy the hit/miss statistics out under the read lock. */
void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
{
	READ_LOCK_VOID(cmd);
	*stats = cmd->stats;
	READ_UNLOCK(cmd);
}

/* Replace the hit/miss statistics under the write lock. */
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
{
	WRITE_LOCK_VOID(cmd);
	cmd->stats = *stats;
	WRITE_UNLOCK(cmd);
}
1612
/*
 * Commit the current transaction and immediately begin a new one.
 * @clean_shutdown selects whether the CLEAN_SHUTDOWN superblock flag
 * is set or cleared as part of the commit.
 */
int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
{
	int r = -EINVAL;
	flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
				 clear_clean_shutdown);

	WRITE_LOCK(cmd);
	if (cmd->fail_io)
		goto out;

	r = __commit_transaction(cmd, mutator);
	if (r)
		goto out;

	r = __begin_transaction(cmd);

out:
	WRITE_UNLOCK(cmd);
	return r;
}
1632
/* Number of free blocks in the metadata space map; -EINVAL once failed. */
int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
					   dm_block_t *result)
{
	int r = -EINVAL;

	READ_LOCK(cmd);
	if (!cmd->fail_io)
		r = dm_sm_get_nr_free(cmd->metadata_sm, result);
	READ_UNLOCK(cmd);

	return r;
}

/* Total number of blocks in the metadata device; -EINVAL once failed. */
int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
				   dm_block_t *result)
{
	int r = -EINVAL;

	READ_LOCK(cmd);
	if (!cmd->fail_io)
		r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
	READ_UNLOCK(cmd);

	return r;
}
1658
1659/*----------------------------------------------------------------*/
1660
1661static int get_hint(uint32_t index, void *value_le, void *context)
1662{
1663	uint32_t value;
1664	struct dm_cache_policy *policy = context;
1665
1666	value = policy_get_hint(policy, to_cblock(index));
1667	*((__le32 *) value_le) = cpu_to_le32(value);
1668
1669	return 0;
1670}
1671
1672/*
1673 * It's quicker to always delete the hint array, and recreate with
1674 * dm_array_new().
1675 */
/*
 * Record @policy's identity in the metadata and (re)write the hints
 * array from the policy's current hints.  Returns -EINVAL for an empty
 * or over-long policy name.  Caller holds root_lock for writing.
 */
static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
	int r;
	size_t hint_size;
	const char *policy_name = dm_cache_policy_get_name(policy);
	const unsigned int *policy_version = dm_cache_policy_get_version(policy);

	if (!policy_name[0] ||
	    (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
		return -EINVAL;

	strscpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
	memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));

	hint_size = dm_cache_policy_get_hint_size(policy);
	if (!hint_size)
		return 0; /* short-circuit hints initialization */
	cmd->policy_hint_size = hint_size;

	/* quicker to delete the old array and rebuild with dm_array_new() */
	if (cmd->hint_root) {
		r = dm_array_del(&cmd->hint_info, cmd->hint_root);
		if (r)
			return r;
	}

	return dm_array_new(&cmd->hint_info, &cmd->hint_root,
			    from_cblock(cmd->cache_blocks),
			    get_hint, policy);
}
1705
/* Locked wrapper around write_hints(). */
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
	int r;

	WRITE_LOCK(cmd);
	r = write_hints(cmd, policy);
	WRITE_UNLOCK(cmd);

	return r;
}
1716
/* Sets *result if every cache block is unmapped or clean. */
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
{
	int r;

	READ_LOCK(cmd);
	r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
	READ_UNLOCK(cmd);

	return r;
}
1727
/* Switch the underlying block manager to read-only mode. */
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
{
	WRITE_LOCK_VOID(cmd);
	dm_bm_set_read_only(cmd->bm);
	WRITE_UNLOCK(cmd);
}

/* Re-enable writes on the underlying block manager. */
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd)
{
	WRITE_LOCK_VOID(cmd);
	dm_bm_set_read_write(cmd->bm);
	WRITE_UNLOCK(cmd);
}
1741
/*
 * Set the NEEDS_CHECK superblock flag and push it straight to the
 * superblock so the tools see it.  The in-core flag is set even if the
 * superblock update fails (the error is returned to the caller).
 */
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;

	WRITE_LOCK(cmd);
	set_bit(NEEDS_CHECK, &cmd->flags);

	r = superblock_lock(cmd, &sblock);
	if (r) {
		DMERR("couldn't read superblock");
		goto out;
	}

	disk_super = dm_block_data(sblock);
	disk_super->flags = cpu_to_le32(cmd->flags);

	dm_bm_unlock(sblock);

out:
	WRITE_UNLOCK(cmd);
	return r;
}
1766
/* Report whether the NEEDS_CHECK flag is set.  Always returns 0. */
int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
{
	READ_LOCK(cmd);
	*result = !!test_bit(NEEDS_CHECK, &cmd->flags);
	READ_UNLOCK(cmd);

	return 0;
}
1775
/*
 * Abort the current transaction: discard all in-core persistent-data
 * state and reopen the last committed metadata from disk, swapping in a
 * freshly created block manager.  Any failure puts the device into
 * fail_io mode, after which only close is permitted.
 */
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
{
	int r = -EINVAL;
	struct dm_block_manager *old_bm = NULL, *new_bm = NULL;

	/* fail_io is double-checked with cmd->root_lock held below */
	if (unlikely(cmd->fail_io))
		return r;

	/*
	 * Replacement block manager (new_bm) is created and old_bm destroyed outside of
	 * cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
	 * shrinker associated with the block manager's bufio client vs cmd root_lock).
	 * - must take shrinker_mutex without holding cmd->root_lock
	 */
	new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					 CACHE_MAX_CONCURRENT_LOCKS);

	WRITE_LOCK(cmd);
	if (cmd->fail_io) {
		WRITE_UNLOCK(cmd);
		goto out;
	}

	__destroy_persistent_data_objects(cmd, false);
	old_bm = cmd->bm;
	if (IS_ERR(new_bm)) {
		DMERR("could not create block manager during abort");
		cmd->bm = NULL;
		r = PTR_ERR(new_bm);
		goto out_unlock;
	}

	cmd->bm = new_bm;
	r = __open_or_format_metadata(cmd, false);
	if (r) {
		cmd->bm = NULL;
		goto out_unlock;
	}

	new_bm = NULL;	/* ownership transferred to cmd->bm */
out_unlock:
	if (r)
		cmd->fail_io = true;
	WRITE_UNLOCK(cmd);
	dm_block_manager_destroy(old_bm);
out:
	if (new_bm && !IS_ERR(new_bm))
		dm_block_manager_destroy(new_bm);

	return r;
}
/* ==== v4.10.11: an older revision of this file follows (scrape artifact) ==== */
 
   1/*
   2 * Copyright (C) 2012 Red Hat, Inc.
   3 *
   4 * This file is released under the GPL.
   5 */
   6
   7#include "dm-cache-metadata.h"
   8
   9#include "persistent-data/dm-array.h"
  10#include "persistent-data/dm-bitset.h"
  11#include "persistent-data/dm-space-map.h"
  12#include "persistent-data/dm-space-map-disk.h"
  13#include "persistent-data/dm-transaction-manager.h"
  14
  15#include <linux/device-mapper.h>
 
  16
  17/*----------------------------------------------------------------*/
  18
  19#define DM_MSG_PREFIX   "cache metadata"
  20
  21#define CACHE_SUPERBLOCK_MAGIC 06142003
  22#define CACHE_SUPERBLOCK_LOCATION 0
  23
  24/*
  25 * defines a range of metadata versions that this module can handle.
  26 */
  27#define MIN_CACHE_VERSION 1
  28#define MAX_CACHE_VERSION 1
  29
  30#define CACHE_METADATA_CACHE_SIZE 64
  31
  32/*
  33 *  3 for btree insert +
  34 *  2 for btree lookup used within space map
  35 */
  36#define CACHE_MAX_CONCURRENT_LOCKS 5
  37#define SPACE_MAP_ROOT_SIZE 128
  38
enum superblock_flag_bits {
	/* for spotting crashes that would invalidate the dirty bitset */
	CLEAN_SHUTDOWN,
	/* metadata must be checked using the tools */
	NEEDS_CHECK,
};

/*
 * Each mapping from cache block -> origin block carries a set of flags.
 */
enum mapping_bits {
	/*
	 * A valid mapping.  Because we're using an array we clear this
	 * flag for a non-existent mapping.
	 */
	M_VALID = 1,

	/*
	 * The data on the cache is different from that on the origin.
	 */
	M_DIRTY = 2
};
  61
/*
 * On-disk superblock layout, little-endian fields throughout.
 */
struct cache_disk_superblock {
	/* checksum covers everything from 'flags' onwards (see sb_check()) */
	__le32 csum;
	__le32 flags;
	__le64 blocknr;

	__u8 uuid[16];
	__le64 magic;
	__le32 version;

	__u8 policy_name[CACHE_POLICY_NAME_SIZE];
	__le32 policy_hint_size;

	/* packed root of the metadata space map */
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
	__le64 mapping_root;
	__le64 hint_root;

	__le64 discard_root;
	__le64 discard_block_size;
	__le64 discard_nr_blocks;

	__le32 data_block_size;
	__le32 metadata_block_size;
	__le32 cache_blocks;

	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;

	/* hit/miss statistics persisted across activations */
	__le32 read_hits;
	__le32 read_misses;
	__le32 write_hits;
	__le32 write_misses;

	__le32 policy_version[CACHE_POLICY_VERSION_SIZE];
} __packed;
  97
/*
 * In-core metadata state for one open cache metadata device.
 */
struct dm_cache_metadata {
	/* NOTE(review): ref_count/list presumably implement sharing of one
	 * instance per bdev via a lookup table - confirm against the open code. */
	atomic_t ref_count;
	struct list_head list;

	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *metadata_sm;
	struct dm_transaction_manager *tm;

	struct dm_array_info info;
	struct dm_array_info hint_info;
	struct dm_disk_bitset discard_info;

	/* protects all of the mutable state below */
	struct rw_semaphore root_lock;
	unsigned long flags;
	dm_block_t root;
	dm_block_t hint_root;
	dm_block_t discard_root;

	sector_t discard_block_size;
	dm_dblock_t discard_nr_blocks;

	sector_t data_block_size;
	dm_cblock_t cache_blocks;
	/* true if the current transaction holds uncommitted changes */
	bool changed:1;
	/* true if CLEAN_SHUTDOWN was set when the device was opened */
	bool clean_when_opened:1;

	char policy_name[CACHE_POLICY_NAME_SIZE];
	unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
	size_t policy_hint_size;
	struct dm_cache_statistics stats;

	/*
	 * Reading the space map root can fail, so we read it into this
	 * buffer before the superblock is locked and updated.
	 */
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/*
	 * Set if a transaction has to be aborted but the attempt to roll
	 * back to the previous (good) transaction failed.  The only
	 * metadata operation permissible in this state is the closing of
	 * the device.
	 */
	bool fail_io:1;

	/*
	 * These structures are used when loading metadata.  They're too
	 * big to put on the stack.
	 */
	struct dm_array_cursor mapping_cursor;
	struct dm_array_cursor hint_cursor;
};
 151
 152/*-------------------------------------------------------------------
 
 153 * superblock validator
 154 *-----------------------------------------------------------------*/
 155
 156#define SUPERBLOCK_CSUM_XOR 9031977
 157
/*
 * Validator prepare hook: stamp the block's location and (re)compute
 * the checksum over everything after the csum field, just before the
 * superblock is written.
 */
static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t sb_block_size)
{
	struct cache_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      sb_block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}
 169
 170static int check_metadata_version(struct cache_disk_superblock *disk_super)
 171{
 172	uint32_t metadata_version = le32_to_cpu(disk_super->version);
 
 173	if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
 174		DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
 175		      metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
 176		return -EINVAL;
 177	}
 178
 179	return 0;
 180}
 181
/*
 * Validator check hook: verify location, magic, checksum and metadata
 * version of a superblock that has just been read.
 */
static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t sb_block_size)
{
	struct cache_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: wanted %llu",
		      le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: wanted %llu",
		      le64_to_cpu(disk_super->magic),
		      (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     sb_block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return check_metadata_version(disk_super);
}
 214
/* Validator applied to every superblock read and write. */
static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
 220
 221/*----------------------------------------------------------------*/
 222
/* Read-lock the superblock through the validator. */
static int superblock_read_lock(struct dm_cache_metadata *cmd,
				struct dm_block **sblock)
{
	return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
			       &sb_validator, sblock);
}

/* Write-lock the superblock, zeroing its contents (initial format). */
static int superblock_lock_zero(struct dm_cache_metadata *cmd,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}

/* Write-lock the superblock for update. */
static int superblock_lock(struct dm_cache_metadata *cmd,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}
 243
 244/*----------------------------------------------------------------*/
 245
/*
 * Sets *result if the superblock location contains only zeroes, i.e.
 * the device has never been formatted.
 */
static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
	int r;
	unsigned i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);

	/*
	 * We can't use a validator here - it may be all zeroes.
	 */
	r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
	if (r)
		return r;

	data_le = dm_block_data(b);
	*result = true;
	for (i = 0; i < sb_block_size; i++) {
		if (data_le[i] != zero) {
			*result = false;
			break;
		}
	}

	dm_bm_unlock(b);

	return 0;
}
 274
/*
 * Initialise the array infos for the mapping array (__le64 entries)
 * and, if the policy uses hints, the hint array (__le32 entries).
 */
static void __setup_mapping_info(struct dm_cache_metadata *cmd)
{
	struct dm_btree_value_type vt;

	vt.context = NULL;
	vt.size = sizeof(__le64);
	vt.inc = NULL;
	vt.dec = NULL;
	vt.equal = NULL;
	dm_array_info_init(&cmd->info, cmd->tm, &vt);

	if (cmd->policy_hint_size) {
		vt.size = sizeof(__le32);
		dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
	}
}
 291
/*
 * Snapshot the metadata space map root into cmd's buffer; done before
 * locking the superblock because the copy can fail.
 */
static int __save_sm_root(struct dm_cache_metadata *cmd)
{
	int r;
	size_t metadata_len;

	r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
	if (r < 0)
		return r;

	return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
			       metadata_len);
}

/* Copy the previously saved space map root into the superblock. */
static void __copy_sm_root(struct dm_cache_metadata *cmd,
			   struct cache_disk_superblock *disk_super)
{
	memcpy(&disk_super->metadata_space_map_root,
	       &cmd->metadata_space_map_root,
	       sizeof(cmd->metadata_space_map_root));
}
 312
 
 
 
 
 
/*
 * Write the very first superblock for a freshly formatted device and
 * commit the initial (empty) transaction.
 */
static int __write_initial_superblock(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;
	sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;

	/* FIXME: see if we can lose the max sectors limit */
	if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
		bdev_size = DM_CACHE_METADATA_MAX_SECTORS;

	r = dm_tm_pre_commit(cmd->tm);
	if (r < 0)
		return r;

	/*
	 * dm_sm_copy_root() can fail.  So we need to do it before we start
	 * updating the superblock.
	 */
	r = __save_sm_root(cmd);
	if (r)
		return r;

	r = superblock_lock_zero(cmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->flags = 0;
	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
	disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
	memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
	memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
	disk_super->policy_hint_size = 0;

	__copy_sm_root(cmd, disk_super);

	disk_super->mapping_root = cpu_to_le64(cmd->root);
	disk_super->hint_root = cpu_to_le64(cmd->hint_root);
	disk_super->discard_root = cpu_to_le64(cmd->discard_root);
	disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
	disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
	disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE);
	disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
	disk_super->cache_blocks = cpu_to_le32(0);

	disk_super->read_hits = cpu_to_le32(0);
	disk_super->read_misses = cpu_to_le32(0);
	disk_super->write_hits = cpu_to_le32(0);
	disk_super->write_misses = cpu_to_le32(0);

	/* commit consumes the superblock lock */
	return dm_tm_commit(cmd->tm, sblock);
}
 367
/*
 * Format a brand new metadata device: create transaction manager and
 * space map, empty mapping array and discard bitset, then write the
 * initial superblock.
 */
static int __format_metadata(struct dm_cache_metadata *cmd)
{
	int r;

	r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
				 &cmd->tm, &cmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_create_with_sm failed");
		return r;
	}

	__setup_mapping_info(cmd);

	r = dm_array_empty(&cmd->info, &cmd->root);
	if (r < 0)
		goto bad;

	dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
	r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
	if (r < 0)
		goto bad;

	cmd->discard_block_size = 0;
	cmd->discard_nr_blocks = 0;

	r = __write_initial_superblock(cmd);
	if (r)
		goto bad;

	cmd->clean_when_opened = true;
	return 0;

bad:
	dm_tm_destroy(cmd->tm);
	dm_sm_destroy(cmd->metadata_sm);

	return r;
}
 406
/*
 * Refuse to open metadata that uses feature flags this module doesn't
 * support (incompat always; compat_ro only when opening read-write).
 */
static int __check_incompat_features(struct cache_disk_superblock *disk_super,
				     struct dm_cache_metadata *cmd)
{
	uint32_t features;

	features = le32_to_cpu(disk_super->incompat_flags) & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;

	if (features) {
		DMERR("could not access metadata due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	/*
	 * Check for read-only metadata to skip the following RDWR checks.
	 */
	if (get_disk_ro(cmd->bdev->bd_disk))
		return 0;

	features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
	if (features) {
		DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	return 0;
}
 434
/*
 * Open previously formatted metadata: validate the superblock, open
 * the transaction manager/space map from the stored root, and note
 * whether the previous shutdown was clean.
 */
static int __open_metadata(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;
	unsigned long sb_flags;

	r = superblock_read_lock(cmd, &sblock);
	if (r < 0) {
		DMERR("couldn't read lock superblock");
		return r;
	}

	disk_super = dm_block_data(sblock);

	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported",
		      le32_to_cpu(disk_super->data_block_size),
		      (unsigned long long)cmd->data_block_size);
		r = -EINVAL;
		goto bad;
	}

	r = __check_incompat_features(disk_super, cmd);
	if (r < 0)
		goto bad;

	r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
			       disk_super->metadata_space_map_root,
			       sizeof(disk_super->metadata_space_map_root),
			       &cmd->tm, &cmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_open_with_sm failed");
		goto bad;
	}

	__setup_mapping_info(cmd);

	dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
	sb_flags = le32_to_cpu(disk_super->flags);
	cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
	dm_bm_unlock(sblock);

	return 0;

bad:
	dm_bm_unlock(sblock);
	return r;
}
 484
 485static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
 486				     bool format_device)
 487{
 488	int r;
 489	bool unformatted = false;
 490
 491	r = __superblock_all_zeroes(cmd->bm, &unformatted);
 492	if (r)
 493		return r;
 494
 495	if (unformatted)
 496		return format_device ? __format_metadata(cmd) : -EPERM;
 497
 498	return __open_metadata(cmd);
 499}
 500
 501static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
 502					    bool may_format_device)
 503{
 504	int r;
 
 505	cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
 506					  CACHE_METADATA_CACHE_SIZE,
 507					  CACHE_MAX_CONCURRENT_LOCKS);
 508	if (IS_ERR(cmd->bm)) {
 509		DMERR("could not create block manager");
 510		return PTR_ERR(cmd->bm);
 
 
 511	}
 512
 513	r = __open_or_format_metadata(cmd, may_format_device);
 514	if (r)
 515		dm_block_manager_destroy(cmd->bm);
 
 
 516
 517	return r;
 518}
 519
/*
 * Counterpart to __create_persistent_data_objects(): tear down the
 * space map, transaction manager and block manager, in that order.
 */
static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
{
	dm_sm_destroy(cmd->metadata_sm);
	dm_tm_destroy(cmd->tm);
	dm_block_manager_destroy(cmd->bm);
}
 526
/* A mutator transforms the superblock flag bits. */
typedef unsigned long (*flags_mutator)(unsigned long);

/* Apply @mutator to the superblock's flags field, in place. */
static void update_flags(struct cache_disk_superblock *disk_super,
			 flags_mutator mutator)
{
	uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));

	disk_super->flags = cpu_to_le32(sb_flags);
}

/* Mutator: record that we shut down cleanly. */
static unsigned long set_clean_shutdown(unsigned long flags)
{
	set_bit(CLEAN_SHUTDOWN, &flags);
	return flags;
}

/* Mutator: an open, live device has (so far) not shut down cleanly. */
static unsigned long clear_clean_shutdown(unsigned long flags)
{
	clear_bit(CLEAN_SHUTDOWN, &flags);
	return flags;
}
 547
 548static void read_superblock_fields(struct dm_cache_metadata *cmd,
 549				   struct cache_disk_superblock *disk_super)
 550{
 
 551	cmd->flags = le32_to_cpu(disk_super->flags);
 552	cmd->root = le64_to_cpu(disk_super->mapping_root);
 553	cmd->hint_root = le64_to_cpu(disk_super->hint_root);
 554	cmd->discard_root = le64_to_cpu(disk_super->discard_root);
 555	cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
 556	cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
 557	cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
 558	cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
 559	strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
 560	cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
 561	cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
 562	cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
 563	cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
 564
 565	cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
 566	cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
 567	cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
 568	cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
 569
 
 
 
 570	cmd->changed = false;
 571}
 572
 573/*
 574 * The mutator updates the superblock flags.
 575 */
 576static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
 577				     flags_mutator mutator)
 578{
 579	int r;
 580	struct cache_disk_superblock *disk_super;
 581	struct dm_block *sblock;
 582
 583	r = superblock_lock(cmd, &sblock);
 584	if (r)
 585		return r;
 586
 587	disk_super = dm_block_data(sblock);
 588	update_flags(disk_super, mutator);
 589	read_superblock_fields(cmd, disk_super);
 590	dm_bm_unlock(sblock);
 591
 592	return dm_bm_flush(cmd->bm);
 593}
 594
 595static int __begin_transaction(struct dm_cache_metadata *cmd)
 596{
 597	int r;
 598	struct cache_disk_superblock *disk_super;
 599	struct dm_block *sblock;
 600
 601	/*
 602	 * We re-read the superblock every time.  Shouldn't need to do this
 603	 * really.
 604	 */
 605	r = superblock_read_lock(cmd, &sblock);
 606	if (r)
 607		return r;
 608
 609	disk_super = dm_block_data(sblock);
 610	read_superblock_fields(cmd, disk_super);
 611	dm_bm_unlock(sblock);
 612
 613	return 0;
 614}
 615
/*
 * Commit all outstanding changes: flush the discard bitset, pre-commit
 * the transaction manager, save the space map root, then rewrite the
 * superblock from in-core state and commit it.  If @mutator is non-NULL
 * it is applied to the superblock flags (e.g. to set/clear
 * CLEAN_SHUTDOWN) before the commit.
 */
static int __commit_transaction(struct dm_cache_metadata *cmd,
				flags_mutator mutator)
{
	int r;
	struct cache_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We need to know if the cache_disk_superblock exceeds a 512-byte sector.
	 */
	BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);

	/* Flush the bitset so discard_root below is current. */
	r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
			    &cmd->discard_root);
	if (r)
		return r;

	r = dm_tm_pre_commit(cmd->tm);
	if (r < 0)
		return r;

	r = __save_sm_root(cmd);
	if (r)
		return r;

	r = superblock_lock(cmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);

	disk_super->flags = cpu_to_le32(cmd->flags);
	if (mutator)
		update_flags(disk_super, mutator);

	disk_super->mapping_root = cpu_to_le64(cmd->root);
	disk_super->hint_root = cpu_to_le64(cmd->hint_root);
	disk_super->discard_root = cpu_to_le64(cmd->discard_root);
	disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
	disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
	disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
	strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
	disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
	disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
	disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);

	disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
	disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
	disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
	disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
	__copy_sm_root(cmd, disk_super);

	/* dm_tm_commit() also unlocks sblock. */
	return dm_tm_commit(cmd->tm, sblock);
}
 670
 671/*----------------------------------------------------------------*/
 672
 673/*
 674 * The mappings are held in a dm-array that has 64-bit values stored in
 675 * little-endian format.  The index is the cblock, the high 48bits of the
 676 * value are the oblock and the low 16 bit the flags.
 677 */
 678#define FLAGS_MASK ((1 << 16) - 1)
 679
 680static __le64 pack_value(dm_oblock_t block, unsigned flags)
 681{
 682	uint64_t value = from_oblock(block);
 
 683	value <<= 16;
 684	value = value | (flags & FLAGS_MASK);
 685	return cpu_to_le64(value);
 686}
 687
 688static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
 689{
 690	uint64_t value = le64_to_cpu(value_le);
 691	uint64_t b = value >> 16;
 
 692	*block = to_oblock(b);
 693	*flags = value & FLAGS_MASK;
 694}
 695
 696/*----------------------------------------------------------------*/
 697
 698static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
 699					       sector_t data_block_size,
 700					       bool may_format_device,
 701					       size_t policy_hint_size)
 
 702{
 703	int r;
 704	struct dm_cache_metadata *cmd;
 705
 706	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 707	if (!cmd) {
 708		DMERR("could not allocate metadata struct");
 709		return ERR_PTR(-ENOMEM);
 710	}
 711
 712	atomic_set(&cmd->ref_count, 1);
 
 713	init_rwsem(&cmd->root_lock);
 714	cmd->bdev = bdev;
 715	cmd->data_block_size = data_block_size;
 716	cmd->cache_blocks = 0;
 717	cmd->policy_hint_size = policy_hint_size;
 718	cmd->changed = true;
 719	cmd->fail_io = false;
 720
 721	r = __create_persistent_data_objects(cmd, may_format_device);
 722	if (r) {
 723		kfree(cmd);
 724		return ERR_PTR(r);
 725	}
 726
 727	r = __begin_transaction_flags(cmd, clear_clean_shutdown);
 728	if (r < 0) {
 729		dm_cache_metadata_close(cmd);
 730		return ERR_PTR(r);
 731	}
 732
 733	return cmd;
 734}
 735
 736/*
 737 * We keep a little list of ref counted metadata objects to prevent two
 738 * different target instances creating separate bufio instances.  This is
 739 * an issue if a table is reloaded before the suspend.
 740 */
 741static DEFINE_MUTEX(table_lock);
 742static LIST_HEAD(table);
 743
 744static struct dm_cache_metadata *lookup(struct block_device *bdev)
 745{
 746	struct dm_cache_metadata *cmd;
 747
 748	list_for_each_entry(cmd, &table, list)
 749		if (cmd->bdev == bdev) {
 750			atomic_inc(&cmd->ref_count);
 751			return cmd;
 752		}
 753
 754	return NULL;
 755}
 756
/*
 * Return the table entry for @bdev, or open a new one.  The open happens
 * outside table_lock, so we re-check for a racing opener afterwards; the
 * loser's object is destroyed and the winner's (already referenced by
 * lookup()) is returned instead.
 */
static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
						sector_t data_block_size,
						bool may_format_device,
						size_t policy_hint_size)
{
	struct dm_cache_metadata *cmd, *cmd2;

	mutex_lock(&table_lock);
	cmd = lookup(bdev);
	mutex_unlock(&table_lock);

	if (cmd)
		return cmd;

	cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
	if (!IS_ERR(cmd)) {
		mutex_lock(&table_lock);
		cmd2 = lookup(bdev);
		if (cmd2) {
			/* Lost the race: discard ours, use the winner's. */
			mutex_unlock(&table_lock);
			__destroy_persistent_data_objects(cmd);
			kfree(cmd);
			return cmd2;
		}
		list_add(&cmd->list, &table);
		mutex_unlock(&table_lock);
	}

	return cmd;
}
 787
 788static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
 789{
 790	if (cmd->data_block_size != data_block_size) {
 791		DMERR("data_block_size (%llu) different from that in metadata (%llu)",
 792		      (unsigned long long) data_block_size,
 793		      (unsigned long long) cmd->data_block_size);
 794		return false;
 795	}
 796
 797	return true;
 798}
 799
 800struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
 801						 sector_t data_block_size,
 802						 bool may_format_device,
 803						 size_t policy_hint_size)
 
 804{
 805	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
 806						       may_format_device, policy_hint_size);
 807
 808	if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
 809		dm_cache_metadata_close(cmd);
 810		return ERR_PTR(-EINVAL);
 811	}
 812
 813	return cmd;
 814}
 815
/*
 * Drop a reference; the final reference removes the object from the
 * global table and frees it.  In fail_io mode the persistent-data
 * objects are skipped (they were already torn down when fail_io was
 * established by dm_cache_metadata_abort()).
 */
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
	if (atomic_dec_and_test(&cmd->ref_count)) {
		mutex_lock(&table_lock);
		list_del(&cmd->list);
		mutex_unlock(&table_lock);

		if (!cmd->fail_io)
			__destroy_persistent_data_objects(cmd);
		kfree(cmd);
	}
}
 828
 829/*
 830 * Checks that the given cache block is either unmapped or clean.
 831 */
 832static int block_unmapped_or_clean(struct dm_cache_metadata *cmd, dm_cblock_t b,
 833				   bool *result)
 834{
 835	int r;
 836	__le64 value;
 837	dm_oblock_t ob;
 838	unsigned flags;
 839
 840	r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
 841	if (r) {
 842		DMERR("block_unmapped_or_clean failed");
 843		return r;
 844	}
 845
 846	unpack_value(value, &ob, &flags);
 847	*result = !((flags & M_VALID) && (flags & M_DIRTY));
 848
 849	return 0;
 850}
 851
 852static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
 853					dm_cblock_t begin, dm_cblock_t end,
 854					bool *result)
 855{
 856	int r;
 857	*result = true;
 858
 859	while (begin != end) {
 860		r = block_unmapped_or_clean(cmd, begin, result);
 861		if (r)
 
 862			return r;
 
 863
 864		if (!*result) {
 865			DMERR("cache block %llu is dirty",
 866			      (unsigned long long) from_cblock(begin));
 867			return 0;
 868		}
 869
 870		begin = to_cblock(from_cblock(begin) + 1);
 871	}
 872
 873	return 0;
 874}
 875
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Take the metadata write lock; fails when the device is in fail_io mode
 * or the block manager has gone read-only (no point mutating state that
 * can never be committed).
 */
static bool cmd_write_lock(struct dm_cache_metadata *cmd)
{
	down_write(&cmd->root_lock);
	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
		up_write(&cmd->root_lock);
		return false;
	}
	return true;
}

/* Take the write lock, or return -EINVAL from the *calling* function. */
#define WRITE_LOCK(cmd)				\
	do {					\
		if (!cmd_write_lock((cmd)))	\
			return -EINVAL;		\
	} while(0)

/* As WRITE_LOCK, but for void functions. */
#define WRITE_LOCK_VOID(cmd)			\
	do {					\
		if (!cmd_write_lock((cmd)))	\
			return;			\
	} while(0)

#define WRITE_UNLOCK(cmd) \
	up_write(&(cmd)->root_lock)

/* Take the metadata read lock; only fail_io mode blocks readers. */
static bool cmd_read_lock(struct dm_cache_metadata *cmd)
{
	down_read(&cmd->root_lock);
	if (cmd->fail_io) {
		up_read(&cmd->root_lock);
		return false;
	}
	return true;
}

/* Take the read lock, or return -EINVAL from the *calling* function. */
#define READ_LOCK(cmd)				\
	do {					\
		if (!cmd_read_lock((cmd)))	\
			return -EINVAL;		\
	} while(0)

/* As READ_LOCK, but for void functions. */
#define READ_LOCK_VOID(cmd)			\
	do {					\
		if (!cmd_read_lock((cmd)))	\
			return;			\
	} while(0)

#define READ_UNLOCK(cmd) \
	up_read(&(cmd)->root_lock)
 925
/*
 * Resize the mapping array to @new_cache_size entries; new entries are
 * filled with the invalid (flags == 0) mapping.  Shrinking is refused
 * with -EINVAL if any block being dropped is both valid and dirty.
 */
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
	int r;
	bool clean;
	__le64 null_mapping = pack_value(0, 0);

	WRITE_LOCK(cmd);
	/* Blessed for disk use; must be unblessed on every early-out path. */
	__dm_bless_for_disk(&null_mapping);

	if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
		/* Shrinking: the blocks being dropped must be unmapped or clean. */
		r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
		if (r) {
			__dm_unbless_for_disk(&null_mapping);
			goto out;
		}

		if (!clean) {
			DMERR("unable to shrink cache due to dirty blocks");
			r = -EINVAL;
			__dm_unbless_for_disk(&null_mapping);
			goto out;
		}
	}

	r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
			    from_cblock(new_cache_size),
			    &null_mapping, &cmd->root);
	if (!r)
		cmd->cache_blocks = new_cache_size;
	cmd->changed = true;

out:
	WRITE_UNLOCK(cmd);

	return r;
}
 962
/*
 * Resize the discard bitset (new bits start cleared) and, on success,
 * record the new discard block geometry.
 */
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
				   sector_t discard_block_size,
				   dm_dblock_t new_nr_entries)
{
	int r;

	WRITE_LOCK(cmd);
	r = dm_bitset_resize(&cmd->discard_info,
			     cmd->discard_root,
			     from_dblock(cmd->discard_nr_blocks),
			     from_dblock(new_nr_entries),
			     false, &cmd->discard_root);
	if (!r) {
		cmd->discard_block_size = discard_block_size;
		cmd->discard_nr_blocks = new_nr_entries;
	}

	cmd->changed = true;
	WRITE_UNLOCK(cmd);

	return r;
}
 985
/* Set the discard bit for @b; may update the bitset root. */
static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
{
	return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
				 from_dblock(b), &cmd->discard_root);
}

/* Clear the discard bit for @b; may update the bitset root. */
static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
{
	return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
				   from_dblock(b), &cmd->discard_root);
}

/* Read the discard bit for @b into *is_discarded. */
static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b,
			  bool *is_discarded)
{
	return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root,
				  from_dblock(b), &cmd->discard_root,
				  is_discarded);
}

/* Set or clear the discard bit, marking the transaction as changed. */
static int __discard(struct dm_cache_metadata *cmd,
		     dm_dblock_t dblock, bool discard)
{
	int r;

	r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
	if (r)
		return r;

	cmd->changed = true;
	return 0;
}
1018
/* Record whether @dblock has been discarded, under the write lock. */
int dm_cache_set_discard(struct dm_cache_metadata *cmd,
			 dm_dblock_t dblock, bool discard)
{
	int r;

	WRITE_LOCK(cmd);
	r = __discard(cmd, dblock, discard);
	WRITE_UNLOCK(cmd);

	return r;
}
1030
1031static int __load_discards(struct dm_cache_metadata *cmd,
1032			   load_discard_fn fn, void *context)
1033{
1034	int r = 0;
1035	dm_block_t b;
1036	bool discard;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1037
1038	for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
1039		dm_dblock_t dblock = to_dblock(b);
1040
1041		if (cmd->clean_when_opened) {
1042			r = __is_discarded(cmd, dblock, &discard);
 
 
 
 
 
 
 
 
1043			if (r)
1044				return r;
1045		} else
1046			discard = false;
1047
1048		r = fn(context, cmd->discard_block_size, dblock, discard);
1049		if (r)
1050			break;
1051	}
1052
1053	return r;
1054}
1055
/* Iterate all discard bits under the read lock, calling @fn for each. */
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context)
{
	int r;

	READ_LOCK(cmd);
	r = __load_discards(cmd, fn, context);
	READ_UNLOCK(cmd);

	return r;
}

/* Report the current number of cache blocks. */
int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
{
	READ_LOCK(cmd);
	*result = cmd->cache_blocks;
	READ_UNLOCK(cmd);

	return 0;
}
1076
1077static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1078{
1079	int r;
1080	__le64 value = pack_value(0, 0);
1081
1082	__dm_bless_for_disk(&value);
1083	r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1084			       &value, &cmd->root);
1085	if (r)
1086		return r;
1087
1088	cmd->changed = true;
1089	return 0;
1090}
1091
/* Invalidate the mapping for @cblock, under the write lock. */
int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
{
	int r;

	WRITE_LOCK(cmd);
	r = __remove(cmd, cblock);
	WRITE_UNLOCK(cmd);

	return r;
}
1102
1103static int __insert(struct dm_cache_metadata *cmd,
1104		    dm_cblock_t cblock, dm_oblock_t oblock)
1105{
1106	int r;
1107	__le64 value = pack_value(oblock, M_VALID);
 
1108	__dm_bless_for_disk(&value);
1109
1110	r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1111			       &value, &cmd->root);
1112	if (r)
1113		return r;
1114
1115	cmd->changed = true;
1116	return 0;
1117}
1118
/* Map @cblock -> @oblock (valid, not dirty), under the write lock. */
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
			    dm_cblock_t cblock, dm_oblock_t oblock)
{
	int r;

	WRITE_LOCK(cmd);
	r = __insert(cmd, cblock, oblock);
	WRITE_UNLOCK(cmd);

	return r;
}

/*
 * Argument bundle for walking the mappings.
 * NOTE(review): not referenced anywhere in this part of the file —
 * confirm it is still used elsewhere before relying on (or removing) it.
 */
struct thunk {
	load_mapping_fn fn;
	void *context;

	struct dm_cache_metadata *cmd;
	bool respect_dirty_flags;
	bool hints_valid;
};
1139
1140static bool policy_unchanged(struct dm_cache_metadata *cmd,
1141			     struct dm_cache_policy *policy)
1142{
1143	const char *policy_name = dm_cache_policy_get_name(policy);
1144	const unsigned *policy_version = dm_cache_policy_get_version(policy);
1145	size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
1146
1147	/*
1148	 * Ensure policy names match.
1149	 */
1150	if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
1151		return false;
1152
1153	/*
1154	 * Ensure policy major versions match.
1155	 */
1156	if (cmd->policy_version[0] != policy_version[0])
1157		return false;
1158
1159	/*
1160	 * Ensure policy hint sizes match.
1161	 */
1162	if (cmd->policy_hint_size != policy_hint_size)
1163		return false;
1164
1165	return true;
1166}
1167
1168static bool hints_array_initialized(struct dm_cache_metadata *cmd)
1169{
1170	return cmd->hint_root && cmd->policy_hint_size;
1171}
1172
1173static bool hints_array_available(struct dm_cache_metadata *cmd,
1174				  struct dm_cache_policy *policy)
1175{
1176	return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
1177		hints_array_initialized(cmd);
1178}
1179
/*
 * Emit the mapping at index @cb (and its hint, when @hints_valid) to the
 * policy via @fn.  Both cursors must already be positioned on entry @cb;
 * the caller is responsible for advancing them.  Invalid entries are
 * silently skipped.
 */
static int __load_mapping(struct dm_cache_metadata *cmd,
			  uint64_t cb, bool hints_valid,
			  struct dm_array_cursor *mapping_cursor,
			  struct dm_array_cursor *hint_cursor,
			  load_mapping_fn fn, void *context)
{
	int r = 0;

	__le64 mapping;
	__le32 hint = 0;

	__le64 *mapping_value_le;
	__le32 *hint_value_le;

	dm_oblock_t oblock;
	unsigned flags;

	/* Copy out of the cursor's buffer before unpacking. */
	dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
	memcpy(&mapping, mapping_value_le, sizeof(mapping));
	unpack_value(mapping, &oblock, &flags);

	if (flags & M_VALID) {
		if (hints_valid) {
			dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
			memcpy(&hint, hint_value_le, sizeof(hint));
		}

		r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
		       le32_to_cpu(hint), hints_valid);
		if (r)
			DMERR("policy couldn't load cblock");
	}

	return r;
}
1215
/*
 * Walk the whole mapping array — and the hint array in lock-step when
 * hints are usable — feeding each entry to @fn.  Uses the array cursor
 * API so the btree is traversed once rather than doing a lookup per
 * block.
 */
static int __load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn, void *context)
{
	int r;
	uint64_t cb;

	bool hints_valid = hints_array_available(cmd, policy);

	if (from_cblock(cmd->cache_blocks) == 0)
		/* Nothing to do */
		return 0;

	r = dm_array_cursor_begin(&cmd->info, cmd->root, &cmd->mapping_cursor);
	if (r)
		return r;

	if (hints_valid) {
		r = dm_array_cursor_begin(&cmd->hint_info, cmd->hint_root, &cmd->hint_cursor);
		if (r) {
			dm_array_cursor_end(&cmd->mapping_cursor);
			return r;
		}
	}

	for (cb = 0; ; cb++) {
		r = __load_mapping(cmd, cb, hints_valid,
				   &cmd->mapping_cursor, &cmd->hint_cursor,
				   fn, context);
		if (r)
			goto out;

		/*
		 * We need to break out before we move the cursors.
		 */
		if (cb >= (from_cblock(cmd->cache_blocks) - 1))
			break;

		r = dm_array_cursor_next(&cmd->mapping_cursor);
		if (r) {
			DMERR("dm_array_cursor_next for mapping failed");
			goto out;
		}

		if (hints_valid) {
			r = dm_array_cursor_next(&cmd->hint_cursor);
			if (r) {
				DMERR("dm_array_cursor_next for hint failed");
				goto out;
			}
		}
	}
out:
	/* Always release the cursors, on success and error alike. */
	dm_array_cursor_end(&cmd->mapping_cursor);
	if (hints_valid)
		dm_array_cursor_end(&cmd->hint_cursor);

	return r;
}
1275
/* Feed every valid mapping (and usable hints) to the policy via @fn. */
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn, void *context)
{
	int r;

	READ_LOCK(cmd);
	r = __load_mappings(cmd, policy, fn, context);
	READ_UNLOCK(cmd);

	return r;
}
1288
1289static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
1290{
1291	int r = 0;
1292	__le64 value;
1293	dm_oblock_t oblock;
1294	unsigned flags;
1295
1296	memcpy(&value, leaf, sizeof(value));
1297	unpack_value(value, &oblock, &flags);
1298
1299	return r;
1300}
1301
/* Walk every mapping with the (currently no-op) dump callback. */
static int __dump_mappings(struct dm_cache_metadata *cmd)
{
	return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL);
}

/* Debug helper: traverse all mappings under the read lock. */
void dm_cache_dump(struct dm_cache_metadata *cmd)
{
	READ_LOCK_VOID(cmd);
	__dump_mappings(cmd);
	READ_UNLOCK(cmd);
}
1313
/* Has anything been modified since the current transaction began? */
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
{
	int r;

	READ_LOCK(cmd);
	r = cmd->changed;
	READ_UNLOCK(cmd);

	return r;
}
1324
1325static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
1326{
1327	int r;
1328	unsigned flags;
1329	dm_oblock_t oblock;
1330	__le64 value;
1331
1332	r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
1333	if (r)
1334		return r;
1335
1336	unpack_value(value, &oblock, &flags);
1337
1338	if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
1339		/* nothing to be done */
1340		return 0;
1341
1342	value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
1343	__dm_bless_for_disk(&value);
1344
1345	r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1346			       &value, &cmd->root);
1347	if (r)
1348		return r;
1349
1350	cmd->changed = true;
1351	return 0;
1352
1353}
1354
/* Set or clear the dirty flag for one cache block, under the write lock. */
int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
		       dm_cblock_t cblock, bool dirty)
{
	int r;

	WRITE_LOCK(cmd);
	r = __dirty(cmd, cblock, dirty);
	WRITE_UNLOCK(cmd);

	return r;
}

/* Copy the hit/miss counters out under the read lock. */
void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
{
	READ_LOCK_VOID(cmd);
	*stats = cmd->stats;
	READ_UNLOCK(cmd);
}

/* Replace the in-core counters; they are persisted at the next commit. */
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
{
	WRITE_LOCK_VOID(cmd);
	cmd->stats = *stats;
	WRITE_UNLOCK(cmd);
}
1382
1383int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
1384{
1385	int r;
1386	flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
1387				 clear_clean_shutdown);
1388
1389	WRITE_LOCK(cmd);
 
 
 
1390	r = __commit_transaction(cmd, mutator);
1391	if (r)
1392		goto out;
1393
1394	r = __begin_transaction(cmd);
1395
1396out:
1397	WRITE_UNLOCK(cmd);
1398	return r;
1399}
1400
1401int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
1402					   dm_block_t *result)
1403{
1404	int r = -EINVAL;
1405
1406	READ_LOCK(cmd);
1407	r = dm_sm_get_nr_free(cmd->metadata_sm, result);
 
1408	READ_UNLOCK(cmd);
1409
1410	return r;
1411}
1412
1413int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
1414				   dm_block_t *result)
1415{
1416	int r = -EINVAL;
1417
1418	READ_LOCK(cmd);
1419	r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
 
1420	READ_UNLOCK(cmd);
1421
1422	return r;
1423}
1424
1425/*----------------------------------------------------------------*/
1426
1427static int get_hint(uint32_t index, void *value_le, void *context)
1428{
1429	uint32_t value;
1430	struct dm_cache_policy *policy = context;
1431
1432	value = policy_get_hint(policy, to_cblock(index));
1433	*((__le32 *) value_le) = cpu_to_le32(value);
1434
1435	return 0;
1436}
1437
1438/*
1439 * It's quicker to always delete the hint array, and recreate with
1440 * dm_array_new().
1441 */
1442static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
1443{
1444	int r;
1445	size_t hint_size;
1446	const char *policy_name = dm_cache_policy_get_name(policy);
1447	const unsigned *policy_version = dm_cache_policy_get_version(policy);
1448
1449	if (!policy_name[0] ||
1450	    (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
1451		return -EINVAL;
1452
1453	strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
1454	memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
1455
1456	hint_size = dm_cache_policy_get_hint_size(policy);
1457	if (!hint_size)
1458		return 0; /* short-circuit hints initialization */
1459	cmd->policy_hint_size = hint_size;
1460
1461	if (cmd->hint_root) {
1462		r = dm_array_del(&cmd->hint_info, cmd->hint_root);
1463		if (r)
1464			return r;
1465	}
1466
1467	return dm_array_new(&cmd->hint_info, &cmd->hint_root,
1468			    from_cblock(cmd->cache_blocks),
1469			    get_hint, policy);
1470}
1471
/* Persist the policy's per-block hints, under the write lock. */
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
	int r;

	WRITE_LOCK(cmd);
	r = write_hints(cmd, policy);
	WRITE_UNLOCK(cmd);

	return r;
}
1482
/* *result is true iff no cache block is both mapped and dirty. */
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
{
	int r;

	READ_LOCK(cmd);
	r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
	READ_UNLOCK(cmd);

	return r;
}
1493
/* Switch the underlying block manager into read-only mode. */
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
{
	WRITE_LOCK_VOID(cmd);
	dm_bm_set_read_only(cmd->bm);
	WRITE_UNLOCK(cmd);
}

/*
 * Switch the block manager back to read-write.
 * NOTE(review): cmd_write_lock() bails when dm_bm_is_read_only() is true,
 * which appears to make this a no-op once read-only mode is set — confirm
 * the intended behaviour against the lock helper.
 */
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd)
{
	WRITE_LOCK_VOID(cmd);
	dm_bm_set_read_write(cmd->bm);
	WRITE_UNLOCK(cmd);
}
1507
/*
 * Set NEEDS_CHECK in the in-core flags and push it to the on-disk
 * superblock, signalling that the metadata must be checked with the
 * userspace tools.  If the superblock can't be locked, only the in-core
 * flag is updated and the error is returned.
 */
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;

	WRITE_LOCK(cmd);
	set_bit(NEEDS_CHECK, &cmd->flags);

	r = superblock_lock(cmd, &sblock);
	if (r) {
		DMERR("couldn't read superblock");
		goto out;
	}

	disk_super = dm_block_data(sblock);
	disk_super->flags = cpu_to_le32(cmd->flags);

	dm_bm_unlock(sblock);

out:
	WRITE_UNLOCK(cmd);
	return r;
}

/* Report whether NEEDS_CHECK is set in the in-core flags. */
int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
{
	READ_LOCK(cmd);
	*result = !!test_bit(NEEDS_CHECK, &cmd->flags);
	READ_UNLOCK(cmd);

	return 0;
}
1541
/*
 * Throw away all uncommitted changes by destroying the persistent-data
 * objects and re-opening them (no reformat), which re-reads the last
 * committed state.  If the re-open fails the device enters fail_io mode.
 */
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
{
	int r;

	WRITE_LOCK(cmd);
	__destroy_persistent_data_objects(cmd);
	r = __create_persistent_data_objects(cmd, false);
	if (r)
		cmd->fail_io = true;
	WRITE_UNLOCK(cmd);

	return r;
}