   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm-zoned.h"
   9
  10#include <linux/module.h>
  11#include <linux/crc32.h>
  12#include <linux/sched/mm.h>
  13
  14#define	DM_MSG_PREFIX		"zoned metadata"
  15
  16/*
  17 * Metadata version.
  18 */
  19#define DMZ_META_VER	2
  20
  21/*
  22 * On-disk super block magic.
  23 */
  24#define DMZ_MAGIC	((((unsigned int)('D')) << 24) | \
  25			 (((unsigned int)('Z')) << 16) | \
  26			 (((unsigned int)('B')) <<  8) | \
  27			 ((unsigned int)('D')))
  28
  29/*
   30 * On-disk super block.
   31 * This uses only 512 B but occupies a full 4KB block on disk. This block is
  32 * followed on disk by the mapping table of chunks to zones and the bitmap
  33 * blocks indicating zone block validity.
  34 * The overall resulting metadata format is:
  35 *    (1) Super block (1 block)
  36 *    (2) Chunk mapping table (nr_map_blocks)
  37 *    (3) Bitmap blocks (nr_bitmap_blocks)
  38 * All metadata blocks are stored in conventional zones, starting from
  39 * the first conventional zone found on disk.
  40 */
  41struct dmz_super {
  42	/* Magic number */
  43	__le32		magic;			/*   4 */
  44
  45	/* Metadata version number */
  46	__le32		version;		/*   8 */
  47
  48	/* Generation number */
  49	__le64		gen;			/*  16 */
  50
  51	/* This block number */
  52	__le64		sb_block;		/*  24 */
  53
  54	/* The number of metadata blocks, including this super block */
  55	__le32		nr_meta_blocks;		/*  28 */
  56
  57	/* The number of sequential zones reserved for reclaim */
  58	__le32		nr_reserved_seq;	/*  32 */
  59
  60	/* The number of entries in the mapping table */
  61	__le32		nr_chunks;		/*  36 */
  62
  63	/* The number of blocks used for the chunk mapping table */
  64	__le32		nr_map_blocks;		/*  40 */
  65
  66	/* The number of blocks used for the block bitmaps */
  67	__le32		nr_bitmap_blocks;	/*  44 */
  68
  69	/* Checksum */
  70	__le32		crc;			/*  48 */
  71
  72	/* DM-Zoned label */
  73	u8		dmz_label[32];		/*  80 */
  74
  75	/* DM-Zoned UUID */
  76	u8		dmz_uuid[16];		/*  96 */
  77
  78	/* Device UUID */
  79	u8		dev_uuid[16];		/* 112 */
  80
  81	/* Padding to full 512B sector */
  82	u8		reserved[400];		/* 512 */
  83};
  84
  85/*
  86 * Chunk mapping entry: entries are indexed by chunk number
  87 * and give the zone ID (dzone_id) mapping the chunk on disk.
  88 * This zone may be sequential or random. If it is a sequential
  89 * zone, a second zone (bzone_id) used as a write buffer may
  90 * also be specified. This second zone will always be a randomly
  91 * writeable zone.
  92 */
  93struct dmz_map {
  94	__le32			dzone_id;
  95	__le32			bzone_id;
  96};
  97
  98/*
   99 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
 100 */
 101#define DMZ_MAP_ENTRIES		(DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
 102#define DMZ_MAP_ENTRIES_SHIFT	(ilog2(DMZ_MAP_ENTRIES))
 103#define DMZ_MAP_ENTRIES_MASK	(DMZ_MAP_ENTRIES - 1)
 104#define DMZ_MAP_UNMAPPED	UINT_MAX
 105
 106/*
  107 * Metadata block descriptor (for cached metadata blocks).
 108 */
 109struct dmz_mblock {
 110	struct rb_node		node;
 111	struct list_head	link;
 112	sector_t		no;
 113	unsigned int		ref;
 114	unsigned long		state;
 115	struct page		*page;
 116	void			*data;
 117};
 118
 119/*
 120 * Metadata block state flags.
 121 */
 122enum {
 123	DMZ_META_DIRTY,
 124	DMZ_META_READING,
 125	DMZ_META_WRITING,
 126	DMZ_META_ERROR,
 127};
 128
 129/*
 130 * Super block information (one per metadata set).
 131 */
 132struct dmz_sb {
 133	sector_t		block;
 134	struct dmz_dev		*dev;
 135	struct dmz_mblock	*mblk;
 136	struct dmz_super	*sb;
 137	struct dm_zone		*zone;
 138};
 139
 140/*
 141 * In-memory metadata.
 142 */
 143struct dmz_metadata {
 144	struct dmz_dev		*dev;
 145	unsigned int		nr_devs;
 146
 147	char			devname[BDEVNAME_SIZE];
 148	char			label[BDEVNAME_SIZE];
 149	uuid_t			uuid;
 150
 151	sector_t		zone_bitmap_size;
 152	unsigned int		zone_nr_bitmap_blocks;
 153	unsigned int		zone_bits_per_mblk;
 154
 155	sector_t		zone_nr_blocks;
 156	sector_t		zone_nr_blocks_shift;
 157
 158	sector_t		zone_nr_sectors;
 159	sector_t		zone_nr_sectors_shift;
 160
 161	unsigned int		nr_bitmap_blocks;
 162	unsigned int		nr_map_blocks;
 163
 164	unsigned int		nr_zones;
 165	unsigned int		nr_useable_zones;
 166	unsigned int		nr_meta_blocks;
 167	unsigned int		nr_meta_zones;
 168	unsigned int		nr_data_zones;
 169	unsigned int		nr_cache_zones;
 170	unsigned int		nr_rnd_zones;
 171	unsigned int		nr_reserved_seq;
 172	unsigned int		nr_chunks;
 173
 174	/* Zone information array */
 175	struct xarray		zones;
 176
 177	struct dmz_sb		sb[2];
 178	unsigned int		mblk_primary;
 179	unsigned int		sb_version;
 180	u64			sb_gen;
 181	unsigned int		min_nr_mblks;
 182	unsigned int		max_nr_mblks;
 183	atomic_t		nr_mblks;
 184	struct rw_semaphore	mblk_sem;
 185	struct mutex		mblk_flush_lock;
 186	spinlock_t		mblk_lock;
 187	struct rb_root		mblk_rbtree;
 188	struct list_head	mblk_lru_list;
 189	struct list_head	mblk_dirty_list;
 190	struct shrinker		mblk_shrinker;
 191
 192	/* Zone allocation management */
 193	struct mutex		map_lock;
 194	struct dmz_mblock	**map_mblk;
 195
 196	unsigned int		nr_cache;
 197	atomic_t		unmap_nr_cache;
 198	struct list_head	unmap_cache_list;
 199	struct list_head	map_cache_list;
 200
 201	atomic_t		nr_reserved_seq_zones;
 202	struct list_head	reserved_seq_zones_list;
 203
 204	wait_queue_head_t	free_wq;
 205};
 206
 207#define dmz_zmd_info(zmd, format, args...)	\
 208	DMINFO("(%s): " format, (zmd)->label, ## args)
 209
 210#define dmz_zmd_err(zmd, format, args...)	\
 211	DMERR("(%s): " format, (zmd)->label, ## args)
 212
 213#define dmz_zmd_warn(zmd, format, args...)	\
 214	DMWARN("(%s): " format, (zmd)->label, ## args)
 215
 216#define dmz_zmd_debug(zmd, format, args...)	\
 217	DMDEBUG("(%s): " format, (zmd)->label, ## args)
 218/*
 219 * Various accessors
 220 */
 221static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
 222{
 223	if (WARN_ON(!zone))
 224		return 0;
 225
 226	return zone->id - zone->dev->zone_offset;
 227}
 228
 229sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
 230{
 231	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
 232
 233	return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
 234}
 235
 236sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
 237{
 238	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
 239
 240	return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
 241}
 242
 243unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
 244{
 245	return zmd->zone_nr_blocks;
 246}
 247
 248unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
 249{
 250	return zmd->zone_nr_blocks_shift;
 251}
 252
 253unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
 254{
 255	return zmd->zone_nr_sectors;
 256}
 257
 258unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
 259{
 260	return zmd->zone_nr_sectors_shift;
 261}
 262
 263unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
 264{
 265	return zmd->nr_zones;
 266}
 267
 268unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
 269{
 270	return zmd->nr_chunks;
 271}
 272
 273unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx)
 274{
 275	return zmd->dev[idx].nr_rnd;
 276}
 277
 278unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx)
 279{
 280	return atomic_read(&zmd->dev[idx].unmap_nr_rnd);
 281}
 282
 283unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
 284{
 285	return zmd->nr_cache;
 286}
 287
 288unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
 289{
 290	return atomic_read(&zmd->unmap_nr_cache);
 291}
 292
 293unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx)
 294{
 295	return zmd->dev[idx].nr_seq;
 296}
 297
 298unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx)
 299{
 300	return atomic_read(&zmd->dev[idx].unmap_nr_seq);
 301}
 302
 303static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
 304{
 305	return xa_load(&zmd->zones, zone_id);
 306}
 307
 308static struct dm_zone *dmz_insert(struct dmz_metadata *zmd,
 309				  unsigned int zone_id, struct dmz_dev *dev)
 310{
 311	struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);
 312
 313	if (!zone)
 314		return ERR_PTR(-ENOMEM);
 315
 316	if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
 317		kfree(zone);
 318		return ERR_PTR(-EBUSY);
 319	}
 320
 321	INIT_LIST_HEAD(&zone->link);
 322	atomic_set(&zone->refcount, 0);
 323	zone->id = zone_id;
 324	zone->chunk = DMZ_MAP_UNMAPPED;
 325	zone->dev = dev;
 326
 327	return zone;
 328}
 329
 330const char *dmz_metadata_label(struct dmz_metadata *zmd)
 331{
 332	return (const char *)zmd->label;
 333}
 334
 335bool dmz_check_dev(struct dmz_metadata *zmd)
 336{
 337	unsigned int i;
 338
 339	for (i = 0; i < zmd->nr_devs; i++) {
 340		if (!dmz_check_bdev(&zmd->dev[i]))
 341			return false;
 342	}
 343	return true;
 344}
 345
 346bool dmz_dev_is_dying(struct dmz_metadata *zmd)
 347{
 348	unsigned int i;
 349
 350	for (i = 0; i < zmd->nr_devs; i++) {
 351		if (dmz_bdev_is_dying(&zmd->dev[i]))
 352			return true;
 353	}
 354	return false;
 355}
 356
 357/*
 358 * Lock/unlock mapping table.
 359 * The map lock also protects all the zone lists.
 360 */
 361void dmz_lock_map(struct dmz_metadata *zmd)
 362{
 363	mutex_lock(&zmd->map_lock);
 364}
 365
 366void dmz_unlock_map(struct dmz_metadata *zmd)
 367{
 368	mutex_unlock(&zmd->map_lock);
 369}
 370
 371/*
 372 * Lock/unlock metadata access. This is a "read" lock on a semaphore
 373 * that prevents metadata flush from running while metadata are being
 374 * modified. The actual metadata write mutual exclusion is achieved with
 375 * the map lock and zone state management (active and reclaim state are
 376 * mutually exclusive).
 377 */
 378void dmz_lock_metadata(struct dmz_metadata *zmd)
 379{
 380	down_read(&zmd->mblk_sem);
 381}
 382
 383void dmz_unlock_metadata(struct dmz_metadata *zmd)
 384{
 385	up_read(&zmd->mblk_sem);
 386}
 387
 388/*
 389 * Lock/unlock flush: prevent concurrent executions
 390 * of dmz_flush_metadata as well as metadata modification in reclaim
 391 * while flush is being executed.
 392 */
 393void dmz_lock_flush(struct dmz_metadata *zmd)
 394{
 395	mutex_lock(&zmd->mblk_flush_lock);
 396}
 397
 398void dmz_unlock_flush(struct dmz_metadata *zmd)
 399{
 400	mutex_unlock(&zmd->mblk_flush_lock);
 401}
 402
 403/*
 404 * Allocate a metadata block.
 405 */
 406static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
 407					   sector_t mblk_no)
 408{
 409	struct dmz_mblock *mblk = NULL;
 410
 411	/* See if we can reuse cached blocks */
 412	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
 413		spin_lock(&zmd->mblk_lock);
 414		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
 415						struct dmz_mblock, link);
 416		if (mblk) {
 417			list_del_init(&mblk->link);
 418			rb_erase(&mblk->node, &zmd->mblk_rbtree);
 419			mblk->no = mblk_no;
 420		}
 421		spin_unlock(&zmd->mblk_lock);
 422		if (mblk)
 423			return mblk;
 424	}
 425
 426	/* Allocate a new block */
 427	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
 428	if (!mblk)
 429		return NULL;
 430
 431	mblk->page = alloc_page(GFP_NOIO);
 432	if (!mblk->page) {
 433		kfree(mblk);
 434		return NULL;
 435	}
 436
 437	RB_CLEAR_NODE(&mblk->node);
 438	INIT_LIST_HEAD(&mblk->link);
 439	mblk->ref = 0;
 440	mblk->state = 0;
 441	mblk->no = mblk_no;
 442	mblk->data = page_address(mblk->page);
 443
 444	atomic_inc(&zmd->nr_mblks);
 445
 446	return mblk;
 447}
 448
 449/*
 450 * Free a metadata block.
 451 */
 452static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
 453{
 454	__free_pages(mblk->page, 0);
 455	kfree(mblk);
 456
 457	atomic_dec(&zmd->nr_mblks);
 458}
 459
 460/*
 461 * Insert a metadata block in the rbtree.
 462 */
 463static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
 464{
 465	struct rb_root *root = &zmd->mblk_rbtree;
 466	struct rb_node **new = &(root->rb_node), *parent = NULL;
 467	struct dmz_mblock *b;
 468
 469	/* Figure out where to put the new node */
 470	while (*new) {
 471		b = container_of(*new, struct dmz_mblock, node);
 472		parent = *new;
 473		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
 474	}
 475
 476	/* Add new node and rebalance tree */
 477	rb_link_node(&mblk->node, parent, new);
 478	rb_insert_color(&mblk->node, root);
 479}
 480
 481/*
 482 * Lookup a metadata block in the rbtree. If the block is found, increment
 483 * its reference count.
 484 */
 485static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
 486					      sector_t mblk_no)
 487{
 488	struct rb_root *root = &zmd->mblk_rbtree;
 489	struct rb_node *node = root->rb_node;
 490	struct dmz_mblock *mblk;
 491
 492	while (node) {
 493		mblk = container_of(node, struct dmz_mblock, node);
 494		if (mblk->no == mblk_no) {
 495			/*
 496			 * If this is the first reference to the block,
 497			 * remove it from the LRU list.
 498			 */
 499			mblk->ref++;
 500			if (mblk->ref == 1 &&
 501			    !test_bit(DMZ_META_DIRTY, &mblk->state))
 502				list_del_init(&mblk->link);
 503			return mblk;
 504		}
 505		node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
 506	}
 507
 508	return NULL;
 509}
 510
 511/*
 512 * Metadata block BIO end callback.
 513 */
 514static void dmz_mblock_bio_end_io(struct bio *bio)
 515{
 516	struct dmz_mblock *mblk = bio->bi_private;
 517	int flag;
 518
 519	if (bio->bi_status)
 520		set_bit(DMZ_META_ERROR, &mblk->state);
 521
 522	if (bio_op(bio) == REQ_OP_WRITE)
 523		flag = DMZ_META_WRITING;
 524	else
 525		flag = DMZ_META_READING;
 526
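     	/*
     	 * Clear the READING or WRITING flag and wake up any waiter in
     	 * dmz_get_mblock() or dmz_write_dirty_mblocks().
     	 */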
 527	clear_bit_unlock(flag, &mblk->state);
 528	smp_mb__after_atomic();
 529	wake_up_bit(&mblk->state, flag);
 530
 531	bio_put(bio);
 532}
 533
 534/*
 535 * Read an uncached metadata block from disk and add it to the cache.
 536 */
 537static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
 538					      sector_t mblk_no)
 539{
 540	struct dmz_mblock *mblk, *m;
 541	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
 542	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
 543	struct bio *bio;
 544
 545	if (dmz_bdev_is_dying(dev))
 546		return ERR_PTR(-EIO);
 547
 548	/* Get a new block and a BIO to read it */
 549	mblk = dmz_alloc_mblock(zmd, mblk_no);
 550	if (!mblk)
 551		return ERR_PTR(-ENOMEM);
 552
 553	bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO,
 554			GFP_NOIO);
 555
 556	spin_lock(&zmd->mblk_lock);
 557
 558	/*
 559	 * Make sure that another context did not start reading
 560	 * the block already.
 561	 */
 562	m = dmz_get_mblock_fast(zmd, mblk_no);
 563	if (m) {
 564		spin_unlock(&zmd->mblk_lock);
 565		dmz_free_mblock(zmd, mblk);
 566		bio_put(bio);
 567		return m;
 568	}
 569
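     	/*
     	 * Publish the new block in the rbtree with DMZ_META_READING set so
     	 * that concurrent lookups wait for the read to complete.
     	 */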
 570	mblk->ref++;
 571	set_bit(DMZ_META_READING, &mblk->state);
 572	dmz_insert_mblock(zmd, mblk);
 573
 574	spin_unlock(&zmd->mblk_lock);
 575
 576	/* Submit read BIO */
 577	bio->bi_iter.bi_sector = dmz_blk2sect(block);
 578	bio->bi_private = mblk;
 579	bio->bi_end_io = dmz_mblock_bio_end_io;
 580	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
 581	submit_bio(bio);
 582
 583	return mblk;
 584}
 585
 586/*
 587 * Free metadata blocks.
 588 */
 589static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
 590					     unsigned long limit)
 591{
 592	struct dmz_mblock *mblk;
 593	unsigned long count = 0;
 594
 595	if (!zmd->max_nr_mblks)
 596		return 0;
 597
 598	while (!list_empty(&zmd->mblk_lru_list) &&
 599	       atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
 600	       count < limit) {
 601		mblk = list_first_entry(&zmd->mblk_lru_list,
 602					struct dmz_mblock, link);
 603		list_del_init(&mblk->link);
 604		rb_erase(&mblk->node, &zmd->mblk_rbtree);
 605		dmz_free_mblock(zmd, mblk);
 606		count++;
 607	}
 608
 609	return count;
 610}
 611
 612/*
 613 * For mblock shrinker: get the number of unused metadata blocks in the cache.
 614 */
 615static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
 616					       struct shrink_control *sc)
 617{
 618	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
 619
 620	return atomic_read(&zmd->nr_mblks);
 621}
 622
 623/*
 624 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
 625 */
 626static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
 627					      struct shrink_control *sc)
 628{
 629	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
 630	unsigned long count;
 631
 632	spin_lock(&zmd->mblk_lock);
 633	count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
 634	spin_unlock(&zmd->mblk_lock);
 635
 636	return count ? count : SHRINK_STOP;
 637}
 638
 639/*
 640 * Release a metadata block.
 641 */
 642static void dmz_release_mblock(struct dmz_metadata *zmd,
 643			       struct dmz_mblock *mblk)
 644{
 645
 646	if (!mblk)
 647		return;
 648
 649	spin_lock(&zmd->mblk_lock);
 650
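     	/*
     	 * On the last reference drop, free blocks that failed I/O and put
     	 * clean blocks on the LRU list so they can be reused or reclaimed.
     	 */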
 651	mblk->ref--;
 652	if (mblk->ref == 0) {
 653		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
 654			rb_erase(&mblk->node, &zmd->mblk_rbtree);
 655			dmz_free_mblock(zmd, mblk);
 656		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
 657			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
 658			dmz_shrink_mblock_cache(zmd, 1);
 659		}
 660	}
 661
 662	spin_unlock(&zmd->mblk_lock);
 663}
 664
 665/*
 666 * Get a metadata block from the rbtree. If the block
 667 * is not present, read it from disk.
 668 */
 669static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
 670					 sector_t mblk_no)
 671{
 672	struct dmz_mblock *mblk;
 673	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
 674
 675	/* Check rbtree */
 676	spin_lock(&zmd->mblk_lock);
 677	mblk = dmz_get_mblock_fast(zmd, mblk_no);
 678	spin_unlock(&zmd->mblk_lock);
 679
 680	if (!mblk) {
 681		/* Cache miss: read the block from disk */
 682		mblk = dmz_get_mblock_slow(zmd, mblk_no);
 683		if (IS_ERR(mblk))
 684			return mblk;
 685	}
 686
 687	/* Wait for on-going read I/O and check for error */
 688	wait_on_bit_io(&mblk->state, DMZ_META_READING,
 689		       TASK_UNINTERRUPTIBLE);
 690	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
 691		dmz_release_mblock(zmd, mblk);
 692		dmz_check_bdev(dev);
 693		return ERR_PTR(-EIO);
 694	}
 695
 696	return mblk;
 697}
 698
 699/*
 700 * Mark a metadata block dirty.
 701 */
 702static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
 703{
 704	spin_lock(&zmd->mblk_lock);
 705	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
 706		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
 707	spin_unlock(&zmd->mblk_lock);
 708}
 709
 710/*
 711 * Issue a metadata block write BIO.
 712 */
 713static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
 714			    unsigned int set)
 715{
 716	struct dmz_dev *dev = zmd->sb[set].dev;
 717	sector_t block = zmd->sb[set].block + mblk->no;
 718	struct bio *bio;
 719
 720	if (dmz_bdev_is_dying(dev))
 721		return -EIO;
 722
 723	bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO,
 724			GFP_NOIO);
 725
 726	set_bit(DMZ_META_WRITING, &mblk->state);
 727
 728	bio->bi_iter.bi_sector = dmz_blk2sect(block);
 729	bio->bi_private = mblk;
 730	bio->bi_end_io = dmz_mblock_bio_end_io;
 731	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
 732	submit_bio(bio);
 733
 734	return 0;
 735}
 736
 737/*
 738 * Read/write a metadata block.
 739 */
 740static int dmz_rdwr_block(struct dmz_dev *dev, enum req_op op,
 741			  sector_t block, struct page *page)
 742{
 743	struct bio *bio;
 744	int ret;
 745
 746	if (WARN_ON(!dev))
 747		return -EIO;
 748
 749	if (dmz_bdev_is_dying(dev))
 750		return -EIO;
 751
 752	bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
 753			GFP_NOIO);
 754	bio->bi_iter.bi_sector = dmz_blk2sect(block);
 755	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
 756	ret = submit_bio_wait(bio);
 757	bio_put(bio);
 758
 759	if (ret)
 760		dmz_check_bdev(dev);
 761	return ret;
 762}
 763
 764/*
 765 * Write super block of the specified metadata set.
 766 */
 767static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
 768{
 769	struct dmz_mblock *mblk = zmd->sb[set].mblk;
 770	struct dmz_super *sb = zmd->sb[set].sb;
 771	struct dmz_dev *dev = zmd->sb[set].dev;
 772	sector_t sb_block;
 773	u64 sb_gen = zmd->sb_gen + 1;
 774	int ret;
 775
 776	sb->magic = cpu_to_le32(DMZ_MAGIC);
 777
 778	sb->version = cpu_to_le32(zmd->sb_version);
 779	if (zmd->sb_version > 1) {
 780		BUILD_BUG_ON(UUID_SIZE != 16);
 781		export_uuid(sb->dmz_uuid, &zmd->uuid);
 782		memcpy(sb->dmz_label, zmd->label, BDEVNAME_SIZE);
 783		export_uuid(sb->dev_uuid, &dev->uuid);
 784	}
 785
 786	sb->gen = cpu_to_le64(sb_gen);
 787
 788	/*
 789	 * The metadata always references the absolute block address,
  790	 * i.e. relative to the entire block range, not the per-device
 791	 * block address.
 792	 */
 793	sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
 794	sb->sb_block = cpu_to_le64(sb_block);
 795	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
 796	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
 797	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);
 798
 799	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
 800	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);
 801
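     	/*
     	 * Compute the CRC over the full 4KB block with the crc field zeroed,
     	 * seeded with the new generation number.
     	 */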
 802	sb->crc = 0;
 803	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));
 804
 805	ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
 806			     mblk->page);
 807	if (ret == 0)
 808		ret = blkdev_issue_flush(dev->bdev);
 809
 810	return ret;
 811}
 812
 813/*
 814 * Write dirty metadata blocks to the specified set.
 815 */
 816static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 817				   struct list_head *write_list,
 818				   unsigned int set)
 819{
 820	struct dmz_mblock *mblk;
 821	struct dmz_dev *dev = zmd->sb[set].dev;
 822	struct blk_plug plug;
 823	int ret = 0, nr_mblks_submitted = 0;
 824
 825	/* Issue writes */
 826	blk_start_plug(&plug);
 827	list_for_each_entry(mblk, write_list, link) {
 828		ret = dmz_write_mblock(zmd, mblk, set);
 829		if (ret)
 830			break;
 831		nr_mblks_submitted++;
 832	}
 833	blk_finish_plug(&plug);
 834
 835	/* Wait for completion */
 836	list_for_each_entry(mblk, write_list, link) {
 837		if (!nr_mblks_submitted)
 838			break;
 839		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
 840			       TASK_UNINTERRUPTIBLE);
 841		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
 842			clear_bit(DMZ_META_ERROR, &mblk->state);
 843			dmz_check_bdev(dev);
 844			ret = -EIO;
 845		}
 846		nr_mblks_submitted--;
 847	}
 848
 849	/* Flush drive cache (this will also sync data) */
 850	if (ret == 0)
 851		ret = blkdev_issue_flush(dev->bdev);
 852
 853	return ret;
 854}
 855
 856/*
 857 * Log dirty metadata blocks.
 858 */
 859static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
 860				 struct list_head *write_list)
 861{
 862	unsigned int log_set = zmd->mblk_primary ^ 0x1;
 863	int ret;
 864
 865	/* Write dirty blocks to the log */
 866	ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
 867	if (ret)
 868		return ret;
 869
 870	/*
 871	 * No error so far: now validate the log by updating the
 872	 * log index super block generation.
 873	 */
 874	ret = dmz_write_sb(zmd, log_set);
 875	if (ret)
 876		return ret;
 877
 878	return 0;
 879}
 880
 881/*
 882 * Flush dirty metadata blocks.
 883 */
 884int dmz_flush_metadata(struct dmz_metadata *zmd)
 885{
 886	struct dmz_mblock *mblk;
 887	struct list_head write_list;
 888	struct dmz_dev *dev;
 889	int ret;
 890
 891	if (WARN_ON(!zmd))
 892		return 0;
 893
 894	INIT_LIST_HEAD(&write_list);
 895
 896	/*
 897	 * Make sure that metadata blocks are stable before logging: take
 898	 * the write lock on the metadata semaphore to prevent target BIOs
 899	 * from modifying metadata.
 900	 */
 901	down_write(&zmd->mblk_sem);
 902	dev = zmd->sb[zmd->mblk_primary].dev;
 903
 904	/*
 905	 * This is called from the target flush work and reclaim work.
 906	 * Concurrent execution is not allowed.
 907	 */
 908	dmz_lock_flush(zmd);
 909
 910	if (dmz_bdev_is_dying(dev)) {
 911		ret = -EIO;
 912		goto out;
 913	}
 914
 915	/* Get dirty blocks */
 916	spin_lock(&zmd->mblk_lock);
 917	list_splice_init(&zmd->mblk_dirty_list, &write_list);
 918	spin_unlock(&zmd->mblk_lock);
 919
 920	/* If there are no dirty metadata blocks, just flush the device cache */
 921	if (list_empty(&write_list)) {
 922		ret = blkdev_issue_flush(dev->bdev);
 923		goto err;
 924	}
 925
 926	/*
 927	 * The primary metadata set is still clean. Keep it this way until
 928	 * all updates are successful in the secondary set. That is, use
 929	 * the secondary set as a log.
 930	 */
 931	ret = dmz_log_dirty_mblocks(zmd, &write_list);
 932	if (ret)
 933		goto err;
 934
 935	/*
 936	 * The log is on disk. It is now safe to update in place
 937	 * in the primary metadata set.
 938	 */
 939	ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
 940	if (ret)
 941		goto err;
 942
 943	ret = dmz_write_sb(zmd, zmd->mblk_primary);
 944	if (ret)
 945		goto err;
 946
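     	/*
     	 * Both metadata sets are now up to date on disk: mark the flushed
     	 * blocks clean and put unreferenced ones back on the LRU list.
     	 */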
 947	while (!list_empty(&write_list)) {
 948		mblk = list_first_entry(&write_list, struct dmz_mblock, link);
 949		list_del_init(&mblk->link);
 950
 951		spin_lock(&zmd->mblk_lock);
 952		clear_bit(DMZ_META_DIRTY, &mblk->state);
 953		if (mblk->ref == 0)
 954			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
 955		spin_unlock(&zmd->mblk_lock);
 956	}
 957
 958	zmd->sb_gen++;
 959out:
 960	dmz_unlock_flush(zmd);
 961	up_write(&zmd->mblk_sem);
 962
 963	return ret;
 964
 965err:
 966	if (!list_empty(&write_list)) {
 967		spin_lock(&zmd->mblk_lock);
 968		list_splice(&write_list, &zmd->mblk_dirty_list);
 969		spin_unlock(&zmd->mblk_lock);
 970	}
 971	if (!dmz_check_bdev(dev))
 972		ret = -EIO;
 973	goto out;
 974}
 975
 976/*
 977 * Check super block.
 978 */
 979static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
 980			bool tertiary)
 981{
 982	struct dmz_super *sb = dsb->sb;
 983	struct dmz_dev *dev = dsb->dev;
 984	unsigned int nr_meta_zones, nr_data_zones;
 985	u32 crc, stored_crc;
 986	u64 gen, sb_block;
 987
 988	if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
 989		dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
 990			    DMZ_MAGIC, le32_to_cpu(sb->magic));
 991		return -ENXIO;
 992	}
 993
 994	zmd->sb_version = le32_to_cpu(sb->version);
 995	if (zmd->sb_version > DMZ_META_VER) {
 996		dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
 997			    DMZ_META_VER, zmd->sb_version);
 998		return -EINVAL;
 999	}
1000	if (zmd->sb_version < 2 && tertiary) {
1001		dmz_dev_err(dev, "Tertiary superblocks are not supported");
1002		return -EINVAL;
1003	}
1004
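     	/*
     	 * Verify the checksum: the CRC covers the whole block with the crc
     	 * field zeroed and is seeded with the generation number, matching
     	 * dmz_write_sb().
     	 */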
1005	gen = le64_to_cpu(sb->gen);
1006	stored_crc = le32_to_cpu(sb->crc);
1007	sb->crc = 0;
1008	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
1009	if (crc != stored_crc) {
1010		dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
1011			    crc, stored_crc);
1012		return -ENXIO;
1013	}
1014
1015	sb_block = le64_to_cpu(sb->sb_block);
 1016	if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
1017		dmz_dev_err(dev, "Invalid superblock position "
1018			    "(is %llu expected %llu)",
1019			    sb_block,
1020			    (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
1021		return -EINVAL;
1022	}
1023	if (zmd->sb_version > 1) {
1024		uuid_t sb_uuid;
1025
1026		import_uuid(&sb_uuid, sb->dmz_uuid);
1027		if (uuid_is_null(&sb_uuid)) {
1028			dmz_dev_err(dev, "NULL DM-Zoned uuid");
1029			return -ENXIO;
1030		} else if (uuid_is_null(&zmd->uuid)) {
1031			uuid_copy(&zmd->uuid, &sb_uuid);
1032		} else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
1033			dmz_dev_err(dev, "mismatching DM-Zoned uuid, "
1034				    "is %pUl expected %pUl",
1035				    &sb_uuid, &zmd->uuid);
1036			return -ENXIO;
1037		}
1038		if (!strlen(zmd->label))
1039			memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
1040		else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
1041			dmz_dev_err(dev, "mismatching DM-Zoned label, "
1042				    "is %s expected %s",
1043				    sb->dmz_label, zmd->label);
1044			return -ENXIO;
1045		}
1046		import_uuid(&dev->uuid, sb->dev_uuid);
1047		if (uuid_is_null(&dev->uuid)) {
1048			dmz_dev_err(dev, "NULL device uuid");
1049			return -ENXIO;
1050		}
1051
1052		if (tertiary) {
1053			/*
1054			 * Generation number should be 0, but it doesn't
1055			 * really matter if it isn't.
1056			 */
1057			if (gen != 0)
1058				dmz_dev_warn(dev, "Invalid generation %llu",
1059					    gen);
1060			return 0;
1061		}
1062	}
1063
1064	nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
1065		>> zmd->zone_nr_blocks_shift;
1066	if (!nr_meta_zones ||
1067	    (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) ||
1068	    (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) {
1069		dmz_dev_err(dev, "Invalid number of metadata blocks");
1070		return -ENXIO;
1071	}
1072
1073	if (!le32_to_cpu(sb->nr_reserved_seq) ||
1074	    le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
1075		dmz_dev_err(dev, "Invalid number of reserved sequential zones");
1076		return -ENXIO;
1077	}
1078
1079	nr_data_zones = zmd->nr_useable_zones -
1080		(nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
1081	if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
1082		dmz_dev_err(dev, "Invalid number of chunks %u / %u",
1083			    le32_to_cpu(sb->nr_chunks), nr_data_zones);
1084		return -ENXIO;
1085	}
1086
1087	/* OK */
1088	zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
1089	zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
1090	zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
1091	zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
1092	zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
1093	zmd->nr_meta_zones = nr_meta_zones;
1094	zmd->nr_data_zones = nr_data_zones;
1095
1096	return 0;
1097}
1098
1099/*
1100 * Read the first or second super block from disk.
1101 */
1102static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
1103{
1104	dmz_zmd_debug(zmd, "read superblock set %d dev %pg block %llu",
1105		      set, sb->dev->bdev, sb->block);
1106
1107	return dmz_rdwr_block(sb->dev, REQ_OP_READ,
1108			      sb->block, sb->mblk->page);
1109}
1110
1111/*
 1112 * Determine the position of the secondary super block on disk.
1113 * This is used only if a corruption of the primary super block
1114 * is detected.
1115 */
1116static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
1117{
1118	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
1119	struct dmz_mblock *mblk;
1120	unsigned int zone_id = zmd->sb[0].zone->id;
1121	int i;
1122
1123	/* Allocate a block */
1124	mblk = dmz_alloc_mblock(zmd, 0);
1125	if (!mblk)
1126		return -ENOMEM;
1127
1128	zmd->sb[1].mblk = mblk;
1129	zmd->sb[1].sb = mblk->data;
1130
1131	/* Bad first super block: search for the second one */
1132	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
1133	zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
1134	zmd->sb[1].dev = zmd->sb[0].dev;
1135	for (i = 1; i < zmd->nr_rnd_zones; i++) {
1136		if (dmz_read_sb(zmd, &zmd->sb[1], 1) != 0)
1137			break;
1138		if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
1139			return 0;
1140		zmd->sb[1].block += zone_nr_blocks;
1141		zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
1142	}
1143
1144	dmz_free_mblock(zmd, mblk);
1145	zmd->sb[1].mblk = NULL;
1146	zmd->sb[1].zone = NULL;
1147	zmd->sb[1].dev = NULL;
1148
1149	return -EIO;
1150}
1151
1152/*
1153 * Read a super block from disk.
1154 */
1155static int dmz_get_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
1156{
1157	struct dmz_mblock *mblk;
1158	int ret;
1159
1160	/* Allocate a block */
1161	mblk = dmz_alloc_mblock(zmd, 0);
1162	if (!mblk)
1163		return -ENOMEM;
1164
1165	sb->mblk = mblk;
1166	sb->sb = mblk->data;
1167
1168	/* Read super block */
1169	ret = dmz_read_sb(zmd, sb, set);
1170	if (ret) {
1171		dmz_free_mblock(zmd, mblk);
1172		sb->mblk = NULL;
1173		return ret;
1174	}
1175
1176	return 0;
1177}
1178
1179/*
1180 * Recover a metadata set.
1181 */
1182static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
1183{
1184	unsigned int src_set = dst_set ^ 0x1;
1185	struct page *page;
1186	int i, ret;
1187
1188	dmz_dev_warn(zmd->sb[dst_set].dev,
1189		     "Metadata set %u invalid: recovering", dst_set);
1190
1191	if (dst_set == 0)
1192		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
1193	else
1194		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
1195
1196	page = alloc_page(GFP_NOIO);
1197	if (!page)
1198		return -ENOMEM;
1199
1200	/* Copy metadata blocks */
1201	for (i = 1; i < zmd->nr_meta_blocks; i++) {
1202		ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
1203				     zmd->sb[src_set].block + i, page);
1204		if (ret)
1205			goto out;
1206		ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
1207				     zmd->sb[dst_set].block + i, page);
1208		if (ret)
1209			goto out;
1210	}
1211
1212	/* Finalize with the super block */
1213	if (!zmd->sb[dst_set].mblk) {
1214		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
1215		if (!zmd->sb[dst_set].mblk) {
1216			ret = -ENOMEM;
1217			goto out;
1218		}
1219		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
1220	}
1221
1222	ret = dmz_write_sb(zmd, dst_set);
1223out:
1224	__free_pages(page, 0);
1225
1226	return ret;
1227}
1228
1229/*
1230 * Get super block from disk.
1231 */
1232static int dmz_load_sb(struct dmz_metadata *zmd)
1233{
1234	bool sb_good[2] = {false, false};
1235	u64 sb_gen[2] = {0, 0};
1236	int ret;
1237
1238	if (!zmd->sb[0].zone) {
1239		dmz_zmd_err(zmd, "Primary super block zone not set");
1240		return -ENXIO;
1241	}
1242
1243	/* Read and check the primary super block */
1244	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
1245	zmd->sb[0].dev = zmd->sb[0].zone->dev;
1246	ret = dmz_get_sb(zmd, &zmd->sb[0], 0);
1247	if (ret) {
1248		dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
1249		return ret;
1250	}
1251
1252	ret = dmz_check_sb(zmd, &zmd->sb[0], false);
1253
1254	/* Read and check secondary super block */
1255	if (ret == 0) {
1256		sb_good[0] = true;
1257		if (!zmd->sb[1].zone) {
1258			unsigned int zone_id =
1259				zmd->sb[0].zone->id + zmd->nr_meta_zones;
1260
1261			zmd->sb[1].zone = dmz_get(zmd, zone_id);
1262		}
1263		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
1264		zmd->sb[1].dev = zmd->sb[0].dev;
1265		ret = dmz_get_sb(zmd, &zmd->sb[1], 1);
1266	} else
1267		ret = dmz_lookup_secondary_sb(zmd);
1268
1269	if (ret) {
1270		dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
1271		return ret;
1272	}
1273
1274	ret = dmz_check_sb(zmd, &zmd->sb[1], false);
1275	if (ret == 0)
1276		sb_good[1] = true;
1277
1278	/* Use highest generation sb first */
1279	if (!sb_good[0] && !sb_good[1]) {
1280		dmz_zmd_err(zmd, "No valid super block found");
1281		return -EIO;
1282	}
1283
1284	if (sb_good[0])
1285		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
1286	else {
1287		ret = dmz_recover_mblocks(zmd, 0);
1288		if (ret) {
1289			dmz_dev_err(zmd->sb[0].dev,
1290				    "Recovery of superblock 0 failed");
1291			return -EIO;
1292		}
1293	}
1294
1295	if (sb_good[1])
1296		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
1297	else {
1298		ret = dmz_recover_mblocks(zmd, 1);
1299
1300		if (ret) {
1301			dmz_dev_err(zmd->sb[1].dev,
1302				    "Recovery of superblock 1 failed");
1303			return -EIO;
1304		}
1305	}
1306
1307	if (sb_gen[0] >= sb_gen[1]) {
1308		zmd->sb_gen = sb_gen[0];
1309		zmd->mblk_primary = 0;
1310	} else {
1311		zmd->sb_gen = sb_gen[1];
1312		zmd->mblk_primary = 1;
1313	}
1314
1315	dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
1316		      "Using super block %u (gen %llu)",
1317		      zmd->mblk_primary, zmd->sb_gen);
1318
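     	/*
     	 * With the version 2 on-disk format, each additional device holds a
     	 * tertiary super block in its first zone. Check them all.
     	 */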
1319	if (zmd->sb_version > 1) {
1320		int i;
1321		struct dmz_sb *sb;
1322
1323		sb = kzalloc(sizeof(struct dmz_sb), GFP_KERNEL);
1324		if (!sb)
1325			return -ENOMEM;
1326		for (i = 1; i < zmd->nr_devs; i++) {
1327			sb->block = 0;
1328			sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
1329			sb->dev = &zmd->dev[i];
1330			if (!dmz_is_meta(sb->zone)) {
1331				dmz_dev_err(sb->dev,
1332					    "Tertiary super block zone %u not marked as metadata zone",
1333					    sb->zone->id);
1334				ret = -EINVAL;
1335				goto out_kfree;
1336			}
1337			ret = dmz_get_sb(zmd, sb, i + 1);
1338			if (ret) {
1339				dmz_dev_err(sb->dev,
1340					    "Read tertiary super block failed");
1341				dmz_free_mblock(zmd, sb->mblk);
1342				goto out_kfree;
1343			}
1344			ret = dmz_check_sb(zmd, sb, true);
1345			dmz_free_mblock(zmd, sb->mblk);
1346			if (ret == -EINVAL)
1347				goto out_kfree;
1348		}
1349	out_kfree:
1350		kfree(sb);
1351	}
1352	return ret;
1353}
1354
1355/*
1356 * Initialize a zone descriptor.
1357 */
1358static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
1359{
1360	struct dmz_dev *dev = data;
1361	struct dmz_metadata *zmd = dev->metadata;
1362	int idx = num + dev->zone_offset;
1363	struct dm_zone *zone;
1364
1365	zone = dmz_insert(zmd, idx, dev);
1366	if (IS_ERR(zone))
1367		return PTR_ERR(zone);
1368
1369	if (blkz->len != zmd->zone_nr_sectors) {
1370		if (zmd->sb_version > 1) {
 1371			/* Ignore any runt (smaller) zone */
1372			set_bit(DMZ_OFFLINE, &zone->flags);
1373			return 0;
1374		} else if (blkz->start + blkz->len == dev->capacity)
1375			return 0;
1376		return -ENXIO;
1377	}
1378
1379	/*
1380	 * Devices that have zones with a capacity smaller than the zone size
1381	 * (e.g. NVMe zoned namespaces) are not supported.
1382	 */
1383	if (blkz->capacity != blkz->len)
1384		return -ENXIO;
1385
1386	switch (blkz->type) {
1387	case BLK_ZONE_TYPE_CONVENTIONAL:
1388		set_bit(DMZ_RND, &zone->flags);
1389		break;
1390	case BLK_ZONE_TYPE_SEQWRITE_REQ:
1391	case BLK_ZONE_TYPE_SEQWRITE_PREF:
1392		set_bit(DMZ_SEQ, &zone->flags);
1393		break;
1394	default:
1395		return -ENXIO;
1396	}
1397
1398	if (dmz_is_rnd(zone))
1399		zone->wp_block = 0;
1400	else
1401		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
1402
1403	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
1404		set_bit(DMZ_OFFLINE, &zone->flags);
1405	else if (blkz->cond == BLK_ZONE_COND_READONLY)
1406		set_bit(DMZ_READ_ONLY, &zone->flags);
1407	else {
1408		zmd->nr_useable_zones++;
1409		if (dmz_is_rnd(zone)) {
1410			zmd->nr_rnd_zones++;
1411			if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
1412				/* Primary super block zone */
1413				zmd->sb[0].zone = zone;
1414			}
1415		}
1416		if (zmd->nr_devs > 1 && num == 0) {
1417			/*
1418			 * Tertiary superblock zones are always at the
1419			 * start of the zoned devices, so mark them
 1420			 * as metadata zones.
1421			 */
1422			set_bit(DMZ_META, &zone->flags);
1423		}
1424	}
1425	return 0;
1426}
1427
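     /*
      * Set up zone descriptors for a regular (non-zoned) device: its zones
      * are emulated and flagged as cache zones (a trailing runt zone, if
      * any, is marked offline).
      */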
1428static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
1429{
1430	int idx;
1431	sector_t zone_offset = 0;
1432
 1433	for (idx = 0; idx < dev->nr_zones; idx++) {
1434		struct dm_zone *zone;
1435
1436		zone = dmz_insert(zmd, idx, dev);
1437		if (IS_ERR(zone))
1438			return PTR_ERR(zone);
1439		set_bit(DMZ_CACHE, &zone->flags);
1440		zone->wp_block = 0;
1441		zmd->nr_cache_zones++;
1442		zmd->nr_useable_zones++;
1443		if (dev->capacity - zone_offset < zmd->zone_nr_sectors) {
1444			/* Disable runt zone */
1445			set_bit(DMZ_OFFLINE, &zone->flags);
1446			break;
1447		}
1448		zone_offset += zmd->zone_nr_sectors;
1449	}
1450	return 0;
1451}
1452
1453/*
 1454 * Free zone descriptors.
1455 */
1456static void dmz_drop_zones(struct dmz_metadata *zmd)
1457{
1458	int idx;
1459
 1460	for (idx = 0; idx < zmd->nr_zones; idx++) {
1461		struct dm_zone *zone = xa_load(&zmd->zones, idx);
1462
1463		kfree(zone);
1464		xa_erase(&zmd->zones, idx);
1465	}
1466	xa_destroy(&zmd->zones);
1467}
1468
1469/*
1470 * Allocate and initialize zone descriptors using the zone
1471 * information from disk.
1472 */
1473static int dmz_init_zones(struct dmz_metadata *zmd)
1474{
1475	int i, ret;
1476	struct dmz_dev *zoned_dev = &zmd->dev[0];
1477
1478	/* Init */
1479	zmd->zone_nr_sectors = zmd->dev[0].zone_nr_sectors;
1480	zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
1481	zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
1482	zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
1483	zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
1484	zmd->zone_nr_bitmap_blocks =
1485		max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
1486	zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
1487					DMZ_BLOCK_SIZE_BITS);
1488
1489	/* Allocate zone array */
1490	zmd->nr_zones = 0;
1491	for (i = 0; i < zmd->nr_devs; i++) {
1492		struct dmz_dev *dev = &zmd->dev[i];
1493
1494		dev->metadata = zmd;
1495		zmd->nr_zones += dev->nr_zones;
1496
1497		atomic_set(&dev->unmap_nr_rnd, 0);
1498		INIT_LIST_HEAD(&dev->unmap_rnd_list);
1499		INIT_LIST_HEAD(&dev->map_rnd_list);
1500
1501		atomic_set(&dev->unmap_nr_seq, 0);
1502		INIT_LIST_HEAD(&dev->unmap_seq_list);
1503		INIT_LIST_HEAD(&dev->map_seq_list);
1504	}
1505
1506	if (!zmd->nr_zones) {
1507		DMERR("(%s): No zones found", zmd->devname);
1508		return -ENXIO;
1509	}
1510	xa_init(&zmd->zones);
1511
1512	DMDEBUG("(%s): Using %zu B for zone information",
1513		zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones);
1514
1515	if (zmd->nr_devs > 1) {
1516		ret = dmz_emulate_zones(zmd, &zmd->dev[0]);
1517		if (ret < 0) {
1518			DMDEBUG("(%s): Failed to emulate zones, error %d",
1519				zmd->devname, ret);
1520			dmz_drop_zones(zmd);
1521			return ret;
1522		}
1523
1524		/*
1525		 * Primary superblock zone is always at zone 0 when multiple
1526		 * drives are present.
1527		 */
1528		zmd->sb[0].zone = dmz_get(zmd, 0);
1529
1530		for (i = 1; i < zmd->nr_devs; i++) {
1531			zoned_dev = &zmd->dev[i];
1532
1533			ret = blkdev_report_zones(zoned_dev->bdev, 0,
1534						  BLK_ALL_ZONES,
1535						  dmz_init_zone, zoned_dev);
1536			if (ret < 0) {
1537				DMDEBUG("(%s): Failed to report zones, error %d",
1538					zmd->devname, ret);
1539				dmz_drop_zones(zmd);
1540				return ret;
1541			}
1542		}
1543		return 0;
1544	}
1545
1546	/*
1547	 * Get zone information and initialize zone descriptors.  At the same
1548	 * time, determine where the super block should be: first block of the
1549	 * first randomly writable zone.
1550	 */
1551	ret = blkdev_report_zones(zoned_dev->bdev, 0, BLK_ALL_ZONES,
1552				  dmz_init_zone, zoned_dev);
1553	if (ret < 0) {
1554		DMDEBUG("(%s): Failed to report zones, error %d",
1555			zmd->devname, ret);
1556		dmz_drop_zones(zmd);
1557		return ret;
1558	}
1559
1560	return 0;
1561}
1562
1563static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
1564			      void *data)
1565{
1566	struct dm_zone *zone = data;
1567
1568	clear_bit(DMZ_OFFLINE, &zone->flags);
1569	clear_bit(DMZ_READ_ONLY, &zone->flags);
1570	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
1571		set_bit(DMZ_OFFLINE, &zone->flags);
1572	else if (blkz->cond == BLK_ZONE_COND_READONLY)
1573		set_bit(DMZ_READ_ONLY, &zone->flags);
1574
1575	if (dmz_is_seq(zone))
1576		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
1577	else
1578		zone->wp_block = 0;
1579	return 0;
1580}
1581
1582/*
 1583 * Update a zone's information.
1584 */
1585static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1586{
1587	struct dmz_dev *dev = zone->dev;
1588	unsigned int noio_flag;
1589	int ret;
1590
1591	if (dev->flags & DMZ_BDEV_REGULAR)
1592		return 0;
1593
1594	/*
1595	 * Get zone information from disk. Since blkdev_report_zones() uses
1596	 * GFP_KERNEL by default for memory allocations, set the per-task
1597	 * PF_MEMALLOC_NOIO flag so that all allocations are done as if
1598	 * GFP_NOIO was specified.
1599	 */
1600	noio_flag = memalloc_noio_save();
1601	ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
1602				  dmz_update_zone_cb, zone);
1603	memalloc_noio_restore(noio_flag);
1604
1605	if (ret == 0)
1606		ret = -EIO;
1607	if (ret < 0) {
1608		dmz_dev_err(dev, "Get zone %u report failed",
1609			    zone->id);
1610		dmz_check_bdev(dev);
1611		return ret;
1612	}
1613
1614	return 0;
1615}
1616
1617/*
1618 * Check a zone write pointer position when the zone is marked
1619 * with the sequential write error flag.
1620 */
1621static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
1622				    struct dm_zone *zone)
1623{
1624	struct dmz_dev *dev = zone->dev;
1625	unsigned int wp = 0;
1626	int ret;
1627
1628	wp = zone->wp_block;
1629	ret = dmz_update_zone(zmd, zone);
1630	if (ret)
1631		return ret;
1632
1633	dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
1634		     zone->id, zone->wp_block, wp);
1635
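     	/*
     	 * If the device write pointer is behind the position recorded in the
     	 * metadata, the blocks in between were never written: invalidate them.
     	 */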
1636	if (zone->wp_block < wp) {
1637		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
1638				      wp - zone->wp_block);
1639	}
1640
1641	return 0;
1642}
1643
1644/*
1645 * Reset a zone write pointer.
1646 */
1647static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1648{
1649	int ret;
1650
1651	/*
1652	 * Ignore offline zones, read only zones,
1653	 * and conventional zones.
1654	 */
1655	if (dmz_is_offline(zone) ||
1656	    dmz_is_readonly(zone) ||
1657	    dmz_is_rnd(zone))
1658		return 0;
1659
1660	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
1661		struct dmz_dev *dev = zone->dev;
1662
1663		ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
1664				       dmz_start_sect(zmd, zone),
1665				       zmd->zone_nr_sectors, GFP_NOIO);
1666		if (ret) {
1667			dmz_dev_err(dev, "Reset zone %u failed %d",
1668				    zone->id, ret);
1669			return ret;
1670		}
1671	}
1672
1673	/* Clear write error bit and rewind write pointer position */
1674	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
1675	zone->wp_block = 0;
1676
1677	return 0;
1678}
1679
1680static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
1681
1682/*
1683 * Initialize chunk mapping.
1684 */
1685static int dmz_load_mapping(struct dmz_metadata *zmd)
1686{
1687	struct dm_zone *dzone, *bzone;
1688	struct dmz_mblock *dmap_mblk = NULL;
1689	struct dmz_map *dmap;
1690	unsigned int i = 0, e = 0, chunk = 0;
1691	unsigned int dzone_id;
1692	unsigned int bzone_id;
1693
1694	/* Metadata block array for the chunk mapping table */
1695	zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
 1696				sizeof(struct dmz_mblock *), GFP_KERNEL);
1697	if (!zmd->map_mblk)
1698		return -ENOMEM;
1699
1700	/* Get chunk mapping table blocks and initialize zone mapping */
1701	while (chunk < zmd->nr_chunks) {
1702		if (!dmap_mblk) {
1703			/* Get mapping block */
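     			/*
     			 * Metadata block 0 is the super block, so the chunk
     			 * mapping table blocks start at block 1.
     			 */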
1704			dmap_mblk = dmz_get_mblock(zmd, i + 1);
1705			if (IS_ERR(dmap_mblk))
1706				return PTR_ERR(dmap_mblk);
1707			zmd->map_mblk[i] = dmap_mblk;
1708			dmap = (struct dmz_map *) dmap_mblk->data;
1709			i++;
1710			e = 0;
1711		}
1712
1713		/* Check data zone */
1714		dzone_id = le32_to_cpu(dmap[e].dzone_id);
1715		if (dzone_id == DMZ_MAP_UNMAPPED)
1716			goto next;
1717
1718		if (dzone_id >= zmd->nr_zones) {
1719			dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
1720				    chunk, dzone_id);
1721			return -EIO;
1722		}
1723
1724		dzone = dmz_get(zmd, dzone_id);
1725		if (!dzone) {
1726			dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
1727				    chunk, dzone_id);
1728			return -EIO;
1729		}
1730		set_bit(DMZ_DATA, &dzone->flags);
1731		dzone->chunk = chunk;
1732		dmz_get_zone_weight(zmd, dzone);
1733
1734		if (dmz_is_cache(dzone))
1735			list_add_tail(&dzone->link, &zmd->map_cache_list);
1736		else if (dmz_is_rnd(dzone))
1737			list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
1738		else
1739			list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
1740
1741		/* Check buffer zone */
1742		bzone_id = le32_to_cpu(dmap[e].bzone_id);
1743		if (bzone_id == DMZ_MAP_UNMAPPED)
1744			goto next;
1745
1746		if (bzone_id >= zmd->nr_zones) {
1747			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
1748				    chunk, bzone_id);
1749			return -EIO;
1750		}
1751
1752		bzone = dmz_get(zmd, bzone_id);
1753		if (!bzone) {
1754			dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
1755				    chunk, bzone_id);
1756			return -EIO;
1757		}
1758		if (!dmz_is_rnd(bzone) && !dmz_is_cache(bzone)) {
1759			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
1760				    chunk, bzone_id);
1761			return -EIO;
1762		}
1763
1764		set_bit(DMZ_DATA, &bzone->flags);
1765		set_bit(DMZ_BUF, &bzone->flags);
1766		bzone->chunk = chunk;
1767		bzone->bzone = dzone;
1768		dzone->bzone = bzone;
1769		dmz_get_zone_weight(zmd, bzone);
1770		if (dmz_is_cache(bzone))
1771			list_add_tail(&bzone->link, &zmd->map_cache_list);
1772		else
1773			list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
1774next:
1775		chunk++;
1776		e++;
1777		if (e >= DMZ_MAP_ENTRIES)
1778			dmap_mblk = NULL;
1779	}
1780
1781	/*
1782	 * At this point, only meta zones and mapped data zones were
1783	 * fully initialized. All remaining zones are unmapped data
1784	 * zones. Finish initializing those here.
1785	 */
1786	for (i = 0; i < zmd->nr_zones; i++) {
1787		dzone = dmz_get(zmd, i);
1788		if (!dzone)
1789			continue;
1790		if (dmz_is_meta(dzone))
1791			continue;
1792		if (dmz_is_offline(dzone))
1793			continue;
1794
1795		if (dmz_is_cache(dzone))
1796			zmd->nr_cache++;
1797		else if (dmz_is_rnd(dzone))
1798			dzone->dev->nr_rnd++;
1799		else
1800			dzone->dev->nr_seq++;
1801
1802		if (dmz_is_data(dzone)) {
1803			/* Already initialized */
1804			continue;
1805		}
1806
1807		/* Unmapped data zone */
1808		set_bit(DMZ_DATA, &dzone->flags);
1809		dzone->chunk = DMZ_MAP_UNMAPPED;
1810		if (dmz_is_cache(dzone)) {
1811			list_add_tail(&dzone->link, &zmd->unmap_cache_list);
1812			atomic_inc(&zmd->unmap_nr_cache);
1813		} else if (dmz_is_rnd(dzone)) {
1814			list_add_tail(&dzone->link,
1815				      &dzone->dev->unmap_rnd_list);
1816			atomic_inc(&dzone->dev->unmap_nr_rnd);
1817		} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
1818			list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
1819			set_bit(DMZ_RESERVED, &dzone->flags);
1820			atomic_inc(&zmd->nr_reserved_seq_zones);
1821			dzone->dev->nr_seq--;
1822		} else {
1823			list_add_tail(&dzone->link,
1824				      &dzone->dev->unmap_seq_list);
1825			atomic_inc(&dzone->dev->unmap_nr_seq);
1826		}
1827	}
1828
1829	return 0;
1830}
1831
1832/*
1833 * Set a data chunk mapping.
1834 */
1835static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
1836				  unsigned int dzone_id, unsigned int bzone_id)
1837{
1838	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
1839	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
1840	int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;
1841
1842	dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
1843	dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
1844	dmz_dirty_mblock(zmd, dmap_mblk);
1845}
1846
1847/*
1848 * The list of mapped zones is maintained in LRU order.
 1849 * This rotates a zone to the end of its map list.
1850 */
1851static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1852{
1853	if (list_empty(&zone->link))
1854		return;
1855
1856	list_del_init(&zone->link);
1857	if (dmz_is_seq(zone)) {
1858		/* LRU rotate sequential zone */
1859		list_add_tail(&zone->link, &zone->dev->map_seq_list);
1860	} else if (dmz_is_cache(zone)) {
1861		/* LRU rotate cache zone */
1862		list_add_tail(&zone->link, &zmd->map_cache_list);
1863	} else {
1864		/* LRU rotate random zone */
1865		list_add_tail(&zone->link, &zone->dev->map_rnd_list);
1866	}
1867}
1868
1869/*
 1870 * The lists of mapped zones are maintained in LRU order.
 1871 * This rotates a zone (and its buffer zone, if any) to the end of its list.
1872 */
1873static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1874{
1875	__dmz_lru_zone(zmd, zone);
1876	if (zone->bzone)
1877		__dmz_lru_zone(zmd, zone->bzone);
1878}
1879
1880/*
1881 * Wait for any zone to be freed.
1882 */
1883static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
1884{
1885	DEFINE_WAIT(wait);
1886
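     	/*
     	 * Drop the map and metadata locks while sleeping so that reclaim can
     	 * make progress and free a zone.
     	 */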
1887	prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
1888	dmz_unlock_map(zmd);
1889	dmz_unlock_metadata(zmd);
1890
1891	io_schedule_timeout(HZ);
1892
1893	dmz_lock_metadata(zmd);
1894	dmz_lock_map(zmd);
1895	finish_wait(&zmd->free_wq, &wait);
1896}
1897
1898/*
1899 * Lock a zone for reclaim (set the zone RECLAIM bit).
1900 * Returns false if the zone cannot be locked or if it is already locked
 1901 * and true otherwise.
1902 */
1903int dmz_lock_zone_reclaim(struct dm_zone *zone)
1904{
1905	/* Active zones cannot be reclaimed */
1906	if (dmz_is_active(zone))
1907		return 0;
1908
1909	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
1910}
1911
1912/*
1913 * Clear a zone reclaim flag.
1914 */
1915void dmz_unlock_zone_reclaim(struct dm_zone *zone)
1916{
1917	WARN_ON(dmz_is_active(zone));
1918	WARN_ON(!dmz_in_reclaim(zone));
1919
1920	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
1921	smp_mb__after_atomic();
1922	wake_up_bit(&zone->flags, DMZ_RECLAIM);
1923}
1924
1925/*
1926 * Wait for a zone reclaim to complete.
1927 */
1928static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
1929{
1930	dmz_unlock_map(zmd);
1931	dmz_unlock_metadata(zmd);
1932	set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
1933	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
1934	clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
1935	dmz_lock_metadata(zmd);
1936	dmz_lock_map(zmd);
1937}
1938
1939/*
1940 * Select a cache or random write zone for reclaim.
1941 */
1942static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
1943						    unsigned int idx, bool idle)
1944{
1945	struct dm_zone *dzone = NULL;
1946	struct dm_zone *zone, *maxw_z = NULL;
1947	struct list_head *zone_list;
1948
1949	/* If we have cache zones select from the cache zone list */
1950	if (zmd->nr_cache) {
1951		zone_list = &zmd->map_cache_list;
 1952		/* Try to reclaim random zones, too, when idle */
1953		if (idle && list_empty(zone_list))
1954			zone_list = &zmd->dev[idx].map_rnd_list;
1955	} else
1956		zone_list = &zmd->dev[idx].map_rnd_list;
1957
1958	/*
1959	 * Find the buffer zone with the heaviest weight or the first (oldest)
1960	 * data zone that can be reclaimed.
1961	 */
1962	list_for_each_entry(zone, zone_list, link) {
1963		if (dmz_is_buf(zone)) {
1964			dzone = zone->bzone;
1965			if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
1966				continue;
1967			if (!maxw_z || maxw_z->weight < dzone->weight)
1968				maxw_z = dzone;
1969		} else {
1970			dzone = zone;
1971			if (dmz_lock_zone_reclaim(dzone))
1972				return dzone;
1973		}
1974	}
1975
1976	if (maxw_z && dmz_lock_zone_reclaim(maxw_z))
1977		return maxw_z;
1978
1979	/*
1980	 * If we come here, none of the zones inspected could be locked for
1981	 * reclaim. Try again, being more aggressive, that is, find the
 1982	 * first zone that can be reclaimed regardless of its weight.
1983	 */
1984	list_for_each_entry(zone, zone_list, link) {
1985		if (dmz_is_buf(zone)) {
1986			dzone = zone->bzone;
1987			if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
1988				continue;
1989		} else
1990			dzone = zone;
1991		if (dmz_lock_zone_reclaim(dzone))
1992			return dzone;
1993	}
1994
1995	return NULL;
1996}
1997
1998/*
1999 * Select a buffered sequential zone for reclaim.
2000 */
2001static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
2002						    unsigned int idx)
2003{
2004	struct dm_zone *zone;
2005
2006	list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
2007		if (!zone->bzone)
2008			continue;
2009		if (dmz_lock_zone_reclaim(zone))
2010			return zone;
2011	}
2012
2013	return NULL;
2014}
2015
2016/*
2017 * Select a zone for reclaim.
2018 */
2019struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
2020					 unsigned int dev_idx, bool idle)
2021{
2022	struct dm_zone *zone = NULL;
2023
2024	/*
2025	 * Search for a zone candidate to reclaim: 2 cases are possible.
 2026	 * (1) There are no free sequential zones. Then a random data zone
2027	 *     cannot be reclaimed. So choose a sequential zone to reclaim so
2028	 *     that afterward a random zone can be reclaimed.
2029	 * (2) At least one free sequential zone is available, then choose
2030	 *     the oldest random zone (data or buffer) that can be locked.
2031	 */
2032	dmz_lock_map(zmd);
2033	if (list_empty(&zmd->reserved_seq_zones_list))
2034		zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
2035	if (!zone)
2036		zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
2037	dmz_unlock_map(zmd);
2038
2039	return zone;
2040}
2041
2042/*
2043 * Get the zone mapping a chunk, if the chunk is mapped already.
2044 * If no mapping exists and the operation is WRITE, a zone is
2045 * allocated and used to map the chunk.
2046 * The zone returned will be set to the active state.
2047 */
2048struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
2049				      unsigned int chunk, enum req_op op)
2050{
2051	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
2052	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
2053	int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
2054	unsigned int dzone_id;
2055	struct dm_zone *dzone = NULL;
2056	int ret = 0;
2057	int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
2058
2059	dmz_lock_map(zmd);
2060again:
2061	/* Get the chunk mapping */
2062	dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
2063	if (dzone_id == DMZ_MAP_UNMAPPED) {
2064		/*
2065		 * Reads or discards in unmapped chunks are fine. But for
2066		 * writes, we need a mapping, so get one.
2067		 */
2068		if (op != REQ_OP_WRITE)
2069			goto out;
2070
2071		/* Allocate a random zone */
2072		dzone = dmz_alloc_zone(zmd, 0, alloc_flags);
2073		if (!dzone) {
2074			if (dmz_dev_is_dying(zmd)) {
2075				dzone = ERR_PTR(-EIO);
2076				goto out;
2077			}
2078			dmz_wait_for_free_zones(zmd);
2079			goto again;
2080		}
2081
2082		dmz_map_zone(zmd, dzone, chunk);
2083
2084	} else {
2085		/* The chunk is already mapped: get the mapping zone */
2086		dzone = dmz_get(zmd, dzone_id);
2087		if (!dzone) {
2088			dzone = ERR_PTR(-EIO);
2089			goto out;
2090		}
2091		if (dzone->chunk != chunk) {
2092			dzone = ERR_PTR(-EIO);
2093			goto out;
2094		}
2095
2096		/* Repair write pointer if the sequential dzone has an error */
2097		if (dmz_seq_write_err(dzone)) {
2098			ret = dmz_handle_seq_write_err(zmd, dzone);
2099			if (ret) {
2100				dzone = ERR_PTR(-EIO);
2101				goto out;
2102			}
2103			clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
2104		}
2105	}
2106
2107	/*
2108	 * If the zone is being reclaimed, the chunk mapping may change
2109	 * to a different zone. So wait for reclaim and retry. Otherwise,
2110	 * activate the zone (this will prevent reclaim from touching it).
2111	 */
2112	if (dmz_in_reclaim(dzone)) {
2113		dmz_wait_for_reclaim(zmd, dzone);
2114		goto again;
2115	}
2116	dmz_activate_zone(dzone);
2117	dmz_lru_zone(zmd, dzone);
2118out:
2119	dmz_unlock_map(zmd);
2120
2121	return dzone;
2122}
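
The lookup above splits the chunk number with DMZ_MAP_ENTRIES_SHIFT and DMZ_MAP_ENTRIES_MASK: with 4 KB metadata blocks and 8-byte entries there are 512 entries per mapping block, so the high bits select the cached map block and the low 9 bits select the entry within it. A minimal standalone sketch of that arithmetic; the constants are restated here for the example and nothing is read from a real device:

#include <stdio.h>

/* Restated for the example: 4096-byte metadata blocks, 8-byte map entries. */
#define BLOCK_SIZE	4096u
#define MAP_ENTRIES	(BLOCK_SIZE / 8u)	/* 512 entries per mapping block */
#define MAP_SHIFT	9u			/* log2(MAP_ENTRIES) */
#define MAP_MASK	(MAP_ENTRIES - 1u)

int main(void)
{
	unsigned int chunk = 1234;
	unsigned int map_block = chunk >> MAP_SHIFT;	/* which mapping block    */
	unsigned int map_idx   = chunk & MAP_MASK;	/* entry inside that block */

	/* prints: chunk 1234 -> mapping block 2, entry 210 */
	printf("chunk %u -> mapping block %u, entry %u\n", chunk, map_block, map_idx);
	return 0;
}
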
2123
2124/*
2125 * Write and discard change the block validity of data zones and their buffer
2126 * zones. Check here that valid blocks are still present. If all blocks are
2127 * invalid, the zones can be unmapped on the fly without waiting for reclaim
2128 * to do it.
2129 */
2130void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
2131{
2132	struct dm_zone *bzone;
2133
2134	dmz_lock_map(zmd);
2135
2136	bzone = dzone->bzone;
2137	if (bzone) {
2138		if (dmz_weight(bzone))
2139			dmz_lru_zone(zmd, bzone);
2140		else {
2141			/* Empty buffer zone: reclaim it */
2142			dmz_unmap_zone(zmd, bzone);
2143			dmz_free_zone(zmd, bzone);
2144			bzone = NULL;
2145		}
2146	}
2147
2148	/* Deactivate the data zone */
2149	dmz_deactivate_zone(dzone);
2150	if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
2151		dmz_lru_zone(zmd, dzone);
2152	else {
2153		/* Unbuffered inactive empty data zone: reclaim it */
2154		dmz_unmap_zone(zmd, dzone);
2155		dmz_free_zone(zmd, dzone);
2156	}
2157
2158	dmz_unlock_map(zmd);
2159}
2160
2161/*
2162 * Allocate and map a random zone to buffer a chunk
2163 * already mapped to a sequential zone.
2164 */
2165struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
2166				     struct dm_zone *dzone)
2167{
2168	struct dm_zone *bzone;
2169	int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
2170
2171	dmz_lock_map(zmd);
2172again:
2173	bzone = dzone->bzone;
2174	if (bzone)
2175		goto out;
2176
2177	/* Allocate a random zone */
2178	bzone = dmz_alloc_zone(zmd, 0, alloc_flags);
2179	if (!bzone) {
2180		if (dmz_dev_is_dying(zmd)) {
2181			bzone = ERR_PTR(-EIO);
2182			goto out;
2183		}
2184		dmz_wait_for_free_zones(zmd);
2185		goto again;
2186	}
2187
2188	/* Update the chunk mapping */
2189	dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);
2190
2191	set_bit(DMZ_BUF, &bzone->flags);
2192	bzone->chunk = dzone->chunk;
2193	bzone->bzone = dzone;
2194	dzone->bzone = bzone;
2195	if (dmz_is_cache(bzone))
2196		list_add_tail(&bzone->link, &zmd->map_cache_list);
2197	else
2198		list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
2199out:
2200	dmz_unlock_map(zmd);
2201
2202	return bzone;
2203}
2204
2205/*
2206 * Get an unmapped (free) zone.
2207 * This must be called with the mapping lock held.
2208 */
2209struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
2210			       unsigned long flags)
2211{
2212	struct list_head *list;
2213	struct dm_zone *zone;
2214	int i;
2215
2216	/* Schedule reclaim to ensure free zones are available */
2217	if (!(flags & DMZ_ALLOC_RECLAIM)) {
2218		for (i = 0; i < zmd->nr_devs; i++)
2219			dmz_schedule_reclaim(zmd->dev[i].reclaim);
2220	}
2221
2222	i = 0;
2223again:
2224	if (flags & DMZ_ALLOC_CACHE)
2225		list = &zmd->unmap_cache_list;
2226	else if (flags & DMZ_ALLOC_RND)
2227		list = &zmd->dev[dev_idx].unmap_rnd_list;
2228	else
2229		list = &zmd->dev[dev_idx].unmap_seq_list;
2230
2231	if (list_empty(list)) {
2232		/*
2233		 * No free zone: return NULL if this is not for reclaim.
2234		 */
2235		if (!(flags & DMZ_ALLOC_RECLAIM))
2236			return NULL;
2237		/*
2238		 * Try to allocate from other devices
2239		 */
2240		if (i < zmd->nr_devs) {
2241			dev_idx = (dev_idx + 1) % zmd->nr_devs;
2242			i++;
2243			goto again;
2244		}
2245
2246		/*
2247		 * Fallback to the reserved sequential zones
2248		 */
2249		zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
2250						struct dm_zone, link);
2251		if (zone) {
2252			list_del_init(&zone->link);
2253			atomic_dec(&zmd->nr_reserved_seq_zones);
2254		}
2255		return zone;
2256	}
2257
2258	zone = list_first_entry(list, struct dm_zone, link);
2259	list_del_init(&zone->link);
2260
2261	if (dmz_is_cache(zone))
2262		atomic_dec(&zmd->unmap_nr_cache);
2263	else if (dmz_is_rnd(zone))
2264		atomic_dec(&zone->dev->unmap_nr_rnd);
2265	else
2266		atomic_dec(&zone->dev->unmap_nr_seq);
2267
2268	if (dmz_is_offline(zone)) {
2269		dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
2270		zone = NULL;
2271		goto again;
2272	}
2273	if (dmz_is_meta(zone)) {
2274		dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
2275		zone = NULL;
2276		goto again;
2277	}
2278	return zone;
2279}
2280
2281/*
2282 * Free a zone.
2283 * This must be called with the mapping lock held.
2284 */
2285void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2286{
2287	/* If this is a sequential zone, reset it */
2288	if (dmz_is_seq(zone))
2289		dmz_reset_zone(zmd, zone);
2290
2291	/* Return the zone to its type unmap list */
2292	if (dmz_is_cache(zone)) {
2293		list_add_tail(&zone->link, &zmd->unmap_cache_list);
2294		atomic_inc(&zmd->unmap_nr_cache);
2295	} else if (dmz_is_rnd(zone)) {
2296		list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
2297		atomic_inc(&zone->dev->unmap_nr_rnd);
2298	} else if (dmz_is_reserved(zone)) {
2299		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
2300		atomic_inc(&zmd->nr_reserved_seq_zones);
2301	} else {
2302		list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
2303		atomic_inc(&zone->dev->unmap_nr_seq);
2304	}
2305
2306	wake_up_all(&zmd->free_wq);
2307}
2308
2309/*
2310 * Map a chunk to a zone.
2311 * This must be called with the mapping lock held.
2312 */
2313void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
2314		  unsigned int chunk)
2315{
2316	/* Set the chunk mapping */
2317	dmz_set_chunk_mapping(zmd, chunk, dzone->id,
2318			      DMZ_MAP_UNMAPPED);
2319	dzone->chunk = chunk;
2320	if (dmz_is_cache(dzone))
2321		list_add_tail(&dzone->link, &zmd->map_cache_list);
2322	else if (dmz_is_rnd(dzone))
2323		list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
2324	else
2325		list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
2326}
2327
2328/*
2329 * Unmap a zone.
2330 * This must be called with the mapping lock held.
2331 */
2332void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2333{
2334	unsigned int chunk = zone->chunk;
2335	unsigned int dzone_id;
2336
2337	if (chunk == DMZ_MAP_UNMAPPED) {
2338		/* Already unmapped */
2339		return;
2340	}
2341
2342	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
2343		/*
2344		 * Unmapping the chunk buffer zone: clear only
2345		 * the chunk buffer mapping
2346		 */
2347		dzone_id = zone->bzone->id;
2348		zone->bzone->bzone = NULL;
2349		zone->bzone = NULL;
2350
2351	} else {
2352		/*
2353		 * Unmapping the chunk data zone: the zone must
2354		 * not be buffered.
2355		 */
2356		if (WARN_ON(zone->bzone)) {
2357			zone->bzone->bzone = NULL;
2358			zone->bzone = NULL;
2359		}
2360		dzone_id = DMZ_MAP_UNMAPPED;
2361	}
2362
2363	dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);
2364
2365	zone->chunk = DMZ_MAP_UNMAPPED;
2366	list_del_init(&zone->link);
2367}
2368
2369/*
2370 * Set @nr_bits bits in @bitmap starting from @bit.
2371 * Return the number of bits changed from 0 to 1.
2372 */
2373static unsigned int dmz_set_bits(unsigned long *bitmap,
2374				 unsigned int bit, unsigned int nr_bits)
2375{
2376	unsigned long *addr;
2377	unsigned int end = bit + nr_bits;
2378	unsigned int n = 0;
2379
2380	while (bit < end) {
2381		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2382		    ((end - bit) >= BITS_PER_LONG)) {
2383			/* Try to set the whole word at once */
2384			addr = bitmap + BIT_WORD(bit);
2385			if (*addr == 0) {
2386				*addr = ULONG_MAX;
2387				n += BITS_PER_LONG;
2388				bit += BITS_PER_LONG;
2389				continue;
2390			}
2391		}
2392
2393		if (!test_and_set_bit(bit, bitmap))
2394			n++;
2395		bit++;
2396	}
2397
2398	return n;
2399}
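
dmz_set_bits() above (and the clearing and counting helpers further down) takes a fast path whenever the cursor is word aligned, a full word of bits remains, and that word is currently empty: the whole word is written with one store instead of one test_and_set_bit() per bit. A standalone, non-atomic analogue of that shape, assuming 64-bit unsigned long and purely illustrative data:

#include <limits.h>
#include <stdio.h>

#define WORD_BITS	(sizeof(unsigned long) * CHAR_BIT)

/* Set nr_bits bits starting at bit; return how many went from 0 to 1. */
static unsigned int set_bits(unsigned long *bitmap, unsigned int bit,
			     unsigned int nr_bits)
{
	unsigned int end = bit + nr_bits, n = 0;

	while (bit < end) {
		if ((bit % WORD_BITS) == 0 && end - bit >= WORD_BITS &&
		    bitmap[bit / WORD_BITS] == 0) {
			/* fast path: fill a whole empty word at once */
			bitmap[bit / WORD_BITS] = ~0UL;
			n += WORD_BITS;
			bit += WORD_BITS;
			continue;
		}
		if (!(bitmap[bit / WORD_BITS] & (1UL << (bit % WORD_BITS))))
			n++;
		bitmap[bit / WORD_BITS] |= 1UL << (bit % WORD_BITS);
		bit++;
	}
	return n;
}

int main(void)
{
	unsigned long bm[4] = { 0 };

	/* 3 bits at the end of word 0, all of word 1, 5 bits of word 2 */
	printf("%u bits newly set\n", set_bits(bm, 61, 72));
	return 0;
}
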
2400
2401/*
2402 * Get the bitmap block storing the bit for chunk_block in zone.
2403 */
2404static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
2405					 struct dm_zone *zone,
2406					 sector_t chunk_block)
2407{
2408	sector_t bitmap_block = 1 + zmd->nr_map_blocks +
2409		(sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
2410		(chunk_block >> DMZ_BLOCK_SHIFT_BITS);
2411
2412	return dmz_get_mblock(zmd, bitmap_block);
2413}
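
The block index computed above follows the flat metadata layout: one super block, then the chunk mapping blocks, then zone_nr_bitmap_blocks bitmap blocks per zone, advancing by one block for every 32768 validity bits (one 4 KB block of bits, hence the DMZ_BLOCK_SHIFT_BITS shift). A worked standalone example with made-up geometry; none of these numbers come from a real device:

#include <stdio.h>

int main(void)
{
	/* Assumed geometry, for the arithmetic only. */
	unsigned long long nr_map_blocks = 8;	/* chunk mapping blocks      */
	unsigned long long bitmap_per_zone = 2;	/* zone_nr_bitmap_blocks     */
	unsigned int block_shift_bits = 15;	/* 4 KB block = 32768 bits   */

	unsigned int zone_id = 3;
	unsigned long long chunk_block = 40000;	/* block offset inside zone  */

	unsigned long long bitmap_block = 1 + nr_map_blocks +
		zone_id * bitmap_per_zone +
		(chunk_block >> block_shift_bits);

	/* prints: bitmap block 16  (1 + 8 + 3*2 + 1) */
	printf("bitmap block %llu\n", bitmap_block);
	return 0;
}
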
2414
2415/*
2416 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
2417 */
2418int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
2419			  struct dm_zone *to_zone)
2420{
2421	struct dmz_mblock *from_mblk, *to_mblk;
2422	sector_t chunk_block = 0;
2423
2424	/* Get the zones bitmap blocks */
2425	while (chunk_block < zmd->zone_nr_blocks) {
2426		from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
2427		if (IS_ERR(from_mblk))
2428			return PTR_ERR(from_mblk);
2429		to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
2430		if (IS_ERR(to_mblk)) {
2431			dmz_release_mblock(zmd, from_mblk);
2432			return PTR_ERR(to_mblk);
2433		}
2434
2435		memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
2436		dmz_dirty_mblock(zmd, to_mblk);
2437
2438		dmz_release_mblock(zmd, to_mblk);
2439		dmz_release_mblock(zmd, from_mblk);
2440
2441		chunk_block += zmd->zone_bits_per_mblk;
2442	}
2443
2444	to_zone->weight = from_zone->weight;
2445
2446	return 0;
2447}
2448
2449/*
2450 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
2451 * starting from chunk_block.
2452 */
2453int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
2454			   struct dm_zone *to_zone, sector_t chunk_block)
2455{
2456	unsigned int nr_blocks;
2457	int ret;
2458
2459	/* Get the zones bitmap blocks */
2460	while (chunk_block < zmd->zone_nr_blocks) {
2461		/* Get a valid region from the source zone */
2462		ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
2463		if (ret <= 0)
2464			return ret;
2465
2466		nr_blocks = ret;
2467		ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
2468		if (ret)
2469			return ret;
2470
2471		chunk_block += nr_blocks;
2472	}
2473
2474	return 0;
2475}
2476
2477/*
2478 * Validate all the blocks in the range [block..block+nr_blocks-1].
2479 */
2480int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2481			sector_t chunk_block, unsigned int nr_blocks)
2482{
2483	unsigned int count, bit, nr_bits;
2484	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
2485	struct dmz_mblock *mblk;
2486	unsigned int n = 0;
2487
2488	dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks",
2489		      zone->id, (unsigned long long)chunk_block,
2490		      nr_blocks);
2491
2492	WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);
2493
2494	while (nr_blocks) {
2495		/* Get bitmap block */
2496		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2497		if (IS_ERR(mblk))
2498			return PTR_ERR(mblk);
2499
2500		/* Set bits */
2501		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2502		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2503
2504		count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
2505		if (count) {
2506			dmz_dirty_mblock(zmd, mblk);
2507			n += count;
2508		}
2509		dmz_release_mblock(zmd, mblk);
2510
2511		nr_blocks -= nr_bits;
2512		chunk_block += nr_bits;
2513	}
2514
2515	if (likely(zone->weight + n <= zone_nr_blocks))
2516		zone->weight += n;
2517	else {
2518		dmz_zmd_warn(zmd, "Zone %u: weight %u should be <= %u",
2519			     zone->id, zone->weight,
2520			     zone_nr_blocks - n);
2521		zone->weight = zone_nr_blocks;
2522	}
2523
2524	return 0;
2525}
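
Both the validate loop above and the invalidate loop below walk the requested range one bitmap block at a time: the offset inside the current block is chunk_block & DMZ_BLOCK_MASK_BITS, and at most zone_bits_per_mblk - bit bits are touched before moving to the next block. A standalone sketch of that range splitting, assuming 32768 bits per bitmap block; the variable names are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int bits_per_block = 32768;	/* assumed: one 4 KB block of bits */
	unsigned long long chunk_block = 32000;	/* start of the range              */
	unsigned int nr_blocks = 2000;		/* length of the range             */

	while (nr_blocks) {
		unsigned int bit = chunk_block & (bits_per_block - 1);
		unsigned int nr_bits = nr_blocks < bits_per_block - bit ?
				       nr_blocks : bits_per_block - bit;

		/* prints: block 0 bits 32000..32767, then block 1 bits 0..1231 */
		printf("bitmap block %llu: bits %u..%u\n",
		       chunk_block / bits_per_block, bit, bit + nr_bits - 1);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}
	return 0;
}
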
2526
2527/*
2528 * Clear nr_bits bits in bitmap starting from bit.
2529 * Return the number of bits cleared.
2530 */
2531static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
2532{
2533	unsigned long *addr;
2534	int end = bit + nr_bits;
2535	int n = 0;
2536
2537	while (bit < end) {
2538		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2539		    ((end - bit) >= BITS_PER_LONG)) {
2540			/* Try to clear the whole word at once */
2541			addr = bitmap + BIT_WORD(bit);
2542			if (*addr == ULONG_MAX) {
2543				*addr = 0;
2544				n += BITS_PER_LONG;
2545				bit += BITS_PER_LONG;
2546				continue;
2547			}
2548		}
2549
2550		if (test_and_clear_bit(bit, bitmap))
2551			n++;
2552		bit++;
2553	}
2554
2555	return n;
2556}
2557
2558/*
2559 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
2560 */
2561int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2562			  sector_t chunk_block, unsigned int nr_blocks)
2563{
2564	unsigned int count, bit, nr_bits;
2565	struct dmz_mblock *mblk;
2566	unsigned int n = 0;
2567
2568	dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
2569		      zone->id, (u64)chunk_block, nr_blocks);
2570
2571	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
2572
2573	while (nr_blocks) {
2574		/* Get bitmap block */
2575		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2576		if (IS_ERR(mblk))
2577			return PTR_ERR(mblk);
2578
2579		/* Clear bits */
2580		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2581		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2582
2583		count = dmz_clear_bits((unsigned long *)mblk->data,
2584				       bit, nr_bits);
2585		if (count) {
2586			dmz_dirty_mblock(zmd, mblk);
2587			n += count;
2588		}
2589		dmz_release_mblock(zmd, mblk);
2590
2591		nr_blocks -= nr_bits;
2592		chunk_block += nr_bits;
2593	}
2594
2595	if (zone->weight >= n)
2596		zone->weight -= n;
2597	else {
2598		dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u",
2599			     zone->id, zone->weight, n);
2600		zone->weight = 0;
2601	}
2602
2603	return 0;
2604}
2605
2606/*
2607 * Get a block bit value.
2608 */
2609static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2610			  sector_t chunk_block)
2611{
2612	struct dmz_mblock *mblk;
2613	int ret;
2614
2615	WARN_ON(chunk_block >= zmd->zone_nr_blocks);
2616
2617	/* Get bitmap block */
2618	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2619	if (IS_ERR(mblk))
2620		return PTR_ERR(mblk);
2621
2622	/* Get offset */
2623	ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
2624		       (unsigned long *) mblk->data) != 0;
2625
2626	dmz_release_mblock(zmd, mblk);
2627
2628	return ret;
2629}
2630
2631/*
2632 * Return the number of blocks from chunk_block to the first block with a bit
2633 * value specified by set. Search at most nr_blocks blocks from chunk_block.
2634 */
2635static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2636				 sector_t chunk_block, unsigned int nr_blocks,
2637				 int set)
2638{
2639	struct dmz_mblock *mblk;
2640	unsigned int bit, set_bit, nr_bits;
2641	unsigned int zone_bits = zmd->zone_bits_per_mblk;
2642	unsigned long *bitmap;
2643	int n = 0;
2644
2645	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
2646
2647	while (nr_blocks) {
2648		/* Get bitmap block */
2649		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2650		if (IS_ERR(mblk))
2651			return PTR_ERR(mblk);
2652
2653		/* Get offset */
2654		bitmap = (unsigned long *) mblk->data;
2655		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2656		nr_bits = min(nr_blocks, zone_bits - bit);
2657		if (set)
2658			set_bit = find_next_bit(bitmap, zone_bits, bit);
2659		else
2660			set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
2661		dmz_release_mblock(zmd, mblk);
2662
2663		n += set_bit - bit;
2664		if (set_bit < zone_bits)
2665			break;
2666
2667		nr_blocks -= nr_bits;
2668		chunk_block += nr_bits;
2669	}
2670
2671	return n;
2672}
2673
2674/*
2675 * Test if chunk_block is valid. If it is, the number of consecutive
2676 * valid blocks from chunk_block will be returned.
2677 */
2678int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
2679		    sector_t chunk_block)
2680{
2681	int valid;
2682
2683	valid = dmz_test_block(zmd, zone, chunk_block);
2684	if (valid <= 0)
2685		return valid;
2686
2687	/* The block is valid: get the number of valid blocks from block */
2688	return dmz_to_next_set_block(zmd, zone, chunk_block,
2689				     zmd->zone_nr_blocks - chunk_block, 0);
2690}
2691
2692/*
2693 * Find the first valid block from @chunk_block in @zone.
2694 * If such a block is found, its number is returned using
2695 * @chunk_block and the total number of valid blocks from @chunk_block
2696 * is returned.
2697 */
2698int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2699			  sector_t *chunk_block)
2700{
2701	sector_t start_block = *chunk_block;
2702	int ret;
2703
2704	ret = dmz_to_next_set_block(zmd, zone, start_block,
2705				    zmd->zone_nr_blocks - start_block, 1);
2706	if (ret < 0)
2707		return ret;
2708
2709	start_block += ret;
2710	*chunk_block = start_block;
2711
2712	return dmz_to_next_set_block(zmd, zone, start_block,
2713				     zmd->zone_nr_blocks - start_block, 0);
2714}
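
dmz_block_valid() and dmz_first_valid_block() above boil down to two bitmap searches: skip to the next set bit, then measure the run of set bits that follows. A self-contained toy version of the same two-step scan on a 16-bit "zone"; the bit pattern is made up:

#include <stdio.h>

int main(void)
{
	/* bits 5..9 and bit 12 are "valid" in this toy 16-block zone */
	unsigned int bitmap = 0x13E0;
	unsigned int nr_bits = 16, i, first = nr_bits, run = 0;

	for (i = 0; i < nr_bits; i++)		/* the find_next_bit() step */
		if (bitmap & (1u << i)) {
			first = i;
			break;
		}
	for (i = first; i < nr_bits; i++) {	/* the find_next_zero_bit() step */
		if (!(bitmap & (1u << i)))
			break;
		run++;
	}

	/* prints: first valid block 5, 5 consecutive valid blocks */
	printf("first valid block %u, %u consecutive valid blocks\n", first, run);
	return 0;
}
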
2715
2716/*
2717 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
2718 */
2719static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
2720{
2721	unsigned long *addr;
2722	int end = bit + nr_bits;
2723	int n = 0;
2724
2725	while (bit < end) {
2726		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2727		    ((end - bit) >= BITS_PER_LONG)) {
2728			addr = (unsigned long *)bitmap + BIT_WORD(bit);
2729			if (*addr == ULONG_MAX) {
2730				n += BITS_PER_LONG;
2731				bit += BITS_PER_LONG;
2732				continue;
2733			}
2734		}
2735
2736		if (test_bit(bit, bitmap))
2737			n++;
2738		bit++;
2739	}
2740
2741	return n;
2742}
2743
2744/*
2745 * Get a zone weight.
2746 */
2747static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
2748{
2749	struct dmz_mblock *mblk;
2750	sector_t chunk_block = 0;
2751	unsigned int bit, nr_bits;
2752	unsigned int nr_blocks = zmd->zone_nr_blocks;
2753	void *bitmap;
2754	int n = 0;
2755
2756	while (nr_blocks) {
2757		/* Get bitmap block */
2758		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2759		if (IS_ERR(mblk)) {
2760			n = 0;
2761			break;
2762		}
2763
2764		/* Count bits in this block */
2765		bitmap = mblk->data;
2766		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2767		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2768		n += dmz_count_bits(bitmap, bit, nr_bits);
2769
2770		dmz_release_mblock(zmd, mblk);
2771
2772		nr_blocks -= nr_bits;
2773		chunk_block += nr_bits;
2774	}
2775
2776	zone->weight = n;
2777}
2778
2779/*
2780 * Cleanup the zoned metadata resources.
2781 */
2782static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
2783{
2784	struct rb_root *root;
2785	struct dmz_mblock *mblk, *next;
2786	int i;
2787
2788	/* Release zone mapping resources */
2789	if (zmd->map_mblk) {
2790		for (i = 0; i < zmd->nr_map_blocks; i++)
2791			dmz_release_mblock(zmd, zmd->map_mblk[i]);
2792		kfree(zmd->map_mblk);
2793		zmd->map_mblk = NULL;
2794	}
2795
2796	/* Release super blocks */
2797	for (i = 0; i < 2; i++) {
2798		if (zmd->sb[i].mblk) {
2799			dmz_free_mblock(zmd, zmd->sb[i].mblk);
2800			zmd->sb[i].mblk = NULL;
2801		}
2802	}
2803
2804	/* Free cached blocks */
2805	while (!list_empty(&zmd->mblk_dirty_list)) {
2806		mblk = list_first_entry(&zmd->mblk_dirty_list,
2807					struct dmz_mblock, link);
2808		dmz_zmd_warn(zmd, "mblock %llu still in dirty list (ref %u)",
2809			     (u64)mblk->no, mblk->ref);
2810		list_del_init(&mblk->link);
2811		rb_erase(&mblk->node, &zmd->mblk_rbtree);
2812		dmz_free_mblock(zmd, mblk);
2813	}
2814
2815	while (!list_empty(&zmd->mblk_lru_list)) {
2816		mblk = list_first_entry(&zmd->mblk_lru_list,
2817					struct dmz_mblock, link);
2818		list_del_init(&mblk->link);
2819		rb_erase(&mblk->node, &zmd->mblk_rbtree);
2820		dmz_free_mblock(zmd, mblk);
2821	}
2822
2823	/* Sanity checks: the mblock rbtree should now be empty */
2824	root = &zmd->mblk_rbtree;
2825	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
2826		dmz_zmd_warn(zmd, "mblock %llu ref %u still in rbtree",
2827			     (u64)mblk->no, mblk->ref);
2828		mblk->ref = 0;
2829		dmz_free_mblock(zmd, mblk);
2830	}
2831
2832	/* Free the zone descriptors */
2833	dmz_drop_zones(zmd);
2834
2835	mutex_destroy(&zmd->mblk_flush_lock);
2836	mutex_destroy(&zmd->map_lock);
2837}
2838
2839static void dmz_print_dev(struct dmz_metadata *zmd, int num)
2840{
2841	struct dmz_dev *dev = &zmd->dev[num];
2842
2843	if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE)
2844		dmz_dev_info(dev, "Regular block device");
2845	else
2846		dmz_dev_info(dev, "Host-%s zoned block device",
2847			     bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
2848			     "aware" : "managed");
2849	if (zmd->sb_version > 1) {
2850		sector_t sector_offset =
2851			dev->zone_offset << zmd->zone_nr_sectors_shift;
2852
2853		dmz_dev_info(dev, "  %llu 512-byte logical sectors (offset %llu)",
2854			     (u64)dev->capacity, (u64)sector_offset);
2855		dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors (offset %llu)",
2856			     dev->nr_zones, (u64)zmd->zone_nr_sectors,
2857			     (u64)dev->zone_offset);
2858	} else {
2859		dmz_dev_info(dev, "  %llu 512-byte logical sectors",
2860			     (u64)dev->capacity);
2861		dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors",
2862			     dev->nr_zones, (u64)zmd->zone_nr_sectors);
2863	}
2864}
2865
2866/*
2867 * Initialize the zoned metadata.
2868 */
2869int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
2870		     struct dmz_metadata **metadata,
2871		     const char *devname)
2872{
2873	struct dmz_metadata *zmd;
2874	unsigned int i;
2875	struct dm_zone *zone;
2876	int ret;
2877
2878	zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
2879	if (!zmd)
2880		return -ENOMEM;
2881
2882	strcpy(zmd->devname, devname);
2883	zmd->dev = dev;
2884	zmd->nr_devs = num_dev;
2885	zmd->mblk_rbtree = RB_ROOT;
2886	init_rwsem(&zmd->mblk_sem);
2887	mutex_init(&zmd->mblk_flush_lock);
2888	spin_lock_init(&zmd->mblk_lock);
2889	INIT_LIST_HEAD(&zmd->mblk_lru_list);
2890	INIT_LIST_HEAD(&zmd->mblk_dirty_list);
2891
2892	mutex_init(&zmd->map_lock);
2893
2894	atomic_set(&zmd->unmap_nr_cache, 0);
2895	INIT_LIST_HEAD(&zmd->unmap_cache_list);
2896	INIT_LIST_HEAD(&zmd->map_cache_list);
2897
2898	atomic_set(&zmd->nr_reserved_seq_zones, 0);
2899	INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);
2900
2901	init_waitqueue_head(&zmd->free_wq);
2902
2903	/* Initialize zone descriptors */
2904	ret = dmz_init_zones(zmd);
2905	if (ret)
2906		goto err;
2907
2908	/* Get super block */
2909	ret = dmz_load_sb(zmd);
2910	if (ret)
2911		goto err;
2912
2913	/* Set metadata zones starting from sb_zone */
2914	for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
2915		zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
2916		if (!zone) {
2917			dmz_zmd_err(zmd,
2918				    "metadata zone %u not present", i);
2919			ret = -ENXIO;
2920			goto err;
2921		}
2922		if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
2923			dmz_zmd_err(zmd,
2924				    "metadata zone %d is not random", i);
2925			ret = -ENXIO;
2926			goto err;
2927		}
2928		set_bit(DMZ_META, &zone->flags);
2929	}
2930	/* Load mapping table */
2931	ret = dmz_load_mapping(zmd);
2932	if (ret)
2933		goto err;
2934
2935	/*
2936	 * Cache size boundaries: allow at least 2 super blocks, the chunk map
2937	 * blocks and enough blocks to be able to cache the bitmap blocks of
2938	 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
2939	 * the cache to add 512 more metadata blocks.
2940	 */
2941	zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
2942	zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
2943	zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
2944	zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
2945	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
2946
2947	/* Metadata cache shrinker */
2948	ret = register_shrinker(&zmd->mblk_shrinker, "md-meta:(%u:%u)",
2949				MAJOR(dev->bdev->bd_dev),
2950				MINOR(dev->bdev->bd_dev));
2951	if (ret) {
2952		dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
2953		goto err;
2954	}
2955
2956	dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
2957	for (i = 0; i < zmd->nr_devs; i++)
2958		dmz_print_dev(zmd, i);
2959
2960	dmz_zmd_info(zmd, "  %u zones of %llu 512-byte logical sectors",
2961		     zmd->nr_zones, (u64)zmd->zone_nr_sectors);
2962	dmz_zmd_debug(zmd, "  %u metadata zones",
2963		      zmd->nr_meta_zones * 2);
2964	dmz_zmd_debug(zmd, "  %u data zones for %u chunks",
2965		      zmd->nr_data_zones, zmd->nr_chunks);
2966	dmz_zmd_debug(zmd, "    %u cache zones (%u unmapped)",
2967		      zmd->nr_cache, atomic_read(&zmd->unmap_nr_cache));
2968	for (i = 0; i < zmd->nr_devs; i++) {
2969		dmz_zmd_debug(zmd, "    %u random zones (%u unmapped)",
2970			      dmz_nr_rnd_zones(zmd, i),
2971			      dmz_nr_unmap_rnd_zones(zmd, i));
2972		dmz_zmd_debug(zmd, "    %u sequential zones (%u unmapped)",
2973			      dmz_nr_seq_zones(zmd, i),
2974			      dmz_nr_unmap_seq_zones(zmd, i));
2975	}
2976	dmz_zmd_debug(zmd, "  %u reserved sequential data zones",
2977		      zmd->nr_reserved_seq);
2978	dmz_zmd_debug(zmd, "Format:");
2979	dmz_zmd_debug(zmd, "%u metadata blocks per set (%u max cache)",
2980		      zmd->nr_meta_blocks, zmd->max_nr_mblks);
2981	dmz_zmd_debug(zmd, "  %u data zone mapping blocks",
2982		      zmd->nr_map_blocks);
2983	dmz_zmd_debug(zmd, "  %u bitmap blocks",
2984		      zmd->nr_bitmap_blocks);
2985
2986	*metadata = zmd;
2987
2988	return 0;
2989err:
2990	dmz_cleanup_metadata(zmd);
2991	kfree(zmd);
2992	*metadata = NULL;
2993
2994	return ret;
2995}
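
The cache bounds set up above are easy to check by hand: min_nr_mblks keeps the two super blocks, the whole chunk mapping table and sixteen zones' worth of bitmap blocks resident, and max_nr_mblks allows 512 more blocks before the shrinker starts trimming. A worked example with assumed on-disk counts, not taken from a real format:

#include <stdio.h>

int main(void)
{
	unsigned int nr_map_blocks = 8;		/* assumed chunk mapping blocks   */
	unsigned int zone_nr_bitmap_blocks = 2;	/* assumed bitmap blocks per zone */

	unsigned int min_nr_mblks = 2 + nr_map_blocks + zone_nr_bitmap_blocks * 16;
	unsigned int max_nr_mblks = min_nr_mblks + 512;

	/* prints: min 42 blocks (168 KB), max 554 blocks (2216 KB) at 4 KB each */
	printf("min %u blocks (%u KB), max %u blocks (%u KB)\n",
	       min_nr_mblks, min_nr_mblks * 4, max_nr_mblks, max_nr_mblks * 4);
	return 0;
}
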
2996
2997/*
2998 * Cleanup the zoned metadata resources.
2999 */
3000void dmz_dtr_metadata(struct dmz_metadata *zmd)
3001{
3002	unregister_shrinker(&zmd->mblk_shrinker);
3003	dmz_cleanup_metadata(zmd);
3004	kfree(zmd);
3005}
3006
3007/*
3008 * Check zone information on resume.
3009 */
3010int dmz_resume_metadata(struct dmz_metadata *zmd)
3011{
3012	struct dm_zone *zone;
3013	sector_t wp_block;
3014	unsigned int i;
3015	int ret;
3016
3017	/* Check zones */
3018	for (i = 0; i < zmd->nr_zones; i++) {
3019		zone = dmz_get(zmd, i);
3020		if (!zone) {
3021			dmz_zmd_err(zmd, "Unable to get zone %u", i);
3022			return -EIO;
3023		}
3024		wp_block = zone->wp_block;
3025
3026		ret = dmz_update_zone(zmd, zone);
3027		if (ret) {
3028			dmz_zmd_err(zmd, "Broken zone %u", i);
3029			return ret;
3030		}
3031
3032		if (dmz_is_offline(zone)) {
3033			dmz_zmd_warn(zmd, "Zone %u is offline", i);
3034			continue;
3035		}
3036
3037		/* Check write pointer */
3038		if (!dmz_is_seq(zone))
3039			zone->wp_block = 0;
3040		else if (zone->wp_block != wp_block) {
3041			dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)",
3042				    i, (u64)zone->wp_block, (u64)wp_block);
3043			zone->wp_block = wp_block;
3044			dmz_invalidate_blocks(zmd, zone, zone->wp_block,
3045					      zmd->zone_nr_blocks - zone->wp_block);
3046		}
3047	}
3048
3049	return 0;
3050}
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm-zoned.h"
   9
  10#include <linux/module.h>
  11#include <linux/crc32.h>
  12#include <linux/sched/mm.h>
  13
  14#define	DM_MSG_PREFIX		"zoned metadata"
  15
  16/*
  17 * Metadata version.
  18 */
  19#define DMZ_META_VER	2
  20
  21/*
  22 * On-disk super block magic.
  23 */
  24#define DMZ_MAGIC	((((unsigned int)('D')) << 24) | \
  25			 (((unsigned int)('Z')) << 16) | \
  26			 (((unsigned int)('B')) <<  8) | \
  27			 ((unsigned int)('D')))
  28
  29/*
  30 * On disk super block.
  31 * This uses only 512 B but uses on disk a full 4KB block. This block is
  32 * followed on disk by the mapping table of chunks to zones and the bitmap
  33 * blocks indicating zone block validity.
  34 * The overall resulting metadata format is:
  35 *    (1) Super block (1 block)
  36 *    (2) Chunk mapping table (nr_map_blocks)
  37 *    (3) Bitmap blocks (nr_bitmap_blocks)
  38 * All metadata blocks are stored in conventional zones, starting from
  39 * the first conventional zone found on disk.
  40 */
  41struct dmz_super {
  42	/* Magic number */
  43	__le32		magic;			/*   4 */
  44
  45	/* Metadata version number */
  46	__le32		version;		/*   8 */
  47
  48	/* Generation number */
  49	__le64		gen;			/*  16 */
  50
  51	/* This block number */
  52	__le64		sb_block;		/*  24 */
  53
  54	/* The number of metadata blocks, including this super block */
  55	__le32		nr_meta_blocks;		/*  28 */
  56
  57	/* The number of sequential zones reserved for reclaim */
  58	__le32		nr_reserved_seq;	/*  32 */
  59
  60	/* The number of entries in the mapping table */
  61	__le32		nr_chunks;		/*  36 */
  62
  63	/* The number of blocks used for the chunk mapping table */
  64	__le32		nr_map_blocks;		/*  40 */
  65
  66	/* The number of blocks used for the block bitmaps */
  67	__le32		nr_bitmap_blocks;	/*  44 */
  68
  69	/* Checksum */
  70	__le32		crc;			/*  48 */
  71
  72	/* DM-Zoned label */
  73	u8		dmz_label[32];		/*  80 */
  74
  75	/* DM-Zoned UUID */
  76	u8		dmz_uuid[16];		/*  96 */
  77
  78	/* Device UUID */
  79	u8		dev_uuid[16];		/* 112 */
  80
  81	/* Padding to full 512B sector */
  82	u8		reserved[400];		/* 512 */
  83};
  84
  85/*
  86 * Chunk mapping entry: entries are indexed by chunk number
  87 * and give the zone ID (dzone_id) mapping the chunk on disk.
  88 * This zone may be sequential or random. If it is a sequential
  89 * zone, a second zone (bzone_id) used as a write buffer may
  90 * also be specified. This second zone will always be a randomly
  91 * writeable zone.
  92 */
  93struct dmz_map {
  94	__le32			dzone_id;
  95	__le32			bzone_id;
  96};
  97
  98/*
  99 * Chunk mapping table metadata: 512 8-bytes entries per 4KB block.
 100 */
 101#define DMZ_MAP_ENTRIES		(DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
 102#define DMZ_MAP_ENTRIES_SHIFT	(ilog2(DMZ_MAP_ENTRIES))
 103#define DMZ_MAP_ENTRIES_MASK	(DMZ_MAP_ENTRIES - 1)
 104#define DMZ_MAP_UNMAPPED	UINT_MAX
 105
 106/*
 107 * Meta data block descriptor (for cached metadata blocks).
 108 */
 109struct dmz_mblock {
 110	struct rb_node		node;
 111	struct list_head	link;
 112	sector_t		no;
 113	unsigned int		ref;
 114	unsigned long		state;
 115	struct page		*page;
 116	void			*data;
 117};
 118
 119/*
 120 * Metadata block state flags.
 121 */
 122enum {
 123	DMZ_META_DIRTY,
 124	DMZ_META_READING,
 125	DMZ_META_WRITING,
 126	DMZ_META_ERROR,
 127};
 128
 129/*
 130 * Super block information (one per metadata set).
 131 */
 132struct dmz_sb {
 133	sector_t		block;
 134	struct dmz_dev		*dev;
 135	struct dmz_mblock	*mblk;
 136	struct dmz_super	*sb;
 137	struct dm_zone		*zone;
 138};
 139
 140/*
 141 * In-memory metadata.
 142 */
 143struct dmz_metadata {
 144	struct dmz_dev		*dev;
 145	unsigned int		nr_devs;
 146
 147	char			devname[BDEVNAME_SIZE];
 148	char			label[BDEVNAME_SIZE];
 149	uuid_t			uuid;
 150
 151	sector_t		zone_bitmap_size;
 152	unsigned int		zone_nr_bitmap_blocks;
 153	unsigned int		zone_bits_per_mblk;
 154
 155	sector_t		zone_nr_blocks;
 156	sector_t		zone_nr_blocks_shift;
 157
 158	sector_t		zone_nr_sectors;
 159	sector_t		zone_nr_sectors_shift;
 160
 161	unsigned int		nr_bitmap_blocks;
 162	unsigned int		nr_map_blocks;
 163
 164	unsigned int		nr_zones;
 165	unsigned int		nr_useable_zones;
 166	unsigned int		nr_meta_blocks;
 167	unsigned int		nr_meta_zones;
 168	unsigned int		nr_data_zones;
 169	unsigned int		nr_cache_zones;
 170	unsigned int		nr_rnd_zones;
 171	unsigned int		nr_reserved_seq;
 172	unsigned int		nr_chunks;
 173
 174	/* Zone information array */
 175	struct xarray		zones;
 176
 177	struct dmz_sb		sb[2];
 178	unsigned int		mblk_primary;
 179	unsigned int		sb_version;
 180	u64			sb_gen;
 181	unsigned int		min_nr_mblks;
 182	unsigned int		max_nr_mblks;
 183	atomic_t		nr_mblks;
 184	struct rw_semaphore	mblk_sem;
 185	struct mutex		mblk_flush_lock;
 186	spinlock_t		mblk_lock;
 187	struct rb_root		mblk_rbtree;
 188	struct list_head	mblk_lru_list;
 189	struct list_head	mblk_dirty_list;
 190	struct shrinker		*mblk_shrinker;
 191
 192	/* Zone allocation management */
 193	struct mutex		map_lock;
 194	struct dmz_mblock	**map_mblk;
 195
 196	unsigned int		nr_cache;
 197	atomic_t		unmap_nr_cache;
 198	struct list_head	unmap_cache_list;
 199	struct list_head	map_cache_list;
 200
 201	atomic_t		nr_reserved_seq_zones;
 202	struct list_head	reserved_seq_zones_list;
 203
 204	wait_queue_head_t	free_wq;
 205};
 206
 207#define dmz_zmd_info(zmd, format, args...)	\
 208	DMINFO("(%s): " format, (zmd)->label, ## args)
 209
 210#define dmz_zmd_err(zmd, format, args...)	\
 211	DMERR("(%s): " format, (zmd)->label, ## args)
 212
 213#define dmz_zmd_warn(zmd, format, args...)	\
 214	DMWARN("(%s): " format, (zmd)->label, ## args)
 215
 216#define dmz_zmd_debug(zmd, format, args...)	\
 217	DMDEBUG("(%s): " format, (zmd)->label, ## args)
 218/*
 219 * Various accessors
 220 */
 221static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
 222{
 223	if (WARN_ON(!zone))
 224		return 0;
 225
 226	return zone->id - zone->dev->zone_offset;
 227}
 228
 229sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
 230{
 231	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
 232
 233	return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
 234}
 235
 236sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
 237{
 238	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
 239
 240	return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
 241}
 242
 243unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
 244{
 245	return zmd->zone_nr_blocks;
 246}
 247
 248unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
 249{
 250	return zmd->zone_nr_blocks_shift;
 251}
 252
 253unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
 254{
 255	return zmd->zone_nr_sectors;
 256}
 257
 258unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
 259{
 260	return zmd->zone_nr_sectors_shift;
 261}
 262
 263unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
 264{
 265	return zmd->nr_zones;
 266}
 267
 268unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
 269{
 270	return zmd->nr_chunks;
 271}
 272
 273unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx)
 274{
 275	return zmd->dev[idx].nr_rnd;
 276}
 277
 278unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx)
 279{
 280	return atomic_read(&zmd->dev[idx].unmap_nr_rnd);
 281}
 282
 283unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
 284{
 285	return zmd->nr_cache;
 286}
 287
 288unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
 289{
 290	return atomic_read(&zmd->unmap_nr_cache);
 291}
 292
 293unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx)
 294{
 295	return zmd->dev[idx].nr_seq;
 296}
 297
 298unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx)
 299{
 300	return atomic_read(&zmd->dev[idx].unmap_nr_seq);
 301}
 302
 303static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
 304{
 305	return xa_load(&zmd->zones, zone_id);
 306}
 307
 308static struct dm_zone *dmz_insert(struct dmz_metadata *zmd,
 309				  unsigned int zone_id, struct dmz_dev *dev)
 310{
 311	struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);
 312
 313	if (!zone)
 314		return ERR_PTR(-ENOMEM);
 315
 316	if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
 317		kfree(zone);
 318		return ERR_PTR(-EBUSY);
 319	}
 320
 321	INIT_LIST_HEAD(&zone->link);
 322	atomic_set(&zone->refcount, 0);
 323	zone->id = zone_id;
 324	zone->chunk = DMZ_MAP_UNMAPPED;
 325	zone->dev = dev;
 326
 327	return zone;
 328}
 329
 330const char *dmz_metadata_label(struct dmz_metadata *zmd)
 331{
 332	return (const char *)zmd->label;
 333}
 334
 335bool dmz_check_dev(struct dmz_metadata *zmd)
 336{
 337	unsigned int i;
 338
 339	for (i = 0; i < zmd->nr_devs; i++) {
 340		if (!dmz_check_bdev(&zmd->dev[i]))
 341			return false;
 342	}
 343	return true;
 344}
 345
 346bool dmz_dev_is_dying(struct dmz_metadata *zmd)
 347{
 348	unsigned int i;
 349
 350	for (i = 0; i < zmd->nr_devs; i++) {
 351		if (dmz_bdev_is_dying(&zmd->dev[i]))
 352			return true;
 353	}
 354	return false;
 355}
 356
 357/*
 358 * Lock/unlock mapping table.
 359 * The map lock also protects all the zone lists.
 360 */
 361void dmz_lock_map(struct dmz_metadata *zmd)
 362{
 363	mutex_lock(&zmd->map_lock);
 364}
 365
 366void dmz_unlock_map(struct dmz_metadata *zmd)
 367{
 368	mutex_unlock(&zmd->map_lock);
 369}
 370
 371/*
 372 * Lock/unlock metadata access. This is a "read" lock on a semaphore
 373 * that prevents metadata flush from running while metadata are being
 374 * modified. The actual metadata write mutual exclusion is achieved with
 375 * the map lock and zone state management (active and reclaim state are
 376 * mutually exclusive).
 377 */
 378void dmz_lock_metadata(struct dmz_metadata *zmd)
 379{
 380	down_read(&zmd->mblk_sem);
 381}
 382
 383void dmz_unlock_metadata(struct dmz_metadata *zmd)
 384{
 385	up_read(&zmd->mblk_sem);
 386}
 387
 388/*
 389 * Lock/unlock flush: prevent concurrent executions
 390 * of dmz_flush_metadata as well as metadata modification in reclaim
 391 * while flush is being executed.
 392 */
 393void dmz_lock_flush(struct dmz_metadata *zmd)
 394{
 395	mutex_lock(&zmd->mblk_flush_lock);
 396}
 397
 398void dmz_unlock_flush(struct dmz_metadata *zmd)
 399{
 400	mutex_unlock(&zmd->mblk_flush_lock);
 401}
 402
 403/*
 404 * Allocate a metadata block.
 405 */
 406static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
 407					   sector_t mblk_no)
 408{
 409	struct dmz_mblock *mblk = NULL;
 410
 411	/* See if we can reuse cached blocks */
 412	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
 413		spin_lock(&zmd->mblk_lock);
 414		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
 415						struct dmz_mblock, link);
 416		if (mblk) {
 417			list_del_init(&mblk->link);
 418			rb_erase(&mblk->node, &zmd->mblk_rbtree);
 419			mblk->no = mblk_no;
 420		}
 421		spin_unlock(&zmd->mblk_lock);
 422		if (mblk)
 423			return mblk;
 424	}
 425
 426	/* Allocate a new block */
 427	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
 428	if (!mblk)
 429		return NULL;
 430
 431	mblk->page = alloc_page(GFP_NOIO);
 432	if (!mblk->page) {
 433		kfree(mblk);
 434		return NULL;
 435	}
 436
 437	RB_CLEAR_NODE(&mblk->node);
 438	INIT_LIST_HEAD(&mblk->link);
 439	mblk->ref = 0;
 440	mblk->state = 0;
 441	mblk->no = mblk_no;
 442	mblk->data = page_address(mblk->page);
 443
 444	atomic_inc(&zmd->nr_mblks);
 445
 446	return mblk;
 447}
 448
 449/*
 450 * Free a metadata block.
 451 */
 452static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
 453{
 454	__free_pages(mblk->page, 0);
 455	kfree(mblk);
 456
 457	atomic_dec(&zmd->nr_mblks);
 458}
 459
 460/*
 461 * Insert a metadata block in the rbtree.
 462 */
 463static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
 464{
 465	struct rb_root *root = &zmd->mblk_rbtree;
 466	struct rb_node **new = &(root->rb_node), *parent = NULL;
 467	struct dmz_mblock *b;
 468
 469	/* Figure out where to put the new node */
 470	while (*new) {
 471		b = container_of(*new, struct dmz_mblock, node);
 472		parent = *new;
 473		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
 474	}
 475
 476	/* Add new node and rebalance tree */
 477	rb_link_node(&mblk->node, parent, new);
 478	rb_insert_color(&mblk->node, root);
 479}
 480
 481/*
 482 * Lookup a metadata block in the rbtree. If the block is found, increment
 483 * its reference count.
 484 */
 485static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
 486					      sector_t mblk_no)
 487{
 488	struct rb_root *root = &zmd->mblk_rbtree;
 489	struct rb_node *node = root->rb_node;
 490	struct dmz_mblock *mblk;
 491
 492	while (node) {
 493		mblk = container_of(node, struct dmz_mblock, node);
 494		if (mblk->no == mblk_no) {
 495			/*
 496			 * If this is the first reference to the block,
 497			 * remove it from the LRU list.
 498			 */
 499			mblk->ref++;
 500			if (mblk->ref == 1 &&
 501			    !test_bit(DMZ_META_DIRTY, &mblk->state))
 502				list_del_init(&mblk->link);
 503			return mblk;
 504		}
 505		node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
 506	}
 507
 508	return NULL;
 509}
 510
 511/*
 512 * Metadata block BIO end callback.
 513 */
 514static void dmz_mblock_bio_end_io(struct bio *bio)
 515{
 516	struct dmz_mblock *mblk = bio->bi_private;
 517	int flag;
 518
 519	if (bio->bi_status)
 520		set_bit(DMZ_META_ERROR, &mblk->state);
 521
 522	if (bio_op(bio) == REQ_OP_WRITE)
 523		flag = DMZ_META_WRITING;
 524	else
 525		flag = DMZ_META_READING;
 526
 527	clear_bit_unlock(flag, &mblk->state);
 528	smp_mb__after_atomic();
 529	wake_up_bit(&mblk->state, flag);
 530
 531	bio_put(bio);
 532}
 533
 534/*
 535 * Read an uncached metadata block from disk and add it to the cache.
 536 */
 537static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
 538					      sector_t mblk_no)
 539{
 540	struct dmz_mblock *mblk, *m;
 541	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
 542	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
 543	struct bio *bio;
 544
 545	if (dmz_bdev_is_dying(dev))
 546		return ERR_PTR(-EIO);
 547
 548	/* Get a new block and a BIO to read it */
 549	mblk = dmz_alloc_mblock(zmd, mblk_no);
 550	if (!mblk)
 551		return ERR_PTR(-ENOMEM);
 552
 553	bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO,
 554			GFP_NOIO);
 555
 556	spin_lock(&zmd->mblk_lock);
 557
 558	/*
 559	 * Make sure that another context did not start reading
 560	 * the block already.
 561	 */
 562	m = dmz_get_mblock_fast(zmd, mblk_no);
 563	if (m) {
 564		spin_unlock(&zmd->mblk_lock);
 565		dmz_free_mblock(zmd, mblk);
 566		bio_put(bio);
 567		return m;
 568	}
 569
 570	mblk->ref++;
 571	set_bit(DMZ_META_READING, &mblk->state);
 572	dmz_insert_mblock(zmd, mblk);
 573
 574	spin_unlock(&zmd->mblk_lock);
 575
 576	/* Submit read BIO */
 577	bio->bi_iter.bi_sector = dmz_blk2sect(block);
 578	bio->bi_private = mblk;
 579	bio->bi_end_io = dmz_mblock_bio_end_io;
 580	__bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
 581	submit_bio(bio);
 582
 583	return mblk;
 584}
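
dmz_get_mblock_slow() uses the usual "allocate outside the lock, re-check under the lock" pattern: the new block and its read BIO are prepared without holding mblk_lock, and if another context inserted the same block in the meantime, the freshly allocated copy is simply freed and the winner's block is returned. A stripped-down userspace sketch of that shape, with a single pointer standing in for the rbtree; this is illustrative only, not the driver's locking:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static void *cached;			/* stands in for the mblock rbtree */

void *get_cached_or_new(void)
{
	void *newp, *p;

	pthread_mutex_lock(&cache_lock);
	p = cached;			/* the dmz_get_mblock_fast() step */
	pthread_mutex_unlock(&cache_lock);
	if (p)
		return p;

	newp = malloc(64);		/* allocate without holding the lock */
	if (!newp)
		return NULL;

	pthread_mutex_lock(&cache_lock);
	if (cached) {			/* somebody else won the race */
		p = cached;
		pthread_mutex_unlock(&cache_lock);
		free(newp);		/* drop our copy, use theirs  */
		return p;
	}
	cached = newp;			/* we won: publish the new block */
	pthread_mutex_unlock(&cache_lock);
	return newp;
}
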
 585
 586/*
 587 * Free metadata blocks.
 588 */
 589static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
 590					     unsigned long limit)
 591{
 592	struct dmz_mblock *mblk;
 593	unsigned long count = 0;
 594
 595	if (!zmd->max_nr_mblks)
 596		return 0;
 597
 598	while (!list_empty(&zmd->mblk_lru_list) &&
 599	       atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
 600	       count < limit) {
 601		mblk = list_first_entry(&zmd->mblk_lru_list,
 602					struct dmz_mblock, link);
 603		list_del_init(&mblk->link);
 604		rb_erase(&mblk->node, &zmd->mblk_rbtree);
 605		dmz_free_mblock(zmd, mblk);
 606		count++;
 607	}
 608
 609	return count;
 610}
 611
 612/*
 613 * For mblock shrinker: get the number of unused metadata blocks in the cache.
 614 */
 615static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
 616					       struct shrink_control *sc)
 617{
 618	struct dmz_metadata *zmd = shrink->private_data;
 619
 620	return atomic_read(&zmd->nr_mblks);
 621}
 622
 623/*
 624 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
 625 */
 626static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
 627					      struct shrink_control *sc)
 628{
 629	struct dmz_metadata *zmd = shrink->private_data;
 630	unsigned long count;
 631
 632	spin_lock(&zmd->mblk_lock);
 633	count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
 634	spin_unlock(&zmd->mblk_lock);
 635
 636	return count ? count : SHRINK_STOP;
 637}
 638
 639/*
 640 * Release a metadata block.
 641 */
 642static void dmz_release_mblock(struct dmz_metadata *zmd,
 643			       struct dmz_mblock *mblk)
 644{
 645
 646	if (!mblk)
 647		return;
 648
 649	spin_lock(&zmd->mblk_lock);
 650
 651	mblk->ref--;
 652	if (mblk->ref == 0) {
 653		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
 654			rb_erase(&mblk->node, &zmd->mblk_rbtree);
 655			dmz_free_mblock(zmd, mblk);
 656		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
 657			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
 658			dmz_shrink_mblock_cache(zmd, 1);
 659		}
 660	}
 661
 662	spin_unlock(&zmd->mblk_lock);
 663}
 664
 665/*
 666 * Get a metadata block from the rbtree. If the block
 667 * is not present, read it from disk.
 668 */
 669static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
 670					 sector_t mblk_no)
 671{
 672	struct dmz_mblock *mblk;
 673	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
 674
 675	/* Check rbtree */
 676	spin_lock(&zmd->mblk_lock);
 677	mblk = dmz_get_mblock_fast(zmd, mblk_no);
 678	spin_unlock(&zmd->mblk_lock);
 679
 680	if (!mblk) {
 681		/* Cache miss: read the block from disk */
 682		mblk = dmz_get_mblock_slow(zmd, mblk_no);
 683		if (IS_ERR(mblk))
 684			return mblk;
 685	}
 686
 687	/* Wait for on-going read I/O and check for error */
 688	wait_on_bit_io(&mblk->state, DMZ_META_READING,
 689		       TASK_UNINTERRUPTIBLE);
 690	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
 691		dmz_release_mblock(zmd, mblk);
 692		dmz_check_bdev(dev);
 693		return ERR_PTR(-EIO);
 694	}
 695
 696	return mblk;
 697}
 698
 699/*
 700 * Mark a metadata block dirty.
 701 */
 702static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
 703{
 704	spin_lock(&zmd->mblk_lock);
 705	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
 706		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
 707	spin_unlock(&zmd->mblk_lock);
 708}
 709
 710/*
 711 * Issue a metadata block write BIO.
 712 */
 713static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
 714			    unsigned int set)
 715{
 716	struct dmz_dev *dev = zmd->sb[set].dev;
 717	sector_t block = zmd->sb[set].block + mblk->no;
 718	struct bio *bio;
 719
 720	if (dmz_bdev_is_dying(dev))
 721		return -EIO;
 722
 723	bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO,
 724			GFP_NOIO);
 725
 726	set_bit(DMZ_META_WRITING, &mblk->state);
 727
 728	bio->bi_iter.bi_sector = dmz_blk2sect(block);
 729	bio->bi_private = mblk;
 730	bio->bi_end_io = dmz_mblock_bio_end_io;
 731	__bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
 732	submit_bio(bio);
 733
 734	return 0;
 735}
 736
 737/*
 738 * Read/write a metadata block.
 739 */
 740static int dmz_rdwr_block(struct dmz_dev *dev, enum req_op op,
 741			  sector_t block, struct page *page)
 742{
 743	struct bio *bio;
 744	int ret;
 745
 746	if (WARN_ON(!dev))
 747		return -EIO;
 748
 749	if (dmz_bdev_is_dying(dev))
 750		return -EIO;
 751
 752	bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
 753			GFP_NOIO);
 754	bio->bi_iter.bi_sector = dmz_blk2sect(block);
 755	__bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
 756	ret = submit_bio_wait(bio);
 757	bio_put(bio);
 758
 759	if (ret)
 760		dmz_check_bdev(dev);
 761	return ret;
 762}
 763
 764/*
 765 * Write super block of the specified metadata set.
 766 */
 767static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
 768{
 769	struct dmz_mblock *mblk = zmd->sb[set].mblk;
 770	struct dmz_super *sb = zmd->sb[set].sb;
 771	struct dmz_dev *dev = zmd->sb[set].dev;
 772	sector_t sb_block;
 773	u64 sb_gen = zmd->sb_gen + 1;
 774	int ret;
 775
 776	sb->magic = cpu_to_le32(DMZ_MAGIC);
 777
 778	sb->version = cpu_to_le32(zmd->sb_version);
 779	if (zmd->sb_version > 1) {
 780		BUILD_BUG_ON(UUID_SIZE != 16);
 781		export_uuid(sb->dmz_uuid, &zmd->uuid);
 782		memcpy(sb->dmz_label, zmd->label, BDEVNAME_SIZE);
 783		export_uuid(sb->dev_uuid, &dev->uuid);
 784	}
 785
 786	sb->gen = cpu_to_le64(sb_gen);
 787
 788	/*
 789	 * The metadata always references the absolute block address,
  790	 * i.e. relative to the entire block range, not the per-device
 791	 * block address.
 792	 */
 793	sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
 794	sb->sb_block = cpu_to_le64(sb_block);
 795	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
 796	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
 797	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);
 798
 799	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
 800	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);
 801
 802	sb->crc = 0;
 803	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));
 804
 805	ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
 806			     mblk->page);
 807	if (ret == 0)
 808		ret = blkdev_issue_flush(dev->bdev);
 809
 810	return ret;
 811}
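
Note the checksum convention used here and verified again in dmz_check_sb(): the crc field is zeroed first, the CRC is computed over the entire block, and the new generation number is used as the seed. A self-contained sketch of that "zero the field, checksum the whole record" convention; struct sb_example and the bitwise CRC-32 below are stand-ins for struct dmz_super and the kernel's crc32_le(), not exact replacements:

#include <stddef.h>
#include <stdint.h>

struct sb_example {
	uint64_t gen;
	uint32_t crc;
	uint8_t  payload[500];		/* 512 bytes total, like the on-disk super block */
};

/* Plain bitwise CRC-32 (reflected polynomial), seeded by the caller. */
uint32_t crc32_compute(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	size_t i;
	int k;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return crc;
}

uint32_t sb_checksum(struct sb_example *sb)
{
	sb->crc = 0;			/* the field must not checksum itself */
	return crc32_compute((uint32_t)sb->gen, sb, sizeof(*sb));
}

int sb_verify(struct sb_example *sb)
{
	uint32_t stored = sb->crc;	/* save, then recompute the same way */

	return sb_checksum(sb) == stored;
}
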
 812
 813/*
 814 * Write dirty metadata blocks to the specified set.
 815 */
 816static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 817				   struct list_head *write_list,
 818				   unsigned int set)
 819{
 820	struct dmz_mblock *mblk;
 821	struct dmz_dev *dev = zmd->sb[set].dev;
 822	struct blk_plug plug;
 823	int ret = 0, nr_mblks_submitted = 0;
 824
 825	/* Issue writes */
 826	blk_start_plug(&plug);
 827	list_for_each_entry(mblk, write_list, link) {
 828		ret = dmz_write_mblock(zmd, mblk, set);
 829		if (ret)
 830			break;
 831		nr_mblks_submitted++;
 832	}
 833	blk_finish_plug(&plug);
 834
 835	/* Wait for completion */
 836	list_for_each_entry(mblk, write_list, link) {
 837		if (!nr_mblks_submitted)
 838			break;
 839		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
 840			       TASK_UNINTERRUPTIBLE);
 841		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
 842			clear_bit(DMZ_META_ERROR, &mblk->state);
 843			dmz_check_bdev(dev);
 844			ret = -EIO;
 845		}
 846		nr_mblks_submitted--;
 847	}
 848
 849	/* Flush drive cache (this will also sync data) */
 850	if (ret == 0)
 851		ret = blkdev_issue_flush(dev->bdev);
 852
 853	return ret;
 854}
 855
 856/*
 857 * Log dirty metadata blocks.
 858 */
 859static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
 860				 struct list_head *write_list)
 861{
 862	unsigned int log_set = zmd->mblk_primary ^ 0x1;
 863	int ret;
 864
 865	/* Write dirty blocks to the log */
 866	ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
 867	if (ret)
 868		return ret;
 869
 870	/*
 871	 * No error so far: now validate the log by updating the
 872	 * log index super block generation.
 873	 */
 874	ret = dmz_write_sb(zmd, log_set);
 875	if (ret)
 876		return ret;
 877
 878	return 0;
 879}
 880
 881/*
 882 * Flush dirty metadata blocks.
 883 */
 884int dmz_flush_metadata(struct dmz_metadata *zmd)
 885{
 886	struct dmz_mblock *mblk;
 887	struct list_head write_list;
 888	struct dmz_dev *dev;
 889	int ret;
 890
 891	if (WARN_ON(!zmd))
 892		return 0;
 893
 894	INIT_LIST_HEAD(&write_list);
 895
 896	/*
 897	 * Make sure that metadata blocks are stable before logging: take
 898	 * the write lock on the metadata semaphore to prevent target BIOs
 899	 * from modifying metadata.
 900	 */
 901	down_write(&zmd->mblk_sem);
 902	dev = zmd->sb[zmd->mblk_primary].dev;
 903
 904	/*
 905	 * This is called from the target flush work and reclaim work.
 906	 * Concurrent execution is not allowed.
 907	 */
 908	dmz_lock_flush(zmd);
 909
 910	if (dmz_bdev_is_dying(dev)) {
 911		ret = -EIO;
 912		goto out;
 913	}
 914
 915	/* Get dirty blocks */
 916	spin_lock(&zmd->mblk_lock);
 917	list_splice_init(&zmd->mblk_dirty_list, &write_list);
 918	spin_unlock(&zmd->mblk_lock);
 919
 920	/* If there are no dirty metadata blocks, just flush the device cache */
 921	if (list_empty(&write_list)) {
 922		ret = blkdev_issue_flush(dev->bdev);
 923		goto err;
 924	}
 925
 926	/*
 927	 * The primary metadata set is still clean. Keep it this way until
 928	 * all updates are successful in the secondary set. That is, use
 929	 * the secondary set as a log.
 930	 */
 931	ret = dmz_log_dirty_mblocks(zmd, &write_list);
 932	if (ret)
 933		goto err;
 934
 935	/*
 936	 * The log is on disk. It is now safe to update in place
 937	 * in the primary metadata set.
 938	 */
 939	ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
 940	if (ret)
 941		goto err;
 942
 943	ret = dmz_write_sb(zmd, zmd->mblk_primary);
 944	if (ret)
 945		goto err;
 946
 947	while (!list_empty(&write_list)) {
 948		mblk = list_first_entry(&write_list, struct dmz_mblock, link);
 949		list_del_init(&mblk->link);
 950
 951		spin_lock(&zmd->mblk_lock);
 952		clear_bit(DMZ_META_DIRTY, &mblk->state);
 953		if (mblk->ref == 0)
 954			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
 955		spin_unlock(&zmd->mblk_lock);
 956	}
 957
 958	zmd->sb_gen++;
 959out:
 960	dmz_unlock_flush(zmd);
 961	up_write(&zmd->mblk_sem);
 962
 963	return ret;
 964
 965err:
 966	if (!list_empty(&write_list)) {
 967		spin_lock(&zmd->mblk_lock);
 968		list_splice(&write_list, &zmd->mblk_dirty_list);
 969		spin_unlock(&zmd->mblk_lock);
 970	}
 971	if (!dmz_check_bdev(dev))
 972		ret = -EIO;
 973	goto out;
 974}
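
The ordering enforced above is the crash-safety story in a nutshell: dirty blocks go to the secondary (log) set first, that set's super block is written to validate the log, and only then is the primary set updated in place and its super block bumped to the same generation. A minimal userspace analogue using two copies of a record in one file; the offsets, sizes and single-record payload are made up, and each fsync() stands in for blkdev_issue_flush():

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#define SET_SIZE	4096
#define HDR_OFF(s)	((off_t)(s) * SET_SIZE)
#define PAY_OFF(s)	((off_t)(s) * SET_SIZE + 512)

static int write_set(int fd, int set, uint64_t gen, const char *payload)
{
	/* steps 1 and 3: the updated metadata payload for this set */
	if (pwrite(fd, payload, strlen(payload) + 1, PAY_OFF(set)) < 0 || fsync(fd))
		return -1;
	/* steps 2 and 4: the super block (generation) that validates it */
	if (pwrite(fd, &gen, sizeof(gen), HDR_OFF(set)) < 0 || fsync(fd))
		return -1;
	return 0;
}

int main(void)
{
	int fd = open("meta.img", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;
	/* write the secondary (log) set first; touch the primary copy only
	 * once the log is fully durable */
	if (write_set(fd, 1, 43, "new mapping + bitmaps") ||
	    write_set(fd, 0, 43, "new mapping + bitmaps"))
		return 1;
	return close(fd) ? 1 : 0;
}
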
 975
 976/*
 977 * Check super block.
 978 */
 979static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
 980			bool tertiary)
 981{
 982	struct dmz_super *sb = dsb->sb;
 983	struct dmz_dev *dev = dsb->dev;
 984	unsigned int nr_meta_zones, nr_data_zones;
 985	u32 crc, stored_crc;
 986	u64 gen, sb_block;
 987
 988	if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
 989		dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
 990			    DMZ_MAGIC, le32_to_cpu(sb->magic));
 991		return -ENXIO;
 992	}
 993
 994	zmd->sb_version = le32_to_cpu(sb->version);
 995	if (zmd->sb_version > DMZ_META_VER) {
 996		dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
 997			    DMZ_META_VER, zmd->sb_version);
 998		return -EINVAL;
 999	}
1000	if (zmd->sb_version < 2 && tertiary) {
1001		dmz_dev_err(dev, "Tertiary superblocks are not supported");
1002		return -EINVAL;
1003	}
1004
1005	gen = le64_to_cpu(sb->gen);
1006	stored_crc = le32_to_cpu(sb->crc);
1007	sb->crc = 0;
1008	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
1009	if (crc != stored_crc) {
1010		dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
1011			    crc, stored_crc);
1012		return -ENXIO;
1013	}
1014
1015	sb_block = le64_to_cpu(sb->sb_block);
1016	if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
1017		dmz_dev_err(dev, "Invalid superblock position (is %llu expected %llu)",
1018			    sb_block, (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
1019		return -EINVAL;
1020	}
1021	if (zmd->sb_version > 1) {
1022		uuid_t sb_uuid;
1023
1024		import_uuid(&sb_uuid, sb->dmz_uuid);
1025		if (uuid_is_null(&sb_uuid)) {
1026			dmz_dev_err(dev, "NULL DM-Zoned uuid");
1027			return -ENXIO;
1028		} else if (uuid_is_null(&zmd->uuid)) {
1029			uuid_copy(&zmd->uuid, &sb_uuid);
1030		} else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
1031			dmz_dev_err(dev, "mismatching DM-Zoned uuid, is %pUl expected %pUl",
1032				    &sb_uuid, &zmd->uuid);
1033			return -ENXIO;
1034		}
1035		if (!strlen(zmd->label))
1036			memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
1037		else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
1038			dmz_dev_err(dev, "mismatching DM-Zoned label, is %s expected %s",
1039				    sb->dmz_label, zmd->label);
1040			return -ENXIO;
1041		}
1042		import_uuid(&dev->uuid, sb->dev_uuid);
1043		if (uuid_is_null(&dev->uuid)) {
1044			dmz_dev_err(dev, "NULL device uuid");
1045			return -ENXIO;
1046		}
1047
1048		if (tertiary) {
1049			/*
1050			 * Generation number should be 0, but it doesn't
1051			 * really matter if it isn't.
1052			 */
1053			if (gen != 0)
1054				dmz_dev_warn(dev, "Invalid generation %llu",
1055					    gen);
1056			return 0;
1057		}
1058	}
1059
1060	nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
1061		>> zmd->zone_nr_blocks_shift;
1062	if (!nr_meta_zones ||
1063	    (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) ||
1064	    (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) {
1065		dmz_dev_err(dev, "Invalid number of metadata blocks");
1066		return -ENXIO;
1067	}
1068
1069	if (!le32_to_cpu(sb->nr_reserved_seq) ||
1070	    le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
1071		dmz_dev_err(dev, "Invalid number of reserved sequential zones");
1072		return -ENXIO;
1073	}
1074
1075	nr_data_zones = zmd->nr_useable_zones -
1076		(nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
1077	if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
1078		dmz_dev_err(dev, "Invalid number of chunks %u / %u",
1079			    le32_to_cpu(sb->nr_chunks), nr_data_zones);
1080		return -ENXIO;
1081	}
1082
1083	/* OK */
1084	zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
1085	zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
1086	zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
1087	zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
1088	zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
1089	zmd->nr_meta_zones = nr_meta_zones;
1090	zmd->nr_data_zones = nr_data_zones;
1091
1092	return 0;
1093}
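/*
 * Checksum sketch, mirroring the validation above: the CRC covers the
 * whole 4KB super block with the crc field cleared and is seeded with the
 * generation number, tying the checksum to the recorded generation:
 *
 *	sb->crc = 0;
 *	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
 */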
1094
1095/*
1096 * Read the first or second super block from disk.
1097 */
1098static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
1099{
1100	dmz_zmd_debug(zmd, "read superblock set %d dev %pg block %llu",
1101		      set, sb->dev->bdev, sb->block);
1102
1103	return dmz_rdwr_block(sb->dev, REQ_OP_READ,
1104			      sb->block, sb->mblk->page);
1105}
1106
1107/*
1108 * Determine the position of the secondary super block on disk.
1109 * This is used only if a corruption of the primary super block
1110 * is detected.
1111 */
1112static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
1113{
1114	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
1115	struct dmz_mblock *mblk;
1116	unsigned int zone_id = zmd->sb[0].zone->id;
1117	int i;
1118
1119	/* Allocate a block */
1120	mblk = dmz_alloc_mblock(zmd, 0);
1121	if (!mblk)
1122		return -ENOMEM;
1123
1124	zmd->sb[1].mblk = mblk;
1125	zmd->sb[1].sb = mblk->data;
1126
1127	/* Bad first super block: search for the second one */
1128	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
1129	zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
1130	zmd->sb[1].dev = zmd->sb[0].dev;
1131	for (i = 1; i < zmd->nr_rnd_zones; i++) {
1132		if (dmz_read_sb(zmd, &zmd->sb[1], 1) != 0)
1133			break;
1134		if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
1135			return 0;
1136		zmd->sb[1].block += zone_nr_blocks;
1137		zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
1138	}
1139
1140	dmz_free_mblock(zmd, mblk);
1141	zmd->sb[1].mblk = NULL;
1142	zmd->sb[1].zone = NULL;
1143	zmd->sb[1].dev = NULL;
1144
1145	return -EIO;
1146}
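/*
 * The scan above only checks the magic number; full validation (version,
 * CRC, position, UUIDs) is done afterwards by dmz_check_sb() from
 * dmz_load_sb().
 */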
1147
1148/*
1149 * Read a super block from disk.
1150 */
1151static int dmz_get_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
1152{
1153	struct dmz_mblock *mblk;
1154	int ret;
1155
1156	/* Allocate a block */
1157	mblk = dmz_alloc_mblock(zmd, 0);
1158	if (!mblk)
1159		return -ENOMEM;
1160
1161	sb->mblk = mblk;
1162	sb->sb = mblk->data;
1163
1164	/* Read super block */
1165	ret = dmz_read_sb(zmd, sb, set);
1166	if (ret) {
1167		dmz_free_mblock(zmd, mblk);
1168		sb->mblk = NULL;
1169		return ret;
1170	}
1171
1172	return 0;
1173}
1174
1175/*
1176 * Recover a metadata set.
1177 */
1178static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
1179{
1180	unsigned int src_set = dst_set ^ 0x1;
1181	struct page *page;
1182	int i, ret;
1183
1184	dmz_dev_warn(zmd->sb[dst_set].dev,
1185		     "Metadata set %u invalid: recovering", dst_set);
1186
1187	if (dst_set == 0)
1188		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
1189	else
1190		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
1191
1192	page = alloc_page(GFP_NOIO);
1193	if (!page)
1194		return -ENOMEM;
1195
1196	/* Copy metadata blocks */
1197	for (i = 1; i < zmd->nr_meta_blocks; i++) {
1198		ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
1199				     zmd->sb[src_set].block + i, page);
1200		if (ret)
1201			goto out;
1202		ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
1203				     zmd->sb[dst_set].block + i, page);
1204		if (ret)
1205			goto out;
1206	}
1207
1208	/* Finalize with the super block */
1209	if (!zmd->sb[dst_set].mblk) {
1210		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
1211		if (!zmd->sb[dst_set].mblk) {
1212			ret = -ENOMEM;
1213			goto out;
1214		}
1215		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
1216	}
1217
1218	ret = dmz_write_sb(zmd, dst_set);
1219out:
1220	__free_pages(page, 0);
1221
1222	return ret;
1223}
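/*
 * Recovery copies metadata blocks 1..nr_meta_blocks - 1 from the valid
 * set first and writes the destination super block last, so an
 * interrupted recovery leaves the destination set invalid rather than
 * half-copied behind a valid super block.
 */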
1224
1225/*
1226 * Get super block from disk.
1227 */
1228static int dmz_load_sb(struct dmz_metadata *zmd)
1229{
1230	bool sb_good[2] = {false, false};
1231	u64 sb_gen[2] = {0, 0};
1232	int ret;
1233
1234	if (!zmd->sb[0].zone) {
1235		dmz_zmd_err(zmd, "Primary super block zone not set");
1236		return -ENXIO;
1237	}
1238
1239	/* Read and check the primary super block */
1240	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
1241	zmd->sb[0].dev = zmd->sb[0].zone->dev;
1242	ret = dmz_get_sb(zmd, &zmd->sb[0], 0);
1243	if (ret) {
1244		dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
1245		return ret;
1246	}
1247
1248	ret = dmz_check_sb(zmd, &zmd->sb[0], false);
1249
1250	/* Read and check secondary super block */
1251	if (ret == 0) {
1252		sb_good[0] = true;
1253		if (!zmd->sb[1].zone) {
1254			unsigned int zone_id =
1255				zmd->sb[0].zone->id + zmd->nr_meta_zones;
1256
1257			zmd->sb[1].zone = dmz_get(zmd, zone_id);
1258		}
1259		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
1260		zmd->sb[1].dev = zmd->sb[0].dev;
1261		ret = dmz_get_sb(zmd, &zmd->sb[1], 1);
1262	} else
1263		ret = dmz_lookup_secondary_sb(zmd);
1264
1265	if (ret) {
1266		dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
1267		return ret;
1268	}
1269
1270	ret = dmz_check_sb(zmd, &zmd->sb[1], false);
1271	if (ret == 0)
1272		sb_good[1] = true;
1273
1274	/* Use highest generation sb first */
1275	if (!sb_good[0] && !sb_good[1]) {
1276		dmz_zmd_err(zmd, "No valid super block found");
1277		return -EIO;
1278	}
1279
1280	if (sb_good[0])
1281		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
1282	else {
1283		ret = dmz_recover_mblocks(zmd, 0);
1284		if (ret) {
1285			dmz_dev_err(zmd->sb[0].dev,
1286				    "Recovery of superblock 0 failed");
1287			return -EIO;
1288		}
1289	}
1290
1291	if (sb_good[1])
1292		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
1293	else {
1294		ret = dmz_recover_mblocks(zmd, 1);
1295
1296		if (ret) {
1297			dmz_dev_err(zmd->sb[1].dev,
1298				    "Recovery of superblock 1 failed");
1299			return -EIO;
1300		}
1301	}
1302
1303	if (sb_gen[0] >= sb_gen[1]) {
1304		zmd->sb_gen = sb_gen[0];
1305		zmd->mblk_primary = 0;
1306	} else {
1307		zmd->sb_gen = sb_gen[1];
1308		zmd->mblk_primary = 1;
1309	}
1310
1311	dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
1312		      "Using super block %u (gen %llu)",
1313		      zmd->mblk_primary, zmd->sb_gen);
1314
1315	if (zmd->sb_version > 1) {
1316		int i;
1317		struct dmz_sb *sb;
1318
1319		sb = kzalloc(sizeof(struct dmz_sb), GFP_KERNEL);
1320		if (!sb)
1321			return -ENOMEM;
1322		for (i = 1; i < zmd->nr_devs; i++) {
1323			sb->block = 0;
1324			sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
1325			sb->dev = &zmd->dev[i];
1326			if (!dmz_is_meta(sb->zone)) {
1327				dmz_dev_err(sb->dev,
1328					    "Tertiary super block zone %u not marked as metadata zone",
1329					    sb->zone->id);
1330				ret = -EINVAL;
1331				goto out_kfree;
1332			}
1333			ret = dmz_get_sb(zmd, sb, i + 1);
1334			if (ret) {
1335				dmz_dev_err(sb->dev,
1336					    "Read tertiary super block failed");
1337				dmz_free_mblock(zmd, sb->mblk);
1338				goto out_kfree;
1339			}
1340			ret = dmz_check_sb(zmd, sb, true);
1341			dmz_free_mblock(zmd, sb->mblk);
1342			if (ret == -EINVAL)
1343				goto out_kfree;
1344		}
1345out_kfree:
1346		kfree(sb);
1347	}
1348	return ret;
1349}
1350
1351/*
1352 * Initialize a zone descriptor.
1353 */
1354static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
1355{
1356	struct dmz_dev *dev = data;
1357	struct dmz_metadata *zmd = dev->metadata;
1358	int idx = num + dev->zone_offset;
1359	struct dm_zone *zone;
1360
1361	zone = dmz_insert(zmd, idx, dev);
1362	if (IS_ERR(zone))
1363		return PTR_ERR(zone);
1364
1365	if (blkz->len != zmd->zone_nr_sectors) {
1366		if (zmd->sb_version > 1) {
1367			/* Ignore a possible runt (smaller) zone */
1368			set_bit(DMZ_OFFLINE, &zone->flags);
1369			return 0;
1370		} else if (blkz->start + blkz->len == dev->capacity)
1371			return 0;
1372		return -ENXIO;
1373	}
1374
1375	/*
1376	 * Devices that have zones with a capacity smaller than the zone size
1377	 * (e.g. NVMe zoned namespaces) are not supported.
1378	 */
1379	if (blkz->capacity != blkz->len)
1380		return -ENXIO;
1381
1382	switch (blkz->type) {
1383	case BLK_ZONE_TYPE_CONVENTIONAL:
1384		set_bit(DMZ_RND, &zone->flags);
1385		break;
1386	case BLK_ZONE_TYPE_SEQWRITE_REQ:
1387	case BLK_ZONE_TYPE_SEQWRITE_PREF:
1388		set_bit(DMZ_SEQ, &zone->flags);
1389		break;
1390	default:
1391		return -ENXIO;
1392	}
1393
1394	if (dmz_is_rnd(zone))
1395		zone->wp_block = 0;
1396	else
1397		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
1398
1399	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
1400		set_bit(DMZ_OFFLINE, &zone->flags);
1401	else if (blkz->cond == BLK_ZONE_COND_READONLY)
1402		set_bit(DMZ_READ_ONLY, &zone->flags);
1403	else {
1404		zmd->nr_useable_zones++;
1405		if (dmz_is_rnd(zone)) {
1406			zmd->nr_rnd_zones++;
1407			if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
1408				/* Primary super block zone */
1409				zmd->sb[0].zone = zone;
1410			}
1411		}
1412		if (zmd->nr_devs > 1 && num == 0) {
1413			/*
1414			 * Tertiary superblock zones are always at the
1415			 * start of the zoned devices, so mark them
1416			 * as metadata zones.
1417			 */
1418			set_bit(DMZ_META, &zone->flags);
1419		}
1420	}
1421	return 0;
1422}
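/*
 * Note: zone->wp_block is kept as a block offset relative to the zone
 * start (dmz_sect2blk(blkz->wp - blkz->start)), not as an absolute
 * position, which is why randomly writable zones simply use 0.
 */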
1423
1424static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
1425{
1426	int idx;
1427	sector_t zone_offset = 0;
1428
1429	for (idx = 0; idx < dev->nr_zones; idx++) {
1430		struct dm_zone *zone;
1431
1432		zone = dmz_insert(zmd, idx, dev);
1433		if (IS_ERR(zone))
1434			return PTR_ERR(zone);
1435		set_bit(DMZ_CACHE, &zone->flags);
1436		zone->wp_block = 0;
1437		zmd->nr_cache_zones++;
1438		zmd->nr_useable_zones++;
1439		if (dev->capacity - zone_offset < zmd->zone_nr_sectors) {
1440			/* Disable runt zone */
1441			set_bit(DMZ_OFFLINE, &zone->flags);
1442			break;
1443		}
1444		zone_offset += zmd->zone_nr_sectors;
1445	}
1446	return 0;
1447}
1448
1449/*
1450 * Free zone descriptors.
1451 */
1452static void dmz_drop_zones(struct dmz_metadata *zmd)
1453{
1454	int idx;
1455
1456	for (idx = 0; idx < zmd->nr_zones; idx++) {
1457		struct dm_zone *zone = xa_load(&zmd->zones, idx);
1458
1459		kfree(zone);
1460		xa_erase(&zmd->zones, idx);
1461	}
1462	xa_destroy(&zmd->zones);
1463}
1464
1465/*
1466 * Allocate and initialize zone descriptors using the zone
1467 * information from disk.
1468 */
1469static int dmz_init_zones(struct dmz_metadata *zmd)
1470{
1471	int i, ret;
1472	struct dmz_dev *zoned_dev = &zmd->dev[0];
1473
1474	/* Init */
1475	zmd->zone_nr_sectors = zmd->dev[0].zone_nr_sectors;
1476	zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
1477	zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
1478	zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
1479	zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
1480	zmd->zone_nr_bitmap_blocks =
1481		max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
1482	zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
1483					DMZ_BLOCK_SIZE_BITS);
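	/*
	 * Worked example (hypothetical 256 MiB zones, 4KB metadata blocks):
	 * zone_nr_sectors = 524288, zone_nr_blocks = 65536,
	 * zone_bitmap_size = 8192 B, zone_nr_bitmap_blocks = 2 and
	 * zone_bits_per_mblk = 32768, i.e. one bitmap block covers half a
	 * zone.
	 */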
1484
1485	/* Allocate zone array */
1486	zmd->nr_zones = 0;
1487	for (i = 0; i < zmd->nr_devs; i++) {
1488		struct dmz_dev *dev = &zmd->dev[i];
1489
1490		dev->metadata = zmd;
1491		zmd->nr_zones += dev->nr_zones;
1492
1493		atomic_set(&dev->unmap_nr_rnd, 0);
1494		INIT_LIST_HEAD(&dev->unmap_rnd_list);
1495		INIT_LIST_HEAD(&dev->map_rnd_list);
1496
1497		atomic_set(&dev->unmap_nr_seq, 0);
1498		INIT_LIST_HEAD(&dev->unmap_seq_list);
1499		INIT_LIST_HEAD(&dev->map_seq_list);
1500	}
1501
1502	if (!zmd->nr_zones) {
1503		DMERR("(%s): No zones found", zmd->devname);
1504		return -ENXIO;
1505	}
1506	xa_init(&zmd->zones);
1507
1508	DMDEBUG("(%s): Using %zu B for zone information",
1509		zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones);
1510
1511	if (zmd->nr_devs > 1) {
1512		ret = dmz_emulate_zones(zmd, &zmd->dev[0]);
1513		if (ret < 0) {
1514			DMDEBUG("(%s): Failed to emulate zones, error %d",
1515				zmd->devname, ret);
1516			dmz_drop_zones(zmd);
1517			return ret;
1518		}
1519
1520		/*
1521		 * Primary superblock zone is always at zone 0 when multiple
1522		 * drives are present.
1523		 */
1524		zmd->sb[0].zone = dmz_get(zmd, 0);
1525
1526		for (i = 1; i < zmd->nr_devs; i++) {
1527			zoned_dev = &zmd->dev[i];
1528
1529			ret = blkdev_report_zones(zoned_dev->bdev, 0,
1530						  BLK_ALL_ZONES,
1531						  dmz_init_zone, zoned_dev);
1532			if (ret < 0) {
1533				DMDEBUG("(%s): Failed to report zones, error %d",
1534					zmd->devname, ret);
1535				dmz_drop_zones(zmd);
1536				return ret;
1537			}
1538		}
1539		return 0;
1540	}
1541
1542	/*
1543	 * Get zone information and initialize zone descriptors.  At the same
1544	 * time, determine where the super block should be: first block of the
1545	 * first randomly writable zone.
1546	 */
1547	ret = blkdev_report_zones(zoned_dev->bdev, 0, BLK_ALL_ZONES,
1548				  dmz_init_zone, zoned_dev);
1549	if (ret < 0) {
1550		DMDEBUG("(%s): Failed to report zones, error %d",
1551			zmd->devname, ret);
1552		dmz_drop_zones(zmd);
1553		return ret;
1554	}
1555
1556	return 0;
1557}
1558
1559static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
1560			      void *data)
1561{
1562	struct dm_zone *zone = data;
1563
1564	clear_bit(DMZ_OFFLINE, &zone->flags);
1565	clear_bit(DMZ_READ_ONLY, &zone->flags);
1566	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
1567		set_bit(DMZ_OFFLINE, &zone->flags);
1568	else if (blkz->cond == BLK_ZONE_COND_READONLY)
1569		set_bit(DMZ_READ_ONLY, &zone->flags);
1570
1571	if (dmz_is_seq(zone))
1572		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
1573	else
1574		zone->wp_block = 0;
1575	return 0;
1576}
1577
1578/*
1579 * Update a zone's information.
1580 */
1581static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1582{
1583	struct dmz_dev *dev = zone->dev;
1584	unsigned int noio_flag;
1585	int ret;
1586
1587	if (dev->flags & DMZ_BDEV_REGULAR)
1588		return 0;
1589
1590	/*
1591	 * Get zone information from disk. Since blkdev_report_zones() uses
1592	 * GFP_KERNEL by default for memory allocations, set the per-task
1593	 * PF_MEMALLOC_NOIO flag so that all allocations are done as if
1594	 * GFP_NOIO was specified.
1595	 */
1596	noio_flag = memalloc_noio_save();
1597	ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
1598				  dmz_update_zone_cb, zone);
1599	memalloc_noio_restore(noio_flag);
1600
1601	if (ret == 0)
1602		ret = -EIO;
1603	if (ret < 0) {
1604		dmz_dev_err(dev, "Get zone %u report failed",
1605			    zone->id);
1606		dmz_check_bdev(dev);
1607		return ret;
1608	}
1609
1610	return 0;
1611}
1612
1613/*
1614 * Check a zone write pointer position when the zone is marked
1615 * with the sequential write error flag.
1616 */
1617static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
1618				    struct dm_zone *zone)
1619{
1620	struct dmz_dev *dev = zone->dev;
1621	unsigned int wp = 0;
1622	int ret;
1623
1624	wp = zone->wp_block;
1625	ret = dmz_update_zone(zmd, zone);
1626	if (ret)
1627		return ret;
1628
1629	dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
1630		     zone->id, zone->wp_block, wp);
1631
1632	if (zone->wp_block < wp) {
1633		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
1634				      wp - zone->wp_block);
1635	}
1636
1637	return 0;
1638}
1639
1640/*
1641 * Reset a zone write pointer.
1642 */
1643static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1644{
1645	int ret;
1646
1647	/*
1648	 * Ignore offline zones, read only zones,
1649	 * and conventional zones.
1650	 */
1651	if (dmz_is_offline(zone) ||
1652	    dmz_is_readonly(zone) ||
1653	    dmz_is_rnd(zone))
1654		return 0;
1655
1656	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
1657		struct dmz_dev *dev = zone->dev;
1658
1659		ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
1660				       dmz_start_sect(zmd, zone),
1661				       zmd->zone_nr_sectors, GFP_NOIO);
1662		if (ret) {
1663			dmz_dev_err(dev, "Reset zone %u failed %d",
1664				    zone->id, ret);
1665			return ret;
1666		}
1667	}
1668
1669	/* Clear write error bit and rewind write pointer position */
1670	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
1671	zone->wp_block = 0;
1672
1673	return 0;
1674}
1675
1676static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
1677
1678/*
1679 * Initialize chunk mapping.
1680 */
1681static int dmz_load_mapping(struct dmz_metadata *zmd)
1682{
1683	struct dm_zone *dzone, *bzone;
1684	struct dmz_mblock *dmap_mblk = NULL;
1685	struct dmz_map *dmap;
1686	unsigned int i = 0, e = 0, chunk = 0;
1687	unsigned int dzone_id;
1688	unsigned int bzone_id;
1689
1690	/* Metadata block array for the chunk mapping table */
1691	zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
1692				sizeof(struct dmz_mblk *), GFP_KERNEL);
1693	if (!zmd->map_mblk)
1694		return -ENOMEM;
1695
1696	/* Get chunk mapping table blocks and initialize zone mapping */
1697	while (chunk < zmd->nr_chunks) {
1698		if (!dmap_mblk) {
1699			/* Get mapping block */
1700			dmap_mblk = dmz_get_mblock(zmd, i + 1);
1701			if (IS_ERR(dmap_mblk))
1702				return PTR_ERR(dmap_mblk);
1703			zmd->map_mblk[i] = dmap_mblk;
1704			dmap = dmap_mblk->data;
1705			i++;
1706			e = 0;
1707		}
1708
1709		/* Check data zone */
1710		dzone_id = le32_to_cpu(dmap[e].dzone_id);
1711		if (dzone_id == DMZ_MAP_UNMAPPED)
1712			goto next;
1713
1714		if (dzone_id >= zmd->nr_zones) {
1715			dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
1716				    chunk, dzone_id);
1717			return -EIO;
1718		}
1719
1720		dzone = dmz_get(zmd, dzone_id);
1721		if (!dzone) {
1722			dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
1723				    chunk, dzone_id);
1724			return -EIO;
1725		}
1726		set_bit(DMZ_DATA, &dzone->flags);
1727		dzone->chunk = chunk;
1728		dmz_get_zone_weight(zmd, dzone);
1729
1730		if (dmz_is_cache(dzone))
1731			list_add_tail(&dzone->link, &zmd->map_cache_list);
1732		else if (dmz_is_rnd(dzone))
1733			list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
1734		else
1735			list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
1736
1737		/* Check buffer zone */
1738		bzone_id = le32_to_cpu(dmap[e].bzone_id);
1739		if (bzone_id == DMZ_MAP_UNMAPPED)
1740			goto next;
1741
1742		if (bzone_id >= zmd->nr_zones) {
1743			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
1744				    chunk, bzone_id);
1745			return -EIO;
1746		}
1747
1748		bzone = dmz_get(zmd, bzone_id);
1749		if (!bzone) {
1750			dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
1751				    chunk, bzone_id);
1752			return -EIO;
1753		}
1754		if (!dmz_is_rnd(bzone) && !dmz_is_cache(bzone)) {
1755			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
1756				    chunk, bzone_id);
1757			return -EIO;
1758		}
1759
1760		set_bit(DMZ_DATA, &bzone->flags);
1761		set_bit(DMZ_BUF, &bzone->flags);
1762		bzone->chunk = chunk;
1763		bzone->bzone = dzone;
1764		dzone->bzone = bzone;
1765		dmz_get_zone_weight(zmd, bzone);
1766		if (dmz_is_cache(bzone))
1767			list_add_tail(&bzone->link, &zmd->map_cache_list);
1768		else
1769			list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
1770next:
1771		chunk++;
1772		e++;
1773		if (e >= DMZ_MAP_ENTRIES)
1774			dmap_mblk = NULL;
1775	}
1776
1777	/*
1778	 * At this point, only meta zones and mapped data zones have been
1779	 * fully initialized. All remaining zones are unmapped data
1780	 * zones. Finish initializing those here.
1781	 */
1782	for (i = 0; i < zmd->nr_zones; i++) {
1783		dzone = dmz_get(zmd, i);
1784		if (!dzone)
1785			continue;
1786		if (dmz_is_meta(dzone))
1787			continue;
1788		if (dmz_is_offline(dzone))
1789			continue;
1790
1791		if (dmz_is_cache(dzone))
1792			zmd->nr_cache++;
1793		else if (dmz_is_rnd(dzone))
1794			dzone->dev->nr_rnd++;
1795		else
1796			dzone->dev->nr_seq++;
1797
1798		if (dmz_is_data(dzone)) {
1799			/* Already initialized */
1800			continue;
1801		}
1802
1803		/* Unmapped data zone */
1804		set_bit(DMZ_DATA, &dzone->flags);
1805		dzone->chunk = DMZ_MAP_UNMAPPED;
1806		if (dmz_is_cache(dzone)) {
1807			list_add_tail(&dzone->link, &zmd->unmap_cache_list);
1808			atomic_inc(&zmd->unmap_nr_cache);
1809		} else if (dmz_is_rnd(dzone)) {
1810			list_add_tail(&dzone->link,
1811				      &dzone->dev->unmap_rnd_list);
1812			atomic_inc(&dzone->dev->unmap_nr_rnd);
1813		} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
1814			list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
1815			set_bit(DMZ_RESERVED, &dzone->flags);
1816			atomic_inc(&zmd->nr_reserved_seq_zones);
1817			dzone->dev->nr_seq--;
1818		} else {
1819			list_add_tail(&dzone->link,
1820				      &dzone->dev->unmap_seq_list);
1821			atomic_inc(&dzone->dev->unmap_nr_seq);
1822		}
1823	}
1824
1825	return 0;
1826}
1827
1828/*
1829 * Set a data chunk mapping.
1830 */
1831static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
1832				  unsigned int dzone_id, unsigned int bzone_id)
1833{
1834	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
1835	struct dmz_map *dmap = dmap_mblk->data;
1836	int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;
1837
1838	dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
1839	dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
1840	dmz_dirty_mblock(zmd, dmap_mblk);
1841}
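/*
 * Illustrative index arithmetic for the helper above (the chunk number is
 * an assumed example): with 512 mapping entries per 4KB block, chunk 1000
 * is described by map block 1000 >> DMZ_MAP_ENTRIES_SHIFT = 1, entry
 * 1000 & DMZ_MAP_ENTRIES_MASK = 488.
 */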
1842
1843/*
1844 * The list of mapped zones is maintained in LRU order.
1845 * This rotates a zone to the end of its map list.
1846 */
1847static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1848{
1849	if (list_empty(&zone->link))
1850		return;
1851
1852	list_del_init(&zone->link);
1853	if (dmz_is_seq(zone)) {
1854		/* LRU rotate sequential zone */
1855		list_add_tail(&zone->link, &zone->dev->map_seq_list);
1856	} else if (dmz_is_cache(zone)) {
1857		/* LRU rotate cache zone */
1858		list_add_tail(&zone->link, &zmd->map_cache_list);
1859	} else {
1860		/* LRU rotate random zone */
1861		list_add_tail(&zone->link, &zone->dev->map_rnd_list);
1862	}
1863}
1864
1865/*
1866 * The lists of mapped zones are maintained in LRU order. This rotates
1867 * a zone, and its buffer zone if any, to the end of its list.
1868 */
1869static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1870{
1871	__dmz_lru_zone(zmd, zone);
1872	if (zone->bzone)
1873		__dmz_lru_zone(zmd, zone->bzone);
1874}
1875
1876/*
1877 * Wait for any zone to be freed.
1878 */
1879static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
1880{
1881	DEFINE_WAIT(wait);
1882
1883	prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
1884	dmz_unlock_map(zmd);
1885	dmz_unlock_metadata(zmd);
1886
1887	io_schedule_timeout(HZ);
1888
1889	dmz_lock_metadata(zmd);
1890	dmz_lock_map(zmd);
1891	finish_wait(&zmd->free_wq, &wait);
1892}
1893
1894/*
1895 * Lock a zone for reclaim (set the zone RECLAIM bit).
1896 * Returns 0 if the zone cannot be locked or if it is already locked,
1897 * and 1 otherwise.
1898 */
1899int dmz_lock_zone_reclaim(struct dm_zone *zone)
1900{
1901	/* Active zones cannot be reclaimed */
1902	if (dmz_is_active(zone))
1903		return 0;
1904
1905	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
1906}
1907
1908/*
1909 * Clear a zone reclaim flag.
1910 */
1911void dmz_unlock_zone_reclaim(struct dm_zone *zone)
1912{
1913	WARN_ON(dmz_is_active(zone));
1914	WARN_ON(!dmz_in_reclaim(zone));
1915
1916	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
1917	smp_mb__after_atomic();
1918	wake_up_bit(&zone->flags, DMZ_RECLAIM);
1919}
1920
1921/*
1922 * Wait for a zone reclaim to complete.
1923 */
1924static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
1925{
1926	dmz_unlock_map(zmd);
1927	dmz_unlock_metadata(zmd);
1928	set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
1929	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
1930	clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
1931	dmz_lock_metadata(zmd);
1932	dmz_lock_map(zmd);
1933}
1934
1935/*
1936 * Select a cache or random write zone for reclaim.
1937 */
1938static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
1939						    unsigned int idx, bool idle)
1940{
1941	struct dm_zone *dzone = NULL;
1942	struct dm_zone *zone, *maxw_z = NULL;
1943	struct list_head *zone_list;
1944
1945	/* If we have cache zones select from the cache zone list */
1946	if (zmd->nr_cache) {
1947		zone_list = &zmd->map_cache_list;
1948		/* Try to reclaim random zones, too, when idle */
1949		if (idle && list_empty(zone_list))
1950			zone_list = &zmd->dev[idx].map_rnd_list;
1951	} else
1952		zone_list = &zmd->dev[idx].map_rnd_list;
1953
1954	/*
1955	 * Find the buffer zone with the heaviest weight or the first (oldest)
1956	 * data zone that can be reclaimed.
1957	 */
1958	list_for_each_entry(zone, zone_list, link) {
1959		if (dmz_is_buf(zone)) {
1960			dzone = zone->bzone;
1961			if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
1962				continue;
1963			if (!maxw_z || maxw_z->weight < dzone->weight)
1964				maxw_z = dzone;
1965		} else {
1966			dzone = zone;
1967			if (dmz_lock_zone_reclaim(dzone))
1968				return dzone;
1969		}
1970	}
1971
1972	if (maxw_z && dmz_lock_zone_reclaim(maxw_z))
1973		return maxw_z;
1974
1975	/*
1976	 * If we come here, none of the zones inspected could be locked for
1977	 * reclaim. Try again, being more aggressive, that is, find the
1978	 * first zone that can be reclaimed regardless of its weight.
1979	 */
1980	list_for_each_entry(zone, zone_list, link) {
1981		if (dmz_is_buf(zone)) {
1982			dzone = zone->bzone;
1983			if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
1984				continue;
1985		} else
1986			dzone = zone;
1987		if (dmz_lock_zone_reclaim(dzone))
1988			return dzone;
1989	}
1990
1991	return NULL;
1992}
1993
1994/*
1995 * Select a buffered sequential zone for reclaim.
1996 */
1997static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
1998						    unsigned int idx)
1999{
2000	struct dm_zone *zone;
2001
2002	list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
2003		if (!zone->bzone)
2004			continue;
2005		if (dmz_lock_zone_reclaim(zone))
2006			return zone;
2007	}
2008
2009	return NULL;
2010}
2011
2012/*
2013 * Select a zone for reclaim.
2014 */
2015struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
2016					 unsigned int dev_idx, bool idle)
2017{
2018	struct dm_zone *zone = NULL;
2019
2020	/*
2021	 * Search for a zone candidate to reclaim: 2 cases are possible.
2022	 * (1) There are no free sequential zones. Then a random data zone
2023	 *     cannot be reclaimed. So choose a sequential zone to reclaim so
2024	 *     that afterward a random zone can be reclaimed.
2025	 * (2) At least one free sequential zone is available, then choose
2026	 *     the oldest random zone (data or buffer) that can be locked.
2027	 */
2028	dmz_lock_map(zmd);
2029	if (list_empty(&zmd->reserved_seq_zones_list))
2030		zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
2031	if (!zone)
2032		zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
2033	dmz_unlock_map(zmd);
2034
2035	return zone;
2036}
2037
2038/*
2039 * Get the zone mapping a chunk, if the chunk is mapped already.
2040 * If no mapping exists and the operation is WRITE, a zone is
2041 * allocated and used to map the chunk.
2042 * The zone returned will be set to the active state.
2043 */
2044struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
2045				      unsigned int chunk, enum req_op op)
2046{
2047	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
2048	struct dmz_map *dmap = dmap_mblk->data;
2049	int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
2050	unsigned int dzone_id;
2051	struct dm_zone *dzone = NULL;
2052	int ret = 0;
2053	int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
2054
2055	dmz_lock_map(zmd);
2056again:
2057	/* Get the chunk mapping */
2058	dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
2059	if (dzone_id == DMZ_MAP_UNMAPPED) {
2060		/*
2061		 * Reads and discards of unmapped chunks are fine. But for
2062		 * writes, we need a mapping, so get one.
2063		 */
2064		if (op != REQ_OP_WRITE)
2065			goto out;
2066
2067		/* Allocate a random zone */
2068		dzone = dmz_alloc_zone(zmd, 0, alloc_flags);
2069		if (!dzone) {
2070			if (dmz_dev_is_dying(zmd)) {
2071				dzone = ERR_PTR(-EIO);
2072				goto out;
2073			}
2074			dmz_wait_for_free_zones(zmd);
2075			goto again;
2076		}
2077
2078		dmz_map_zone(zmd, dzone, chunk);
2079
2080	} else {
2081		/* The chunk is already mapped: get the mapping zone */
2082		dzone = dmz_get(zmd, dzone_id);
2083		if (!dzone) {
2084			dzone = ERR_PTR(-EIO);
2085			goto out;
2086		}
2087		if (dzone->chunk != chunk) {
2088			dzone = ERR_PTR(-EIO);
2089			goto out;
2090		}
2091
2092		/* Repair the write pointer if the sequential dzone has an error */
2093		if (dmz_seq_write_err(dzone)) {
2094			ret = dmz_handle_seq_write_err(zmd, dzone);
2095			if (ret) {
2096				dzone = ERR_PTR(-EIO);
2097				goto out;
2098			}
2099			clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
2100		}
2101	}
2102
2103	/*
2104	 * If the zone is being reclaimed, the chunk mapping may change
2105	 * to a different zone. So wait for reclaim and retry. Otherwise,
2106	 * activate the zone (this will prevent reclaim from touching it).
2107	 */
2108	if (dmz_in_reclaim(dzone)) {
2109		dmz_wait_for_reclaim(zmd, dzone);
2110		goto again;
2111	}
2112	dmz_activate_zone(dzone);
2113	dmz_lru_zone(zmd, dzone);
2114out:
2115	dmz_unlock_map(zmd);
2116
2117	return dzone;
2118}
2119
2120/*
2121 * Write and discard change the block validity of data zones and their buffer
2122 * zones. Check here that valid blocks are still present. If all blocks are
2123 * invalid, the zones can be unmapped on the fly without waiting for reclaim
2124 * to do it.
2125 */
2126void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
2127{
2128	struct dm_zone *bzone;
2129
2130	dmz_lock_map(zmd);
2131
2132	bzone = dzone->bzone;
2133	if (bzone) {
2134		if (dmz_weight(bzone))
2135			dmz_lru_zone(zmd, bzone);
2136		else {
2137			/* Empty buffer zone: reclaim it */
2138			dmz_unmap_zone(zmd, bzone);
2139			dmz_free_zone(zmd, bzone);
2140			bzone = NULL;
2141		}
2142	}
2143
2144	/* Deactivate the data zone */
2145	dmz_deactivate_zone(dzone);
2146	if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
2147		dmz_lru_zone(zmd, dzone);
2148	else {
2149		/* Unbuffered inactive empty data zone: reclaim it */
2150		dmz_unmap_zone(zmd, dzone);
2151		dmz_free_zone(zmd, dzone);
2152	}
2153
2154	dmz_unlock_map(zmd);
2155}
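/*
 * dmz_get_chunk_mapping() and dmz_put_chunk_mapping() are meant to be
 * used as an activate/deactivate pair around the processing of a chunk
 * BIO: the data zone stays active, and thus protected from reclaim, in
 * between the two calls.
 */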
2156
2157/*
2158 * Allocate and map a random zone to buffer a chunk
2159 * already mapped to a sequential zone.
2160 */
2161struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
2162				     struct dm_zone *dzone)
2163{
2164	struct dm_zone *bzone;
2165	int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
2166
2167	dmz_lock_map(zmd);
2168again:
2169	bzone = dzone->bzone;
2170	if (bzone)
2171		goto out;
2172
2173	/* Allocate a random zone */
2174	bzone = dmz_alloc_zone(zmd, 0, alloc_flags);
2175	if (!bzone) {
2176		if (dmz_dev_is_dying(zmd)) {
2177			bzone = ERR_PTR(-EIO);
2178			goto out;
2179		}
2180		dmz_wait_for_free_zones(zmd);
2181		goto again;
2182	}
2183
2184	/* Update the chunk mapping */
2185	dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);
2186
2187	set_bit(DMZ_BUF, &bzone->flags);
2188	bzone->chunk = dzone->chunk;
2189	bzone->bzone = dzone;
2190	dzone->bzone = bzone;
2191	if (dmz_is_cache(bzone))
2192		list_add_tail(&bzone->link, &zmd->map_cache_list);
2193	else
2194		list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
2195out:
2196	dmz_unlock_map(zmd);
2197
2198	return bzone;
2199}
2200
2201/*
2202 * Get an unmapped (free) zone.
2203 * This must be called with the mapping lock held.
2204 */
2205struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
2206			       unsigned long flags)
2207{
2208	struct list_head *list;
2209	struct dm_zone *zone;
2210	int i;
2211
2212	/* Schedule reclaim to ensure free zones are available */
2213	if (!(flags & DMZ_ALLOC_RECLAIM)) {
2214		for (i = 0; i < zmd->nr_devs; i++)
2215			dmz_schedule_reclaim(zmd->dev[i].reclaim);
2216	}
2217
2218	i = 0;
2219again:
2220	if (flags & DMZ_ALLOC_CACHE)
2221		list = &zmd->unmap_cache_list;
2222	else if (flags & DMZ_ALLOC_RND)
2223		list = &zmd->dev[dev_idx].unmap_rnd_list;
2224	else
2225		list = &zmd->dev[dev_idx].unmap_seq_list;
2226
2227	if (list_empty(list)) {
2228		/*
2229		 * No free zone: return NULL if this is not for reclaim.
2230		 */
2231		if (!(flags & DMZ_ALLOC_RECLAIM))
2232			return NULL;
2233		/*
2234		 * Try to allocate from other devices
2235		 */
2236		if (i < zmd->nr_devs) {
2237			dev_idx = (dev_idx + 1) % zmd->nr_devs;
2238			i++;
2239			goto again;
2240		}
2241
2242		/*
2243		 * Fallback to the reserved sequential zones
2244		 */
2245		zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
2246						struct dm_zone, link);
2247		if (zone) {
2248			list_del_init(&zone->link);
2249			atomic_dec(&zmd->nr_reserved_seq_zones);
2250		}
2251		return zone;
2252	}
2253
2254	zone = list_first_entry(list, struct dm_zone, link);
2255	list_del_init(&zone->link);
2256
2257	if (dmz_is_cache(zone))
2258		atomic_dec(&zmd->unmap_nr_cache);
2259	else if (dmz_is_rnd(zone))
2260		atomic_dec(&zone->dev->unmap_nr_rnd);
2261	else
2262		atomic_dec(&zone->dev->unmap_nr_seq);
2263
2264	if (dmz_is_offline(zone)) {
2265		dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
2266		zone = NULL;
2267		goto again;
2268	}
2269	if (dmz_is_meta(zone)) {
2270		dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
2271		zone = NULL;
2272		goto again;
2273	}
2274	return zone;
2275}
2276
2277/*
2278 * Free a zone.
2279 * This must be called with the mapping lock held.
2280 */
2281void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2282{
2283	/* If this is a sequential zone, reset it */
2284	if (dmz_is_seq(zone))
2285		dmz_reset_zone(zmd, zone);
2286
2287	/* Return the zone to its type unmap list */
2288	if (dmz_is_cache(zone)) {
2289		list_add_tail(&zone->link, &zmd->unmap_cache_list);
2290		atomic_inc(&zmd->unmap_nr_cache);
2291	} else if (dmz_is_rnd(zone)) {
2292		list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
2293		atomic_inc(&zone->dev->unmap_nr_rnd);
2294	} else if (dmz_is_reserved(zone)) {
2295		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
2296		atomic_inc(&zmd->nr_reserved_seq_zones);
2297	} else {
2298		list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
2299		atomic_inc(&zone->dev->unmap_nr_seq);
2300	}
2301
2302	wake_up_all(&zmd->free_wq);
2303}
2304
2305/*
2306 * Map a chunk to a zone.
2307 * This must be called with the mapping lock held.
2308 */
2309void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
2310		  unsigned int chunk)
2311{
2312	/* Set the chunk mapping */
2313	dmz_set_chunk_mapping(zmd, chunk, dzone->id,
2314			      DMZ_MAP_UNMAPPED);
2315	dzone->chunk = chunk;
2316	if (dmz_is_cache(dzone))
2317		list_add_tail(&dzone->link, &zmd->map_cache_list);
2318	else if (dmz_is_rnd(dzone))
2319		list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
2320	else
2321		list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
2322}
2323
2324/*
2325 * Unmap a zone.
2326 * This must be called with the mapping lock held.
2327 */
2328void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2329{
2330	unsigned int chunk = zone->chunk;
2331	unsigned int dzone_id;
2332
2333	if (chunk == DMZ_MAP_UNMAPPED) {
2334		/* Already unmapped */
2335		return;
2336	}
2337
2338	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
2339		/*
2340		 * Unmapping the chunk buffer zone: clear only
2341		 * the chunk buffer mapping
2342		 */
2343		dzone_id = zone->bzone->id;
2344		zone->bzone->bzone = NULL;
2345		zone->bzone = NULL;
2346
2347	} else {
2348		/*
2349		 * Unmapping the chunk data zone: the zone must
2350		 * not be buffered.
2351		 */
2352		if (WARN_ON(zone->bzone)) {
2353			zone->bzone->bzone = NULL;
2354			zone->bzone = NULL;
2355		}
2356		dzone_id = DMZ_MAP_UNMAPPED;
2357	}
2358
2359	dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);
2360
2361	zone->chunk = DMZ_MAP_UNMAPPED;
2362	list_del_init(&zone->link);
2363}
2364
2365/*
2366 * Set @nr_bits bits in @bitmap starting from @bit.
2367 * Return the number of bits changed from 0 to 1.
2368 */
2369static unsigned int dmz_set_bits(unsigned long *bitmap,
2370				 unsigned int bit, unsigned int nr_bits)
2371{
2372	unsigned long *addr;
2373	unsigned int end = bit + nr_bits;
2374	unsigned int n = 0;
2375
2376	while (bit < end) {
2377		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2378		    ((end - bit) >= BITS_PER_LONG)) {
2379			/* Try to set the whole word at once */
2380			addr = bitmap + BIT_WORD(bit);
2381			if (*addr == 0) {
2382				*addr = ULONG_MAX;
2383				n += BITS_PER_LONG;
2384				bit += BITS_PER_LONG;
2385				continue;
2386			}
2387		}
2388
2389		if (!test_and_set_bit(bit, bitmap))
2390			n++;
2391		bit++;
2392	}
2393
2394	return n;
2395}
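/*
 * The whole-word fast path above is only taken when the word is currently
 * all zeroes; otherwise bits are set one at a time with test_and_set_bit()
 * so that the returned count of newly set bits stays exact, which the
 * zone weight accounting in dmz_validate_blocks() relies on.
 */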
2396
2397/*
2398 * Get the bitmap block storing the bit for chunk_block in zone.
2399 */
2400static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
2401					 struct dm_zone *zone,
2402					 sector_t chunk_block)
2403{
2404	sector_t bitmap_block = 1 + zmd->nr_map_blocks +
2405		(sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
2406		(chunk_block >> DMZ_BLOCK_SHIFT_BITS);
2407
2408	return dmz_get_mblock(zmd, bitmap_block);
2409}
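/*
 * Worked example for the lookup above (hypothetical geometry: 2 bitmap
 * blocks per zone, 32768 bits per metadata block): block 40000 of zone 3
 * is tracked in bitmap block 1 + nr_map_blocks + 3 * 2 + (40000 >> 15),
 * that is, 1 + nr_map_blocks + 7.
 */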
2410
2411/*
2412 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
2413 */
2414int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
2415			  struct dm_zone *to_zone)
2416{
2417	struct dmz_mblock *from_mblk, *to_mblk;
2418	sector_t chunk_block = 0;
2419
2420	/* Get the zones bitmap blocks */
2421	while (chunk_block < zmd->zone_nr_blocks) {
2422		from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
2423		if (IS_ERR(from_mblk))
2424			return PTR_ERR(from_mblk);
2425		to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
2426		if (IS_ERR(to_mblk)) {
2427			dmz_release_mblock(zmd, from_mblk);
2428			return PTR_ERR(to_mblk);
2429		}
2430
2431		memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
2432		dmz_dirty_mblock(zmd, to_mblk);
2433
2434		dmz_release_mblock(zmd, to_mblk);
2435		dmz_release_mblock(zmd, from_mblk);
2436
2437		chunk_block += zmd->zone_bits_per_mblk;
2438	}
2439
2440	to_zone->weight = from_zone->weight;
2441
2442	return 0;
2443}
2444
2445/*
2446 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
2447 * starting from chunk_block.
2448 */
2449int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
2450			   struct dm_zone *to_zone, sector_t chunk_block)
2451{
2452	unsigned int nr_blocks;
2453	int ret;
2454
2455	/* Get the zones bitmap blocks */
2456	while (chunk_block < zmd->zone_nr_blocks) {
2457		/* Get a valid region from the source zone */
2458		ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
2459		if (ret <= 0)
2460			return ret;
2461
2462		nr_blocks = ret;
2463		ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
2464		if (ret)
2465			return ret;
2466
2467		chunk_block += nr_blocks;
2468	}
2469
2470	return 0;
2471}
2472
2473/*
2474 * Validate all the blocks in the range [block..block+nr_blocks-1].
2475 */
2476int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2477			sector_t chunk_block, unsigned int nr_blocks)
2478{
2479	unsigned int count, bit, nr_bits;
2480	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
2481	struct dmz_mblock *mblk;
2482	unsigned int n = 0;
2483
2484	dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks",
2485		      zone->id, (unsigned long long)chunk_block,
2486		      nr_blocks);
2487
2488	WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);
2489
2490	while (nr_blocks) {
2491		/* Get bitmap block */
2492		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2493		if (IS_ERR(mblk))
2494			return PTR_ERR(mblk);
2495
2496		/* Set bits */
2497		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2498		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2499
2500		count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
2501		if (count) {
2502			dmz_dirty_mblock(zmd, mblk);
2503			n += count;
2504		}
2505		dmz_release_mblock(zmd, mblk);
2506
2507		nr_blocks -= nr_bits;
2508		chunk_block += nr_bits;
2509	}
2510
2511	if (likely(zone->weight + n <= zone_nr_blocks))
2512		zone->weight += n;
2513	else {
2514		dmz_zmd_warn(zmd, "Zone %u: weight %u should be <= %u",
2515			     zone->id, zone->weight,
2516			     zone_nr_blocks - n);
2517		zone->weight = zone_nr_blocks;
2518	}
2519
2520	return 0;
2521}
2522
2523/*
2524 * Clear nr_bits bits in bitmap starting from bit.
2525 * Return the number of bits cleared.
2526 */
2527static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
2528{
2529	unsigned long *addr;
2530	int end = bit + nr_bits;
2531	int n = 0;
2532
2533	while (bit < end) {
2534		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2535		    ((end - bit) >= BITS_PER_LONG)) {
2536			/* Try to clear whole word at once */
2537			addr = bitmap + BIT_WORD(bit);
2538			if (*addr == ULONG_MAX) {
2539				*addr = 0;
2540				n += BITS_PER_LONG;
2541				bit += BITS_PER_LONG;
2542				continue;
2543			}
2544		}
2545
2546		if (test_and_clear_bit(bit, bitmap))
2547			n++;
2548		bit++;
2549	}
2550
2551	return n;
2552}
2553
2554/*
2555 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
2556 */
2557int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2558			  sector_t chunk_block, unsigned int nr_blocks)
2559{
2560	unsigned int count, bit, nr_bits;
2561	struct dmz_mblock *mblk;
2562	unsigned int n = 0;
2563
2564	dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
2565		      zone->id, (u64)chunk_block, nr_blocks);
2566
2567	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
2568
2569	while (nr_blocks) {
2570		/* Get bitmap block */
2571		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2572		if (IS_ERR(mblk))
2573			return PTR_ERR(mblk);
2574
2575		/* Clear bits */
2576		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2577		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2578
2579		count = dmz_clear_bits((unsigned long *)mblk->data,
2580				       bit, nr_bits);
2581		if (count) {
2582			dmz_dirty_mblock(zmd, mblk);
2583			n += count;
2584		}
2585		dmz_release_mblock(zmd, mblk);
2586
2587		nr_blocks -= nr_bits;
2588		chunk_block += nr_bits;
2589	}
2590
2591	if (zone->weight >= n)
2592		zone->weight -= n;
2593	else {
2594		dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u",
2595			     zone->id, zone->weight, n);
2596		zone->weight = 0;
2597	}
2598
2599	return 0;
2600}
2601
2602/*
2603 * Get a block bit value.
2604 */
2605static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2606			  sector_t chunk_block)
2607{
2608	struct dmz_mblock *mblk;
2609	int ret;
2610
2611	WARN_ON(chunk_block >= zmd->zone_nr_blocks);
2612
2613	/* Get bitmap block */
2614	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2615	if (IS_ERR(mblk))
2616		return PTR_ERR(mblk);
2617
2618	/* Get offset */
2619	ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
2620		       (unsigned long *) mblk->data) != 0;
2621
2622	dmz_release_mblock(zmd, mblk);
2623
2624	return ret;
2625}
2626
2627/*
2628 * Return the number of blocks from chunk_block to the first block with a bit
2629 * value specified by set. Search at most nr_blocks blocks from chunk_block.
2630 */
2631static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2632				 sector_t chunk_block, unsigned int nr_blocks,
2633				 int set)
2634{
2635	struct dmz_mblock *mblk;
2636	unsigned int bit, set_bit, nr_bits;
2637	unsigned int zone_bits = zmd->zone_bits_per_mblk;
2638	unsigned long *bitmap;
2639	int n = 0;
2640
2641	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
2642
2643	while (nr_blocks) {
2644		/* Get bitmap block */
2645		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2646		if (IS_ERR(mblk))
2647			return PTR_ERR(mblk);
2648
2649		/* Get offset */
2650		bitmap = (unsigned long *) mblk->data;
2651		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2652		nr_bits = min(nr_blocks, zone_bits - bit);
2653		if (set)
2654			set_bit = find_next_bit(bitmap, zone_bits, bit);
2655		else
2656			set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
2657		dmz_release_mblock(zmd, mblk);
2658
2659		n += set_bit - bit;
2660		if (set_bit < zone_bits)
2661			break;
2662
2663		nr_blocks -= nr_bits;
2664		chunk_block += nr_bits;
2665	}
2666
2667	return n;
2668}
2669
2670/*
2671 * Test if chunk_block is valid. If it is, the number of consecutive
2672 * valid blocks from chunk_block will be returned.
2673 */
2674int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
2675		    sector_t chunk_block)
2676{
2677	int valid;
2678
2679	valid = dmz_test_block(zmd, zone, chunk_block);
2680	if (valid <= 0)
2681		return valid;
2682
2683	/* The block is valid: get the number of valid blocks from block */
2684	return dmz_to_next_set_block(zmd, zone, chunk_block,
2685				     zmd->zone_nr_blocks - chunk_block, 0);
2686}
2687
2688/*
2689 * Find the first valid block from @chunk_block in @zone.
2690 * If such a block is found, its number is returned using
2691 * @chunk_block and the total number of valid blocks from @chunk_block
2692 * is returned.
2693 */
2694int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2695			  sector_t *chunk_block)
2696{
2697	sector_t start_block = *chunk_block;
2698	int ret;
2699
2700	ret = dmz_to_next_set_block(zmd, zone, start_block,
2701				    zmd->zone_nr_blocks - start_block, 1);
2702	if (ret < 0)
2703		return ret;
2704
2705	start_block += ret;
2706	*chunk_block = start_block;
2707
2708	return dmz_to_next_set_block(zmd, zone, start_block,
2709				     zmd->zone_nr_blocks - start_block, 0);
2710}
2711
2712/*
2713 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
2714 */
2715static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
2716{
2717	unsigned long *addr;
2718	int end = bit + nr_bits;
2719	int n = 0;
2720
2721	while (bit < end) {
2722		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2723		    ((end - bit) >= BITS_PER_LONG)) {
2724			addr = (unsigned long *)bitmap + BIT_WORD(bit);
2725			if (*addr == ULONG_MAX) {
2726				n += BITS_PER_LONG;
2727				bit += BITS_PER_LONG;
2728				continue;
2729			}
2730		}
2731
2732		if (test_bit(bit, bitmap))
2733			n++;
2734		bit++;
2735	}
2736
2737	return n;
2738}
2739
2740/*
2741 * Get a zone's weight.
2742 */
2743static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
2744{
2745	struct dmz_mblock *mblk;
2746	sector_t chunk_block = 0;
2747	unsigned int bit, nr_bits;
2748	unsigned int nr_blocks = zmd->zone_nr_blocks;
2749	void *bitmap;
2750	int n = 0;
2751
2752	while (nr_blocks) {
2753		/* Get bitmap block */
2754		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2755		if (IS_ERR(mblk)) {
2756			n = 0;
2757			break;
2758		}
2759
2760		/* Count bits in this block */
2761		bitmap = mblk->data;
2762		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2763		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2764		n += dmz_count_bits(bitmap, bit, nr_bits);
2765
2766		dmz_release_mblock(zmd, mblk);
2767
2768		nr_blocks -= nr_bits;
2769		chunk_block += nr_bits;
2770	}
2771
2772	zone->weight = n;
2773}
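/*
 * A zone's weight is its number of valid blocks: it is computed here when
 * loading the chunk mapping and then maintained incrementally by
 * dmz_validate_blocks() and dmz_invalidate_blocks(). Reclaim uses it to
 * pick candidate zones (see dmz_get_rnd_zone_for_reclaim()).
 */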
2774
2775/*
2776 * Cleanup the zoned metadata resources.
2777 */
2778static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
2779{
2780	struct rb_root *root;
2781	struct dmz_mblock *mblk, *next;
2782	int i;
2783
2784	/* Release zone mapping resources */
2785	if (zmd->map_mblk) {
2786		for (i = 0; i < zmd->nr_map_blocks; i++)
2787			dmz_release_mblock(zmd, zmd->map_mblk[i]);
2788		kfree(zmd->map_mblk);
2789		zmd->map_mblk = NULL;
2790	}
2791
2792	/* Release super blocks */
2793	for (i = 0; i < 2; i++) {
2794		if (zmd->sb[i].mblk) {
2795			dmz_free_mblock(zmd, zmd->sb[i].mblk);
2796			zmd->sb[i].mblk = NULL;
2797		}
2798	}
2799
2800	/* Free cached blocks */
2801	while (!list_empty(&zmd->mblk_dirty_list)) {
2802		mblk = list_first_entry(&zmd->mblk_dirty_list,
2803					struct dmz_mblock, link);
2804		dmz_zmd_warn(zmd, "mblock %llu still in dirty list (ref %u)",
2805			     (u64)mblk->no, mblk->ref);
2806		list_del_init(&mblk->link);
2807		rb_erase(&mblk->node, &zmd->mblk_rbtree);
2808		dmz_free_mblock(zmd, mblk);
2809	}
2810
2811	while (!list_empty(&zmd->mblk_lru_list)) {
2812		mblk = list_first_entry(&zmd->mblk_lru_list,
2813					struct dmz_mblock, link);
2814		list_del_init(&mblk->link);
2815		rb_erase(&mblk->node, &zmd->mblk_rbtree);
2816		dmz_free_mblock(zmd, mblk);
2817	}
2818
2819	/* Sanity checks: the mblock rbtree should now be empty */
2820	root = &zmd->mblk_rbtree;
2821	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
2822		dmz_zmd_warn(zmd, "mblock %llu ref %u still in rbtree",
2823			     (u64)mblk->no, mblk->ref);
2824		mblk->ref = 0;
2825		dmz_free_mblock(zmd, mblk);
2826	}
2827
2828	/* Free the zone descriptors */
2829	dmz_drop_zones(zmd);
2830
2831	mutex_destroy(&zmd->mblk_flush_lock);
2832	mutex_destroy(&zmd->map_lock);
2833}
2834
2835static void dmz_print_dev(struct dmz_metadata *zmd, int num)
2836{
2837	struct dmz_dev *dev = &zmd->dev[num];
2838
2839	if (!bdev_is_zoned(dev->bdev))
2840		dmz_dev_info(dev, "Regular block device");
2841	else
2842		dmz_dev_info(dev, "Host-managed zoned block device");
2843
2844	if (zmd->sb_version > 1) {
2845		sector_t sector_offset =
2846			dev->zone_offset << zmd->zone_nr_sectors_shift;
2847
2848		dmz_dev_info(dev, "  %llu 512-byte logical sectors (offset %llu)",
2849			     (u64)dev->capacity, (u64)sector_offset);
2850		dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors (offset %llu)",
2851			     dev->nr_zones, (u64)zmd->zone_nr_sectors,
2852			     (u64)dev->zone_offset);
2853	} else {
2854		dmz_dev_info(dev, "  %llu 512-byte logical sectors",
2855			     (u64)dev->capacity);
2856		dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors",
2857			     dev->nr_zones, (u64)zmd->zone_nr_sectors);
2858	}
2859}
2860
2861/*
2862 * Initialize the zoned metadata.
2863 */
2864int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
2865		     struct dmz_metadata **metadata,
2866		     const char *devname)
2867{
2868	struct dmz_metadata *zmd;
2869	unsigned int i;
2870	struct dm_zone *zone;
2871	int ret;
2872
2873	zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
2874	if (!zmd)
2875		return -ENOMEM;
2876
2877	strcpy(zmd->devname, devname);
2878	zmd->dev = dev;
2879	zmd->nr_devs = num_dev;
2880	zmd->mblk_rbtree = RB_ROOT;
2881	init_rwsem(&zmd->mblk_sem);
2882	mutex_init(&zmd->mblk_flush_lock);
2883	spin_lock_init(&zmd->mblk_lock);
2884	INIT_LIST_HEAD(&zmd->mblk_lru_list);
2885	INIT_LIST_HEAD(&zmd->mblk_dirty_list);
2886
2887	mutex_init(&zmd->map_lock);
2888
2889	atomic_set(&zmd->unmap_nr_cache, 0);
2890	INIT_LIST_HEAD(&zmd->unmap_cache_list);
2891	INIT_LIST_HEAD(&zmd->map_cache_list);
2892
2893	atomic_set(&zmd->nr_reserved_seq_zones, 0);
2894	INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);
2895
2896	init_waitqueue_head(&zmd->free_wq);
2897
2898	/* Initialize zone descriptors */
2899	ret = dmz_init_zones(zmd);
2900	if (ret)
2901		goto err;
2902
2903	/* Get super block */
2904	ret = dmz_load_sb(zmd);
2905	if (ret)
2906		goto err;
2907
2908	/* Set metadata zones starting from sb_zone */
2909	for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
2910		zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
2911		if (!zone) {
2912			dmz_zmd_err(zmd,
2913				    "metadata zone %u not present", i);
2914			ret = -ENXIO;
2915			goto err;
2916		}
2917		if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
2918			dmz_zmd_err(zmd,
2919				    "metadata zone %d is not random", i);
2920			ret = -ENXIO;
2921			goto err;
2922		}
2923		set_bit(DMZ_META, &zone->flags);
2924	}
2925	/* Load mapping table */
2926	ret = dmz_load_mapping(zmd);
2927	if (ret)
2928		goto err;
2929
2930	/*
2931	 * Cache size boundaries: allow at least 2 super blocks, the chunk map
2932	 * blocks and enough blocks to be able to cache the bitmap blocks of
2933	 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
2934	 * the cache to add 512 more metadata blocks.
2935	 */
2936	zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
2937	zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
2938
2939	/* Metadata cache shrinker */
2940	zmd->mblk_shrinker = shrinker_alloc(0,  "dm-zoned-meta:(%u:%u)",
2941					    MAJOR(dev->bdev->bd_dev),
2942					    MINOR(dev->bdev->bd_dev));
2943	if (!zmd->mblk_shrinker) {
2944		ret = -ENOMEM;
2945		dmz_zmd_err(zmd, "Allocate metadata cache shrinker failed");
2946		goto err;
2947	}
2948
2949	zmd->mblk_shrinker->count_objects = dmz_mblock_shrinker_count;
2950	zmd->mblk_shrinker->scan_objects = dmz_mblock_shrinker_scan;
2951	zmd->mblk_shrinker->private_data = zmd;
2952
2953	shrinker_register(zmd->mblk_shrinker);
2954
2955	dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
2956	for (i = 0; i < zmd->nr_devs; i++)
2957		dmz_print_dev(zmd, i);
2958
2959	dmz_zmd_info(zmd, "  %u zones of %llu 512-byte logical sectors",
2960		     zmd->nr_zones, (u64)zmd->zone_nr_sectors);
2961	dmz_zmd_debug(zmd, "  %u metadata zones",
2962		      zmd->nr_meta_zones * 2);
2963	dmz_zmd_debug(zmd, "  %u data zones for %u chunks",
2964		      zmd->nr_data_zones, zmd->nr_chunks);
2965	dmz_zmd_debug(zmd, "    %u cache zones (%u unmapped)",
2966		      zmd->nr_cache, atomic_read(&zmd->unmap_nr_cache));
2967	for (i = 0; i < zmd->nr_devs; i++) {
2968		dmz_zmd_debug(zmd, "    %u random zones (%u unmapped)",
2969			      dmz_nr_rnd_zones(zmd, i),
2970			      dmz_nr_unmap_rnd_zones(zmd, i));
2971		dmz_zmd_debug(zmd, "    %u sequential zones (%u unmapped)",
2972			      dmz_nr_seq_zones(zmd, i),
2973			      dmz_nr_unmap_seq_zones(zmd, i));
2974	}
2975	dmz_zmd_debug(zmd, "  %u reserved sequential data zones",
2976		      zmd->nr_reserved_seq);
2977	dmz_zmd_debug(zmd, "Format:");
2978	dmz_zmd_debug(zmd, "%u metadata blocks per set (%u max cache)",
2979		      zmd->nr_meta_blocks, zmd->max_nr_mblks);
2980	dmz_zmd_debug(zmd, "  %u data zone mapping blocks",
2981		      zmd->nr_map_blocks);
2982	dmz_zmd_debug(zmd, "  %u bitmap blocks",
2983		      zmd->nr_bitmap_blocks);
2984
2985	*metadata = zmd;
2986
2987	return 0;
2988err:
2989	dmz_cleanup_metadata(zmd);
2990	kfree(zmd);
2991	*metadata = NULL;
2992
2993	return ret;
2994}
2995
2996/*
2997 * Cleanup the zoned metadata resources.
2998 */
2999void dmz_dtr_metadata(struct dmz_metadata *zmd)
3000{
3001	shrinker_free(zmd->mblk_shrinker);
3002	dmz_cleanup_metadata(zmd);
3003	kfree(zmd);
3004}
3005
3006/*
3007 * Check zone information on resume.
3008 */
3009int dmz_resume_metadata(struct dmz_metadata *zmd)
3010{
3011	struct dm_zone *zone;
3012	sector_t wp_block;
3013	unsigned int i;
3014	int ret;
3015
3016	/* Check zones */
3017	for (i = 0; i < zmd->nr_zones; i++) {
3018		zone = dmz_get(zmd, i);
3019		if (!zone) {
3020			dmz_zmd_err(zmd, "Unable to get zone %u", i);
3021			return -EIO;
3022		}
3023		wp_block = zone->wp_block;
3024
3025		ret = dmz_update_zone(zmd, zone);
3026		if (ret) {
3027			dmz_zmd_err(zmd, "Broken zone %u", i);
3028			return ret;
3029		}
3030
3031		if (dmz_is_offline(zone)) {
3032			dmz_zmd_warn(zmd, "Zone %u is offline", i);
3033			continue;
3034		}
3035
3036		/* Check write pointer */
3037		if (!dmz_is_seq(zone))
3038			zone->wp_block = 0;
3039		else if (zone->wp_block != wp_block) {
3040			dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)",
3041				    i, (u64)zone->wp_block, (u64)wp_block);
3042			zone->wp_block = wp_block;
3043			dmz_invalidate_blocks(zmd, zone, zone->wp_block,
3044					      zmd->zone_nr_blocks - zone->wp_block);
3045		}
3046	}
3047
3048	return 0;
3049}