// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

/*
 * Similar to ceiling(log_base(n)), i.e. the number of btree levels
 * needed to index n nodes with the given fan-out.
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
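
/*
 * Worked example (assuming the common case of L1_CACHE_BYTES == 64 and an
 * 8-byte sector_t, so CHILDREN_PER_NODE == 9): int_log(100, 9) iterates
 * n = 100 -> 12 -> 2 -> 1 and returns 3, matching ceil(log9(100)).
 */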

/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
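
/*
 * Layout sketch (again assuming KEYS_PER_NODE == 8): node n of a level is
 * the eight keys t->index[l][n * 8 .. n * 8 + 7], and its k'th child on
 * the level below is node n * 9 + k. Following the last child (k == 8)
 * repeatedly, as high() does, reaches the subtree's maximum key.
 */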

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 */
	n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
			   GFP_KERNEL);
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	kvfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
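
/*
 * Layout of the combined allocation above, for num == N (sketch):
 *
 *	n_highs                n_targets
 *	|                      |
 *	v                      v
 *	[sector_t x N         ][struct dm_target x N          ]
 *
 * The memset(-1) initializes every high to (sector_t)-1, the same "no
 * such node" marker that high() returns for out-of-range nodes.
 */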

int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md)
{
	struct dm_table *t;

	if (num_targets > DM_MAX_TARGETS)
		return -EOVERFLOW;

	t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	init_rwsem(&t->devices_lock);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (!num_targets) {
		kfree(t);
		return -EOVERFLOW;
	}

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->type = DM_TYPE_NONE;
	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
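
/*
 * Minimal usage sketch (hypothetical caller, as done by the ioctl path;
 * the device string, length and error handling are illustrative only):
 *
 *	struct dm_table *t;
 *	char params[] = "253:0 0";
 *	int r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
 *
 *	if (r)
 *		return r;
 *	r = dm_table_add_target(t, "linear", 0, dev_sectors, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r)
 *		dm_table_destroy(t);
 */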

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}

static void dm_table_destroy_crypto_profile(struct dm_table *t);

void dm_table_destroy(struct dm_table *t)
{
	if (!t)
		return;

	/* free the indexes */
	if (t->depth >= 2)
		kvfree(t->index[t->depth - 2]);

	/* free the targets */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->dtr)
			ti->type->dtr(ti);

		dm_put_target_type(ti->type);
	}

	kvfree(t->highs);

	/* free the device list */
	free_devices(&t->devices, t->md);

	dm_free_md_mempools(t->mempools);

	dm_table_destroy_crypto_profile(t);

	kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * If possible, this checks whether an area of a destination device
 * is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size = bdev_nr_sectors(bdev);
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu",
		      dm_device_name(ti->table->md), bdev,
		      (unsigned long long)start,
		      (unsigned long long)len,
		      (unsigned long long)dev_size);
		return 1;
	}

	/*
	 * If the target is mapped to zoned block device(s), check
	 * that the zones are not partially mapped.
	 */
	if (bdev_is_zoned(bdev)) {
		unsigned int zone_sectors = bdev_zone_sectors(bdev);

		if (start & (zone_sectors - 1)) {
			DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
			      dm_device_name(ti->table->md),
			      (unsigned long long)start,
			      zone_sectors, bdev);
			return 1;
		}

		/*
		 * Note: The last zone of a zoned block device may be smaller
		 * than other zones. So for a target mapping the end of a
		 * zoned block device with such a zone, len would not be zone
		 * aligned. We do not allow such last smaller zone to be part
		 * of the mapping here to ensure that mappings with multiple
		 * devices do not end up with a smaller zone in the middle of
		 * the sector range.
		 */
		if (len & (zone_sectors - 1)) {
			DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
			      dm_device_name(ti->table->md),
			      (unsigned long long)len,
			      zone_sectors, bdev);
			return 1;
		}
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg",
		      dm_device_name(ti->table->md),
		      (unsigned long long)start,
		      limits->logical_block_size, bdev);
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg",
		      dm_device_name(ti->table->md),
		      (unsigned long long)len,
		      limits->logical_block_size, bdev);
		return 1;
	}

	return 0;
}
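
/*
 * Worked example for the zone checks above: on a host-managed SMR disk
 * with 256 MiB zones (zone_sectors == 524288), start == 524288 and
 * len == 1048576 pass both masks, while len == 800000 is rejected
 * because 800000 & (524288 - 1) != 0, i.e. it would map a partial zone.
 */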

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently.
 */
static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;

	old_dev = dd->dm_dev;

	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}
 331
 332/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 333 * Add a device to the list, or just increment the usage count if
 334 * it's already present.
 335 *
 336 * Note: the __ref annotation is because this function can call the __init
 337 * marked early_lookup_bdev when called during early boot code from dm-init.c.
 338 */
 339int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
 340		  struct dm_dev **result)
 341{
 342	int r;
 343	dev_t dev;
 344	unsigned int major, minor;
 345	char dummy;
 346	struct dm_dev_internal *dd;
 347	struct dm_table *t = ti->table;
 348
 349	BUG_ON(!t);
 350
 351	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
 352		/* Extract the major/minor numbers */
 353		dev = MKDEV(major, minor);
 354		if (MAJOR(dev) != major || MINOR(dev) != minor)
 355			return -EOVERFLOW;
 356	} else {
 357		r = lookup_bdev(path, &dev);
 358#ifndef MODULE
 359		if (r && system_state < SYSTEM_RUNNING)
 360			r = early_lookup_bdev(path, &dev);
 361#endif
 362		if (r)
 363			return r;
 364	}
 365	if (dev == disk_devt(t->md->disk))
 366		return -EINVAL;
 367
 368	down_write(&t->devices_lock);
 369
 370	dd = find_device(&t->devices, dev);
 371	if (!dd) {
 372		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
 373		if (!dd) {
 374			r = -ENOMEM;
 375			goto unlock_ret_r;
 376		}
 377
 378		r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
 379		if (r) {
 380			kfree(dd);
 381			goto unlock_ret_r;
 382		}
 383
 384		refcount_set(&dd->count, 1);
 385		list_add(&dd->list, &t->devices);
 386		goto out;
 387
 388	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
 389		r = upgrade_mode(dd, mode, t->md);
 390		if (r)
 391			goto unlock_ret_r;
 392	}
 393	refcount_inc(&dd->count);
 394out:
 395	up_write(&t->devices_lock);
 396	*result = dd->dm_dev;
 397	return 0;
 398
 399unlock_ret_r:
 400	up_write(&t->devices_lock);
 401	return r;
 402}
 403EXPORT_SYMBOL(dm_get_device);
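
/*
 * Typical use from a target constructor (sketch; "lc" stands in for a
 * hypothetical per-target context struct, not something defined here):
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *			  &lc->dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * The matching dm_put_device(ti, lc->dev) belongs in the target's dtr
 * (and in ctr error paths); otherwise dm_table_destroy() warns via
 * free_devices() above.
 */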

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %pg",
		       dm_device_name(ti->table->md), bdev);
		return 0;
	}

	if (blk_stack_limits(limits, &q->limits,
			get_start_sect(bdev) + start) < 0)
		DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdev,
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);
	return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct dm_table *t = ti->table;
	struct list_head *devices = &t->devices;
	struct dm_dev_internal *dd;

	down_write(&t->devices_lock);

	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMERR("%s: device %s not in table devices list",
		      dm_device_name(t->md), d->name);
		goto unlock_ret;
	}
	if (refcount_dec_and_test(&dd->count)) {
		dm_put_table_device(t->md, d);
		list_del(&dd->list);
		kfree(dd);
	}

unlock_ret:
	up_write(&t->devices_lock);
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *t, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!t->num_targets)
		return !ti->begin;

	prev = &t->targets[t->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned int *size, char **old_argv)
{
	char **argv;
	unsigned int new_size;
	gfp_t gfp;

	if (*size) {
		new_size = *size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
	if (argv && old_argv)
		memcpy(argv, old_argv, *size * sizeof(*argv));
	/*
	 * Update the array size on every successful allocation, not only
	 * when growing an existing array; otherwise the first call leaves
	 * *size at 0 and the caller immediately reallocates.
	 */
	if (argv)
		*size = new_size;

	kfree(old_argv);
	return argv;
}
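
/*
 * Growth pattern: the first successful call returns 8 slots (allocated
 * with GFP_NOIO, per the comment above); each later call doubles the
 * array with GFP_KERNEL, so capacity goes 8 -> 16 -> 32 -> ...
 */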

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned int array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
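
/*
 * Example: running dm_split_args() on the writable string
 *
 *	"0 409600 linear\t253:0  8192"
 *
 * yields *argc == 5 and argv == { "0", "409600", "linear", "253:0",
 * "8192" }. A backslash escapes the following character in place, so the
 * input "a\ b" becomes the single argument "a b".
 */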

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *t,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *ti;
	struct queue_limits ti_limits;
	unsigned int i;

	/*
	 * Check each entry in the table in turn.
	 */
	for (i = 0; i < t->num_targets; i++) {
		ti = dm_table_get_target(t, i);

		blk_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMERR("%s: table line %u (start sect %llu len %llu) "
		      "not aligned to h/w logical block size %u",
		      dm_device_name(t->md), i,
		      (unsigned long long) ti->begin,
		      (unsigned long long) ti->len,
		      limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}
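
/*
 * Worked example: with a 4096-byte table-wide logical_block_size
 * (8 sectors), a first target of len == 20 leaves next_target_start ==
 * 20 & 7 == 4, so remaining == 8 - 4 == 4: the next target must absorb
 * 4 sectors of any straddling bio. That is acceptable only if that
 * target's own logical_block_size is 512 (4 & 0 == 0); if it is also
 * 4096 then 4 & 7 != 0 and the table is rejected.
 */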

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *ti;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	ti = t->targets + t->num_targets;
	memset(ti, 0, sizeof(*ti));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	ti->type = dm_get_target_type(type);
	if (!ti->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(ti->type)) {
		if (t->num_targets) {
			ti->error = "singleton target type must appear alone in table";
			goto bad;
		}
		t->singleton = true;
	}

	if (dm_target_always_writeable(ti->type) &&
	    !(t->mode & BLK_OPEN_WRITE)) {
		ti->error = "target type may not be included in a read-only table";
		goto bad;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != ti->type) {
			ti->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
	} else if (dm_target_is_immutable(ti->type)) {
		if (t->num_targets) {
			ti->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
		t->immutable_target_type = ti->type;
	}

	if (dm_target_has_integrity(ti->type))
		t->integrity_added = 1;

	ti->table = t;
	ti->begin = start;
	ti->len = len;
	ti->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, ti)) {
		ti->error = "Gap in table";
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		ti->error = "couldn't split parameters";
		goto bad;
	}

	r = ti->type->ctr(ti, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = ti->begin + ti->len - 1;

	if (!ti->num_discard_bios && ti->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	if (ti->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
		static_branch_enable(&swap_bios_enabled);

	return 0;

 bad:
	DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r));
	dm_put_target_type(ti->type);
	return r;
}

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
			     unsigned int *value, char **error, unsigned int grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
	char dummy;

	if (!arg_str ||
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);
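
/*
 * Sketch of the usual parsing pattern built from these helpers (the
 * feature-args shape is hypothetical; dm-mpath and dm-thin do something
 * similar in their constructors):
 *
 *	static const struct dm_arg _args[] = {
 *		{ 0, 4, "Invalid number of feature args" },
 *	};
 *	unsigned int num_features;
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *
 *	if (dm_read_arg_group(_args, &as, &num_features, &ti->error))
 *		return -EINVAL;
 *	while (num_features--) {
 *		const char *feature = dm_shift_arg(&as);
 *		// compare and apply each feature string
 *	}
 *
 * dm_read_arg_group() additionally checks that at least num_features
 * arguments remain in the set (the "grouped" case above).
 */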

static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
	return (table_type == DM_TYPE_BIO_BASED ||
		table_type == DM_TYPE_DAX_BIO_BASED);
}

static bool __table_type_request_based(enum dm_queue_mode table_type)
{
	return table_type == DM_TYPE_REQUEST_BASED;
}

void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
	t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

/* validate the dax capability of the target device span */
static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	if (dev->dax_dev)
		return false;

	DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
	return true;
}

/* Check devices support synchronous DAX */
static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
					      sector_t start, sector_t len, void *data)
{
	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
}

static bool dm_table_supports_dax(struct dm_table *t,
				  iterate_devices_callout_fn iterate_fn)
{
	/* Ensure that all targets support DAX. */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->direct_access)
			return false;

		if (dm_target_is_wildcard(ti->type) ||
		    !ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, iterate_fn, NULL))
			return false;
	}

	return true;
}

static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	/* request-based cannot stack on partitions! */
	if (bdev_is_partition(bdev))
		return false;

	return queue_is_mq(q);
}

static int dm_table_determine_type(struct dm_table *t)
{
	unsigned int bio_based = 0, request_based = 0, hybrid = 0;
	struct dm_target *ti;
	struct list_head *devices = dm_table_get_devices(t);
	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);

	if (t->type != DM_TYPE_NONE) {
		/* target already set the table's type */
		if (t->type == DM_TYPE_BIO_BASED) {
			/* possibly upgrade to a variant of bio-based */
			goto verify_bio_based;
		}
		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
		goto verify_rq_based;
	}

	for (unsigned int i = 0; i < t->num_targets; i++) {
		ti = dm_table_get_target(t, i);
		if (dm_target_hybrid(ti))
			hybrid = 1;
		else if (dm_target_request_based(ti))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMERR("Inconsistent table: different target types can't be mixed up");
			return -EINVAL;
		}
	}

	if (hybrid && !bio_based && !request_based) {
		/*
		 * The targets can work either way.
		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
		if (__table_type_request_based(live_md_type))
			request_based = 1;
		else
			bio_based = 1;
	}

	if (bio_based) {
verify_bio_based:
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		if (dm_table_supports_dax(t, device_not_dax_capable) ||
		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
			t->type = DM_TYPE_DAX_BIO_BASED;
		}
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMERR("request-based DM doesn't support multiple targets");
		return -EINVAL;
	}

	if (list_empty(devices)) {
		int srcu_idx;
		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

		/* inherit live table's type */
		if (live_table)
			t->type = live_table->type;
		dm_put_live_table(t->md, srcu_idx);
		return 0;
	}

	ti = dm_table_get_immutable_target(t);
	if (!ti) {
		DMERR("table load rejected: immutable target is required");
		return -EINVAL;
	} else if (ti->max_io_len) {
		DMERR("table load rejected: immutable target that splits IO is not supported");
		return -EINVAL;
	}

	/* Non-request-stackable devices can't be used for request-based dm */
	if (!ti->type->iterate_devices ||
	    !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
		DMERR("table load rejected: including non-request-stackable devices");
		return -EINVAL;
	}

	return 0;
}

enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
	/* Immutable target is implicitly a singleton */
	if (t->num_targets > 1 ||
	    !dm_target_is_immutable(t->targets[0].type))
		return NULL;

	return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (dm_target_is_wildcard(ti->type))
			return ti;
	}

	return NULL;
}

bool dm_table_bio_based(struct dm_table *t)
{
	return __table_type_bio_based(dm_table_get_type(t));
}

bool dm_table_request_based(struct dm_table *t)
{
	return __table_type_request_based(dm_table_get_type(t));
}
1017static bool dm_table_supports_poll(struct dm_table *t);
1018
1019static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1020{
1021	enum dm_queue_mode type = dm_table_get_type(t);
1022	unsigned int per_io_data_size = 0, front_pad, io_front_pad;
1023	unsigned int min_pool_size = 0, pool_size;
1024	struct dm_md_mempools *pools;
 
1025
1026	if (unlikely(type == DM_TYPE_NONE)) {
1027		DMERR("no table type is set, can't allocate mempools");
1028		return -EINVAL;
1029	}
1030
1031	pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
1032	if (!pools)
 
 
 
 
 
 
 
 
1033		return -ENOMEM;
1034
1035	if (type == DM_TYPE_REQUEST_BASED) {
1036		pool_size = dm_get_reserved_rq_based_ios();
1037		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
1038		goto init_bs;
1039	}
1040
1041	for (unsigned int i = 0; i < t->num_targets; i++) {
1042		struct dm_target *ti = dm_table_get_target(t, i);
 
 
 
1043
1044		per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
1045		min_pool_size = max(min_pool_size, ti->num_flush_bios);
1046	}
1047	pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
1048	front_pad = roundup(per_io_data_size,
1049		__alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
1050
1051	io_front_pad = roundup(per_io_data_size,
1052		__alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
1053	if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
1054			dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
1055		goto out_free_pools;
1056	if (t->integrity_supported &&
1057	    bioset_integrity_create(&pools->io_bs, pool_size))
1058		goto out_free_pools;
1059init_bs:
1060	if (bioset_init(&pools->bs, pool_size, front_pad, 0))
1061		goto out_free_pools;
1062	if (t->integrity_supported &&
1063	    bioset_integrity_create(&pools->bs, pool_size))
1064		goto out_free_pools;
1065
1066	t->mempools = pools;
1067	return 0;
1068
1069out_free_pools:
1070	dm_free_md_mempools(pools);
1071	return -ENOMEM;
1072}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
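
/*
 * Example (KEYS_PER_NODE == 8, CHILDREN_PER_NODE == 9): a table with 100
 * targets gets leaf_nodes = dm_div_up(100, 8) = 13 and depth = 1 +
 * int_log(13, 9) = 3: the leaf level is t->highs itself, and
 * setup_indexes() builds two internal levels with counts[1] == 2 and
 * counts[0] == 1.
 */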

static bool integrity_profile_exists(struct gendisk *disk)
{
	return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_passes_integrity(ti->type))
			goto no_integrity;
	}

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev->bdev->bd_disk;
		if (!integrity_profile_exists(template_disk))
			goto no_integrity;
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a 2 stage integrity
 * profile validation: First pass during table load, final pass during
 * resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
	struct mapped_device *md = t->md;
	struct gendisk *template_disk = NULL;

	/* If target handles integrity itself do not register it here. */
	if (t->integrity_added)
		return 0;

	template_disk = dm_table_get_integrity_disk(t);
	if (!template_disk)
		return 0;

	if (!integrity_profile_exists(dm_disk(md))) {
		t->integrity_supported = true;
		/*
		 * Register integrity profile during table load; we can do
		 * this because the final profile must match during resume.
		 */
		blk_integrity_register(dm_disk(md),
				       blk_get_integrity(template_disk));
		return 0;
	}

	/*
	 * If DM device already has an initialized integrity
	 * profile the new profile should not conflict.
	 */
	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMERR("%s: conflict with existing integrity profile: %s profile mismatch",
		      dm_device_name(t->md),
		      template_disk->disk_name);
		return 1;
	}

	/* Preserve existing integrity profile */
	t->integrity_supported = true;
	return 0;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

struct dm_crypto_profile {
	struct blk_crypto_profile profile;
	struct mapped_device *md;
};

static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	const struct blk_crypto_key *key = data;

	blk_crypto_evict_key(dev->bdev, key);
	return 0;
}

/*
 * When an inline encryption key is evicted from a device-mapper device, evict
 * it from all the underlying devices.
 */
static int dm_keyslot_evict(struct blk_crypto_profile *profile,
			    const struct blk_crypto_key *key, unsigned int slot)
{
	struct mapped_device *md =
		container_of(profile, struct dm_crypto_profile, profile)->md;
	struct dm_table *t;
	int srcu_idx;

	t = dm_get_live_table(md, &srcu_idx);
	if (!t)
		return 0;

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices)
			continue;
		ti->type->iterate_devices(ti, dm_keyslot_evict_callback,
					  (void *)key);
	}

	dm_put_live_table(md, srcu_idx);
	return 0;
}

static int
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	struct blk_crypto_profile *parent = data;
	struct blk_crypto_profile *child =
		bdev_get_queue(dev->bdev)->crypto_profile;

	blk_crypto_intersect_capabilities(parent, child);
	return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
	struct dm_crypto_profile *dmcp = container_of(profile,
						      struct dm_crypto_profile,
						      profile);

	if (!profile)
		return;

	blk_crypto_profile_destroy(profile);
	kfree(dmcp);
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
	dm_destroy_crypto_profile(t->crypto_profile);
	t->crypto_profile = NULL;
}

/*
 * Constructs and initializes t->crypto_profile with a crypto profile that
 * represents the common set of crypto capabilities of the devices described by
 * the dm_table.  However, if the constructed crypto profile doesn't support all
 * crypto capabilities that are supported by the current mapped_device, it
 * returns an error instead, since we don't support removing crypto capabilities
 * on table changes.  Finally, if the constructed crypto profile is "empty" (has
 * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
 */
static int dm_table_construct_crypto_profile(struct dm_table *t)
{
	struct dm_crypto_profile *dmcp;
	struct blk_crypto_profile *profile;
	unsigned int i;
	bool empty_profile = true;

	dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
	if (!dmcp)
		return -ENOMEM;
	dmcp->md = t->md;

	profile = &dmcp->profile;
	blk_crypto_profile_init(profile, 0);
	profile->ll_ops.keyslot_evict = dm_keyslot_evict;
	profile->max_dun_bytes_supported = UINT_MAX;
	memset(profile->modes_supported, 0xFF,
	       sizeof(profile->modes_supported));

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_passes_crypto(ti->type)) {
			blk_crypto_intersect_capabilities(profile, NULL);
			break;
		}
		if (!ti->type->iterate_devices)
			continue;
		ti->type->iterate_devices(ti,
					  device_intersect_crypto_capabilities,
					  profile);
	}

	if (t->md->queue &&
	    !blk_crypto_has_capabilities(profile,
					 t->md->queue->crypto_profile)) {
		DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
		dm_destroy_crypto_profile(profile);
		return -EINVAL;
	}

	/*
	 * If the new profile doesn't actually support any crypto capabilities,
	 * we may as well represent it with a NULL profile.
	 */
	for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
		if (profile->modes_supported[i]) {
			empty_profile = false;
			break;
		}
	}

	if (empty_profile) {
		dm_destroy_crypto_profile(profile);
		profile = NULL;
	}

	/*
	 * t->crypto_profile is only set temporarily while the table is being
	 * set up, and it gets set to NULL after the profile has been
	 * transferred to the request_queue.
	 */
	t->crypto_profile = profile;

	return 0;
}

static void dm_update_crypto_profile(struct request_queue *q,
				     struct dm_table *t)
{
	if (!t->crypto_profile)
		return;

	/* Make the crypto profile less restrictive. */
	if (!q->crypto_profile) {
		blk_crypto_register(t->crypto_profile, q);
	} else {
		blk_crypto_update_capabilities(q->crypto_profile,
					       t->crypto_profile);
		dm_destroy_crypto_profile(t->crypto_profile);
	}
	t->crypto_profile = NULL;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static int dm_table_construct_crypto_profile(struct dm_table *t)
{
	return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
}

static void dm_update_crypto_profile(struct request_queue *q,
				     struct dm_table *t)
{
}

#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_determine_type(t);
	if (r) {
		DMERR("unable to determine table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_register_integrity(t);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

	r = dm_table_construct_crypto_profile(t);
	if (r) {
		DMERR("could not construct crypto profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

inline sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer for NULL
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	if (unlikely(sector >= dm_table_get_size(t)))
		return NULL;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
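
/*
 * Lookup sketch: with depth == 2, the first iteration scans the root
 * node (level 0) for the first key >= sector, giving child index k; the
 * second iteration scans leaf node n = get_child(0, k) in t->highs the
 * same way, and the result is t->targets[KEYS_PER_NODE * n + k]. Keys
 * are the targets' inclusive end sectors (set in dm_table_add_target),
 * so "first key >= sector" selects the target whose range contains the
 * sector.
 */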

static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
}

/*
 * type->iterate_devices() should be called when the sanity check needs to
 * iterate and check all underlying data devices. iterate_devices() will
 * iterate all underlying data devices until it encounters a non-zero return
 * code, returned either by the input iterate_devices_callout_fn or by
 * iterate_devices() itself internally.
 *
 * For some target type (e.g. dm-stripe), one call of iterate_devices() may
 * iterate multiple underlying devices internally, in which case a non-zero
 * return code returned by iterate_devices_callout_fn will stop the iteration
 * in advance.
 *
 * Cases requiring _any_ underlying device supporting some kind of attribute
 * should use an iteration structure like dm_table_any_dev_attr(), or call it
 * directly. @func should handle semantics of positive examples, e.g. capable
 * of something.
 *
 * Cases requiring _all_ underlying devices supporting some kind of attribute
 * should use an iteration structure like dm_table_supports_nowait() or
 * dm_table_supports_discards(), or introduce a dm_table_all_devs_attr() that
 * uses an @anti_func that handles semantics of counter examples, e.g. not
 * capable of something: return !dm_table_any_dev_attr(t, anti_func, data);
 */
static bool dm_table_any_dev_attr(struct dm_table *t,
				  iterate_devices_callout_fn func, void *data)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, func, data))
			return true;
	}

	return false;
}
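
/*
 * Example of the "_all_ devices" pattern the comment above describes,
 * using the counter-example callback device_is_rotational() defined
 * later in this file:
 *
 *	bool all_nonrot = !dm_table_any_dev_attr(t, device_is_rotational, NULL);
 *
 * which is exactly how dm_table_set_restrictions() decides whether to
 * set QUEUE_FLAG_NONROT.
 */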

static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned int *num_devices = data;

	(*num_devices)++;

	return 0;
}

static bool dm_table_supports_poll(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
			return false;
	}

	return true;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);
		unsigned int num_devices = 0;

		if (!ti->type->iterate_devices)
			return false;

		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}

static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	bool *zoned = data;

	return bdev_is_zoned(dev->bdev) != *zoned;
}

static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
				 sector_t start, sector_t len, void *data)
{
	return bdev_is_zoned(dev->bdev);
}

/*
 * Check the device zoned model based on the target feature flag. If the target
 * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
 * also accepted but all devices must have the same zoned model. If the target
 * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
 * zoned model with all zoned devices having the same zone size.
 */
static bool dm_table_supports_zoned(struct dm_table *t, bool zoned)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/*
		 * For the wildcard target (dm-error), if we do not have a
		 * backing device, we must always return false. If we have a
		 * backing device, the result must depend on checking zoned
		 * model, like for any other target. So for this, check directly
		 * if the target backing device is zoned as we get "false" when
		 * dm-error was set without a backing device.
		 */
		if (dm_target_is_wildcard(ti->type) &&
		    !ti->type->iterate_devices(ti, device_is_zoned_model, NULL))
			return false;

		if (dm_target_supports_zoned_hm(ti->type)) {
			if (!ti->type->iterate_devices ||
			    ti->type->iterate_devices(ti, device_not_zoned,
						      &zoned))
				return false;
		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
			if (zoned)
				return false;
		}
	}

	return true;
}

static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	unsigned int *zone_sectors = data;

	if (!bdev_is_zoned(dev->bdev))
		return 0;
	return bdev_zone_sectors(dev->bdev) != *zone_sectors;
}

/*
 * Check consistency of zoned model and zone sectors across all targets. For
 * zone sectors, if the destination device is a zoned block device, it shall
 * have the specified zone_sectors.
 */
static int validate_hardware_zoned(struct dm_table *t, bool zoned,
				   unsigned int zone_sectors)
{
	if (!zoned)
		return 0;

	if (!dm_table_supports_zoned(t, zoned)) {
		DMERR("%s: zoned model is not consistent across all devices",
		      dm_device_name(t->md));
		return -EINVAL;
	}

	/* Check zone size validity and compatibility */
	if (!zone_sectors || !is_power_of_2(zone_sectors))
		return -EINVAL;

	if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
		DMERR("%s: zone sectors is not consistent across all zoned devices",
		      dm_device_name(t->md));
		return -EINVAL;
	}

	return 0;
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *t,
			      struct queue_limits *limits)
{
	struct queue_limits ti_limits;
	unsigned int zone_sectors = 0;
	bool zoned = false;

	blk_set_stacking_limits(limits);

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		blk_set_stacking_limits(&ti_limits);

		if (!ti->type->iterate_devices) {
			/* Set I/O hints portion of queue limits */
			if (ti->type->io_hints)
				ti->type->io_hints(ti, &ti_limits);
			goto combine_limits;
		}

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		if (!zoned && ti_limits.zoned) {
			/*
			 * After stacking all limits, validate all devices
			 * in table support this zoned model and zone sectors.
			 */
			zoned = ti_limits.zoned;
			zone_sectors = ti_limits.chunk_sectors;
		}

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device (start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(t->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);
	}

	/*
	 * Verify that the zoned model and zone sectors, as determined before
	 * any .io_hints override, are the same across all devices in the table.
	 * - this is especially relevant if .io_hints is emulating a disk-managed
	 *   zoned model on host-managed zoned block devices.
	 * BUT...
	 */
	if (limits->zoned) {
		/*
		 * ...IF the above limits stacking determined a zoned model
		 * validate that all of the table's devices conform to it.
		 */
		zoned = limits->zoned;
		zone_sectors = limits->chunk_sectors;
	}
	if (validate_hardware_zoned(t, zoned, zone_sectors))
		return -EINVAL;

	return validate_hardware_logical_block_alignment(t, limits);
}

/*
 * Verify that all devices have an integrity profile that matches the
 * DM device's registered integrity profile.  If the profiles don't
 * match then unregister the DM device's integrity profile.
 */
static void dm_table_verify_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	if (t->integrity_added)
		return;

	if (t->integrity_supported) {
		/*
		 * Verify that the original integrity profile
		 * matches all the devices in this table.
		 */
		template_disk = dm_table_get_integrity_disk(t);
		if (template_disk &&
		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
			return;
	}

	if (integrity_profile_exists(dm_disk(t->md))) {
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
		blk_integrity_unregister(dm_disk(t->md));
	}
}

static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned long flush = (unsigned long) data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return (q->queue_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
	/*
	 * Require at least one underlying device to support flushes.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting flushes must provide.
	 */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_flush_bios)
			continue;

		if (ti->flush_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
			return true;
	}

	return false;
}

static int device_dax_write_cache_enabled(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{
	struct dax_device *dax_dev = dev->dax_dev;

	if (!dax_dev)
		return false;

	if (dax_write_cache_enabled(dax_dev))
		return true;
	return false;
}

static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	return !bdev_nonrot(dev->bdev);
}

static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
			     sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return !blk_queue_add_random(q);
}

static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return !q->limits.max_write_zeroes_sectors;
}

static bool dm_table_supports_write_zeroes(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_write_zeroes_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
			return false;
	}

	return true;
}

static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	return !bdev_nowait(dev->bdev);
}

static bool dm_table_supports_nowait(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_supports_nowait(ti->type))
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
			return false;
	}

	return true;
}

static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{
	return !bdev_max_discard_sectors(dev->bdev);
}

static bool dm_table_supports_discards(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_discard_bios)
			return false;

		/*
		 * Either the target provides discard support (as implied by setting
		 * 'discards_supported') or it relies on _all_ data devices having
		 * discard support.
		 */
		if (!ti->discards_supported &&
		    (!ti->type->iterate_devices ||
		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
			return false;
	}

	return true;
}
1929
1930static int device_not_secure_erase_capable(struct dm_target *ti,
1931					   struct dm_dev *dev, sector_t start,
1932					   sector_t len, void *data)
1933{
1934	return !bdev_max_secure_erase_sectors(dev->bdev);
1935}
1936
1937static bool dm_table_supports_secure_erase(struct dm_table *t)
1938{
1939	for (unsigned int i = 0; i < t->num_targets; i++) {
1940		struct dm_target *ti = dm_table_get_target(t, i);
1941
1942		if (!ti->num_secure_erase_bios)
1943			return false;
1944
1945		if (!ti->type->iterate_devices ||
1946		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1947			return false;
1948	}
1949
1950	return true;
1951}
1952
1953static int device_requires_stable_pages(struct dm_target *ti,
1954					struct dm_dev *dev, sector_t start,
1955					sector_t len, void *data)
1956{
1957	return bdev_stable_writes(dev->bdev);
1958}
1959
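/*
 * Copy the validated queue_limits into the mapped device's request_queue
 * and set or clear its feature flags (nowait, discard, write cache, DAX,
 * rotational, stable writes, poll, ...) according to what this table
 * supports.
 */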
1960int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1961			      struct queue_limits *limits)
1962{
1963	bool wc = false, fua = false;
1964	int r;
1965
1966	/*
1967	 * Copy table's limits to the DM device's request_queue
1968	 */
1969	q->limits = *limits;
1970
1971	if (dm_table_supports_nowait(t))
1972		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
1973	else
1974		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
1975
1976	if (!dm_table_supports_discards(t)) {
1977		q->limits.max_discard_sectors = 0;
1978		q->limits.max_hw_discard_sectors = 0;
1979		q->limits.discard_granularity = 0;
1980		q->limits.discard_alignment = 0;
1981		q->limits.discard_misaligned = 0;
1982	}
1983
1984	if (!dm_table_supports_secure_erase(t))
1985		q->limits.max_secure_erase_sectors = 0;
1986
1987	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
1988		wc = true;
1989		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
1990			fua = true;
1991	}
1992	blk_queue_write_cache(q, wc, fua);
1993
1994	if (dm_table_supports_dax(t, device_not_dax_capable)) {
1995		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
1996		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
1997			set_dax_synchronous(t->md->dax_dev);
1998	} else
1999		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
2000
2001	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
2002		dax_write_cache(t->md->dax_dev, true);
2003
2004	/* Ensure that all underlying devices are non-rotational. */
2005	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
2006		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2007	else
2008		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2009
2010	if (!dm_table_supports_write_zeroes(t))
2011		q->limits.max_write_zeroes_sectors = 0;
2012
2013	dm_table_verify_integrity(t);
2014
2015	/*
2016	 * Some devices don't use blk_integrity but still want stable pages
2017	 * because they do their own checksumming.
2018	 * If any underlying device requires stable pages, a table must require
2019	 * them as well.  Only targets that support iterate_devices are considered:
2020	 * we don't want error, zero, etc. to require stable pages.
2021	 */
2022	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
2023		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
2024	else
2025		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
2026
2027	/*
2028	 * Determine whether or not this queue's I/O timings contribute
2029	 * to the entropy pool. Only request-based targets use this.
2030	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
2031	 * have it set.
2032	 */
2033	if (blk_queue_add_random(q) &&
2034	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
2035		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2036
2037	/*
2038	 * For a zoned target, set up the zone-related queue attributes and
2039	 * the resources necessary for zone append emulation, if needed.
2040	 */
2041	if (blk_queue_is_zoned(q)) {
2042		r = dm_set_zones_restrictions(t, q);
2043		if (r)
2044			return r;
2045		if (!static_key_enabled(&zoned_enabled.key))
2046			static_branch_enable(&zoned_enabled);
2047	}
2048
2049	dm_update_crypto_profile(q, t);
2050	disk_update_readahead(t->md->disk);
2051
2052	/*
2053	 * The check for a request-based device is left to
2054	 * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
2055	 *
2056	 * For a bio-based device, only set QUEUE_FLAG_POLL when all
2057	 * underlying devices support polling.
2058	 */
2059	if (__table_type_bio_based(t->type)) {
2060		if (dm_table_supports_poll(t))
2061			blk_queue_flag_set(QUEUE_FLAG_POLL, q);
2062		else
2063			blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
2064	}
2065
2066	return 0;
2067}
2068
2069struct list_head *dm_table_get_devices(struct dm_table *t)
2070{
2071	return &t->devices;
2072}
2073
2074blk_mode_t dm_table_get_mode(struct dm_table *t)
2075{
2076	return t->mode;
2077}
2078EXPORT_SYMBOL(dm_table_get_mode);
2079
2080enum suspend_mode {
2081	PRESUSPEND,
2082	PRESUSPEND_UNDO,
2083	POSTSUSPEND,
2084};
2085
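/* Dispatch the chosen suspend hook to every target that implements it. */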
2086static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
2087{
2088	lockdep_assert_held(&t->md->suspend_lock);
2089
2090	for (unsigned int i = 0; i < t->num_targets; i++) {
2091		struct dm_target *ti = dm_table_get_target(t, i);
2092
2093		switch (mode) {
2094		case PRESUSPEND:
2095			if (ti->type->presuspend)
2096				ti->type->presuspend(ti);
2097			break;
2098		case PRESUSPEND_UNDO:
2099			if (ti->type->presuspend_undo)
2100				ti->type->presuspend_undo(ti);
2101			break;
2102		case POSTSUSPEND:
2103			if (ti->type->postsuspend)
2104				ti->type->postsuspend(ti);
2105			break;
2106		}
2107	}
2108}
2109
2110void dm_table_presuspend_targets(struct dm_table *t)
2111{
2112	if (!t)
2113		return;
2114
2115	suspend_targets(t, PRESUSPEND);
2116}
2117
2118void dm_table_presuspend_undo_targets(struct dm_table *t)
2119{
2120	if (!t)
2121		return;
2122
2123	suspend_targets(t, PRESUSPEND_UNDO);
2124}
2125
2126void dm_table_postsuspend_targets(struct dm_table *t)
2127{
2128	if (!t)
2129		return;
2130
2131	suspend_targets(t, POSTSUSPEND);
2132}
2133
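/*
 * Resume is two-phase: every target's preresume hook must succeed before
 * any resume hook runs, so a single failing target aborts the resume.
 */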
2134int dm_table_resume_targets(struct dm_table *t)
2135{
2136	unsigned int i;
2137	int r = 0;
2138
2139	lockdep_assert_held(&t->md->suspend_lock);
2140
2141	for (i = 0; i < t->num_targets; i++) {
2142		struct dm_target *ti = dm_table_get_target(t, i);
2143
2144		if (!ti->type->preresume)
2145			continue;
2146
2147		r = ti->type->preresume(ti);
2148		if (r) {
2149			DMERR("%s: %s: preresume failed, error = %d",
2150			      dm_device_name(t->md), ti->type->name, r);
2151			return r;
2152		}
2153	}
2154
2155	for (i = 0; i < t->num_targets; i++) {
2156		struct dm_target *ti = dm_table_get_target(t, i);
2157
2158		if (ti->type->resume)
2159			ti->type->resume(ti);
2160	}
2161
2162	return 0;
2163}
2164
2165struct mapped_device *dm_table_get_md(struct dm_table *t)
2166{
2167	return t->md;
2168}
2169EXPORT_SYMBOL(dm_table_get_md);
2170
2171const char *dm_table_device_name(struct dm_table *t)
2172{
2173	return dm_device_name(t->md);
2174}
2175EXPORT_SYMBOL_GPL(dm_table_device_name);
2176
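/* Kick the mapped device's request queue; a no-op for bio-based tables. */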
2177void dm_table_run_md_queue_async(struct dm_table *t)
2178{
2179	if (!dm_table_request_based(t))
2180		return;
2181
2182	if (t->md->queue)
2183		blk_mq_run_hw_queues(t->md->queue, true);
2184}
2185EXPORT_SYMBOL(dm_table_run_md_queue_async);
2186
v5.14.15
 
   1/*
   2 * Copyright (C) 2001 Sistina Software (UK) Limited.
   3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm-core.h"
   9
  10#include <linux/module.h>
  11#include <linux/vmalloc.h>
  12#include <linux/blkdev.h>
  13#include <linux/namei.h>
  14#include <linux/ctype.h>
  15#include <linux/string.h>
  16#include <linux/slab.h>
  17#include <linux/interrupt.h>
  18#include <linux/mutex.h>
  19#include <linux/delay.h>
  20#include <linux/atomic.h>
  21#include <linux/blk-mq.h>
  22#include <linux/mount.h>
  23#include <linux/dax.h>
  24
  25#define DM_MSG_PREFIX "table"
  26
  27#define NODE_SIZE L1_CACHE_BYTES
  28#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
  29#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
  30
  31/*
  32 * Similar to ceiling(log_size(n))
  33 */
  34static unsigned int int_log(unsigned int n, unsigned int base)
  35{
  36	int result = 0;
  37
  38	while (n > 1) {
  39		n = dm_div_up(n, base);
  40		result++;
  41	}
  42
  43	return result;
  44}
  45
  46/*
  47 * Calculate the index of the child node of the n'th node k'th key.
  48 */
  49static inline unsigned int get_child(unsigned int n, unsigned int k)
  50{
  51	return (n * CHILDREN_PER_NODE) + k;
  52}
  53
  54/*
  55 * Return the n'th node of level l from table t.
  56 */
  57static inline sector_t *get_node(struct dm_table *t,
  58				 unsigned int l, unsigned int n)
  59{
  60	return t->index[l] + (n * KEYS_PER_NODE);
  61}
  62
  63/*
  64 * Return the highest key that you could lookup from the n'th
  65 * node on level l of the btree.
  66 */
  67static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
  68{
  69	for (; l < t->depth - 1; l++)
  70		n = get_child(n, CHILDREN_PER_NODE - 1);
  71
  72	if (n >= t->counts[l])
  73		return (sector_t) -1;
  74
  75	return get_node(t, l, n)[KEYS_PER_NODE - 1];
  76}
  77
  78/*
  79 * Fills in a level of the btree based on the highs of the level
  80 * below it.
  81 */
  82static int setup_btree_index(unsigned int l, struct dm_table *t)
  83{
  84	unsigned int n, k;
  85	sector_t *node;
  86
  87	for (n = 0U; n < t->counts[l]; n++) {
  88		node = get_node(t, l, n);
  89
  90		for (k = 0U; k < KEYS_PER_NODE; k++)
  91			node[k] = high(t, l + 1, get_child(n, k));
  92	}
  93
  94	return 0;
  95}
  96
  97/*
  98 * highs, and targets are managed as dynamic arrays during a
  99 * table load.
 100 */
 101static int alloc_targets(struct dm_table *t, unsigned int num)
 102{
 103	sector_t *n_highs;
 104	struct dm_target *n_targets;
 105
 106	/*
 107	 * Allocate both the target array and offset array at once.
 108	 */
 109	n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
 110			   GFP_KERNEL);
 111	if (!n_highs)
 112		return -ENOMEM;
 113
 114	n_targets = (struct dm_target *) (n_highs + num);
 115
 116	memset(n_highs, -1, sizeof(*n_highs) * num);
 117	kvfree(t->highs);
 118
 119	t->num_allocated = num;
 120	t->highs = n_highs;
 121	t->targets = n_targets;
 122
 123	return 0;
 124}
 125
 126int dm_table_create(struct dm_table **result, fmode_t mode,
 127		    unsigned num_targets, struct mapped_device *md)
 128{
 129	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
 130
 131	if (!t)
 132		return -ENOMEM;
 133
 134	INIT_LIST_HEAD(&t->devices);
 135
 136	if (!num_targets)
 137		num_targets = KEYS_PER_NODE;
 138
 139	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
 140
 141	if (!num_targets) {
 142		kfree(t);
 143		return -ENOMEM;
 144	}
 145
 146	if (alloc_targets(t, num_targets)) {
 147		kfree(t);
 148		return -ENOMEM;
 149	}
 150
 151	t->type = DM_TYPE_NONE;
 152	t->mode = mode;
 153	t->md = md;
 154	*result = t;
 155	return 0;
 156}
 157
 158static void free_devices(struct list_head *devices, struct mapped_device *md)
 159{
 160	struct list_head *tmp, *next;
 161
 162	list_for_each_safe(tmp, next, devices) {
 163		struct dm_dev_internal *dd =
 164		    list_entry(tmp, struct dm_dev_internal, list);
 165		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
 166		       dm_device_name(md), dd->dm_dev->name);
 167		dm_put_table_device(md, dd->dm_dev);
 168		kfree(dd);
 169	}
 170}
 171
 172static void dm_table_destroy_keyslot_manager(struct dm_table *t);
 173
 174void dm_table_destroy(struct dm_table *t)
 175{
 176	unsigned int i;
 177
 178	if (!t)
 179		return;
 180
 181	/* free the indexes */
 182	if (t->depth >= 2)
 183		kvfree(t->index[t->depth - 2]);
 184
 185	/* free the targets */
 186	for (i = 0; i < t->num_targets; i++) {
 187		struct dm_target *tgt = t->targets + i;
 188
 189		if (tgt->type->dtr)
 190			tgt->type->dtr(tgt);
 191
 192		dm_put_target_type(tgt->type);
 193	}
 194
 195	kvfree(t->highs);
 196
 197	/* free the device list */
 198	free_devices(&t->devices, t->md);
 199
 200	dm_free_md_mempools(t->mempools);
 201
 202	dm_table_destroy_keyslot_manager(t);
 203
 204	kfree(t);
 205}
 206
 207/*
 208 * See if we've already got a device in the list.
 209 */
 210static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
 211{
 212	struct dm_dev_internal *dd;
 213
 214	list_for_each_entry (dd, l, list)
 215		if (dd->dm_dev->bdev->bd_dev == dev)
 216			return dd;
 217
 218	return NULL;
 219}
 220
 221/*
 222 * If possible, this checks whether an area of a destination device is invalid.
 223 */
 224static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 225				  sector_t start, sector_t len, void *data)
 226{
 227	struct queue_limits *limits = data;
 228	struct block_device *bdev = dev->bdev;
 229	sector_t dev_size =
 230		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
 231	unsigned short logical_block_size_sectors =
 232		limits->logical_block_size >> SECTOR_SHIFT;
 233	char b[BDEVNAME_SIZE];
 234
 235	if (!dev_size)
 236		return 0;
 237
 238	if ((start >= dev_size) || (start + len > dev_size)) {
 239		DMWARN("%s: %s too small for target: "
 240		       "start=%llu, len=%llu, dev_size=%llu",
 241		       dm_device_name(ti->table->md), bdevname(bdev, b),
 242		       (unsigned long long)start,
 243		       (unsigned long long)len,
 244		       (unsigned long long)dev_size);
 245		return 1;
 246	}
 247
 248	/*
 249	 * If the target is mapped to zoned block device(s), check
 250	 * that the zones are not partially mapped.
 251	 */
 252	if (bdev_is_zoned(bdev)) {
 253		unsigned int zone_sectors = bdev_zone_sectors(bdev);
 254
 255		if (start & (zone_sectors - 1)) {
 256			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
 257			       dm_device_name(ti->table->md),
 258			       (unsigned long long)start,
 259			       zone_sectors, bdevname(bdev, b));
 260			return 1;
 261		}
 262
 263		/*
 264		 * Note: The last zone of a zoned block device may be smaller
 265		 * than other zones. So for a target mapping the end of a
 266		 * zoned block device with such a zone, len would not be zone
 267		 * aligned. We do not allow such last smaller zone to be part
 268		 * of the mapping here to ensure that mappings with multiple
 269		 * devices do not end up with a smaller zone in the middle of
 270		 * the sector range.
 271		 */
 272		if (len & (zone_sectors - 1)) {
 273			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
 274			       dm_device_name(ti->table->md),
 275			       (unsigned long long)len,
 276			       zone_sectors, bdevname(bdev, b));
 277			return 1;
 278		}
 279	}
 280
 281	if (logical_block_size_sectors <= 1)
 282		return 0;
 283
 284	if (start & (logical_block_size_sectors - 1)) {
 285		DMWARN("%s: start=%llu not aligned to h/w "
 286		       "logical block size %u of %s",
 287		       dm_device_name(ti->table->md),
 288		       (unsigned long long)start,
 289		       limits->logical_block_size, bdevname(bdev, b));
 290		return 1;
 291	}
 292
 293	if (len & (logical_block_size_sectors - 1)) {
 294		DMWARN("%s: len=%llu not aligned to h/w "
 295		       "logical block size %u of %s",
 296		       dm_device_name(ti->table->md),
 297		       (unsigned long long)len,
 298		       limits->logical_block_size, bdevname(bdev, b));
 299		return 1;
 300	}
 301
 302	return 0;
 303}
 304
 305/*
 306 * This upgrades the mode on an already open dm_dev, being
 307 * careful to leave things as they were if we fail to reopen the
 308 * device and not to touch the existing bdev field in case
 309 * it is accessed concurrently.
 310 */
 311static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
 312			struct mapped_device *md)
 313{
 314	int r;
 315	struct dm_dev *old_dev, *new_dev;
 316
 317	old_dev = dd->dm_dev;
 318
 319	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
 320				dd->dm_dev->mode | new_mode, &new_dev);
 321	if (r)
 322		return r;
 323
 324	dd->dm_dev = new_dev;
 325	dm_put_table_device(md, old_dev);
 326
 327	return 0;
 328}
 329
 330/*
 331 * Convert the path to a device
 332 */
 333dev_t dm_get_dev_t(const char *path)
 334{
 335	dev_t dev;
 336
 337	if (lookup_bdev(path, &dev))
 338		dev = name_to_dev_t(path);
 339	return dev;
 340}
 341EXPORT_SYMBOL_GPL(dm_get_dev_t);
 342
 343/*
 344 * Add a device to the list, or just increment the usage count if
 345 * it's already present.
 346 */
 347int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 348		  struct dm_dev **result)
 349{
 350	int r;
 351	dev_t dev;
 352	unsigned int major, minor;
 353	char dummy;
 354	struct dm_dev_internal *dd;
 355	struct dm_table *t = ti->table;
 356
 357	BUG_ON(!t);
 358
 359	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
 360		/* Extract the major/minor numbers */
 361		dev = MKDEV(major, minor);
 362		if (MAJOR(dev) != major || MINOR(dev) != minor)
 363			return -EOVERFLOW;
 364	} else {
 365		dev = dm_get_dev_t(path);
 366		if (!dev)
 367			return -ENODEV;
 368	}
 369
 370	dd = find_device(&t->devices, dev);
 371	if (!dd) {
 372		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
 373		if (!dd)
 374			return -ENOMEM;
 375
 376		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
 377			kfree(dd);
 378			return r;
 379		}
 380
 381		refcount_set(&dd->count, 1);
 382		list_add(&dd->list, &t->devices);
 383		goto out;
 384
 385	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
 386		r = upgrade_mode(dd, mode, t->md);
 387		if (r)
 388			return r;
 389	}
 390	refcount_inc(&dd->count);
 391out:
 392	*result = dd->dm_dev;
 393	return 0;
 394}
 395EXPORT_SYMBOL(dm_get_device);
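/*
 * Illustrative use from a target constructor (modelled on dm-linear;
 * lc->dev is that target's private device pointer):
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *			  &lc->dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * The reference is dropped again with dm_put_device() in the destructor.
 */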
 396
 397static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 398				sector_t start, sector_t len, void *data)
 399{
 400	struct queue_limits *limits = data;
 401	struct block_device *bdev = dev->bdev;
 402	struct request_queue *q = bdev_get_queue(bdev);
 403	char b[BDEVNAME_SIZE];
 404
 405	if (unlikely(!q)) {
 406		DMWARN("%s: Cannot set limits for nonexistent device %s",
 407		       dm_device_name(ti->table->md), bdevname(bdev, b));
 408		return 0;
 409	}
 410
 411	if (blk_stack_limits(limits, &q->limits,
 412			get_start_sect(bdev) + start) < 0)
 413		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
 414		       "physical_block_size=%u, logical_block_size=%u, "
 415		       "alignment_offset=%u, start=%llu",
 416		       dm_device_name(ti->table->md), bdevname(bdev, b),
 417		       q->limits.physical_block_size,
 418		       q->limits.logical_block_size,
 419		       q->limits.alignment_offset,
 420		       (unsigned long long) start << SECTOR_SHIFT);
 421	return 0;
 422}
 423
 424/*
 425 * Decrement a device's use count and remove it if necessary.
 426 */
 427void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 428{
 429	int found = 0;
 430	struct list_head *devices = &ti->table->devices;
 431	struct dm_dev_internal *dd;
 432
 433	list_for_each_entry(dd, devices, list) {
 434		if (dd->dm_dev == d) {
 435			found = 1;
 436			break;
 437		}
 438	}
 439	if (!found) {
 440		DMWARN("%s: device %s not in table devices list",
 441		       dm_device_name(ti->table->md), d->name);
 442		return;
 443	}
 444	if (refcount_dec_and_test(&dd->count)) {
 445		dm_put_table_device(ti->table->md, d);
 446		list_del(&dd->list);
 447		kfree(dd);
 448	}
 449}
 450EXPORT_SYMBOL(dm_put_device);
 451
 452/*
 453 * Checks to see if the target joins onto the end of the table.
 454 */
 455static int adjoin(struct dm_table *table, struct dm_target *ti)
 456{
 457	struct dm_target *prev;
 458
 459	if (!table->num_targets)
 460		return !ti->begin;
 461
 462	prev = &table->targets[table->num_targets - 1];
 463	return (ti->begin == (prev->begin + prev->len));
 464}
 465
 466/*
 467 * Used to dynamically allocate the arg array.
 468 *
 469 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 470 * process messages even if some device is suspended. These messages have a
 471 * small fixed number of arguments.
 472 *
 473 * On the other hand, dm-switch needs to process bulk data using messages and
 474 * excessive use of GFP_NOIO could cause trouble.
 475 */
 476static char **realloc_argv(unsigned *size, char **old_argv)
 477{
 478	char **argv;
 479	unsigned new_size;
 480	gfp_t gfp;
 481
 482	if (*size) {
 483		new_size = *size * 2;
 484		gfp = GFP_KERNEL;
 485	} else {
 486		new_size = 8;
 487		gfp = GFP_NOIO;
 488	}
 489	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
 490	if (argv && old_argv) {
 491		memcpy(argv, old_argv, *size * sizeof(*argv));
 492		*size = new_size;
 493	}
 494
 495	kfree(old_argv);
 496	return argv;
 497}
 498
 499/*
 500 * Destructively splits up the argument list to pass to ctr.
 501 */
 502int dm_split_args(int *argc, char ***argvp, char *input)
 503{
 504	char *start, *end = input, *out, **argv = NULL;
 505	unsigned array_size = 0;
 506
 507	*argc = 0;
 508
 509	if (!input) {
 510		*argvp = NULL;
 511		return 0;
 512	}
 513
 514	argv = realloc_argv(&array_size, argv);
 515	if (!argv)
 516		return -ENOMEM;
 517
 518	while (1) {
 519		/* Skip whitespace */
 520		start = skip_spaces(end);
 521
 522		if (!*start)
 523			break;	/* success, we hit the end */
 524
 525		/* 'out' is used to remove any back-quotes */
 526		end = out = start;
 527		while (*end) {
 528			/* Everything apart from '\0' can be quoted */
 529			if (*end == '\\' && *(end + 1)) {
 530				*out++ = *(end + 1);
 531				end += 2;
 532				continue;
 533			}
 534
 535			if (isspace(*end))
 536				break;	/* end of token */
 537
 538			*out++ = *end++;
 539		}
 540
 541		/* have we already filled the array ? */
 542		if ((*argc + 1) > array_size) {
 543			argv = realloc_argv(&array_size, argv);
 544			if (!argv)
 545				return -ENOMEM;
 546		}
 547
 548		/* we know this is whitespace */
 549		if (*end)
 550			end++;
 551
 552		/* terminate the string and put it in the array */
 553		*out = '\0';
 554		argv[*argc] = start;
 555		(*argc)++;
 556	}
 557
 558	*argvp = argv;
 559	return 0;
 560}
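/*
 * Example: given the (illustrative) input "0 409600 linear /dev/sda 8192",
 * dm_split_args() sets *argc to 5 and *argvp to { "0", "409600", "linear",
 * "/dev/sda", "8192" }, unescaping backslash-quoted characters in place.
 */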
 561
 562/*
 563 * Impose necessary and sufficient conditions on a device's table such
 564 * that any incoming bio which respects its logical_block_size can be
 565 * processed successfully.  If it falls across the boundary between
 566 * two or more targets, the size of each piece it gets split into must
 567 * be compatible with the logical_block_size of the target processing it.
 568 */
 569static int validate_hardware_logical_block_alignment(struct dm_table *table,
 570						 struct queue_limits *limits)
 571{
 572	/*
 573	 * This function uses arithmetic modulo the logical_block_size
 574	 * (in units of 512-byte sectors).
 575	 */
 576	unsigned short device_logical_block_size_sects =
 577		limits->logical_block_size >> SECTOR_SHIFT;
 578
 579	/*
 580	 * Offset of the start of the next table entry, mod logical_block_size.
 581	 */
 582	unsigned short next_target_start = 0;
 583
 584	/*
 585	 * Given an aligned bio that extends beyond the end of a
 586	 * target, how many sectors must the next target handle?
 587	 */
 588	unsigned short remaining = 0;
 589
 590	struct dm_target *ti;
 591	struct queue_limits ti_limits;
 592	unsigned i;
 593
 594	/*
 595	 * Check each entry in the table in turn.
 596	 */
 597	for (i = 0; i < dm_table_get_num_targets(table); i++) {
 598		ti = dm_table_get_target(table, i);
 599
 600		blk_set_stacking_limits(&ti_limits);
 601
 602		/* combine all target devices' limits */
 603		if (ti->type->iterate_devices)
 604			ti->type->iterate_devices(ti, dm_set_device_limits,
 605						  &ti_limits);
 606
 607		/*
 608		 * If the remaining sectors fall entirely within this
 609		 * table entry are they compatible with its logical_block_size?
 610		 */
 611		if (remaining < ti->len &&
 612		    remaining & ((ti_limits.logical_block_size >>
 613				  SECTOR_SHIFT) - 1))
 614			break;	/* Error */
 615
 616		next_target_start =
 617		    (unsigned short) ((next_target_start + ti->len) &
 618				      (device_logical_block_size_sects - 1));
 619		remaining = next_target_start ?
 620		    device_logical_block_size_sects - next_target_start : 0;
 621	}
 622
 623	if (remaining) {
 624		DMWARN("%s: table line %u (start sect %llu len %llu) "
 625		       "not aligned to h/w logical block size %u",
 626		       dm_device_name(table->md), i,
 627		       (unsigned long long) ti->begin,
 628		       (unsigned long long) ti->len,
 629		       limits->logical_block_size);
 630		return -EINVAL;
 631	}
 632
 633	return 0;
 634}
 635
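/*
 * Append a target to the table: look up the target type, enforce the
 * singleton/immutable/read-only constraints, check that the new target
 * adjoins the previous one, then run its constructor on the split
 * parameter list.
 */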
 636int dm_table_add_target(struct dm_table *t, const char *type,
 637			sector_t start, sector_t len, char *params)
 638{
 639	int r = -EINVAL, argc;
 640	char **argv;
 641	struct dm_target *tgt;
 642
 643	if (t->singleton) {
 644		DMERR("%s: target type %s must appear alone in table",
 645		      dm_device_name(t->md), t->targets->type->name);
 646		return -EINVAL;
 647	}
 648
 649	BUG_ON(t->num_targets >= t->num_allocated);
 650
 651	tgt = t->targets + t->num_targets;
 652	memset(tgt, 0, sizeof(*tgt));
 653
 654	if (!len) {
 655		DMERR("%s: zero-length target", dm_device_name(t->md));
 656		return -EINVAL;
 657	}
 658
 659	tgt->type = dm_get_target_type(type);
 660	if (!tgt->type) {
 661		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
 662		return -EINVAL;
 663	}
 664
 665	if (dm_target_needs_singleton(tgt->type)) {
 666		if (t->num_targets) {
 667			tgt->error = "singleton target type must appear alone in table";
 668			goto bad;
 669		}
 670		t->singleton = true;
 671	}
 672
 673	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
 674		tgt->error = "target type may not be included in a read-only table";
 675		goto bad;
 676	}
 677
 678	if (t->immutable_target_type) {
 679		if (t->immutable_target_type != tgt->type) {
 680			tgt->error = "immutable target type cannot be mixed with other target types";
 681			goto bad;
 682		}
 683	} else if (dm_target_is_immutable(tgt->type)) {
 684		if (t->num_targets) {
 685			tgt->error = "immutable target type cannot be mixed with other target types";
 686			goto bad;
 687		}
 688		t->immutable_target_type = tgt->type;
 689	}
 690
 691	if (dm_target_has_integrity(tgt->type))
 692		t->integrity_added = 1;
 693
 694	tgt->table = t;
 695	tgt->begin = start;
 696	tgt->len = len;
 697	tgt->error = "Unknown error";
 698
 699	/*
 700	 * Does this target adjoin the previous one ?
 701	 */
 702	if (!adjoin(t, tgt)) {
 703		tgt->error = "Gap in table";
 704		goto bad;
 705	}
 706
 707	r = dm_split_args(&argc, &argv, params);
 708	if (r) {
 709		tgt->error = "couldn't split parameters (insufficient memory)";
 710		goto bad;
 711	}
 712
 713	r = tgt->type->ctr(tgt, argc, argv);
 714	kfree(argv);
 715	if (r)
 716		goto bad;
 717
 718	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
 719
 720	if (!tgt->num_discard_bios && tgt->discards_supported)
 721		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
 722		       dm_device_name(t->md), type);
 723
 724	return 0;
 725
 726 bad:
 727	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
 728	dm_put_target_type(tgt->type);
 729	return r;
 730}
 731
 732/*
 733 * Target argument parsing helpers.
 734 */
 735static int validate_next_arg(const struct dm_arg *arg,
 736			     struct dm_arg_set *arg_set,
 737			     unsigned *value, char **error, unsigned grouped)
 738{
 739	const char *arg_str = dm_shift_arg(arg_set);
 740	char dummy;
 741
 742	if (!arg_str ||
 743	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
 744	    (*value < arg->min) ||
 745	    (*value > arg->max) ||
 746	    (grouped && arg_set->argc < *value)) {
 747		*error = arg->error;
 748		return -EINVAL;
 749	}
 750
 751	return 0;
 752}
 753
 754int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
 755		unsigned *value, char **error)
 756{
 757	return validate_next_arg(arg, arg_set, value, error, 0);
 758}
 759EXPORT_SYMBOL(dm_read_arg);
 760
 761int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
 762		      unsigned *value, char **error)
 763{
 764	return validate_next_arg(arg, arg_set, value, error, 1);
 765}
 766EXPORT_SYMBOL(dm_read_arg_group);
 767
 768const char *dm_shift_arg(struct dm_arg_set *as)
 769{
 770	char *r;
 771
 772	if (as->argc) {
 773		as->argc--;
 774		r = *as->argv;
 775		as->argv++;
 776		return r;
 777	}
 778
 779	return NULL;
 780}
 781EXPORT_SYMBOL(dm_shift_arg);
 782
 783void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 784{
 785	BUG_ON(as->argc < num_args);
 786	as->argc -= num_args;
 787	as->argv += num_args;
 788}
 789EXPORT_SYMBOL(dm_consume_args);
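/*
 * Illustrative use of the parsing helpers (the dm_arg values shown are
 * hypothetical):
 *
 *	static const struct dm_arg arg = { 0, 16, "invalid feature count" };
 *	unsigned int num_feature_args;
 *
 *	if (dm_read_arg_group(&arg, as, &num_feature_args, &ti->error))
 *		return -EINVAL;
 *
 * This shifts one argument, range-checks it against [0, 16] and, for the
 * group variant, verifies that at least that many arguments remain.
 */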
 790
 791static bool __table_type_bio_based(enum dm_queue_mode table_type)
 792{
 793	return (table_type == DM_TYPE_BIO_BASED ||
 794		table_type == DM_TYPE_DAX_BIO_BASED);
 795}
 796
 797static bool __table_type_request_based(enum dm_queue_mode table_type)
 798{
 799	return table_type == DM_TYPE_REQUEST_BASED;
 800}
 801
 802void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 803{
 804	t->type = type;
 805}
 806EXPORT_SYMBOL_GPL(dm_table_set_type);
 807
 808/* validate the dax capability of the target device span */
 809int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
 810			sector_t start, sector_t len, void *data)
 811{
 812	int blocksize = *(int *) data, id;
 813	bool rc;
 814
 815	id = dax_read_lock();
 816	rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
 817	dax_read_unlock(id);
 818
 819	return rc;
 820}
 821
 822/* Check devices support synchronous DAX */
 823static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
 824					      sector_t start, sector_t len, void *data)
 825{
 826	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
 827}
 828
 829bool dm_table_supports_dax(struct dm_table *t,
 830			   iterate_devices_callout_fn iterate_fn, int *blocksize)
 831{
 832	struct dm_target *ti;
 833	unsigned i;
 834
 835	/* Ensure that all targets support DAX. */
 836	for (i = 0; i < dm_table_get_num_targets(t); i++) {
 837		ti = dm_table_get_target(t, i);
 838
 839		if (!ti->type->direct_access)
 840			return false;
 841
 842		if (!ti->type->iterate_devices ||
 843		    ti->type->iterate_devices(ti, iterate_fn, blocksize))
 844			return false;
 845	}
 846
 847	return true;
 848}
 849
 850static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
 851				  sector_t start, sector_t len, void *data)
 852{
 853	struct block_device *bdev = dev->bdev;
 854	struct request_queue *q = bdev_get_queue(bdev);
 855
 856	/* request-based cannot stack on partitions! */
 857	if (bdev_is_partition(bdev))
 858		return false;
 859
 860	return queue_is_mq(q);
 861}
 862
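/*
 * Decide whether this table is bio-based or request-based from its
 * targets, upgrading bio-based to DAX bio-based when the whole table
 * supports DAX, and enforcing the single-immutable-target rule for
 * request-based tables.
 */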
 863static int dm_table_determine_type(struct dm_table *t)
 864{
 865	unsigned i;
 866	unsigned bio_based = 0, request_based = 0, hybrid = 0;
 867	struct dm_target *tgt;
 868	struct list_head *devices = dm_table_get_devices(t);
 869	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
 870	int page_size = PAGE_SIZE;
 871
 872	if (t->type != DM_TYPE_NONE) {
 873		/* target already set the table's type */
 874		if (t->type == DM_TYPE_BIO_BASED) {
 875			/* possibly upgrade to a variant of bio-based */
 876			goto verify_bio_based;
 877		}
 878		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
 879		goto verify_rq_based;
 880	}
 881
 882	for (i = 0; i < t->num_targets; i++) {
 883		tgt = t->targets + i;
 884		if (dm_target_hybrid(tgt))
 885			hybrid = 1;
 886		else if (dm_target_request_based(tgt))
 887			request_based = 1;
 888		else
 889			bio_based = 1;
 890
 891		if (bio_based && request_based) {
 892			DMERR("Inconsistent table: different target types"
 893			      " can't be mixed up");
 894			return -EINVAL;
 895		}
 896	}
 897
 898	if (hybrid && !bio_based && !request_based) {
 899		/*
 900		 * The targets can work either way.
 901		 * Determine the type from the live device.
 902		 * Default to bio-based if device is new.
 903		 */
 904		if (__table_type_request_based(live_md_type))
 905			request_based = 1;
 906		else
 907			bio_based = 1;
 908	}
 909
 910	if (bio_based) {
 911verify_bio_based:
 912		/* We must use this table as bio-based */
 913		t->type = DM_TYPE_BIO_BASED;
 914		if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
 915		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
 916			t->type = DM_TYPE_DAX_BIO_BASED;
 917		}
 918		return 0;
 919	}
 920
 921	BUG_ON(!request_based); /* No targets in this table */
 922
 923	t->type = DM_TYPE_REQUEST_BASED;
 924
 925verify_rq_based:
 926	/*
 927	 * Request-based dm supports only tables that have a single target now.
 928	 * To support multiple targets, request splitting support is needed,
 929	 * and that needs lots of changes in the block-layer.
 930	 * (e.g. request completion process for partial completion.)
 931	 */
 932	if (t->num_targets > 1) {
 933		DMERR("request-based DM doesn't support multiple targets");
 934		return -EINVAL;
 935	}
 936
 937	if (list_empty(devices)) {
 938		int srcu_idx;
 939		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
 940
 941		/* inherit live table's type */
 942		if (live_table)
 943			t->type = live_table->type;
 944		dm_put_live_table(t->md, srcu_idx);
 945		return 0;
 946	}
 947
 948	tgt = dm_table_get_immutable_target(t);
 949	if (!tgt) {
 950		DMERR("table load rejected: immutable target is required");
 951		return -EINVAL;
 952	} else if (tgt->max_io_len) {
 953		DMERR("table load rejected: immutable target that splits IO is not supported");
 954		return -EINVAL;
 955	}
 956
 957	/* Non-request-stackable devices can't be used for request-based dm */
 958	if (!tgt->type->iterate_devices ||
 959	    !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
 960		DMERR("table load rejected: including non-request-stackable devices");
 961		return -EINVAL;
 962	}
 963
 964	return 0;
 965}
 966
 967enum dm_queue_mode dm_table_get_type(struct dm_table *t)
 968{
 969	return t->type;
 970}
 971
 972struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
 973{
 974	return t->immutable_target_type;
 975}
 976
 977struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
 978{
 979	/* Immutable target is implicitly a singleton */
 980	if (t->num_targets > 1 ||
 981	    !dm_target_is_immutable(t->targets[0].type))
 982		return NULL;
 983
 984	return t->targets;
 985}
 986
 987struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
 988{
 989	struct dm_target *ti;
 990	unsigned i;
 991
 992	for (i = 0; i < dm_table_get_num_targets(t); i++) {
 993		ti = dm_table_get_target(t, i);
 994		if (dm_target_is_wildcard(ti->type))
 995			return ti;
 996	}
 997
 998	return NULL;
 999}
1000
1001bool dm_table_bio_based(struct dm_table *t)
1002{
1003	return __table_type_bio_based(dm_table_get_type(t));
1004}
1005
1006bool dm_table_request_based(struct dm_table *t)
1007{
1008	return __table_type_request_based(dm_table_get_type(t));
1009}
1010
1011static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1012{
1013	enum dm_queue_mode type = dm_table_get_type(t);
1014	unsigned per_io_data_size = 0;
1015	unsigned min_pool_size = 0;
1016	struct dm_target *ti;
1017	unsigned i;
1018
1019	if (unlikely(type == DM_TYPE_NONE)) {
1020		DMWARN("no table type is set, can't allocate mempools");
1021		return -EINVAL;
1022	}
1023
1024	if (__table_type_bio_based(type))
1025		for (i = 0; i < t->num_targets; i++) {
1026			ti = t->targets + i;
1027			per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
1028			min_pool_size = max(min_pool_size, ti->num_flush_bios);
1029		}
1030
1031	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
1032					   per_io_data_size, min_pool_size);
1033	if (!t->mempools)
1034		return -ENOMEM;
1035
1036	return 0;
1037}
1038
1039void dm_table_free_md_mempools(struct dm_table *t)
1040{
1041	dm_free_md_mempools(t->mempools);
1042	t->mempools = NULL;
1043}
1044
1045struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
1046{
1047	return t->mempools;
1048}
1049
1050static int setup_indexes(struct dm_table *t)
1051{
1052	int i;
1053	unsigned int total = 0;
1054	sector_t *indexes;
1055
1056	/* allocate the space for *all* the indexes */
1057	for (i = t->depth - 2; i >= 0; i--) {
1058		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1059		total += t->counts[i];
1060	}
1061
1062	indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
1063	if (!indexes)
1064		return -ENOMEM;
1065
1066	/* set up internal nodes, bottom-up */
1067	for (i = t->depth - 2; i >= 0; i--) {
1068		t->index[i] = indexes;
1069		indexes += (KEYS_PER_NODE * t->counts[i]);
1070		setup_btree_index(i, t);
1071	}
1072
1073	return 0;
1074}
1075
1076/*
1077 * Builds the btree to index the map.
1078 */
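/*
 * Example: with 64-byte cachelines and an 8-byte sector_t (so
 * KEYS_PER_NODE == 8 and CHILDREN_PER_NODE == 9), a table of 100 targets
 * has dm_div_up(100, 8) == 13 leaf nodes and depth 1 + int_log(13, 9) == 3.
 */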
1079static int dm_table_build_index(struct dm_table *t)
1080{
1081	int r = 0;
1082	unsigned int leaf_nodes;
1083
1084	/* how many indexes will the btree have ? */
1085	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1086	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1087
1088	/* leaf layer has already been set up */
1089	t->counts[t->depth - 1] = leaf_nodes;
1090	t->index[t->depth - 1] = t->highs;
1091
1092	if (t->depth >= 2)
1093		r = setup_indexes(t);
1094
1095	return r;
1096}
1097
1098static bool integrity_profile_exists(struct gendisk *disk)
1099{
1100	return !!blk_get_integrity(disk);
1101}
1102
1103/*
1104 * Get a disk whose integrity profile reflects the table's profile.
1105 * Returns NULL if integrity support was inconsistent or unavailable.
1106 */
1107static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
1108{
1109	struct list_head *devices = dm_table_get_devices(t);
1110	struct dm_dev_internal *dd = NULL;
1111	struct gendisk *prev_disk = NULL, *template_disk = NULL;
1112	unsigned i;
1113
1114	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1115		struct dm_target *ti = dm_table_get_target(t, i);
1116		if (!dm_target_passes_integrity(ti->type))
1117			goto no_integrity;
1118	}
1119
1120	list_for_each_entry(dd, devices, list) {
1121		template_disk = dd->dm_dev->bdev->bd_disk;
1122		if (!integrity_profile_exists(template_disk))
1123			goto no_integrity;
1124		else if (prev_disk &&
1125			 blk_integrity_compare(prev_disk, template_disk) < 0)
1126			goto no_integrity;
1127		prev_disk = template_disk;
1128	}
1129
1130	return template_disk;
1131
1132no_integrity:
1133	if (prev_disk)
1134		DMWARN("%s: integrity not set: %s and %s profile mismatch",
1135		       dm_device_name(t->md),
1136		       prev_disk->disk_name,
1137		       template_disk->disk_name);
1138	return NULL;
1139}
1140
1141/*
1142 * Register the mapped device for blk_integrity support if the
1143 * underlying devices have an integrity profile.  But all devices may
1144 * not have matching profiles (checking all devices isn't reliable
1145 * during table load because this table may use other DM device(s) which
1146 * must be resumed before they will have an initialized integrity
1147 * profile).  Consequently, stacked DM devices force a two-stage integrity
1148 * profile validation: first pass during table load, final pass during
1149 * resume.
1150 */
1151static int dm_table_register_integrity(struct dm_table *t)
1152{
1153	struct mapped_device *md = t->md;
1154	struct gendisk *template_disk = NULL;
1155
1156	/* If target handles integrity itself do not register it here. */
1157	if (t->integrity_added)
1158		return 0;
1159
1160	template_disk = dm_table_get_integrity_disk(t);
1161	if (!template_disk)
1162		return 0;
1163
1164	if (!integrity_profile_exists(dm_disk(md))) {
1165		t->integrity_supported = true;
1166		/*
1167		 * Register integrity profile during table load; we can do
1168		 * this because the final profile must match during resume.
1169		 */
1170		blk_integrity_register(dm_disk(md),
1171				       blk_get_integrity(template_disk));
1172		return 0;
1173	}
1174
1175	/*
1176	 * If DM device already has an initialized integrity
1177	 * profile the new profile should not conflict.
1178	 */
1179	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
1180		DMWARN("%s: conflict with existing integrity profile: "
1181		       "%s profile mismatch",
1182		       dm_device_name(t->md),
1183		       template_disk->disk_name);
1184		return 1;
1185	}
1186
1187	/* Preserve existing integrity profile */
1188	t->integrity_supported = true;
1189	return 0;
1190}
1191
1192#ifdef CONFIG_BLK_INLINE_ENCRYPTION
1193
1194struct dm_keyslot_manager {
1195	struct blk_keyslot_manager ksm;
1196	struct mapped_device *md;
1197};
1198
1199struct dm_keyslot_evict_args {
1200	const struct blk_crypto_key *key;
1201	int err;
1202};
1203
1204static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
1205				     sector_t start, sector_t len, void *data)
1206{
1207	struct dm_keyslot_evict_args *args = data;
1208	int err;
1209
1210	err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
1211	if (!args->err)
1212		args->err = err;
1213	/* Always try to evict the key from all devices. */
1214	return 0;
1215}
1216
1217/*
1218 * When an inline encryption key is evicted from a device-mapper device, evict
1219 * it from all the underlying devices.
1220 */
1221static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
1222			    const struct blk_crypto_key *key, unsigned int slot)
1223{
1224	struct dm_keyslot_manager *dksm = container_of(ksm,
1225						       struct dm_keyslot_manager,
1226						       ksm);
1227	struct mapped_device *md = dksm->md;
1228	struct dm_keyslot_evict_args args = { key };
1229	struct dm_table *t;
1230	int srcu_idx;
1231	int i;
1232	struct dm_target *ti;
1233
1234	t = dm_get_live_table(md, &srcu_idx);
1235	if (!t)
1236		return 0;
1237	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1238		ti = dm_table_get_target(t, i);
1239		if (!ti->type->iterate_devices)
1240			continue;
1241		ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
1242	}
1243	dm_put_live_table(md, srcu_idx);
1244	return args.err;
1245}
1246
1247static const struct blk_ksm_ll_ops dm_ksm_ll_ops = {
1248	.keyslot_evict = dm_keyslot_evict,
1249};
1250
1251static int device_intersect_crypto_modes(struct dm_target *ti,
1252					 struct dm_dev *dev, sector_t start,
1253					 sector_t len, void *data)
1254{
1255	struct blk_keyslot_manager *parent = data;
1256	struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
1257
1258	blk_ksm_intersect_modes(parent, child);
1259	return 0;
1260}
1261
1262void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
1263{
1264	struct dm_keyslot_manager *dksm = container_of(ksm,
1265						       struct dm_keyslot_manager,
1266						       ksm);
1267
1268	if (!ksm)
1269		return;
1270
1271	blk_ksm_destroy(ksm);
1272	kfree(dksm);
1273}
1274
1275static void dm_table_destroy_keyslot_manager(struct dm_table *t)
1276{
1277	dm_destroy_keyslot_manager(t->ksm);
1278	t->ksm = NULL;
1279}
1280
1281/*
1282 * Constructs and initializes t->ksm with a keyslot manager that
1283 * represents the common set of crypto capabilities of the devices
1284 * described by the dm_table. However, if the constructed keyslot
1285 * manager does not support a superset of the crypto capabilities
1286 * supported by the current keyslot manager of the mapped_device,
1287 * it returns an error instead, since we don't support restricting
1288 * crypto capabilities on table changes. Finally, if the constructed
1289 * keyslot manager doesn't actually support any crypto modes at all,
1290 * it just returns NULL.
1291 */
1292static int dm_table_construct_keyslot_manager(struct dm_table *t)
1293{
1294	struct dm_keyslot_manager *dksm;
1295	struct blk_keyslot_manager *ksm;
1296	struct dm_target *ti;
1297	unsigned int i;
1298	bool ksm_is_empty = true;
1299
1300	dksm = kmalloc(sizeof(*dksm), GFP_KERNEL);
1301	if (!dksm)
1302		return -ENOMEM;
1303	dksm->md = t->md;
1304
1305	ksm = &dksm->ksm;
1306	blk_ksm_init_passthrough(ksm);
1307	ksm->ksm_ll_ops = dm_ksm_ll_ops;
1308	ksm->max_dun_bytes_supported = UINT_MAX;
1309	memset(ksm->crypto_modes_supported, 0xFF,
1310	       sizeof(ksm->crypto_modes_supported));
1311
1312	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1313		ti = dm_table_get_target(t, i);
1314
1315		if (!dm_target_passes_crypto(ti->type)) {
1316			blk_ksm_intersect_modes(ksm, NULL);
1317			break;
1318		}
1319		if (!ti->type->iterate_devices)
1320			continue;
1321		ti->type->iterate_devices(ti, device_intersect_crypto_modes,
1322					  ksm);
1323	}
1324
1325	if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) {
1326		DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
1327		dm_destroy_keyslot_manager(ksm);
1328		return -EINVAL;
1329	}
1330
1331	/*
1332	 * If the new KSM doesn't actually support any crypto modes, we may as
1333	 * well represent it with a NULL ksm.
1334	 */
1335	ksm_is_empty = true;
1336	for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) {
1337		if (ksm->crypto_modes_supported[i]) {
1338			ksm_is_empty = false;
1339			break;
1340		}
1341	}
1342
1343	if (ksm_is_empty) {
1344		dm_destroy_keyslot_manager(ksm);
1345		ksm = NULL;
1346	}
1347
1348	/*
1349	 * t->ksm is only set temporarily while the table is being set
1350	 * up, and it gets set to NULL after the capabilities have
1351	 * been transferred to the request_queue.
1352	 */
1353	t->ksm = ksm;
1354
1355	return 0;
1356}
1357
1358static void dm_update_keyslot_manager(struct request_queue *q,
1359				      struct dm_table *t)
1360{
1361	if (!t->ksm)
1362		return;
1363
1364	/* Make the ksm less restrictive */
1365	if (!q->ksm) {
1366		blk_ksm_register(t->ksm, q);
1367	} else {
1368		blk_ksm_update_capabilities(q->ksm, t->ksm);
1369		dm_destroy_keyslot_manager(t->ksm);
1370	}
1371	t->ksm = NULL;
1372}
1373
1374#else /* CONFIG_BLK_INLINE_ENCRYPTION */
1375
1376static int dm_table_construct_keyslot_manager(struct dm_table *t)
1377{
1378	return 0;
1379}
1380
1381void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
1382{
1383}
1384
1385static void dm_table_destroy_keyslot_manager(struct dm_table *t)
1386{
1387}
1388
1389static void dm_update_keyslot_manager(struct request_queue *q,
1390				      struct dm_table *t)
1391{
1392}
1393
1394#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1395
1396/*
1397 * Prepares the table for use by building the indices,
1398 * setting the type, and allocating mempools.
1399 */
1400int dm_table_complete(struct dm_table *t)
1401{
1402	int r;
1403
1404	r = dm_table_determine_type(t);
1405	if (r) {
1406		DMERR("unable to determine table type");
1407		return r;
1408	}
1409
1410	r = dm_table_build_index(t);
1411	if (r) {
1412		DMERR("unable to build btrees");
1413		return r;
1414	}
1415
1416	r = dm_table_register_integrity(t);
1417	if (r) {
1418		DMERR("could not register integrity profile.");
1419		return r;
1420	}
1421
1422	r = dm_table_construct_keyslot_manager(t);
1423	if (r) {
1424		DMERR("could not construct keyslot manager.");
1425		return r;
1426	}
1427
1428	r = dm_table_alloc_md_mempools(t, t->md);
1429	if (r)
1430		DMERR("unable to allocate mempools");
1431
1432	return r;
1433}
1434
1435static DEFINE_MUTEX(_event_lock);
1436void dm_table_event_callback(struct dm_table *t,
1437			     void (*fn)(void *), void *context)
1438{
1439	mutex_lock(&_event_lock);
1440	t->event_fn = fn;
1441	t->event_context = context;
1442	mutex_unlock(&_event_lock);
1443}
1444
1445void dm_table_event(struct dm_table *t)
1446{
1447	mutex_lock(&_event_lock);
1448	if (t->event_fn)
1449		t->event_fn(t->event_context);
1450	mutex_unlock(&_event_lock);
1451}
1452EXPORT_SYMBOL(dm_table_event);
1453
1454inline sector_t dm_table_get_size(struct dm_table *t)
1455{
1456	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1457}
1458EXPORT_SYMBOL(dm_table_get_size);
1459
1460struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1461{
1462	if (index >= t->num_targets)
1463		return NULL;
1464
1465	return t->targets + index;
1466}
1467
1468/*
1469 * Search the btree for the correct target.
1470 *
1471 * Caller should check returned pointer for NULL
1472 * to trap I/O beyond end of device.
1473 */
1474struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1475{
1476	unsigned int l, n = 0, k = 0;
1477	sector_t *node;
1478
1479	if (unlikely(sector >= dm_table_get_size(t)))
1480		return NULL;
1481
1482	for (l = 0; l < t->depth; l++) {
1483		n = get_child(n, k);
1484		node = get_node(t, l, n);
1485
1486		for (k = 0; k < KEYS_PER_NODE; k++)
1487			if (node[k] >= sector)
1488				break;
1489	}
1490
1491	return &t->targets[(KEYS_PER_NODE * n) + k];
1492}
1493
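/*
 * dm_table_find_target() above descends one node per level, scanning each
 * node for the first key >= sector and following the matching child; the
 * final (n, k) pair indexes the leaf level, which is t->highs itself.
 */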
1494/*
1495 * type->iterate_devices() should be called when a sanity check needs to
1496 * iterate and check all underlying data devices. iterate_devices() will
1497 * iterate all underlying data devices until it encounters a non-zero return
1498 * code, returned either by the given iterate_devices_callout_fn or by
1499 * iterate_devices() itself internally.
1500 *
1501 * For some target types (e.g. dm-stripe), one call of iterate_devices() may
1502 * iterate multiple underlying devices internally, in which case a non-zero
1503 * return code from iterate_devices_callout_fn will stop the iteration
1504 * early.
1505 *
1506 * Cases requiring _any_ underlying device to support some kind of attribute
1507 * should use an iteration structure like dm_table_any_dev_attr(), or call
1508 * it directly. @func should handle the semantics of positive examples, e.g.
1509 * capable of something.
1510 *
1511 * Cases requiring _all_ underlying devices to support some kind of attribute
1512 * should use an iteration structure like dm_table_supports_nowait() or
1513 * dm_table_supports_discards(), or introduce a dm_table_all_devs_attr() that
1514 * uses an @anti_func handling the semantics of counterexamples, e.g. not
1515 * capable of something, and then return !dm_table_any_dev_attr(t, anti_func, data);
1516 */
1517static bool dm_table_any_dev_attr(struct dm_table *t,
1518				  iterate_devices_callout_fn func, void *data)
1519{
1520	struct dm_target *ti;
1521	unsigned int i;
1522
1523	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1524		ti = dm_table_get_target(t, i);
1525
1526		if (ti->type->iterate_devices &&
1527		    ti->type->iterate_devices(ti, func, data))
1528			return true;
1529	}
1530
1531	return false;
1532}
1533
1534static int count_device(struct dm_target *ti, struct dm_dev *dev,
1535			sector_t start, sector_t len, void *data)
1536{
1537	unsigned *num_devices = data;
1538
1539	(*num_devices)++;
1540
1541	return 0;
1542}
1543
1544/*
1545 * Check whether a table has no data devices attached using each
1546 * target's iterate_devices method.
1547 * Returns false if the result is unknown because a target doesn't
1548 * support iterate_devices.
1549 */
1550bool dm_table_has_no_data_devices(struct dm_table *table)
1551{
1552	struct dm_target *ti;
1553	unsigned i, num_devices;
1554
1555	for (i = 0; i < dm_table_get_num_targets(table); i++) {
1556		ti = dm_table_get_target(table, i);
1557
1558		if (!ti->type->iterate_devices)
1559			return false;
1560
1561		num_devices = 0;
1562		ti->type->iterate_devices(ti, count_device, &num_devices);
1563		if (num_devices)
1564			return false;
1565	}
1566
1567	return true;
1568}
1569
1570static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1571				  sector_t start, sector_t len, void *data)
1572{
1573	struct request_queue *q = bdev_get_queue(dev->bdev);
1574	enum blk_zoned_model *zoned_model = data;
1575
1576	return blk_queue_zoned_model(q) != *zoned_model;
1577}
1578
1579/*
1580 * Check the device zoned model based on the target feature flag. If the target
1581 * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
1582 * also accepted but all devices must have the same zoned model. If the target
1583 * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
1584 * zoned model with all zoned devices having the same zone size.
1585 */
1586static bool dm_table_supports_zoned_model(struct dm_table *t,
1587					  enum blk_zoned_model zoned_model)
1588{
1589	struct dm_target *ti;
1590	unsigned i;
1591
1592	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1593		ti = dm_table_get_target(t, i);
1594
1595		if (dm_target_supports_zoned_hm(ti->type)) {
1596			if (!ti->type->iterate_devices ||
1597			    ti->type->iterate_devices(ti, device_not_zoned_model,
1598						      &zoned_model))
1599				return false;
1600		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
1601			if (zoned_model == BLK_ZONED_HM)
1602				return false;
1603		}
1604	}
1605
1606	return true;
1607}
1608
1609static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
1610					   sector_t start, sector_t len, void *data)
1611{
1612	struct request_queue *q = bdev_get_queue(dev->bdev);
1613	unsigned int *zone_sectors = data;
1614
1615	if (!blk_queue_is_zoned(q))
1616		return 0;
1617
1618	return blk_queue_zone_sectors(q) != *zone_sectors;
1619}
1620
1621/*
1622 * Check consistency of zoned model and zone sectors across all targets. For
1623 * zone sectors, if the destination device is a zoned block device, it shall
1624 * have the specified zone_sectors.
1625 */
1626static int validate_hardware_zoned_model(struct dm_table *table,
1627					 enum blk_zoned_model zoned_model,
1628					 unsigned int zone_sectors)
1629{
1630	if (zoned_model == BLK_ZONED_NONE)
1631		return 0;
1632
1633	if (!dm_table_supports_zoned_model(table, zoned_model)) {
1634		DMERR("%s: zoned model is not consistent across all devices",
1635		      dm_device_name(table->md));
1636		return -EINVAL;
1637	}
1638
1639	/* Check zone size validity and compatibility */
1640	if (!zone_sectors || !is_power_of_2(zone_sectors))
1641		return -EINVAL;
1642
1643	if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
1644		DMERR("%s: zone sectors is not consistent across all zoned devices",
1645		      dm_device_name(table->md));
1646		return -EINVAL;
1647	}
1648
1649	return 0;
1650}
1651
1652/*
1653 * Establish the new table's queue_limits and validate them.
1654 */
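/*
 * Each target starts from blk_set_stacking_limits() defaults, merges in
 * the limits of its data devices via dm_set_device_limits(), applies any
 * ->io_hints override, and is then folded into the table-wide limits with
 * blk_stack_limits().
 */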
1655int dm_calculate_queue_limits(struct dm_table *table,
1656			      struct queue_limits *limits)
1657{
1658	struct dm_target *ti;
1659	struct queue_limits ti_limits;
1660	unsigned i;
1661	enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
1662	unsigned int zone_sectors = 0;
1663
1664	blk_set_stacking_limits(limits);
1665
1666	for (i = 0; i < dm_table_get_num_targets(table); i++) {
1667		blk_set_stacking_limits(&ti_limits);
1668
1669		ti = dm_table_get_target(table, i);
1670
1671		if (!ti->type->iterate_devices)
1672			goto combine_limits;
1673
1674		/*
1675		 * Combine queue limits of all the devices this target uses.
1676		 */
1677		ti->type->iterate_devices(ti, dm_set_device_limits,
1678					  &ti_limits);
1679
1680		if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1681			/*
1682			 * After stacking all limits, validate all devices
1683			 * in table support this zoned model and zone sectors.
1684			 */
1685			zoned_model = ti_limits.zoned;
1686			zone_sectors = ti_limits.chunk_sectors;
1687		}
1688
1689		/* Set I/O hints portion of queue limits */
1690		if (ti->type->io_hints)
1691			ti->type->io_hints(ti, &ti_limits);
1692
1693		/*
1694		 * Check each device area is consistent with the target's
1695		 * overall queue limits.
1696		 */
1697		if (ti->type->iterate_devices(ti, device_area_is_invalid,
1698					      &ti_limits))
1699			return -EINVAL;
1700
1701combine_limits:
1702		/*
1703		 * Merge this target's queue limits into the overall limits
1704		 * for the table.
1705		 */
1706		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1707			DMWARN("%s: adding target device "
1708			       "(start sect %llu len %llu) "
1709			       "caused an alignment inconsistency",
1710			       dm_device_name(table->md),
1711			       (unsigned long long) ti->begin,
1712			       (unsigned long long) ti->len);
1713	}
1714
1715	/*
1716	 * Verify that the zoned model and zone sectors, as determined before
1717	 * any .io_hints override, are the same across all devices in the table.
1718	 * - this is especially relevant if .io_hints is emulating a disk-managed
1719	 *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
1720	 * BUT...
1721	 */
1722	if (limits->zoned != BLK_ZONED_NONE) {
1723		/*
1724		 * ...IF the above limits stacking determined a zoned model,
1725		 * validate that all of the table's devices conform to it.
1726		 */
1727		zoned_model = limits->zoned;
1728		zone_sectors = limits->chunk_sectors;
1729	}
1730	if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
1731		return -EINVAL;
1732
1733	return validate_hardware_logical_block_alignment(table, limits);
1734}
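
/*
 * Illustrative sketch, not part of this file: the stacking the loop
 * above performs, reduced to two queue_limits.  blk_stack_limits()
 * combines limits conservatively (e.g. a 512-byte and a 4096-byte
 * logical_block_size stack to 4096) and returns a negative value on an
 * alignment inconsistency, which is what triggers the DMWARN() above.
 */
static inline int example_stack_two_limits(struct queue_limits *a,
					   struct queue_limits *b)
{
	struct queue_limits combined;

	blk_set_stacking_limits(&combined);
	if (blk_stack_limits(&combined, a, 0) < 0)
		return -EINVAL;
	return blk_stack_limits(&combined, b, 0) < 0 ? -EINVAL : 0;
}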
1735
1736/*
1737 * Verify that all devices have an integrity profile that matches the
1738 * DM device's registered integrity profile.  If the profiles don't
1739 * match then unregister the DM device's integrity profile.
1740 */
1741static void dm_table_verify_integrity(struct dm_table *t)
1742{
1743	struct gendisk *template_disk = NULL;
1744
1745	if (t->integrity_added)
1746		return;
1747
1748	if (t->integrity_supported) {
1749		/*
1750		 * Verify that the original integrity profile
1751		 * matches all the devices in this table.
1752		 */
1753		template_disk = dm_table_get_integrity_disk(t);
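		/*
		 * blk_integrity_compare() returns 0 when the two profiles
		 * match and a negative value on mismatch, so >= 0 below
		 * means the registered profile can be kept.
		 */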
1754		if (template_disk &&
1755		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
1756			return;
1757	}
1758
1759	if (integrity_profile_exists(dm_disk(t->md))) {
1760		DMWARN("%s: unable to establish an integrity profile",
1761		       dm_device_name(t->md));
1762		blk_integrity_unregister(dm_disk(t->md));
1763	}
1764}
1765
1766static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1767				sector_t start, sector_t len, void *data)
1768{
1769	unsigned long flush = (unsigned long) data;
1770	struct request_queue *q = bdev_get_queue(dev->bdev);
1771
1772	return (q->queue_flags & flush);
1773}
1774
1775static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1776{
1777	struct dm_target *ti;
1778	unsigned i;
1779
1780	/*
1781	 * Require at least one underlying device to support flushes.
1782	 * t->devices includes internal dm devices such as mirror logs
1783	 * so we need to use iterate_devices here, which targets
1784	 * supporting flushes must provide.
1785	 */
1786	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1787		ti = dm_table_get_target(t, i);
1788
1789		if (!ti->num_flush_bios)
1790			continue;
1791
1792		if (ti->flush_supported)
1793			return true;
1794
1795		if (ti->type->iterate_devices &&
1796		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
1797			return true;
1798	}
1799
1800	return false;
1801}
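
/*
 * Illustrative sketch, not part of this file: how a target typically
 * opts in to the check above.  The .ctr of a hypothetical target asks
 * dm core to send it flushes via num_flush_bios, and may additionally
 * set flush_supported when it handles flushes itself regardless of
 * what the underlying devices report.
 */
static int example_flush_target_ctr(struct dm_target *ti,
				    unsigned int argc, char **argv)
{
	ti->num_flush_bios = 1;		/* accept REQ_PREFLUSH bios */
	ti->flush_supported = true;	/* short-circuits the device scan */
	return 0;
}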
1802
1803static int device_dax_write_cache_enabled(struct dm_target *ti,
1804					  struct dm_dev *dev, sector_t start,
1805					  sector_t len, void *data)
1806{
1807	struct dax_device *dax_dev = dev->dax_dev;
1808
1809	if (!dax_dev)
1810		return false;
1811
1812	if (dax_write_cache_enabled(dax_dev))
1813		return true;
1814	return false;
1815}
1816
1817static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
1818				sector_t start, sector_t len, void *data)
1819{
1820	struct request_queue *q = bdev_get_queue(dev->bdev);
1821
1822	return !blk_queue_nonrot(q);
1823}
1824
1825static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1826			     sector_t start, sector_t len, void *data)
1827{
1828	struct request_queue *q = bdev_get_queue(dev->bdev);
1829
1830	return !blk_queue_add_random(q);
1831}
1832
1833static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
1834					 sector_t start, sector_t len, void *data)
1835{
1836	struct request_queue *q = bdev_get_queue(dev->bdev);
1837
1838	return !q->limits.max_write_same_sectors;
1839}
1840
1841static bool dm_table_supports_write_same(struct dm_table *t)
1842{
1843	struct dm_target *ti;
1844	unsigned i;
1845
1846	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1847		ti = dm_table_get_target(t, i);
1848
1849		if (!ti->num_write_same_bios)
1850			return false;
1851
1852		if (!ti->type->iterate_devices ||
1853		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1854			return false;
1855	}
1856
1857	return true;
1858}
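
/*
 * Note on the device_not_*_capable() helpers above and below: a
 * target's ->iterate_devices() stops and returns nonzero as soon as the
 * callback returns nonzero for one device.  Phrasing the predicate
 * negatively therefore detects "at least one device lacks the feature",
 * which is exactly what these table-wide checks need.
 */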
1859
1860static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1861					   sector_t start, sector_t len, void *data)
1862{
1863	struct request_queue *q = bdev_get_queue(dev->bdev);
1864
1865	return !q->limits.max_write_zeroes_sectors;
1866}
1867
1868static bool dm_table_supports_write_zeroes(struct dm_table *t)
1869{
1870	struct dm_target *ti;
1871	unsigned i = 0;
1872
1873	while (i < dm_table_get_num_targets(t)) {
1874		ti = dm_table_get_target(t, i++);
1875
1876		if (!ti->num_write_zeroes_bios)
1877			return false;
1878
1879		if (!ti->type->iterate_devices ||
1880		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1881			return false;
1882	}
1883
1884	return true;
1885}
1886
1887static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
1888				     sector_t start, sector_t len, void *data)
1889{
1890	struct request_queue *q = bdev_get_queue(dev->bdev);
1891
1892	return !blk_queue_nowait(q);
1893}
1894
1895static bool dm_table_supports_nowait(struct dm_table *t)
1896{
1897	struct dm_target *ti;
1898	unsigned i = 0;
1899
1900	while (i < dm_table_get_num_targets(t)) {
1901		ti = dm_table_get_target(t, i++);
1902
1903		if (!dm_target_supports_nowait(ti->type))
1904			return false;
1905
1906		if (!ti->type->iterate_devices ||
1907		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
1908			return false;
1909	}
1910
1911	return true;
1912}
1913
1914static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1915				      sector_t start, sector_t len, void *data)
1916{
1917	struct request_queue *q = bdev_get_queue(dev->bdev);
1918
1919	return !blk_queue_discard(q);
1920}
1921
1922static bool dm_table_supports_discards(struct dm_table *t)
1923{
1924	struct dm_target *ti;
1925	unsigned i;
1926
1927	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1928		ti = dm_table_get_target(t, i);
1929
1930		if (!ti->num_discard_bios)
1931			return false;
1932
1933		/*
1934		 * Either the target provides discard support (as implied by setting
1935		 * 'discards_supported') or it relies on _all_ data devices having
1936		 * discard support.
1937		 */
1938		if (!ti->discards_supported &&
1939		    (!ti->type->iterate_devices ||
1940		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1941			return false;
1942	}
1943
1944	return true;
1945}
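
/*
 * Illustrative sketch, not part of this file: the two ways a target can
 * satisfy the check above.  A hypothetical target either forwards
 * discards and relies on every underlying device supporting them, or
 * also sets discards_supported because it can complete discards itself
 * (for instance by only updating its own metadata).
 */
static int example_discard_target_ctr(struct dm_target *ti,
				      unsigned int argc, char **argv)
{
	ti->num_discard_bios = 1;	/* accept REQ_OP_DISCARD bios */
	ti->discards_supported = true;	/* don't require it of every device */
	return 0;
}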
1946
1947static int device_not_secure_erase_capable(struct dm_target *ti,
1948					   struct dm_dev *dev, sector_t start,
1949					   sector_t len, void *data)
1950{
1951	struct request_queue *q = bdev_get_queue(dev->bdev);
1952
1953	return !blk_queue_secure_erase(q);
1954}
1955
1956static bool dm_table_supports_secure_erase(struct dm_table *t)
1957{
1958	struct dm_target *ti;
1959	unsigned int i;
1960
1961	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1962		ti = dm_table_get_target(t, i);
1963
1964		if (!ti->num_secure_erase_bios)
1965			return false;
1966
1967		if (!ti->type->iterate_devices ||
1968		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1969			return false;
1970	}
1971
1972	return true;
1973}
1974
1975static int device_requires_stable_pages(struct dm_target *ti,
1976					struct dm_dev *dev, sector_t start,
1977					sector_t len, void *data)
1978{
1979	struct request_queue *q = bdev_get_queue(dev->bdev);
1980
1981	return blk_queue_stable_writes(q);
1982}
1983
1984int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1985			      struct queue_limits *limits)
1986{
1987	bool wc = false, fua = false;
1988	int page_size = PAGE_SIZE;
1989	int r;
1990
1991	/*
1992	 * Copy table's limits to the DM device's request_queue
1993	 */
1994	q->limits = *limits;
1995
1996	if (dm_table_supports_nowait(t))
1997		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
1998	else
1999		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
2000
2001	if (!dm_table_supports_discards(t)) {
2002		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
2003		/* Must also clear discard limits... */
2004		q->limits.max_discard_sectors = 0;
2005		q->limits.max_hw_discard_sectors = 0;
2006		q->limits.discard_granularity = 0;
2007		q->limits.discard_alignment = 0;
2008		q->limits.discard_misaligned = 0;
2009	} else
2010		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
2011
2012	if (dm_table_supports_secure_erase(t))
2013		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
2014
2015	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
2016		wc = true;
2017		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
2018			fua = true;
2019	}
2020	blk_queue_write_cache(q, wc, fua);
2021
2022	if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
2023		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
2024		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
2025			set_dax_synchronous(t->md->dax_dev);
2026	} else
2027		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
2029
2030	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
2031		dax_write_cache(t->md->dax_dev, true);
2032
2033	/* Mark the queue non-rotational only if all underlying devices are. */
2034	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
2035		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2036	else
2037		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2038
2039	if (!dm_table_supports_write_same(t))
2040		q->limits.max_write_same_sectors = 0;
2041	if (!dm_table_supports_write_zeroes(t))
2042		q->limits.max_write_zeroes_sectors = 0;
2043
2044	dm_table_verify_integrity(t);
2045
2046	/*
2047	 * Some devices don't use blk_integrity but still want stable pages
2048	 * because they do their own checksumming.
2049	 * If any underlying device requires stable pages, a table must require
2050	 * them as well.  Only targets that support iterate_devices are considered:
2051	 * we don't want targets such as error or zero to require stable pages.
2052	 */
2053	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
2054		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
2055	else
2056		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
2057
2058	/*
2059	 * Determine whether or not this queue's I/O timings contribute
2060	 * to the entropy pool.  Only request-based targets use this.
2061	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
2062	 * have it set.
2063	 */
2064	if (blk_queue_add_random(q) &&
2065	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
2066		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2067
2068	/*
2069	 * For a zoned target, set up the zone-related queue attributes
2070	 * and, if needed, the resources for zone append emulation.
2071	 */
2072	if (blk_queue_is_zoned(q)) {
2073		r = dm_set_zones_restrictions(t, q);
2074		if (r)
2075			return r;
2076	}
2077
2078	dm_update_keyslot_manager(q, t);
2079	blk_queue_update_readahead(q);
2080
2081	return 0;
2082}
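
/*
 * Note: most capability flags above are both set and cleared rather
 * than only set, because a table reload may replace a table that had a
 * capability with one that does not; the queue flags must track the
 * live table instead of accumulating.
 */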
2083
2084unsigned int dm_table_get_num_targets(struct dm_table *t)
2085{
2086	return t->num_targets;
2087}
2088
2089struct list_head *dm_table_get_devices(struct dm_table *t)
2090{
2091	return &t->devices;
2092}
2093
2094fmode_t dm_table_get_mode(struct dm_table *t)
2095{
2096	return t->mode;
2097}
2098EXPORT_SYMBOL(dm_table_get_mode);
2099
2100enum suspend_mode {
2101	PRESUSPEND,
2102	PRESUSPEND_UNDO,
2103	POSTSUSPEND,
2104};
2105
2106static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
2107{
2108	int i = t->num_targets;
2109	struct dm_target *ti = t->targets;
2110
2111	lockdep_assert_held(&t->md->suspend_lock);
2112
2113	while (i--) {
2114		switch (mode) {
2115		case PRESUSPEND:
2116			if (ti->type->presuspend)
2117				ti->type->presuspend(ti);
2118			break;
2119		case PRESUSPEND_UNDO:
2120			if (ti->type->presuspend_undo)
2121				ti->type->presuspend_undo(ti);
2122			break;
2123		case POSTSUSPEND:
2124			if (ti->type->postsuspend)
2125				ti->type->postsuspend(ti);
2126			break;
2127		}
2128		ti++;
2129	}
2130}
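
/*
 * Illustrative sketch, not part of this file: the hooks the switch
 * above dispatches to, as a hypothetical target would declare them in
 * its struct target_type (.presuspend, .postsuspend).  presuspend_undo
 * is only called when an earlier presuspend pass must be rolled back.
 */
static void example_presuspend(struct dm_target *ti)
{
	/* stop generating target-internal I/O, e.g. idle a worker */
}

static void example_postsuspend(struct dm_target *ti)
{
	/* all I/O has drained; safe to commit metadata, etc. */
}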
2131
2132void dm_table_presuspend_targets(struct dm_table *t)
2133{
2134	if (!t)
2135		return;
2136
2137	suspend_targets(t, PRESUSPEND);
2138}
2139
2140void dm_table_presuspend_undo_targets(struct dm_table *t)
2141{
2142	if (!t)
2143		return;
2144
2145	suspend_targets(t, PRESUSPEND_UNDO);
2146}
2147
2148void dm_table_postsuspend_targets(struct dm_table *t)
2149{
2150	if (!t)
2151		return;
2152
2153	suspend_targets(t, POSTSUSPEND);
2154}
2155
2156int dm_table_resume_targets(struct dm_table *t)
2157{
2158	int i, r = 0;
2159
2160	lockdep_assert_held(&t->md->suspend_lock);
2161
2162	for (i = 0; i < t->num_targets; i++) {
2163		struct dm_target *ti = t->targets + i;
2164
2165		if (!ti->type->preresume)
2166			continue;
2167
2168		r = ti->type->preresume(ti);
2169		if (r) {
2170			DMERR("%s: %s: preresume failed, error = %d",
2171			      dm_device_name(t->md), ti->type->name, r);
2172			return r;
2173		}
2174	}
2175
2176	for (i = 0; i < t->num_targets; i++) {
2177		struct dm_target *ti = t->targets + i;
2178
2179		if (ti->type->resume)
2180			ti->type->resume(ti);
2181	}
2182
2183	return 0;
2184}
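
/*
 * Note the two-pass structure above: every target's ->preresume must
 * succeed before any target's ->resume is called, so one failing
 * preresume aborts the resume of the whole table without leaving some
 * targets resumed and others not.
 */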
2185
2186struct mapped_device *dm_table_get_md(struct dm_table *t)
2187{
2188	return t->md;
2189}
2190EXPORT_SYMBOL(dm_table_get_md);
2191
2192const char *dm_table_device_name(struct dm_table *t)
2193{
2194	return dm_device_name(t->md);
2195}
2196EXPORT_SYMBOL_GPL(dm_table_device_name);
2197
2198void dm_table_run_md_queue_async(struct dm_table *t)
2199{
2200	if (!dm_table_request_based(t))
2201		return;
2202
2203	if (t->md->queue)
2204		blk_mq_run_hw_queues(t->md->queue, true);
2205}
2206EXPORT_SYMBOL(dm_table_run_md_queue_async);
2207