// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
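
/*
 * Sizing example (illustrative): with 64-byte L1 cache lines and an
 * 8-byte sector_t, each btree node holds KEYS_PER_NODE = 8 keys and
 * fans out to CHILDREN_PER_NODE = 9 children, so a node fills exactly
 * one cache line.
 */
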
/*
 * Similar to ceiling(log_base(n))
 */
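/* e.g. int_log(1000, 10) == 3 and int_log(1001, 10) == 4 (illustrative). */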
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
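	 * The num sector_t highs come first, immediately followed by the
	 * num dm_target entries, so one kvcalloc() backs both arrays.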
	 */
	n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
			   GFP_KERNEL);
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	kvfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md)
{
	struct dm_table *t;

	if (num_targets > DM_MAX_TARGETS)
		return -EOVERFLOW;

	t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	init_rwsem(&t->devices_lock);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (!num_targets) {
		kfree(t);
		return -EOVERFLOW;
	}

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->type = DM_TYPE_NONE;
	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}

static void dm_table_destroy_crypto_profile(struct dm_table *t);

void dm_table_destroy(struct dm_table *t)
{
	if (!t)
		return;

	/* free the indexes */
	if (t->depth >= 2)
		kvfree(t->index[t->depth - 2]);

	/* free the targets */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->dtr)
			ti->type->dtr(ti);

		dm_put_target_type(ti->type);
	}

	kvfree(t->highs);

	/* free the device list */
	free_devices(&t->devices, t->md);

	dm_free_md_mempools(t->mempools);

	dm_table_destroy_crypto_profile(t);

	kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * If possible, this checks whether an area of a destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size = bdev_nr_sectors(bdev);
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu",
		      dm_device_name(ti->table->md), bdev,
		      (unsigned long long)start,
		      (unsigned long long)len,
		      (unsigned long long)dev_size);
		return 1;
	}

	/*
	 * If the target is mapped to zoned block device(s), check
	 * that the zones are not partially mapped.
	 */
	if (bdev_is_zoned(bdev)) {
		unsigned int zone_sectors = bdev_zone_sectors(bdev);

		if (start & (zone_sectors - 1)) {
			DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
			      dm_device_name(ti->table->md),
			      (unsigned long long)start,
			      zone_sectors, bdev);
			return 1;
		}

		/*
		 * Note: The last zone of a zoned block device may be smaller
		 * than other zones. So for a target mapping the end of a
		 * zoned block device with such a zone, len would not be zone
		 * aligned. We do not allow such a last smaller zone to be part
		 * of the mapping here to ensure that mappings with multiple
		 * devices do not end up with a smaller zone in the middle of
		 * the sector range.
		 */
		if (len & (zone_sectors - 1)) {
			DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
			      dm_device_name(ti->table->md),
			      (unsigned long long)len,
			      zone_sectors, bdev);
			return 1;
		}
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg",
		      dm_device_name(ti->table->md),
		      (unsigned long long)start,
		      limits->logical_block_size, bdev);
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg",
		      dm_device_name(ti->table->md),
		      (unsigned long long)len,
		      limits->logical_block_size, bdev);
		return 1;
	}

	return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently.
 */
static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;

	old_dev = dd->dm_dev;

	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 *
 * Note: the __ref annotation is because this function can call the __init
 * marked early_lookup_bdev when called during early boot code from dm-init.c.
 */
int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
		  struct dm_dev **result)
{
	int r;
	dev_t dev;
	unsigned int major, minor;
	char dummy;
	struct dm_dev_internal *dd;
	struct dm_table *t = ti->table;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		r = lookup_bdev(path, &dev);
#ifndef MODULE
		if (r && system_state < SYSTEM_RUNNING)
			r = early_lookup_bdev(path, &dev);
#endif
		if (r)
			return r;
	}
	if (dev == disk_devt(t->md->disk))
		return -EINVAL;

	down_write(&t->devices_lock);

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd) {
			r = -ENOMEM;
			goto unlock_ret_r;
		}

		r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
		if (r) {
			kfree(dd);
			goto unlock_ret_r;
		}

		refcount_set(&dd->count, 1);
		list_add(&dd->list, &t->devices);
		goto out;

	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			goto unlock_ret_r;
	}
	refcount_inc(&dd->count);
out:
	up_write(&t->devices_lock);
	*result = dd->dm_dev;
	return 0;

unlock_ret_r:
	up_write(&t->devices_lock);
	return r;
}
EXPORT_SYMBOL(dm_get_device);
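
/*
 * Usage example (illustrative, not from this file): a target ctr might do
 *
 *	r = dm_get_device(ti, "8:16", BLK_OPEN_READ | BLK_OPEN_WRITE, &dev);
 *
 * "8:16" is parsed directly as major:minor, while a path such as
 * "/dev/sdb" is resolved through lookup_bdev().
 */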

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %pg",
		       dm_device_name(ti->table->md), bdev);
		return 0;
	}

	if (blk_stack_limits(limits, &q->limits,
			get_start_sect(bdev) + start) < 0)
		DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdev,
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);
	return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct dm_table *t = ti->table;
	struct list_head *devices = &t->devices;
	struct dm_dev_internal *dd;

	down_write(&t->devices_lock);

	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMERR("%s: device %s not in table devices list",
		      dm_device_name(t->md), d->name);
		goto unlock_ret;
	}
	if (refcount_dec_and_test(&dd->count)) {
		dm_put_table_device(t->md, d);
		list_del(&dd->list);
		kfree(dd);
	}

unlock_ret:
	up_write(&t->devices_lock);
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *t, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!t->num_targets)
		return !ti->begin;

	prev = &t->targets[t->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned int *size, char **old_argv)
{
	char **argv;
	unsigned int new_size;
	gfp_t gfp;

	if (*size) {
		new_size = *size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
	if (argv && old_argv) {
		memcpy(argv, old_argv, *size * sizeof(*argv));
		*size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned int array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
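
/*
 * Example (illustrative): dm_split_args() on the string
 * "0 409600 linear /dev/sdb 0" yields argc == 5 with argv[2] == "linear".
 * A backslash escapes the following character, so "a\ b" becomes the
 * single token "a b".
 */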

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
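/*
 * Worked example (illustrative): with a 4096-byte logical block size
 * (8 sectors), a first target of len 7 leaves next_target_start == 7 and
 * remaining == 1, so an 8-sector block-aligned bio would straddle the
 * target boundary and the table is rejected below.
 */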
static int validate_hardware_logical_block_alignment(struct dm_table *t,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *ti;
	struct queue_limits ti_limits;
	unsigned int i;

	/*
	 * Check each entry in the table in turn.
	 */
	for (i = 0; i < t->num_targets; i++) {
		ti = dm_table_get_target(t, i);

		blk_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMERR("%s: table line %u (start sect %llu len %llu) "
		      "not aligned to h/w logical block size %u",
		      dm_device_name(t->md), i,
		      (unsigned long long) ti->begin,
		      (unsigned long long) ti->len,
		      limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *ti;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	ti = t->targets + t->num_targets;
	memset(ti, 0, sizeof(*ti));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	ti->type = dm_get_target_type(type);
	if (!ti->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(ti->type)) {
		if (t->num_targets) {
			ti->error = "singleton target type must appear alone in table";
			goto bad;
		}
		t->singleton = true;
	}

	if (dm_target_always_writeable(ti->type) &&
	    !(t->mode & BLK_OPEN_WRITE)) {
		ti->error = "target type may not be included in a read-only table";
		goto bad;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != ti->type) {
			ti->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
	} else if (dm_target_is_immutable(ti->type)) {
		if (t->num_targets) {
			ti->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
		t->immutable_target_type = ti->type;
	}

	if (dm_target_has_integrity(ti->type))
		t->integrity_added = 1;

	ti->table = t;
	ti->begin = start;
	ti->len = len;
	ti->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, ti)) {
		ti->error = "Gap in table";
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		ti->error = "couldn't split parameters";
		goto bad;
	}

	r = ti->type->ctr(ti, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = ti->begin + ti->len - 1;

	if (!ti->num_discard_bios && ti->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	if (ti->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
		static_branch_enable(&swap_bios_enabled);

	return 0;

 bad:
	DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r));
	dm_put_target_type(ti->type);
	return r;
}

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
			     unsigned int *value, char **error, unsigned int grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
	char dummy;

	if (!arg_str ||
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);
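
/*
 * Example (illustrative, assuming arg->min <= 2 <= arg->max): given the
 * argument set "2 sync nosync", dm_read_arg_group() parses value == 2 and
 * additionally checks that at least two arguments remain in the set for
 * the caller to consume.
 */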

const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);

static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
	return (table_type == DM_TYPE_BIO_BASED ||
		table_type == DM_TYPE_DAX_BIO_BASED);
}

static bool __table_type_request_based(enum dm_queue_mode table_type)
{
	return table_type == DM_TYPE_REQUEST_BASED;
}

void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
	t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

/* validate the dax capability of the target device span */
static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	if (dev->dax_dev)
		return false;

	DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
	return true;
}

/* Check devices support synchronous DAX */
static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
					      sector_t start, sector_t len, void *data)
{
	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
}

static bool dm_table_supports_dax(struct dm_table *t,
				  iterate_devices_callout_fn iterate_fn)
{
	/* Ensure that all targets support DAX. */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->direct_access)
			return false;

		if (dm_target_is_wildcard(ti->type) ||
		    !ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, iterate_fn, NULL))
			return false;
	}

	return true;
}

static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	/* request-based cannot stack on partitions! */
	if (bdev_is_partition(bdev))
		return false;

	return queue_is_mq(q);
}

static int dm_table_determine_type(struct dm_table *t)
{
	unsigned int bio_based = 0, request_based = 0, hybrid = 0;
	struct dm_target *ti;
	struct list_head *devices = dm_table_get_devices(t);
	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);

	if (t->type != DM_TYPE_NONE) {
		/* target already set the table's type */
		if (t->type == DM_TYPE_BIO_BASED) {
			/* possibly upgrade to a variant of bio-based */
			goto verify_bio_based;
		}
		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
		goto verify_rq_based;
	}

	for (unsigned int i = 0; i < t->num_targets; i++) {
		ti = dm_table_get_target(t, i);
		if (dm_target_hybrid(ti))
			hybrid = 1;
		else if (dm_target_request_based(ti))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMERR("Inconsistent table: different target types can't be mixed up");
			return -EINVAL;
		}
	}

	if (hybrid && !bio_based && !request_based) {
		/*
		 * The targets can work either way.
		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
		if (__table_type_request_based(live_md_type))
			request_based = 1;
		else
			bio_based = 1;
	}

	if (bio_based) {
verify_bio_based:
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		if (dm_table_supports_dax(t, device_not_dax_capable) ||
		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
			t->type = DM_TYPE_DAX_BIO_BASED;
		}
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMERR("request-based DM doesn't support multiple targets");
		return -EINVAL;
	}

	if (list_empty(devices)) {
		int srcu_idx;
		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

		/* inherit live table's type */
		if (live_table)
			t->type = live_table->type;
		dm_put_live_table(t->md, srcu_idx);
		return 0;
	}

	ti = dm_table_get_immutable_target(t);
	if (!ti) {
		DMERR("table load rejected: immutable target is required");
		return -EINVAL;
	} else if (ti->max_io_len) {
		DMERR("table load rejected: immutable target that splits IO is not supported");
		return -EINVAL;
	}

	/* Non-request-stackable devices can't be used for request-based dm */
	if (!ti->type->iterate_devices ||
	    !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
		DMERR("table load rejected: including non-request-stackable devices");
		return -EINVAL;
	}

	return 0;
}

enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
	/* Immutable target is implicitly a singleton */
	if (t->num_targets > 1 ||
	    !dm_target_is_immutable(t->targets[0].type))
		return NULL;

	return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (dm_target_is_wildcard(ti->type))
			return ti;
	}

	return NULL;
}

bool dm_table_bio_based(struct dm_table *t)
{
	return __table_type_bio_based(dm_table_get_type(t));
}

bool dm_table_request_based(struct dm_table *t)
{
	return __table_type_request_based(dm_table_get_type(t));
}

static bool dm_table_supports_poll(struct dm_table *t);

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
	enum dm_queue_mode type = dm_table_get_type(t);
	unsigned int per_io_data_size = 0, front_pad, io_front_pad;
	unsigned int min_pool_size = 0, pool_size;
	struct dm_md_mempools *pools;

	if (unlikely(type == DM_TYPE_NONE)) {
		DMERR("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	if (!pools)
		return -ENOMEM;

	if (type == DM_TYPE_REQUEST_BASED) {
		pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		goto init_bs;
	}

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
		min_pool_size = max(min_pool_size, ti->num_flush_bios);
	}
	pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
	front_pad = roundup(per_io_data_size,
		__alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;

	io_front_pad = roundup(per_io_data_size,
		__alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
	if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
			dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
		goto out_free_pools;
	if (t->integrity_supported &&
	    bioset_integrity_create(&pools->io_bs, pool_size))
		goto out_free_pools;
init_bs:
	if (bioset_init(&pools->bs, pool_size, front_pad, 0))
		goto out_free_pools;
	if (t->integrity_supported &&
	    bioset_integrity_create(&pools->bs, pool_size))
		goto out_free_pools;

	t->mempools = pools;
	return 0;

out_free_pools:
	dm_free_md_mempools(pools);
	return -ENOMEM;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
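
/*
 * Depth example (illustrative, assuming 64-byte cache lines and 8-byte
 * sector_t): a table with 100 targets has ceil(100 / 8) == 13 leaf nodes,
 * so depth == 1 + int_log(13, 9) == 3 levels.
 */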

static bool integrity_profile_exists(struct gendisk *disk)
{
	return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_passes_integrity(ti->type))
			goto no_integrity;
	}

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev->bdev->bd_disk;
		if (!integrity_profile_exists(template_disk))
			goto no_integrity;
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a 2 stage integrity
 * profile validation: First pass during table load, final pass during
 * resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
	struct mapped_device *md = t->md;
	struct gendisk *template_disk = NULL;

	/* If target handles integrity itself do not register it here. */
	if (t->integrity_added)
		return 0;

	template_disk = dm_table_get_integrity_disk(t);
	if (!template_disk)
		return 0;

	if (!integrity_profile_exists(dm_disk(md))) {
		t->integrity_supported = true;
		/*
		 * Register integrity profile during table load; we can do
		 * this because the final profile must match during resume.
		 */
		blk_integrity_register(dm_disk(md),
				       blk_get_integrity(template_disk));
		return 0;
	}

	/*
	 * If DM device already has an initialized integrity
	 * profile the new profile should not conflict.
	 */
	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMERR("%s: conflict with existing integrity profile: %s profile mismatch",
		      dm_device_name(t->md),
		      template_disk->disk_name);
		return 1;
	}

	/* Preserve existing integrity profile */
	t->integrity_supported = true;
	return 0;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

struct dm_crypto_profile {
	struct blk_crypto_profile profile;
	struct mapped_device *md;
};

static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	const struct blk_crypto_key *key = data;

	blk_crypto_evict_key(dev->bdev, key);
	return 0;
}

/*
 * When an inline encryption key is evicted from a device-mapper device, evict
 * it from all the underlying devices.
 */
static int dm_keyslot_evict(struct blk_crypto_profile *profile,
			    const struct blk_crypto_key *key, unsigned int slot)
{
	struct mapped_device *md =
		container_of(profile, struct dm_crypto_profile, profile)->md;
	struct dm_table *t;
	int srcu_idx;

	t = dm_get_live_table(md, &srcu_idx);
	if (!t)
		return 0;

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices)
			continue;
		ti->type->iterate_devices(ti, dm_keyslot_evict_callback,
					  (void *)key);
	}

	dm_put_live_table(md, srcu_idx);
	return 0;
}

static int
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	struct blk_crypto_profile *parent = data;
	struct blk_crypto_profile *child =
		bdev_get_queue(dev->bdev)->crypto_profile;

	blk_crypto_intersect_capabilities(parent, child);
	return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
	struct dm_crypto_profile *dmcp = container_of(profile,
						      struct dm_crypto_profile,
						      profile);

	if (!profile)
		return;

	blk_crypto_profile_destroy(profile);
	kfree(dmcp);
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
	dm_destroy_crypto_profile(t->crypto_profile);
	t->crypto_profile = NULL;
}

/*
 * Constructs and initializes t->crypto_profile with a crypto profile that
 * represents the common set of crypto capabilities of the devices described by
 * the dm_table.  However, if the constructed crypto profile doesn't support all
 * crypto capabilities that are supported by the current mapped_device, it
 * returns an error instead, since we don't support removing crypto capabilities
 * on table changes.  Finally, if the constructed crypto profile is "empty" (has
 * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
 */
static int dm_table_construct_crypto_profile(struct dm_table *t)
{
	struct dm_crypto_profile *dmcp;
	struct blk_crypto_profile *profile;
	unsigned int i;
	bool empty_profile = true;

	dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
	if (!dmcp)
		return -ENOMEM;
	dmcp->md = t->md;

	profile = &dmcp->profile;
	blk_crypto_profile_init(profile, 0);
	profile->ll_ops.keyslot_evict = dm_keyslot_evict;
	profile->max_dun_bytes_supported = UINT_MAX;
	memset(profile->modes_supported, 0xFF,
	       sizeof(profile->modes_supported));

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_passes_crypto(ti->type)) {
			blk_crypto_intersect_capabilities(profile, NULL);
			break;
		}
		if (!ti->type->iterate_devices)
			continue;
		ti->type->iterate_devices(ti,
					  device_intersect_crypto_capabilities,
					  profile);
	}

	if (t->md->queue &&
	    !blk_crypto_has_capabilities(profile,
					 t->md->queue->crypto_profile)) {
		DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
		dm_destroy_crypto_profile(profile);
		return -EINVAL;
	}

	/*
	 * If the new profile doesn't actually support any crypto capabilities,
	 * we may as well represent it with a NULL profile.
	 */
	for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
		if (profile->modes_supported[i]) {
			empty_profile = false;
			break;
		}
	}

	if (empty_profile) {
		dm_destroy_crypto_profile(profile);
		profile = NULL;
	}

	/*
	 * t->crypto_profile is only set temporarily while the table is being
	 * set up, and it gets set to NULL after the profile has been
	 * transferred to the request_queue.
	 */
	t->crypto_profile = profile;

	return 0;
}

static void dm_update_crypto_profile(struct request_queue *q,
				     struct dm_table *t)
{
	if (!t->crypto_profile)
		return;

	/* Make the crypto profile less restrictive. */
	if (!q->crypto_profile) {
		blk_crypto_register(t->crypto_profile, q);
	} else {
		blk_crypto_update_capabilities(q->crypto_profile,
					       t->crypto_profile);
		dm_destroy_crypto_profile(t->crypto_profile);
	}
	t->crypto_profile = NULL;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static int dm_table_construct_crypto_profile(struct dm_table *t)
{
	return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
}

static void dm_update_crypto_profile(struct request_queue *q,
				     struct dm_table *t)
{
}

#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_determine_type(t);
	if (r) {
		DMERR("unable to determine table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_register_integrity(t);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

	r = dm_table_construct_crypto_profile(t);
	if (r) {
		DMERR("could not construct crypto profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

inline sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer for NULL
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	if (unlikely(sector >= dm_table_get_size(t)))
		return NULL;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
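
/*
 * Lookup cost note (illustrative): each level scans at most KEYS_PER_NODE
 * keys, so a lookup touches depth * KEYS_PER_NODE keys in the worst case,
 * i.e. O(log(num_targets)) cache lines.
 */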

static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
}

/*
 * type->iterate_devices() should be called when the sanity check needs to
 * iterate and check all underlying data devices. iterate_devices() will
 * iterate all underlying data devices until it encounters a non-zero return
 * code, returned either by the input iterate_devices_callout_fn or by
 * iterate_devices() itself internally.
 *
 * For some target types (e.g. dm-stripe), one call of iterate_devices() may
 * iterate multiple underlying devices internally, in which case a non-zero
 * return code returned by iterate_devices_callout_fn will stop the iteration
 * in advance.
 *
 * Cases requiring _any_ underlying device supporting some kind of attribute,
 * should use the iteration structure like dm_table_any_dev_attr(), or call
 * it directly. @func should handle semantics of positive examples, e.g.
 * capable of something.
 *
 * Cases requiring _all_ underlying devices supporting some kind of attribute,
 * should use the iteration structure like dm_table_supports_nowait() or
 * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
 * uses an @anti_func that handles semantics of counter examples, e.g. not
 * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
 */
static bool dm_table_any_dev_attr(struct dm_table *t,
				  iterate_devices_callout_fn func, void *data)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, func, data))
			return true;
	}

	return false;
}

static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned int *num_devices = data;

	(*num_devices)++;

	return 0;
}

static bool dm_table_supports_poll(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
			return false;
	}

	return true;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);
		unsigned int num_devices = 0;

		if (!ti->type->iterate_devices)
			return false;

		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}

static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	bool *zoned = data;

	return bdev_is_zoned(dev->bdev) != *zoned;
}

static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
				 sector_t start, sector_t len, void *data)
{
	return bdev_is_zoned(dev->bdev);
}

/*
 * Check the device zoned model based on the target feature flag. If the target
 * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
 * also accepted but all devices must have the same zoned model. If the target
 * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
 * zoned model with all zoned devices having the same zone size.
 */
static bool dm_table_supports_zoned(struct dm_table *t, bool zoned)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/*
		 * For the wildcard target (dm-error), if we do not have a
		 * backing device, we must always return false. If we have a
		 * backing device, the result must depend on checking zoned
		 * model, like for any other target. So for this, check directly
		 * if the target backing device is zoned as we get "false" when
		 * dm-error was set without a backing device.
		 */
		if (dm_target_is_wildcard(ti->type) &&
		    !ti->type->iterate_devices(ti, device_is_zoned_model, NULL))
			return false;

		if (dm_target_supports_zoned_hm(ti->type)) {
			if (!ti->type->iterate_devices ||
			    ti->type->iterate_devices(ti, device_not_zoned,
						      &zoned))
				return false;
		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
			if (zoned)
				return false;
		}
	}

	return true;
}

static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	unsigned int *zone_sectors = data;

	if (!bdev_is_zoned(dev->bdev))
		return 0;
	return bdev_zone_sectors(dev->bdev) != *zone_sectors;
}

/*
 * Check consistency of zoned model and zone sectors across all targets. For
 * zone sectors, if the destination device is a zoned block device, it shall
 * have the specified zone_sectors.
 */
static int validate_hardware_zoned(struct dm_table *t, bool zoned,
				   unsigned int zone_sectors)
{
	if (!zoned)
		return 0;

	if (!dm_table_supports_zoned(t, zoned)) {
		DMERR("%s: zoned model is not consistent across all devices",
		      dm_device_name(t->md));
		return -EINVAL;
	}

	/* Check zone size validity and compatibility */
	if (!zone_sectors || !is_power_of_2(zone_sectors))
		return -EINVAL;

	if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
		DMERR("%s: zone sectors is not consistent across all zoned devices",
		      dm_device_name(t->md));
		return -EINVAL;
	}

	return 0;
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *t,
			      struct queue_limits *limits)
{
	struct queue_limits ti_limits;
	unsigned int zone_sectors = 0;
	bool zoned = false;

	blk_set_stacking_limits(limits);

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		blk_set_stacking_limits(&ti_limits);

		if (!ti->type->iterate_devices) {
			/* Set I/O hints portion of queue limits */
			if (ti->type->io_hints)
				ti->type->io_hints(ti, &ti_limits);
			goto combine_limits;
		}

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		if (!zoned && ti_limits.zoned) {
			/*
			 * After stacking all limits, validate all devices
			 * in table support this zoned model and zone sectors.
			 */
			zoned = ti_limits.zoned;
			zone_sectors = ti_limits.chunk_sectors;
		}

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device (start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(t->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);
	}

	/*
	 * Verify that the zoned model and zone sectors, as determined before
	 * any .io_hints override, are the same across all devices in the table.
	 * - this is especially relevant if .io_hints is emulating a disk-managed
	 *   zoned model on host-managed zoned block devices.
	 * BUT...
	 */
	if (limits->zoned) {
		/*
		 * ...IF the above limits stacking determined a zoned model
		 * validate that all of the table's devices conform to it.
		 */
		zoned = limits->zoned;
		zone_sectors = limits->chunk_sectors;
	}
	if (validate_hardware_zoned(t, zoned, zone_sectors))
		return -EINVAL;

	return validate_hardware_logical_block_alignment(t, limits);
}

/*
 * Verify that all devices have an integrity profile that matches the
 * DM device's registered integrity profile.  If the profiles don't
 * match then unregister the DM device's integrity profile.
 */
static void dm_table_verify_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	if (t->integrity_added)
		return;

	if (t->integrity_supported) {
		/*
		 * Verify that the original integrity profile
		 * matches all the devices in this table.
		 */
		template_disk = dm_table_get_integrity_disk(t);
		if (template_disk &&
		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
			return;
	}

	if (integrity_profile_exists(dm_disk(t->md))) {
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
		blk_integrity_unregister(dm_disk(t->md));
	}
}

static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned long flush = (unsigned long) data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return (q->queue_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
	/*
	 * Require at least one underlying device to support flushes.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting flushes must provide.
	 */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_flush_bios)
			continue;

		if (ti->flush_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
			return true;
	}

	return false;
}
1827
1828static int device_dax_write_cache_enabled(struct dm_target *ti,
1829					  struct dm_dev *dev, sector_t start,
1830					  sector_t len, void *data)
1831{
1832	struct dax_device *dax_dev = dev->dax_dev;
 
1833
1834	if (!dax_dev)
1835		return false;
 
1836
1837	if (dax_write_cache_enabled(dax_dev))
1838		return true;
1839	return false;
 
 
1840}
1841
1842static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
1843				sector_t start, sector_t len, void *data)
1844{
1845	return !bdev_nonrot(dev->bdev);
 
 
1846}
1847
1848static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1849			     sector_t start, sector_t len, void *data)
1850{
1851	struct request_queue *q = bdev_get_queue(dev->bdev);
1852
1853	return !blk_queue_add_random(q);
1854}
1855
1856static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1857					   sector_t start, sector_t len, void *data)
1858{
1859	struct request_queue *q = bdev_get_queue(dev->bdev);
1860
1861	return !q->limits.max_write_zeroes_sectors;
1862}
1863
1864static bool dm_table_supports_write_zeroes(struct dm_table *t)
 
1865{
1866	for (unsigned int i = 0; i < t->num_targets; i++) {
1867		struct dm_target *ti = dm_table_get_target(t, i);
1868
1869		if (!ti->num_write_zeroes_bios)
1870			return false;
1871
1872		if (!ti->type->iterate_devices ||
1873		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1874			return false;
1875	}
1876
1877	return true;
1878}
1879
1880static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
1881				     sector_t start, sector_t len, void *data)
1882{
1883	return !bdev_nowait(dev->bdev);
 
 
1884}
1885
1886static bool dm_table_supports_nowait(struct dm_table *t)
1887{
1888	for (unsigned int i = 0; i < t->num_targets; i++) {
1889		struct dm_target *ti = dm_table_get_target(t, i);
1890
1891		if (!dm_target_supports_nowait(ti->type))
 
 
 
1892			return false;
1893
1894		if (!ti->type->iterate_devices ||
1895		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
1896			return false;
1897	}
1898
1899	return true;
1900}
1901
1902static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1903				      sector_t start, sector_t len, void *data)
1904{
1905	return !bdev_max_discard_sectors(dev->bdev);
 
 
1906}
1907
1908static bool dm_table_supports_discards(struct dm_table *t)
1909{
1910	for (unsigned int i = 0; i < t->num_targets; i++) {
1911		struct dm_target *ti = dm_table_get_target(t, i);
1912
1913		if (!ti->num_discard_bios)
1914			return false;
1915
1916		/*
1917		 * Either the target provides discard support (as implied by setting
1918		 * 'discards_supported') or it relies on _all_ data devices having
1919		 * discard support.
1920		 */
1921		if (!ti->discards_supported &&
1922		    (!ti->type->iterate_devices ||
1923		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1924			return false;
1925	}
1926
1927	return true;
1928}
1929
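/*
 * Editor's note: unlike write zeroes above, discards have an override --
 * a target that implements discards itself can set 'discards_supported'
 * so the per-device scan is skipped.  A hedged constructor fragment for a
 * hypothetical such target (dm-thin takes a similar approach):
 */
static void example_ctr_fragment(struct dm_target *ti)
{
	/* Advertise discard support even if the data devices lack it;
	 * dm_table_supports_discards() then returns true without
	 * consulting device_not_discard_capable(). */
	ti->num_discard_bios = 1;
	ti->discards_supported = true;
}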
1930static int device_not_secure_erase_capable(struct dm_target *ti,
1931					   struct dm_dev *dev, sector_t start,
1932					   sector_t len, void *data)
1933{
1934	return !bdev_max_secure_erase_sectors(dev->bdev);
1935}
1936
1937static bool dm_table_supports_secure_erase(struct dm_table *t)
1938{
1939	for (unsigned int i = 0; i < t->num_targets; i++) {
1940		struct dm_target *ti = dm_table_get_target(t, i);
1941
1942		if (!ti->num_secure_erase_bios)
1943			return false;
1944
1945		if (!ti->type->iterate_devices ||
1946		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1947			return false;
1948	}
1949
1950	return true;
1951}
1952
1953static int device_requires_stable_pages(struct dm_target *ti,
1954					struct dm_dev *dev, sector_t start,
1955					sector_t len, void *data)
1956{
1957	return bdev_stable_writes(dev->bdev);
1958}
1959
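/*
 * Editor's note: two iteration polarities coexist in this file.  The
 * dm_table_supports_*() helpers are conjunctive -- every target and every
 * device must pass, hence the negated device_not_*_capable predicates --
 * while dm_table_any_dev_attr(), defined earlier in this file, is
 * disjunctive and pairs with positive predicates such as
 * device_requires_stable_pages() above.  A hedged sketch of the
 * disjunctive shape (not the verbatim body):
 */
static bool any_dev_attr_sketch(struct dm_table *t,
				iterate_devices_callout_fn func, void *data)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/* True as soon as any device of any target matches. */
		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, func, data))
			return true;
	}
	return false;
}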
1960int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1961			      struct queue_limits *limits)
1962{
1963	bool wc = false, fua = false;
1964	int r;
1965
1966	/*
1967	 * Copy table's limits to the DM device's request_queue
1968	 */
1969	q->limits = *limits;
1970
1971	if (dm_table_supports_nowait(t))
1972		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
1973	else
1974		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
1975
1976	if (!dm_table_supports_discards(t)) {
1977		q->limits.max_discard_sectors = 0;
1978		q->limits.max_hw_discard_sectors = 0;
1979		q->limits.discard_granularity = 0;
1980		q->limits.discard_alignment = 0;
1981		q->limits.discard_misaligned = 0;
1982	}
1983
1984	if (!dm_table_supports_secure_erase(t))
1985		q->limits.max_secure_erase_sectors = 0;
1986
1987	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
1988		wc = true;
1989		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
1990			fua = true;
1991	}
1992	blk_queue_write_cache(q, wc, fua);
1993
1994	if (dm_table_supports_dax(t, device_not_dax_capable)) {
1995		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
1996		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
1997			set_dax_synchronous(t->md->dax_dev);
1998	} else
1999		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
2000
2001	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
2002		dax_write_cache(t->md->dax_dev, true);
2003
2004	/* Ensure that all underlying devices are non-rotational. */
2005	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
2006		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2007	else
2008		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2009
2010	if (!dm_table_supports_write_zeroes(t))
2011		q->limits.max_write_zeroes_sectors = 0;
2012
2013	dm_table_verify_integrity(t);
2014
2015	/*
2016	 * Some devices don't use blk_integrity but still want stable pages
2017	 * because they do their own checksumming.
2018	 * If any underlying device requires stable pages, a table must require
2019	 * them as well.  Only targets that support iterate_devices are considered:
2020	 * we don't want error, zero, etc. to require stable pages.
2021	 */
2022	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
2023		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
2024	else
2025		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
2026
2027	/*
2028	 * Determine whether or not this queue's I/O timings contribute
2029	 * to the entropy pool. Only request-based targets use this.
2030	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
2031	 * have it set.
2032	 */
2033	if (blk_queue_add_random(q) &&
2034	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
2035		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2036
2037	/*
2038	 * For a zoned target, set up the zone-related queue attributes and,
2039	 * if necessary, the resources needed for zone append emulation.
2040	 */
2041	if (blk_queue_is_zoned(q)) {
2042		r = dm_set_zones_restrictions(t, q);
2043		if (r)
2044			return r;
2045		if (!static_key_enabled(&zoned_enabled.key))
2046			static_branch_enable(&zoned_enabled);
2047	}
2048
2049	dm_update_crypto_profile(q, t);
2050	disk_update_readahead(t->md->disk);
2051
2052	/*
2053	 * The check for a request-based device is left to
2054	 * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
2055	 *
2056	 * For a bio-based device, only set QUEUE_FLAG_POLL when all
2057	 * underlying devices support polling.
2058	 */
2059	if (__table_type_bio_based(t->type)) {
2060		if (dm_table_supports_poll(t))
2061			blk_queue_flag_set(QUEUE_FLAG_POLL, q);
2062		else
2063			blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
2064	}
2065
2066	return 0;
2067}
2068
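/*
 * Editor's note: dm_table_set_restrictions() is the final step of binding
 * a table to the mapped device.  A hedged sketch of the calling sequence
 * as dm core drives it (cf. drivers/md/dm.c; error paths trimmed):
 */
static int bind_table_sketch(struct mapped_device *md, struct dm_table *t)
{
	struct queue_limits limits;
	int r;

	/* Stack and validate the limits of every target first... */
	r = dm_calculate_queue_limits(t, &limits);
	if (r)
		return r;

	/* ...then apply them together with the feature flags derived
	 * above (nowait, discard, flush, DAX, ...). */
	return dm_table_set_restrictions(t, md->queue, &limits);
}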
2069struct list_head *dm_table_get_devices(struct dm_table *t)
2070{
2071	return &t->devices;
2072}
2073
2074blk_mode_t dm_table_get_mode(struct dm_table *t)
2075{
2076	return t->mode;
2077}
2078EXPORT_SYMBOL(dm_table_get_mode);
2079
2080enum suspend_mode {
2081	PRESUSPEND,
2082	PRESUSPEND_UNDO,
2083	POSTSUSPEND,
2084};
2085
2086static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
2087{
2088	lockdep_assert_held(&t->md->suspend_lock);
2089
2090	for (unsigned int i = 0; i < t->num_targets; i++) {
2091		struct dm_target *ti = dm_table_get_target(t, i);
2092
2093		switch (mode) {
2094		case PRESUSPEND:
2095			if (ti->type->presuspend)
2096				ti->type->presuspend(ti);
2097			break;
2098		case PRESUSPEND_UNDO:
2099			if (ti->type->presuspend_undo)
2100				ti->type->presuspend_undo(ti);
2101			break;
2102		case POSTSUSPEND:
2103			if (ti->type->postsuspend)
2104				ti->type->postsuspend(ti);
2105			break;
2106		}
2107	}
2108}
2109
2110void dm_table_presuspend_targets(struct dm_table *t)
2111{
2112	if (!t)
2113		return;
2114
2115	suspend_targets(t, PRESUSPEND);
2116}
2117
2118void dm_table_presuspend_undo_targets(struct dm_table *t)
2119{
2120	if (!t)
2121		return;
2122
2123	suspend_targets(t, PRESUSPEND_UNDO);
2124}
2125
2126void dm_table_postsuspend_targets(struct dm_table *t)
2127{
2128	if (!t)
2129		return;
2130
2131	suspend_targets(t, POSTSUSPEND);
2132}
2133
2134int dm_table_resume_targets(struct dm_table *t)
2135{
2136	unsigned int i;
2137	int r = 0;
2138
2139	lockdep_assert_held(&t->md->suspend_lock);
2140
2141	for (i = 0; i < t->num_targets; i++) {
2142		struct dm_target *ti = dm_table_get_target(t, i);
2143
2144		if (!ti->type->preresume)
2145			continue;
2146
2147		r = ti->type->preresume(ti);
2148		if (r) {
2149			DMERR("%s: %s: preresume failed, error = %d",
2150			      dm_device_name(t->md), ti->type->name, r);
2151			return r;
2152		}
2153	}
2154
2155	for (i = 0; i < t->num_targets; i++) {
2156		struct dm_target *ti = dm_table_get_target(t, i);
2157
2158		if (ti->type->resume)
2159			ti->type->resume(ti);
2160	}
2161
2162	return 0;
2163}
2164
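/*
 * Editor's note: the hooks above run in a fixed order around a table swap.
 * A hedged sketch of that sequence as dm core presumably drives it, with
 * md->suspend_lock held and the I/O plumbing elided:
 */
static int table_swap_sketch(struct mapped_device *md,
			     struct dm_table *old_map,
			     struct dm_table *new_map)
{
	dm_table_presuspend_targets(old_map);	/* before I/O is quiesced */
	/* ...flush in-flight I/O; on failure, call
	 * dm_table_presuspend_undo_targets(old_map) and abort... */
	dm_table_postsuspend_targets(old_map);	/* I/O fully stopped */

	/* ...swap the live map to new_map... */

	return dm_table_resume_targets(new_map);	/* preresume, then resume */
}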
2165struct mapped_device *dm_table_get_md(struct dm_table *t)
2166{
2167	return t->md;
2168}
2169EXPORT_SYMBOL(dm_table_get_md);
2170
2171const char *dm_table_device_name(struct dm_table *t)
2172{
2173	return dm_device_name(t->md);
2174}
2175EXPORT_SYMBOL_GPL(dm_table_device_name);
2176
2177void dm_table_run_md_queue_async(struct dm_table *t)
2178{
2179	if (!dm_table_request_based(t))
2180		return;
2181
2182	if (t->md->queue)
2183		blk_mq_run_hw_queues(t->md->queue, true);
2184}
2185EXPORT_SYMBOL(dm_table_run_md_queue_async);
2186