v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2001 Sistina Software (UK) Limited.
   4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include "dm-core.h"
  10#include "dm-rq.h"
  11
  12#include <linux/module.h>
  13#include <linux/vmalloc.h>
  14#include <linux/blkdev.h>
  15#include <linux/blk-integrity.h>
  16#include <linux/namei.h>
  17#include <linux/ctype.h>
  18#include <linux/string.h>
  19#include <linux/slab.h>
  20#include <linux/interrupt.h>
  21#include <linux/mutex.h>
  22#include <linux/delay.h>
  23#include <linux/atomic.h>
  24#include <linux/blk-mq.h>
  25#include <linux/mount.h>
  26#include <linux/dax.h>
  27
  28#define DM_MSG_PREFIX "table"
  29
  30#define NODE_SIZE L1_CACHE_BYTES
  31#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
  32#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
  33
  34/*
  35 * Similar to ceiling(log_base(n))
  36 */
  37static unsigned int int_log(unsigned int n, unsigned int base)
  38{
  39	int result = 0;
  40
  41	while (n > 1) {
  42		n = dm_div_up(n, base);
  43		result++;
  44	}
  45
  46	return result;
  47}
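/*
 * Worked example (the constants above are arch-dependent): with a 64-byte
 * L1 cache line and an 8-byte sector_t, KEYS_PER_NODE is 8 and
 * CHILDREN_PER_NODE is 9.  int_log(1000, 9) then rounds up through
 * 1000 -> 112 -> 13 -> 2 -> 1 and returns 4, i.e. ceil(log9(1000)).
 */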
  48
  49/*
  50 * Calculate the index of the child node for the k'th key of the n'th node.
  51 */
  52static inline unsigned int get_child(unsigned int n, unsigned int k)
  53{
  54	return (n * CHILDREN_PER_NODE) + k;
  55}
  56
  57/*
  58 * Return the n'th node of level l from table t.
  59 */
  60static inline sector_t *get_node(struct dm_table *t,
  61				 unsigned int l, unsigned int n)
  62{
  63	return t->index[l] + (n * KEYS_PER_NODE);
  64}
  65
  66/*
  67 * Return the highest key that you could look up from the n'th
  68 * node on level l of the btree.
  69 */
  70static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
  71{
  72	for (; l < t->depth - 1; l++)
  73		n = get_child(n, CHILDREN_PER_NODE - 1);
  74
  75	if (n >= t->counts[l])
  76		return (sector_t) -1;
  77
  78	return get_node(t, l, n)[KEYS_PER_NODE - 1];
  79}
  80
  81/*
  82 * Fills in a level of the btree based on the highs of the level
  83 * below it.
  84 */
  85static int setup_btree_index(unsigned int l, struct dm_table *t)
  86{
  87	unsigned int n, k;
  88	sector_t *node;
  89
  90	for (n = 0U; n < t->counts[l]; n++) {
  91		node = get_node(t, l, n);
  92
  93		for (k = 0U; k < KEYS_PER_NODE; k++)
  94			node[k] = high(t, l + 1, get_child(n, k));
  95	}
  96
  97	return 0;
  98}
  99
 100/*
 101 * highs and targets are managed as dynamic arrays during a
 102 * table load.
 103 */
 104static int alloc_targets(struct dm_table *t, unsigned int num)
 105{
 106	sector_t *n_highs;
 107	struct dm_target *n_targets;
 108
 109	/*
 110	 * Allocate both the target array and offset array at once.
 111	 */
 112	n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
 113			   GFP_KERNEL);
 114	if (!n_highs)
 115		return -ENOMEM;
 116
 117	n_targets = (struct dm_target *) (n_highs + num);
 118
 119	memset(n_highs, -1, sizeof(*n_highs) * num);
 120	kvfree(t->highs);
 121
 122	t->num_allocated = num;
 123	t->highs = n_highs;
 124	t->targets = n_targets;
 125
 126	return 0;
 127}
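/*
 * Layout of the single kvcalloc'ed block above (sketch):
 *
 *	n_highs:   num x sector_t   (last sector mapped by each target)
 *	n_targets: num x dm_target  (follows immediately after the highs)
 *
 * highs[] doubles as the leaf level of the lookup btree that
 * dm_table_build_index() constructs once the table is fully loaded.
 */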
 128
 129int dm_table_create(struct dm_table **result, blk_mode_t mode,
 130		    unsigned int num_targets, struct mapped_device *md)
 131{
 132	struct dm_table *t;
 133
 134	if (num_targets > DM_MAX_TARGETS)
 135		return -EOVERFLOW;
 136
 137	t = kzalloc(sizeof(*t), GFP_KERNEL);
 138
 139	if (!t)
 140		return -ENOMEM;
 141
 142	INIT_LIST_HEAD(&t->devices);
 143	init_rwsem(&t->devices_lock);
 144
 145	if (!num_targets)
 146		num_targets = KEYS_PER_NODE;
 147
 148	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
 149
 150	if (!num_targets) {
 151		kfree(t);
 152		return -EOVERFLOW;
 153	}
 154
 155	if (alloc_targets(t, num_targets)) {
 156		kfree(t);
 157		return -ENOMEM;
 158	}
 159
 160	t->type = DM_TYPE_NONE;
 161	t->mode = mode;
 162	t->md = md;
 163	t->flush_bypasses_map = true;
 164	*result = t;
 165	return 0;
 166}
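/*
 * Example: a load requesting 3 targets is rounded up to KEYS_PER_NODE
 * entries (e.g. 8 with a 64-byte cache line), so highs[] and targets[]
 * are always allocated in whole btree-leaf units; unused highs slots
 * retain the (sector_t)-1 fill from alloc_targets().
 */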
 167
 168static void free_devices(struct list_head *devices, struct mapped_device *md)
 169{
 170	struct list_head *tmp, *next;
 171
 172	list_for_each_safe(tmp, next, devices) {
 173		struct dm_dev_internal *dd =
 174		    list_entry(tmp, struct dm_dev_internal, list);
 175		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
 176		       dm_device_name(md), dd->dm_dev->name);
 177		dm_put_table_device(md, dd->dm_dev);
 178		kfree(dd);
 179	}
 180}
 181
 182static void dm_table_destroy_crypto_profile(struct dm_table *t);
 183
 184void dm_table_destroy(struct dm_table *t)
 185{
 186	if (!t)
 187		return;
 188
 189	/* free the indexes */
 190	if (t->depth >= 2)
 191		kvfree(t->index[t->depth - 2]);
 192
 193	/* free the targets */
 194	for (unsigned int i = 0; i < t->num_targets; i++) {
 195		struct dm_target *ti = dm_table_get_target(t, i);
 196
 197		if (ti->type->dtr)
 198			ti->type->dtr(ti);
 199
 200		dm_put_target_type(ti->type);
 201	}
 202
 203	kvfree(t->highs);
 204
 205	/* free the device list */
 206	free_devices(&t->devices, t->md);
 207
 208	dm_free_md_mempools(t->mempools);
 209
 210	dm_table_destroy_crypto_profile(t);
 211
 212	kfree(t);
 213}
 214
 215/*
 216 * See if we've already got a device in the list.
 217 */
 218static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
 219{
 220	struct dm_dev_internal *dd;
 221
 222	list_for_each_entry(dd, l, list)
 223		if (dd->dm_dev->bdev->bd_dev == dev)
 224			return dd;
 225
 226	return NULL;
 227}
 228
 229/*
 230 * If possible, this checks whether an area of a destination device is invalid.
 231 */
 232static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 233				  sector_t start, sector_t len, void *data)
 234{
 235	struct queue_limits *limits = data;
 236	struct block_device *bdev = dev->bdev;
 237	sector_t dev_size = bdev_nr_sectors(bdev);
 238	unsigned short logical_block_size_sectors =
 239		limits->logical_block_size >> SECTOR_SHIFT;
 240
 241	if (!dev_size)
 242		return 0;
 243
 244	if ((start >= dev_size) || (start + len > dev_size)) {
 245		DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu",
 246		      dm_device_name(ti->table->md), bdev,
 247		      (unsigned long long)start,
 248		      (unsigned long long)len,
 249		      (unsigned long long)dev_size);
 250		return 1;
 251	}
 252
 253	/*
 254	 * If the target is mapped to zoned block device(s), check
 255	 * that the zones are not partially mapped.
 256	 */
 257	if (bdev_is_zoned(bdev)) {
 258		unsigned int zone_sectors = bdev_zone_sectors(bdev);
 259
 260		if (start & (zone_sectors - 1)) {
 261			DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
 262			      dm_device_name(ti->table->md),
 263			      (unsigned long long)start,
 264			      zone_sectors, bdev);
 265			return 1;
 266		}
 267
 268		/*
 269		 * Note: The last zone of a zoned block device may be smaller
 270		 * than other zones. So for a target mapping the end of a
 271		 * zoned block device with such a zone, len would not be zone
 272		 * aligned. We do not allow such last smaller zone to be part
 273		 * of the mapping here to ensure that mappings with multiple
 274		 * devices do not end up with a smaller zone in the middle of
 275		 * the sector range.
 276		 */
 277		if (len & (zone_sectors - 1)) {
 278			DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
 279			      dm_device_name(ti->table->md),
 280			      (unsigned long long)len,
 281			      zone_sectors, bdev);
 282			return 1;
 283		}
 284	}
 285
 286	if (logical_block_size_sectors <= 1)
 287		return 0;
 288
 289	if (start & (logical_block_size_sectors - 1)) {
 290		DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg",
 291		      dm_device_name(ti->table->md),
 292		      (unsigned long long)start,
 293		      limits->logical_block_size, bdev);
 294		return 1;
 295	}
 296
 297	if (len & (logical_block_size_sectors - 1)) {
 298		DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg",
 299		      dm_device_name(ti->table->md),
 300		      (unsigned long long)len,
 301		      limits->logical_block_size, bdev);
 302		return 1;
 303	}
 304
 305	return 0;
 306}
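/*
 * Example of the alignment checks above: with a 4096-byte logical block
 * size, logical_block_size_sectors is 8, so start and len must both be
 * multiples of 8 sectors.  start=1000 (1000 & 7 == 0) is accepted, while
 * start=1001 (1001 & 7 == 1) fails with "not aligned to h/w logical
 * block size".
 */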
 307
 308/*
 309 * This upgrades the mode on an already open dm_dev, being
 310 * careful to leave things as they were if we fail to reopen the
 311 * device and not to touch the existing bdev field in case
 312 * it is accessed concurrently.
 313 */
 314static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode,
 315			struct mapped_device *md)
 316{
 317	int r;
 318	struct dm_dev *old_dev, *new_dev;
 319
 320	old_dev = dd->dm_dev;
 321
 322	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
 323				dd->dm_dev->mode | new_mode, &new_dev);
 324	if (r)
 325		return r;
 326
 327	dd->dm_dev = new_dev;
 328	dm_put_table_device(md, old_dev);
 329
 330	return 0;
 331}
 332
 333/*
 334 * Note: the __ref annotation is because this function can call the __init
  335 * marked early_lookup_bdev() when called from early boot code in dm-init.c.
 336 */
 337int __ref dm_devt_from_path(const char *path, dev_t *dev_p)
 338{
 339	int r;
 340	dev_t dev;
 341	unsigned int major, minor;
 342	char dummy;
 343
 344	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
 345		/* Extract the major/minor numbers */
 346		dev = MKDEV(major, minor);
 347		if (MAJOR(dev) != major || MINOR(dev) != minor)
 348			return -EOVERFLOW;
 349	} else {
 350		r = lookup_bdev(path, &dev);
 351#ifndef MODULE
 352		if (r && system_state < SYSTEM_RUNNING)
 353			r = early_lookup_bdev(path, &dev);
 354#endif
 355		if (r)
 356			return r;
 357	}
 358	*dev_p = dev;
 359	return 0;
 360}
 361EXPORT_SYMBOL(dm_devt_from_path);
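/*
 * Example inputs: "8:16" is parsed directly as major 8, minor 16.  The
 * trailing %c makes sscanf() return 3 for strings such as "8:16x", so
 * those (like "/dev/sdb" or any other path) fall through to
 * lookup_bdev(), or to early_lookup_bdev() during early boot.
 */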
 362
 363/*
 364 * Add a device to the list, or just increment the usage count if
 365 * it's already present.
 366 */
 367int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
 368		  struct dm_dev **result)
 369{
 370	int r;
 371	dev_t dev;
 372	struct dm_dev_internal *dd;
 373	struct dm_table *t = ti->table;
 374
 375	BUG_ON(!t);
 376
 377	r = dm_devt_from_path(path, &dev);
 378	if (r)
 379		return r;
 380
 381	if (dev == disk_devt(t->md->disk))
 382		return -EINVAL;
 383
 384	down_write(&t->devices_lock);
 385
 386	dd = find_device(&t->devices, dev);
 387	if (!dd) {
 388		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
 389		if (!dd) {
 390			r = -ENOMEM;
 391			goto unlock_ret_r;
 392		}
 393
 394		r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
 395		if (r) {
 396			kfree(dd);
 397			goto unlock_ret_r;
 398		}
 399
 400		refcount_set(&dd->count, 1);
 401		list_add(&dd->list, &t->devices);
 402		goto out;
 403
 404	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
 405		r = upgrade_mode(dd, mode, t->md);
 406		if (r)
 407			goto unlock_ret_r;
 408	}
 409	refcount_inc(&dd->count);
 410out:
 411	up_write(&t->devices_lock);
 412	*result = dd->dm_dev;
 413	return 0;
 414
 415unlock_ret_r:
 416	up_write(&t->devices_lock);
 417	return r;
 418}
 419EXPORT_SYMBOL(dm_get_device);
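/*
 * Typical use from a target constructor (a minimal sketch; "lc" and the
 * argv layout are hypothetical, modelled on a linear-style target):
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *			  &lc->dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * The matching dm_put_device(ti, lc->dev) belongs in the target's .dtr
 * method and in any .ctr error path reached after this point.
 */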
 420
 421static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 422				sector_t start, sector_t len, void *data)
 423{
 424	struct queue_limits *limits = data;
 425	struct block_device *bdev = dev->bdev;
 426	struct request_queue *q = bdev_get_queue(bdev);
 427
 428	if (unlikely(!q)) {
 429		DMWARN("%s: Cannot set limits for nonexistent device %pg",
 430		       dm_device_name(ti->table->md), bdev);
 431		return 0;
 432	}
 433
 434	if (blk_stack_limits(limits, &q->limits,
 435			get_start_sect(bdev) + start) < 0)
 436		DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
 437		       "physical_block_size=%u, logical_block_size=%u, "
 438		       "alignment_offset=%u, start=%llu",
 439		       dm_device_name(ti->table->md), bdev,
 440		       q->limits.physical_block_size,
 441		       q->limits.logical_block_size,
 442		       q->limits.alignment_offset,
 443		       (unsigned long long) start << SECTOR_SHIFT);
 444
 445	/*
 446	 * Only stack the integrity profile if the target doesn't have native
 447	 * integrity support.
 448	 */
 449	if (!dm_target_has_integrity(ti->type))
 450		queue_limits_stack_integrity_bdev(limits, bdev);
 451	return 0;
 452}
 453
 454/*
 455 * Decrement a device's use count and remove it if necessary.
 456 */
 457void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 458{
 459	int found = 0;
 460	struct dm_table *t = ti->table;
 461	struct list_head *devices = &t->devices;
 462	struct dm_dev_internal *dd;
 463
 464	down_write(&t->devices_lock);
 465
 466	list_for_each_entry(dd, devices, list) {
 467		if (dd->dm_dev == d) {
 468			found = 1;
 469			break;
 470		}
 471	}
 472	if (!found) {
 473		DMERR("%s: device %s not in table devices list",
 474		      dm_device_name(t->md), d->name);
 475		goto unlock_ret;
 476	}
 477	if (refcount_dec_and_test(&dd->count)) {
 478		dm_put_table_device(t->md, d);
 479		list_del(&dd->list);
 480		kfree(dd);
 481	}
 482
 483unlock_ret:
 484	up_write(&t->devices_lock);
 485}
 486EXPORT_SYMBOL(dm_put_device);
 487
 488/*
 489 * Checks to see if the target joins onto the end of the table.
 490 */
 491static int adjoin(struct dm_table *t, struct dm_target *ti)
 492{
 493	struct dm_target *prev;
 494
 495	if (!t->num_targets)
 496		return !ti->begin;
 497
 498	prev = &t->targets[t->num_targets - 1];
 499	return (ti->begin == (prev->begin + prev->len));
 500}
 501
 502/*
 503 * Used to dynamically allocate the arg array.
 504 *
 505 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 506 * process messages even if some device is suspended. These messages have a
 507 * small fixed number of arguments.
 508 *
 509 * On the other hand, dm-switch needs to process bulk data using messages and
 510 * excessive use of GFP_NOIO could cause trouble.
 511 */
 512static char **realloc_argv(unsigned int *size, char **old_argv)
 513{
 514	char **argv;
 515	unsigned int new_size;
 516	gfp_t gfp;
 517
 518	if (*size) {
 519		new_size = *size * 2;
 520		gfp = GFP_KERNEL;
 521	} else {
 522		new_size = 8;
 523		gfp = GFP_NOIO;
 524	}
 525	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
 526	if (argv && old_argv) {
 527		memcpy(argv, old_argv, *size * sizeof(*argv));
 528		*size = new_size;
 529	}
 530
 531	kfree(old_argv);
 532	return argv;
 533}
 534
 535/*
 536 * Destructively splits up the argument list to pass to ctr.
 537 */
 538int dm_split_args(int *argc, char ***argvp, char *input)
 539{
 540	char *start, *end = input, *out, **argv = NULL;
 541	unsigned int array_size = 0;
 542
 543	*argc = 0;
 544
 545	if (!input) {
 546		*argvp = NULL;
 547		return 0;
 548	}
 549
 550	argv = realloc_argv(&array_size, argv);
 551	if (!argv)
 552		return -ENOMEM;
 553
 554	while (1) {
 555		/* Skip whitespace */
 556		start = skip_spaces(end);
 557
 558		if (!*start)
 559			break;	/* success, we hit the end */
 560
 561		/* 'out' is used to remove any back-quotes */
 562		end = out = start;
 563		while (*end) {
 564			/* Everything apart from '\0' can be quoted */
 565			if (*end == '\\' && *(end + 1)) {
 566				*out++ = *(end + 1);
 567				end += 2;
 568				continue;
 569			}
 570
 571			if (isspace(*end))
 572				break;	/* end of token */
 573
 574			*out++ = *end++;
 575		}
 576
 577		/* have we already filled the array ? */
 578		if ((*argc + 1) > array_size) {
 579			argv = realloc_argv(&array_size, argv);
 580			if (!argv)
 581				return -ENOMEM;
 582		}
 583
 584		/* we know this is whitespace */
 585		if (*end)
 586			end++;
 587
 588		/* terminate the string and put it in the array */
 589		*out = '\0';
 590		argv[*argc] = start;
 591		(*argc)++;
 592	}
 593
 594	*argvp = argv;
 595	return 0;
 596}
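/*
 * Example: for a params string "/dev/sdb 1024 foo\ bar", dm_split_args()
 * yields argc == 3 with argv[] = { "/dev/sdb", "1024", "foo bar" }; the
 * backslash escapes the space, and the tokens point into the (modified
 * in place, hence "destructively") input buffer, so only argv itself
 * needs to be freed.
 */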
 597
 598static void dm_set_stacking_limits(struct queue_limits *limits)
 599{
 600	blk_set_stacking_limits(limits);
 601	limits->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
 602}
 603
 604/*
 605 * Impose necessary and sufficient conditions on a device's table such
 606 * that any incoming bio which respects its logical_block_size can be
 607 * processed successfully.  If it falls across the boundary between
 608 * two or more targets, the size of each piece it gets split into must
 609 * be compatible with the logical_block_size of the target processing it.
 610 */
 611static int validate_hardware_logical_block_alignment(struct dm_table *t,
 612						     struct queue_limits *limits)
 613{
 614	/*
 615	 * This function uses arithmetic modulo the logical_block_size
 616	 * (in units of 512-byte sectors).
 617	 */
 618	unsigned short device_logical_block_size_sects =
 619		limits->logical_block_size >> SECTOR_SHIFT;
 620
 621	/*
 622	 * Offset of the start of the next table entry, mod logical_block_size.
 623	 */
 624	unsigned short next_target_start = 0;
 625
 626	/*
 627	 * Given an aligned bio that extends beyond the end of a
 628	 * target, how many sectors must the next target handle?
 629	 */
 630	unsigned short remaining = 0;
 631
 632	struct dm_target *ti;
 633	struct queue_limits ti_limits;
 634	unsigned int i;
 635
 636	/*
 637	 * Check each entry in the table in turn.
 638	 */
 639	for (i = 0; i < t->num_targets; i++) {
 640		ti = dm_table_get_target(t, i);
 641
 642		dm_set_stacking_limits(&ti_limits);
 643
 644		/* combine all target devices' limits */
 645		if (ti->type->iterate_devices)
 646			ti->type->iterate_devices(ti, dm_set_device_limits,
 647						  &ti_limits);
 648
 649		/*
 650		 * If the remaining sectors fall entirely within this
 651		 * table entry, are they compatible with its logical_block_size?
 652		 */
 653		if (remaining < ti->len &&
 654		    remaining & ((ti_limits.logical_block_size >>
 655				  SECTOR_SHIFT) - 1))
 656			break;	/* Error */
 657
 658		next_target_start =
 659		    (unsigned short) ((next_target_start + ti->len) &
 660				      (device_logical_block_size_sects - 1));
 661		remaining = next_target_start ?
 662		    device_logical_block_size_sects - next_target_start : 0;
 663	}
 664
 665	if (remaining) {
 666		DMERR("%s: table line %u (start sect %llu len %llu) "
 667		      "not aligned to h/w logical block size %u",
 668		      dm_device_name(t->md), i,
 669		      (unsigned long long) ti->begin,
 670		      (unsigned long long) ti->len,
 671		      limits->logical_block_size);
 672		return -EINVAL;
 673	}
 674
 675	return 0;
 676}
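/*
 * Worked example: with a stacked logical_block_size of 4096 (8 sectors),
 * a first target of len 12 sectors leaves next_target_start == 4 and
 * remaining == 4, i.e. the next target must absorb a 4-sector tail of a
 * 4 KiB bio.  If that target's own logical_block_size is also 4096,
 * remaining & 7 is non-zero, the loop breaks, and the table is rejected.
 */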
 677
 678int dm_table_add_target(struct dm_table *t, const char *type,
 679			sector_t start, sector_t len, char *params)
 680{
 681	int r = -EINVAL, argc;
 682	char **argv;
 683	struct dm_target *ti;
 684
 685	if (t->singleton) {
 686		DMERR("%s: target type %s must appear alone in table",
 687		      dm_device_name(t->md), t->targets->type->name);
 688		return -EINVAL;
 689	}
 690
 691	BUG_ON(t->num_targets >= t->num_allocated);
 692
 693	ti = t->targets + t->num_targets;
 694	memset(ti, 0, sizeof(*ti));
 695
 696	if (!len) {
 697		DMERR("%s: zero-length target", dm_device_name(t->md));
 698		return -EINVAL;
 699	}
 700
 701	ti->type = dm_get_target_type(type);
 702	if (!ti->type) {
 703		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
 704		return -EINVAL;
 705	}
 706
 707	if (dm_target_needs_singleton(ti->type)) {
 708		if (t->num_targets) {
 709			ti->error = "singleton target type must appear alone in table";
 710			goto bad;
 711		}
 712		t->singleton = true;
 713	}
 714
 715	if (dm_target_always_writeable(ti->type) &&
 716	    !(t->mode & BLK_OPEN_WRITE)) {
 717		ti->error = "target type may not be included in a read-only table";
 718		goto bad;
 719	}
 720
 721	if (t->immutable_target_type) {
 722		if (t->immutable_target_type != ti->type) {
 723			ti->error = "immutable target type cannot be mixed with other target types";
 724			goto bad;
 725		}
 726	} else if (dm_target_is_immutable(ti->type)) {
 727		if (t->num_targets) {
 728			ti->error = "immutable target type cannot be mixed with other target types";
 729			goto bad;
 730		}
 731		t->immutable_target_type = ti->type;
 732	}
 733
 734	ti->table = t;
 735	ti->begin = start;
 736	ti->len = len;
 737	ti->error = "Unknown error";
 738
 739	/*
 740	 * Does this target adjoin the previous one ?
 741	 */
 742	if (!adjoin(t, ti)) {
 743		ti->error = "Gap in table";
 744		goto bad;
 745	}
 746
 747	r = dm_split_args(&argc, &argv, params);
 748	if (r) {
 749		ti->error = "couldn't split parameters";
 750		goto bad;
 751	}
 752
 753	r = ti->type->ctr(ti, argc, argv);
 754	kfree(argv);
 755	if (r)
 756		goto bad;
 757
 758	t->highs[t->num_targets++] = ti->begin + ti->len - 1;
 759
 760	if (!ti->num_discard_bios && ti->discards_supported)
 761		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
 762		       dm_device_name(t->md), type);
 763
 764	if (ti->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
 765		static_branch_enable(&swap_bios_enabled);
 766
 767	if (!ti->flush_bypasses_map)
 768		t->flush_bypasses_map = false;
 769
 770	return 0;
 771
 772 bad:
 773	DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r));
 774	dm_put_target_type(ti->type);
 775	return r;
 776}
 777
 778/*
 779 * Target argument parsing helpers.
 780 */
 781static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
 782			     unsigned int *value, char **error, unsigned int grouped)
 783{
 784	const char *arg_str = dm_shift_arg(arg_set);
 785	char dummy;
 786
 787	if (!arg_str ||
 788	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
 789	    (*value < arg->min) ||
 790	    (*value > arg->max) ||
 791	    (grouped && arg_set->argc < *value)) {
 792		*error = arg->error;
 793		return -EINVAL;
 794	}
 795
 796	return 0;
 797}
 798
 799int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
 800		unsigned int *value, char **error)
 801{
 802	return validate_next_arg(arg, arg_set, value, error, 0);
 803}
 804EXPORT_SYMBOL(dm_read_arg);
 805
 806int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
 807		      unsigned int *value, char **error)
 808{
 809	return validate_next_arg(arg, arg_set, value, error, 1);
 810}
 811EXPORT_SYMBOL(dm_read_arg_group);
 812
 813const char *dm_shift_arg(struct dm_arg_set *as)
 814{
 815	char *r;
 816
 817	if (as->argc) {
 818		as->argc--;
 819		r = *as->argv;
 820		as->argv++;
 821		return r;
 822	}
 823
 824	return NULL;
 825}
 826EXPORT_SYMBOL(dm_shift_arg);
 827
 828void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
 829{
 830	BUG_ON(as->argc < num_args);
 831	as->argc -= num_args;
 832	as->argv += num_args;
 833}
 834EXPORT_SYMBOL(dm_consume_args);
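/*
 * Typical use of the argument helpers in a target constructor (a minimal
 * sketch; the bounds and error string are hypothetical, and "as" is a
 * struct dm_arg_set initialized from the ctr's argc/argv):
 *
 *	static const struct dm_arg _args[] = {
 *		{ 0, 16, "invalid number of feature args" },
 *	};
 *	unsigned int num_features;
 *
 *	if (dm_read_arg_group(_args, &as, &num_features, &ti->error))
 *		return -EINVAL;
 *	while (num_features--) {
 *		const char *arg_name = dm_shift_arg(&as);
 *		...
 *	}
 *
 * dm_read_arg_group() additionally verifies that at least num_features
 * arguments remain in the set (the "grouped" case of validate_next_arg()).
 */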
 835
 836static bool __table_type_bio_based(enum dm_queue_mode table_type)
 837{
 838	return (table_type == DM_TYPE_BIO_BASED ||
 839		table_type == DM_TYPE_DAX_BIO_BASED);
 840}
 841
 842static bool __table_type_request_based(enum dm_queue_mode table_type)
 843{
 844	return table_type == DM_TYPE_REQUEST_BASED;
 845}
 846
 847void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 848{
 849	t->type = type;
 850}
 851EXPORT_SYMBOL_GPL(dm_table_set_type);
 852
 853/* validate the dax capability of the target device span */
 854static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
 855			sector_t start, sector_t len, void *data)
 856{
 857	if (dev->dax_dev)
 858		return false;
 859
 860	DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
 861	return true;
 862}
 863
 864/* Check devices support synchronous DAX */
 865static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
 866					      sector_t start, sector_t len, void *data)
 867{
 868	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
 869}
 870
 871static bool dm_table_supports_dax(struct dm_table *t,
 872				  iterate_devices_callout_fn iterate_fn)
 873{
 874	/* Ensure that all targets support DAX. */
 875	for (unsigned int i = 0; i < t->num_targets; i++) {
 876		struct dm_target *ti = dm_table_get_target(t, i);
 877
 878		if (!ti->type->direct_access)
 879			return false;
 880
 881		if (dm_target_is_wildcard(ti->type) ||
 882		    !ti->type->iterate_devices ||
 883		    ti->type->iterate_devices(ti, iterate_fn, NULL))
 884			return false;
 885	}
 886
 887	return true;
 888}
 889
 890static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
 891				  sector_t start, sector_t len, void *data)
 892{
 893	struct block_device *bdev = dev->bdev;
 894	struct request_queue *q = bdev_get_queue(bdev);
 895
 896	/* request-based cannot stack on partitions! */
 897	if (bdev_is_partition(bdev))
 898		return false;
 899
 900	return queue_is_mq(q);
 901}
 902
 903static int dm_table_determine_type(struct dm_table *t)
 904{
 905	unsigned int bio_based = 0, request_based = 0, hybrid = 0;
 906	struct dm_target *ti;
 907	struct list_head *devices = dm_table_get_devices(t);
 908	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
 909
 910	if (t->type != DM_TYPE_NONE) {
 911		/* target already set the table's type */
 912		if (t->type == DM_TYPE_BIO_BASED) {
 913			/* possibly upgrade to a variant of bio-based */
 914			goto verify_bio_based;
 915		}
 916		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
 917		goto verify_rq_based;
 918	}
 919
 920	for (unsigned int i = 0; i < t->num_targets; i++) {
 921		ti = dm_table_get_target(t, i);
 922		if (dm_target_hybrid(ti))
 923			hybrid = 1;
 924		else if (dm_target_request_based(ti))
 925			request_based = 1;
 926		else
 927			bio_based = 1;
 928
 929		if (bio_based && request_based) {
 930			DMERR("Inconsistent table: different target types can't be mixed up");
 931			return -EINVAL;
 932		}
 933	}
 934
 935	if (hybrid && !bio_based && !request_based) {
 936		/*
 937		 * The targets can work either way.
 938		 * Determine the type from the live device.
 939		 * Default to bio-based if device is new.
 940		 */
 941		if (__table_type_request_based(live_md_type))
 942			request_based = 1;
 943		else
 944			bio_based = 1;
 945	}
 946
 947	if (bio_based) {
 948verify_bio_based:
 949		/* We must use this table as bio-based */
 950		t->type = DM_TYPE_BIO_BASED;
 951		if (dm_table_supports_dax(t, device_not_dax_capable) ||
 952		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
 953			t->type = DM_TYPE_DAX_BIO_BASED;
 954		}
 955		return 0;
 956	}
 957
 958	BUG_ON(!request_based); /* No targets in this table */
 959
 960	t->type = DM_TYPE_REQUEST_BASED;
 961
 962verify_rq_based:
 963	/*
 964	 * Request-based dm supports only tables that have a single target now.
 965	 * To support multiple targets, request splitting support is needed,
 966	 * and that needs lots of changes in the block-layer.
 967	 * (e.g. request completion process for partial completion.)
 968	 */
 969	if (t->num_targets > 1) {
 970		DMERR("request-based DM doesn't support multiple targets");
 971		return -EINVAL;
 972	}
 973
 974	if (list_empty(devices)) {
 975		int srcu_idx;
 976		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
 977
 978		/* inherit live table's type */
 979		if (live_table)
 980			t->type = live_table->type;
 981		dm_put_live_table(t->md, srcu_idx);
 982		return 0;
 983	}
 984
 985	ti = dm_table_get_immutable_target(t);
 986	if (!ti) {
 987		DMERR("table load rejected: immutable target is required");
 988		return -EINVAL;
 989	} else if (ti->max_io_len) {
 990		DMERR("table load rejected: immutable target that splits IO is not supported");
 991		return -EINVAL;
 992	}
 993
 994	/* Non-request-stackable devices can't be used for request-based dm */
 995	if (!ti->type->iterate_devices ||
 996	    !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
 997		DMERR("table load rejected: including non-request-stackable devices");
 998		return -EINVAL;
 999	}
1000
1001	return 0;
1002}
1003
1004enum dm_queue_mode dm_table_get_type(struct dm_table *t)
1005{
1006	return t->type;
1007}
1008
1009struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
1010{
1011	return t->immutable_target_type;
1012}
1013
1014struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
1015{
1016	/* Immutable target is implicitly a singleton */
1017	if (t->num_targets > 1 ||
1018	    !dm_target_is_immutable(t->targets[0].type))
1019		return NULL;
1020
1021	return t->targets;
1022}
1023
1024struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
1025{
1026	for (unsigned int i = 0; i < t->num_targets; i++) {
1027		struct dm_target *ti = dm_table_get_target(t, i);
1028
1029		if (dm_target_is_wildcard(ti->type))
1030			return ti;
1031	}
1032
1033	return NULL;
1034}
1035
1036bool dm_table_request_based(struct dm_table *t)
1037{
1038	return __table_type_request_based(dm_table_get_type(t));
1039}
1040
1041static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1042{
1043	enum dm_queue_mode type = dm_table_get_type(t);
1044	unsigned int per_io_data_size = 0, front_pad, io_front_pad;
1045	unsigned int min_pool_size = 0, pool_size;
1046	struct dm_md_mempools *pools;
1047	unsigned int bioset_flags = 0;
1048	bool mempool_needs_integrity = t->integrity_supported;
1049
1050	if (unlikely(type == DM_TYPE_NONE)) {
1051		DMERR("no table type is set, can't allocate mempools");
1052		return -EINVAL;
1053	}
1054
1055	pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
1056	if (!pools)
1057		return -ENOMEM;
1058
1059	if (type == DM_TYPE_REQUEST_BASED) {
1060		pool_size = dm_get_reserved_rq_based_ios();
1061		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
1062		goto init_bs;
1063	}
1064
1065	if (md->queue->limits.features & BLK_FEAT_POLL)
1066		bioset_flags |= BIOSET_PERCPU_CACHE;
1067
1068	for (unsigned int i = 0; i < t->num_targets; i++) {
1069		struct dm_target *ti = dm_table_get_target(t, i);
1070
1071		per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
1072		min_pool_size = max(min_pool_size, ti->num_flush_bios);
1073
1074		mempool_needs_integrity |= ti->mempool_needs_integrity;
1075	}
1076	pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
1077	front_pad = roundup(per_io_data_size,
1078		__alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
1079
1080	io_front_pad = roundup(per_io_data_size,
1081		__alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
1082	if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))
1083		goto out_free_pools;
1084	if (mempool_needs_integrity &&
1085	    bioset_integrity_create(&pools->io_bs, pool_size))
1086		goto out_free_pools;
1087init_bs:
1088	if (bioset_init(&pools->bs, pool_size, front_pad, 0))
1089		goto out_free_pools;
1090	if (mempool_needs_integrity &&
1091	    bioset_integrity_create(&pools->bs, pool_size))
1092		goto out_free_pools;
1093
1094	t->mempools = pools;
1095	return 0;
1096
1097out_free_pools:
1098	dm_free_md_mempools(pools);
1099	return -ENOMEM;
1100}
1101
1102static int setup_indexes(struct dm_table *t)
1103{
1104	int i;
1105	unsigned int total = 0;
1106	sector_t *indexes;
1107
1108	/* allocate the space for *all* the indexes */
1109	for (i = t->depth - 2; i >= 0; i--) {
1110		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1111		total += t->counts[i];
1112	}
1113
1114	indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
1115	if (!indexes)
1116		return -ENOMEM;
1117
1118	/* set up internal nodes, bottom-up */
1119	for (i = t->depth - 2; i >= 0; i--) {
1120		t->index[i] = indexes;
1121		indexes += (KEYS_PER_NODE * t->counts[i]);
1122		setup_btree_index(i, t);
1123	}
1124
1125	return 0;
1126}
1127
1128/*
1129 * Builds the btree to index the map.
1130 */
1131static int dm_table_build_index(struct dm_table *t)
1132{
1133	int r = 0;
1134	unsigned int leaf_nodes;
1135
1136	/* how many indexes will the btree have ? */
1137	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1138	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1139
1140	/* leaf layer has already been set up */
1141	t->counts[t->depth - 1] = leaf_nodes;
1142	t->index[t->depth - 1] = t->highs;
1143
1144	if (t->depth >= 2)
1145		r = setup_indexes(t);
1146
1147	return r;
1148}
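/*
 * Example (assuming KEYS_PER_NODE == 8, CHILDREN_PER_NODE == 9): a table
 * with 100 targets has leaf_nodes = 13 and depth = 1 + int_log(13, 9) = 3.
 * setup_indexes() then computes counts[1] = 2 and counts[0] = 1, allocates
 * the 3 internal nodes with a single kvcalloc(), and fills them bottom-up
 * from the highs of the level below via setup_btree_index().
 */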
1149
1150#ifdef CONFIG_BLK_INLINE_ENCRYPTION
1151
1152struct dm_crypto_profile {
1153	struct blk_crypto_profile profile;
1154	struct mapped_device *md;
1155};
1156
1157static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
1158				     sector_t start, sector_t len, void *data)
1159{
1160	const struct blk_crypto_key *key = data;
1161
1162	blk_crypto_evict_key(dev->bdev, key);
1163	return 0;
1164}
1165
1166/*
1167 * When an inline encryption key is evicted from a device-mapper device, evict
1168 * it from all the underlying devices.
1169 */
1170static int dm_keyslot_evict(struct blk_crypto_profile *profile,
1171			    const struct blk_crypto_key *key, unsigned int slot)
1172{
1173	struct mapped_device *md =
1174		container_of(profile, struct dm_crypto_profile, profile)->md;
1175	struct dm_table *t;
1176	int srcu_idx;
1177
1178	t = dm_get_live_table(md, &srcu_idx);
1179	if (!t)
1180		return 0;
1181
1182	for (unsigned int i = 0; i < t->num_targets; i++) {
1183		struct dm_target *ti = dm_table_get_target(t, i);
1184
1185		if (!ti->type->iterate_devices)
1186			continue;
1187		ti->type->iterate_devices(ti, dm_keyslot_evict_callback,
1188					  (void *)key);
1189	}
1190
1191	dm_put_live_table(md, srcu_idx);
1192	return 0;
1193}
1194
1195static int
1196device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
1197				     sector_t start, sector_t len, void *data)
1198{
1199	struct blk_crypto_profile *parent = data;
1200	struct blk_crypto_profile *child =
1201		bdev_get_queue(dev->bdev)->crypto_profile;
1202
1203	blk_crypto_intersect_capabilities(parent, child);
1204	return 0;
1205}
1206
1207void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
1208{
1209	struct dm_crypto_profile *dmcp = container_of(profile,
1210						      struct dm_crypto_profile,
1211						      profile);
1212
1213	if (!profile)
1214		return;
1215
1216	blk_crypto_profile_destroy(profile);
1217	kfree(dmcp);
1218}
1219
1220static void dm_table_destroy_crypto_profile(struct dm_table *t)
1221{
1222	dm_destroy_crypto_profile(t->crypto_profile);
1223	t->crypto_profile = NULL;
1224}
1225
1226/*
1227 * Constructs and initializes t->crypto_profile with a crypto profile that
1228 * represents the common set of crypto capabilities of the devices described by
1229 * the dm_table.  However, if the constructed crypto profile doesn't support all
1230 * crypto capabilities that are supported by the current mapped_device, it
1231 * returns an error instead, since we don't support removing crypto capabilities
1232 * on table changes.  Finally, if the constructed crypto profile is "empty" (has
1233 * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
1234 */
1235static int dm_table_construct_crypto_profile(struct dm_table *t)
1236{
1237	struct dm_crypto_profile *dmcp;
1238	struct blk_crypto_profile *profile;
1239	unsigned int i;
1240	bool empty_profile = true;
1241
1242	dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
1243	if (!dmcp)
1244		return -ENOMEM;
1245	dmcp->md = t->md;
1246
1247	profile = &dmcp->profile;
1248	blk_crypto_profile_init(profile, 0);
1249	profile->ll_ops.keyslot_evict = dm_keyslot_evict;
1250	profile->max_dun_bytes_supported = UINT_MAX;
1251	memset(profile->modes_supported, 0xFF,
1252	       sizeof(profile->modes_supported));
1253
1254	for (i = 0; i < t->num_targets; i++) {
1255		struct dm_target *ti = dm_table_get_target(t, i);
1256
1257		if (!dm_target_passes_crypto(ti->type)) {
1258			blk_crypto_intersect_capabilities(profile, NULL);
1259			break;
1260		}
1261		if (!ti->type->iterate_devices)
1262			continue;
1263		ti->type->iterate_devices(ti,
1264					  device_intersect_crypto_capabilities,
1265					  profile);
1266	}
1267
1268	if (t->md->queue &&
1269	    !blk_crypto_has_capabilities(profile,
1270					 t->md->queue->crypto_profile)) {
1271		DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
1272		dm_destroy_crypto_profile(profile);
1273		return -EINVAL;
1274	}
1275
1276	/*
1277	 * If the new profile doesn't actually support any crypto capabilities,
1278	 * we may as well represent it with a NULL profile.
1279	 */
1280	for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
1281		if (profile->modes_supported[i]) {
1282			empty_profile = false;
1283			break;
1284		}
1285	}
1286
1287	if (empty_profile) {
1288		dm_destroy_crypto_profile(profile);
1289		profile = NULL;
1290	}
1291
1292	/*
1293	 * t->crypto_profile is only set temporarily while the table is being
1294	 * set up, and it gets set to NULL after the profile has been
1295	 * transferred to the request_queue.
1296	 */
1297	t->crypto_profile = profile;
1298
1299	return 0;
1300}
1301
1302static void dm_update_crypto_profile(struct request_queue *q,
1303				     struct dm_table *t)
1304{
1305	if (!t->crypto_profile)
1306		return;
1307
1308	/* Make the crypto profile less restrictive. */
1309	if (!q->crypto_profile) {
1310		blk_crypto_register(t->crypto_profile, q);
1311	} else {
1312		blk_crypto_update_capabilities(q->crypto_profile,
1313					       t->crypto_profile);
1314		dm_destroy_crypto_profile(t->crypto_profile);
1315	}
1316	t->crypto_profile = NULL;
1317}
1318
1319#else /* CONFIG_BLK_INLINE_ENCRYPTION */
1320
1321static int dm_table_construct_crypto_profile(struct dm_table *t)
1322{
1323	return 0;
1324}
1325
1326void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
1327{
1328}
1329
1330static void dm_table_destroy_crypto_profile(struct dm_table *t)
1331{
1332}
1333
1334static void dm_update_crypto_profile(struct request_queue *q,
1335				     struct dm_table *t)
1336{
1337}
1338
1339#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1340
1341/*
1342 * Prepares the table for use by determining the type, building the
1343 * indices, constructing the crypto profile, and allocating mempools.
1344 */
1345int dm_table_complete(struct dm_table *t)
1346{
1347	int r;
1348
1349	r = dm_table_determine_type(t);
1350	if (r) {
1351		DMERR("unable to determine table type");
1352		return r;
1353	}
1354
1355	r = dm_table_build_index(t);
1356	if (r) {
1357		DMERR("unable to build btrees");
1358		return r;
1359	}
1360
1361	r = dm_table_construct_crypto_profile(t);
1362	if (r) {
1363		DMERR("could not construct crypto profile.");
1364		return r;
1365	}
1366
1367	r = dm_table_alloc_md_mempools(t, t->md);
1368	if (r)
1369		DMERR("unable to allocate mempools");
1370
1371	return r;
1372}
1373
1374static DEFINE_MUTEX(_event_lock);
1375void dm_table_event_callback(struct dm_table *t,
1376			     void (*fn)(void *), void *context)
1377{
1378	mutex_lock(&_event_lock);
1379	t->event_fn = fn;
1380	t->event_context = context;
1381	mutex_unlock(&_event_lock);
1382}
1383
1384void dm_table_event(struct dm_table *t)
1385{
1386	mutex_lock(&_event_lock);
1387	if (t->event_fn)
1388		t->event_fn(t->event_context);
1389	mutex_unlock(&_event_lock);
1390}
1391EXPORT_SYMBOL(dm_table_event);
1392
1393inline sector_t dm_table_get_size(struct dm_table *t)
1394{
1395	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1396}
1397EXPORT_SYMBOL(dm_table_get_size);
1398
1399/*
1400 * Search the btree for the correct target.
1401 *
1402 * Caller should check returned pointer for NULL
1403 * to trap I/O beyond end of device.
1404 */
1405struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1406{
1407	unsigned int l, n = 0, k = 0;
1408	sector_t *node;
1409
1410	if (unlikely(sector >= dm_table_get_size(t)))
1411		return NULL;
1412
1413	for (l = 0; l < t->depth; l++) {
1414		n = get_child(n, k);
1415		node = get_node(t, l, n);
1416
1417		for (k = 0; k < KEYS_PER_NODE; k++)
1418			if (node[k] >= sector)
1419				break;
1420	}
1421
1422	return &t->targets[(KEYS_PER_NODE * n) + k];
1423}
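/*
 * Lookup sketch (e.g. depth 3, KEYS_PER_NODE == 8): starting from the
 * root (l = 0, n = 0, k = 0), each level scans one node for the first
 * key >= sector and descends via get_child(n, k); at the leaf level the
 * final (n, k) indexes straight into t->targets[] as KEYS_PER_NODE * n + k.
 * Sectors beyond the table size are caught up front and return NULL.
 */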
1424
1425/*
1426 * type->iterate_devices() should be called when the sanity check needs to
1427 * iterate and check all underlying data devices. iterate_devices() will
1428 * iterate all underlying data devices until it encounters a non-zero return
1429 * code, returned either by the supplied iterate_devices_callout_fn or by
1430 * iterate_devices() itself internally.
1431 *
1432 * For some target types (e.g. dm-stripe), one call of iterate_devices() may
1433 * iterate multiple underlying devices internally, in which case a non-zero
1434 * return code returned by iterate_devices_callout_fn will stop the iteration
1435 * early.
1436 *
1437 * Cases requiring _any_ underlying device to support some kind of attribute
1438 * should use an iteration structure like dm_table_any_dev_attr(), or call
1439 * it directly. @func should handle the semantics of positive examples, e.g.
1440 * being capable of something.
1441 *
1442 * Cases requiring _all_ underlying devices to support some kind of attribute
1443 * should use an iteration structure like dm_table_supports_nowait() or
1444 * dm_table_supports_discards(), or introduce a dm_table_all_devs_attr() that
1445 * uses an @anti_func handling the semantics of counter examples, e.g. not
1446 * being capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
1447 */
1448static bool dm_table_any_dev_attr(struct dm_table *t,
1449				  iterate_devices_callout_fn func, void *data)
1450{
1451	for (unsigned int i = 0; i < t->num_targets; i++) {
1452		struct dm_target *ti = dm_table_get_target(t, i);
1453
1454		if (ti->type->iterate_devices &&
1455		    ti->type->iterate_devices(ti, func, data))
1456			return true;
1457	}
1458
1459	return false;
1460}
1461
1462static int count_device(struct dm_target *ti, struct dm_dev *dev,
1463			sector_t start, sector_t len, void *data)
1464{
1465	unsigned int *num_devices = data;
1466
1467	(*num_devices)++;
1468
1469	return 0;
1470}
1471
1472/*
1473 * Check whether a table has no data devices attached using each
1474 * target's iterate_devices method.
1475 * Returns false if the result is unknown because a target doesn't
1476 * support iterate_devices.
1477 */
1478bool dm_table_has_no_data_devices(struct dm_table *t)
1479{
1480	for (unsigned int i = 0; i < t->num_targets; i++) {
1481		struct dm_target *ti = dm_table_get_target(t, i);
1482		unsigned int num_devices = 0;
1483
1484		if (!ti->type->iterate_devices)
1485			return false;
1486
1487		ti->type->iterate_devices(ti, count_device, &num_devices);
1488		if (num_devices)
1489			return false;
1490	}
1491
1492	return true;
1493}
1494
1495static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
1496			    sector_t start, sector_t len, void *data)
1497{
1498	bool *zoned = data;
1499
1500	return bdev_is_zoned(dev->bdev) != *zoned;
1501}
1502
1503static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1504				 sector_t start, sector_t len, void *data)
1505{
1506	return bdev_is_zoned(dev->bdev);
1507}
1508
1509/*
1510 * Check the device zoned model based on the target feature flag. If the target
1511 * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
1512 * also accepted but all devices must have the same zoned model. If the target
1513 * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
1514 * zoned model with all zoned devices having the same zone size.
1515 */
1516static bool dm_table_supports_zoned(struct dm_table *t, bool zoned)
1517{
1518	for (unsigned int i = 0; i < t->num_targets; i++) {
1519		struct dm_target *ti = dm_table_get_target(t, i);
1520
1521		/*
1522		 * For the wildcard target (dm-error), if we do not have a
1523		 * backing device, we must always return false. If we have a
1524		 * backing device, the result must depend on checking zoned
1525		 * model, like for any other target. So for this, check directly
1526		 * if the target backing device is zoned as we get "false" when
1527		 * dm-error was set without a backing device.
1528		 */
1529		if (dm_target_is_wildcard(ti->type) &&
1530		    !ti->type->iterate_devices(ti, device_is_zoned_model, NULL))
1531			return false;
1532
1533		if (dm_target_supports_zoned_hm(ti->type)) {
1534			if (!ti->type->iterate_devices ||
1535			    ti->type->iterate_devices(ti, device_not_zoned,
1536						      &zoned))
1537				return false;
1538		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
1539			if (zoned)
1540				return false;
1541		}
1542	}
1543
1544	return true;
1545}
1546
1547static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
1548					   sector_t start, sector_t len, void *data)
1549{
1550	unsigned int *zone_sectors = data;
1551
1552	if (!bdev_is_zoned(dev->bdev))
1553		return 0;
1554	return bdev_zone_sectors(dev->bdev) != *zone_sectors;
1555}
1556
1557/*
1558 * Check consistency of zoned model and zone sectors across all targets. For
1559 * zone sectors, if the destination device is a zoned block device, it shall
1560 * have the specified zone_sectors.
1561 */
1562static int validate_hardware_zoned(struct dm_table *t, bool zoned,
1563				   unsigned int zone_sectors)
1564{
1565	if (!zoned)
1566		return 0;
1567
1568	if (!dm_table_supports_zoned(t, zoned)) {
1569		DMERR("%s: zoned model is not consistent across all devices",
1570		      dm_device_name(t->md));
1571		return -EINVAL;
1572	}
1573
1574	/* Check zone size validity and compatibility */
1575	if (!zone_sectors || !is_power_of_2(zone_sectors))
1576		return -EINVAL;
1577
1578	if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
1579		DMERR("%s: zone sectors is not consistent across all zoned devices",
1580		      dm_device_name(t->md));
1581		return -EINVAL;
1582	}
1583
1584	return 0;
1585}
1586
1587/*
1588 * Establish the new table's queue_limits and validate them.
1589 */
1590int dm_calculate_queue_limits(struct dm_table *t,
1591			      struct queue_limits *limits)
1592{
1593	struct queue_limits ti_limits;
1594	unsigned int zone_sectors = 0;
1595	bool zoned = false;
1596
1597	dm_set_stacking_limits(limits);
1598
1599	t->integrity_supported = true;
1600	for (unsigned int i = 0; i < t->num_targets; i++) {
1601		struct dm_target *ti = dm_table_get_target(t, i);
1602
1603		if (!dm_target_passes_integrity(ti->type))
1604			t->integrity_supported = false;
1605	}
1606
1607	for (unsigned int i = 0; i < t->num_targets; i++) {
1608		struct dm_target *ti = dm_table_get_target(t, i);
1609
1610		dm_set_stacking_limits(&ti_limits);
1611
1612		if (!ti->type->iterate_devices) {
1613			/* Set I/O hints portion of queue limits */
1614			if (ti->type->io_hints)
1615				ti->type->io_hints(ti, &ti_limits);
1616			goto combine_limits;
1617		}
1618
1619		/*
1620		 * Combine queue limits of all the devices this target uses.
1621		 */
1622		ti->type->iterate_devices(ti, dm_set_device_limits,
1623					  &ti_limits);
1624
1625		if (!zoned && (ti_limits.features & BLK_FEAT_ZONED)) {
1626			/*
1627			 * After stacking all limits, validate all devices
1628			 * in table support this zoned model and zone sectors.
1629			 */
1630			zoned = (ti_limits.features & BLK_FEAT_ZONED);
1631			zone_sectors = ti_limits.chunk_sectors;
1632		}
1633
1634		/* Set I/O hints portion of queue limits */
1635		if (ti->type->io_hints)
1636			ti->type->io_hints(ti, &ti_limits);
1637
1638		/*
1639		 * Check each device area is consistent with the target's
1640		 * overall queue limits.
1641		 */
1642		if (ti->type->iterate_devices(ti, device_area_is_invalid,
1643					      &ti_limits))
1644			return -EINVAL;
1645
1646combine_limits:
1647		/*
1648		 * Merge this target's queue limits into the overall limits
1649		 * for the table.
1650		 */
1651		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1652			DMWARN("%s: adding target device (start sect %llu len %llu) "
1653			       "caused an alignment inconsistency",
1654			       dm_device_name(t->md),
1655			       (unsigned long long) ti->begin,
1656			       (unsigned long long) ti->len);
1657
1658		if (t->integrity_supported ||
1659		    dm_target_has_integrity(ti->type)) {
1660			if (!queue_limits_stack_integrity(limits, &ti_limits)) {
1661				DMWARN("%s: adding target device (start sect %llu len %llu) "
1662				       "disabled integrity support due to incompatibility",
1663				       dm_device_name(t->md),
1664				       (unsigned long long) ti->begin,
1665				       (unsigned long long) ti->len);
1666				t->integrity_supported = false;
1667			}
1668		}
1669	}
1670
1671	/*
1672	 * Verify that the zoned model and zone sectors, as determined before
1673	 * any .io_hints override, are the same across all devices in the table.
1674	 * - this is especially relevant if .io_hints is emulating a disk-managed
1675	 *   zoned model on host-managed zoned block devices.
1676	 * BUT...
1677	 */
1678	if (limits->features & BLK_FEAT_ZONED) {
1679		/*
1680		 * ...IF the above limits stacking determined a zoned model
1681		 * validate that all of the table's devices conform to it.
1682		 */
1683		zoned = limits->features & BLK_FEAT_ZONED;
1684		zone_sectors = limits->chunk_sectors;
1685	}
1686	if (validate_hardware_zoned(t, zoned, zone_sectors))
1687		return -EINVAL;
1688
1689	return validate_hardware_logical_block_alignment(t, limits);
1690}
1691
1692/*
1693 * Check if a target requires flush support even if none of the underlying
1694 * devices need it (e.g. to persist target-specific metadata).
1695 */
1696static bool dm_table_supports_flush(struct dm_table *t)
1697{
1698	for (unsigned int i = 0; i < t->num_targets; i++) {
1699		struct dm_target *ti = dm_table_get_target(t, i);
1700
1701		if (ti->num_flush_bios && ti->flush_supported)
1702			return true;
1703	}
1704
1705	return false;
1706}
1707
1708static int device_dax_write_cache_enabled(struct dm_target *ti,
1709					  struct dm_dev *dev, sector_t start,
1710					  sector_t len, void *data)
1711{
1712	struct dax_device *dax_dev = dev->dax_dev;
1713
1714	if (!dax_dev)
1715		return false;
1716
1717	if (dax_write_cache_enabled(dax_dev))
1718		return true;
1719	return false;
1720}
1721
1722static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1723					   sector_t start, sector_t len, void *data)
1724{
1725	struct request_queue *q = bdev_get_queue(dev->bdev);
1726
1727	return !q->limits.max_write_zeroes_sectors;
1728}
1729
1730static bool dm_table_supports_write_zeroes(struct dm_table *t)
1731{
1732	for (unsigned int i = 0; i < t->num_targets; i++) {
1733		struct dm_target *ti = dm_table_get_target(t, i);
1734
1735		if (!ti->num_write_zeroes_bios)
1736			return false;
1737
1738		if (!ti->type->iterate_devices ||
1739		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1740			return false;
1741	}
1742
1743	return true;
1744}
1745
1746static bool dm_table_supports_nowait(struct dm_table *t)
1747{
1748	for (unsigned int i = 0; i < t->num_targets; i++) {
1749		struct dm_target *ti = dm_table_get_target(t, i);
1750
1751		if (!dm_target_supports_nowait(ti->type))
1752			return false;
1753	}
1754
1755	return true;
1756}
1757
1758static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1759				      sector_t start, sector_t len, void *data)
1760{
1761	return !bdev_max_discard_sectors(dev->bdev);
1762}
1763
1764static bool dm_table_supports_discards(struct dm_table *t)
1765{
1766	for (unsigned int i = 0; i < t->num_targets; i++) {
1767		struct dm_target *ti = dm_table_get_target(t, i);
1768
1769		if (!ti->num_discard_bios)
1770			return false;
1771
1772		/*
1773		 * Either the target provides discard support (as implied by setting
1774		 * 'discards_supported') or it relies on _all_ data devices having
1775		 * discard support.
1776		 */
1777		if (!ti->discards_supported &&
1778		    (!ti->type->iterate_devices ||
1779		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1780			return false;
1781	}
1782
1783	return true;
1784}
1785
1786static int device_not_secure_erase_capable(struct dm_target *ti,
1787					   struct dm_dev *dev, sector_t start,
1788					   sector_t len, void *data)
1789{
1790	return !bdev_max_secure_erase_sectors(dev->bdev);
1791}
1792
1793static bool dm_table_supports_secure_erase(struct dm_table *t)
1794{
1795	for (unsigned int i = 0; i < t->num_targets; i++) {
1796		struct dm_target *ti = dm_table_get_target(t, i);
1797
1798		if (!ti->num_secure_erase_bios)
1799			return false;
1800
1801		if (!ti->type->iterate_devices ||
1802		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1803			return false;
1804	}
1805
1806	return true;
1807}
1808
1809int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1810			      struct queue_limits *limits)
1811{
1812	int r;
1813
1814	if (!dm_table_supports_nowait(t))
1815		limits->features &= ~BLK_FEAT_NOWAIT;
1816
1817	/*
1818	 * The current polling implementation does not support request-based
1819	 * stacking.
1820	 */
1821	if (!__table_type_bio_based(t->type))
1822		limits->features &= ~BLK_FEAT_POLL;
1823
1824	if (!dm_table_supports_discards(t)) {
1825		limits->max_hw_discard_sectors = 0;
1826		limits->discard_granularity = 0;
1827		limits->discard_alignment = 0;
1828	}
1829
1830	if (!dm_table_supports_write_zeroes(t))
1831		limits->max_write_zeroes_sectors = 0;
1832
1833	if (!dm_table_supports_secure_erase(t))
1834		limits->max_secure_erase_sectors = 0;
1835
1836	if (dm_table_supports_flush(t))
1837		limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
1838
1839	if (dm_table_supports_dax(t, device_not_dax_capable)) {
1840		limits->features |= BLK_FEAT_DAX;
1841		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
1842			set_dax_synchronous(t->md->dax_dev);
1843	} else
1844		limits->features &= ~BLK_FEAT_DAX;
1845
1846	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
1847		dax_write_cache(t->md->dax_dev, true);
1848
1849	/* For a zoned table, setup the zone related queue attributes. */
1850	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
1851	    (limits->features & BLK_FEAT_ZONED)) {
1852		r = dm_set_zones_restrictions(t, q, limits);
1853		if (r)
1854			return r;
1855	}
1856
1857	r = queue_limits_set(q, limits);
1858	if (r)
1859		return r;
1860
1861	/*
1862	 * Now that the limits are set, check the zones mapped by the table
1863	 * and setup the resources for zone append emulation if necessary.
1864	 */
1865	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
1866	    (limits->features & BLK_FEAT_ZONED)) {
1867		r = dm_revalidate_zones(t, q);
1868		if (r)
1869			return r;
1870	}
1871
1872	dm_update_crypto_profile(q, t);
1873	return 0;
1874}
1875
1876struct list_head *dm_table_get_devices(struct dm_table *t)
1877{
1878	return &t->devices;
1879}
1880
1881blk_mode_t dm_table_get_mode(struct dm_table *t)
1882{
1883	return t->mode;
1884}
1885EXPORT_SYMBOL(dm_table_get_mode);
1886
1887enum suspend_mode {
1888	PRESUSPEND,
1889	PRESUSPEND_UNDO,
1890	POSTSUSPEND,
1891};
1892
1893static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
1894{
1895	lockdep_assert_held(&t->md->suspend_lock);
1896
1897	for (unsigned int i = 0; i < t->num_targets; i++) {
1898		struct dm_target *ti = dm_table_get_target(t, i);
1899
1900		switch (mode) {
1901		case PRESUSPEND:
1902			if (ti->type->presuspend)
1903				ti->type->presuspend(ti);
1904			break;
1905		case PRESUSPEND_UNDO:
1906			if (ti->type->presuspend_undo)
1907				ti->type->presuspend_undo(ti);
1908			break;
1909		case POSTSUSPEND:
1910			if (ti->type->postsuspend)
1911				ti->type->postsuspend(ti);
1912			break;
1913		}
1914	}
1915}
1916
1917void dm_table_presuspend_targets(struct dm_table *t)
1918{
1919	if (!t)
1920		return;
1921
1922	suspend_targets(t, PRESUSPEND);
1923}
1924
1925void dm_table_presuspend_undo_targets(struct dm_table *t)
1926{
1927	if (!t)
1928		return;
1929
1930	suspend_targets(t, PRESUSPEND_UNDO);
1931}
1932
1933void dm_table_postsuspend_targets(struct dm_table *t)
1934{
1935	if (!t)
1936		return;
1937
1938	suspend_targets(t, POSTSUSPEND);
1939}
1940
1941int dm_table_resume_targets(struct dm_table *t)
1942{
1943	unsigned int i;
1944	int r = 0;
1945
1946	lockdep_assert_held(&t->md->suspend_lock);
1947
1948	for (i = 0; i < t->num_targets; i++) {
1949		struct dm_target *ti = dm_table_get_target(t, i);
1950
1951		if (!ti->type->preresume)
1952			continue;
1953
1954		r = ti->type->preresume(ti);
1955		if (r) {
1956			DMERR("%s: %s: preresume failed, error = %d",
1957			      dm_device_name(t->md), ti->type->name, r);
1958			return r;
1959		}
1960	}
1961
1962	for (i = 0; i < t->num_targets; i++) {
1963		struct dm_target *ti = dm_table_get_target(t, i);
1964
1965		if (ti->type->resume)
1966			ti->type->resume(ti);
1967	}
1968
1969	return 0;
1970}
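/*
 * Editorial note (not part of the original file): the hooks above run in a
 * fixed order around a table swap. A target that implements all of them
 * would see, roughly:
 *
 *	presuspend       - may be rolled back by presuspend_undo on failure
 *	postsuspend      - I/O has been quiesced by this point
 *	preresume        - may fail and abort the resume (see above)
 *	resume           - the new table is live
 *
 * This ordering is inferred from dm_table_presuspend_targets(),
 * dm_table_postsuspend_targets() and dm_table_resume_targets() here; the
 * exact call sites live in dm.c.
 */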
1971
1972struct mapped_device *dm_table_get_md(struct dm_table *t)
1973{
1974	return t->md;
1975}
1976EXPORT_SYMBOL(dm_table_get_md);
1977
1978const char *dm_table_device_name(struct dm_table *t)
1979{
1980	return dm_device_name(t->md);
1981}
1982EXPORT_SYMBOL_GPL(dm_table_device_name);
1983
1984void dm_table_run_md_queue_async(struct dm_table *t)
1985{
1986	if (!dm_table_request_based(t))
1987		return;
1988
1989	if (t->md->queue)
1990		blk_mq_run_hw_queues(t->md->queue, true);
1991}
1992EXPORT_SYMBOL(dm_table_run_md_queue_async);
1993
v5.14.15
 
   1/*
   2 * Copyright (C) 2001 Sistina Software (UK) Limited.
   3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm-core.h"
 
   9
  10#include <linux/module.h>
  11#include <linux/vmalloc.h>
  12#include <linux/blkdev.h>
 
  13#include <linux/namei.h>
  14#include <linux/ctype.h>
  15#include <linux/string.h>
  16#include <linux/slab.h>
  17#include <linux/interrupt.h>
  18#include <linux/mutex.h>
  19#include <linux/delay.h>
  20#include <linux/atomic.h>
  21#include <linux/blk-mq.h>
  22#include <linux/mount.h>
  23#include <linux/dax.h>
  24
  25#define DM_MSG_PREFIX "table"
  26
  27#define NODE_SIZE L1_CACHE_BYTES
  28#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
  29#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
  30
  31/*
  32 * Similar to ceiling(log_size(n))
  33 */
  34static unsigned int int_log(unsigned int n, unsigned int base)
  35{
  36	int result = 0;
  37
  38	while (n > 1) {
  39		n = dm_div_up(n, base);
  40		result++;
  41	}
  42
  43	return result;
  44}
  45
  46/*
  47 * Calculate the index of the child node of the n'th node k'th key.
  48 */
  49static inline unsigned int get_child(unsigned int n, unsigned int k)
  50{
  51	return (n * CHILDREN_PER_NODE) + k;
  52}
  53
  54/*
  55 * Return the n'th node of level l from table t.
  56 */
  57static inline sector_t *get_node(struct dm_table *t,
  58				 unsigned int l, unsigned int n)
  59{
  60	return t->index[l] + (n * KEYS_PER_NODE);
  61}
  62
  63/*
  64 * Return the highest key that you could lookup from the n'th
  65 * node on level l of the btree.
  66 */
  67static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
  68{
  69	for (; l < t->depth - 1; l++)
  70		n = get_child(n, CHILDREN_PER_NODE - 1);
  71
  72	if (n >= t->counts[l])
  73		return (sector_t) - 1;
  74
  75	return get_node(t, l, n)[KEYS_PER_NODE - 1];
  76}
  77
  78/*
  79 * Fills in a level of the btree based on the highs of the level
  80 * below it.
  81 */
  82static int setup_btree_index(unsigned int l, struct dm_table *t)
  83{
  84	unsigned int n, k;
  85	sector_t *node;
  86
  87	for (n = 0U; n < t->counts[l]; n++) {
  88		node = get_node(t, l, n);
  89
  90		for (k = 0U; k < KEYS_PER_NODE; k++)
  91			node[k] = high(t, l + 1, get_child(n, k));
  92	}
  93
  94	return 0;
  95}
  96
  97/*
  98 * highs, and targets are managed as dynamic arrays during a
  99 * table load.
 100 */
 101static int alloc_targets(struct dm_table *t, unsigned int num)
 102{
 103	sector_t *n_highs;
 104	struct dm_target *n_targets;
 105
 106	/*
 107	 * Allocate both the target array and offset array at once.
 108	 */
 109	n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
 110			   GFP_KERNEL);
 111	if (!n_highs)
 112		return -ENOMEM;
 113
 114	n_targets = (struct dm_target *) (n_highs + num);
 115
 116	memset(n_highs, -1, sizeof(*n_highs) * num);
 117	kvfree(t->highs);
 118
 119	t->num_allocated = num;
 120	t->highs = n_highs;
 121	t->targets = n_targets;
 122
 123	return 0;
 124}
 125
 126int dm_table_create(struct dm_table **result, fmode_t mode,
 127		    unsigned num_targets, struct mapped_device *md)
 128{
 129	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
 130
 131	if (!t)
 132		return -ENOMEM;
 133
 134	INIT_LIST_HEAD(&t->devices);
 
 135
 136	if (!num_targets)
 137		num_targets = KEYS_PER_NODE;
 138
 139	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
 140
 141	if (!num_targets) {
 142		kfree(t);
 143		return -ENOMEM;
 144	}
 145
 146	if (alloc_targets(t, num_targets)) {
 147		kfree(t);
 148		return -ENOMEM;
 149	}
 150
 151	t->type = DM_TYPE_NONE;
 152	t->mode = mode;
 153	t->md = md;
 
 154	*result = t;
 155	return 0;
 156}
 157
 158static void free_devices(struct list_head *devices, struct mapped_device *md)
 159{
 160	struct list_head *tmp, *next;
 161
 162	list_for_each_safe(tmp, next, devices) {
 163		struct dm_dev_internal *dd =
 164		    list_entry(tmp, struct dm_dev_internal, list);
 165		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
 166		       dm_device_name(md), dd->dm_dev->name);
 167		dm_put_table_device(md, dd->dm_dev);
 168		kfree(dd);
 169	}
 170}
 171
 172static void dm_table_destroy_keyslot_manager(struct dm_table *t);
 173
 174void dm_table_destroy(struct dm_table *t)
 175{
 176	unsigned int i;
 177
 178	if (!t)
 179		return;
 180
 181	/* free the indexes */
 182	if (t->depth >= 2)
 183		kvfree(t->index[t->depth - 2]);
 184
 185	/* free the targets */
 186	for (i = 0; i < t->num_targets; i++) {
 187		struct dm_target *tgt = t->targets + i;
 188
 189		if (tgt->type->dtr)
 190			tgt->type->dtr(tgt);
 191
 192		dm_put_target_type(tgt->type);
 193	}
 194
 195	kvfree(t->highs);
 196
 197	/* free the device list */
 198	free_devices(&t->devices, t->md);
 199
 200	dm_free_md_mempools(t->mempools);
 201
 202	dm_table_destroy_keyslot_manager(t);
 203
 204	kfree(t);
 205}
 206
 207/*
 208 * See if we've already got a device in the list.
 209 */
 210static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
 211{
 212	struct dm_dev_internal *dd;
 213
 214	list_for_each_entry (dd, l, list)
 215		if (dd->dm_dev->bdev->bd_dev == dev)
 216			return dd;
 217
 218	return NULL;
 219}
 220
 221/*
  222 * If possible, this checks whether an area of a destination device is invalid.
 223 */
 224static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 225				  sector_t start, sector_t len, void *data)
 226{
 227	struct queue_limits *limits = data;
 228	struct block_device *bdev = dev->bdev;
 229	sector_t dev_size =
 230		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
 231	unsigned short logical_block_size_sectors =
 232		limits->logical_block_size >> SECTOR_SHIFT;
 233	char b[BDEVNAME_SIZE];
 234
 235	if (!dev_size)
 236		return 0;
 237
 238	if ((start >= dev_size) || (start + len > dev_size)) {
 239		DMWARN("%s: %s too small for target: "
 240		       "start=%llu, len=%llu, dev_size=%llu",
 241		       dm_device_name(ti->table->md), bdevname(bdev, b),
 242		       (unsigned long long)start,
 243		       (unsigned long long)len,
 244		       (unsigned long long)dev_size);
 245		return 1;
 246	}
 247
 248	/*
 249	 * If the target is mapped to zoned block device(s), check
 250	 * that the zones are not partially mapped.
 251	 */
 252	if (bdev_is_zoned(bdev)) {
 253		unsigned int zone_sectors = bdev_zone_sectors(bdev);
 254
 255		if (start & (zone_sectors - 1)) {
 256			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
 257			       dm_device_name(ti->table->md),
 258			       (unsigned long long)start,
 259			       zone_sectors, bdevname(bdev, b));
 260			return 1;
 261		}
 262
 263		/*
 264		 * Note: The last zone of a zoned block device may be smaller
 265		 * than other zones. So for a target mapping the end of a
 266		 * zoned block device with such a zone, len would not be zone
 267		 * aligned. We do not allow such last smaller zone to be part
 268		 * of the mapping here to ensure that mappings with multiple
 269		 * devices do not end up with a smaller zone in the middle of
 270		 * the sector range.
 271		 */
 272		if (len & (zone_sectors - 1)) {
 273			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
 274			       dm_device_name(ti->table->md),
 275			       (unsigned long long)len,
 276			       zone_sectors, bdevname(bdev, b));
 277			return 1;
 278		}
 279	}
 280
 281	if (logical_block_size_sectors <= 1)
 282		return 0;
 283
 284	if (start & (logical_block_size_sectors - 1)) {
 285		DMWARN("%s: start=%llu not aligned to h/w "
 286		       "logical block size %u of %s",
 287		       dm_device_name(ti->table->md),
 288		       (unsigned long long)start,
 289		       limits->logical_block_size, bdevname(bdev, b));
 290		return 1;
 291	}
 292
 293	if (len & (logical_block_size_sectors - 1)) {
 294		DMWARN("%s: len=%llu not aligned to h/w "
 295		       "logical block size %u of %s",
 296		       dm_device_name(ti->table->md),
 297		       (unsigned long long)len,
 298		       limits->logical_block_size, bdevname(bdev, b));
 299		return 1;
 300	}
 301
 302	return 0;
 303}
 304
 305/*
 306 * This upgrades the mode on an already open dm_dev, being
 307 * careful to leave things as they were if we fail to reopen the
 308 * device and not to touch the existing bdev field in case
 309 * it is accessed concurrently.
 310 */
 311static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
 312			struct mapped_device *md)
 313{
 314	int r;
 315	struct dm_dev *old_dev, *new_dev;
 316
 317	old_dev = dd->dm_dev;
 318
 319	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
 320				dd->dm_dev->mode | new_mode, &new_dev);
 321	if (r)
 322		return r;
 323
 324	dd->dm_dev = new_dev;
 325	dm_put_table_device(md, old_dev);
 326
 327	return 0;
 328}
 329
 330/*
 331 * Convert the path to a device
 
 332 */
 333dev_t dm_get_dev_t(const char *path)
 334{
 
 335	dev_t dev;
 
 
 336
 337	if (lookup_bdev(path, &dev))
 338		dev = name_to_dev_t(path);
 339	return dev;
 340}
 341EXPORT_SYMBOL_GPL(dm_get_dev_t);
 342
 343/*
 344 * Add a device to the list, or just increment the usage count if
 345 * it's already present.
 346 */
 347int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 348		  struct dm_dev **result)
 349{
 350	int r;
 351	dev_t dev;
 352	unsigned int major, minor;
 353	char dummy;
 354	struct dm_dev_internal *dd;
 355	struct dm_table *t = ti->table;
 356
 357	BUG_ON(!t);
 358
 359	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
 360		/* Extract the major/minor numbers */
 361		dev = MKDEV(major, minor);
 362		if (MAJOR(dev) != major || MINOR(dev) != minor)
 363			return -EOVERFLOW;
 364	} else {
 365		dev = dm_get_dev_t(path);
 366		if (!dev)
 367			return -ENODEV;
 368	}
 369
 370	dd = find_device(&t->devices, dev);
 371	if (!dd) {
 372		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
 373		if (!dd)
 374			return -ENOMEM;
 
 
 375
 376		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
 
 377			kfree(dd);
 378			return r;
 379		}
 380
 381		refcount_set(&dd->count, 1);
 382		list_add(&dd->list, &t->devices);
 383		goto out;
 384
 385	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
 386		r = upgrade_mode(dd, mode, t->md);
 387		if (r)
 388			return r;
 389	}
 390	refcount_inc(&dd->count);
 391out:
 
 392	*result = dd->dm_dev;
 393	return 0;
 394}
 395EXPORT_SYMBOL(dm_get_device);
 396
 397static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 398				sector_t start, sector_t len, void *data)
 399{
 400	struct queue_limits *limits = data;
 401	struct block_device *bdev = dev->bdev;
 402	struct request_queue *q = bdev_get_queue(bdev);
 403	char b[BDEVNAME_SIZE];
 404
 405	if (unlikely(!q)) {
 406		DMWARN("%s: Cannot set limits for nonexistent device %s",
 407		       dm_device_name(ti->table->md), bdevname(bdev, b));
 408		return 0;
 409	}
 410
 411	if (blk_stack_limits(limits, &q->limits,
 412			get_start_sect(bdev) + start) < 0)
 413		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
 414		       "physical_block_size=%u, logical_block_size=%u, "
 415		       "alignment_offset=%u, start=%llu",
 416		       dm_device_name(ti->table->md), bdevname(bdev, b),
 417		       q->limits.physical_block_size,
 418		       q->limits.logical_block_size,
 419		       q->limits.alignment_offset,
 420		       (unsigned long long) start << SECTOR_SHIFT);
 421	return 0;
 422}
 423
 424/*
 425 * Decrement a device's use count and remove it if necessary.
 426 */
 427void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 428{
 429	int found = 0;
 430	struct list_head *devices = &ti->table->devices;
 
 431	struct dm_dev_internal *dd;
 432
 
 
 433	list_for_each_entry(dd, devices, list) {
 434		if (dd->dm_dev == d) {
 435			found = 1;
 436			break;
 437		}
 438	}
 439	if (!found) {
 440		DMWARN("%s: device %s not in table devices list",
 441		       dm_device_name(ti->table->md), d->name);
 442		return;
 443	}
 444	if (refcount_dec_and_test(&dd->count)) {
 445		dm_put_table_device(ti->table->md, d);
 446		list_del(&dd->list);
 447		kfree(dd);
 448	}
 449}
 450EXPORT_SYMBOL(dm_put_device);
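/*
 * Editorial example (not part of the original file): the usual pairing of
 * dm_get_device() and dm_put_device() in a target, loosely modelled on
 * dm-linear. "example_ctx" and its field names are illustrative only.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct example_ctx *ec = kmalloc(sizeof(*ec), GFP_KERNEL);
 *
 *		if (!ec)
 *			return -ENOMEM;
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */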
 451
 452/*
 453 * Checks to see if the target joins onto the end of the table.
 454 */
 455static int adjoin(struct dm_table *table, struct dm_target *ti)
 456{
 457	struct dm_target *prev;
 458
 459	if (!table->num_targets)
 460		return !ti->begin;
 461
 462	prev = &table->targets[table->num_targets - 1];
 463	return (ti->begin == (prev->begin + prev->len));
 464}
 465
 466/*
 467 * Used to dynamically allocate the arg array.
 468 *
 469 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 470 * process messages even if some device is suspended. These messages have a
 471 * small fixed number of arguments.
 472 *
 473 * On the other hand, dm-switch needs to process bulk data using messages and
 474 * excessive use of GFP_NOIO could cause trouble.
 475 */
 476static char **realloc_argv(unsigned *size, char **old_argv)
 477{
 478	char **argv;
 479	unsigned new_size;
 480	gfp_t gfp;
 481
 482	if (*size) {
 483		new_size = *size * 2;
 484		gfp = GFP_KERNEL;
 485	} else {
 486		new_size = 8;
 487		gfp = GFP_NOIO;
 488	}
 489	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
 490	if (argv && old_argv) {
 491		memcpy(argv, old_argv, *size * sizeof(*argv));
 492		*size = new_size;
 493	}
 494
 495	kfree(old_argv);
 496	return argv;
 497}
 498
 499/*
 500 * Destructively splits up the argument list to pass to ctr.
 501 */
 502int dm_split_args(int *argc, char ***argvp, char *input)
 503{
 504	char *start, *end = input, *out, **argv = NULL;
 505	unsigned array_size = 0;
 506
 507	*argc = 0;
 508
 509	if (!input) {
 510		*argvp = NULL;
 511		return 0;
 512	}
 513
 514	argv = realloc_argv(&array_size, argv);
 515	if (!argv)
 516		return -ENOMEM;
 517
 518	while (1) {
 519		/* Skip whitespace */
 520		start = skip_spaces(end);
 521
 522		if (!*start)
 523			break;	/* success, we hit the end */
 524
 525		/* 'out' is used to remove any back-quotes */
 526		end = out = start;
 527		while (*end) {
 528			/* Everything apart from '\0' can be quoted */
 529			if (*end == '\\' && *(end + 1)) {
 530				*out++ = *(end + 1);
 531				end += 2;
 532				continue;
 533			}
 534
 535			if (isspace(*end))
 536				break;	/* end of token */
 537
 538			*out++ = *end++;
 539		}
 540
 541		/* have we already filled the array ? */
 542		if ((*argc + 1) > array_size) {
 543			argv = realloc_argv(&array_size, argv);
 544			if (!argv)
 545				return -ENOMEM;
 546		}
 547
 548		/* we know this is whitespace */
 549		if (*end)
 550			end++;
 551
 552		/* terminate the string and put it in the array */
 553		*out = '\0';
 554		argv[*argc] = start;
 555		(*argc)++;
 556	}
 557
 558	*argvp = argv;
 559	return 0;
 560}
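/*
 * Editorial example (not part of the original file): for the parameter
 * string
 *
 *	"/dev/sdb 0 2 sync nofua"
 *
 * dm_split_args() returns argc == 5 and argv[] ==
 * { "/dev/sdb", "0", "2", "sync", "nofua" }. A space escaped with a
 * backslash ("some\ name") stays inside a single token, which is why the
 * split is destructive: tokens are terminated in place within @input.
 */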
 561
 562/*
  563 * Impose necessary and sufficient conditions on a device's table such
 564 * that any incoming bio which respects its logical_block_size can be
 565 * processed successfully.  If it falls across the boundary between
 566 * two or more targets, the size of each piece it gets split into must
 567 * be compatible with the logical_block_size of the target processing it.
 568 */
 569static int validate_hardware_logical_block_alignment(struct dm_table *table,
 570						 struct queue_limits *limits)
 571{
 572	/*
 573	 * This function uses arithmetic modulo the logical_block_size
 574	 * (in units of 512-byte sectors).
 575	 */
 576	unsigned short device_logical_block_size_sects =
 577		limits->logical_block_size >> SECTOR_SHIFT;
 578
 579	/*
 580	 * Offset of the start of the next table entry, mod logical_block_size.
 581	 */
 582	unsigned short next_target_start = 0;
 583
 584	/*
 585	 * Given an aligned bio that extends beyond the end of a
 586	 * target, how many sectors must the next target handle?
 587	 */
 588	unsigned short remaining = 0;
 589
 590	struct dm_target *ti;
 591	struct queue_limits ti_limits;
 592	unsigned i;
 593
 594	/*
 595	 * Check each entry in the table in turn.
 596	 */
 597	for (i = 0; i < dm_table_get_num_targets(table); i++) {
 598		ti = dm_table_get_target(table, i);
 599
 600		blk_set_stacking_limits(&ti_limits);
 601
 602		/* combine all target devices' limits */
 603		if (ti->type->iterate_devices)
 604			ti->type->iterate_devices(ti, dm_set_device_limits,
 605						  &ti_limits);
 606
 607		/*
 608		 * If the remaining sectors fall entirely within this
  609		 * table entry, are they compatible with its logical_block_size?
 610		 */
 611		if (remaining < ti->len &&
 612		    remaining & ((ti_limits.logical_block_size >>
 613				  SECTOR_SHIFT) - 1))
 614			break;	/* Error */
 615
 616		next_target_start =
 617		    (unsigned short) ((next_target_start + ti->len) &
 618				      (device_logical_block_size_sects - 1));
 619		remaining = next_target_start ?
 620		    device_logical_block_size_sects - next_target_start : 0;
 621	}
 622
 623	if (remaining) {
 624		DMWARN("%s: table line %u (start sect %llu len %llu) "
 625		       "not aligned to h/w logical block size %u",
 626		       dm_device_name(table->md), i,
 627		       (unsigned long long) ti->begin,
 628		       (unsigned long long) ti->len,
 629		       limits->logical_block_size);
 630		return -EINVAL;
 631	}
 632
 633	return 0;
 634}
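/*
 * Editorial worked example (not part of the original file), assuming a table
 * whose stacked logical_block_size is 4096 bytes (8 sectors):
 *
 *	target 0: len = 100 sectors -> next_target_start = 100 & 7 = 4,
 *	                               remaining = 8 - 4 = 4
 *	target 1: its own devices also use 4096-byte logical blocks
 *	          -> remaining (4) & 7 != 0, so the loop breaks and the table
 *	             is rejected with -EINVAL.
 *
 * If target 1's devices used 512-byte logical blocks instead, the 4 leftover
 * sectors would be acceptable and validation would continue.
 */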
 635
 636int dm_table_add_target(struct dm_table *t, const char *type,
 637			sector_t start, sector_t len, char *params)
 638{
 639	int r = -EINVAL, argc;
 640	char **argv;
 641	struct dm_target *tgt;
 642
 643	if (t->singleton) {
 644		DMERR("%s: target type %s must appear alone in table",
 645		      dm_device_name(t->md), t->targets->type->name);
 646		return -EINVAL;
 647	}
 648
 649	BUG_ON(t->num_targets >= t->num_allocated);
 650
 651	tgt = t->targets + t->num_targets;
 652	memset(tgt, 0, sizeof(*tgt));
 653
 654	if (!len) {
 655		DMERR("%s: zero-length target", dm_device_name(t->md));
 656		return -EINVAL;
 657	}
 658
 659	tgt->type = dm_get_target_type(type);
 660	if (!tgt->type) {
 661		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
 662		return -EINVAL;
 663	}
 664
 665	if (dm_target_needs_singleton(tgt->type)) {
 666		if (t->num_targets) {
 667			tgt->error = "singleton target type must appear alone in table";
 668			goto bad;
 669		}
 670		t->singleton = true;
 671	}
 672
 673	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
 674		tgt->error = "target type may not be included in a read-only table";
 
 675		goto bad;
 676	}
 677
 678	if (t->immutable_target_type) {
 679		if (t->immutable_target_type != tgt->type) {
 680			tgt->error = "immutable target type cannot be mixed with other target types";
 681			goto bad;
 682		}
 683	} else if (dm_target_is_immutable(tgt->type)) {
 684		if (t->num_targets) {
 685			tgt->error = "immutable target type cannot be mixed with other target types";
 686			goto bad;
 687		}
 688		t->immutable_target_type = tgt->type;
 689	}
 690
 691	if (dm_target_has_integrity(tgt->type))
 692		t->integrity_added = 1;
 693
 694	tgt->table = t;
 695	tgt->begin = start;
 696	tgt->len = len;
 697	tgt->error = "Unknown error";
 698
 699	/*
 700	 * Does this target adjoin the previous one ?
 701	 */
 702	if (!adjoin(t, tgt)) {
 703		tgt->error = "Gap in table";
 704		goto bad;
 705	}
 706
 707	r = dm_split_args(&argc, &argv, params);
 708	if (r) {
 709		tgt->error = "couldn't split parameters (insufficient memory)";
 710		goto bad;
 711	}
 712
 713	r = tgt->type->ctr(tgt, argc, argv);
 714	kfree(argv);
 715	if (r)
 716		goto bad;
 717
 718	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
 719
 720	if (!tgt->num_discard_bios && tgt->discards_supported)
 721		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
 722		       dm_device_name(t->md), type);
 723
 724	return 0;
 725
 726 bad:
 727	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
 728	dm_put_target_type(tgt->type);
 729	return r;
 730}
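/*
 * Editorial example (not part of the original file): a single line of a
 * "dmsetup table", such as
 *
 *	0 2097152 linear /dev/sdb 0
 *
 * reaches this function as start = 0, len = 2097152, type = "linear" and
 * params = "/dev/sdb 0"; dm_split_args() then hands the params to the
 * target's ctr as argc = 2, argv = { "/dev/sdb", "0" }.
 */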
 731
 732/*
 733 * Target argument parsing helpers.
 734 */
 735static int validate_next_arg(const struct dm_arg *arg,
 736			     struct dm_arg_set *arg_set,
 737			     unsigned *value, char **error, unsigned grouped)
 738{
 739	const char *arg_str = dm_shift_arg(arg_set);
 740	char dummy;
 741
 742	if (!arg_str ||
 743	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
 744	    (*value < arg->min) ||
 745	    (*value > arg->max) ||
 746	    (grouped && arg_set->argc < *value)) {
 747		*error = arg->error;
 748		return -EINVAL;
 749	}
 750
 751	return 0;
 752}
 753
 754int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
 755		unsigned *value, char **error)
 756{
 757	return validate_next_arg(arg, arg_set, value, error, 0);
 758}
 759EXPORT_SYMBOL(dm_read_arg);
 760
 761int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
 762		      unsigned *value, char **error)
 763{
 764	return validate_next_arg(arg, arg_set, value, error, 1);
 765}
 766EXPORT_SYMBOL(dm_read_arg_group);
 767
 768const char *dm_shift_arg(struct dm_arg_set *as)
 769{
 770	char *r;
 771
 772	if (as->argc) {
 773		as->argc--;
 774		r = *as->argv;
 775		as->argv++;
 776		return r;
 777	}
 778
 779	return NULL;
 780}
 781EXPORT_SYMBOL(dm_shift_arg);
 782
 783void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 784{
 785	BUG_ON(as->argc < num_args);
 786	as->argc -= num_args;
 787	as->argv += num_args;
 788}
 789EXPORT_SYMBOL(dm_consume_args);
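/*
 * Editorial example (not part of the original file): a typical optional
 * feature-argument parse built from the helpers above. The feature names and
 * the error message are illustrative only.
 *
 *	static const struct dm_arg _args[] = {
 *		{ 0, 2, "Invalid number of feature arguments" },
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned num_features;
 *	int r;
 *
 *	r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *	if (r)
 *		return r;
 *
 *	while (num_features--) {
 *		const char *arg = dm_shift_arg(&as);
 *
 *		if (!strcasecmp(arg, "sync"))
 *			...
 *		else if (!strcasecmp(arg, "nofua"))
 *			...
 *	}
 */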
 790
 791static bool __table_type_bio_based(enum dm_queue_mode table_type)
 792{
 793	return (table_type == DM_TYPE_BIO_BASED ||
 794		table_type == DM_TYPE_DAX_BIO_BASED);
 795}
 796
 797static bool __table_type_request_based(enum dm_queue_mode table_type)
 798{
 799	return table_type == DM_TYPE_REQUEST_BASED;
 800}
 801
 802void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 803{
 804	t->type = type;
 805}
 806EXPORT_SYMBOL_GPL(dm_table_set_type);
 807
 808/* validate the dax capability of the target device span */
 809int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
 810			sector_t start, sector_t len, void *data)
 811{
 812	int blocksize = *(int *) data, id;
 813	bool rc;
 814
 815	id = dax_read_lock();
 816	rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
 817	dax_read_unlock(id);
 818
 819	return rc;
 820}
 821
 822/* Check devices support synchronous DAX */
 823static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
 824					      sector_t start, sector_t len, void *data)
 825{
 826	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
 827}
 828
 829bool dm_table_supports_dax(struct dm_table *t,
 830			   iterate_devices_callout_fn iterate_fn, int *blocksize)
 831{
 832	struct dm_target *ti;
 833	unsigned i;
 834
 835	/* Ensure that all targets support DAX. */
 836	for (i = 0; i < dm_table_get_num_targets(t); i++) {
 837		ti = dm_table_get_target(t, i);
 838
 839		if (!ti->type->direct_access)
 840			return false;
 841
 842		if (!ti->type->iterate_devices ||
 843		    ti->type->iterate_devices(ti, iterate_fn, blocksize))
 
 844			return false;
 845	}
 846
 847	return true;
 848}
 849
 850static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
 851				  sector_t start, sector_t len, void *data)
 852{
 853	struct block_device *bdev = dev->bdev;
 854	struct request_queue *q = bdev_get_queue(bdev);
 855
 856	/* request-based cannot stack on partitions! */
 857	if (bdev_is_partition(bdev))
 858		return false;
 859
 860	return queue_is_mq(q);
 861}
 862
 863static int dm_table_determine_type(struct dm_table *t)
 864{
 865	unsigned i;
 866	unsigned bio_based = 0, request_based = 0, hybrid = 0;
 867	struct dm_target *tgt;
 868	struct list_head *devices = dm_table_get_devices(t);
 869	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
 870	int page_size = PAGE_SIZE;
 871
 872	if (t->type != DM_TYPE_NONE) {
 873		/* target already set the table's type */
 874		if (t->type == DM_TYPE_BIO_BASED) {
 875			/* possibly upgrade to a variant of bio-based */
 876			goto verify_bio_based;
 877		}
 878		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
 879		goto verify_rq_based;
 880	}
 881
 882	for (i = 0; i < t->num_targets; i++) {
 883		tgt = t->targets + i;
 884		if (dm_target_hybrid(tgt))
 885			hybrid = 1;
 886		else if (dm_target_request_based(tgt))
 887			request_based = 1;
 888		else
 889			bio_based = 1;
 890
 891		if (bio_based && request_based) {
 892			DMERR("Inconsistent table: different target types"
 893			      " can't be mixed up");
 894			return -EINVAL;
 895		}
 896	}
 897
 898	if (hybrid && !bio_based && !request_based) {
 899		/*
 900		 * The targets can work either way.
 901		 * Determine the type from the live device.
 902		 * Default to bio-based if device is new.
 903		 */
 904		if (__table_type_request_based(live_md_type))
 905			request_based = 1;
 906		else
 907			bio_based = 1;
 908	}
 909
 910	if (bio_based) {
 911verify_bio_based:
 912		/* We must use this table as bio-based */
 913		t->type = DM_TYPE_BIO_BASED;
 914		if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
 915		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
 916			t->type = DM_TYPE_DAX_BIO_BASED;
 917		}
 918		return 0;
 919	}
 920
 921	BUG_ON(!request_based); /* No targets in this table */
 922
 923	t->type = DM_TYPE_REQUEST_BASED;
 924
 925verify_rq_based:
 926	/*
 927	 * Request-based dm supports only tables that have a single target now.
 928	 * To support multiple targets, request splitting support is needed,
 929	 * and that needs lots of changes in the block-layer.
 930	 * (e.g. request completion process for partial completion.)
 931	 */
 932	if (t->num_targets > 1) {
 933		DMERR("request-based DM doesn't support multiple targets");
 934		return -EINVAL;
 935	}
 936
 937	if (list_empty(devices)) {
 938		int srcu_idx;
 939		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
 940
 941		/* inherit live table's type */
 942		if (live_table)
 943			t->type = live_table->type;
 944		dm_put_live_table(t->md, srcu_idx);
 945		return 0;
 946	}
 947
 948	tgt = dm_table_get_immutable_target(t);
 949	if (!tgt) {
 950		DMERR("table load rejected: immutable target is required");
 951		return -EINVAL;
 952	} else if (tgt->max_io_len) {
 953		DMERR("table load rejected: immutable target that splits IO is not supported");
 954		return -EINVAL;
 955	}
 956
 957	/* Non-request-stackable devices can't be used for request-based dm */
 958	if (!tgt->type->iterate_devices ||
 959	    !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
 960		DMERR("table load rejected: including non-request-stackable devices");
 961		return -EINVAL;
 962	}
 963
 964	return 0;
 965}
 966
 967enum dm_queue_mode dm_table_get_type(struct dm_table *t)
 968{
 969	return t->type;
 970}
 971
 972struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
 973{
 974	return t->immutable_target_type;
 975}
 976
 977struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
 978{
 979	/* Immutable target is implicitly a singleton */
 980	if (t->num_targets > 1 ||
 981	    !dm_target_is_immutable(t->targets[0].type))
 982		return NULL;
 983
 984	return t->targets;
 985}
 986
 987struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
 988{
 989	struct dm_target *ti;
 990	unsigned i;
 991
 992	for (i = 0; i < dm_table_get_num_targets(t); i++) {
 993		ti = dm_table_get_target(t, i);
 994		if (dm_target_is_wildcard(ti->type))
 995			return ti;
 996	}
 997
 998	return NULL;
 999}
1000
1001bool dm_table_bio_based(struct dm_table *t)
1002{
1003	return __table_type_bio_based(dm_table_get_type(t));
1004}
1005
1006bool dm_table_request_based(struct dm_table *t)
1007{
1008	return __table_type_request_based(dm_table_get_type(t));
1009}
1010
1011static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1012{
1013	enum dm_queue_mode type = dm_table_get_type(t);
1014	unsigned per_io_data_size = 0;
1015	unsigned min_pool_size = 0;
1016	struct dm_target *ti;
1017	unsigned i;
 
1018
1019	if (unlikely(type == DM_TYPE_NONE)) {
1020		DMWARN("no table type is set, can't allocate mempools");
1021		return -EINVAL;
1022	}
1023
1024	if (__table_type_bio_based(type))
1025		for (i = 0; i < t->num_targets; i++) {
1026			ti = t->targets + i;
1027			per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
1028			min_pool_size = max(min_pool_size, ti->num_flush_bios);
1029		}
1030
1031	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
1032					   per_io_data_size, min_pool_size);
1033	if (!t->mempools)
1034		return -ENOMEM;
1035
1036	return 0;
1037}
1038
1039void dm_table_free_md_mempools(struct dm_table *t)
1040{
1041	dm_free_md_mempools(t->mempools);
1042	t->mempools = NULL;
1043}
1044
1045struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
1046{
1047	return t->mempools;
1048}
1049
1050static int setup_indexes(struct dm_table *t)
1051{
1052	int i;
1053	unsigned int total = 0;
1054	sector_t *indexes;
1055
1056	/* allocate the space for *all* the indexes */
1057	for (i = t->depth - 2; i >= 0; i--) {
1058		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1059		total += t->counts[i];
1060	}
1061
1062	indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
1063	if (!indexes)
1064		return -ENOMEM;
1065
1066	/* set up internal nodes, bottom-up */
1067	for (i = t->depth - 2; i >= 0; i--) {
1068		t->index[i] = indexes;
1069		indexes += (KEYS_PER_NODE * t->counts[i]);
1070		setup_btree_index(i, t);
1071	}
1072
1073	return 0;
1074}
1075
1076/*
1077 * Builds the btree to index the map.
1078 */
1079static int dm_table_build_index(struct dm_table *t)
1080{
1081	int r = 0;
1082	unsigned int leaf_nodes;
1083
1084	/* how many indexes will the btree have ? */
1085	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1086	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1087
1088	/* leaf layer has already been set up */
1089	t->counts[t->depth - 1] = leaf_nodes;
1090	t->index[t->depth - 1] = t->highs;
1091
1092	if (t->depth >= 2)
1093		r = setup_indexes(t);
1094
1095	return r;
1096}
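/*
 * Editorial worked example (not part of the original file), assuming a
 * 64-byte L1 cache line and an 8-byte sector_t, so KEYS_PER_NODE == 8 and
 * CHILDREN_PER_NODE == 9. For a table with 200 targets:
 *
 *	leaf_nodes = dm_div_up(200, 8)     = 25
 *	depth      = 1 + int_log(25, 9)    = 3
 *	counts[2]  = 25   (leaf level, t->highs itself)
 *	counts[1]  = dm_div_up(25, 9)      = 3
 *	counts[0]  = dm_div_up(3, 9)       = 1
 *
 * setup_indexes() then allocates the 4 internal nodes (counts[0] + counts[1])
 * in one go and fills them bottom-up from the highs of the level below.
 */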
1097
1098static bool integrity_profile_exists(struct gendisk *disk)
1099{
1100	return !!blk_get_integrity(disk);
1101}
1102
1103/*
1104 * Get a disk whose integrity profile reflects the table's profile.
1105 * Returns NULL if integrity support was inconsistent or unavailable.
1106 */
1107static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
1108{
1109	struct list_head *devices = dm_table_get_devices(t);
1110	struct dm_dev_internal *dd = NULL;
1111	struct gendisk *prev_disk = NULL, *template_disk = NULL;
1112	unsigned i;
1113
1114	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1115		struct dm_target *ti = dm_table_get_target(t, i);
1116		if (!dm_target_passes_integrity(ti->type))
1117			goto no_integrity;
1118	}
1119
1120	list_for_each_entry(dd, devices, list) {
1121		template_disk = dd->dm_dev->bdev->bd_disk;
1122		if (!integrity_profile_exists(template_disk))
1123			goto no_integrity;
1124		else if (prev_disk &&
1125			 blk_integrity_compare(prev_disk, template_disk) < 0)
1126			goto no_integrity;
1127		prev_disk = template_disk;
1128	}
1129
1130	return template_disk;
1131
1132no_integrity:
1133	if (prev_disk)
1134		DMWARN("%s: integrity not set: %s and %s profile mismatch",
1135		       dm_device_name(t->md),
1136		       prev_disk->disk_name,
1137		       template_disk->disk_name);
1138	return NULL;
1139}
1140
1141/*
1142 * Register the mapped device for blk_integrity support if the
1143 * underlying devices have an integrity profile.  But all devices may
1144 * not have matching profiles (checking all devices isn't reliable
1145 * during table load because this table may use other DM device(s) which
 1146 * must be resumed before they will have an initialized integrity
1147 * profile).  Consequently, stacked DM devices force a 2 stage integrity
1148 * profile validation: First pass during table load, final pass during
1149 * resume.
1150 */
1151static int dm_table_register_integrity(struct dm_table *t)
1152{
1153	struct mapped_device *md = t->md;
1154	struct gendisk *template_disk = NULL;
1155
1156	/* If target handles integrity itself do not register it here. */
1157	if (t->integrity_added)
1158		return 0;
1159
1160	template_disk = dm_table_get_integrity_disk(t);
1161	if (!template_disk)
1162		return 0;
1163
1164	if (!integrity_profile_exists(dm_disk(md))) {
1165		t->integrity_supported = true;
1166		/*
1167		 * Register integrity profile during table load; we can do
1168		 * this because the final profile must match during resume.
1169		 */
1170		blk_integrity_register(dm_disk(md),
1171				       blk_get_integrity(template_disk));
1172		return 0;
1173	}
1174
1175	/*
1176	 * If DM device already has an initialized integrity
1177	 * profile the new profile should not conflict.
1178	 */
1179	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
1180		DMWARN("%s: conflict with existing integrity profile: "
1181		       "%s profile mismatch",
1182		       dm_device_name(t->md),
1183		       template_disk->disk_name);
1184		return 1;
1185	}
1186
1187	/* Preserve existing integrity profile */
1188	t->integrity_supported = true;
1189	return 0;
1190}
1191
1192#ifdef CONFIG_BLK_INLINE_ENCRYPTION
1193
1194struct dm_keyslot_manager {
1195	struct blk_keyslot_manager ksm;
1196	struct mapped_device *md;
1197};
1198
1199struct dm_keyslot_evict_args {
1200	const struct blk_crypto_key *key;
1201	int err;
1202};
1203
1204static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
1205				     sector_t start, sector_t len, void *data)
1206{
1207	struct dm_keyslot_evict_args *args = data;
1208	int err;
1209
1210	err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
1211	if (!args->err)
1212		args->err = err;
1213	/* Always try to evict the key from all devices. */
1214	return 0;
1215}
1216
1217/*
1218 * When an inline encryption key is evicted from a device-mapper device, evict
1219 * it from all the underlying devices.
1220 */
1221static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
1222			    const struct blk_crypto_key *key, unsigned int slot)
1223{
1224	struct dm_keyslot_manager *dksm = container_of(ksm,
1225						       struct dm_keyslot_manager,
1226						       ksm);
1227	struct mapped_device *md = dksm->md;
1228	struct dm_keyslot_evict_args args = { key };
1229	struct dm_table *t;
1230	int srcu_idx;
1231	int i;
1232	struct dm_target *ti;
1233
1234	t = dm_get_live_table(md, &srcu_idx);
1235	if (!t)
1236		return 0;
1237	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1238		ti = dm_table_get_target(t, i);
 
 
1239		if (!ti->type->iterate_devices)
1240			continue;
1241		ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
 
1242	}
 
1243	dm_put_live_table(md, srcu_idx);
1244	return args.err;
1245}
1246
1247static const struct blk_ksm_ll_ops dm_ksm_ll_ops = {
1248	.keyslot_evict = dm_keyslot_evict,
1249};
1250
1251static int device_intersect_crypto_modes(struct dm_target *ti,
1252					 struct dm_dev *dev, sector_t start,
1253					 sector_t len, void *data)
1254{
1255	struct blk_keyslot_manager *parent = data;
1256	struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
 
1257
1258	blk_ksm_intersect_modes(parent, child);
1259	return 0;
1260}
1261
1262void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
1263{
1264	struct dm_keyslot_manager *dksm = container_of(ksm,
1265						       struct dm_keyslot_manager,
1266						       ksm);
1267
1268	if (!ksm)
1269		return;
1270
1271	blk_ksm_destroy(ksm);
1272	kfree(dksm);
1273}
1274
1275static void dm_table_destroy_keyslot_manager(struct dm_table *t)
1276{
1277	dm_destroy_keyslot_manager(t->ksm);
1278	t->ksm = NULL;
1279}
1280
1281/*
1282 * Constructs and initializes t->ksm with a keyslot manager that
1283 * represents the common set of crypto capabilities of the devices
1284 * described by the dm_table. However, if the constructed keyslot
1285 * manager does not support a superset of the crypto capabilities
1286 * supported by the current keyslot manager of the mapped_device,
1287 * it returns an error instead, since we don't support restricting
1288 * crypto capabilities on table changes. Finally, if the constructed
1289 * keyslot manager doesn't actually support any crypto modes at all,
1290 * it just returns NULL.
1291 */
1292static int dm_table_construct_keyslot_manager(struct dm_table *t)
1293{
1294	struct dm_keyslot_manager *dksm;
1295	struct blk_keyslot_manager *ksm;
1296	struct dm_target *ti;
1297	unsigned int i;
1298	bool ksm_is_empty = true;
1299
1300	dksm = kmalloc(sizeof(*dksm), GFP_KERNEL);
1301	if (!dksm)
1302		return -ENOMEM;
1303	dksm->md = t->md;
1304
1305	ksm = &dksm->ksm;
1306	blk_ksm_init_passthrough(ksm);
1307	ksm->ksm_ll_ops = dm_ksm_ll_ops;
1308	ksm->max_dun_bytes_supported = UINT_MAX;
1309	memset(ksm->crypto_modes_supported, 0xFF,
1310	       sizeof(ksm->crypto_modes_supported));
1311
1312	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1313		ti = dm_table_get_target(t, i);
1314
1315		if (!dm_target_passes_crypto(ti->type)) {
1316			blk_ksm_intersect_modes(ksm, NULL);
1317			break;
1318		}
1319		if (!ti->type->iterate_devices)
1320			continue;
1321		ti->type->iterate_devices(ti, device_intersect_crypto_modes,
1322					  ksm);
 
1323	}
1324
1325	if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) {
1326		DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
1327		dm_destroy_keyslot_manager(ksm);
 
 
1328		return -EINVAL;
1329	}
1330
1331	/*
1332	 * If the new KSM doesn't actually support any crypto modes, we may as
1333	 * well represent it with a NULL ksm.
1334	 */
1335	ksm_is_empty = true;
1336	for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) {
1337		if (ksm->crypto_modes_supported[i]) {
1338			ksm_is_empty = false;
1339			break;
1340		}
1341	}
1342
1343	if (ksm_is_empty) {
1344		dm_destroy_keyslot_manager(ksm);
1345		ksm = NULL;
1346	}
1347
1348	/*
1349	 * t->ksm is only set temporarily while the table is being set
1350	 * up, and it gets set to NULL after the capabilities have
1351	 * been transferred to the request_queue.
1352	 */
1353	t->ksm = ksm;
1354
1355	return 0;
1356}
1357
1358static void dm_update_keyslot_manager(struct request_queue *q,
1359				      struct dm_table *t)
1360{
1361	if (!t->ksm)
1362		return;
1363
1364	/* Make the ksm less restrictive */
1365	if (!q->ksm) {
1366		blk_ksm_register(t->ksm, q);
1367	} else {
1368		blk_ksm_update_capabilities(q->ksm, t->ksm);
1369		dm_destroy_keyslot_manager(t->ksm);
 
1370	}
1371	t->ksm = NULL;
1372}
1373
1374#else /* CONFIG_BLK_INLINE_ENCRYPTION */
1375
1376static int dm_table_construct_keyslot_manager(struct dm_table *t)
1377{
1378	return 0;
1379}
1380
1381void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
1382{
1383}
1384
1385static void dm_table_destroy_keyslot_manager(struct dm_table *t)
1386{
1387}
1388
1389static void dm_update_keyslot_manager(struct request_queue *q,
1390				      struct dm_table *t)
1391{
1392}
1393
1394#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1395
1396/*
1397 * Prepares the table for use by building the indices,
1398 * setting the type, and allocating mempools.
1399 */
1400int dm_table_complete(struct dm_table *t)
1401{
1402	int r;
1403
1404	r = dm_table_determine_type(t);
1405	if (r) {
1406		DMERR("unable to determine table type");
1407		return r;
1408	}
1409
1410	r = dm_table_build_index(t);
1411	if (r) {
1412		DMERR("unable to build btrees");
1413		return r;
1414	}
1415
1416	r = dm_table_register_integrity(t);
1417	if (r) {
1418		DMERR("could not register integrity profile.");
1419		return r;
1420	}
1421
1422	r = dm_table_construct_keyslot_manager(t);
1423	if (r) {
1424		DMERR("could not construct keyslot manager.");
1425		return r;
1426	}
1427
1428	r = dm_table_alloc_md_mempools(t, t->md);
1429	if (r)
1430		DMERR("unable to allocate mempools");
1431
1432	return r;
1433}
1434
1435static DEFINE_MUTEX(_event_lock);
1436void dm_table_event_callback(struct dm_table *t,
1437			     void (*fn)(void *), void *context)
1438{
1439	mutex_lock(&_event_lock);
1440	t->event_fn = fn;
1441	t->event_context = context;
1442	mutex_unlock(&_event_lock);
1443}
1444
1445void dm_table_event(struct dm_table *t)
1446{
1447	mutex_lock(&_event_lock);
1448	if (t->event_fn)
1449		t->event_fn(t->event_context);
1450	mutex_unlock(&_event_lock);
1451}
1452EXPORT_SYMBOL(dm_table_event);
1453
1454inline sector_t dm_table_get_size(struct dm_table *t)
1455{
1456	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1457}
1458EXPORT_SYMBOL(dm_table_get_size);
1459
1460struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1461{
1462	if (index >= t->num_targets)
1463		return NULL;
1464
1465	return t->targets + index;
1466}
1467
1468/*
1469 * Search the btree for the correct target.
1470 *
1471 * Caller should check returned pointer for NULL
1472 * to trap I/O beyond end of device.
1473 */
1474struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1475{
1476	unsigned int l, n = 0, k = 0;
1477	sector_t *node;
1478
1479	if (unlikely(sector >= dm_table_get_size(t)))
1480		return NULL;
1481
1482	for (l = 0; l < t->depth; l++) {
1483		n = get_child(n, k);
1484		node = get_node(t, l, n);
1485
1486		for (k = 0; k < KEYS_PER_NODE; k++)
1487			if (node[k] >= sector)
1488				break;
1489	}
1490
1491	return &t->targets[(KEYS_PER_NODE * n) + k];
1492}
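/*
 * Editorial example (not part of the original file): callers are expected to
 * treat a NULL return as I/O beyond the end of the mapped device, roughly:
 *
 *	ti = dm_table_find_target(map, sector);
 *	if (!ti) {
 *		... fail the I/O ...
 *	}
 *
 * The exact error handling lives with the callers in dm.c; this only shows
 * the shape of the check implied by the comment above.
 */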
1493
1494/*
1495 * type->iterate_devices() should be called when the sanity check needs to
1496 * iterate and check all underlying data devices. iterate_devices() will
1497 * iterate all underlying data devices until it encounters a non-zero return
 1498 * code, returned either by the supplied iterate_devices_callout_fn or by
 1499 * iterate_devices() itself internally.
1500 *
1501 * For some target type (e.g. dm-stripe), one call of iterate_devices() may
1502 * iterate multiple underlying devices internally, in which case a non-zero
1503 * return code returned by iterate_devices_callout_fn will stop the iteration
1504 * in advance.
1505 *
1506 * Cases requiring _any_ underlying device supporting some kind of attribute,
1507 * should use the iteration structure like dm_table_any_dev_attr(), or call
1508 * it directly. @func should handle semantics of positive examples, e.g.
1509 * capable of something.
1510 *
1511 * Cases requiring _all_ underlying devices supporting some kind of attribute,
1512 * should use the iteration structure like dm_table_supports_nowait() or
1513 * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
1514 * uses an @anti_func that handle semantics of counter examples, e.g. not
1515 * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
1516 */
1517static bool dm_table_any_dev_attr(struct dm_table *t,
1518				  iterate_devices_callout_fn func, void *data)
1519{
1520	struct dm_target *ti;
1521	unsigned int i;
1522
1523	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1524		ti = dm_table_get_target(t, i);
1525
1526		if (ti->type->iterate_devices &&
1527		    ti->type->iterate_devices(ti, func, data))
1528			return true;
1529        }
1530
1531	return false;
1532}
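/*
 * Editorial example (not part of the original file): the "all devices"
 * helpers further down follow the anti-func pattern described above.
 * Ignoring the per-target feature check, dm_table_supports_nowait() could
 * equivalently be written as:
 *
 *	return !dm_table_any_dev_attr(t, device_not_nowait_capable, NULL);
 *
 * i.e. "every device is nowait-capable" is expressed as "no device is
 * not nowait-capable".
 */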
1533
1534static int count_device(struct dm_target *ti, struct dm_dev *dev,
1535			sector_t start, sector_t len, void *data)
1536{
1537	unsigned *num_devices = data;
1538
1539	(*num_devices)++;
1540
1541	return 0;
1542}
1543
1544/*
1545 * Check whether a table has no data devices attached using each
1546 * target's iterate_devices method.
1547 * Returns false if the result is unknown because a target doesn't
1548 * support iterate_devices.
1549 */
1550bool dm_table_has_no_data_devices(struct dm_table *table)
1551{
1552	struct dm_target *ti;
1553	unsigned i, num_devices;
1554
1555	for (i = 0; i < dm_table_get_num_targets(table); i++) {
1556		ti = dm_table_get_target(table, i);
1557
1558		if (!ti->type->iterate_devices)
1559			return false;
1560
1561		num_devices = 0;
1562		ti->type->iterate_devices(ti, count_device, &num_devices);
1563		if (num_devices)
1564			return false;
1565	}
1566
1567	return true;
1568}
1569
1570static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1571				  sector_t start, sector_t len, void *data)
1572{
1573	struct request_queue *q = bdev_get_queue(dev->bdev);
1574	enum blk_zoned_model *zoned_model = data;
 
 
1575
1576	return blk_queue_zoned_model(q) != *zoned_model;
1577}
1578
1579/*
1580 * Check the device zoned model based on the target feature flag. If the target
1581 * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
1582 * also accepted but all devices must have the same zoned model. If the target
1583 * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
1584 * zoned model with all zoned devices having the same zone size.
1585 */
1586static bool dm_table_supports_zoned_model(struct dm_table *t,
1587					  enum blk_zoned_model zoned_model)
1588{
1589	struct dm_target *ti;
1590	unsigned i;
1591
1592	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1593		ti = dm_table_get_target(t, i);
1594
1595		if (dm_target_supports_zoned_hm(ti->type)) {
1596			if (!ti->type->iterate_devices ||
1597			    ti->type->iterate_devices(ti, device_not_zoned_model,
1598						      &zoned_model))
1599				return false;
1600		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
1601			if (zoned_model == BLK_ZONED_HM)
1602				return false;
1603		}
1604	}
1605
1606	return true;
1607}
1608
1609static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
1610					   sector_t start, sector_t len, void *data)
1611{
1612	struct request_queue *q = bdev_get_queue(dev->bdev);
1613	unsigned int *zone_sectors = data;
1614
1615	if (!blk_queue_is_zoned(q))
1616		return 0;
1617
1618	return blk_queue_zone_sectors(q) != *zone_sectors;
1619}
1620
1621/*
1622 * Check consistency of zoned model and zone sectors across all targets. For
1623 * zone sectors, if the destination device is a zoned block device, it shall
1624 * have the specified zone_sectors.
1625 */
1626static int validate_hardware_zoned_model(struct dm_table *table,
1627					 enum blk_zoned_model zoned_model,
1628					 unsigned int zone_sectors)
1629{
1630	if (zoned_model == BLK_ZONED_NONE)
1631		return 0;
1632
1633	if (!dm_table_supports_zoned_model(table, zoned_model)) {
1634		DMERR("%s: zoned model is not consistent across all devices",
1635		      dm_device_name(table->md));
1636		return -EINVAL;
1637	}
1638
1639	/* Check zone size validity and compatibility */
1640	if (!zone_sectors || !is_power_of_2(zone_sectors))
1641		return -EINVAL;
1642
1643	if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
1644		DMERR("%s: zone sectors is not consistent across all zoned devices",
1645		      dm_device_name(table->md));
1646		return -EINVAL;
1647	}
1648
1649	return 0;
1650}
1651
1652/*
1653 * Establish the new table's queue_limits and validate them.
1654 */
1655int dm_calculate_queue_limits(struct dm_table *table,
1656			      struct queue_limits *limits)
1657{
1658	struct dm_target *ti;
1659	struct queue_limits ti_limits;
1660	unsigned i;
1661	enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
1662	unsigned int zone_sectors = 0;
 
1663
1664	blk_set_stacking_limits(limits);
1665
1666	for (i = 0; i < dm_table_get_num_targets(table); i++) {
1667		blk_set_stacking_limits(&ti_limits);
1668
1669		ti = dm_table_get_target(table, i);
1670
1671		if (!ti->type->iterate_devices)
1672			goto combine_limits;
 
1673
1674		/*
1675		 * Combine queue limits of all the devices this target uses.
1676		 */
1677		ti->type->iterate_devices(ti, dm_set_device_limits,
1678					  &ti_limits);
1679
1680		if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
1681			/*
1682			 * After stacking all limits, validate all devices
1683			 * in table support this zoned model and zone sectors.
1684			 */
1685			zoned_model = ti_limits.zoned;
1686			zone_sectors = ti_limits.chunk_sectors;
1687		}
1688
1689		/* Set I/O hints portion of queue limits */
1690		if (ti->type->io_hints)
1691			ti->type->io_hints(ti, &ti_limits);
1692
1693		/*
1694		 * Check each device area is consistent with the target's
1695		 * overall queue limits.
1696		 */
1697		if (ti->type->iterate_devices(ti, device_area_is_invalid,
1698					      &ti_limits))
1699			return -EINVAL;
1700
1701combine_limits:
1702		/*
1703		 * Merge this target's queue limits into the overall limits
1704		 * for the table.
1705		 */
1706		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1707			DMWARN("%s: adding target device "
1708			       "(start sect %llu len %llu) "
1709			       "caused an alignment inconsistency",
1710			       dm_device_name(table->md),
1711			       (unsigned long long) ti->begin,
1712			       (unsigned long long) ti->len);
1713	}
1714
1715	/*
1716	 * Verify that the zoned model and zone sectors, as determined before
1717	 * any .io_hints override, are the same across all devices in the table.
 1718	 * - this is especially relevant if .io_hints is emulating a drive-managed
1719	 *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
1720	 * BUT...
1721	 */
1722	if (limits->zoned != BLK_ZONED_NONE) {
1723		/*
1724		 * ...IF the above limits stacking determined a zoned model
1725		 * validate that all of the table's devices conform to it.
1726		 */
1727		zoned_model = limits->zoned;
1728		zone_sectors = limits->chunk_sectors;
1729	}
1730	if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
1731		return -EINVAL;
1732
1733	return validate_hardware_logical_block_alignment(table, limits);
1734}
1735
1736/*
1737 * Verify that all devices have an integrity profile that matches the
1738 * DM device's registered integrity profile.  If the profiles don't
1739 * match then unregister the DM device's integrity profile.
1740 */
1741static void dm_table_verify_integrity(struct dm_table *t)
1742{
1743	struct gendisk *template_disk = NULL;
1744
1745	if (t->integrity_added)
1746		return;
1747
1748	if (t->integrity_supported) {
1749		/*
1750		 * Verify that the original integrity profile
1751		 * matches all the devices in this table.
1752		 */
1753		template_disk = dm_table_get_integrity_disk(t);
1754		if (template_disk &&
1755		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
1756			return;
1757	}
1758
1759	if (integrity_profile_exists(dm_disk(t->md))) {
1760		DMWARN("%s: unable to establish an integrity profile",
1761		       dm_device_name(t->md));
1762		blk_integrity_unregister(dm_disk(t->md));
1763	}
1764}
1765
1766static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1767				sector_t start, sector_t len, void *data)
1768{
1769	unsigned long flush = (unsigned long) data;
1770	struct request_queue *q = bdev_get_queue(dev->bdev);
1771
1772	return (q->queue_flags & flush);
1773}
1774
1775static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1776{
1777	struct dm_target *ti;
1778	unsigned i;
1779
1780	/*
1781	 * Require at least one underlying device to support flushes.
1782	 * t->devices includes internal dm devices such as mirror logs
1783	 * so we need to use iterate_devices here, which targets
1784	 * supporting flushes must provide.
1785	 */
1786	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1787		ti = dm_table_get_target(t, i);
1788
1789		if (!ti->num_flush_bios)
1790			continue;
1791
1792		if (ti->flush_supported)
1793			return true;
1794
1795		if (ti->type->iterate_devices &&
1796		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
1797			return true;
1798	}
1799
1800	return false;
1801}
1802
1803static int device_dax_write_cache_enabled(struct dm_target *ti,
1804					  struct dm_dev *dev, sector_t start,
1805					  sector_t len, void *data)
1806{
1807	struct dax_device *dax_dev = dev->dax_dev;
1808
1809	if (!dax_dev)
1810		return false;
1811
1812	if (dax_write_cache_enabled(dax_dev))
1813		return true;
1814	return false;
1815}
1816
1817static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
1818				sector_t start, sector_t len, void *data)
1819{
1820	struct request_queue *q = bdev_get_queue(dev->bdev);
1821
1822	return !blk_queue_nonrot(q);
1823}
1824
1825static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1826			     sector_t start, sector_t len, void *data)
1827{
1828	struct request_queue *q = bdev_get_queue(dev->bdev);
1829
1830	return !blk_queue_add_random(q);
1831}
1832
1833static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
1834					 sector_t start, sector_t len, void *data)
1835{
1836	struct request_queue *q = bdev_get_queue(dev->bdev);
1837
1838	return !q->limits.max_write_same_sectors;
1839}
1840
1841static bool dm_table_supports_write_same(struct dm_table *t)
1842{
1843	struct dm_target *ti;
1844	unsigned i;
1845
1846	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1847		ti = dm_table_get_target(t, i);
1848
1849		if (!ti->num_write_same_bios)
1850			return false;
1851
1852		if (!ti->type->iterate_devices ||
1853		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1854			return false;
1855	}
1856
1857	return true;
1858}
1859
1860static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
1861					   sector_t start, sector_t len, void *data)
1862{
1863	struct request_queue *q = bdev_get_queue(dev->bdev);
1864
1865	return !q->limits.max_write_zeroes_sectors;
1866}
1867
1868static bool dm_table_supports_write_zeroes(struct dm_table *t)
1869{
1870	struct dm_target *ti;
1871	unsigned i = 0;
1872
1873	while (i < dm_table_get_num_targets(t)) {
1874		ti = dm_table_get_target(t, i++);
1875
1876		if (!ti->num_write_zeroes_bios)
1877			return false;
1878
1879		if (!ti->type->iterate_devices ||
1880		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
1881			return false;
1882	}
1883
1884	return true;
1885}
1886
1887static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
1888				     sector_t start, sector_t len, void *data)
1889{
1890	struct request_queue *q = bdev_get_queue(dev->bdev);
1891
1892	return !blk_queue_nowait(q);
1893}
1894
1895static bool dm_table_supports_nowait(struct dm_table *t)
1896{
1897	struct dm_target *ti;
1898	unsigned i = 0;
1899
1900	while (i < dm_table_get_num_targets(t)) {
1901		ti = dm_table_get_target(t, i++);
1902
1903		if (!dm_target_supports_nowait(ti->type))
1904			return false;
1905
1906		if (!ti->type->iterate_devices ||
1907		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
1908			return false;
1909	}
1910
1911	return true;
1912}
1913
1914static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1915				      sector_t start, sector_t len, void *data)
1916{
1917	struct request_queue *q = bdev_get_queue(dev->bdev);
1918
1919	return !blk_queue_discard(q);
1920}
1921
1922static bool dm_table_supports_discards(struct dm_table *t)
1923{
1924	struct dm_target *ti;
1925	unsigned i;
1926
1927	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1928		ti = dm_table_get_target(t, i);
1929
1930		if (!ti->num_discard_bios)
1931			return false;
1932
1933		/*
1934		 * Either the target provides discard support (as implied by setting
1935		 * 'discards_supported') or it relies on _all_ data devices having
1936		 * discard support.
1937		 */
1938		if (!ti->discards_supported &&
1939		    (!ti->type->iterate_devices ||
1940		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
1941			return false;
1942	}
1943
1944	return true;
1945}
1946
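/*
 * The discards_supported override above lets a target claim discard
 * support even when some of its data devices cannot discard, e.g. when
 * the target handles discards internally.  A hypothetical constructor
 * opting in might look roughly like this (illustrative sketch only):
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		... argument parsing and device acquisition ...
 *		ti->num_discard_bios = 1;
 *		ti->discards_supported = true;
 *		return 0;
 *	}
 */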
1947static int device_not_secure_erase_capable(struct dm_target *ti,
1948					   struct dm_dev *dev, sector_t start,
1949					   sector_t len, void *data)
1950{
1951	struct request_queue *q = bdev_get_queue(dev->bdev);
1952
1953	return !blk_queue_secure_erase(q);
1954}
1955
1956static bool dm_table_supports_secure_erase(struct dm_table *t)
1957{
1958	struct dm_target *ti;
1959	unsigned int i;
1960
1961	for (i = 0; i < dm_table_get_num_targets(t); i++) {
1962		ti = dm_table_get_target(t, i);
1963
1964		if (!ti->num_secure_erase_bios)
1965			return false;
1966
1967		if (!ti->type->iterate_devices ||
1968		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
1969			return false;
1970	}
1971
1972	return true;
1973}
1974
1975static int device_requires_stable_pages(struct dm_target *ti,
1976					struct dm_dev *dev, sector_t start,
1977					sector_t len, void *data)
1978{
1979	struct request_queue *q = bdev_get_queue(dev->bdev);
1980
1981	return blk_queue_stable_writes(q);
1982}
1983
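/*
 * Polarity note: helpers passed to dm_table_any_dev_attr(), such as
 * device_requires_stable_pages() or device_is_rotational(), return true
 * when the attribute of interest is present on a device, whereas the
 * device_not_*_capable() helpers return true when a capability is
 * missing and back the "every device must support it"
 * dm_table_supports_*() checks.
 */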
1984int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1985			      struct queue_limits *limits)
1986{
1987	bool wc = false, fua = false;
1988	int page_size = PAGE_SIZE;
1989	int r;
1990
1991	/*
1992	 * Copy table's limits to the DM device's request_queue
1993	 */
1994	q->limits = *limits;
1995
1996	if (dm_table_supports_nowait(t))
1997		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
1998	else
1999		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
2000
2001	if (!dm_table_supports_discards(t)) {
2002		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
2003		/* Must also clear discard limits... */
2004		q->limits.max_discard_sectors = 0;
2005		q->limits.max_hw_discard_sectors = 0;
2006		q->limits.discard_granularity = 0;
2007		q->limits.discard_alignment = 0;
2008		q->limits.discard_misaligned = 0;
2009	} else
2010		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
2011
2012	if (dm_table_supports_secure_erase(t))
2013		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
2014
2015	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
2016		wc = true;
2017		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
2018			fua = true;
2019	}
2020	blk_queue_write_cache(q, wc, fua);
2021
2022	if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
2023		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
2024		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
2025			set_dax_synchronous(t->md->dax_dev);
2026	} else
2027		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
2028
2029
2030	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
2031		dax_write_cache(t->md->dax_dev, true);
2032
2033	/* Mark the queue non-rotational only if all underlying devices are non-rotational. */
2034	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
2035		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2036	else
2037		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2038
2039	if (!dm_table_supports_write_same(t))
2040		q->limits.max_write_same_sectors = 0;
2041	if (!dm_table_supports_write_zeroes(t))
2042		q->limits.max_write_zeroes_sectors = 0;
2043
2044	dm_table_verify_integrity(t);
2045
2046	/*
2047	 * Some devices don't use blk_integrity but still want stable pages
2048	 * because they do their own checksumming.
2049	 * If any underlying device requires stable pages, a table must require
2050	 * them as well.  Only targets that support iterate_devices are considered:
2051	 * we don't want error, zero, etc. to require stable pages.
2052	 */
2053	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
2054		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
2055	else
2056		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
2057
2058	/*
2059	 * Determine whether this queue's I/O timings contribute to the
2060	 * entropy pool. Only request-based targets use this.
2061	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
2062	 * have it set.
2063	 */
2064	if (blk_queue_add_random(q) &&
2065	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
2066		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2067
2068	/*
2069	 * For a zoned target, set up the zone-related queue attributes and,
2070	 * if needed, the resources for zone append emulation.
2071	 */
2072	if (blk_queue_is_zoned(q)) {
2073		r = dm_set_zones_restrictions(t, q);
2074		if (r)
2075			return r;
2076	}
2077
2078	dm_update_keyslot_manager(q, t);
2079	blk_queue_update_readahead(q);
2080
2081	return 0;
2082}
2083
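/*
 * dm_table_set_restrictions() only applies queue limits and flags; it does
 * not compute them.  The caller is expected to have stacked the per-target
 * limits first, so the usual sequence is roughly (sketch, error handling
 * omitted; q is the mapped device's request_queue):
 *
 *	struct queue_limits limits;
 *
 *	r = dm_calculate_queue_limits(t, &limits);
 *	if (!r)
 *		r = dm_table_set_restrictions(t, q, &limits);
 */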
2084unsigned int dm_table_get_num_targets(struct dm_table *t)
2085{
2086	return t->num_targets;
2087}
2088
2089struct list_head *dm_table_get_devices(struct dm_table *t)
2090{
2091	return &t->devices;
2092}
2093
2094fmode_t dm_table_get_mode(struct dm_table *t)
2095{
2096	return t->mode;
2097}
2098EXPORT_SYMBOL(dm_table_get_mode);
2099
2100enum suspend_mode {
2101	PRESUSPEND,
2102	PRESUSPEND_UNDO,
2103	POSTSUSPEND,
2104};
2105
2106static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
2107{
2108	int i = t->num_targets;
2109	struct dm_target *ti = t->targets;
2110
2111	lockdep_assert_held(&t->md->suspend_lock);
2112
2113	while (i--) {
2114		switch (mode) {
2115		case PRESUSPEND:
2116			if (ti->type->presuspend)
2117				ti->type->presuspend(ti);
2118			break;
2119		case PRESUSPEND_UNDO:
2120			if (ti->type->presuspend_undo)
2121				ti->type->presuspend_undo(ti);
2122			break;
2123		case POSTSUSPEND:
2124			if (ti->type->postsuspend)
2125				ti->type->postsuspend(ti);
2126			break;
2127		}
2128		ti++;
2129	}
2130}
2131
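/*
 * The wrappers below all run under md->suspend_lock (see the lockdep
 * assertion in suspend_targets()).  During suspend the expected order is
 * presuspend for every target followed by postsuspend; if suspension
 * fails part-way, presuspend_undo lets targets roll back.
 */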
2132void dm_table_presuspend_targets(struct dm_table *t)
2133{
2134	if (!t)
2135		return;
2136
2137	suspend_targets(t, PRESUSPEND);
2138}
2139
2140void dm_table_presuspend_undo_targets(struct dm_table *t)
2141{
2142	if (!t)
2143		return;
2144
2145	suspend_targets(t, PRESUSPEND_UNDO);
2146}
2147
2148void dm_table_postsuspend_targets(struct dm_table *t)
2149{
2150	if (!t)
2151		return;
2152
2153	suspend_targets(t, POSTSUSPEND);
2154}
2155
2156int dm_table_resume_targets(struct dm_table *t)
2157{
2158	int i, r = 0;
2159
2160	lockdep_assert_held(&t->md->suspend_lock);
2161
2162	for (i = 0; i < t->num_targets; i++) {
2163		struct dm_target *ti = t->targets + i;
2164
2165		if (!ti->type->preresume)
2166			continue;
2167
2168		r = ti->type->preresume(ti);
2169		if (r) {
2170			DMERR("%s: %s: preresume failed, error = %d",
2171			      dm_device_name(t->md), ti->type->name, r);
2172			return r;
2173		}
2174	}
2175
2176	for (i = 0; i < t->num_targets; i++) {
2177		struct dm_target *ti = t->targets + i;
2178
2179		if (ti->type->resume)
2180			ti->type->resume(ti);
2181	}
2182
2183	return 0;
2184}
2185
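/*
 * Note that resume is two-pass on purpose: every target's preresume hook
 * must succeed before any resume hook runs, so a preresume failure is
 * propagated to the caller while all targets are still quiesced.
 */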
2186struct mapped_device *dm_table_get_md(struct dm_table *t)
2187{
2188	return t->md;
2189}
2190EXPORT_SYMBOL(dm_table_get_md);
2191
2192const char *dm_table_device_name(struct dm_table *t)
2193{
2194	return dm_device_name(t->md);
2195}
2196EXPORT_SYMBOL_GPL(dm_table_device_name);
2197
2198void dm_table_run_md_queue_async(struct dm_table *t)
2199{
2200	if (!dm_table_request_based(t))
2201		return;
2202
2203	if (t->md->queue)
2204		blk_mq_run_hw_queues(t->md->queue, true);
2205}
2206EXPORT_SYMBOL(dm_table_run_md_queue_async);
2207