   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
   4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include "dm-core.h"
  10#include "dm-rq.h"
  11#include "dm-uevent.h"
  12#include "dm-ima.h"
  13
  14#include <linux/init.h>
  15#include <linux/module.h>
  16#include <linux/mutex.h>
  17#include <linux/sched/mm.h>
  18#include <linux/sched/signal.h>
  19#include <linux/blkpg.h>
  20#include <linux/bio.h>
  21#include <linux/mempool.h>
  22#include <linux/dax.h>
  23#include <linux/slab.h>
  24#include <linux/idr.h>
  25#include <linux/uio.h>
  26#include <linux/hdreg.h>
  27#include <linux/delay.h>
  28#include <linux/wait.h>
  29#include <linux/pr.h>
  30#include <linux/refcount.h>
  31#include <linux/part_stat.h>
  32#include <linux/blk-crypto.h>
  33#include <linux/blk-crypto-profile.h>
  34
  35#define DM_MSG_PREFIX "core"
  36
  37/*
  38 * Cookies are numeric values sent with CHANGE and REMOVE
  39 * uevents while resuming, removing or renaming the device.
  40 */
  41#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
  42#define DM_COOKIE_LENGTH 24
  43
  44/*
  45 * For REQ_POLLED fs bio, this flag is set if we link mapped underlying
  46 * dm_io into one list, and reuse bio->bi_private as the list head. Before
  47 * ending this fs bio, we will recover its ->bi_private.
  48 */
  49#define REQ_DM_POLL_LIST	REQ_DRV
  50
  51static const char *_name = DM_NAME;
  52
  53static unsigned int major;
  54static unsigned int _major;
  55
  56static DEFINE_IDR(_minor_idr);
  57
  58static DEFINE_SPINLOCK(_minor_lock);
  59
  60static void do_deferred_remove(struct work_struct *w);
  61
  62static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
  63
  64static struct workqueue_struct *deferred_remove_workqueue;
  65
  66atomic_t dm_global_event_nr = ATOMIC_INIT(0);
  67DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
  68
  69void dm_issue_global_event(void)
  70{
  71	atomic_inc(&dm_global_event_nr);
  72	wake_up(&dm_global_eventq);
  73}
  74
  75DEFINE_STATIC_KEY_FALSE(stats_enabled);
  76DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
  77DEFINE_STATIC_KEY_FALSE(zoned_enabled);
  78
  79/*
  80 * One of these is allocated (on-stack) per original bio.
  81 */
  82struct clone_info {
  83	struct dm_table *map;
  84	struct bio *bio;
  85	struct dm_io *io;
  86	sector_t sector;
  87	unsigned int sector_count;
  88	bool is_abnormal_io:1;
  89	bool submit_as_polled:1;
  90};
  91
  92static inline struct dm_target_io *clone_to_tio(struct bio *clone)
  93{
  94	return container_of(clone, struct dm_target_io, clone);
  95}
  96
  97void *dm_per_bio_data(struct bio *bio, size_t data_size)
  98{
  99	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
 100		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
 101	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
 102}
 103EXPORT_SYMBOL_GPL(dm_per_bio_data);
 104
 105struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
 106{
 107	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
 108
 109	if (io->magic == DM_IO_MAGIC)
 110		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
 111	BUG_ON(io->magic != DM_TIO_MAGIC);
 112	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
 113}
 114EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
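/*
 * [Editor's note] Illustrative sketch, not part of dm.c: a bio-based target
 * typically reserves per-bio data in its constructor and then converts
 * between a bio and that data with the two helpers above. Every name
 * prefixed with "my_" below is hypothetical.
 *
 *	struct my_per_bio_data {
 *		struct work_struct work;
 *	};
 *
 *	static int my_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 *	{
 *		ti->per_io_data_size = sizeof(struct my_per_bio_data);
 *		return 0;
 *	}
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct my_per_bio_data *pb =
 *			dm_per_bio_data(bio, sizeof(struct my_per_bio_data));
 *
 *		INIT_WORK(&pb->work, my_work_fn);	// my_work_fn is hypothetical
 *		queue_work(system_wq, &pb->work);
 *		return DM_MAPIO_SUBMITTED;
 *	}
 */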
 115
 116unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
 117{
 118	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
 119}
 120EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
 121
 122#define MINOR_ALLOCED ((void *)-1)
 123
 124#define DM_NUMA_NODE NUMA_NO_NODE
 125static int dm_numa_node = DM_NUMA_NODE;
 126
 127#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
 128static int swap_bios = DEFAULT_SWAP_BIOS;
 129static int get_swap_bios(void)
 130{
 131	int latch = READ_ONCE(swap_bios);
 132
 133	if (unlikely(latch <= 0))
 134		latch = DEFAULT_SWAP_BIOS;
 135	return latch;
 136}
 137
 138struct table_device {
 139	struct list_head list;
 140	refcount_t count;
 141	struct dm_dev dm_dev;
 142};
 143
 144/*
 145 * Bio-based DM's mempools' reserved IOs set by the user.
 146 */
 147#define RESERVED_BIO_BASED_IOS		16
 148static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
 149
 150static int __dm_get_module_param_int(int *module_param, int min, int max)
 151{
 152	int param = READ_ONCE(*module_param);
 153	int modified_param = 0;
 154	bool modified = true;
 155
 156	if (param < min)
 157		modified_param = min;
 158	else if (param > max)
 159		modified_param = max;
 160	else
 161		modified = false;
 162
 163	if (modified) {
 164		(void)cmpxchg(module_param, param, modified_param);
 165		param = modified_param;
 166	}
 167
 168	return param;
 169}
 170
 171unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
 172{
 173	unsigned int param = READ_ONCE(*module_param);
 174	unsigned int modified_param = 0;
 175
 176	if (!param)
 177		modified_param = def;
 178	else if (param > max)
 179		modified_param = max;
 180
 181	if (modified_param) {
 182		(void)cmpxchg(module_param, param, modified_param);
 183		param = modified_param;
 184	}
 185
 186	return param;
 187}
 188
 189unsigned int dm_get_reserved_bio_based_ios(void)
 190{
 191	return __dm_get_module_param(&reserved_bio_based_ios,
 192				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
 193}
 194EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
 195
 196static unsigned int dm_get_numa_node(void)
 197{
 198	return __dm_get_module_param_int(&dm_numa_node,
 199					 DM_NUMA_NODE, num_online_nodes() - 1);
 200}
 201
 202static int __init local_init(void)
 203{
 204	int r;
 205
 206	r = dm_uevent_init();
 207	if (r)
 208		return r;
 209
 210	deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0);
 211	if (!deferred_remove_workqueue) {
 212		r = -ENOMEM;
 213		goto out_uevent_exit;
 214	}
 215
 216	_major = major;
 217	r = register_blkdev(_major, _name);
 218	if (r < 0)
 219		goto out_free_workqueue;
 220
 221	if (!_major)
 222		_major = r;
 223
 224	return 0;
 225
 226out_free_workqueue:
 227	destroy_workqueue(deferred_remove_workqueue);
 228out_uevent_exit:
 229	dm_uevent_exit();
 230
 231	return r;
 232}
 233
 234static void local_exit(void)
 235{
 236	destroy_workqueue(deferred_remove_workqueue);
 237
 238	unregister_blkdev(_major, _name);
 239	dm_uevent_exit();
 240
 241	_major = 0;
 242
 243	DMINFO("cleaned up");
 244}
 245
 246static int (*_inits[])(void) __initdata = {
 247	local_init,
 248	dm_target_init,
 249	dm_linear_init,
 250	dm_stripe_init,
 251	dm_io_init,
 252	dm_kcopyd_init,
 253	dm_interface_init,
 254	dm_statistics_init,
 255};
 256
 257static void (*_exits[])(void) = {
 258	local_exit,
 259	dm_target_exit,
 260	dm_linear_exit,
 261	dm_stripe_exit,
 262	dm_io_exit,
 263	dm_kcopyd_exit,
 264	dm_interface_exit,
 265	dm_statistics_exit,
 266};
 267
 268static int __init dm_init(void)
 269{
 270	const int count = ARRAY_SIZE(_inits);
 271	int r, i;
 272
 273#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
 274	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
 275	       " Duplicate IMA measurements will not be recorded in the IMA log.");
 276#endif
 277
 278	for (i = 0; i < count; i++) {
 279		r = _inits[i]();
 280		if (r)
 281			goto bad;
 282	}
 283
 284	return 0;
 285bad:
 286	while (i--)
 287		_exits[i]();
 288
 289	return r;
 290}
 291
 292static void __exit dm_exit(void)
 293{
 294	int i = ARRAY_SIZE(_exits);
 295
 296	while (i--)
 297		_exits[i]();
 298
 299	/*
 300	 * Should be empty by this point.
 301	 */
 302	idr_destroy(&_minor_idr);
 303}
 304
 305/*
 306 * Block device functions
 307 */
 308int dm_deleting_md(struct mapped_device *md)
 309{
 310	return test_bit(DMF_DELETING, &md->flags);
 311}
 312
 313static int dm_blk_open(struct gendisk *disk, blk_mode_t mode)
 314{
 315	struct mapped_device *md;
 316
 317	spin_lock(&_minor_lock);
 318
 319	md = disk->private_data;
 320	if (!md)
 321		goto out;
 322
 323	if (test_bit(DMF_FREEING, &md->flags) ||
 324	    dm_deleting_md(md)) {
 325		md = NULL;
 326		goto out;
 327	}
 328
 329	dm_get(md);
 330	atomic_inc(&md->open_count);
 331out:
 332	spin_unlock(&_minor_lock);
 333
 334	return md ? 0 : -ENXIO;
 335}
 336
 337static void dm_blk_close(struct gendisk *disk)
 338{
 339	struct mapped_device *md;
 340
 341	spin_lock(&_minor_lock);
 342
 343	md = disk->private_data;
 344	if (WARN_ON(!md))
 345		goto out;
 346
 347	if (atomic_dec_and_test(&md->open_count) &&
 348	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
 349		queue_work(deferred_remove_workqueue, &deferred_remove_work);
 350
 351	dm_put(md);
 352out:
 353	spin_unlock(&_minor_lock);
 354}
 355
 356int dm_open_count(struct mapped_device *md)
 357{
 358	return atomic_read(&md->open_count);
 359}
 360
 361/*
 362 * Guarantees nothing is using the device before it's deleted.
 363 */
 364int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
 365{
 366	int r = 0;
 367
 368	spin_lock(&_minor_lock);
 369
 370	if (dm_open_count(md)) {
 371		r = -EBUSY;
 372		if (mark_deferred)
 373			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
 374	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
 375		r = -EEXIST;
 376	else
 377		set_bit(DMF_DELETING, &md->flags);
 378
 379	spin_unlock(&_minor_lock);
 380
 381	return r;
 382}
 383
 384int dm_cancel_deferred_remove(struct mapped_device *md)
 385{
 386	int r = 0;
 387
 388	spin_lock(&_minor_lock);
 389
 390	if (test_bit(DMF_DELETING, &md->flags))
 391		r = -EBUSY;
 392	else
 393		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
 394
 395	spin_unlock(&_minor_lock);
 396
 397	return r;
 398}
 399
 400static void do_deferred_remove(struct work_struct *w)
 401{
 402	dm_deferred_remove();
 403}
 404
 405static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 406{
 407	struct mapped_device *md = bdev->bd_disk->private_data;
 408
 409	return dm_get_geometry(md, geo);
 410}
 411
 412static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
 413			    struct block_device **bdev)
 414{
 415	struct dm_target *ti;
 416	struct dm_table *map;
 417	int r;
 418
 419retry:
 420	r = -ENOTTY;
 421	map = dm_get_live_table(md, srcu_idx);
 422	if (!map || !dm_table_get_size(map))
 423		return r;
 424
 425	/* We only support devices that have a single target */
 426	if (map->num_targets != 1)
 427		return r;
 428
 429	ti = dm_table_get_target(map, 0);
 430	if (!ti->type->prepare_ioctl)
 431		return r;
 432
 433	if (dm_suspended_md(md))
 434		return -EAGAIN;
 435
 436	r = ti->type->prepare_ioctl(ti, bdev);
 437	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
 438		dm_put_live_table(md, *srcu_idx);
 439		fsleep(10000);
 440		goto retry;
 441	}
 442
 443	return r;
 444}
 445
 446static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
 447{
 448	dm_put_live_table(md, srcu_idx);
 449}
 450
 451static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
 452			unsigned int cmd, unsigned long arg)
 453{
 454	struct mapped_device *md = bdev->bd_disk->private_data;
 455	int r, srcu_idx;
 456
 457	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
 458	if (r < 0)
 459		goto out;
 460
 461	if (r > 0) {
 462		/*
 463		 * Target determined this ioctl is being issued against a
 464		 * subset of the parent bdev; require extra privileges.
 465		 */
 466		if (!capable(CAP_SYS_RAWIO)) {
 467			DMDEBUG_LIMIT(
 468	"%s: sending ioctl %x to DM device without required privilege.",
 469				current->comm, cmd);
 470			r = -ENOIOCTLCMD;
 471			goto out;
 472		}
 473	}
 474
 475	if (!bdev->bd_disk->fops->ioctl)
 476		r = -ENOTTY;
 477	else
 478		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
 479out:
 480	dm_unprepare_ioctl(md, srcu_idx);
 481	return r;
 482}
 483
 484u64 dm_start_time_ns_from_clone(struct bio *bio)
 485{
 486	return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
 487}
 488EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
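/*
 * [Editor's note] Illustrative sketch, not part of dm.c: a target doing its
 * own latency accounting can read the start time DM core recorded for this
 * io from its ->end_io hook. Both values below are jiffies-based, so the
 * difference is only jiffies-granular; "my_end_io" and "record_latency" are
 * hypothetical.
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio,
 *			     blk_status_t *error)
 *	{
 *		u64 elapsed_ns = jiffies_to_nsecs(jiffies) -
 *				 dm_start_time_ns_from_clone(bio);
 *
 *		record_latency(elapsed_ns);	// feed the target's own stats
 *		return DM_ENDIO_DONE;
 *	}
 */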
 489
 490static inline bool bio_is_flush_with_data(struct bio *bio)
 491{
 492	return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
 493}
 494
 495static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
 496{
 497	/*
 498	 * If REQ_PREFLUSH set, don't account payload, it will be
 499	 * submitted (and accounted) after this flush completes.
 500	 */
 501	if (bio_is_flush_with_data(bio))
 502		return 0;
 503	if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
 504		return io->sectors;
 505	return bio_sectors(bio);
 506}
 507
 508static void dm_io_acct(struct dm_io *io, bool end)
 509{
 510	struct bio *bio = io->orig_bio;
 511
 512	if (dm_io_flagged(io, DM_IO_BLK_STAT)) {
 513		if (!end)
 514			bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
 515					   io->start_time);
 516		else
 517			bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
 518					 dm_io_sectors(io, bio),
 519					 io->start_time);
 520	}
 521
 522	if (static_branch_unlikely(&stats_enabled) &&
 523	    unlikely(dm_stats_used(&io->md->stats))) {
 524		sector_t sector;
 525
 526		if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
 527			sector = bio_end_sector(bio) - io->sector_offset;
 528		else
 529			sector = bio->bi_iter.bi_sector;
 530
 531		dm_stats_account_io(&io->md->stats, bio_data_dir(bio),
 532				    sector, dm_io_sectors(io, bio),
 533				    end, io->start_time, &io->stats_aux);
 534	}
 535}
 536
 537static void __dm_start_io_acct(struct dm_io *io)
 538{
 539	dm_io_acct(io, false);
 540}
 541
 542static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
 543{
 544	/*
 545	 * Ensure IO accounting is only ever started once.
 546	 */
 547	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
 548		return;
 549
 550	/* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */
 551	if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
 552		dm_io_set_flag(io, DM_IO_ACCOUNTED);
 553	} else {
 554		unsigned long flags;
 555		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
 556		spin_lock_irqsave(&io->lock, flags);
 557		if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
 558			spin_unlock_irqrestore(&io->lock, flags);
 559			return;
 560		}
 561		dm_io_set_flag(io, DM_IO_ACCOUNTED);
 562		spin_unlock_irqrestore(&io->lock, flags);
 563	}
 564
 565	__dm_start_io_acct(io);
 566}
 567
 568static void dm_end_io_acct(struct dm_io *io)
 569{
 570	dm_io_acct(io, true);
 571}
 572
 573static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask)
 574{
 575	struct dm_io *io;
 576	struct dm_target_io *tio;
 577	struct bio *clone;
 578
 579	clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs);
 580	if (unlikely(!clone))
 581		return NULL;
 582	tio = clone_to_tio(clone);
 583	tio->flags = 0;
 584	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
 585	tio->io = NULL;
 586
 587	io = container_of(tio, struct dm_io, tio);
 588	io->magic = DM_IO_MAGIC;
 589	io->status = BLK_STS_OK;
 590
 591	/* one ref is for submission, the other is for completion */
 592	atomic_set(&io->io_count, 2);
 593	this_cpu_inc(*md->pending_io);
 594	io->orig_bio = bio;
 595	io->md = md;
 596	spin_lock_init(&io->lock);
 597	io->start_time = jiffies;
 598	io->flags = 0;
 599	if (blk_queue_io_stat(md->queue))
 600		dm_io_set_flag(io, DM_IO_BLK_STAT);
 601
 602	if (static_branch_unlikely(&stats_enabled) &&
 603	    unlikely(dm_stats_used(&md->stats)))
 604		dm_stats_record_start(&md->stats, &io->stats_aux);
 605
 606	return io;
 607}
 608
 609static void free_io(struct dm_io *io)
 610{
 611	bio_put(&io->tio.clone);
 612}
 613
 614static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
 615			     unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
 616{
 617	struct mapped_device *md = ci->io->md;
 618	struct dm_target_io *tio;
 619	struct bio *clone;
 620
 621	if (!ci->io->tio.io) {
 622		/* the dm_target_io embedded in ci->io is available */
 623		tio = &ci->io->tio;
 624		/* alloc_io() already initialized embedded clone */
 625		clone = &tio->clone;
 626	} else {
 627		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
 628					&md->mempools->bs);
 629		if (!clone)
 630			return NULL;
 631
 632		/* REQ_DM_POLL_LIST shouldn't be inherited */
 633		clone->bi_opf &= ~REQ_DM_POLL_LIST;
 634
 635		tio = clone_to_tio(clone);
 636		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
 637	}
 638
 639	tio->magic = DM_TIO_MAGIC;
 640	tio->io = ci->io;
 641	tio->ti = ti;
 642	tio->target_bio_nr = target_bio_nr;
 643	tio->len_ptr = len;
 644	tio->old_sector = 0;
 645
 646	/* Set default bdev, but target must bio_set_dev() before issuing IO */
 647	clone->bi_bdev = md->disk->part0;
 648	if (unlikely(ti->needs_bio_set_dev))
 649		bio_set_dev(clone, md->disk->part0);
 650
 651	if (len) {
 652		clone->bi_iter.bi_size = to_bytes(*len);
 653		if (bio_integrity(clone))
 654			bio_integrity_trim(clone);
 655	}
 656
 657	return clone;
 658}
 659
 660static void free_tio(struct bio *clone)
 661{
 662	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
 663		return;
 664	bio_put(clone);
 665}
 666
 667/*
 668 * Add the bio to the list of deferred io.
 669 */
 670static void queue_io(struct mapped_device *md, struct bio *bio)
 671{
 672	unsigned long flags;
 673
 674	spin_lock_irqsave(&md->deferred_lock, flags);
 675	bio_list_add(&md->deferred, bio);
 676	spin_unlock_irqrestore(&md->deferred_lock, flags);
 677	queue_work(md->wq, &md->work);
 678}
 679
 680/*
 681 * Everyone (including functions in this file), should use this
 682 * function to access the md->map field, and make sure they call
 683 * dm_put_live_table() when finished.
 684 */
 685struct dm_table *dm_get_live_table(struct mapped_device *md,
 686				   int *srcu_idx) __acquires(md->io_barrier)
 687{
 688	*srcu_idx = srcu_read_lock(&md->io_barrier);
 689
 690	return srcu_dereference(md->map, &md->io_barrier);
 691}
 692
 693void dm_put_live_table(struct mapped_device *md,
 694		       int srcu_idx) __releases(md->io_barrier)
 695{
 696	srcu_read_unlock(&md->io_barrier, srcu_idx);
 697}
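/*
 * [Editor's note] Usage sketch, not part of dm.c: code that needs the live
 * table brackets its use with the pair above (or the _fast variants below
 * when it does not block in between); "md" is assumed to be a valid
 * mapped_device.
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		// inspect or iterate the table here
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */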
 698
 699void dm_sync_table(struct mapped_device *md)
 700{
 701	synchronize_srcu(&md->io_barrier);
 702	synchronize_rcu_expedited();
 703}
 704
 705/*
 706 * A fast alternative to dm_get_live_table/dm_put_live_table.
 707 * The caller must not block between these two functions.
 708 */
 709static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
 710{
 711	rcu_read_lock();
 712	return rcu_dereference(md->map);
 713}
 714
 715static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 716{
 717	rcu_read_unlock();
 718}
 719
 720static char *_dm_claim_ptr = "I belong to device-mapper";
 721
 722/*
 723 * Open a table device so we can use it as a map destination.
 724 */
 725static struct table_device *open_table_device(struct mapped_device *md,
 726		dev_t dev, blk_mode_t mode)
 727{
 728	struct table_device *td;
 729	struct bdev_handle *bdev_handle;
 730	u64 part_off;
 731	int r;
 732
 733	td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
 734	if (!td)
 735		return ERR_PTR(-ENOMEM);
 736	refcount_set(&td->count, 1);
 737
 738	bdev_handle = bdev_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
 739	if (IS_ERR(bdev_handle)) {
 740		r = PTR_ERR(bdev_handle);
 741		goto out_free_td;
 742	}
 743
 744	/*
 745	 * We can be called before the dm disk is added.  In that case we can't
  746	 * register the holder relation here.  It will be done once add_disk()
  747	 * has been called.
 748	 */
 749	if (md->disk->slave_dir) {
 750		r = bd_link_disk_holder(bdev_handle->bdev, md->disk);
 751		if (r)
 752			goto out_blkdev_put;
 753	}
 754
 755	td->dm_dev.mode = mode;
 756	td->dm_dev.bdev = bdev_handle->bdev;
 757	td->dm_dev.bdev_handle = bdev_handle;
 758	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, &part_off,
 759						NULL, NULL);
 760	format_dev_t(td->dm_dev.name, dev);
 761	list_add(&td->list, &md->table_devices);
 762	return td;
 763
 764out_blkdev_put:
 765	bdev_release(bdev_handle);
 766out_free_td:
 767	kfree(td);
 768	return ERR_PTR(r);
 769}
 770
 771/*
 772 * Close a table device that we've been using.
 773 */
 774static void close_table_device(struct table_device *td, struct mapped_device *md)
 775{
 776	if (md->disk->slave_dir)
 777		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
 778	bdev_release(td->dm_dev.bdev_handle);
 779	put_dax(td->dm_dev.dax_dev);
 780	list_del(&td->list);
 781	kfree(td);
 782}
 783
 784static struct table_device *find_table_device(struct list_head *l, dev_t dev,
 785					      blk_mode_t mode)
 786{
 787	struct table_device *td;
 788
 789	list_for_each_entry(td, l, list)
 790		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
 791			return td;
 792
 793	return NULL;
 794}
 795
 796int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
 797			struct dm_dev **result)
 798{
 799	struct table_device *td;
 800
 801	mutex_lock(&md->table_devices_lock);
 802	td = find_table_device(&md->table_devices, dev, mode);
 803	if (!td) {
 804		td = open_table_device(md, dev, mode);
 805		if (IS_ERR(td)) {
 806			mutex_unlock(&md->table_devices_lock);
 807			return PTR_ERR(td);
 808		}
 809	} else {
 810		refcount_inc(&td->count);
 811	}
 812	mutex_unlock(&md->table_devices_lock);
 813
 814	*result = &td->dm_dev;
 815	return 0;
 816}
 817
 818void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
 819{
 820	struct table_device *td = container_of(d, struct table_device, dm_dev);
 821
 822	mutex_lock(&md->table_devices_lock);
 823	if (refcount_dec_and_test(&td->count))
 824		close_table_device(td, md);
 825	mutex_unlock(&md->table_devices_lock);
 826}
 827
 828/*
 829 * Get the geometry associated with a dm device
 830 */
 831int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
 832{
 833	*geo = md->geometry;
 834
 835	return 0;
 836}
 837
 838/*
 839 * Set the geometry of a device.
 840 */
 841int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
 842{
 843	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
 844
 845	if (geo->start > sz) {
 846		DMERR("Start sector is beyond the geometry limits.");
 847		return -EINVAL;
 848	}
 849
 850	md->geometry = *geo;
 851
 852	return 0;
 853}
 854
 855static int __noflush_suspending(struct mapped_device *md)
 856{
 857	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
 858}
 859
 860static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
 861{
 862	struct mapped_device *md = io->md;
 863
 864	if (first_stage) {
 865		struct dm_io *next = md->requeue_list;
 866
 867		md->requeue_list = io;
 868		io->next = next;
 869	} else {
 870		bio_list_add_head(&md->deferred, io->orig_bio);
 871	}
 872}
 873
 874static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
 875{
 876	if (first_stage)
 877		queue_work(md->wq, &md->requeue_work);
 878	else
 879		queue_work(md->wq, &md->work);
 880}
 881
 882/*
 883 * Return true if the dm_io's original bio is requeued.
 884 * io->status is updated with error if requeue disallowed.
 885 */
 886static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
 887{
 888	struct bio *bio = io->orig_bio;
 889	bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE);
 890	bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) &&
 891				     (bio->bi_opf & REQ_POLLED));
 892	struct mapped_device *md = io->md;
 893	bool requeued = false;
 894
 895	if (handle_requeue || handle_polled_eagain) {
 896		unsigned long flags;
 897
 898		if (bio->bi_opf & REQ_POLLED) {
 899			/*
 900			 * Upper layer won't help us poll split bio
 901			 * (io->orig_bio may only reflect a subset of the
 902			 * pre-split original) so clear REQ_POLLED.
 903			 */
 904			bio_clear_polled(bio);
 905		}
 906
 907		/*
 908		 * Target requested pushing back the I/O or
 909		 * polled IO hit BLK_STS_AGAIN.
 910		 */
 911		spin_lock_irqsave(&md->deferred_lock, flags);
 912		if ((__noflush_suspending(md) &&
 913		     !WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
 914		    handle_polled_eagain || first_stage) {
 915			dm_requeue_add_io(io, first_stage);
 916			requeued = true;
 917		} else {
 918			/*
 919			 * noflush suspend was interrupted or this is
 920			 * a write to a zoned target.
 921			 */
 922			io->status = BLK_STS_IOERR;
 923		}
 924		spin_unlock_irqrestore(&md->deferred_lock, flags);
 925	}
 926
 927	if (requeued)
 928		dm_kick_requeue(md, first_stage);
 929
 930	return requeued;
 931}
 932
 933static void __dm_io_complete(struct dm_io *io, bool first_stage)
 934{
 935	struct bio *bio = io->orig_bio;
 936	struct mapped_device *md = io->md;
 937	blk_status_t io_error;
 938	bool requeued;
 939
 940	requeued = dm_handle_requeue(io, first_stage);
 941	if (requeued && first_stage)
 942		return;
 943
 944	io_error = io->status;
 945	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
 946		dm_end_io_acct(io);
 947	else if (!io_error) {
 948		/*
  949		 * Must handle a target that returned DM_MAPIO_SUBMITTED only to
  950		 * then call bio_endio() rather than dm_submit_bio_remap().
 951		 */
 952		__dm_start_io_acct(io);
 953		dm_end_io_acct(io);
 954	}
 955	free_io(io);
 956	smp_wmb();
 957	this_cpu_dec(*md->pending_io);
 958
 959	/* nudge anyone waiting on suspend queue */
 960	if (unlikely(wq_has_sleeper(&md->wait)))
 961		wake_up(&md->wait);
 962
 963	/* Return early if the original bio was requeued */
 964	if (requeued)
 965		return;
 966
 967	if (bio_is_flush_with_data(bio)) {
 968		/*
 969		 * Preflush done for flush with data, reissue
 970		 * without REQ_PREFLUSH.
 971		 */
 972		bio->bi_opf &= ~REQ_PREFLUSH;
 973		queue_io(md, bio);
 974	} else {
 975		/* done with normal IO or empty flush */
 976		if (io_error)
 977			bio->bi_status = io_error;
 978		bio_endio(bio);
 979	}
 980}
 981
 982static void dm_wq_requeue_work(struct work_struct *work)
 983{
 984	struct mapped_device *md = container_of(work, struct mapped_device,
 985						requeue_work);
 986	unsigned long flags;
 987	struct dm_io *io;
 988
 989	/* reuse deferred lock to simplify dm_handle_requeue */
 990	spin_lock_irqsave(&md->deferred_lock, flags);
 991	io = md->requeue_list;
 992	md->requeue_list = NULL;
 993	spin_unlock_irqrestore(&md->deferred_lock, flags);
 994
 995	while (io) {
 996		struct dm_io *next = io->next;
 997
 998		dm_io_rewind(io, &md->disk->bio_split);
 999
1000		io->next = NULL;
1001		__dm_io_complete(io, false);
1002		io = next;
1003		cond_resched();
1004	}
1005}
1006
1007/*
1008 * Two staged requeue:
1009 *
1010 * 1) io->orig_bio points to the real original bio, and the part mapped to
1011 *    this io must be requeued, instead of other parts of the original bio.
1012 *
1013 * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
1014 */
1015static void dm_io_complete(struct dm_io *io)
1016{
1017	bool first_requeue;
1018
1019	/*
1020	 * Only dm_io that has been split needs two stage requeue, otherwise
1021	 * we may run into long bio clone chain during suspend and OOM could
1022	 * be triggered.
1023	 *
1024	 * Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they
1025	 * also aren't handled via the first stage requeue.
1026	 */
1027	if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
1028		first_requeue = true;
1029	else
1030		first_requeue = false;
1031
1032	__dm_io_complete(io, first_requeue);
1033}
1034
1035/*
1036 * Decrements the number of outstanding ios that a bio has been
 1037 * cloned into, completing the original io if necessary.
1038 */
1039static inline void __dm_io_dec_pending(struct dm_io *io)
1040{
1041	if (atomic_dec_and_test(&io->io_count))
1042		dm_io_complete(io);
1043}
1044
1045static void dm_io_set_error(struct dm_io *io, blk_status_t error)
1046{
1047	unsigned long flags;
1048
1049	/* Push-back supersedes any I/O errors */
1050	spin_lock_irqsave(&io->lock, flags);
1051	if (!(io->status == BLK_STS_DM_REQUEUE &&
1052	      __noflush_suspending(io->md))) {
1053		io->status = error;
1054	}
1055	spin_unlock_irqrestore(&io->lock, flags);
1056}
1057
1058static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
1059{
1060	if (unlikely(error))
1061		dm_io_set_error(io, error);
1062
1063	__dm_io_dec_pending(io);
1064}
1065
1066/*
1067 * The queue_limits are only valid as long as you have a reference
 1068 * count on 'md'. But verification is _not_ imposed here, to avoid an atomic_read().
1069 */
1070static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
1071{
1072	return &md->queue->limits;
1073}
1074
1075void disable_discard(struct mapped_device *md)
1076{
1077	struct queue_limits *limits = dm_get_queue_limits(md);
1078
1079	/* device doesn't really support DISCARD, disable it */
1080	limits->max_discard_sectors = 0;
1081}
1082
1083void disable_write_zeroes(struct mapped_device *md)
1084{
1085	struct queue_limits *limits = dm_get_queue_limits(md);
1086
1087	/* device doesn't really support WRITE ZEROES, disable it */
1088	limits->max_write_zeroes_sectors = 0;
1089}
1090
1091static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
1092{
1093	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
1094}
1095
1096static void clone_endio(struct bio *bio)
1097{
1098	blk_status_t error = bio->bi_status;
1099	struct dm_target_io *tio = clone_to_tio(bio);
1100	struct dm_target *ti = tio->ti;
1101	dm_endio_fn endio = ti->type->end_io;
1102	struct dm_io *io = tio->io;
1103	struct mapped_device *md = io->md;
1104
1105	if (unlikely(error == BLK_STS_TARGET)) {
1106		if (bio_op(bio) == REQ_OP_DISCARD &&
1107		    !bdev_max_discard_sectors(bio->bi_bdev))
1108			disable_discard(md);
1109		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
1110			 !bdev_write_zeroes_sectors(bio->bi_bdev))
1111			disable_write_zeroes(md);
1112	}
1113
1114	if (static_branch_unlikely(&zoned_enabled) &&
1115	    unlikely(bdev_is_zoned(bio->bi_bdev)))
1116		dm_zone_endio(io, bio);
1117
1118	if (endio) {
1119		int r = endio(ti, bio, &error);
1120
1121		switch (r) {
1122		case DM_ENDIO_REQUEUE:
1123			if (static_branch_unlikely(&zoned_enabled)) {
1124				/*
1125				 * Requeuing writes to a sequential zone of a zoned
1126				 * target will break the sequential write pattern:
1127				 * fail such IO.
1128				 */
1129				if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
1130					error = BLK_STS_IOERR;
1131				else
1132					error = BLK_STS_DM_REQUEUE;
1133			} else
1134				error = BLK_STS_DM_REQUEUE;
1135			fallthrough;
1136		case DM_ENDIO_DONE:
1137			break;
1138		case DM_ENDIO_INCOMPLETE:
1139			/* The target will handle the io */
1140			return;
1141		default:
1142			DMCRIT("unimplemented target endio return value: %d", r);
1143			BUG();
1144		}
1145	}
1146
1147	if (static_branch_unlikely(&swap_bios_enabled) &&
1148	    unlikely(swap_bios_limit(ti, bio)))
1149		up(&md->swap_bios_semaphore);
1150
1151	free_tio(bio);
1152	dm_io_dec_pending(io, error);
1153}
1154
1155/*
1156 * Return maximum size of I/O possible at the supplied sector up to the current
1157 * target boundary.
1158 */
1159static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
1160						  sector_t target_offset)
1161{
1162	return ti->len - target_offset;
1163}
1164
1165static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
1166			     unsigned int max_granularity,
1167			     unsigned int max_sectors)
1168{
1169	sector_t target_offset = dm_target_offset(ti, sector);
1170	sector_t len = max_io_len_target_boundary(ti, target_offset);
1171
1172	/*
1173	 * Does the target need to split IO even further?
1174	 * - varied (per target) IO splitting is a tenet of DM; this
1175	 *   explains why stacked chunk_sectors based splitting via
1176	 *   bio_split_to_limits() isn't possible here.
1177	 */
1178	if (!max_granularity)
1179		return len;
1180	return min_t(sector_t, len,
1181		min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
1182		    blk_chunk_sectors_left(target_offset, max_granularity)));
1183}
1184
1185static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
1186{
1187	return __max_io_len(ti, sector, ti->max_io_len, 0);
1188}
1189
1190int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1191{
1192	if (len > UINT_MAX) {
1193		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1194		      (unsigned long long)len, UINT_MAX);
1195		ti->error = "Maximum size of target IO is too large";
1196		return -EINVAL;
1197	}
1198
1199	ti->max_io_len = (uint32_t) len;
1200
1201	return 0;
1202}
1203EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
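/*
 * [Editor's note] Illustrative sketch, not part of dm.c: a target whose IOs
 * must not cross a fixed boundary caps them from its constructor. The names
 * "my_ctr" and "chunk_sectors" are hypothetical; the value would normally be
 * parsed from argv[].
 *
 *	static int my_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 *	{
 *		sector_t chunk_sectors = 128;	// e.g. parsed from argv[]
 *		int r;
 *
 *		r = dm_set_target_max_io_len(ti, chunk_sectors);
 *		if (r)
 *			return r;	// ti->error was set on failure
 *		return 0;
 *	}
 */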
1204
1205static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
1206						sector_t sector, int *srcu_idx)
1207	__acquires(md->io_barrier)
1208{
1209	struct dm_table *map;
1210	struct dm_target *ti;
1211
1212	map = dm_get_live_table(md, srcu_idx);
1213	if (!map)
1214		return NULL;
1215
1216	ti = dm_table_find_target(map, sector);
1217	if (!ti)
1218		return NULL;
1219
1220	return ti;
1221}
1222
1223static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1224		long nr_pages, enum dax_access_mode mode, void **kaddr,
1225		pfn_t *pfn)
1226{
1227	struct mapped_device *md = dax_get_private(dax_dev);
1228	sector_t sector = pgoff * PAGE_SECTORS;
1229	struct dm_target *ti;
1230	long len, ret = -EIO;
1231	int srcu_idx;
1232
1233	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1234
1235	if (!ti)
1236		goto out;
1237	if (!ti->type->direct_access)
1238		goto out;
1239	len = max_io_len(ti, sector) / PAGE_SECTORS;
1240	if (len < 1)
1241		goto out;
1242	nr_pages = min(len, nr_pages);
1243	ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
1244
1245 out:
1246	dm_put_live_table(md, srcu_idx);
1247
1248	return ret;
1249}
1250
1251static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
1252				  size_t nr_pages)
1253{
1254	struct mapped_device *md = dax_get_private(dax_dev);
1255	sector_t sector = pgoff * PAGE_SECTORS;
1256	struct dm_target *ti;
1257	int ret = -EIO;
1258	int srcu_idx;
1259
1260	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1261
1262	if (!ti)
1263		goto out;
1264	if (WARN_ON(!ti->type->dax_zero_page_range)) {
1265		/*
1266		 * ->zero_page_range() is mandatory dax operation. If we are
1267		 *  here, something is wrong.
1268		 */
1269		goto out;
1270	}
1271	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
1272 out:
1273	dm_put_live_table(md, srcu_idx);
1274
1275	return ret;
1276}
1277
1278static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
1279		void *addr, size_t bytes, struct iov_iter *i)
1280{
1281	struct mapped_device *md = dax_get_private(dax_dev);
1282	sector_t sector = pgoff * PAGE_SECTORS;
1283	struct dm_target *ti;
1284	int srcu_idx;
1285	long ret = 0;
1286
1287	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1288	if (!ti || !ti->type->dax_recovery_write)
1289		goto out;
1290
1291	ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
1292out:
1293	dm_put_live_table(md, srcu_idx);
1294	return ret;
1295}
1296
1297/*
1298 * A target may call dm_accept_partial_bio only from the map routine.  It is
1299 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
1300 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
1301 * __send_duplicate_bios().
1302 *
1303 * dm_accept_partial_bio informs the dm that the target only wants to process
1304 * additional n_sectors sectors of the bio and the rest of the data should be
1305 * sent in a next bio.
1306 *
1307 * A diagram that explains the arithmetics:
1308 * +--------------------+---------------+-------+
1309 * |         1          |       2       |   3   |
1310 * +--------------------+---------------+-------+
1311 *
1312 * <-------------- *tio->len_ptr --------------->
1313 *                      <----- bio_sectors ----->
1314 *                      <-- n_sectors -->
1315 *
1316 * Region 1 was already iterated over with bio_advance or similar function.
1317 *	(it may be empty if the target doesn't use bio_advance)
1318 * Region 2 is the remaining bio size that the target wants to process.
1319 *	(it may be empty if region 1 is non-empty, although there is no reason
1320 *	 to make it empty)
1321 * The target requires that region 3 is to be sent in the next bio.
1322 *
1323 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1324 * the partially processed part (the sum of regions 1+2) must be the same for all
1325 * copies of the bio.
1326 */
1327void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
1328{
1329	struct dm_target_io *tio = clone_to_tio(bio);
1330	struct dm_io *io = tio->io;
1331	unsigned int bio_sectors = bio_sectors(bio);
1332
1333	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
1334	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
1335	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
1336	BUG_ON(bio_sectors > *tio->len_ptr);
1337	BUG_ON(n_sectors > bio_sectors);
1338
1339	*tio->len_ptr -= bio_sectors - n_sectors;
1340	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1341
1342	/*
1343	 * __split_and_process_bio() may have already saved mapped part
1344	 * for accounting but it is being reduced so update accordingly.
1345	 */
1346	dm_io_set_flag(io, DM_IO_WAS_SPLIT);
1347	io->sectors = n_sectors;
1348	io->sector_offset = bio_sectors(io->orig_bio);
1349}
1350EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
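/*
 * [Editor's note] Illustrative sketch, not part of dm.c: a map function that
 * can only service the first part of a bio trims it with
 * dm_accept_partial_bio() and DM core resubmits the remainder as a new bio.
 * "my_max_sectors" and "my_dev" are hypothetical.
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned int my_max_sectors = 8;
 *
 *		if (bio_sectors(bio) > my_max_sectors)
 *			dm_accept_partial_bio(bio, my_max_sectors);
 *
 *		bio_set_dev(bio, my_dev->bdev);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */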
1351
1352/*
1353 * @clone: clone bio that DM core passed to target's .map function
1354 * @tgt_clone: clone of @clone bio that target needs submitted
1355 *
1356 * Targets should use this interface to submit bios they take
1357 * ownership of when returning DM_MAPIO_SUBMITTED.
1358 *
1359 * Target should also enable ti->accounts_remapped_io
1360 */
1361void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
1362{
1363	struct dm_target_io *tio = clone_to_tio(clone);
1364	struct dm_io *io = tio->io;
1365
1366	/* establish bio that will get submitted */
1367	if (!tgt_clone)
1368		tgt_clone = clone;
1369
1370	/*
 1371	 * Account io->orig_bio to DM dev on behalf of target
1372	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
1373	 */
1374	dm_start_io_acct(io, clone);
1375
1376	trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
1377			      tio->old_sector);
1378	submit_bio_noacct(tgt_clone);
1379}
1380EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
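/*
 * [Editor's note] Illustrative sketch, not part of dm.c: a target that
 * returns DM_MAPIO_SUBMITTED from ->map and submits the clone later (for
 * example from a workqueue) may set ti->accounts_remapped_io in its
 * constructor so accounting starts when the bio is actually submitted via
 * dm_submit_bio_remap(). The worker and "my_dev" below are hypothetical.
 *
 *	ti->accounts_remapped_io = true;	// in the target's ctr
 *
 *	static void my_deferred_submit(struct work_struct *work)
 *	{
 *		struct bio *bio = ...;	// the clone that ->map saved for later
 *
 *		bio_set_dev(bio, my_dev->bdev);
 *		dm_submit_bio_remap(bio, NULL);
 *	}
 */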
1381
1382static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
1383{
1384	mutex_lock(&md->swap_bios_lock);
1385	while (latch < md->swap_bios) {
1386		cond_resched();
1387		down(&md->swap_bios_semaphore);
1388		md->swap_bios--;
1389	}
1390	while (latch > md->swap_bios) {
1391		cond_resched();
1392		up(&md->swap_bios_semaphore);
1393		md->swap_bios++;
1394	}
1395	mutex_unlock(&md->swap_bios_lock);
1396}
1397
1398static void __map_bio(struct bio *clone)
1399{
1400	struct dm_target_io *tio = clone_to_tio(clone);
1401	struct dm_target *ti = tio->ti;
1402	struct dm_io *io = tio->io;
1403	struct mapped_device *md = io->md;
1404	int r;
1405
1406	clone->bi_end_io = clone_endio;
1407
1408	/*
1409	 * Map the clone.
1410	 */
1411	tio->old_sector = clone->bi_iter.bi_sector;
1412
1413	if (static_branch_unlikely(&swap_bios_enabled) &&
1414	    unlikely(swap_bios_limit(ti, clone))) {
1415		int latch = get_swap_bios();
1416
1417		if (unlikely(latch != md->swap_bios))
1418			__set_swap_bios_limit(md, latch);
1419		down(&md->swap_bios_semaphore);
1420	}
1421
1422	if (static_branch_unlikely(&zoned_enabled)) {
1423		/*
1424		 * Check if the IO needs a special mapping due to zone append
1425		 * emulation on zoned target. In this case, dm_zone_map_bio()
1426		 * calls the target map operation.
1427		 */
1428		if (unlikely(dm_emulate_zone_append(md)))
1429			r = dm_zone_map_bio(tio);
1430		else
1431			goto do_map;
1432	} else {
1433do_map:
1434		if (likely(ti->type->map == linear_map))
1435			r = linear_map(ti, clone);
1436		else if (ti->type->map == stripe_map)
1437			r = stripe_map(ti, clone);
1438		else
1439			r = ti->type->map(ti, clone);
1440	}
1441
1442	switch (r) {
1443	case DM_MAPIO_SUBMITTED:
1444		/* target has assumed ownership of this io */
1445		if (!ti->accounts_remapped_io)
1446			dm_start_io_acct(io, clone);
1447		break;
1448	case DM_MAPIO_REMAPPED:
1449		dm_submit_bio_remap(clone, NULL);
1450		break;
1451	case DM_MAPIO_KILL:
1452	case DM_MAPIO_REQUEUE:
1453		if (static_branch_unlikely(&swap_bios_enabled) &&
1454		    unlikely(swap_bios_limit(ti, clone)))
1455			up(&md->swap_bios_semaphore);
1456		free_tio(clone);
1457		if (r == DM_MAPIO_KILL)
1458			dm_io_dec_pending(io, BLK_STS_IOERR);
1459		else
1460			dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
1461		break;
1462	default:
1463		DMCRIT("unimplemented target map return value: %d", r);
1464		BUG();
1465	}
1466}
1467
1468static void setup_split_accounting(struct clone_info *ci, unsigned int len)
1469{
1470	struct dm_io *io = ci->io;
1471
1472	if (ci->sector_count > len) {
1473		/*
1474		 * Split needed, save the mapped part for accounting.
1475		 * NOTE: dm_accept_partial_bio() will update accordingly.
1476		 */
1477		dm_io_set_flag(io, DM_IO_WAS_SPLIT);
1478		io->sectors = len;
1479		io->sector_offset = bio_sectors(ci->bio);
1480	}
1481}
1482
1483static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1484				struct dm_target *ti, unsigned int num_bios,
1485				unsigned *len, gfp_t gfp_flag)
1486{
1487	struct bio *bio;
1488	int try = (gfp_flag & GFP_NOWAIT) ? 0 : 1;
1489
1490	for (; try < 2; try++) {
1491		int bio_nr;
1492
1493		if (try && num_bios > 1)
1494			mutex_lock(&ci->io->md->table_devices_lock);
1495		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1496			bio = alloc_tio(ci, ti, bio_nr, len,
1497					try ? GFP_NOIO : GFP_NOWAIT);
1498			if (!bio)
1499				break;
1500
1501			bio_list_add(blist, bio);
1502		}
1503		if (try && num_bios > 1)
1504			mutex_unlock(&ci->io->md->table_devices_lock);
1505		if (bio_nr == num_bios)
1506			return;
1507
1508		while ((bio = bio_list_pop(blist)))
1509			free_tio(bio);
1510	}
1511}
1512
1513static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1514					  unsigned int num_bios, unsigned int *len,
1515					  gfp_t gfp_flag)
1516{
1517	struct bio_list blist = BIO_EMPTY_LIST;
1518	struct bio *clone;
1519	unsigned int ret = 0;
1520
1521	if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */
1522		return 0;
1523
1524	/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
1525	if (len)
1526		setup_split_accounting(ci, *len);
1527
1528	/*
1529	 * Using alloc_multiple_bios(), even if num_bios is 1, to consistently
1530	 * support allocating using GFP_NOWAIT with GFP_NOIO fallback.
1531	 */
1532	alloc_multiple_bios(&blist, ci, ti, num_bios, len, gfp_flag);
1533	while ((clone = bio_list_pop(&blist))) {
1534		if (num_bios > 1)
1535			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
1536		__map_bio(clone);
1537		ret += 1;
1538	}
1539
1540	return ret;
1541}
1542
1543static void __send_empty_flush(struct clone_info *ci)
1544{
1545	struct dm_table *t = ci->map;
1546	struct bio flush_bio;
1547
1548	/*
1549	 * Use an on-stack bio for this, it's safe since we don't
1550	 * need to reference it after submit. It's just used as
1551	 * the basis for the clone(s).
1552	 */
1553	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
1554		 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
1555
1556	ci->bio = &flush_bio;
1557	ci->sector_count = 0;
1558	ci->io->tio.clone.bi_iter.bi_size = 0;
1559
1560	for (unsigned int i = 0; i < t->num_targets; i++) {
1561		unsigned int bios;
1562		struct dm_target *ti = dm_table_get_target(t, i);
1563
1564		if (unlikely(ti->num_flush_bios == 0))
1565			continue;
1566
1567		atomic_add(ti->num_flush_bios, &ci->io->io_count);
1568		bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios,
1569					     NULL, GFP_NOWAIT);
1570		atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
1571	}
1572
1573	/*
1574	 * alloc_io() takes one extra reference for submission, so the
1575	 * reference won't reach 0 without the following subtraction
1576	 */
1577	atomic_sub(1, &ci->io->io_count);
1578
1579	bio_uninit(ci->bio);
1580}
1581
1582static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
1583			       unsigned int num_bios, unsigned int max_granularity,
1584			       unsigned int max_sectors)
1585{
1586	unsigned int len, bios;
1587
1588	len = min_t(sector_t, ci->sector_count,
1589		    __max_io_len(ti, ci->sector, max_granularity, max_sectors));
1590
1591	atomic_add(num_bios, &ci->io->io_count);
1592	bios = __send_duplicate_bios(ci, ti, num_bios, &len, GFP_NOIO);
1593	/*
1594	 * alloc_io() takes one extra reference for submission, so the
1595	 * reference won't reach 0 without the following (+1) subtraction
1596	 */
1597	atomic_sub(num_bios - bios + 1, &ci->io->io_count);
1598
1599	ci->sector += len;
1600	ci->sector_count -= len;
1601}
1602
1603static bool is_abnormal_io(struct bio *bio)
1604{
1605	enum req_op op = bio_op(bio);
1606
1607	if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
1608		switch (op) {
1609		case REQ_OP_DISCARD:
1610		case REQ_OP_SECURE_ERASE:
1611		case REQ_OP_WRITE_ZEROES:
1612			return true;
1613		default:
1614			break;
1615		}
1616	}
1617
1618	return false;
1619}
1620
1621static blk_status_t __process_abnormal_io(struct clone_info *ci,
1622					  struct dm_target *ti)
1623{
1624	unsigned int num_bios = 0;
1625	unsigned int max_granularity = 0;
1626	unsigned int max_sectors = 0;
1627	struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
1628
1629	switch (bio_op(ci->bio)) {
1630	case REQ_OP_DISCARD:
1631		num_bios = ti->num_discard_bios;
1632		max_sectors = limits->max_discard_sectors;
1633		if (ti->max_discard_granularity)
1634			max_granularity = max_sectors;
1635		break;
1636	case REQ_OP_SECURE_ERASE:
1637		num_bios = ti->num_secure_erase_bios;
1638		max_sectors = limits->max_secure_erase_sectors;
1639		if (ti->max_secure_erase_granularity)
1640			max_granularity = max_sectors;
1641		break;
1642	case REQ_OP_WRITE_ZEROES:
1643		num_bios = ti->num_write_zeroes_bios;
1644		max_sectors = limits->max_write_zeroes_sectors;
1645		if (ti->max_write_zeroes_granularity)
1646			max_granularity = max_sectors;
1647		break;
1648	default:
1649		break;
1650	}
1651
1652	/*
1653	 * Even though the device advertised support for this type of
1654	 * request, that does not mean every target supports it, and
1655	 * reconfiguration might also have changed that since the
1656	 * check was performed.
1657	 */
1658	if (unlikely(!num_bios))
1659		return BLK_STS_NOTSUPP;
1660
1661	__send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors);
1662
1663	return BLK_STS_OK;
1664}
1665
1666/*
1667 * Reuse ->bi_private as dm_io list head for storing all dm_io instances
1668 * associated with this bio, and this bio's bi_private needs to be
1669 * stored in dm_io->data before the reuse.
1670 *
1671 * bio->bi_private is owned by fs or upper layer, so block layer won't
1672 * touch it after splitting. Meantime it won't be changed by anyone after
1673 * bio is submitted. So this reuse is safe.
1674 */
1675static inline struct dm_io **dm_poll_list_head(struct bio *bio)
1676{
1677	return (struct dm_io **)&bio->bi_private;
1678}
1679
1680static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
1681{
1682	struct dm_io **head = dm_poll_list_head(bio);
1683
1684	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
1685		bio->bi_opf |= REQ_DM_POLL_LIST;
1686		/*
1687		 * Save .bi_private into dm_io, so that we can reuse
1688		 * .bi_private as dm_io list head for storing dm_io list
1689		 */
1690		io->data = bio->bi_private;
1691
1692		/* tell block layer to poll for completion */
1693		bio->bi_cookie = ~BLK_QC_T_NONE;
1694
1695		io->next = NULL;
1696	} else {
1697		/*
1698		 * bio recursed due to split, reuse original poll list,
1699		 * and save bio->bi_private too.
1700		 */
1701		io->data = (*head)->data;
1702		io->next = *head;
1703	}
1704
1705	*head = io;
1706}
1707
1708/*
1709 * Select the correct strategy for processing a non-flush bio.
1710 */
1711static blk_status_t __split_and_process_bio(struct clone_info *ci)
1712{
1713	struct bio *clone;
1714	struct dm_target *ti;
1715	unsigned int len;
1716
1717	ti = dm_table_find_target(ci->map, ci->sector);
1718	if (unlikely(!ti))
1719		return BLK_STS_IOERR;
1720
1721	if (unlikely(ci->is_abnormal_io))
1722		return __process_abnormal_io(ci, ti);
1723
1724	/*
1725	 * Only support bio polling for normal IO, and the target io is
1726	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
1727	 */
1728	ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
1729
1730	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
1731	setup_split_accounting(ci, len);
1732
1733	if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) {
1734		if (unlikely(!dm_target_supports_nowait(ti->type)))
1735			return BLK_STS_NOTSUPP;
1736
1737		clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT);
1738		if (unlikely(!clone))
1739			return BLK_STS_AGAIN;
1740	} else {
1741		clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
1742	}
1743	__map_bio(clone);
1744
1745	ci->sector += len;
1746	ci->sector_count -= len;
1747
1748	return BLK_STS_OK;
1749}
1750
1751static void init_clone_info(struct clone_info *ci, struct dm_io *io,
1752			    struct dm_table *map, struct bio *bio, bool is_abnormal)
1753{
1754	ci->map = map;
1755	ci->io = io;
1756	ci->bio = bio;
1757	ci->is_abnormal_io = is_abnormal;
1758	ci->submit_as_polled = false;
1759	ci->sector = bio->bi_iter.bi_sector;
1760	ci->sector_count = bio_sectors(bio);
1761
1762	/* Shouldn't happen but sector_count was being set to 0 so... */
1763	if (static_branch_unlikely(&zoned_enabled) &&
1764	    WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
1765		ci->sector_count = 0;
1766}
1767
1768/*
1769 * Entry point to split a bio into clones and submit them to the targets.
1770 */
1771static void dm_split_and_process_bio(struct mapped_device *md,
1772				     struct dm_table *map, struct bio *bio)
1773{
1774	struct clone_info ci;
1775	struct dm_io *io;
1776	blk_status_t error = BLK_STS_OK;
1777	bool is_abnormal;
1778
1779	is_abnormal = is_abnormal_io(bio);
1780	if (unlikely(is_abnormal)) {
1781		/*
1782		 * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
1783		 * otherwise associated queue_limits won't be imposed.
1784		 */
1785		bio = bio_split_to_limits(bio);
1786		if (!bio)
1787			return;
1788	}
1789
1790	/* Only support nowait for normal IO */
1791	if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
1792		io = alloc_io(md, bio, GFP_NOWAIT);
1793		if (unlikely(!io)) {
1794			/* Unable to do anything without dm_io. */
1795			bio_wouldblock_error(bio);
1796			return;
1797		}
1798	} else {
1799		io = alloc_io(md, bio, GFP_NOIO);
1800	}
1801	init_clone_info(&ci, io, map, bio, is_abnormal);
1802
1803	if (bio->bi_opf & REQ_PREFLUSH) {
1804		__send_empty_flush(&ci);
1805		/* dm_io_complete submits any data associated with flush */
1806		goto out;
1807	}
1808
1809	error = __split_and_process_bio(&ci);
1810	if (error || !ci.sector_count)
1811		goto out;
1812	/*
1813	 * Remainder must be passed to submit_bio_noacct() so it gets handled
1814	 * *after* bios already submitted have been completely processed.
1815	 */
1816	bio_trim(bio, io->sectors, ci.sector_count);
1817	trace_block_split(bio, bio->bi_iter.bi_sector);
1818	bio_inc_remaining(bio);
1819	submit_bio_noacct(bio);
1820out:
1821	/*
1822	 * Drop the extra reference count for non-POLLED bio, and hold one
1823	 * reference for POLLED bio, which will be released in dm_poll_bio
1824	 *
1825	 * Add every dm_io instance into the dm_io list head which is stored
1826	 * in bio->bi_private, so that dm_poll_bio can poll them all.
1827	 */
1828	if (error || !ci.submit_as_polled) {
1829		/*
1830		 * In case of submission failure, the extra reference for
1831		 * submitting io isn't consumed yet
1832		 */
1833		if (error)
1834			atomic_dec(&io->io_count);
1835		dm_io_dec_pending(io, error);
1836	} else
1837		dm_queue_poll_io(bio, io);
1838}
1839
1840static void dm_submit_bio(struct bio *bio)
1841{
1842	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
1843	int srcu_idx;
1844	struct dm_table *map;
1845
1846	map = dm_get_live_table(md, &srcu_idx);
1847
1848	/* If suspended, or map not yet available, queue this IO for later */
1849	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
1850	    unlikely(!map)) {
1851		if (bio->bi_opf & REQ_NOWAIT)
1852			bio_wouldblock_error(bio);
1853		else if (bio->bi_opf & REQ_RAHEAD)
1854			bio_io_error(bio);
1855		else
1856			queue_io(md, bio);
1857		goto out;
1858	}
1859
1860	dm_split_and_process_bio(md, map, bio);
1861out:
1862	dm_put_live_table(md, srcu_idx);
1863}
1864
1865static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
1866			  unsigned int flags)
1867{
1868	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
1869
1870	/* don't poll if the mapped io is done */
1871	if (atomic_read(&io->io_count) > 1)
1872		bio_poll(&io->tio.clone, iob, flags);
1873
1874	/* bio_poll holds the last reference */
1875	return atomic_read(&io->io_count) == 1;
1876}
1877
1878static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
1879		       unsigned int flags)
1880{
1881	struct dm_io **head = dm_poll_list_head(bio);
1882	struct dm_io *list = *head;
1883	struct dm_io *tmp = NULL;
1884	struct dm_io *curr, *next;
1885
1886	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
1887	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
1888		return 0;
1889
1890	WARN_ON_ONCE(!list);
1891
1892	/*
1893	 * Restore .bi_private before possibly completing dm_io.
1894	 *
1895	 * bio_poll() is only possible once @bio has been completely
1896	 * submitted via submit_bio_noacct()'s depth-first submission.
1897	 * So there is no dm_queue_poll_io() race associated with
1898	 * clearing REQ_DM_POLL_LIST here.
1899	 */
1900	bio->bi_opf &= ~REQ_DM_POLL_LIST;
1901	bio->bi_private = list->data;
1902
1903	for (curr = list, next = curr->next; curr; curr = next, next =
1904			curr ? curr->next : NULL) {
1905		if (dm_poll_dm_io(curr, iob, flags)) {
1906			/*
1907			 * clone_endio() has already occurred, so no
1908			 * error handling is needed here.
1909			 */
1910			__dm_io_dec_pending(curr);
1911		} else {
1912			curr->next = tmp;
1913			tmp = curr;
1914		}
1915	}
1916
1917	/* Not done? */
1918	if (tmp) {
1919		bio->bi_opf |= REQ_DM_POLL_LIST;
1920		/* Reset bio->bi_private to dm_io list head */
1921		*head = tmp;
1922		return 0;
1923	}
1924	return 1;
1925}
1926
1927/*
1928 *---------------------------------------------------------------
1929 * An IDR is used to keep track of allocated minor numbers.
1930 *---------------------------------------------------------------
1931 */
1932static void free_minor(int minor)
1933{
1934	spin_lock(&_minor_lock);
1935	idr_remove(&_minor_idr, minor);
1936	spin_unlock(&_minor_lock);
1937}
1938
1939/*
1940 * See if the device with a specific minor # is free.
1941 */
1942static int specific_minor(int minor)
1943{
1944	int r;
1945
1946	if (minor >= (1 << MINORBITS))
1947		return -EINVAL;
1948
1949	idr_preload(GFP_KERNEL);
1950	spin_lock(&_minor_lock);
1951
1952	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1953
1954	spin_unlock(&_minor_lock);
1955	idr_preload_end();
1956	if (r < 0)
1957		return r == -ENOSPC ? -EBUSY : r;
1958	return 0;
1959}
1960
1961static int next_free_minor(int *minor)
1962{
1963	int r;
1964
1965	idr_preload(GFP_KERNEL);
1966	spin_lock(&_minor_lock);
1967
1968	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1969
 
1970	spin_unlock(&_minor_lock);
1971	idr_preload_end();
1972	if (r < 0)
1973		return r;
1974	*minor = r;
1975	return 0;
1976}
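/*
 * Both minor allocators above use the idr_preload()/idr_alloc(GFP_NOWAIT)
 * pattern: node memory is preallocated outside _minor_lock so the
 * allocation done under the spinlock never sleeps.  The slot initially
 * holds the MINOR_ALLOCED placeholder and is only swapped for the real
 * mapped_device by idr_replace() at the end of alloc_dev().
 */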
1977
1978static const struct block_device_operations dm_blk_dops;
1979static const struct block_device_operations dm_rq_blk_dops;
1980static const struct dax_operations dm_dax_ops;
1981
1982static void dm_wq_work(struct work_struct *work);
1983
1984#ifdef CONFIG_BLK_INLINE_ENCRYPTION
1985static void dm_queue_destroy_crypto_profile(struct request_queue *q)
1986{
1987	dm_destroy_crypto_profile(q->crypto_profile);
1988}
1989
1990#else /* CONFIG_BLK_INLINE_ENCRYPTION */
1991
1992static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
1993{
1994}
1995#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1996
1997static void cleanup_mapped_device(struct mapped_device *md)
1998{
1999	if (md->wq)
2000		destroy_workqueue(md->wq);
2001	dm_free_md_mempools(md->mempools);
2002
2003	if (md->dax_dev) {
2004		dax_remove_host(md->disk);
2005		kill_dax(md->dax_dev);
2006		put_dax(md->dax_dev);
2007		md->dax_dev = NULL;
2008	}
2009
2010	dm_cleanup_zoned_dev(md);
2011	if (md->disk) {
2012		spin_lock(&_minor_lock);
2013		md->disk->private_data = NULL;
2014		spin_unlock(&_minor_lock);
2015		if (dm_get_md_type(md) != DM_TYPE_NONE) {
2016			struct table_device *td;
2017
2018			dm_sysfs_exit(md);
2019			list_for_each_entry(td, &md->table_devices, list) {
2020				bd_unlink_disk_holder(td->dm_dev.bdev,
2021						      md->disk);
2022			}
2023
2024			/*
 2025			 * Hold lock to make sure del_gendisk() won't run
 2026			 * concurrently with open/close_table_device().
2027			 */
2028			mutex_lock(&md->table_devices_lock);
2029			del_gendisk(md->disk);
2030			mutex_unlock(&md->table_devices_lock);
2031		}
2032		dm_queue_destroy_crypto_profile(md->queue);
2033		put_disk(md->disk);
2034	}
2035
2036	if (md->pending_io) {
2037		free_percpu(md->pending_io);
2038		md->pending_io = NULL;
2039	}
2040
2041	cleanup_srcu_struct(&md->io_barrier);
2042
2043	mutex_destroy(&md->suspend_lock);
2044	mutex_destroy(&md->type_lock);
2045	mutex_destroy(&md->table_devices_lock);
2046	mutex_destroy(&md->swap_bios_lock);
2047
2048	dm_mq_cleanup_mapped_device(md);
2049}
2050
2051/*
2052 * Allocate and initialise a blank device with a given minor.
2053 */
2054static struct mapped_device *alloc_dev(int minor)
2055{
2056	int r, numa_node_id = dm_get_numa_node();
2057	struct mapped_device *md;
2058	void *old_md;
2059
2060	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
2061	if (!md) {
2062		DMERR("unable to allocate device, out of memory.");
2063		return NULL;
2064	}
2065
2066	if (!try_module_get(THIS_MODULE))
2067		goto bad_module_get;
2068
2069	/* get a minor number for the dev */
2070	if (minor == DM_ANY_MINOR)
2071		r = next_free_minor(&minor);
2072	else
2073		r = specific_minor(minor);
2074	if (r < 0)
2075		goto bad_minor;
2076
2077	r = init_srcu_struct(&md->io_barrier);
2078	if (r < 0)
2079		goto bad_io_barrier;
2080
2081	md->numa_node_id = numa_node_id;
2082	md->init_tio_pdu = false;
2083	md->type = DM_TYPE_NONE;
 
2084	mutex_init(&md->suspend_lock);
2085	mutex_init(&md->type_lock);
2086	mutex_init(&md->table_devices_lock);
2087	spin_lock_init(&md->deferred_lock);
 
2088	atomic_set(&md->holders, 1);
2089	atomic_set(&md->open_count, 0);
2090	atomic_set(&md->event_nr, 0);
2091	atomic_set(&md->uevent_seq, 0);
2092	INIT_LIST_HEAD(&md->uevent_list);
2093	INIT_LIST_HEAD(&md->table_devices);
2094	spin_lock_init(&md->uevent_lock);
2095
2096	/*
2097	 * default to bio-based until DM table is loaded and md->type
2098	 * established. If request-based table is loaded: blk-mq will
2099	 * override accordingly.
2100	 */
2101	md->disk = blk_alloc_disk(md->numa_node_id);
 
2102	if (!md->disk)
2103		goto bad;
2104	md->queue = md->disk->queue;
2105
2106	init_waitqueue_head(&md->wait);
2107	INIT_WORK(&md->work, dm_wq_work);
2108	INIT_WORK(&md->requeue_work, dm_wq_requeue_work);
2109	init_waitqueue_head(&md->eventq);
2110	init_completion(&md->kobj_holder.completion);
2111
2112	md->requeue_list = NULL;
2113	md->swap_bios = get_swap_bios();
2114	sema_init(&md->swap_bios_semaphore, md->swap_bios);
2115	mutex_init(&md->swap_bios_lock);
2116
2117	md->disk->major = _major;
2118	md->disk->first_minor = minor;
2119	md->disk->minors = 1;
2120	md->disk->flags |= GENHD_FL_NO_PART;
2121	md->disk->fops = &dm_blk_dops;
 
2122	md->disk->private_data = md;
2123	sprintf(md->disk->disk_name, "dm-%d", minor);
2124
2125	if (IS_ENABLED(CONFIG_FS_DAX)) {
2126		md->dax_dev = alloc_dax(md, &dm_dax_ops);
2127		if (IS_ERR(md->dax_dev)) {
2128			md->dax_dev = NULL;
2129			goto bad;
2130		}
2131		set_dax_nocache(md->dax_dev);
2132		set_dax_nomc(md->dax_dev);
2133		if (dax_add_host(md->dax_dev, md->disk))
2134			goto bad;
2135	}
2136
2137	format_dev_t(md->name, MKDEV(_major, minor));
2138
2139	md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
 
2140	if (!md->wq)
2141		goto bad;
2142
2143	md->pending_io = alloc_percpu(unsigned long);
2144	if (!md->pending_io)
2145		goto bad;
2146
2147	r = dm_stats_init(&md->stats);
2148	if (r < 0)
2149		goto bad;
2150
2151	/* Populate the mapping, nobody knows we exist yet */
2152	spin_lock(&_minor_lock);
2153	old_md = idr_replace(&_minor_idr, md, minor);
2154	spin_unlock(&_minor_lock);
2155
2156	BUG_ON(old_md != MINOR_ALLOCED);
2157
2158	return md;
2159
2160bad:
2161	cleanup_mapped_device(md);
2162bad_io_barrier:
2163	free_minor(minor);
2164bad_minor:
2165	module_put(THIS_MODULE);
2166bad_module_get:
2167	kvfree(md);
2168	return NULL;
2169}
2170
2171static void unlock_fs(struct mapped_device *md);
2172
2173static void free_dev(struct mapped_device *md)
2174{
2175	int minor = MINOR(disk_devt(md->disk));
2176
2177	unlock_fs(md);
2178
2179	cleanup_mapped_device(md);
2180
2181	WARN_ON_ONCE(!list_empty(&md->table_devices));
2182	dm_stats_cleanup(&md->stats);
2183	free_minor(minor);
2184
2185	module_put(THIS_MODULE);
2186	kvfree(md);
2187}
2188
2189/*
2190 * Bind a table to the device.
2191 */
2192static void event_callback(void *context)
2193{
2194	unsigned long flags;
2195	LIST_HEAD(uevents);
2196	struct mapped_device *md = context;
2197
2198	spin_lock_irqsave(&md->uevent_lock, flags);
2199	list_splice_init(&md->uevent_list, &uevents);
2200	spin_unlock_irqrestore(&md->uevent_lock, flags);
2201
2202	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2203
2204	atomic_inc(&md->event_nr);
2205	wake_up(&md->eventq);
2206	dm_issue_global_event();
2207}
2208
2209/*
2210 * Returns old map, which caller must destroy.
2211 */
2212static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2213			       struct queue_limits *limits)
2214{
2215	struct dm_table *old_map;
 
2216	sector_t size;
2217	int ret;
2218
2219	lockdep_assert_held(&md->suspend_lock);
2220
2221	size = dm_table_get_size(t);
2222
2223	/*
2224	 * Wipe any geometry if the size of the table changed.
2225	 */
2226	if (size != dm_get_size(md))
2227		memset(&md->geometry, 0, sizeof(md->geometry));
2228
2229	set_capacity(md->disk, size);
2230
2231	dm_table_event_callback(t, event_callback, md);
2232
2233	if (dm_table_request_based(t)) {
2234		/*
2235		 * Leverage the fact that request-based DM targets are
2236		 * immutable singletons - used to optimize dm_mq_queue_rq.
2237		 */
2238		md->immutable_target = dm_table_get_immutable_target(t);
2239
2240		/*
2241		 * There is no need to reload with request-based dm because the
2242		 * size of front_pad doesn't change.
2243		 *
2244		 * Note for future: If you are to reload bioset, prep-ed
2245		 * requests in the queue may refer to bio from the old bioset,
2246		 * so you must walk through the queue to unprep.
2247		 */
2248		if (!md->mempools) {
2249			md->mempools = t->mempools;
2250			t->mempools = NULL;
2251		}
2252	} else {
2253		/*
2254		 * The md may already have mempools that need changing.
2255		 * If so, reload bioset because front_pad may have changed
2256		 * because a different table was loaded.
2257		 */
2258		dm_free_md_mempools(md->mempools);
2259		md->mempools = t->mempools;
2260		t->mempools = NULL;
2261	}
2262
2263	ret = dm_table_set_restrictions(t, md->queue, limits);
2264	if (ret) {
2265		old_map = ERR_PTR(ret);
2266		goto out;
2267	}
2268
2269	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2270	rcu_assign_pointer(md->map, (void *)t);
2271	md->immutable_target_type = dm_table_get_immutable_target_type(t);
2272
2273	if (old_map)
2274		dm_sync_table(md);
2275out:
2276	return old_map;
2277}
2278
2279/*
2280 * Returns unbound table for the caller to free.
2281 */
2282static struct dm_table *__unbind(struct mapped_device *md)
2283{
2284	struct dm_table *map = rcu_dereference_protected(md->map, 1);
 
2285
2286	if (!map)
2287		return NULL;
2288
2289	dm_table_event_callback(map, NULL, NULL);
2290	RCU_INIT_POINTER(md->map, NULL);
2291	dm_sync_table(md);
 
2292
2293	return map;
2294}
2295
2296/*
2297 * Constructor for a new device.
2298 */
2299int dm_create(int minor, struct mapped_device **result)
2300{
2301	struct mapped_device *md;
2302
2303	md = alloc_dev(minor);
2304	if (!md)
2305		return -ENXIO;
2306
2307	dm_ima_reset_data(md);
2308
2309	*result = md;
2310	return 0;
2311}
2312
2313/*
2314 * Functions to manage md->type.
2315 * All are required to hold md->type_lock.
2316 */
2317void dm_lock_md_type(struct mapped_device *md)
2318{
2319	mutex_lock(&md->type_lock);
2320}
2321
2322void dm_unlock_md_type(struct mapped_device *md)
2323{
2324	mutex_unlock(&md->type_lock);
2325}
2326
2327void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2328{
2329	BUG_ON(!mutex_is_locked(&md->type_lock));
2330	md->type = type;
2331}
2332
2333enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2334{
2335	return md->type;
2336}
2337
2338struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2339{
2340	return md->immutable_target_type;
2341}
2342
2343/*
2344 * Setup the DM device's queue based on md's type
2345 */
2346int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2347{
2348	enum dm_queue_mode type = dm_table_get_type(t);
2349	struct queue_limits limits;
2350	struct table_device *td;
2351	int r;
2352
2353	switch (type) {
2354	case DM_TYPE_REQUEST_BASED:
2355		md->disk->fops = &dm_rq_blk_dops;
2356		r = dm_mq_init_request_queue(md, t);
2357		if (r) {
2358			DMERR("Cannot initialize queue for request-based dm mapped device");
2359			return r;
2360		}
2361		break;
2362	case DM_TYPE_BIO_BASED:
2363	case DM_TYPE_DAX_BIO_BASED:
2364		blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue);
2365		break;
2366	case DM_TYPE_NONE:
2367		WARN_ON_ONCE(true);
2368		break;
2369	}
2370
2371	r = dm_calculate_queue_limits(t, &limits);
2372	if (r) {
2373		DMERR("Cannot calculate initial queue limits");
2374		return r;
2375	}
2376	r = dm_table_set_restrictions(t, md->queue, &limits);
2377	if (r)
2378		return r;
2379
2380	/*
 2381	 * Hold lock to make sure add_disk() and del_gendisk() won't run
 2382	 * concurrently with open_table_device() and close_table_device().
2383	 */
2384	mutex_lock(&md->table_devices_lock);
2385	r = add_disk(md->disk);
2386	mutex_unlock(&md->table_devices_lock);
2387	if (r)
2388		return r;
2389
2390	/*
2391	 * Register the holder relationship for devices added before the disk
2392	 * was live.
2393	 */
2394	list_for_each_entry(td, &md->table_devices, list) {
2395		r = bd_link_disk_holder(td->dm_dev.bdev, md->disk);
2396		if (r)
2397			goto out_undo_holders;
2398	}
2399
2400	r = dm_sysfs_init(md);
2401	if (r)
2402		goto out_undo_holders;
2403
2404	md->type = type;
2405	return 0;
2406
2407out_undo_holders:
2408	list_for_each_entry_continue_reverse(td, &md->table_devices, list)
2409		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
2410	mutex_lock(&md->table_devices_lock);
2411	del_gendisk(md->disk);
2412	mutex_unlock(&md->table_devices_lock);
2413	return r;
2414}
2415
2416struct mapped_device *dm_get_md(dev_t dev)
2417{
2418	struct mapped_device *md;
2419	unsigned int minor = MINOR(dev);
2420
2421	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2422		return NULL;
2423
2424	spin_lock(&_minor_lock);
2425
2426	md = idr_find(&_minor_idr, minor);
2427	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2428	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2429		md = NULL;
2430		goto out;
2431	}
2432	dm_get(md);
2433out:
2434	spin_unlock(&_minor_lock);
2435
2436	return md;
2437}
2438EXPORT_SYMBOL_GPL(dm_get_md);
2439
2440void *dm_get_mdptr(struct mapped_device *md)
2441{
2442	return md->interface_ptr;
2443}
2444
2445void dm_set_mdptr(struct mapped_device *md, void *ptr)
2446{
2447	md->interface_ptr = ptr;
2448}
2449
2450void dm_get(struct mapped_device *md)
2451{
2452	atomic_inc(&md->holders);
2453	BUG_ON(test_bit(DMF_FREEING, &md->flags));
2454}
2455
2456int dm_hold(struct mapped_device *md)
2457{
2458	spin_lock(&_minor_lock);
2459	if (test_bit(DMF_FREEING, &md->flags)) {
2460		spin_unlock(&_minor_lock);
2461		return -EBUSY;
2462	}
2463	dm_get(md);
2464	spin_unlock(&_minor_lock);
2465	return 0;
2466}
2467EXPORT_SYMBOL_GPL(dm_hold);
2468
2469const char *dm_device_name(struct mapped_device *md)
2470{
2471	return md->name;
2472}
2473EXPORT_SYMBOL_GPL(dm_device_name);
2474
2475static void __dm_destroy(struct mapped_device *md, bool wait)
2476{
2477	struct dm_table *map;
2478	int srcu_idx;
2479
2480	might_sleep();
2481
2482	spin_lock(&_minor_lock);
 
2483	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2484	set_bit(DMF_FREEING, &md->flags);
2485	spin_unlock(&_minor_lock);
2486
2487	blk_mark_disk_dead(md->disk);
2488
2489	/*
2490	 * Take suspend_lock so that presuspend and postsuspend methods
2491	 * do not race with internal suspend.
2492	 */
2493	mutex_lock(&md->suspend_lock);
2494	map = dm_get_live_table(md, &srcu_idx);
2495	if (!dm_suspended_md(md)) {
2496		dm_table_presuspend_targets(map);
2497		set_bit(DMF_SUSPENDED, &md->flags);
2498		set_bit(DMF_POST_SUSPENDING, &md->flags);
2499		dm_table_postsuspend_targets(map);
2500	}
2501	/* dm_put_live_table must be before fsleep, otherwise deadlock is possible */
2502	dm_put_live_table(md, srcu_idx);
2503	mutex_unlock(&md->suspend_lock);
2504
2505	/*
 2506	 * Rare, but there may still be I/O requests in flight that have
 2507	 * yet to complete.  Wait for all references to disappear.
 2508	 * No one should increment the reference count of the mapped_device
 2509	 * after its state becomes DMF_FREEING.
2510	 */
2511	if (wait)
2512		while (atomic_read(&md->holders))
2513			fsleep(1000);
2514	else if (atomic_read(&md->holders))
2515		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2516		       dm_device_name(md), atomic_read(&md->holders));
2517
2518	dm_table_destroy(__unbind(md));
2519	free_dev(md);
2520}
2521
2522void dm_destroy(struct mapped_device *md)
2523{
2524	__dm_destroy(md, true);
2525}
2526
2527void dm_destroy_immediate(struct mapped_device *md)
2528{
2529	__dm_destroy(md, false);
2530}
2531
2532void dm_put(struct mapped_device *md)
2533{
2534	atomic_dec(&md->holders);
2535}
2536EXPORT_SYMBOL_GPL(dm_put);
2537
2538static bool dm_in_flight_bios(struct mapped_device *md)
2539{
2540	int cpu;
2541	unsigned long sum = 0;
2542
2543	for_each_possible_cpu(cpu)
2544		sum += *per_cpu_ptr(md->pending_io, cpu);
2545
2546	return sum != 0;
2547}
2548
2549static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
2550{
2551	int r = 0;
2552	DEFINE_WAIT(wait);
2553
2554	while (true) {
2555		prepare_to_wait(&md->wait, &wait, task_state);
2556
2557		if (!dm_in_flight_bios(md))
2558			break;
2559
2560		if (signal_pending_state(task_state, current)) {
 
2561			r = -EINTR;
2562			break;
2563		}
2564
2565		io_schedule();
2566	}
2567	finish_wait(&md->wait, &wait);
2568
2569	smp_rmb();
2570
2571	return r;
2572}
2573
2574static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
2575{
2576	int r = 0;
2577
2578	if (!queue_is_mq(md->queue))
2579		return dm_wait_for_bios_completion(md, task_state);
2580
2581	while (true) {
2582		if (!blk_mq_queue_inflight(md->queue))
2583			break;
2584
2585		if (signal_pending_state(task_state, current)) {
2586			r = -EINTR;
2587			break;
2588		}
2589
2590		fsleep(5000);
2591	}
2592
2593	return r;
2594}
2595
2596/*
2597 * Process the deferred bios
2598 */
2599static void dm_wq_work(struct work_struct *work)
2600{
2601	struct mapped_device *md = container_of(work, struct mapped_device, work);
2602	struct bio *bio;
2603
2604	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2605		spin_lock_irq(&md->deferred_lock);
2606		bio = bio_list_pop(&md->deferred);
2607		spin_unlock_irq(&md->deferred_lock);
2608
2609		if (!bio)
2610			break;
2611
2612		submit_bio_noacct(bio);
2613		cond_resched();
2614	}
2615}
2616
2617static void dm_queue_flush(struct mapped_device *md)
2618{
2619	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2620	smp_mb__after_atomic();
2621	queue_work(md->wq, &md->work);
2622}
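/*
 * dm_queue_flush() is the counterpart of the DMF_BLOCK_IO_FOR_SUSPEND
 * gating in __dm_suspend(): clearing the flag (ordered by
 * smp_mb__after_atomic()) re-enables dm_wq_work(), and the queue_work()
 * call kicks it so the bios parked on md->deferred are resubmitted.
 */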
2623
2624/*
2625 * Swap in a new table, returning the old one for the caller to destroy.
2626 */
2627struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2628{
2629	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2630	struct queue_limits limits;
2631	int r;
2632
2633	mutex_lock(&md->suspend_lock);
2634
2635	/* device must be suspended */
2636	if (!dm_suspended_md(md))
2637		goto out;
2638
2639	/*
2640	 * If the new table has no data devices, retain the existing limits.
2641	 * This helps multipath with queue_if_no_path if all paths disappear,
2642	 * then new I/O is queued based on these limits, and then some paths
2643	 * reappear.
2644	 */
2645	if (dm_table_has_no_data_devices(table)) {
2646		live_map = dm_get_live_table_fast(md);
2647		if (live_map)
2648			limits = md->queue->limits;
2649		dm_put_live_table_fast(md);
2650	}
2651
2652	if (!live_map) {
2653		r = dm_calculate_queue_limits(table, &limits);
2654		if (r) {
2655			map = ERR_PTR(r);
2656			goto out;
2657		}
2658	}
2659
2660	map = __bind(md, table, &limits);
2661	dm_issue_global_event();
2662
2663out:
2664	mutex_unlock(&md->suspend_lock);
2665	return map;
2666}
2667
2668/*
2669 * Functions to lock and unlock any filesystem running on the
2670 * device.
2671 */
2672static int lock_fs(struct mapped_device *md)
2673{
2674	int r;
2675
2676	WARN_ON(test_bit(DMF_FROZEN, &md->flags));
2677
2678	r = bdev_freeze(md->disk->part0);
2679	if (!r)
2680		set_bit(DMF_FROZEN, &md->flags);
2681	return r;
2682}
2683
2684static void unlock_fs(struct mapped_device *md)
2685{
2686	if (!test_bit(DMF_FROZEN, &md->flags))
2687		return;
2688	bdev_thaw(md->disk->part0);
2689	clear_bit(DMF_FROZEN, &md->flags);
2690}
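/*
 * lock_fs()/unlock_fs() bracket the optional filesystem freeze requested
 * via DM_SUSPEND_LOCKFS_FLAG.  DMF_FROZEN records that bdev_freeze()
 * succeeded, and unlock_fs() is a no-op unless that flag is set, so it is
 * safe to call from every resume and error path.
 */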
2691
2692/*
2693 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2694 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2695 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2696 *
2697 * If __dm_suspend returns 0, the device is completely quiescent
2698 * now. There is no request-processing activity. All new requests
2699 * are being added to md->deferred list.
2700 */
2701static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2702			unsigned int suspend_flags, unsigned int task_state,
2703			int dmf_suspended_flag)
2704{
2705	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2706	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2707	int r;
2708
2709	lockdep_assert_held(&md->suspend_lock);
2710
2711	/*
2712	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2713	 * This flag is cleared before dm_suspend returns.
2714	 */
2715	if (noflush)
2716		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2717	else
2718		DMDEBUG("%s: suspending with flush", dm_device_name(md));
2719
2720	/*
2721	 * This gets reverted if there's an error later and the targets
2722	 * provide the .presuspend_undo hook.
2723	 */
2724	dm_table_presuspend_targets(map);
2725
2726	/*
2727	 * Flush I/O to the device.
2728	 * Any I/O submitted after lock_fs() may not be flushed.
2729	 * noflush takes precedence over do_lockfs.
2730	 * (lock_fs() flushes I/Os and waits for them to complete.)
2731	 */
2732	if (!noflush && do_lockfs) {
2733		r = lock_fs(md);
2734		if (r) {
2735			dm_table_presuspend_undo_targets(map);
2736			return r;
2737		}
2738	}
2739
2740	/*
2741	 * Here we must make sure that no processes are submitting requests
2742	 * to target drivers i.e. no one may be executing
2743	 * dm_split_and_process_bio from dm_submit_bio.
 
2744	 *
2745	 * To get all processes out of dm_split_and_process_bio in dm_submit_bio,
2746	 * we take the write lock. To prevent any process from reentering
2747	 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread
2748	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2749	 * flush_workqueue(md->wq).
2750	 */
 
2751	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2752	if (map)
2753		synchronize_srcu(&md->io_barrier);
2754
2755	/*
2756	 * Stop md->queue before flushing md->wq in case request-based
2757	 * dm defers requests to md->wq from md->queue.
2758	 */
2759	if (dm_request_based(md))
2760		dm_stop_queue(md->queue);
2761
2762	flush_workqueue(md->wq);
2763
2764	/*
2765	 * At this point no more requests are entering target request routines.
2766	 * We call dm_wait_for_completion to wait for all existing requests
2767	 * to finish.
2768	 */
2769	r = dm_wait_for_completion(md, task_state);
2770	if (!r)
2771		set_bit(dmf_suspended_flag, &md->flags);
2772
 
2773	if (noflush)
2774		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2775	if (map)
2776		synchronize_srcu(&md->io_barrier);
2777
 2778	/* were we interrupted? */
2779	if (r < 0) {
2780		dm_queue_flush(md);
2781
2782		if (dm_request_based(md))
2783			dm_start_queue(md->queue);
2784
2785		unlock_fs(md);
2786		dm_table_presuspend_undo_targets(map);
2787		/* pushback list is already flushed, so skip flush */
2788	}
2789
2790	return r;
2791}
2792
2793/*
2794 * We need to be able to change a mapping table under a mounted
2795 * filesystem.  For example we might want to move some data in
2796 * the background.  Before the table can be swapped with
2797 * dm_bind_table, dm_suspend must be called to flush any in
2798 * flight bios and ensure that any further io gets deferred.
2799 */
2800/*
2801 * Suspend mechanism in request-based dm.
2802 *
2803 * 1. Flush all I/Os by lock_fs() if needed.
2804 * 2. Stop dispatching any I/O by stopping the request_queue.
2805 * 3. Wait for all in-flight I/Os to be completed or requeued.
2806 *
2807 * To abort suspend, start the request_queue.
2808 */
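/*
 * A minimal sketch of a caller-side table swap on a live device (for
 * illustration only; error handling of dm_suspend()/dm_resume() is elided
 * and new_table stands in for a table the caller has already loaded):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR_OR_NULL(old_map))
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 */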
2809int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
2810{
2811	struct dm_table *map = NULL;
2812	int r = 0;
2813
2814retry:
2815	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2816
2817	if (dm_suspended_md(md)) {
2818		r = -EINVAL;
2819		goto out_unlock;
2820	}
2821
2822	if (dm_suspended_internally_md(md)) {
2823		/* already internally suspended, wait for internal resume */
2824		mutex_unlock(&md->suspend_lock);
2825		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2826		if (r)
2827			return r;
2828		goto retry;
2829	}
2830
2831	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2832	if (!map) {
2833		/* avoid deadlock with fs/namespace.c:do_mount() */
2834		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
2835	}
2836
2837	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2838	if (r)
2839		goto out_unlock;
2840
2841	set_bit(DMF_POST_SUSPENDING, &md->flags);
2842	dm_table_postsuspend_targets(map);
2843	clear_bit(DMF_POST_SUSPENDING, &md->flags);
2844
2845out_unlock:
2846	mutex_unlock(&md->suspend_lock);
2847	return r;
2848}
2849
2850static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2851{
2852	if (map) {
2853		int r = dm_table_resume_targets(map);
2854
2855		if (r)
2856			return r;
2857	}
2858
2859	dm_queue_flush(md);
2860
2861	/*
2862	 * Flushing deferred I/Os must be done after targets are resumed
2863	 * so that mapping of targets can work correctly.
2864	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2865	 */
2866	if (dm_request_based(md))
2867		dm_start_queue(md->queue);
2868
2869	unlock_fs(md);
2870
2871	return 0;
2872}
2873
2874int dm_resume(struct mapped_device *md)
2875{
2876	int r;
2877	struct dm_table *map = NULL;
2878
2879retry:
2880	r = -EINVAL;
2881	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2882
2883	if (!dm_suspended_md(md))
2884		goto out;
2885
2886	if (dm_suspended_internally_md(md)) {
2887		/* already internally suspended, wait for internal resume */
2888		mutex_unlock(&md->suspend_lock);
2889		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2890		if (r)
2891			return r;
2892		goto retry;
2893	}
2894
2895	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2896	if (!map || !dm_table_get_size(map))
2897		goto out;
2898
2899	r = __dm_resume(md, map);
2900	if (r)
2901		goto out;
2902
2903	clear_bit(DMF_SUSPENDED, &md->flags);
2904out:
2905	mutex_unlock(&md->suspend_lock);
2906
2907	return r;
2908}
2909
2910/*
2911 * Internal suspend/resume works like userspace-driven suspend. It waits
2912 * until all bios finish and prevents issuing new bios to the target drivers.
2913 * It may be used only from the kernel.
2914 */
2915
2916static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
2917{
2918	struct dm_table *map = NULL;
2919
2920	lockdep_assert_held(&md->suspend_lock);
2921
2922	if (md->internal_suspend_count++)
2923		return; /* nested internal suspend */
2924
2925	if (dm_suspended_md(md)) {
2926		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2927		return; /* nest suspend */
2928	}
2929
2930	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2931
2932	/*
2933	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2934	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
2935	 * would require changing .presuspend to return an error -- avoid this
2936	 * until there is a need for more elaborate variants of internal suspend.
2937	 */
2938	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2939			    DMF_SUSPENDED_INTERNALLY);
2940
2941	set_bit(DMF_POST_SUSPENDING, &md->flags);
2942	dm_table_postsuspend_targets(map);
2943	clear_bit(DMF_POST_SUSPENDING, &md->flags);
2944}
2945
2946static void __dm_internal_resume(struct mapped_device *md)
2947{
2948	BUG_ON(!md->internal_suspend_count);
2949
2950	if (--md->internal_suspend_count)
2951		return; /* resume from nested internal suspend */
2952
2953	if (dm_suspended_md(md))
2954		goto done; /* resume from nested suspend */
2955
2956	/*
2957	 * NOTE: existing callers don't need to call dm_table_resume_targets
2958	 * (which may fail -- so best to avoid it for now by passing NULL map)
 
2959	 */
2960	(void) __dm_resume(md, NULL);
 
2961
2962done:
2963	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2964	smp_mb__after_atomic();
2965	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2966}
2967
2968void dm_internal_suspend_noflush(struct mapped_device *md)
2969{
2970	mutex_lock(&md->suspend_lock);
2971	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2972	mutex_unlock(&md->suspend_lock);
2973}
2974EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2975
2976void dm_internal_resume(struct mapped_device *md)
2977{
2978	mutex_lock(&md->suspend_lock);
2979	__dm_internal_resume(md);
2980	mutex_unlock(&md->suspend_lock);
2981}
2982EXPORT_SYMBOL_GPL(dm_internal_resume);
2983
2984/*
2985 * Fast variants of internal suspend/resume hold md->suspend_lock,
2986 * which prevents interaction with userspace-driven suspend.
2987 */
2988
2989void dm_internal_suspend_fast(struct mapped_device *md)
2990{
2991	mutex_lock(&md->suspend_lock);
2992	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2993		return;
2994
2995	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2996	synchronize_srcu(&md->io_barrier);
2997	flush_workqueue(md->wq);
2998	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2999}
3000EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3001
3002void dm_internal_resume_fast(struct mapped_device *md)
3003{
3004	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3005		goto done;
3006
3007	dm_queue_flush(md);
3008
3009done:
3010	mutex_unlock(&md->suspend_lock);
3011}
3012EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3013
3014/*
3015 *---------------------------------------------------------------
3016 * Event notification.
3017 *---------------------------------------------------------------
3018 */
3019int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
3020		      unsigned int cookie, bool need_resize_uevent)
3021{
3022	int r;
3023	unsigned int noio_flag;
3024	char udev_cookie[DM_COOKIE_LENGTH];
3025	char *envp[3] = { NULL, NULL, NULL };
3026	char **envpp = envp;
3027	if (cookie) {
3028		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
3029			 DM_COOKIE_ENV_VAR_NAME, cookie);
3030		*envpp++ = udev_cookie;
3031	}
3032	if (need_resize_uevent) {
3033		*envpp++ = "RESIZE=1";
3034	}
3035
3036	noio_flag = memalloc_noio_save();
3037
3038	r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
3039
3040	memalloc_noio_restore(noio_flag);
3041
3042	return r;
3043}
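/*
 * The memalloc_noio_save() scope above matters because
 * kobject_uevent_env() allocates memory: letting those allocations enter
 * reclaim I/O could block on the very device emitting the event (for
 * instance while it is suspended around a resize), so the uevent is sent
 * with GFP_NOIO semantics.
 */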
3044
3045uint32_t dm_next_uevent_seq(struct mapped_device *md)
3046{
3047	return atomic_add_return(1, &md->uevent_seq);
3048}
3049
3050uint32_t dm_get_event_nr(struct mapped_device *md)
3051{
3052	return atomic_read(&md->event_nr);
3053}
3054
3055int dm_wait_event(struct mapped_device *md, int event_nr)
3056{
3057	return wait_event_interruptible(md->eventq,
3058			(event_nr != atomic_read(&md->event_nr)));
3059}
3060
3061void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
3062{
3063	unsigned long flags;
3064
3065	spin_lock_irqsave(&md->uevent_lock, flags);
3066	list_add(elist, &md->uevent_list);
3067	spin_unlock_irqrestore(&md->uevent_lock, flags);
3068}
3069
3070/*
3071 * The gendisk is only valid as long as you have a reference
3072 * count on 'md'.
3073 */
3074struct gendisk *dm_disk(struct mapped_device *md)
3075{
3076	return md->disk;
3077}
3078EXPORT_SYMBOL_GPL(dm_disk);
3079
3080struct kobject *dm_kobject(struct mapped_device *md)
3081{
3082	return &md->kobj_holder.kobj;
3083}
3084
3085struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3086{
3087	struct mapped_device *md;
3088
3089	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
3090
3091	spin_lock(&_minor_lock);
3092	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
3093		md = NULL;
3094		goto out;
3095	}
3096	dm_get(md);
3097out:
3098	spin_unlock(&_minor_lock);
3099
3100	return md;
3101}
3102
3103int dm_suspended_md(struct mapped_device *md)
3104{
3105	return test_bit(DMF_SUSPENDED, &md->flags);
3106}
3107
3108static int dm_post_suspending_md(struct mapped_device *md)
3109{
3110	return test_bit(DMF_POST_SUSPENDING, &md->flags);
3111}
3112
3113int dm_suspended_internally_md(struct mapped_device *md)
3114{
3115	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3116}
3117
3118int dm_test_deferred_remove_flag(struct mapped_device *md)
3119{
3120	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
3121}
3122
3123int dm_suspended(struct dm_target *ti)
3124{
3125	return dm_suspended_md(ti->table->md);
3126}
3127EXPORT_SYMBOL_GPL(dm_suspended);
3128
3129int dm_post_suspending(struct dm_target *ti)
3130{
3131	return dm_post_suspending_md(ti->table->md);
3132}
3133EXPORT_SYMBOL_GPL(dm_post_suspending);
3134
3135int dm_noflush_suspending(struct dm_target *ti)
3136{
3137	return __noflush_suspending(ti->table->md);
3138}
3139EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3140
3141void dm_free_md_mempools(struct dm_md_mempools *pools)
3142{
3143	if (!pools)
3144		return;
3145
3146	bioset_exit(&pools->bs);
3147	bioset_exit(&pools->io_bs);
3148
3149	kfree(pools);
3150}
3151
3152struct dm_pr {
3153	u64	old_key;
3154	u64	new_key;
3155	u32	flags;
3156	bool	abort;
3157	bool	fail_early;
3158	int	ret;
3159	enum pr_type type;
3160	struct pr_keys *read_keys;
3161	struct pr_held_reservation *rsv;
3162};
3163
3164static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
3165		      struct dm_pr *pr)
3166{
3167	struct mapped_device *md = bdev->bd_disk->private_data;
3168	struct dm_table *table;
3169	struct dm_target *ti;
3170	int ret = -ENOTTY, srcu_idx;
3171
3172	table = dm_get_live_table(md, &srcu_idx);
3173	if (!table || !dm_table_get_size(table))
3174		goto out;
3175
3176	/* We only support devices that have a single target */
3177	if (table->num_targets != 1)
3178		goto out;
3179	ti = dm_table_get_target(table, 0);
3180
3181	if (dm_suspended_md(md)) {
3182		ret = -EAGAIN;
3183		goto out;
3184	}
3185
3186	ret = -EINVAL;
3187	if (!ti->type->iterate_devices)
3188		goto out;
3189
3190	ti->type->iterate_devices(ti, fn, pr);
3191	ret = 0;
3192out:
3193	dm_put_live_table(md, srcu_idx);
3194	return ret;
3195}
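/*
 * Convention for the __dm_pr_* callbacks below: the PR status is recorded
 * in dm_pr.ret, while the callback's return value only steers the
 * iterate_devices walk (non-zero is expected to stop it, 0 continues).
 * dm_call_pr() itself only fails (-ENOTTY/-EINVAL/-EAGAIN) when the table
 * state prevents any path from being tried.
 */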
3196
3197/*
3198 * For register / unregister we need to manually call out to every path.
3199 */
3200static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3201			    sector_t start, sector_t len, void *data)
3202{
3203	struct dm_pr *pr = data;
3204	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3205	int ret;
3206
3207	if (!ops || !ops->pr_register) {
3208		pr->ret = -EOPNOTSUPP;
3209		return -1;
3210	}
3211
3212	ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3213	if (!ret)
3214		return 0;
3215
3216	if (!pr->ret)
3217		pr->ret = ret;
3218
3219	if (pr->fail_early)
3220		return -1;
3221
3222	return 0;
3223}
3224
3225static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3226			  u32 flags)
3227{
3228	struct dm_pr pr = {
3229		.old_key	= old_key,
3230		.new_key	= new_key,
3231		.flags		= flags,
3232		.fail_early	= true,
3233		.ret		= 0,
3234	};
3235	int ret;
3236
3237	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3238	if (ret) {
3239		/* Didn't even get to register a path */
3240		return ret;
3241	}
3242
3243	if (!pr.ret)
3244		return 0;
3245	ret = pr.ret;
3246
3247	if (!new_key)
3248		return ret;
3249
3250	/* unregister all paths if we failed to register any path */
3251	pr.old_key = new_key;
3252	pr.new_key = 0;
3253	pr.flags = 0;
3254	pr.fail_early = false;
3255	(void) dm_call_pr(bdev, __dm_pr_register, &pr);
3256	return ret;
3257}
3258
3259
3260static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev,
3261			   sector_t start, sector_t len, void *data)
3262{
3263	struct dm_pr *pr = data;
3264	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3265
3266	if (!ops || !ops->pr_reserve) {
3267		pr->ret = -EOPNOTSUPP;
3268		return -1;
3269	}
3270
3271	pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags);
3272	if (!pr->ret)
3273		return -1;
3274
3275	return 0;
3276}
3277
3278static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3279			 u32 flags)
3280{
3281	struct dm_pr pr = {
3282		.old_key	= key,
3283		.flags		= flags,
3284		.type		= type,
3285		.fail_early	= false,
3286		.ret		= 0,
3287	};
3288	int ret;
3289
3290	ret = dm_call_pr(bdev, __dm_pr_reserve, &pr);
3291	if (ret)
3292		return ret;
3293
3294	return pr.ret;
3295}
3296
3297/*
3298 * If there is a non-All Registrants type of reservation, the release must be
 3299 * sent down the holding path. For the cases where there is no reservation,
 3300 * or the path is not the holder, the device will also return success, so we
 3301 * must try each path to make sure we got the correct path.
3302 */
3303static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev,
3304			   sector_t start, sector_t len, void *data)
3305{
3306	struct dm_pr *pr = data;
3307	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3308
3309	if (!ops || !ops->pr_release) {
3310		pr->ret = -EOPNOTSUPP;
3311		return -1;
3312	}
3313
3314	pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type);
3315	if (pr->ret)
3316		return -1;
3317
3318	return 0;
3319}
3320
3321static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3322{
3323	struct dm_pr pr = {
3324		.old_key	= key,
3325		.type		= type,
3326		.fail_early	= false,
3327	};
3328	int ret;
3329
3330	ret = dm_call_pr(bdev, __dm_pr_release, &pr);
3331	if (ret)
3332		return ret;
3333
3334	return pr.ret;
3335}
3336
3337static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev,
3338			   sector_t start, sector_t len, void *data)
3339{
3340	struct dm_pr *pr = data;
3341	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3342
3343	if (!ops || !ops->pr_preempt) {
3344		pr->ret = -EOPNOTSUPP;
3345		return -1;
3346	}
 
3347
3348	pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type,
3349				  pr->abort);
3350	if (!pr->ret)
3351		return -1;
3352
3353	return 0;
3354}
3355
3356static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3357			 enum pr_type type, bool abort)
3358{
3359	struct dm_pr pr = {
3360		.new_key	= new_key,
3361		.old_key	= old_key,
3362		.type		= type,
3363		.fail_early	= false,
3364	};
3365	int ret;
3366
3367	ret = dm_call_pr(bdev, __dm_pr_preempt, &pr);
3368	if (ret)
3369		return ret;
3370
3371	return pr.ret;
3372}
3373
3374static int dm_pr_clear(struct block_device *bdev, u64 key)
3375{
3376	struct mapped_device *md = bdev->bd_disk->private_data;
3377	const struct pr_ops *ops;
3378	int r, srcu_idx;
3379
3380	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3381	if (r < 0)
3382		goto out;
3383
3384	ops = bdev->bd_disk->fops->pr_ops;
3385	if (ops && ops->pr_clear)
3386		r = ops->pr_clear(bdev, key);
3387	else
3388		r = -EOPNOTSUPP;
3389out:
3390	dm_unprepare_ioctl(md, srcu_idx);
3391	return r;
3392}
3393
3394static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
3395			     sector_t start, sector_t len, void *data)
3396{
3397	struct dm_pr *pr = data;
3398	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3399
3400	if (!ops || !ops->pr_read_keys) {
3401		pr->ret = -EOPNOTSUPP;
3402		return -1;
3403	}
3404
3405	pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys);
3406	if (!pr->ret)
3407		return -1;
3408
3409	return 0;
3410}
3411
3412static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
3413{
3414	struct dm_pr pr = {
3415		.read_keys = keys,
3416	};
3417	int ret;
3418
3419	ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr);
3420	if (ret)
3421		return ret;
3422
3423	return pr.ret;
3424}
3425
3426static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
3427				    sector_t start, sector_t len, void *data)
3428{
3429	struct dm_pr *pr = data;
3430	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3431
3432	if (!ops || !ops->pr_read_reservation) {
3433		pr->ret = -EOPNOTSUPP;
3434		return -1;
3435	}
3436
3437	pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv);
3438	if (!pr->ret)
3439		return -1;
3440
3441	return 0;
3442}
3443
3444static int dm_pr_read_reservation(struct block_device *bdev,
3445				  struct pr_held_reservation *rsv)
3446{
3447	struct dm_pr pr = {
3448		.rsv = rsv,
3449	};
3450	int ret;
3451
3452	ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr);
3453	if (ret)
3454		return ret;
3455
3456	return pr.ret;
3457}
3458
3459static const struct pr_ops dm_pr_ops = {
3460	.pr_register	= dm_pr_register,
3461	.pr_reserve	= dm_pr_reserve,
3462	.pr_release	= dm_pr_release,
3463	.pr_preempt	= dm_pr_preempt,
3464	.pr_clear	= dm_pr_clear,
3465	.pr_read_keys	= dm_pr_read_keys,
3466	.pr_read_reservation = dm_pr_read_reservation,
3467};
3468
3469static const struct block_device_operations dm_blk_dops = {
3470	.submit_bio = dm_submit_bio,
3471	.poll_bio = dm_poll_bio,
3472	.open = dm_blk_open,
3473	.release = dm_blk_close,
3474	.ioctl = dm_blk_ioctl,
3475	.getgeo = dm_blk_getgeo,
3476	.report_zones = dm_blk_report_zones,
3477	.pr_ops = &dm_pr_ops,
3478	.owner = THIS_MODULE
3479};
3480
3481static const struct block_device_operations dm_rq_blk_dops = {
3482	.open = dm_blk_open,
3483	.release = dm_blk_close,
3484	.ioctl = dm_blk_ioctl,
3485	.getgeo = dm_blk_getgeo,
3486	.pr_ops = &dm_pr_ops,
3487	.owner = THIS_MODULE
3488};
3489
3490static const struct dax_operations dm_dax_ops = {
3491	.direct_access = dm_dax_direct_access,
3492	.zero_page_range = dm_dax_zero_page_range,
3493	.recovery_write = dm_dax_recovery_write,
3494};
3495
3496/*
3497 * module hooks
3498 */
3499module_init(dm_init);
3500module_exit(dm_exit);
3501
3502module_param(major, uint, 0);
3503MODULE_PARM_DESC(major, "The major number of the device mapper");
3504
3505module_param(reserved_bio_based_ios, uint, 0644);
3506MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3507
3508module_param(dm_numa_node, int, 0644);
3509MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3510
3511module_param(swap_bios, int, 0644);
3512MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
3513
3514MODULE_DESCRIPTION(DM_NAME " driver");
3515MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3516MODULE_LICENSE("GPL");
v3.5.6
 
   1/*
   2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
   3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm.h"
 
   9#include "dm-uevent.h"
 
  10
  11#include <linux/init.h>
  12#include <linux/module.h>
  13#include <linux/mutex.h>
  14#include <linux/moduleparam.h>
 
  15#include <linux/blkpg.h>
  16#include <linux/bio.h>
  17#include <linux/mempool.h>
 
  18#include <linux/slab.h>
  19#include <linux/idr.h>
 
  20#include <linux/hdreg.h>
  21#include <linux/delay.h>
  22
  23#include <trace/events/block.h>
 
 
 
 
  24
  25#define DM_MSG_PREFIX "core"
  26
  27#ifdef CONFIG_PRINTK
  28/*
  29 * ratelimit state to be used in DMXXX_LIMIT().
  30 */
  31DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
  32		       DEFAULT_RATELIMIT_INTERVAL,
  33		       DEFAULT_RATELIMIT_BURST);
  34EXPORT_SYMBOL(dm_ratelimit_state);
  35#endif
  36
  37/*
  38 * Cookies are numeric values sent with CHANGE and REMOVE
  39 * uevents while resuming, removing or renaming the device.
  40 */
  41#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
  42#define DM_COOKIE_LENGTH 24
  43
 
 
 
 
 
 
 
  44static const char *_name = DM_NAME;
  45
  46static unsigned int major = 0;
  47static unsigned int _major = 0;
  48
  49static DEFINE_IDR(_minor_idr);
  50
  51static DEFINE_SPINLOCK(_minor_lock);
  52/*
  53 * For bio-based dm.
  54 * One of these is allocated per bio.
  55 */
  56struct dm_io {
  57	struct mapped_device *md;
  58	int error;
  59	atomic_t io_count;
  60	struct bio *bio;
  61	unsigned long start_time;
  62	spinlock_t endio_lock;
  63};
  64
  65/*
  66 * For bio-based dm.
  67 * One of these is allocated per target within a bio.  Hopefully
  68 * this will be simplified out one day.
  69 */
  70struct dm_target_io {
  71	struct dm_io *io;
  72	struct dm_target *ti;
  73	union map_info info;
  74};
 
 
 
 
  75
  76/*
  77 * For request-based dm.
  78 * One of these is allocated per request.
  79 */
  80struct dm_rq_target_io {
  81	struct mapped_device *md;
  82	struct dm_target *ti;
  83	struct request *orig, clone;
  84	int error;
  85	union map_info info;
  86};
  87
  88/*
  89 * For request-based dm.
  90 * One of these is allocated per bio.
  91 */
  92struct dm_rq_clone_bio_info {
  93	struct bio *orig;
  94	struct dm_rq_target_io *tio;
 
 
 
 
 
  95};
  96
  97union map_info *dm_get_mapinfo(struct bio *bio)
  98{
  99	if (bio && bio->bi_private)
 100		return &((struct dm_target_io *)bio->bi_private)->info;
 101	return NULL;
 102}
 103
 104union map_info *dm_get_rq_mapinfo(struct request *rq)
 105{
 106	if (rq && rq->end_io_data)
 107		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
 108	return NULL;
 109}
 110EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
 111
 112#define MINOR_ALLOCED ((void *)-1)
 
 
 113
 114/*
 115 * Bits for the md->flags field.
 116 */
 117#define DMF_BLOCK_IO_FOR_SUSPEND 0
 118#define DMF_SUSPENDED 1
 119#define DMF_FROZEN 2
 120#define DMF_FREEING 3
 121#define DMF_DELETING 4
 122#define DMF_NOFLUSH_SUSPENDING 5
 123#define DMF_MERGE_IS_OPTIONAL 6
 124
 125/*
 126 * Work processed by per-device workqueue.
 127 */
 128struct mapped_device {
 129	struct rw_semaphore io_lock;
 130	struct mutex suspend_lock;
 131	rwlock_t map_lock;
 132	atomic_t holders;
 133	atomic_t open_count;
 134
 135	unsigned long flags;
 
 
 
 
 136
 137	struct request_queue *queue;
 138	unsigned type;
 139	/* Protect queue and type against concurrent access. */
 140	struct mutex type_lock;
 141
 142	struct target_type *immutable_target_type;
 
 143
 144	struct gendisk *disk;
 145	char name[16];
 
 
 
 146
 147	void *interface_ptr;
 
 
 
 148
 149	/*
 150	 * A list of ios that arrived while we were suspended.
 151	 */
 152	atomic_t pending[2];
 153	wait_queue_head_t wait;
 154	struct work_struct work;
 155	struct bio_list deferred;
 156	spinlock_t deferred_lock;
 157
 158	/*
 159	 * Processing queue (flush)
 160	 */
 161	struct workqueue_struct *wq;
 
 162
 163	/*
 164	 * The current mapping.
 165	 */
 166	struct dm_table *map;
 
 167
 168	/*
 169	 * io objects are allocated from here.
 170	 */
 171	mempool_t *io_pool;
 172	mempool_t *tio_pool;
 
 173
 174	struct bio_set *bs;
 
 
 
 175
 176	/*
 177	 * Event handling.
 178	 */
 179	atomic_t event_nr;
 180	wait_queue_head_t eventq;
 181	atomic_t uevent_seq;
 182	struct list_head uevent_list;
 183	spinlock_t uevent_lock; /* Protect access to uevent_list */
 184
 185	/*
 186	 * freeze/thaw support require holding onto a super block
 187	 */
 188	struct super_block *frozen_sb;
 189	struct block_device *bdev;
 190
 191	/* forced geometry settings */
 192	struct hd_geometry geometry;
 
 
 193
 194	/* sysfs handle */
 195	struct kobject kobj;
 
 
 196
 197	/* zero-length flush that will be cloned and submitted to targets */
 198	struct bio flush_bio;
 199};
 200
 201/*
 202 * For mempools pre-allocation at the table loading time.
 203 */
 204struct dm_md_mempools {
 205	mempool_t *io_pool;
 206	mempool_t *tio_pool;
 207	struct bio_set *bs;
 208};
 209
 210#define MIN_IOS 256
 211static struct kmem_cache *_io_cache;
 212static struct kmem_cache *_tio_cache;
 213static struct kmem_cache *_rq_tio_cache;
 214static struct kmem_cache *_rq_bio_info_cache;
 215
 216static int __init local_init(void)
 217{
 218	int r = -ENOMEM;
 219
 220	/* allocate a slab for the dm_ios */
 221	_io_cache = KMEM_CACHE(dm_io, 0);
 222	if (!_io_cache)
 223		return r;
 224
 225	/* allocate a slab for the target ios */
 226	_tio_cache = KMEM_CACHE(dm_target_io, 0);
 227	if (!_tio_cache)
 228		goto out_free_io_cache;
 229
 230	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
 231	if (!_rq_tio_cache)
 232		goto out_free_tio_cache;
 233
 234	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
 235	if (!_rq_bio_info_cache)
 236		goto out_free_rq_tio_cache;
 237
 238	r = dm_uevent_init();
 239	if (r)
 240		goto out_free_rq_bio_info_cache;
 241
 242	_major = major;
 243	r = register_blkdev(_major, _name);
 244	if (r < 0)
 245		goto out_uevent_exit;
 246
 247	if (!_major)
 248		_major = r;
 249
 250	return 0;
 251
 
 
 252out_uevent_exit:
 253	dm_uevent_exit();
 254out_free_rq_bio_info_cache:
 255	kmem_cache_destroy(_rq_bio_info_cache);
 256out_free_rq_tio_cache:
 257	kmem_cache_destroy(_rq_tio_cache);
 258out_free_tio_cache:
 259	kmem_cache_destroy(_tio_cache);
 260out_free_io_cache:
 261	kmem_cache_destroy(_io_cache);
 262
 263	return r;
 264}
 265
 266static void local_exit(void)
 267{
 268	kmem_cache_destroy(_rq_bio_info_cache);
 269	kmem_cache_destroy(_rq_tio_cache);
 270	kmem_cache_destroy(_tio_cache);
 271	kmem_cache_destroy(_io_cache);
 272	unregister_blkdev(_major, _name);
 273	dm_uevent_exit();
 274
 275	_major = 0;
 276
 277	DMINFO("cleaned up");
 278}
 279
 280static int (*_inits[])(void) __initdata = {
 281	local_init,
 282	dm_target_init,
 283	dm_linear_init,
 284	dm_stripe_init,
 285	dm_io_init,
 286	dm_kcopyd_init,
 287	dm_interface_init,
 
 288};
 289
 290static void (*_exits[])(void) = {
 291	local_exit,
 292	dm_target_exit,
 293	dm_linear_exit,
 294	dm_stripe_exit,
 295	dm_io_exit,
 296	dm_kcopyd_exit,
 297	dm_interface_exit,
 
 298};
 299
 300static int __init dm_init(void)
 301{
 302	const int count = ARRAY_SIZE(_inits);
 
 303
 304	int r, i;
 
 
 
 305
 306	for (i = 0; i < count; i++) {
 307		r = _inits[i]();
 308		if (r)
 309			goto bad;
 310	}
 311
 312	return 0;
 313
 314      bad:
 315	while (i--)
 316		_exits[i]();
 317
 318	return r;
 319}
 320
 321static void __exit dm_exit(void)
 322{
 323	int i = ARRAY_SIZE(_exits);
 324
 325	while (i--)
 326		_exits[i]();
 327
 328	/*
 329	 * Should be empty by this point.
 330	 */
 331	idr_remove_all(&_minor_idr);
 332	idr_destroy(&_minor_idr);
 333}
 334
 335/*
 336 * Block device functions
 337 */
 338int dm_deleting_md(struct mapped_device *md)
 339{
 340	return test_bit(DMF_DELETING, &md->flags);
 341}
 342
 343static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 344{
 345	struct mapped_device *md;
 346
 347	spin_lock(&_minor_lock);
 348
 349	md = bdev->bd_disk->private_data;
 350	if (!md)
 351		goto out;
 352
 353	if (test_bit(DMF_FREEING, &md->flags) ||
 354	    dm_deleting_md(md)) {
 355		md = NULL;
 356		goto out;
 357	}
 358
 359	dm_get(md);
 360	atomic_inc(&md->open_count);
 361
 362out:
 363	spin_unlock(&_minor_lock);
 364
 365	return md ? 0 : -ENXIO;
 366}
 367
 368static int dm_blk_close(struct gendisk *disk, fmode_t mode)
 369{
 370	struct mapped_device *md = disk->private_data;
 371
 372	spin_lock(&_minor_lock);
 373
 374	atomic_dec(&md->open_count);
 
 
 
 
 
 
 
 375	dm_put(md);
 376
 377	spin_unlock(&_minor_lock);
 378
 379	return 0;
 380}
 381
 382int dm_open_count(struct mapped_device *md)
 383{
 384	return atomic_read(&md->open_count);
 385}
 386
 387/*
 388 * Guarantees nothing is using the device before it's deleted.
 389 */
 390int dm_lock_for_deletion(struct mapped_device *md)
 391{
 392	int r = 0;
 393
 394	spin_lock(&_minor_lock);
 395
 396	if (dm_open_count(md))
 397		r = -EBUSY;
 
 
 
 
 398	else
 399		set_bit(DMF_DELETING, &md->flags);
 400
 401	spin_unlock(&_minor_lock);
 402
 403	return r;
 404}
 405
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 406static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 407{
 408	struct mapped_device *md = bdev->bd_disk->private_data;
 409
 410	return dm_get_geometry(md, geo);
 411}
 412
 413static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 414			unsigned int cmd, unsigned long arg)
 415{
 416	struct mapped_device *md = bdev->bd_disk->private_data;
 417	struct dm_table *map = dm_get_live_table(md);
 418	struct dm_target *tgt;
 419	int r = -ENOTTY;
 420
 
 
 
 421	if (!map || !dm_table_get_size(map))
 422		goto out;
 423
 424	/* We only support devices that have a single target */
 425	if (dm_table_get_num_targets(map) != 1)
 426		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 427
 428	tgt = dm_table_get_target(map, 0);
 
 
 
 429
 430	if (dm_suspended_md(md)) {
 431		r = -EAGAIN;
 
 
 
 
 
 
 432		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 433	}
 434
 435	if (tgt->type->ioctl)
 436		r = tgt->type->ioctl(tgt, cmd, arg);
 437
 
 438out:
 439	dm_table_put(map);
 440
 441	return r;
 442}
 443
 444static struct dm_io *alloc_io(struct mapped_device *md)
 445{
 446	return mempool_alloc(md->io_pool, GFP_NOIO);
 447}
 
 448
 449static void free_io(struct mapped_device *md, struct dm_io *io)
 450{
 451	mempool_free(io, md->io_pool);
 452}
 453
 454static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 455{
 456	mempool_free(tio, md->tio_pool);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 457}
 458
 459static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
 460					    gfp_t gfp_mask)
 461{
 462	return mempool_alloc(md->tio_pool, gfp_mask);
 463}
 464
 465static void free_rq_tio(struct dm_rq_target_io *tio)
 466{
 467	mempool_free(tio, tio->md->tio_pool);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 468}
 469
 470static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
 471{
 472	return mempool_alloc(md->io_pool, GFP_ATOMIC);
 473}
 474
 475static void free_bio_info(struct dm_rq_clone_bio_info *info)
 476{
 477	mempool_free(info, info->tio->md->io_pool);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 478}
 479
 480static int md_in_flight(struct mapped_device *md)
 481{
 482	return atomic_read(&md->pending[READ]) +
 483	       atomic_read(&md->pending[WRITE]);
 484}
 485
 486static void start_io_acct(struct dm_io *io)
 
 487{
 488	struct mapped_device *md = io->md;
 489	int cpu;
 490	int rw = bio_data_dir(io->bio);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 491
 492	io->start_time = jiffies;
 
 
 
 
 493
 494	cpu = part_stat_lock();
 495	part_round_stats(cpu, &dm_disk(md)->part0);
 496	part_stat_unlock();
 497	atomic_set(&dm_disk(md)->part0.in_flight[rw],
 498		atomic_inc_return(&md->pending[rw]));
 499}
 500
 501static void end_io_acct(struct dm_io *io)
 502{
 503	struct mapped_device *md = io->md;
 504	struct bio *bio = io->bio;
 505	unsigned long duration = jiffies - io->start_time;
 506	int pending, cpu;
 507	int rw = bio_data_dir(bio);
 508
 509	cpu = part_stat_lock();
 510	part_round_stats(cpu, &dm_disk(md)->part0);
 511	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
 512	part_stat_unlock();
 513
 514	/*
 515	 * After this is decremented the bio must not be touched if it is
 516	 * a flush.
 517	 */
 518	pending = atomic_dec_return(&md->pending[rw]);
 519	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
 520	pending += atomic_read(&md->pending[rw^0x1]);
 521
 522	/* nudge anyone waiting on suspend queue */
 523	if (!pending)
 524		wake_up(&md->wait);
 525}
 526
 527/*
 528 * Add the bio to the list of deferred io.
 529 */
 530static void queue_io(struct mapped_device *md, struct bio *bio)
 531{
 532	unsigned long flags;
 533
 534	spin_lock_irqsave(&md->deferred_lock, flags);
 535	bio_list_add(&md->deferred, bio);
 536	spin_unlock_irqrestore(&md->deferred_lock, flags);
 537	queue_work(md->wq, &md->work);
 538}
 539
 540/*
 541 * Everyone (including functions in this file), should use this
 542 * function to access the md->map field, and make sure they call
 543 * dm_table_put() when finished.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 544 */
 545struct dm_table *dm_get_live_table(struct mapped_device *md)
 
 546{
 547	struct dm_table *t;
 548	unsigned long flags;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 549
 550	read_lock_irqsave(&md->map_lock, flags);
 551	t = md->map;
 552	if (t)
 553		dm_table_get(t);
 554	read_unlock_irqrestore(&md->map_lock, flags);
 555
 556	return t;
 
 
 
 557}
 558
 559/*
 560 * Get the geometry associated with a dm device
 561 */
 562int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
 563{
 564	*geo = md->geometry;
 565
 566	return 0;
 567}
 568
 569/*
 570 * Set the geometry of a device.
 571 */
 572int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
 573{
 574	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
 575
 576	if (geo->start > sz) {
 577		DMWARN("Start sector is beyond the geometry limits.");
 578		return -EINVAL;
 579	}
 580
 581	md->geometry = *geo;
 582
 583	return 0;
 584}
 585
 586/*-----------------------------------------------------------------
 587 * CRUD START:
 588 *   A more elegant soln is in the works that uses the queue
 589 *   merge fn, unfortunately there are a couple of changes to
 590 *   the block layer that I want to make for this.  So in the
 591 *   interests of getting something for people to use I give
 592 *   you this clearly demarcated crap.
 593 *---------------------------------------------------------------*/
 594
 595static int __noflush_suspending(struct mapped_device *md)
 596{
 597	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
 598}
 599
 600/*
 601 * Decrements the number of outstanding ios that a bio has been
 602 * cloned into, completing the original io if necessary.
 603 */
 604static void dec_pending(struct dm_io *io, int error)
 605{
 606	unsigned long flags;
 607	int io_error;
 608	struct bio *bio;
 609	struct mapped_device *md = io->md;
 610
 611	/* Push-back supersedes any I/O errors */
 612	if (unlikely(error)) {
 613		spin_lock_irqsave(&io->endio_lock, flags);
 614		if (!(io->error > 0 && __noflush_suspending(md)))
 615			io->error = error;
 616		spin_unlock_irqrestore(&io->endio_lock, flags);
 617	}
 618
 619	if (atomic_dec_and_test(&io->io_count)) {
 620		if (io->error == DM_ENDIO_REQUEUE) {
 621			/*
 622			 * Target requested pushing back the I/O.
 623			 */
 624			spin_lock_irqsave(&md->deferred_lock, flags);
 625			if (__noflush_suspending(md))
 626				bio_list_add_head(&md->deferred, io->bio);
 627			else
 628				/* noflush suspend was interrupted. */
 629				io->error = -EIO;
 630			spin_unlock_irqrestore(&md->deferred_lock, flags);
 631		}
 632
 633		io_error = io->error;
 634		bio = io->bio;
 635		end_io_acct(io);
 636		free_io(md, io);
 637
 638		if (io_error == DM_ENDIO_REQUEUE)
 639			return;
 640
 641		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
 642			/*
 643			 * Preflush done for flush with data, reissue
 644			 * without REQ_FLUSH.
 645			 */
 646			bio->bi_rw &= ~REQ_FLUSH;
 647			queue_io(md, bio);
 648		} else {
 649			/* done with normal IO or empty flush */
 650			trace_block_bio_complete(md->queue, bio, io_error);
 651			bio_endio(bio, io_error);
 652		}
 653	}
 654}
 655
 656static void clone_endio(struct bio *bio, int error)
 657{
 658	int r = 0;
 659	struct dm_target_io *tio = bio->bi_private;
 660	struct dm_io *io = tio->io;
 661	struct mapped_device *md = tio->io->md;
 662	dm_endio_fn endio = tio->ti->type->end_io;
 663
 664	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
 665		error = -EIO;
 666
 667	if (endio) {
 668		r = endio(tio->ti, bio, error, &tio->info);
 669		if (r < 0 || r == DM_ENDIO_REQUEUE)
 670			/*
 671			 * error and requeue request are handled
 672			 * in dec_pending().
 673			 */
 674			error = r;
 675		else if (r == DM_ENDIO_INCOMPLETE)
 676			/* The target will handle the io */
 677			return;
 678		else if (r) {
 679			DMWARN("unimplemented target endio return value: %d", r);
 680			BUG();
 681		}
 682	}
 683
 684	/*
 685	 * Store md for cleanup instead of tio which is about to get freed.
 686	 */
 687	bio->bi_private = md->bs;
 688
 689	free_tio(md, tio);
 690	bio_put(bio);
 691	dec_pending(io, error);
 692}
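/*
 * Illustrative sketch (hypothetical target, not part of the original
 * source; the example_* names are made up) of the end_io contract that
 * clone_endio() handles above:
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  int error, union map_info *map_context)
 *	{
 *		if (error && example_can_retry(bio))
 *			return DM_ENDIO_REQUEUE;	(push the bio back)
 *		return 0;				(pass 'error' through)
 *	}
 *
 * Returning DM_ENDIO_INCOMPLETE instead tells the core that the target
 * will complete the io itself, so clone_endio() returns without freeing
 * anything.
 */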
 693
 694/*
 695 * Partial completion handling for request-based dm
 696 */
 697static void end_clone_bio(struct bio *clone, int error)
 698{
 699	struct dm_rq_clone_bio_info *info = clone->bi_private;
 700	struct dm_rq_target_io *tio = info->tio;
 701	struct bio *bio = info->orig;
 702	unsigned int nr_bytes = info->orig->bi_size;
 703
 704	bio_put(clone);
 705
 706	if (tio->error)
 707		/*
 708		 * An error has already been detected on the request.
 709		 * Once an error has occurred, just let clone->end_io() handle
 710		 * the remainder.
 711		 */
 712		return;
 713	else if (error) {
 714		/*
 715		 * Don't notify the upper layer of the error yet.
 716		 * The error handling decision is made by the target driver
 717		 * when the request is completed.
 718		 */
 719		tio->error = error;
 720		return;
 721	}
 722
 723	/*
 724	 * I/O for the bio successfully completed.
 725	 * Notify the upper layer of the data completion.
 726	 */
 727
 728	/*
 729	 * bios are processed from the head of the list.
 730	 * So the completing bio should always be rq->bio.
 731	 * If it's not, something is going wrong.
 732	 */
 733	if (tio->orig->bio != bio)
 734		DMERR("bio completion is going in the middle of the request");
 735
 736	/*
 737	 * Update the original request.
 738	 * Do not use blk_end_request() here, because it may complete
 739	 * the original request before the clone, and break the ordering.
 740	 */
 741	blk_update_request(tio->orig, 0, nr_bytes);
 742}
 743
 744/*
 745 * Don't touch any member of the md after calling this function because
 746 * the md may be freed in dm_put() at the end of this function.
 747 * Or do dm_get() before calling this function and dm_put() later.
 748 */
 749static void rq_completed(struct mapped_device *md, int rw, int run_queue)
 750{
 751	atomic_dec(&md->pending[rw]);
 752
 753	/* nudge anyone waiting on suspend queue */
 754	if (!md_in_flight(md))
 755		wake_up(&md->wait);
 756
 757	if (run_queue)
 758		blk_run_queue(md->queue);
 759
 760	/*
 761	 * dm_put() must be at the end of this function. See the comment above
 762	 */
 763	dm_put(md);
 764}
 765
 766static void free_rq_clone(struct request *clone)
 767{
 768	struct dm_rq_target_io *tio = clone->end_io_data;
 769
 770	blk_rq_unprep_clone(clone);
 771	free_rq_tio(tio);
 772}
 773
 774/*
 775 * Complete the clone and the original request.
 776 * Must be called without queue lock.
 777 */
 778static void dm_end_request(struct request *clone, int error)
 779{
 780	int rw = rq_data_dir(clone);
 781	struct dm_rq_target_io *tio = clone->end_io_data;
 782	struct mapped_device *md = tio->md;
 783	struct request *rq = tio->orig;
 784
 785	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 786		rq->errors = clone->errors;
 787		rq->resid_len = clone->resid_len;
 788
 789		if (rq->sense)
 790			/*
 791			 * We are using the sense buffer of the original
 792			 * request.
 793			 * So setting the length of the sense data is enough.
 794			 */
 795			rq->sense_len = clone->sense_len;
 796	}
 797
 798	free_rq_clone(clone);
 799	blk_end_request_all(rq, error);
 800	rq_completed(md, rw, true);
 801}
 802
 803static void dm_unprep_request(struct request *rq)
 804{
 805	struct request *clone = rq->special;
 806
 807	rq->special = NULL;
 808	rq->cmd_flags &= ~REQ_DONTPREP;
 809
 810	free_rq_clone(clone);
 811}
 812
 813/*
 814 * Requeue the original request of a clone.
 815 */
 816void dm_requeue_unmapped_request(struct request *clone)
 817{
 818	int rw = rq_data_dir(clone);
 819	struct dm_rq_target_io *tio = clone->end_io_data;
 820	struct mapped_device *md = tio->md;
 821	struct request *rq = tio->orig;
 822	struct request_queue *q = rq->q;
 823	unsigned long flags;
 824
 825	dm_unprep_request(rq);
 826
 827	spin_lock_irqsave(q->queue_lock, flags);
 828	blk_requeue_request(q, rq);
 829	spin_unlock_irqrestore(q->queue_lock, flags);
 830
 831	rq_completed(md, rw, 0);
 832}
 833EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
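/*
 * Illustrative sketch (not part of the original source): the core uses
 * this when a target's map_rq() returns DM_MAPIO_REQUEUE (see
 * map_request() below), and the export allows request-based targets to
 * hand a prepared clone back the same way:
 *
 *	dm_requeue_unmapped_request(clone);
 *
 * The original request is unprepped and put back on its request_queue so
 * that it will be retried later.
 */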
 834
 835static void __stop_queue(struct request_queue *q)
 836{
 837	blk_stop_queue(q);
 838}
 839
 840static void stop_queue(struct request_queue *q)
 841{
 842	unsigned long flags;
 843
 844	spin_lock_irqsave(q->queue_lock, flags);
 845	__stop_queue(q);
 846	spin_unlock_irqrestore(q->queue_lock, flags);
 847}
 848
 849static void __start_queue(struct request_queue *q)
 850{
 851	if (blk_queue_stopped(q))
 852		blk_start_queue(q);
 853}
 854
 855static void start_queue(struct request_queue *q)
 856{
 857	unsigned long flags;
 858
 859	spin_lock_irqsave(q->queue_lock, flags);
 860	__start_queue(q);
 861	spin_unlock_irqrestore(q->queue_lock, flags);
 862}
 863
 864static void dm_done(struct request *clone, int error, bool mapped)
 865{
 866	int r = error;
 867	struct dm_rq_target_io *tio = clone->end_io_data;
 868	dm_request_endio_fn rq_end_io = NULL;
 869
 870	if (tio->ti) {
 871		rq_end_io = tio->ti->type->rq_end_io;
 872
 873		if (mapped && rq_end_io)
 874			r = rq_end_io(tio->ti, clone, error, &tio->info);
 875	}
 876
 877	if (r <= 0)
 878		/* The target wants to complete the I/O */
 879		dm_end_request(clone, r);
 880	else if (r == DM_ENDIO_INCOMPLETE)
 881		/* The target will handle the I/O */
 882		return;
 883	else if (r == DM_ENDIO_REQUEUE)
 884		/* The target wants to requeue the I/O */
 885		dm_requeue_unmapped_request(clone);
 886	else {
 887		DMWARN("unimplemented target endio return value: %d", r);
 888		BUG();
 889	}
 890}
 891
 892/*
 893 * Request completion handler for request-based dm
 894 */
 895static void dm_softirq_done(struct request *rq)
 896{
 897	bool mapped = true;
 898	struct request *clone = rq->completion_data;
 899	struct dm_rq_target_io *tio = clone->end_io_data;
 900
 901	if (rq->cmd_flags & REQ_FAILED)
 902		mapped = false;
 903
 904	dm_done(clone, tio->error, mapped);
 905}
 906
 907/*
 908 * Complete the clone and the original request with the error status
 909 * through softirq context.
 910 */
 911static void dm_complete_request(struct request *clone, int error)
 912{
 913	struct dm_rq_target_io *tio = clone->end_io_data;
 914	struct request *rq = tio->orig;
 915
 916	tio->error = error;
 917	rq->completion_data = clone;
 918	blk_complete_request(rq);
 919}
 920
 921/*
 922 * Complete the not-mapped clone and the original request with the error status
 923 * through softirq context.
 924 * Target's rq_end_io() function isn't called.
 925 * This may be used when the target's map_rq() function fails.
 926 */
 927void dm_kill_unmapped_request(struct request *clone, int error)
 928{
 929	struct dm_rq_target_io *tio = clone->end_io_data;
 930	struct request *rq = tio->orig;
 931
 932	rq->cmd_flags |= REQ_FAILED;
 933	dm_complete_request(clone, error);
 934}
 935EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
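/*
 * Illustrative sketch (not part of the original source): as the comment
 * above notes, this is the error path when a clone cannot be mapped,
 * e.g. (hypothetical condition)
 *
 *	if (example_setup_failed)
 *		dm_kill_unmapped_request(clone, -EIO);
 *
 * The original request is then completed with -EIO through softirq
 * context without the target's rq_end_io() being called; map_request()
 * below uses exactly this call when map_rq() returns an error.
 */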
 936
 937/*
 938 * Called with the queue lock held
 939 */
 940static void end_clone_request(struct request *clone, int error)
 941{
 942	/*
 943	 * Just clean up the information of the queue in which the clone
 944	 * was dispatched.
 945	 * The clone is *NOT* actually freed here, because it was allocated
 946	 * from dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
 947	 */
 948	__blk_put_request(clone->q, clone);
 949
 950	/*
 951	 * Actual request completion is done in a softirq context which doesn't
 952	 * hold the queue lock.  Otherwise, deadlock could occur because:
 953	 *     - another request may be submitted by the upper level driver
 954	 *       of the stacking during the completion
 955	 *     - the submission which requires queue lock may be done
 956	 *       against this queue
 957	 */
 958	dm_complete_request(clone, error);
 959}
 960
 961/*
 962 * Return maximum size of I/O possible at the supplied sector up to the current
 963 * target boundary.
 964 */
 965static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
 966{
 967	sector_t target_offset = dm_target_offset(ti, sector);
 968
 969	return ti->len - target_offset;
 970}
 971
 972static sector_t max_io_len(sector_t sector, struct dm_target *ti)
 973{
 974	sector_t len = max_io_len_target_boundary(sector, ti);
 975
 976	/*
 977	 * Does the target need to split even further ?
 978	 */
 979	if (ti->split_io) {
 980		sector_t boundary;
 981		sector_t offset = dm_target_offset(ti, sector);
 982		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
 983			   - offset;
 984		if (len > boundary)
 985			len = boundary;
 986	}
 987
 988	return len;
 989}
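/*
 * Worked example (illustrative, not from the original source): with
 * ti->split_io = 64 and a target-relative offset of 100 sectors,
 * boundary = ((100 + 64) & ~63) - 100 = 128 - 100 = 28, so the io is
 * limited to 28 sectors and the next one starts on a 64-sector boundary.
 * The mask arithmetic above relies on split_io being a power of two.
 */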
 990
 991static void __map_bio(struct dm_target *ti, struct bio *clone,
 992		      struct dm_target_io *tio)
 993{
 994	int r;
 995	sector_t sector;
 996	struct mapped_device *md;
 997
 998	clone->bi_end_io = clone_endio;
 999	clone->bi_private = tio;
1000
1001	/*
1002	 * Map the clone.  If r == 0 we don't need to do
1003	 * anything, the target has assumed ownership of
1004	 * this io.
1005	 */
1006	atomic_inc(&tio->io->io_count);
1007	sector = clone->bi_sector;
1008	r = ti->type->map(ti, clone, &tio->info);
1009	if (r == DM_MAPIO_REMAPPED) {
1010		/* the bio has been remapped so dispatch it */
1011
1012		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1013				      tio->io->bio->bi_bdev->bd_dev, sector);
1014
1015		generic_make_request(clone);
1016	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1017		/* error the io and bail out, or requeue it if needed */
1018		md = tio->io->md;
1019		dec_pending(tio->io, r);
1020		/*
1021		 * Store bio_set for cleanup.
1022		 */
1023		clone->bi_end_io = NULL;
1024		clone->bi_private = md->bs;
1025		bio_put(clone);
1026		free_tio(md, tio);
1027	} else if (r) {
1028		DMWARN("unimplemented target map return value: %d", r);
1029		BUG();
1030	}
1031}
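/*
 * Illustrative sketch (hypothetical target, not part of the original
 * source; the example_* names are made up) of the map-method contract
 * that __map_bio() handles above.  A simple remapping target fills in
 * the clone and returns DM_MAPIO_REMAPPED:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio,
 *			       union map_info *map_context)
 *	{
 *		bio->bi_bdev = example_dev->bdev;
 *		bio->bi_sector = example_start + dm_target_offset(ti, bio->bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * Returning DM_MAPIO_SUBMITTED (0) instead means the target has taken
 * ownership of the bio, and DM_MAPIO_REQUEUE asks for the io to be
 * pushed back via dec_pending().
 */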
1032
1033struct clone_info {
1034	struct mapped_device *md;
1035	struct dm_table *map;
1036	struct bio *bio;
1037	struct dm_io *io;
1038	sector_t sector;
1039	sector_t sector_count;
1040	unsigned short idx;
1041};
1042
1043static void dm_bio_destructor(struct bio *bio)
1044{
1045	struct bio_set *bs = bio->bi_private;
1046
1047	bio_free(bio, bs);
1048}
1049
1050/*
1051 * Creates a little bio that just does part of a bvec.
1052 */
1053static struct bio *split_bvec(struct bio *bio, sector_t sector,
1054			      unsigned short idx, unsigned int offset,
1055			      unsigned int len, struct bio_set *bs)
1056{
1057	struct bio *clone;
1058	struct bio_vec *bv = bio->bi_io_vec + idx;
1059
1060	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
1061	clone->bi_destructor = dm_bio_destructor;
1062	*clone->bi_io_vec = *bv;
1063
1064	clone->bi_sector = sector;
1065	clone->bi_bdev = bio->bi_bdev;
1066	clone->bi_rw = bio->bi_rw;
1067	clone->bi_vcnt = 1;
1068	clone->bi_size = to_bytes(len);
1069	clone->bi_io_vec->bv_offset = offset;
1070	clone->bi_io_vec->bv_len = clone->bi_size;
1071	clone->bi_flags |= 1 << BIO_CLONED;
1072
1073	if (bio_integrity(bio)) {
1074		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1075		bio_integrity_trim(clone,
1076				   bio_sector_offset(bio, idx, offset), len);
1077	}
1078
1079	return clone;
1080}
1081
1082/*
1083 * Creates a bio that consists of a range of complete bvecs.
1084 */
1085static struct bio *clone_bio(struct bio *bio, sector_t sector,
1086			     unsigned short idx, unsigned short bv_count,
1087			     unsigned int len, struct bio_set *bs)
1088{
1089	struct bio *clone;
1090
1091	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
1092	__bio_clone(clone, bio);
1093	clone->bi_destructor = dm_bio_destructor;
1094	clone->bi_sector = sector;
1095	clone->bi_idx = idx;
1096	clone->bi_vcnt = idx + bv_count;
1097	clone->bi_size = to_bytes(len);
1098	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
1099
1100	if (bio_integrity(bio)) {
1101		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1102
1103		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
1104			bio_integrity_trim(clone,
1105					   bio_sector_offset(bio, idx, 0), len);
1106	}
1107
1108	return clone;
1109}
1110
1111static struct dm_target_io *alloc_tio(struct clone_info *ci,
1112				      struct dm_target *ti)
1113{
1114	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
1115
1116	tio->io = ci->io;
1117	tio->ti = ti;
1118	memset(&tio->info, 0, sizeof(tio->info));
1119
1120	return tio;
1121}
1122
1123static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
1124				   unsigned request_nr, sector_t len)
1125{
1126	struct dm_target_io *tio = alloc_tio(ci, ti);
1127	struct bio *clone;
1128
1129	tio->info.target_request_nr = request_nr;
1130
1131	/*
1132	 * Discard requests require the bio's inline iovecs be initialized.
1133	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1134	 * and discard, so no need for concern about wasted bvec allocations.
1135	 */
1136	clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
1137	__bio_clone(clone, ci->bio);
1138	clone->bi_destructor = dm_bio_destructor;
1139	if (len) {
1140		clone->bi_sector = ci->sector;
1141		clone->bi_size = to_bytes(len);
1142	}
1143
1144	__map_bio(ti, clone, tio);
1145}
1146
1147static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
1148				    unsigned num_requests, sector_t len)
1149{
1150	unsigned request_nr;
1151
1152	for (request_nr = 0; request_nr < num_requests; request_nr++)
1153		__issue_target_request(ci, ti, request_nr, len);
1154}
1155
1156static int __clone_and_map_empty_flush(struct clone_info *ci)
1157{
1158	unsigned target_nr = 0;
1159	struct dm_target *ti;
1160
1161	BUG_ON(bio_has_data(ci->bio));
1162	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1163		__issue_target_requests(ci, ti, ti->num_flush_requests, 0);
1164
1165	return 0;
1166}
1167
1168/*
1169 * Perform all io with a single clone.
1170 */
1171static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
1172{
1173	struct bio *clone, *bio = ci->bio;
1174	struct dm_target_io *tio;
1175
1176	tio = alloc_tio(ci, ti);
1177	clone = clone_bio(bio, ci->sector, ci->idx,
1178			  bio->bi_vcnt - ci->idx, ci->sector_count,
1179			  ci->md->bs);
1180	__map_bio(ti, clone, tio);
1181	ci->sector_count = 0;
1182}
1183
1184static int __clone_and_map_discard(struct clone_info *ci)
1185{
1186	struct dm_target *ti;
1187	sector_t len;
1188
1189	do {
1190		ti = dm_table_find_target(ci->map, ci->sector);
1191		if (!dm_target_is_valid(ti))
1192			return -EIO;
1193
1194		/*
1195		 * Even though the device advertised discard support,
1196		 * that does not mean every target supports it, and
1197		 * reconfiguration might also have changed that since the
1198		 * check was performed.
1199		 */
1200		if (!ti->num_discard_requests)
1201			return -EOPNOTSUPP;
1202
1203		len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1204
1205		__issue_target_requests(ci, ti, ti->num_discard_requests, len);
1206
1207		ci->sector += len;
1208	} while (ci->sector_count -= len);
1209
1210	return 0;
1211}
1212
1213static int __clone_and_map(struct clone_info *ci)
1214{
1215	struct bio *clone, *bio = ci->bio;
1216	struct dm_target *ti;
1217	sector_t len = 0, max;
1218	struct dm_target_io *tio;
1219
1220	if (unlikely(bio->bi_rw & REQ_DISCARD))
1221		return __clone_and_map_discard(ci);
1222
1223	ti = dm_table_find_target(ci->map, ci->sector);
1224	if (!dm_target_is_valid(ti))
1225		return -EIO;
1226
1227	max = max_io_len(ci->sector, ti);
1228
1229	if (ci->sector_count <= max) {
1230		/*
1231		 * Optimise for the simple case where we can do all of
1232		 * the remaining io with a single clone.
1233		 */
1234		__clone_and_map_simple(ci, ti);
1235
1236	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
1237		/*
1238		 * There are some bvecs that don't span targets.
1239		 * Do as many of these as possible.
1240		 */
1241		int i;
1242		sector_t remaining = max;
1243		sector_t bv_len;
1244
1245		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
1246			bv_len = to_sector(bio->bi_io_vec[i].bv_len);
1247
1248			if (bv_len > remaining)
1249				break;
1250
1251			remaining -= bv_len;
1252			len += bv_len;
1253		}
1254
1255		tio = alloc_tio(ci, ti);
1256		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
1257				  ci->md->bs);
1258		__map_bio(ti, clone, tio);
1259
1260		ci->sector += len;
1261		ci->sector_count -= len;
1262		ci->idx = i;
1263
1264	} else {
1265		/*
1266		 * Handle a bvec that must be split between two or more targets.
1267		 */
1268		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
1269		sector_t remaining = to_sector(bv->bv_len);
1270		unsigned int offset = 0;
1271
1272		do {
1273			if (offset) {
1274				ti = dm_table_find_target(ci->map, ci->sector);
1275				if (!dm_target_is_valid(ti))
1276					return -EIO;
1277
1278				max = max_io_len(ci->sector, ti);
1279			}
1280
1281			len = min(remaining, max);
1282
1283			tio = alloc_tio(ci, ti);
1284			clone = split_bvec(bio, ci->sector, ci->idx,
1285					   bv->bv_offset + offset, len,
1286					   ci->md->bs);
1287
1288			__map_bio(ti, clone, tio);
1289
1290			ci->sector += len;
1291			ci->sector_count -= len;
1292			offset += to_bytes(len);
1293		} while (remaining -= len);
1294
1295		ci->idx++;
1296	}
1297
1298	return 0;
1299}
1300
1301/*
1302 * Split the bio into several clones and submit it to targets.
1303 */
1304static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
1305{
1306	struct clone_info ci;
1307	int error = 0;
1308
1309	ci.map = dm_get_live_table(md);
1310	if (unlikely(!ci.map)) {
1311		bio_io_error(bio);
1312		return;
1313	}
1314
1315	ci.md = md;
1316	ci.io = alloc_io(md);
1317	ci.io->error = 0;
1318	atomic_set(&ci.io->io_count, 1);
1319	ci.io->bio = bio;
1320	ci.io->md = md;
1321	spin_lock_init(&ci.io->endio_lock);
1322	ci.sector = bio->bi_sector;
1323	ci.idx = bio->bi_idx;
1324
1325	start_io_acct(ci.io);
1326	if (bio->bi_rw & REQ_FLUSH) {
1327		ci.bio = &ci.md->flush_bio;
1328		ci.sector_count = 0;
1329		error = __clone_and_map_empty_flush(&ci);
1330		/* dec_pending submits any data associated with flush */
1331	} else {
1332		ci.bio = bio;
1333		ci.sector_count = bio_sectors(bio);
1334		while (ci.sector_count && !error)
1335			error = __clone_and_map(&ci);
1336	}
1337
1338	/* drop the extra reference count */
1339	dec_pending(ci.io, error);
1340	dm_table_put(ci.map);
1341}
1342/*-----------------------------------------------------------------
1343 * CRUD END
1344 *---------------------------------------------------------------*/
1345
1346static int dm_merge_bvec(struct request_queue *q,
1347			 struct bvec_merge_data *bvm,
1348			 struct bio_vec *biovec)
1349{
1350	struct mapped_device *md = q->queuedata;
1351	struct dm_table *map = dm_get_live_table(md);
1352	struct dm_target *ti;
1353	sector_t max_sectors;
1354	int max_size = 0;
1355
1356	if (unlikely(!map))
1357		goto out;
1358
1359	ti = dm_table_find_target(map, bvm->bi_sector);
1360	if (!dm_target_is_valid(ti))
1361		goto out_table;
1362
1363	/*
1364	 * Find maximum amount of I/O that won't need splitting
1365	 */
1366	max_sectors = min(max_io_len(bvm->bi_sector, ti),
1367			  (sector_t) BIO_MAX_SECTORS);
1368	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1369	if (max_size < 0)
1370		max_size = 0;
1371
1372	/*
 1373	 * merge_bvec_fn() returns the number of bytes it can accept
 1374	 * at this offset.
 1375	 * max_size is the precomputed maximal io size.
1376	 */
1377	if (max_size && ti->type->merge)
1378		max_size = ti->type->merge(ti, bvm, biovec, max_size);
1379	/*
1380	 * If the target doesn't support merge method and some of the devices
1381	 * provided their merge_bvec method (we know this by looking at
1382	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
1383	 * entries.  So always set max_size to 0, and the code below allows
1384	 * just one page.
1385	 */
1386	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1387
1388		max_size = 0;
1389
1390out_table:
1391	dm_table_put(map);
1392
1393out:
1394	/*
1395	 * Always allow an entire first page
1396	 */
1397	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1398		max_size = biovec->bv_len;
1399
1400	return max_size;
1401}
1402
1403/*
1404 * The request function that just remaps the bio built up by
1405 * dm_merge_bvec.
1406 */
1407static void _dm_request(struct request_queue *q, struct bio *bio)
1408{
1409	int rw = bio_data_dir(bio);
1410	struct mapped_device *md = q->queuedata;
1411	int cpu;
1412
1413	down_read(&md->io_lock);
1414
1415	cpu = part_stat_lock();
1416	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1417	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1418	part_stat_unlock();
1419
1420	/* if we're suspended, we have to queue this io for later */
1421	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1422		up_read(&md->io_lock);
1423
1424		if (bio_rw(bio) != READA)
1425			queue_io(md, bio);
1426		else
1427			bio_io_error(bio);
1428		return;
1429	}
1430
1431	__split_and_process_bio(md, bio);
1432	up_read(&md->io_lock);
1433	return;
1434}
1435
1436static int dm_request_based(struct mapped_device *md)
1437{
1438	return blk_queue_stackable(md->queue);
1439}
1440
1441static void dm_request(struct request_queue *q, struct bio *bio)
1442{
1443	struct mapped_device *md = q->queuedata;
1444
1445	if (dm_request_based(md))
1446		blk_queue_bio(q, bio);
1447	else
1448		_dm_request(q, bio);
1449}
1450
1451void dm_dispatch_request(struct request *rq)
1452{
1453	int r;
1454
1455	if (blk_queue_io_stat(rq->q))
1456		rq->cmd_flags |= REQ_IO_STAT;
1457
1458	rq->start_time = jiffies;
1459	r = blk_insert_cloned_request(rq->q, rq);
1460	if (r)
1461		dm_complete_request(rq, r);
1462}
1463EXPORT_SYMBOL_GPL(dm_dispatch_request);
1464
1465static void dm_rq_bio_destructor(struct bio *bio)
1466{
1467	struct dm_rq_clone_bio_info *info = bio->bi_private;
1468	struct mapped_device *md = info->tio->md;
1469
1470	free_bio_info(info);
1471	bio_free(bio, md->bs);
1472}
1473
1474static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1475				 void *data)
1476{
1477	struct dm_rq_target_io *tio = data;
1478	struct mapped_device *md = tio->md;
1479	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
1480
1481	if (!info)
1482		return -ENOMEM;
1483
1484	info->orig = bio_orig;
1485	info->tio = tio;
1486	bio->bi_end_io = end_clone_bio;
1487	bio->bi_private = info;
1488	bio->bi_destructor = dm_rq_bio_destructor;
1489
1490	return 0;
1491}
1492
1493static int setup_clone(struct request *clone, struct request *rq,
1494		       struct dm_rq_target_io *tio)
1495{
1496	int r;
1497
1498	r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1499			      dm_rq_bio_constructor, tio);
1500	if (r)
1501		return r;
1502
1503	clone->cmd = rq->cmd;
1504	clone->cmd_len = rq->cmd_len;
1505	clone->sense = rq->sense;
1506	clone->buffer = rq->buffer;
1507	clone->end_io = end_clone_request;
1508	clone->end_io_data = tio;
1509
1510	return 0;
1511}
1512
1513static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1514				gfp_t gfp_mask)
1515{
1516	struct request *clone;
1517	struct dm_rq_target_io *tio;
1518
1519	tio = alloc_rq_tio(md, gfp_mask);
1520	if (!tio)
1521		return NULL;
1522
1523	tio->md = md;
1524	tio->ti = NULL;
1525	tio->orig = rq;
1526	tio->error = 0;
1527	memset(&tio->info, 0, sizeof(tio->info));
1528
1529	clone = &tio->clone;
1530	if (setup_clone(clone, rq, tio)) {
1531		/* -ENOMEM */
1532		free_rq_tio(tio);
1533		return NULL;
1534	}
1535
1536	return clone;
1537}
1538
1539/*
1540 * Called with the queue lock held.
1541 */
1542static int dm_prep_fn(struct request_queue *q, struct request *rq)
1543{
1544	struct mapped_device *md = q->queuedata;
1545	struct request *clone;
1546
1547	if (unlikely(rq->special)) {
1548		DMWARN("Already has something in rq->special.");
1549		return BLKPREP_KILL;
1550	}
1551
1552	clone = clone_rq(rq, md, GFP_ATOMIC);
1553	if (!clone)
1554		return BLKPREP_DEFER;
1555
1556	rq->special = clone;
1557	rq->cmd_flags |= REQ_DONTPREP;
1558
1559	return BLKPREP_OK;
1560}
1561
1562/*
1563 * Returns:
1564 * 0  : the request has been processed (not requeued)
1565 * !0 : the request has been requeued
1566 */
1567static int map_request(struct dm_target *ti, struct request *clone,
1568		       struct mapped_device *md)
1569{
1570	int r, requeued = 0;
1571	struct dm_rq_target_io *tio = clone->end_io_data;
1572
1573	tio->ti = ti;
1574	r = ti->type->map_rq(ti, clone, &tio->info);
1575	switch (r) {
1576	case DM_MAPIO_SUBMITTED:
1577		/* The target has taken the I/O to submit by itself later */
1578		break;
1579	case DM_MAPIO_REMAPPED:
1580		/* The target has remapped the I/O so dispatch it */
1581		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1582				     blk_rq_pos(tio->orig));
1583		dm_dispatch_request(clone);
1584		break;
1585	case DM_MAPIO_REQUEUE:
1586		/* The target wants to requeue the I/O */
1587		dm_requeue_unmapped_request(clone);
1588		requeued = 1;
1589		break;
1590	default:
1591		if (r > 0) {
1592			DMWARN("unimplemented target map return value: %d", r);
1593			BUG();
1594		}
1595
1596		/* The target wants to complete the I/O */
1597		dm_kill_unmapped_request(clone, r);
1598		break;
1599	}
1600
1601	return requeued;
1602}
1603
1604static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1605{
1606	struct request *clone;
1607
1608	blk_start_request(orig);
1609	clone = orig->special;
1610	atomic_inc(&md->pending[rq_data_dir(clone)]);
1611
1612	/*
1613	 * Hold the md reference here for the in-flight I/O.
1614	 * We can't rely on the reference count by device opener,
1615	 * because the device may be closed during the request completion
1616	 * when all bios are completed.
1617	 * See the comment in rq_completed() too.
1618	 */
1619	dm_get(md);
1620
1621	return clone;
1622}
1623
1624/*
1625 * q->request_fn for request-based dm.
1626 * Called with the queue lock held.
1627 */
1628static void dm_request_fn(struct request_queue *q)
1629{
1630	struct mapped_device *md = q->queuedata;
1631	struct dm_table *map = dm_get_live_table(md);
1632	struct dm_target *ti;
1633	struct request *rq, *clone;
1634	sector_t pos;
1635
1636	/*
 1637	 * For suspend, check blk_queue_stopped() and increment
 1638	 * ->pending within a single queue_lock so that the number of
 1639	 * in-flight I/Os is not incremented after the queue is stopped
 1640	 * in dm_suspend().
1641	 */
1642	while (!blk_queue_stopped(q)) {
1643		rq = blk_peek_request(q);
1644		if (!rq)
1645			goto delay_and_out;
1646
1647		/* always use block 0 to find the target for flushes for now */
1648		pos = 0;
1649		if (!(rq->cmd_flags & REQ_FLUSH))
1650			pos = blk_rq_pos(rq);
1651
1652		ti = dm_table_find_target(map, pos);
1653		if (!dm_target_is_valid(ti)) {
1654			/*
 1655			 * Must perform the setup that dm_done() requires
 1656			 * before calling dm_kill_unmapped_request()
1657			 */
1658			DMERR_LIMIT("request attempted access beyond the end of device");
1659			clone = dm_start_request(md, rq);
1660			dm_kill_unmapped_request(clone, -EIO);
1661			continue;
1662		}
1663
1664		if (ti->type->busy && ti->type->busy(ti))
1665			goto delay_and_out;
1666
1667		clone = dm_start_request(md, rq);
1668
1669		spin_unlock(q->queue_lock);
1670		if (map_request(ti, clone, md))
1671			goto requeued;
1672
1673		BUG_ON(!irqs_disabled());
1674		spin_lock(q->queue_lock);
1675	}
1676
1677	goto out;
1678
1679requeued:
1680	BUG_ON(!irqs_disabled());
1681	spin_lock(q->queue_lock);
1682
1683delay_and_out:
1684	blk_delay_queue(q, HZ / 10);
1685out:
1686	dm_table_put(map);
1687}
1688
1689int dm_underlying_device_busy(struct request_queue *q)
1690{
1691	return blk_lld_busy(q);
1692}
1693EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1694
1695static int dm_lld_busy(struct request_queue *q)
1696{
1697	int r;
1698	struct mapped_device *md = q->queuedata;
1699	struct dm_table *map = dm_get_live_table(md);
1700
1701	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1702		r = 1;
1703	else
1704		r = dm_table_any_busy_target(map);
1705
1706	dm_table_put(map);
1707
1708	return r;
1709}
1710
1711static int dm_any_congested(void *congested_data, int bdi_bits)
1712{
1713	int r = bdi_bits;
1714	struct mapped_device *md = congested_data;
1715	struct dm_table *map;
1716
1717	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1718		map = dm_get_live_table(md);
1719		if (map) {
1720			/*
1721			 * Request-based dm cares about only own queue for
1722			 * the query about congestion status of request_queue
1723			 */
1724			if (dm_request_based(md))
1725				r = md->queue->backing_dev_info.state &
1726				    bdi_bits;
1727			else
1728				r = dm_table_any_congested(map, bdi_bits);
1729
1730			dm_table_put(map);
1731		}
1732	}
1733
1734	return r;
1735}
1736
1737/*-----------------------------------------------------------------
1738 * An IDR is used to keep track of allocated minor numbers.
1739 *---------------------------------------------------------------*/
1740static void free_minor(int minor)
1741{
1742	spin_lock(&_minor_lock);
1743	idr_remove(&_minor_idr, minor);
1744	spin_unlock(&_minor_lock);
1745}
1746
1747/*
1748 * See if the device with a specific minor # is free.
1749 */
1750static int specific_minor(int minor)
1751{
1752	int r, m;
1753
1754	if (minor >= (1 << MINORBITS))
1755		return -EINVAL;
1756
1757	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1758	if (!r)
1759		return -ENOMEM;
1760
1761	spin_lock(&_minor_lock);
1762
1763	if (idr_find(&_minor_idr, minor)) {
1764		r = -EBUSY;
1765		goto out;
1766	}
1767
1768	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
1769	if (r)
1770		goto out;
1771
1772	if (m != minor) {
1773		idr_remove(&_minor_idr, m);
1774		r = -EBUSY;
1775		goto out;
1776	}
1777
1778out:
1779	spin_unlock(&_minor_lock);
1780	return r;
1781}
1782
1783static int next_free_minor(int *minor)
1784{
1785	int r, m;
1786
1787	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1788	if (!r)
1789		return -ENOMEM;
1790
1791	spin_lock(&_minor_lock);
1792
1793	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
1794	if (r)
1795		goto out;
1796
1797	if (m >= (1 << MINORBITS)) {
1798		idr_remove(&_minor_idr, m);
1799		r = -ENOSPC;
1800		goto out;
1801	}
1802
1803	*minor = m;
1804
1805out:
1806	spin_unlock(&_minor_lock);
1807	return r;
1808}
1809
1810static const struct block_device_operations dm_blk_dops;
1811
1812static void dm_wq_work(struct work_struct *work);
1813
1814static void dm_init_md_queue(struct mapped_device *md)
1815{
1816	/*
1817	 * Request-based dm devices cannot be stacked on top of bio-based dm
1818	 * devices.  The type of this dm device has not been decided yet.
1819	 * The type is decided at the first table loading time.
1820	 * To prevent problematic device stacking, clear the queue flag
1821	 * for request stacking support until then.
1822	 *
1823	 * This queue is new, so no concurrency on the queue_flags.
1824	 */
1825	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1826
1827	md->queue->queuedata = md;
1828	md->queue->backing_dev_info.congested_fn = dm_any_congested;
1829	md->queue->backing_dev_info.congested_data = md;
1830	blk_queue_make_request(md->queue, dm_request);
1831	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1832	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1833}
1834
1835/*
1836 * Allocate and initialise a blank device with a given minor.
1837 */
1838static struct mapped_device *alloc_dev(int minor)
1839{
1840	int r;
1841	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
1842	void *old_md;
1843
1844	if (!md) {
1845		DMWARN("unable to allocate device, out of memory.");
1846		return NULL;
1847	}
1848
1849	if (!try_module_get(THIS_MODULE))
1850		goto bad_module_get;
1851
1852	/* get a minor number for the dev */
1853	if (minor == DM_ANY_MINOR)
1854		r = next_free_minor(&minor);
1855	else
1856		r = specific_minor(minor);
1857	if (r < 0)
1858		goto bad_minor;
1859
1860	md->type = DM_TYPE_NONE;
1861	init_rwsem(&md->io_lock);
1862	mutex_init(&md->suspend_lock);
1863	mutex_init(&md->type_lock);
1864	spin_lock_init(&md->deferred_lock);
1865	rwlock_init(&md->map_lock);
1866	atomic_set(&md->holders, 1);
1867	atomic_set(&md->open_count, 0);
1868	atomic_set(&md->event_nr, 0);
1869	atomic_set(&md->uevent_seq, 0);
1870	INIT_LIST_HEAD(&md->uevent_list);
1871	spin_lock_init(&md->uevent_lock);
1872
1873	md->queue = blk_alloc_queue(GFP_KERNEL);
1874	if (!md->queue)
1875		goto bad_queue;
1876
1877	dm_init_md_queue(md);
1878
1879	md->disk = alloc_disk(1);
1880	if (!md->disk)
1881		goto bad_disk;
1882
1883	atomic_set(&md->pending[0], 0);
1884	atomic_set(&md->pending[1], 0);
1885	init_waitqueue_head(&md->wait);
1886	INIT_WORK(&md->work, dm_wq_work);
1887	init_waitqueue_head(&md->eventq);
1888
1889	md->disk->major = _major;
1890	md->disk->first_minor = minor;
1891	md->disk->fops = &dm_blk_dops;
1892	md->disk->queue = md->queue;
1893	md->disk->private_data = md;
1894	sprintf(md->disk->disk_name, "dm-%d", minor);
1895	add_disk(md->disk);
1896	format_dev_t(md->name, MKDEV(_major, minor));
1897
1898	md->wq = alloc_workqueue("kdmflush",
1899				 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
1900	if (!md->wq)
1901		goto bad_thread;
1902
1903	md->bdev = bdget_disk(md->disk, 0);
1904	if (!md->bdev)
1905		goto bad_bdev;
1906
1907	bio_init(&md->flush_bio);
1908	md->flush_bio.bi_bdev = md->bdev;
1909	md->flush_bio.bi_rw = WRITE_FLUSH;
1910
1911	/* Populate the mapping, nobody knows we exist yet */
1912	spin_lock(&_minor_lock);
1913	old_md = idr_replace(&_minor_idr, md, minor);
1914	spin_unlock(&_minor_lock);
1915
1916	BUG_ON(old_md != MINOR_ALLOCED);
1917
1918	return md;
1919
1920bad_bdev:
1921	destroy_workqueue(md->wq);
1922bad_thread:
1923	del_gendisk(md->disk);
1924	put_disk(md->disk);
1925bad_disk:
1926	blk_cleanup_queue(md->queue);
1927bad_queue:
1928	free_minor(minor);
1929bad_minor:
1930	module_put(THIS_MODULE);
1931bad_module_get:
1932	kfree(md);
1933	return NULL;
1934}
1935
1936static void unlock_fs(struct mapped_device *md);
1937
1938static void free_dev(struct mapped_device *md)
1939{
1940	int minor = MINOR(disk_devt(md->disk));
1941
1942	unlock_fs(md);
1943	bdput(md->bdev);
1944	destroy_workqueue(md->wq);
1945	if (md->tio_pool)
1946		mempool_destroy(md->tio_pool);
1947	if (md->io_pool)
1948		mempool_destroy(md->io_pool);
1949	if (md->bs)
1950		bioset_free(md->bs);
1951	blk_integrity_unregister(md->disk);
1952	del_gendisk(md->disk);
1953	free_minor(minor);
1954
1955	spin_lock(&_minor_lock);
1956	md->disk->private_data = NULL;
1957	spin_unlock(&_minor_lock);
1958
1959	put_disk(md->disk);
1960	blk_cleanup_queue(md->queue);
1961	module_put(THIS_MODULE);
1962	kfree(md);
1963}
1964
1965static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1966{
1967	struct dm_md_mempools *p;
1968
1969	if (md->io_pool && md->tio_pool && md->bs)
1970		/* the md already has necessary mempools */
1971		goto out;
1972
1973	p = dm_table_get_md_mempools(t);
1974	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
1975
1976	md->io_pool = p->io_pool;
1977	p->io_pool = NULL;
1978	md->tio_pool = p->tio_pool;
1979	p->tio_pool = NULL;
1980	md->bs = p->bs;
1981	p->bs = NULL;
1982
1983out:
1984	/* mempool bind completed, now no need any mempools in the table */
1985	dm_table_free_md_mempools(t);
1986}
1987
1988/*
1989 * Bind a table to the device.
1990 */
1991static void event_callback(void *context)
1992{
1993	unsigned long flags;
1994	LIST_HEAD(uevents);
1995	struct mapped_device *md = (struct mapped_device *) context;
1996
1997	spin_lock_irqsave(&md->uevent_lock, flags);
1998	list_splice_init(&md->uevent_list, &uevents);
1999	spin_unlock_irqrestore(&md->uevent_lock, flags);
2000
2001	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2002
2003	atomic_inc(&md->event_nr);
2004	wake_up(&md->eventq);
2005}
2006
2007/*
2008 * Protected by md->suspend_lock obtained by dm_swap_table().
2009 */
2010static void __set_size(struct mapped_device *md, sector_t size)
2011{
2012	set_capacity(md->disk, size);
2013
2014	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2015}
2016
2017/*
2018 * Return 1 if the queue has a compulsory merge_bvec_fn function.
2019 *
2020 * If this function returns 0, then the device is either a non-dm
2021 * device without a merge_bvec_fn, or it is a dm device that is
2022 * able to split any bios it receives that are too big.
2023 */
2024int dm_queue_merge_is_compulsory(struct request_queue *q)
2025{
2026	struct mapped_device *dev_md;
2027
2028	if (!q->merge_bvec_fn)
2029		return 0;
2030
2031	if (q->make_request_fn == dm_request) {
2032		dev_md = q->queuedata;
2033		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
2034			return 0;
2035	}
2036
2037	return 1;
2038}
2039
2040static int dm_device_merge_is_compulsory(struct dm_target *ti,
2041					 struct dm_dev *dev, sector_t start,
2042					 sector_t len, void *data)
2043{
2044	struct block_device *bdev = dev->bdev;
2045	struct request_queue *q = bdev_get_queue(bdev);
2046
2047	return dm_queue_merge_is_compulsory(q);
2048}
2049
2050/*
2051 * Return 1 if it is acceptable to ignore merge_bvec_fn based
2052 * on the properties of the underlying devices.
2053 */
2054static int dm_table_merge_is_optional(struct dm_table *table)
2055{
2056	unsigned i = 0;
2057	struct dm_target *ti;
2058
2059	while (i < dm_table_get_num_targets(table)) {
2060		ti = dm_table_get_target(table, i++);
2061
2062		if (ti->type->iterate_devices &&
2063		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2064			return 0;
2065	}
2066
2067	return 1;
2068}
2069
2070/*
2071 * Returns old map, which caller must destroy.
2072 */
2073static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2074			       struct queue_limits *limits)
2075{
2076	struct dm_table *old_map;
2077	struct request_queue *q = md->queue;
2078	sector_t size;
2079	unsigned long flags;
2080	int merge_is_optional;
2081
2082	size = dm_table_get_size(t);
2083
2084	/*
2085	 * Wipe any geometry if the size of the table changed.
2086	 */
2087	if (size != get_capacity(md->disk))
2088		memset(&md->geometry, 0, sizeof(md->geometry));
2089
2090	__set_size(md, size);
2091
2092	dm_table_event_callback(t, event_callback, md);
2093
2094	/*
 2095	 * If the old table type wasn't request-based, the queue hasn't been
 2096	 * stopped yet during suspension, so stop it now to prevent I/O from
 2097	 * being mapped before resume.
 2098	 * This must be done before setting the queue restrictions,
 2099	 * because request-based dm may start running just after they are set.
2100	 */
2101	if (dm_table_request_based(t) && !blk_queue_stopped(q))
2102		stop_queue(q);
2103
2104	__bind_mempools(md, t);
2105
2106	merge_is_optional = dm_table_merge_is_optional(t);
2107
2108	write_lock_irqsave(&md->map_lock, flags);
2109	old_map = md->map;
2110	md->map = t;
2111	md->immutable_target_type = dm_table_get_immutable_target_type(t);
2112
2113	dm_table_set_restrictions(t, q, limits);
2114	if (merge_is_optional)
2115		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2116	else
2117		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2118	write_unlock_irqrestore(&md->map_lock, flags);
2119
2120	return old_map;
2121}
2122
2123/*
2124 * Returns unbound table for the caller to free.
2125 */
2126static struct dm_table *__unbind(struct mapped_device *md)
2127{
2128	struct dm_table *map = md->map;
2129	unsigned long flags;
2130
2131	if (!map)
2132		return NULL;
2133
2134	dm_table_event_callback(map, NULL, NULL);
2135	write_lock_irqsave(&md->map_lock, flags);
2136	md->map = NULL;
2137	write_unlock_irqrestore(&md->map_lock, flags);
2138
2139	return map;
2140}
2141
2142/*
2143 * Constructor for a new device.
2144 */
2145int dm_create(int minor, struct mapped_device **result)
2146{
2147	struct mapped_device *md;
2148
2149	md = alloc_dev(minor);
2150	if (!md)
2151		return -ENXIO;
2152
2153	dm_sysfs_init(md);
2154
2155	*result = md;
2156	return 0;
2157}
2158
2159/*
2160 * Functions to manage md->type.
2161 * All are required to hold md->type_lock.
2162 */
2163void dm_lock_md_type(struct mapped_device *md)
2164{
2165	mutex_lock(&md->type_lock);
2166}
2167
2168void dm_unlock_md_type(struct mapped_device *md)
2169{
2170	mutex_unlock(&md->type_lock);
2171}
2172
2173void dm_set_md_type(struct mapped_device *md, unsigned type)
2174{
2175	md->type = type;
2176}
2177
2178unsigned dm_get_md_type(struct mapped_device *md)
2179{
2180	return md->type;
2181}
2182
2183struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2184{
2185	return md->immutable_target_type;
2186}
2187
2188/*
2189 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2190 */
2191static int dm_init_request_based_queue(struct mapped_device *md)
2192{
2193	struct request_queue *q = NULL;
2194
2195	if (md->queue->elevator)
2196		return 1;
2197
2198	/* Fully initialize the queue */
2199	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2200	if (!q)
2201		return 0;
2202
2203	md->queue = q;
2204	dm_init_md_queue(md);
2205	blk_queue_softirq_done(md->queue, dm_softirq_done);
2206	blk_queue_prep_rq(md->queue, dm_prep_fn);
2207	blk_queue_lld_busy(md->queue, dm_lld_busy);
2208
2209	elv_register_queue(md->queue);
2210
2211	return 1;
2212}
2213
2214/*
2215 * Setup the DM device's queue based on md's type
2216 */
2217int dm_setup_md_queue(struct mapped_device *md)
2218{
2219	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2220	    !dm_init_request_based_queue(md)) {
2221		DMWARN("Cannot initialize queue for request-based mapped device");
2222		return -EINVAL;
2223	}
2224
2225	return 0;
2226}
2227
2228static struct mapped_device *dm_find_md(dev_t dev)
2229{
2230	struct mapped_device *md;
2231	unsigned minor = MINOR(dev);
2232
2233	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2234		return NULL;
2235
2236	spin_lock(&_minor_lock);
2237
2238	md = idr_find(&_minor_idr, minor);
2239	if (md && (md == MINOR_ALLOCED ||
2240		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
2241		   dm_deleting_md(md) ||
2242		   test_bit(DMF_FREEING, &md->flags))) {
2243		md = NULL;
2244		goto out;
2245	}
2246
2247out:
2248	spin_unlock(&_minor_lock);
2249
2250	return md;
2251}
2252
2253struct mapped_device *dm_get_md(dev_t dev)
2254{
2255	struct mapped_device *md = dm_find_md(dev);
2256
2257	if (md)
2258		dm_get(md);
2259
2260	return md;
2261}
2262EXPORT_SYMBOL_GPL(dm_get_md);
2263
2264void *dm_get_mdptr(struct mapped_device *md)
2265{
2266	return md->interface_ptr;
2267}
2268
2269void dm_set_mdptr(struct mapped_device *md, void *ptr)
2270{
2271	md->interface_ptr = ptr;
2272}
2273
2274void dm_get(struct mapped_device *md)
2275{
2276	atomic_inc(&md->holders);
2277	BUG_ON(test_bit(DMF_FREEING, &md->flags));
2278}
2279
2280const char *dm_device_name(struct mapped_device *md)
2281{
2282	return md->name;
2283}
2284EXPORT_SYMBOL_GPL(dm_device_name);
2285
2286static void __dm_destroy(struct mapped_device *md, bool wait)
2287{
2288	struct dm_table *map;
2289
2290	might_sleep();
2291
2292	spin_lock(&_minor_lock);
2293	map = dm_get_live_table(md);
2294	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2295	set_bit(DMF_FREEING, &md->flags);
2296	spin_unlock(&_minor_lock);
2297
2298	if (!dm_suspended_md(md)) {
2299		dm_table_presuspend_targets(map);
2300		dm_table_postsuspend_targets(map);
2301	}
2302
2303	/*
 2304	 * Rare, but there may, for example, still be I/O requests waiting
 2305	 * to complete.  Wait for all references to disappear.
2306	 * No one should increment the reference count of the mapped_device,
2307	 * after the mapped_device state becomes DMF_FREEING.
2308	 */
2309	if (wait)
2310		while (atomic_read(&md->holders))
2311			msleep(1);
2312	else if (atomic_read(&md->holders))
2313		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2314		       dm_device_name(md), atomic_read(&md->holders));
2315
2316	dm_sysfs_exit(md);
2317	dm_table_put(map);
2318	dm_table_destroy(__unbind(md));
2319	free_dev(md);
2320}
2321
2322void dm_destroy(struct mapped_device *md)
2323{
2324	__dm_destroy(md, true);
2325}
2326
2327void dm_destroy_immediate(struct mapped_device *md)
2328{
2329	__dm_destroy(md, false);
2330}
2331
2332void dm_put(struct mapped_device *md)
2333{
2334	atomic_dec(&md->holders);
2335}
2336EXPORT_SYMBOL_GPL(dm_put);
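/*
 * Illustrative sketch (not part of the original source): a lookup by
 * dev_t takes a reference that the caller must drop with dm_put(), e.g.
 *
 *	struct mapped_device *md = dm_get_md(dev);
 *
 *	if (md) {
 *		... use md, e.g. dm_device_name(md) ...
 *		dm_put(md);
 *	}
 */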
2337
2338static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2339{
2340	int r = 0;
2341	DECLARE_WAITQUEUE(wait, current);
2342
2343	add_wait_queue(&md->wait, &wait);
2344
2345	while (1) {
2346		set_current_state(interruptible);
2347
2348		if (!md_in_flight(md))
2349			break;
2350
2351		if (interruptible == TASK_INTERRUPTIBLE &&
2352		    signal_pending(current)) {
2353			r = -EINTR;
2354			break;
2355		}
2356
2357		io_schedule();
2358	}
2359	set_current_state(TASK_RUNNING);
2360
2361	remove_wait_queue(&md->wait, &wait);
2362
2363	return r;
2364}
2365
2366/*
2367 * Process the deferred bios
2368 */
2369static void dm_wq_work(struct work_struct *work)
2370{
2371	struct mapped_device *md = container_of(work, struct mapped_device,
2372						work);
2373	struct bio *c;
2374
2375	down_read(&md->io_lock);
2376
2377	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2378		spin_lock_irq(&md->deferred_lock);
2379		c = bio_list_pop(&md->deferred);
2380		spin_unlock_irq(&md->deferred_lock);
2381
2382		if (!c)
2383			break;
2384
2385		up_read(&md->io_lock);
2386
2387		if (dm_request_based(md))
2388			generic_make_request(c);
2389		else
2390			__split_and_process_bio(md, c);
2391
2392		down_read(&md->io_lock);
2393	}
2394
2395	up_read(&md->io_lock);
2396}
2397
2398static void dm_queue_flush(struct mapped_device *md)
2399{
2400	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2401	smp_mb__after_clear_bit();
2402	queue_work(md->wq, &md->work);
2403}
2404
2405/*
2406 * Swap in a new table, returning the old one for the caller to destroy.
2407 */
2408struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2409{
2410	struct dm_table *map = ERR_PTR(-EINVAL);
2411	struct queue_limits limits;
2412	int r;
2413
2414	mutex_lock(&md->suspend_lock);
2415
2416	/* device must be suspended */
2417	if (!dm_suspended_md(md))
2418		goto out;
2419
2420	r = dm_calculate_queue_limits(table, &limits);
2421	if (r) {
2422		map = ERR_PTR(r);
2423		goto out;
2424	}
2425
2426	map = __bind(md, table, &limits);
2427
2428out:
2429	mutex_unlock(&md->suspend_lock);
2430	return map;
2431}
2432
2433/*
2434 * Functions to lock and unlock any filesystem running on the
2435 * device.
2436 */
2437static int lock_fs(struct mapped_device *md)
2438{
2439	int r;
2440
2441	WARN_ON(md->frozen_sb);
2442
2443	md->frozen_sb = freeze_bdev(md->bdev);
2444	if (IS_ERR(md->frozen_sb)) {
2445		r = PTR_ERR(md->frozen_sb);
2446		md->frozen_sb = NULL;
2447		return r;
2448	}
2449
2450	set_bit(DMF_FROZEN, &md->flags);
2451
2452	return 0;
2453}
2454
2455static void unlock_fs(struct mapped_device *md)
2456{
2457	if (!test_bit(DMF_FROZEN, &md->flags))
2458		return;
2459
2460	thaw_bdev(md->bdev, md->frozen_sb);
2461	md->frozen_sb = NULL;
2462	clear_bit(DMF_FROZEN, &md->flags);
2463}
2464
2465/*
2466 * We need to be able to change a mapping table under a mounted
2467 * filesystem.  For example we might want to move some data in
2468 * the background.  Before the table can be swapped with
2469 * dm_bind_table, dm_suspend must be called to flush any in
2470 * flight bios and ensure that any further io gets deferred.
2471 */
2472/*
2473 * Suspend mechanism in request-based dm.
2474 *
2475 * 1. Flush all I/Os by lock_fs() if needed.
2476 * 2. Stop dispatching any I/O by stopping the request_queue.
2477 * 3. Wait for all in-flight I/Os to be completed or requeued.
2478 *
2479 * To abort suspend, start the request_queue.
2480 */
2481int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2482{
2483	struct dm_table *map = NULL;
2484	int r = 0;
2485	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
2486	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
2487
2488	mutex_lock(&md->suspend_lock);
2489
2490	if (dm_suspended_md(md)) {
2491		r = -EINVAL;
2492		goto out_unlock;
2493	}
2494
2495	map = dm_get_live_table(md);
2496
2497	/*
2498	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2499	 * This flag is cleared before dm_suspend returns.
2500	 */
2501	if (noflush)
2502		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2503
2504	/* This does not get reverted if there's an error later. */
2505	dm_table_presuspend_targets(map);
2506
2507	/*
2508	 * Flush I/O to the device.
2509	 * Any I/O submitted after lock_fs() may not be flushed.
2510	 * noflush takes precedence over do_lockfs.
2511	 * (lock_fs() flushes I/Os and waits for them to complete.)
2512	 */
2513	if (!noflush && do_lockfs) {
2514		r = lock_fs(md);
2515		if (r)
2516			goto out;
2517	}
2518
2519	/*
2520	 * Here we must make sure that no processes are submitting requests
2521	 * to target drivers i.e. no one may be executing
2522	 * __split_and_process_bio. This is called from dm_request and
2523	 * dm_wq_work.
2524	 *
2525	 * To get all processes out of __split_and_process_bio in dm_request,
2526	 * we take the write lock. To prevent any process from reentering
2527	 * __split_and_process_bio from dm_request and quiesce the thread
 2528	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2529	 * flush_workqueue(md->wq).
2530	 */
2531	down_write(&md->io_lock);
2532	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2533	up_write(&md->io_lock);
2534
2535	/*
2536	 * Stop md->queue before flushing md->wq in case request-based
2537	 * dm defers requests to md->wq from md->queue.
2538	 */
2539	if (dm_request_based(md))
2540		stop_queue(md->queue);
2541
2542	flush_workqueue(md->wq);
2543
2544	/*
2545	 * At this point no more requests are entering target request routines.
2546	 * We call dm_wait_for_completion to wait for all existing requests
2547	 * to finish.
2548	 */
2549	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
2550
2551	down_write(&md->io_lock);
2552	if (noflush)
2553		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2554	up_write(&md->io_lock);
2555
2556	/* were we interrupted ? */
2557	if (r < 0) {
2558		dm_queue_flush(md);
2559
2560		if (dm_request_based(md))
2561			start_queue(md->queue);
2562
2563		unlock_fs(md);
2564		goto out; /* pushback list is already flushed, so skip flush */
2565	}
2566
2567	/*
2568	 * If dm_wait_for_completion returned 0, the device is completely
2569	 * quiescent now. There is no request-processing activity. All new
2570	 * requests are being added to md->deferred list.
2571	 */
2572
2573	set_bit(DMF_SUSPENDED, &md->flags);
2574
2575	dm_table_postsuspend_targets(map);
2576
2577out:
2578	dm_table_put(map);
2579
2580out_unlock:
2581	mutex_unlock(&md->suspend_lock);
2582	return r;
2583}
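/*
 * Illustrative sketch (not part of the original source): a typical table
 * swap by the ioctl layer suspends around dm_swap_table(), roughly
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	...
 *	old_map = dm_swap_table(md, new_table);
 *	...
 *	r = dm_resume(md);
 *
 * Passing DM_SUSPEND_NOFLUSH_FLAG instead lets in-flight I/O be pushed
 * back (deferred) rather than flushed, as described in the noflush
 * handling above.
 */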
2584
2585int dm_resume(struct mapped_device *md)
2586{
2587	int r = -EINVAL;
2588	struct dm_table *map = NULL;
2589
2590	mutex_lock(&md->suspend_lock);
2591	if (!dm_suspended_md(md))
2592		goto out;
2593
2594	map = dm_get_live_table(md);
2595	if (!map || !dm_table_get_size(map))
2596		goto out;
2597
2598	r = dm_table_resume_targets(map);
2599	if (r)
2600		goto out;
2601
2602	dm_queue_flush(md);
2603
2604	/*
2605	 * Flushing deferred I/Os must be done after targets are resumed
2606	 * so that mapping of targets can work correctly.
2607	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2608	 */
2609	if (dm_request_based(md))
2610		start_queue(md->queue);
2611
2612	unlock_fs(md);
2613
2614	clear_bit(DMF_SUSPENDED, &md->flags);
2615
2616	r = 0;
2617out:
2618	dm_table_put(map);
2619	mutex_unlock(&md->suspend_lock);
2620
2621	return r;
2622}
2623
2624/*-----------------------------------------------------------------
2625 * Event notification.
2626 *---------------------------------------------------------------*/
2627int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2628		       unsigned cookie)
2629{
2630	char udev_cookie[DM_COOKIE_LENGTH];
2631	char *envp[] = { udev_cookie, NULL };
2632
2633	if (!cookie)
2634		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2635	else {
2636		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2637			 DM_COOKIE_ENV_VAR_NAME, cookie);
2638		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2639					  action, envp);
2640	}
2641}
2642
2643uint32_t dm_next_uevent_seq(struct mapped_device *md)
2644{
2645	return atomic_add_return(1, &md->uevent_seq);
2646}
2647
2648uint32_t dm_get_event_nr(struct mapped_device *md)
2649{
2650	return atomic_read(&md->event_nr);
2651}
2652
2653int dm_wait_event(struct mapped_device *md, int event_nr)
2654{
2655	return wait_event_interruptible(md->eventq,
2656			(event_nr != atomic_read(&md->event_nr)));
2657}
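/*
 * Illustrative sketch (not part of the original source): the usual event
 * polling pattern pairs the two helpers above, e.g.
 *
 *	uint32_t seen = dm_get_event_nr(md);
 *
 *	...report 'seen' to the caller...
 *
 *	if (dm_wait_event(md, seen))
 *		return -ERESTARTSYS;	(interrupted by a signal)
 *
 * dm_wait_event() returns 0 once event_nr differs from 'seen'.
 */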
2658
2659void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2660{
2661	unsigned long flags;
2662
2663	spin_lock_irqsave(&md->uevent_lock, flags);
2664	list_add(elist, &md->uevent_list);
2665	spin_unlock_irqrestore(&md->uevent_lock, flags);
2666}
2667
2668/*
2669 * The gendisk is only valid as long as you hold a reference
2670 * on 'md'.
2671 */
2672struct gendisk *dm_disk(struct mapped_device *md)
2673{
2674	return md->disk;
2675}
2676
2677struct kobject *dm_kobject(struct mapped_device *md)
2678{
2679	return &md->kobj;
2680}
2681
2682/*
2683 * struct mapped_device should not be exported outside of dm.c
2684 * so use this check to verify that kobj is part of the md structure
2685 */
2686struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2687{
2688	struct mapped_device *md;
2689
2690	md = container_of(kobj, struct mapped_device, kobj);
2691	if (&md->kobj != kobj)
2692		return NULL;
2693
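	/* Do not hand out a reference while the device is being freed or deleted. */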
2694	if (test_bit(DMF_FREEING, &md->flags) ||
2695	    dm_deleting_md(md))
2696		return NULL;
2697
2698	dm_get(md);
2699	return md;
2700}
2701
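/*
 * Suspend-state queries: dm_suspended_md() takes the mapped device, the
 * exported helpers below take a target and resolve its device.
 */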
2702int dm_suspended_md(struct mapped_device *md)
2703{
2704	return test_bit(DMF_SUSPENDED, &md->flags);
2705}
2706
2707int dm_suspended(struct dm_target *ti)
2708{
2709	return dm_suspended_md(dm_table_get_md(ti->table));
2710}
2711EXPORT_SYMBOL_GPL(dm_suspended);
2712
2713int dm_noflush_suspending(struct dm_target *ti)
2714{
2715	return __noflush_suspending(dm_table_get_md(ti->table));
2716}
2717EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2718
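/*
 * Allocate the mempools and bioset a mapped device needs for I/O.  The io
 * and tio pools are backed by the slab caches that match the table type
 * (bio-based or request-based); the bioset gets integrity support when
 * requested.  Any partial allocation is torn down on failure.
 */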
2719struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
2720{
2721	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2722	unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
2723
2724	if (!pools)
2725		return NULL;
2726
2727	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2728			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
2729			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
2730	if (!pools->io_pool)
2731		goto free_pools_and_out;
2732
2733	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
2734			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
2735			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2736	if (!pools->tio_pool)
2737		goto free_io_pool_and_out;
2738
2739	pools->bs = bioset_create(pool_size, 0);
2740	if (!pools->bs)
2741		goto free_tio_pool_and_out;
2742
2743	if (integrity && bioset_integrity_create(pools->bs, pool_size))
2744		goto free_bioset_and_out;
2745
2746	return pools;
2747
2748free_bioset_and_out:
2749	bioset_free(pools->bs);
2750
2751free_tio_pool_and_out:
2752	mempool_destroy(pools->tio_pool);
2753
2754free_io_pool_and_out:
2755	mempool_destroy(pools->io_pool);
2756
2757free_pools_and_out:
2758	kfree(pools);
2759
2760	return NULL;
2761}
2762
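/* Release whatever dm_alloc_md_mempools() managed to allocate. */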
2763void dm_free_md_mempools(struct dm_md_mempools *pools)
2764{
2765	if (!pools)
2766		return;
2767
2768	if (pools->io_pool)
2769		mempool_destroy(pools->io_pool);
2770
2771	if (pools->tio_pool)
2772		mempool_destroy(pools->tio_pool);
2773
2774	if (pools->bs)
2775		bioset_free(pools->bs);
2776
2777	kfree(pools);
2778}
2779
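/* Block device operations for the gendisk of every mapped device. */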
2780static const struct block_device_operations dm_blk_dops = {
2781	.open = dm_blk_open,
2782	.release = dm_blk_close,
2783	.ioctl = dm_blk_ioctl,
2784	.getgeo = dm_blk_getgeo,
2785	.owner = THIS_MODULE
2786};
2787
2788EXPORT_SYMBOL(dm_get_mapinfo);
2789
2790/*
2791 * module hooks
2792 */
2793module_init(dm_init);
2794module_exit(dm_exit);
2795
2796module_param(major, uint, 0);
2797MODULE_PARM_DESC(major, "The major number of the device mapper");
2798MODULE_DESCRIPTION(DM_NAME " driver");
2799MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2800MODULE_LICENSE("GPL");