// drivers/block/loop.c, Linux v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright 1993 by Theodore Ts'o.
   4 */
   5#include <linux/module.h>
   6#include <linux/moduleparam.h>
   7#include <linux/sched.h>
   8#include <linux/fs.h>
   9#include <linux/pagemap.h>
  10#include <linux/file.h>
  11#include <linux/stat.h>
  12#include <linux/errno.h>
  13#include <linux/major.h>
  14#include <linux/wait.h>
  15#include <linux/blkpg.h>
  16#include <linux/init.h>
  17#include <linux/swap.h>
  18#include <linux/slab.h>
  19#include <linux/compat.h>
  20#include <linux/suspend.h>
  21#include <linux/freezer.h>
  22#include <linux/mutex.h>
  23#include <linux/writeback.h>
  24#include <linux/completion.h>
  25#include <linux/highmem.h>
  26#include <linux/splice.h>
  27#include <linux/sysfs.h>
  28#include <linux/miscdevice.h>
  29#include <linux/falloc.h>
  30#include <linux/uio.h>
  31#include <linux/ioprio.h>
  32#include <linux/blk-cgroup.h>
  33#include <linux/sched/mm.h>
  34#include <linux/statfs.h>
  35#include <linux/uaccess.h>
  36#include <linux/blk-mq.h>
  37#include <linux/spinlock.h>
  38#include <uapi/linux/loop.h>
  39
  40/* Possible states of device */
  41enum {
  42	Lo_unbound,
  43	Lo_bound,
  44	Lo_rundown,
  45	Lo_deleting,
  46};
  47
  48struct loop_func_table;
  49
  50struct loop_device {
  51	int		lo_number;
  52	loff_t		lo_offset;
  53	loff_t		lo_sizelimit;
  54	int		lo_flags;
  55	char		lo_file_name[LO_NAME_SIZE];
  56
  57	struct file *	lo_backing_file;
  58	struct block_device *lo_device;
  59
  60	gfp_t		old_gfp_mask;
  61
  62	spinlock_t		lo_lock;
  63	int			lo_state;
  64	spinlock_t              lo_work_lock;
  65	struct workqueue_struct *workqueue;
  66	struct work_struct      rootcg_work;
  67	struct list_head        rootcg_cmd_list;
  68	struct list_head        idle_worker_list;
  69	struct rb_root          worker_tree;
  70	struct timer_list       timer;
  71	bool			use_dio;
  72	bool			sysfs_inited;
  73
  74	struct request_queue	*lo_queue;
  75	struct blk_mq_tag_set	tag_set;
  76	struct gendisk		*lo_disk;
  77	struct mutex		lo_mutex;
  78	bool			idr_visible;
  79};
  80
  81struct loop_cmd {
  82	struct list_head list_entry;
  83	bool use_aio; /* use AIO interface to handle I/O */
  84	atomic_t ref; /* only for aio */
  85	long ret;
  86	struct kiocb iocb;
  87	struct bio_vec *bvec;
  88	struct cgroup_subsys_state *blkcg_css;
  89	struct cgroup_subsys_state *memcg_css;
  90};
  91
  92#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
  93#define LOOP_DEFAULT_HW_Q_DEPTH 128
  94
  95static DEFINE_IDR(loop_index_idr);
  96static DEFINE_MUTEX(loop_ctl_mutex);
  97static DEFINE_MUTEX(loop_validate_mutex);
  98
  99/**
 100 * loop_global_lock_killable() - take locks for safe loop_validate_file() test
 101 *
 102 * @lo: struct loop_device
 103 * @global: true if @lo is about to bind another "struct loop_device", false otherwise
 104 *
 105 * Returns 0 on success, -EINTR otherwise.
 106 *
  107 * Since loop_validate_file() traverses other "struct loop_device" instances
  108 * when is_loop_device() is true, we need a global lock to serialize concurrent
  109 * loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
 110 */
 111static int loop_global_lock_killable(struct loop_device *lo, bool global)
 112{
 113	int err;
 114
 115	if (global) {
 116		err = mutex_lock_killable(&loop_validate_mutex);
 117		if (err)
 118			return err;
 119	}
 120	err = mutex_lock_killable(&lo->lo_mutex);
 121	if (err && global)
 122		mutex_unlock(&loop_validate_mutex);
 123	return err;
 124}
 125
 126/**
 127 * loop_global_unlock() - release locks taken by loop_global_lock_killable()
 128 *
 129 * @lo: struct loop_device
 130 * @global: true if @lo was about to bind another "struct loop_device", false otherwise
 131 */
 132static void loop_global_unlock(struct loop_device *lo, bool global)
 133{
 134	mutex_unlock(&lo->lo_mutex);
 135	if (global)
 136		mutex_unlock(&loop_validate_mutex);
 137}
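/*
 * Usage sketch (illustrative, mirroring the callers later in this file): the
 * two helpers above are always used as a pair with the same 'global' value:
 *
 *	err = loop_global_lock_killable(lo, is_loop);
 *	if (err)
 *		return err;
 *	... validate / reconfigure the device ...
 *	loop_global_unlock(lo, is_loop);
 */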
 138
 139static int max_part;
 140static int part_shift;
 141
 142static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
 143{
 144	loff_t loopsize;
 145
 146	/* Compute loopsize in bytes */
 147	loopsize = i_size_read(file->f_mapping->host);
 148	if (offset > 0)
 149		loopsize -= offset;
 150	/* offset is beyond i_size, weird but possible */
 151	if (loopsize < 0)
 152		return 0;
 153
 154	if (sizelimit > 0 && sizelimit < loopsize)
 155		loopsize = sizelimit;
 156	/*
 157	 * Unfortunately, if we want to do I/O on the device,
 158	 * the number of 512-byte sectors has to fit into a sector_t.
 159	 */
 160	return loopsize >> 9;
 161}
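/*
 * Worked example for get_size() above (illustrative numbers): a 1 GiB backing
 * file with offset = 4096 and sizelimit = 0 yields
 * (1073741824 - 4096) >> 9 = 2097144 sectors of 512 bytes.
 */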
 162
 163static loff_t get_loop_size(struct loop_device *lo, struct file *file)
 164{
 165	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
 166}
 167
 168/*
 169 * We support direct I/O only if lo_offset is aligned with the logical I/O size
 170 * of backing device, and the logical block size of loop is bigger than that of
 171 * the backing device.
 172 */
 173static bool lo_bdev_can_use_dio(struct loop_device *lo,
 174		struct block_device *backing_bdev)
 175{
 176	unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
 177
 178	if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
 179		return false;
 180	if (lo->lo_offset & (sb_bsize - 1))
 181		return false;
 182	return true;
 183}
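/*
 * Example (illustrative): with a backing block device that uses 4096-byte
 * logical blocks, lo_bdev_can_use_dio() refuses direct I/O while the loop
 * device still advertises a smaller (e.g. 512-byte) logical block size, or
 * when lo_offset is not a multiple of 4096.
 */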
 184
 185static void __loop_update_dio(struct loop_device *lo, bool dio)
 186{
 187	struct file *file = lo->lo_backing_file;
 188	struct inode *inode = file->f_mapping->host;
 189	struct block_device *backing_bdev = NULL;
 190	bool use_dio;
 191
 192	if (S_ISBLK(inode->i_mode))
 193		backing_bdev = I_BDEV(inode);
 194	else if (inode->i_sb->s_bdev)
 195		backing_bdev = inode->i_sb->s_bdev;
 196
 197	use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
 198		(!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
 199
 200	if (lo->use_dio == use_dio)
 201		return;
 202
 203	/* flush dirty pages before changing direct IO */
 204	vfs_fsync(file, 0);
 205
 206	/*
  207	 * The LO_FLAGS_DIRECT_IO flag is handled like LO_FLAGS_READ_ONLY:
  208	 * both are set from the kernel, and losetup picks up the change
  209	 * via ioctl(LOOP_GET_STATUS)
 210	 */
 211	if (lo->lo_state == Lo_bound)
 212		blk_mq_freeze_queue(lo->lo_queue);
 213	lo->use_dio = use_dio;
 214	if (use_dio) {
 215		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 216		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
 217	} else {
 218		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 219		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
 220	}
 221	if (lo->lo_state == Lo_bound)
 222		blk_mq_unfreeze_queue(lo->lo_queue);
 223}
 224
 225/**
 226 * loop_set_size() - sets device size and notifies userspace
 227 * @lo: struct loop_device to set the size for
 228 * @size: new size of the loop device
 229 *
 230 * Callers must validate that the size passed into this function fits into
  231 * a sector_t, e.g. using loop_validate_size()
 232 */
 233static void loop_set_size(struct loop_device *lo, loff_t size)
 234{
 235	if (!set_capacity_and_notify(lo->lo_disk, size))
 236		kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
 237}
 238
 239static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
 240{
 241	struct iov_iter i;
 242	ssize_t bw;
 243
 244	iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
 245
 246	bw = vfs_iter_write(file, &i, ppos, 0);
 247
 248	if (likely(bw ==  bvec->bv_len))
 249		return 0;
 250
 251	printk_ratelimited(KERN_ERR
 252		"loop: Write error at byte offset %llu, length %i.\n",
 253		(unsigned long long)*ppos, bvec->bv_len);
 254	if (bw >= 0)
 255		bw = -EIO;
 256	return bw;
 257}
 258
 259static int lo_write_simple(struct loop_device *lo, struct request *rq,
 260		loff_t pos)
 261{
 262	struct bio_vec bvec;
 263	struct req_iterator iter;
 264	int ret = 0;
 265
 266	rq_for_each_segment(bvec, rq, iter) {
 267		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
 268		if (ret < 0)
 269			break;
 270		cond_resched();
 271	}
 272
 273	return ret;
 274}
 275
 276static int lo_read_simple(struct loop_device *lo, struct request *rq,
 277		loff_t pos)
 278{
 279	struct bio_vec bvec;
 280	struct req_iterator iter;
 281	struct iov_iter i;
 282	ssize_t len;
 283
 284	rq_for_each_segment(bvec, rq, iter) {
 285		iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
 286		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
 287		if (len < 0)
 288			return len;
 289
 290		flush_dcache_page(bvec.bv_page);
 291
 292		if (len != bvec.bv_len) {
 293			struct bio *bio;
 294
 295			__rq_for_each_bio(bio, rq)
 296				zero_fill_bio(bio);
 297			break;
 298		}
 299		cond_resched();
 300	}
 301
 302	return 0;
 303}
 304
 305static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
 306			int mode)
 307{
 308	/*
 309	 * We use fallocate to manipulate the space mappings used by the image
 310	 * a.k.a. discard/zerorange.
 311	 */
 312	struct file *file = lo->lo_backing_file;
 313	int ret;
 314
 315	mode |= FALLOC_FL_KEEP_SIZE;
 316
 317	if (!bdev_max_discard_sectors(lo->lo_device))
 318		return -EOPNOTSUPP;
 319
 320	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
 321	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
 322		return -EIO;
 323	return ret;
 324}
 325
 326static int lo_req_flush(struct loop_device *lo, struct request *rq)
 327{
 328	int ret = vfs_fsync(lo->lo_backing_file, 0);
 329	if (unlikely(ret && ret != -EINVAL))
 330		ret = -EIO;
 331
 332	return ret;
 333}
 334
 335static void lo_complete_rq(struct request *rq)
 336{
 337	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 338	blk_status_t ret = BLK_STS_OK;
 339
 340	if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
 341	    req_op(rq) != REQ_OP_READ) {
 342		if (cmd->ret < 0)
 343			ret = errno_to_blk_status(cmd->ret);
 344		goto end_io;
 345	}
 346
 347	/*
 348	 * Short READ - if we got some data, advance our request and
 349	 * retry it. If we got no data, end the rest with EIO.
 350	 */
 351	if (cmd->ret) {
 352		blk_update_request(rq, BLK_STS_OK, cmd->ret);
 353		cmd->ret = 0;
 354		blk_mq_requeue_request(rq, true);
 355	} else {
 356		if (cmd->use_aio) {
 357			struct bio *bio = rq->bio;
 358
 359			while (bio) {
 360				zero_fill_bio(bio);
 361				bio = bio->bi_next;
 362			}
 363		}
 364		ret = BLK_STS_IOERR;
 365end_io:
 366		blk_mq_end_request(rq, ret);
 367	}
 368}
 369
 370static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
 371{
 372	struct request *rq = blk_mq_rq_from_pdu(cmd);
 373
 374	if (!atomic_dec_and_test(&cmd->ref))
 375		return;
 376	kfree(cmd->bvec);
 377	cmd->bvec = NULL;
 378	if (likely(!blk_should_fake_timeout(rq->q)))
 379		blk_mq_complete_request(rq);
 380}
 381
 382static void lo_rw_aio_complete(struct kiocb *iocb, long ret)
 383{
 384	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
 385
 386	cmd->ret = ret;
 387	lo_rw_aio_do_completion(cmd);
 388}
 389
 390static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 391		     loff_t pos, int rw)
 392{
 393	struct iov_iter iter;
 394	struct req_iterator rq_iter;
 395	struct bio_vec *bvec;
 396	struct request *rq = blk_mq_rq_from_pdu(cmd);
 397	struct bio *bio = rq->bio;
 398	struct file *file = lo->lo_backing_file;
 399	struct bio_vec tmp;
 400	unsigned int offset;
 401	int nr_bvec = 0;
 402	int ret;
 403
 404	rq_for_each_bvec(tmp, rq, rq_iter)
 405		nr_bvec++;
 406
 407	if (rq->bio != rq->biotail) {
 408
 409		bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
 410				     GFP_NOIO);
 411		if (!bvec)
 412			return -EIO;
 413		cmd->bvec = bvec;
 414
 415		/*
 416		 * The bios of the request may be started from the middle of
 417		 * the 'bvec' because of bio splitting, so we can't directly
  418		 * copy bio->bi_io_vec to a new bvec. The rq_for_each_bvec
 419		 * API will take care of all details for us.
 420		 */
 421		rq_for_each_bvec(tmp, rq, rq_iter) {
 422			*bvec = tmp;
 423			bvec++;
 424		}
 425		bvec = cmd->bvec;
 426		offset = 0;
 427	} else {
 428		/*
 429		 * Same here, this bio may be started from the middle of the
 430		 * 'bvec' because of bio splitting, so offset from the bvec
 431		 * must be passed to iov iterator
 432		 */
 433		offset = bio->bi_iter.bi_bvec_done;
 434		bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
 435	}
 436	atomic_set(&cmd->ref, 2);
 437
 438	iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
 439	iter.iov_offset = offset;
 440
 441	cmd->iocb.ki_pos = pos;
 442	cmd->iocb.ki_filp = file;
 443	cmd->iocb.ki_complete = lo_rw_aio_complete;
 444	cmd->iocb.ki_flags = IOCB_DIRECT;
 445	cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
 446
 447	if (rw == ITER_SOURCE)
 448		ret = call_write_iter(file, &cmd->iocb, &iter);
 449	else
 450		ret = call_read_iter(file, &cmd->iocb, &iter);
 451
 452	lo_rw_aio_do_completion(cmd);
 453
 454	if (ret != -EIOCBQUEUED)
 455		lo_rw_aio_complete(&cmd->iocb, ret);
 456	return 0;
 457}
 458
 459static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 460{
 461	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 462	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
 463
 464	/*
  465	 * lo_write_simple and lo_read_simple could in principle be handled
  466	 * by an io-submit style function like lo_rw_aio(); one blocker
  467	 * is that lo_read_simple() needs to call flush_dcache_page() after
  468	 * a page is written from the kernel, which is hard to do in an
  469	 * io-submit style function that submits all segments of the
  470	 * request at once. Direct read I/O doesn't need to run
  471	 * flush_dcache_page().
 472	 */
 473	switch (req_op(rq)) {
 474	case REQ_OP_FLUSH:
 475		return lo_req_flush(lo, rq);
 476	case REQ_OP_WRITE_ZEROES:
 477		/*
 478		 * If the caller doesn't want deallocation, call zeroout to
 479		 * write zeroes the range.  Otherwise, punch them out.
 480		 */
 481		return lo_fallocate(lo, rq, pos,
 482			(rq->cmd_flags & REQ_NOUNMAP) ?
 483				FALLOC_FL_ZERO_RANGE :
 484				FALLOC_FL_PUNCH_HOLE);
 485	case REQ_OP_DISCARD:
 486		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
 487	case REQ_OP_WRITE:
 488		if (cmd->use_aio)
 489			return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
 490		else
 491			return lo_write_simple(lo, rq, pos);
 492	case REQ_OP_READ:
 493		if (cmd->use_aio)
 494			return lo_rw_aio(lo, cmd, pos, ITER_DEST);
 495		else
 496			return lo_read_simple(lo, rq, pos);
 497	default:
 498		WARN_ON_ONCE(1);
 499		return -EIO;
 500	}
 501}
 502
 503static inline void loop_update_dio(struct loop_device *lo)
 504{
 505	__loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
 506				lo->use_dio);
 507}
 508
 509static void loop_reread_partitions(struct loop_device *lo)
 510{
 511	int rc;
 512
 513	mutex_lock(&lo->lo_disk->open_mutex);
 514	rc = bdev_disk_changed(lo->lo_disk, false);
 515	mutex_unlock(&lo->lo_disk->open_mutex);
 516	if (rc)
 517		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
 518			__func__, lo->lo_number, lo->lo_file_name, rc);
 519}
 520
 521static inline int is_loop_device(struct file *file)
 522{
 523	struct inode *i = file->f_mapping->host;
 524
 525	return i && S_ISBLK(i->i_mode) && imajor(i) == LOOP_MAJOR;
 526}
 527
 528static int loop_validate_file(struct file *file, struct block_device *bdev)
 529{
 530	struct inode	*inode = file->f_mapping->host;
 531	struct file	*f = file;
 532
 533	/* Avoid recursion */
 534	while (is_loop_device(f)) {
 535		struct loop_device *l;
 536
 537		lockdep_assert_held(&loop_validate_mutex);
 538		if (f->f_mapping->host->i_rdev == bdev->bd_dev)
 539			return -EBADF;
 540
 541		l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
 542		if (l->lo_state != Lo_bound)
 543			return -EINVAL;
 544		/* Order wrt setting lo->lo_backing_file in loop_configure(). */
 545		rmb();
 546		f = l->lo_backing_file;
 547	}
 548	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
 549		return -EINVAL;
 550	return 0;
 551}
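/*
 * Example (illustrative): if the file passed in is itself a /dev/loopN node,
 * the loop above walks the chain of backing files. Stacking a loop device on
 * top of its own block device node fails with -EBADF, and stacking on an
 * unbound loop device fails with -EINVAL.
 */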
 552
 553/*
  554 * loop_change_fd switches the backing store of a loopback device to
 555 * a new file. This is useful for operating system installers to free up
 556 * the original file and in High Availability environments to switch to
 557 * an alternative location for the content in case of server meltdown.
 558 * This can only work if the loop device is used read-only, and if the
 559 * new backing store is the same size and type as the old backing store.
 560 */
 561static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 562			  unsigned int arg)
 563{
 564	struct file *file = fget(arg);
 565	struct file *old_file;
 566	int error;
 567	bool partscan;
 568	bool is_loop;
 569
 570	if (!file)
 571		return -EBADF;
 572
 573	/* suppress uevents while reconfiguring the device */
 574	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
 575
 576	is_loop = is_loop_device(file);
 577	error = loop_global_lock_killable(lo, is_loop);
 578	if (error)
 579		goto out_putf;
 580	error = -ENXIO;
 581	if (lo->lo_state != Lo_bound)
 582		goto out_err;
 583
 584	/* the loop device has to be read-only */
 585	error = -EINVAL;
 586	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
 587		goto out_err;
 588
 589	error = loop_validate_file(file, bdev);
 590	if (error)
 591		goto out_err;
 592
 593	old_file = lo->lo_backing_file;
 594
 595	error = -EINVAL;
 596
 597	/* size of the new backing store needs to be the same */
 598	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
 599		goto out_err;
 600
 601	/* and ... switch */
 602	disk_force_media_change(lo->lo_disk);
 603	blk_mq_freeze_queue(lo->lo_queue);
 604	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
 605	lo->lo_backing_file = file;
 606	lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
 607	mapping_set_gfp_mask(file->f_mapping,
 608			     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 609	loop_update_dio(lo);
 610	blk_mq_unfreeze_queue(lo->lo_queue);
 611	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
 612	loop_global_unlock(lo, is_loop);
 613
 614	/*
 615	 * Flush loop_validate_file() before fput(), for l->lo_backing_file
 616	 * might be pointing at old_file which might be the last reference.
 617	 */
 618	if (!is_loop) {
 619		mutex_lock(&loop_validate_mutex);
 620		mutex_unlock(&loop_validate_mutex);
 621	}
 622	/*
 623	 * We must drop file reference outside of lo_mutex as dropping
 624	 * the file ref can take open_mutex which creates circular locking
 625	 * dependency.
 626	 */
 627	fput(old_file);
 628	if (partscan)
 629		loop_reread_partitions(lo);
 630
 631	error = 0;
 632done:
 633	/* enable and uncork uevent now that we are done */
 634	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
 635	return error;
 636
 637out_err:
 638	loop_global_unlock(lo, is_loop);
 639out_putf:
 640	fput(file);
 641	goto done;
 642}
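/*
 * Userspace sketch for LOOP_CHANGE_FD (illustrative, file names made up).
 * The device must already be bound read-only and the replacement file must
 * resolve to the same size:
 *
 *	int lo_fd = open("/dev/loop0", O_RDONLY);
 *	int new_fd = open("backing-copy.img", O_RDONLY);
 *
 *	if (ioctl(lo_fd, LOOP_CHANGE_FD, new_fd) < 0)
 *		perror("LOOP_CHANGE_FD");
 */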
 643
 644/* loop sysfs attributes */
 645
 646static ssize_t loop_attr_show(struct device *dev, char *page,
 647			      ssize_t (*callback)(struct loop_device *, char *))
 648{
 649	struct gendisk *disk = dev_to_disk(dev);
 650	struct loop_device *lo = disk->private_data;
 651
 652	return callback(lo, page);
 653}
 654
 655#define LOOP_ATTR_RO(_name)						\
 656static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
 657static ssize_t loop_attr_do_show_##_name(struct device *d,		\
 658				struct device_attribute *attr, char *b)	\
 659{									\
 660	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
 661}									\
 662static struct device_attribute loop_attr_##_name =			\
 663	__ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);
 664
 665static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
 666{
 667	ssize_t ret;
 668	char *p = NULL;
 669
 670	spin_lock_irq(&lo->lo_lock);
 671	if (lo->lo_backing_file)
 672		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
 673	spin_unlock_irq(&lo->lo_lock);
 674
 675	if (IS_ERR_OR_NULL(p))
 676		ret = PTR_ERR(p);
 677	else {
 678		ret = strlen(p);
 679		memmove(buf, p, ret);
 680		buf[ret++] = '\n';
 681		buf[ret] = 0;
 682	}
 683
 684	return ret;
 685}
 686
 687static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
 688{
 689	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
 690}
 691
 692static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
 693{
 694	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
 695}
 696
 697static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
 698{
 699	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
 700
 701	return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0");
 702}
 703
 704static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
 705{
 706	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
 707
 708	return sysfs_emit(buf, "%s\n", partscan ? "1" : "0");
 709}
 710
 711static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
 712{
 713	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
 714
 715	return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
 716}
 717
 718LOOP_ATTR_RO(backing_file);
 719LOOP_ATTR_RO(offset);
 720LOOP_ATTR_RO(sizelimit);
 721LOOP_ATTR_RO(autoclear);
 722LOOP_ATTR_RO(partscan);
 723LOOP_ATTR_RO(dio);
 724
 725static struct attribute *loop_attrs[] = {
 726	&loop_attr_backing_file.attr,
 727	&loop_attr_offset.attr,
 728	&loop_attr_sizelimit.attr,
 729	&loop_attr_autoclear.attr,
 730	&loop_attr_partscan.attr,
 731	&loop_attr_dio.attr,
 732	NULL,
 733};
 734
 735static struct attribute_group loop_attribute_group = {
 736	.name = "loop",
 737	.attrs= loop_attrs,
 738};
 739
 740static void loop_sysfs_init(struct loop_device *lo)
 741{
 742	lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
 743						&loop_attribute_group);
 744}
 745
 746static void loop_sysfs_exit(struct loop_device *lo)
 747{
 748	if (lo->sysfs_inited)
 749		sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
 750				   &loop_attribute_group);
 751}
 752
 753static void loop_config_discard(struct loop_device *lo,
 754		struct queue_limits *lim)
 755{
 756	struct file *file = lo->lo_backing_file;
 757	struct inode *inode = file->f_mapping->host;
 758	u32 granularity = 0, max_discard_sectors = 0;
 759	struct kstatfs sbuf;
 760
 761	/*
 762	 * If the backing device is a block device, mirror its zeroing
 763	 * capability. Set the discard sectors to the block device's zeroing
 764	 * capabilities because loop discards result in blkdev_issue_zeroout(),
 765	 * not blkdev_issue_discard(). This maintains consistent behavior with
 766	 * file-backed loop devices: discarded regions read back as zero.
 767	 */
 768	if (S_ISBLK(inode->i_mode)) {
 769		struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
 770
 771		max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
 772		granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
 773			queue_physical_block_size(backingq);
 774
 775	/*
 776	 * We use punch hole to reclaim the free space used by the
 777	 * image a.k.a. discard.
 778	 */
 779	} else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
 780		max_discard_sectors = UINT_MAX >> 9;
 781		granularity = sbuf.f_bsize;
 782	}
 783
 784	lim->max_hw_discard_sectors = max_discard_sectors;
 785	lim->max_write_zeroes_sectors = max_discard_sectors;
 786	if (max_discard_sectors)
 787		lim->discard_granularity = granularity;
 788	else
 789		lim->discard_granularity = 0;
 790}
 791
 792struct loop_worker {
 793	struct rb_node rb_node;
 794	struct work_struct work;
 795	struct list_head cmd_list;
 796	struct list_head idle_list;
 797	struct loop_device *lo;
 798	struct cgroup_subsys_state *blkcg_css;
 799	unsigned long last_ran_at;
 800};
 801
 802static void loop_workfn(struct work_struct *work);
 803
 804#ifdef CONFIG_BLK_CGROUP
 805static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
 806{
 807	return !css || css == blkcg_root_css;
 808}
 809#else
 810static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
 811{
 812	return !css;
 813}
 814#endif
 815
 816static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
 817{
 818	struct rb_node **node, *parent = NULL;
 819	struct loop_worker *cur_worker, *worker = NULL;
 820	struct work_struct *work;
 821	struct list_head *cmd_list;
 822
 823	spin_lock_irq(&lo->lo_work_lock);
 824
 825	if (queue_on_root_worker(cmd->blkcg_css))
 826		goto queue_work;
 827
 828	node = &lo->worker_tree.rb_node;
 829
 830	while (*node) {
 831		parent = *node;
 832		cur_worker = container_of(*node, struct loop_worker, rb_node);
 833		if (cur_worker->blkcg_css == cmd->blkcg_css) {
 834			worker = cur_worker;
 835			break;
 836		} else if ((long)cur_worker->blkcg_css < (long)cmd->blkcg_css) {
 837			node = &(*node)->rb_left;
 838		} else {
 839			node = &(*node)->rb_right;
 840		}
 841	}
 842	if (worker)
 843		goto queue_work;
 844
 845	worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
 846	/*
 847	 * In the event we cannot allocate a worker, just queue on the
 848	 * rootcg worker and issue the I/O as the rootcg
 849	 */
 850	if (!worker) {
 851		cmd->blkcg_css = NULL;
 852		if (cmd->memcg_css)
 853			css_put(cmd->memcg_css);
 854		cmd->memcg_css = NULL;
 855		goto queue_work;
 856	}
 857
 858	worker->blkcg_css = cmd->blkcg_css;
 859	css_get(worker->blkcg_css);
 860	INIT_WORK(&worker->work, loop_workfn);
 861	INIT_LIST_HEAD(&worker->cmd_list);
 862	INIT_LIST_HEAD(&worker->idle_list);
 863	worker->lo = lo;
 864	rb_link_node(&worker->rb_node, parent, node);
 865	rb_insert_color(&worker->rb_node, &lo->worker_tree);
 866queue_work:
 867	if (worker) {
 868		/*
 869		 * We need to remove from the idle list here while
 870		 * holding the lock so that the idle timer doesn't
 871		 * free the worker
 872		 */
 873		if (!list_empty(&worker->idle_list))
 874			list_del_init(&worker->idle_list);
 875		work = &worker->work;
 876		cmd_list = &worker->cmd_list;
 877	} else {
 878		work = &lo->rootcg_work;
 879		cmd_list = &lo->rootcg_cmd_list;
 880	}
 881	list_add_tail(&cmd->list_entry, cmd_list);
 882	queue_work(lo->workqueue, work);
 883	spin_unlock_irq(&lo->lo_work_lock);
 884}
 885
 886static void loop_set_timer(struct loop_device *lo)
 887{
 888	timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT);
 889}
 890
 891static void loop_free_idle_workers(struct loop_device *lo, bool delete_all)
 892{
 893	struct loop_worker *pos, *worker;
 894
 895	spin_lock_irq(&lo->lo_work_lock);
 896	list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
 897				idle_list) {
 898		if (!delete_all &&
 899		    time_is_after_jiffies(worker->last_ran_at +
 900					  LOOP_IDLE_WORKER_TIMEOUT))
 901			break;
 902		list_del(&worker->idle_list);
 903		rb_erase(&worker->rb_node, &lo->worker_tree);
 904		css_put(worker->blkcg_css);
 905		kfree(worker);
 906	}
 907	if (!list_empty(&lo->idle_worker_list))
 908		loop_set_timer(lo);
 909	spin_unlock_irq(&lo->lo_work_lock);
 910}
 911
 912static void loop_free_idle_workers_timer(struct timer_list *timer)
 913{
 914	struct loop_device *lo = container_of(timer, struct loop_device, timer);
 915
 916	return loop_free_idle_workers(lo, false);
 917}
 918
 919static void loop_update_rotational(struct loop_device *lo)
 920{
 921	struct file *file = lo->lo_backing_file;
 922	struct inode *file_inode = file->f_mapping->host;
 923	struct block_device *file_bdev = file_inode->i_sb->s_bdev;
 924	struct request_queue *q = lo->lo_queue;
 925	bool nonrot = true;
 926
 927	/* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
 928	if (file_bdev)
 929		nonrot = bdev_nonrot(file_bdev);
 930
 931	if (nonrot)
 932		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 933	else
 934		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
 935}
 936
 937/**
 938 * loop_set_status_from_info - configure device from loop_info
 939 * @lo: struct loop_device to configure
 940 * @info: struct loop_info64 to configure the device with
 941 *
 942 * Configures the loop device parameters according to the passed
 943 * in loop_info64 configuration.
 944 */
 945static int
 946loop_set_status_from_info(struct loop_device *lo,
 947			  const struct loop_info64 *info)
 948{
 949	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
 950		return -EINVAL;
 951
 952	switch (info->lo_encrypt_type) {
 953	case LO_CRYPT_NONE:
 954		break;
 955	case LO_CRYPT_XOR:
 956		pr_warn("support for the xor transformation has been removed.\n");
 957		return -EINVAL;
 958	case LO_CRYPT_CRYPTOAPI:
 959		pr_warn("support for cryptoloop has been removed.  Use dm-crypt instead.\n");
 960		return -EINVAL;
 961	default:
 962		return -EINVAL;
 963	}
 964
 965	/* Avoid assigning overflow values */
 966	if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
 967		return -EOVERFLOW;
 968
 969	lo->lo_offset = info->lo_offset;
 970	lo->lo_sizelimit = info->lo_sizelimit;
 971
 972	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
 973	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
 974	lo->lo_flags = info->lo_flags;
 975	return 0;
 976}
 977
 978static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize,
 979		bool update_discard_settings)
 980{
 981	struct queue_limits lim;
 982
 983	lim = queue_limits_start_update(lo->lo_queue);
 984	lim.logical_block_size = bsize;
 985	lim.physical_block_size = bsize;
 986	lim.io_min = bsize;
 987	if (update_discard_settings)
 988		loop_config_discard(lo, &lim);
 989	return queue_limits_commit_update(lo->lo_queue, &lim);
 990}
 991
 992static int loop_configure(struct loop_device *lo, blk_mode_t mode,
 993			  struct block_device *bdev,
 994			  const struct loop_config *config)
 995{
 996	struct file *file = fget(config->fd);
 997	struct inode *inode;
 998	struct address_space *mapping;
 999	int error;
1000	loff_t size;
1001	bool partscan;
1002	unsigned short bsize;
1003	bool is_loop;
1004
1005	if (!file)
1006		return -EBADF;
1007	is_loop = is_loop_device(file);
1008
1009	/* This is safe, since we have a reference from open(). */
1010	__module_get(THIS_MODULE);
1011
1012	/*
1013	 * If we don't hold exclusive handle for the device, upgrade to it
1014	 * here to avoid changing device under exclusive owner.
1015	 */
1016	if (!(mode & BLK_OPEN_EXCL)) {
1017		error = bd_prepare_to_claim(bdev, loop_configure, NULL);
1018		if (error)
1019			goto out_putf;
1020	}
1021
1022	error = loop_global_lock_killable(lo, is_loop);
1023	if (error)
1024		goto out_bdev;
1025
1026	error = -EBUSY;
1027	if (lo->lo_state != Lo_unbound)
1028		goto out_unlock;
1029
1030	error = loop_validate_file(file, bdev);
1031	if (error)
1032		goto out_unlock;
1033
1034	mapping = file->f_mapping;
1035	inode = mapping->host;
1036
1037	if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
1038		error = -EINVAL;
1039		goto out_unlock;
1040	}
1041
1042	if (config->block_size) {
1043		error = blk_validate_block_size(config->block_size);
1044		if (error)
1045			goto out_unlock;
1046	}
1047
1048	error = loop_set_status_from_info(lo, &config->info);
1049	if (error)
1050		goto out_unlock;
1051
1052	if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
1053	    !file->f_op->write_iter)
1054		lo->lo_flags |= LO_FLAGS_READ_ONLY;
1055
1056	if (!lo->workqueue) {
1057		lo->workqueue = alloc_workqueue("loop%d",
1058						WQ_UNBOUND | WQ_FREEZABLE,
1059						0, lo->lo_number);
1060		if (!lo->workqueue) {
1061			error = -ENOMEM;
1062			goto out_unlock;
1063		}
1064	}
1065
1066	/* suppress uevents while reconfiguring the device */
1067	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
1068
1069	disk_force_media_change(lo->lo_disk);
1070	set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
1071
1072	lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
1073	lo->lo_device = bdev;
1074	lo->lo_backing_file = file;
1075	lo->old_gfp_mask = mapping_gfp_mask(mapping);
1076	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
1077
1078	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
1079		blk_queue_write_cache(lo->lo_queue, true, false);
1080
1081	if (config->block_size)
1082		bsize = config->block_size;
1083	else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
1084		/* In case of direct I/O, match underlying block size */
1085		bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
1086	else
1087		bsize = 512;
1088
1089	error = loop_reconfigure_limits(lo, bsize, true);
1090	if (WARN_ON_ONCE(error))
1091		goto out_unlock;
1092
1093	loop_update_rotational(lo);
1094	loop_update_dio(lo);
1095	loop_sysfs_init(lo);
1096
1097	size = get_loop_size(lo, file);
1098	loop_set_size(lo, size);
1099
1100	/* Order wrt reading lo_state in loop_validate_file(). */
1101	wmb();
1102
1103	lo->lo_state = Lo_bound;
1104	if (part_shift)
1105		lo->lo_flags |= LO_FLAGS_PARTSCAN;
1106	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
1107	if (partscan)
1108		clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
1109
1110	/* enable and uncork uevent now that we are done */
1111	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
1112
1113	loop_global_unlock(lo, is_loop);
1114	if (partscan)
1115		loop_reread_partitions(lo);
1116
1117	if (!(mode & BLK_OPEN_EXCL))
1118		bd_abort_claiming(bdev, loop_configure);
1119
1120	return 0;
1121
1122out_unlock:
1123	loop_global_unlock(lo, is_loop);
1124out_bdev:
1125	if (!(mode & BLK_OPEN_EXCL))
1126		bd_abort_claiming(bdev, loop_configure);
1127out_putf:
1128	fput(file);
1129	/* This is safe: open() is still holding a reference. */
1130	module_put(THIS_MODULE);
1131	return error;
1132}
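/*
 * Userspace sketch for LOOP_CONFIGURE (illustrative; lo_fd is an already-open
 * /dev/loopN descriptor and the path/values are made up). Unlike the legacy
 * LOOP_SET_FD + LOOP_SET_STATUS64 sequence, offset, flags and block size are
 * applied in a single call:
 *
 *	struct loop_config cfg = { 0 };
 *
 *	cfg.fd = open("disk.img", O_RDWR);
 *	cfg.block_size = 4096;
 *	cfg.info.lo_offset = 1048576;
 *
 *	if (ioctl(lo_fd, LOOP_CONFIGURE, &cfg) < 0)
 *		perror("LOOP_CONFIGURE");
 */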
1133
1134static void __loop_clr_fd(struct loop_device *lo, bool release)
1135{
1136	struct file *filp;
1137	gfp_t gfp = lo->old_gfp_mask;
1138
1139	if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
1140		blk_queue_write_cache(lo->lo_queue, false, false);
1141
1142	/*
1143	 * Freeze the request queue when unbinding on a live file descriptor and
1144	 * thus an open device.  When called from ->release we are guaranteed
1145	 * that there is no I/O in progress already.
1146	 */
1147	if (!release)
1148		blk_mq_freeze_queue(lo->lo_queue);
1149
1150	spin_lock_irq(&lo->lo_lock);
1151	filp = lo->lo_backing_file;
1152	lo->lo_backing_file = NULL;
1153	spin_unlock_irq(&lo->lo_lock);
1154
1155	lo->lo_device = NULL;
1156	lo->lo_offset = 0;
1157	lo->lo_sizelimit = 0;
1158	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
1159	loop_reconfigure_limits(lo, 512, false);
1160	invalidate_disk(lo->lo_disk);
1161	loop_sysfs_exit(lo);
1162	/* let user-space know about this change */
1163	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
1164	mapping_set_gfp_mask(filp->f_mapping, gfp);
1165	/* This is safe: open() is still holding a reference. */
1166	module_put(THIS_MODULE);
1167	if (!release)
1168		blk_mq_unfreeze_queue(lo->lo_queue);
1169
1170	disk_force_media_change(lo->lo_disk);
1171
1172	if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
1173		int err;
1174
1175		/*
1176		 * open_mutex has been held already in release path, so don't
1177		 * acquire it if this function is called in such case.
1178		 *
1179		 * If the reread partition isn't from release path, lo_refcnt
1180		 * must be at least one and it can only become zero when the
1181		 * current holder is released.
1182		 */
1183		if (!release)
1184			mutex_lock(&lo->lo_disk->open_mutex);
1185		err = bdev_disk_changed(lo->lo_disk, false);
1186		if (!release)
1187			mutex_unlock(&lo->lo_disk->open_mutex);
1188		if (err)
1189			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
1190				__func__, lo->lo_number, err);
1191		/* Device is gone, no point in returning error */
1192	}
1193
1194	/*
1195	 * lo->lo_state is set to Lo_unbound here after above partscan has
1196	 * finished. There cannot be anybody else entering __loop_clr_fd() as
1197	 * Lo_rundown state protects us from all the other places trying to
1198	 * change the 'lo' device.
1199	 */
1200	lo->lo_flags = 0;
1201	if (!part_shift)
1202		set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
1203	mutex_lock(&lo->lo_mutex);
1204	lo->lo_state = Lo_unbound;
1205	mutex_unlock(&lo->lo_mutex);
1206
1207	/*
1208	 * Need not hold lo_mutex to fput backing file. Calling fput holding
1209	 * lo_mutex triggers a circular lock dependency possibility warning as
1210	 * fput can take open_mutex which is usually taken before lo_mutex.
1211	 */
1212	fput(filp);
1213}
1214
1215static int loop_clr_fd(struct loop_device *lo)
1216{
1217	int err;
1218
1219	/*
1220	 * Since lo_ioctl() is called without locks held, it is possible that
1221	 * loop_configure()/loop_change_fd() and loop_clr_fd() run in parallel.
1222	 *
1223	 * Therefore, use global lock when setting Lo_rundown state in order to
1224	 * make sure that loop_validate_file() will fail if the "struct file"
1225	 * which loop_configure()/loop_change_fd() found via fget() was this
1226	 * loop device.
1227	 */
1228	err = loop_global_lock_killable(lo, true);
1229	if (err)
1230		return err;
1231	if (lo->lo_state != Lo_bound) {
1232		loop_global_unlock(lo, true);
1233		return -ENXIO;
1234	}
1235	/*
1236	 * If we've explicitly asked to tear down the loop device,
1237	 * and it has an elevated reference count, set it for auto-teardown when
1238	 * the last reference goes away. This stops $!~#$@ udev from
1239	 * preventing teardown because it decided that it needs to run blkid on
1240	 * the loopback device whenever they appear. xfstests is notorious for
1241	 * failing tests because blkid via udev races with a losetup
1242	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
1243	 * command to fail with EBUSY.
1244	 */
1245	if (disk_openers(lo->lo_disk) > 1) {
1246		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
1247		loop_global_unlock(lo, true);
1248		return 0;
1249	}
1250	lo->lo_state = Lo_rundown;
1251	loop_global_unlock(lo, true);
1252
1253	__loop_clr_fd(lo, false);
1254	return 0;
1255}
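/*
 * Userspace sketch (illustrative): detaching a device boils down to
 *
 *	ioctl(lo_fd, LOOP_CLR_FD, 0);
 *
 * If other openers still exist, the code above only sets LO_FLAGS_AUTOCLEAR
 * and the real teardown happens when the last opener goes away, see
 * lo_release() below.
 */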
1256
1257static int
1258loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1259{
1260	int err;
1261	int prev_lo_flags;
1262	bool partscan = false;
1263	bool size_changed = false;
1264
1265	err = mutex_lock_killable(&lo->lo_mutex);
1266	if (err)
1267		return err;
1268	if (lo->lo_state != Lo_bound) {
1269		err = -ENXIO;
1270		goto out_unlock;
1271	}
1272
1273	if (lo->lo_offset != info->lo_offset ||
1274	    lo->lo_sizelimit != info->lo_sizelimit) {
1275		size_changed = true;
1276		sync_blockdev(lo->lo_device);
1277		invalidate_bdev(lo->lo_device);
1278	}
1279
1280	/* I/O need to be drained during transfer transition */
1281	blk_mq_freeze_queue(lo->lo_queue);
1282
1283	prev_lo_flags = lo->lo_flags;
1284
1285	err = loop_set_status_from_info(lo, info);
1286	if (err)
1287		goto out_unfreeze;
1288
1289	/* Mask out flags that can't be set using LOOP_SET_STATUS. */
1290	lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
1291	/* For those flags, use the previous values instead */
1292	lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
1293	/* For flags that can't be cleared, use previous values too */
1294	lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
1295
1296	if (size_changed) {
1297		loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
1298					   lo->lo_backing_file);
1299		loop_set_size(lo, new_size);
1300	}
1301
1302	/* update dio if lo_offset or transfer is changed */
1303	__loop_update_dio(lo, lo->use_dio);
1304
1305out_unfreeze:
1306	blk_mq_unfreeze_queue(lo->lo_queue);
1307
1308	if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
1309	     !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
1310		clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
1311		partscan = true;
1312	}
1313out_unlock:
1314	mutex_unlock(&lo->lo_mutex);
1315	if (partscan)
1316		loop_reread_partitions(lo);
1317
1318	return err;
1319}
1320
1321static int
1322loop_get_status(struct loop_device *lo, struct loop_info64 *info)
1323{
1324	struct path path;
1325	struct kstat stat;
1326	int ret;
1327
1328	ret = mutex_lock_killable(&lo->lo_mutex);
1329	if (ret)
1330		return ret;
1331	if (lo->lo_state != Lo_bound) {
1332		mutex_unlock(&lo->lo_mutex);
1333		return -ENXIO;
1334	}
1335
 
1336	memset(info, 0, sizeof(*info));
1337	info->lo_number = lo->lo_number;
1338	info->lo_offset = lo->lo_offset;
1339	info->lo_sizelimit = lo->lo_sizelimit;
1340	info->lo_flags = lo->lo_flags;
1341	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1342
1343	/* Drop lo_mutex while we call into the filesystem. */
1344	path = lo->lo_backing_file->f_path;
1345	path_get(&path);
1346	mutex_unlock(&lo->lo_mutex);
1347	ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
1348	if (!ret) {
1349		info->lo_device = huge_encode_dev(stat.dev);
1350		info->lo_inode = stat.ino;
1351		info->lo_rdevice = huge_encode_dev(stat.rdev);
1352	}
1353	path_put(&path);
1354	return ret;
1355}
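/*
 * Userspace sketch for LOOP_GET_STATUS64 (illustrative; lo_fd is an open
 * /dev/loopN descriptor):
 *
 *	struct loop_info64 info;
 *
 *	if (ioctl(lo_fd, LOOP_GET_STATUS64, &info) == 0)
 *		printf("offset=%llu flags=0x%x\n",
 *		       (unsigned long long)info.lo_offset, info.lo_flags);
 */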
1356
1357static void
1358loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
1359{
1360	memset(info64, 0, sizeof(*info64));
1361	info64->lo_number = info->lo_number;
1362	info64->lo_device = info->lo_device;
1363	info64->lo_inode = info->lo_inode;
1364	info64->lo_rdevice = info->lo_rdevice;
1365	info64->lo_offset = info->lo_offset;
1366	info64->lo_sizelimit = 0;
1367	info64->lo_flags = info->lo_flags;
1368	memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
1369}
1370
1371static int
1372loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
1373{
1374	memset(info, 0, sizeof(*info));
1375	info->lo_number = info64->lo_number;
1376	info->lo_device = info64->lo_device;
1377	info->lo_inode = info64->lo_inode;
1378	info->lo_rdevice = info64->lo_rdevice;
1379	info->lo_offset = info64->lo_offset;
1380	info->lo_flags = info64->lo_flags;
1381	memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
1382
1383	/* error in case values were truncated */
1384	if (info->lo_device != info64->lo_device ||
1385	    info->lo_rdevice != info64->lo_rdevice ||
1386	    info->lo_inode != info64->lo_inode ||
1387	    info->lo_offset != info64->lo_offset)
1388		return -EOVERFLOW;
1389
1390	return 0;
1391}
1392
1393static int
1394loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1395{
1396	struct loop_info info;
1397	struct loop_info64 info64;
1398
1399	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
1400		return -EFAULT;
1401	loop_info64_from_old(&info, &info64);
1402	return loop_set_status(lo, &info64);
1403}
1404
1405static int
1406loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1407{
1408	struct loop_info64 info64;
1409
1410	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
1411		return -EFAULT;
1412	return loop_set_status(lo, &info64);
1413}
1414
1415static int
1416loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1417	struct loop_info info;
1418	struct loop_info64 info64;
1419	int err;
1420
1421	if (!arg)
1422		return -EINVAL;
1423	err = loop_get_status(lo, &info64);
1424	if (!err)
1425		err = loop_info64_to_old(&info64, &info);
1426	if (!err && copy_to_user(arg, &info, sizeof(info)))
1427		err = -EFAULT;
1428
1429	return err;
1430}
1431
1432static int
1433loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1434	struct loop_info64 info64;
1435	int err;
1436
1437	if (!arg)
1438		return -EINVAL;
1439	err = loop_get_status(lo, &info64);
1440	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1441		err = -EFAULT;
1442
1443	return err;
1444}
1445
1446static int loop_set_capacity(struct loop_device *lo)
1447{
1448	loff_t size;
1449
1450	if (unlikely(lo->lo_state != Lo_bound))
1451		return -ENXIO;
1452
1453	size = get_loop_size(lo, lo->lo_backing_file);
1454	loop_set_size(lo, size);
1455
1456	return 0;
1457}
1458
1459static int loop_set_dio(struct loop_device *lo, unsigned long arg)
1460{
1461	int error = -ENXIO;
1462	if (lo->lo_state != Lo_bound)
1463		goto out;
1464
1465	__loop_update_dio(lo, !!arg);
1466	if (lo->use_dio == !!arg)
1467		return 0;
1468	error = -EINVAL;
1469 out:
1470	return error;
1471}
1472
1473static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
1474{
1475	int err = 0;
1476
1477	if (lo->lo_state != Lo_bound)
1478		return -ENXIO;
1479
1480	err = blk_validate_block_size(arg);
1481	if (err)
1482		return err;
1483
1484	if (lo->lo_queue->limits.logical_block_size == arg)
1485		return 0;
1486
1487	sync_blockdev(lo->lo_device);
1488	invalidate_bdev(lo->lo_device);
1489
1490	blk_mq_freeze_queue(lo->lo_queue);
1491	err = loop_reconfigure_limits(lo, arg, false);
1492	loop_update_dio(lo);
1493	blk_mq_unfreeze_queue(lo->lo_queue);
1494
1495	return err;
1496}
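/*
 * Userspace sketch (illustrative): LOOP_SET_BLOCK_SIZE and LOOP_SET_DIRECT_IO
 * take the value directly as the ioctl argument rather than via a pointer:
 *
 *	ioctl(lo_fd, LOOP_SET_BLOCK_SIZE, 4096);
 *	ioctl(lo_fd, LOOP_SET_DIRECT_IO, 1);
 */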
1497
1498static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
1499			   unsigned long arg)
1500{
1501	int err;
1502
1503	err = mutex_lock_killable(&lo->lo_mutex);
1504	if (err)
1505		return err;
1506	switch (cmd) {
1507	case LOOP_SET_CAPACITY:
1508		err = loop_set_capacity(lo);
1509		break;
1510	case LOOP_SET_DIRECT_IO:
1511		err = loop_set_dio(lo, arg);
1512		break;
1513	case LOOP_SET_BLOCK_SIZE:
1514		err = loop_set_block_size(lo, arg);
1515		break;
1516	default:
1517		err = -EINVAL;
1518	}
1519	mutex_unlock(&lo->lo_mutex);
1520	return err;
1521}
1522
1523static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
1524	unsigned int cmd, unsigned long arg)
1525{
1526	struct loop_device *lo = bdev->bd_disk->private_data;
1527	void __user *argp = (void __user *) arg;
1528	int err;
1529
1530	switch (cmd) {
1531	case LOOP_SET_FD: {
1532		/*
1533		 * Legacy case - pass in a zeroed out struct loop_config with
 1534		 * only the file descriptor set, which corresponds with the
1535		 * default parameters we'd have used otherwise.
1536		 */
1537		struct loop_config config;
1538
1539		memset(&config, 0, sizeof(config));
1540		config.fd = arg;
1541
1542		return loop_configure(lo, mode, bdev, &config);
1543	}
1544	case LOOP_CONFIGURE: {
1545		struct loop_config config;
1546
1547		if (copy_from_user(&config, argp, sizeof(config)))
1548			return -EFAULT;
1549
1550		return loop_configure(lo, mode, bdev, &config);
1551	}
1552	case LOOP_CHANGE_FD:
1553		return loop_change_fd(lo, bdev, arg);
1554	case LOOP_CLR_FD:
1555		return loop_clr_fd(lo);
1556	case LOOP_SET_STATUS:
1557		err = -EPERM;
1558		if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
1559			err = loop_set_status_old(lo, argp);
1560		break;
1561	case LOOP_GET_STATUS:
1562		return loop_get_status_old(lo, argp);
1563	case LOOP_SET_STATUS64:
1564		err = -EPERM;
1565		if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
1566			err = loop_set_status64(lo, argp);
1567		break;
1568	case LOOP_GET_STATUS64:
1569		return loop_get_status64(lo, argp);
1570	case LOOP_SET_CAPACITY:
1571	case LOOP_SET_DIRECT_IO:
1572	case LOOP_SET_BLOCK_SIZE:
1573		if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
1574			return -EPERM;
1575		fallthrough;
1576	default:
1577		err = lo_simple_ioctl(lo, cmd, arg);
1578		break;
1579	}
1580
1581	return err;
1582}
1583
1584#ifdef CONFIG_COMPAT
1585struct compat_loop_info {
1586	compat_int_t	lo_number;      /* ioctl r/o */
1587	compat_dev_t	lo_device;      /* ioctl r/o */
1588	compat_ulong_t	lo_inode;       /* ioctl r/o */
1589	compat_dev_t	lo_rdevice;     /* ioctl r/o */
1590	compat_int_t	lo_offset;
1591	compat_int_t	lo_encrypt_type;        /* obsolete, ignored */
1592	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
1593	compat_int_t	lo_flags;       /* ioctl r/o */
1594	char		lo_name[LO_NAME_SIZE];
1595	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
1596	compat_ulong_t	lo_init[2];
1597	char		reserved[4];
1598};
1599
1600/*
1601 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
1602 * - noinlined to reduce stack space usage in main part of driver
1603 */
1604static noinline int
1605loop_info64_from_compat(const struct compat_loop_info __user *arg,
1606			struct loop_info64 *info64)
1607{
1608	struct compat_loop_info info;
1609
1610	if (copy_from_user(&info, arg, sizeof(info)))
1611		return -EFAULT;
1612
1613	memset(info64, 0, sizeof(*info64));
1614	info64->lo_number = info.lo_number;
1615	info64->lo_device = info.lo_device;
1616	info64->lo_inode = info.lo_inode;
1617	info64->lo_rdevice = info.lo_rdevice;
1618	info64->lo_offset = info.lo_offset;
1619	info64->lo_sizelimit = 0;
1620	info64->lo_flags = info.lo_flags;
1621	memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
1622	return 0;
1623}
1624
1625/*
1626 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
1627 * - noinlined to reduce stack space usage in main part of driver
1628 */
1629static noinline int
1630loop_info64_to_compat(const struct loop_info64 *info64,
1631		      struct compat_loop_info __user *arg)
1632{
1633	struct compat_loop_info info;
1634
1635	memset(&info, 0, sizeof(info));
1636	info.lo_number = info64->lo_number;
1637	info.lo_device = info64->lo_device;
1638	info.lo_inode = info64->lo_inode;
1639	info.lo_rdevice = info64->lo_rdevice;
1640	info.lo_offset = info64->lo_offset;
1641	info.lo_flags = info64->lo_flags;
1642	memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
1643
1644	/* error in case values were truncated */
1645	if (info.lo_device != info64->lo_device ||
1646	    info.lo_rdevice != info64->lo_rdevice ||
1647	    info.lo_inode != info64->lo_inode ||
1648	    info.lo_offset != info64->lo_offset)
1649		return -EOVERFLOW;
1650
1651	if (copy_to_user(arg, &info, sizeof(info)))
1652		return -EFAULT;
1653	return 0;
1654}
1655
1656static int
1657loop_set_status_compat(struct loop_device *lo,
1658		       const struct compat_loop_info __user *arg)
1659{
1660	struct loop_info64 info64;
1661	int ret;
1662
1663	ret = loop_info64_from_compat(arg, &info64);
1664	if (ret < 0)
1665		return ret;
1666	return loop_set_status(lo, &info64);
1667}
1668
1669static int
1670loop_get_status_compat(struct loop_device *lo,
1671		       struct compat_loop_info __user *arg)
1672{
1673	struct loop_info64 info64;
1674	int err;
1675
1676	if (!arg)
1677		return -EINVAL;
1678	err = loop_get_status(lo, &info64);
1679	if (!err)
1680		err = loop_info64_to_compat(&info64, arg);
1681	return err;
1682}
1683
1684static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
1685			   unsigned int cmd, unsigned long arg)
1686{
1687	struct loop_device *lo = bdev->bd_disk->private_data;
1688	int err;
1689
1690	switch(cmd) {
1691	case LOOP_SET_STATUS:
1692		err = loop_set_status_compat(lo,
1693			     (const struct compat_loop_info __user *)arg);
1694		break;
1695	case LOOP_GET_STATUS:
1696		err = loop_get_status_compat(lo,
1697				     (struct compat_loop_info __user *)arg);
1698		break;
1699	case LOOP_SET_CAPACITY:
1700	case LOOP_CLR_FD:
1701	case LOOP_GET_STATUS64:
1702	case LOOP_SET_STATUS64:
1703	case LOOP_CONFIGURE:
1704		arg = (unsigned long) compat_ptr(arg);
1705		fallthrough;
1706	case LOOP_SET_FD:
1707	case LOOP_CHANGE_FD:
1708	case LOOP_SET_BLOCK_SIZE:
1709	case LOOP_SET_DIRECT_IO:
1710		err = lo_ioctl(bdev, mode, cmd, arg);
1711		break;
1712	default:
1713		err = -ENOIOCTLCMD;
1714		break;
1715	}
1716	return err;
1717}
1718#endif
1719
1720static void lo_release(struct gendisk *disk)
1721{
1722	struct loop_device *lo = disk->private_data;
1723
1724	if (disk_openers(disk) > 0)
1725		return;
1726
1727	mutex_lock(&lo->lo_mutex);
1728	if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) {
1729		lo->lo_state = Lo_rundown;
1730		mutex_unlock(&lo->lo_mutex);
1731		/*
1732		 * In autoclear mode, stop the loop thread
1733		 * and remove configuration after last close.
1734		 */
1735		__loop_clr_fd(lo, true);
1736		return;
1737	}
1738	mutex_unlock(&lo->lo_mutex);
1739}
1740
1741static void lo_free_disk(struct gendisk *disk)
1742{
1743	struct loop_device *lo = disk->private_data;
1744
1745	if (lo->workqueue)
1746		destroy_workqueue(lo->workqueue);
1747	loop_free_idle_workers(lo, true);
1748	timer_shutdown_sync(&lo->timer);
1749	mutex_destroy(&lo->lo_mutex);
1750	kfree(lo);
1751}
1752
1753static const struct block_device_operations lo_fops = {
1754	.owner =	THIS_MODULE,
1755	.release =	lo_release,
1756	.ioctl =	lo_ioctl,
1757#ifdef CONFIG_COMPAT
1758	.compat_ioctl =	lo_compat_ioctl,
1759#endif
1760	.free_disk =	lo_free_disk,
1761};
1762
1763/*
1764 * And now the modules code and kernel interface.
1765 */
1766
1767/*
1768 * If max_loop is specified, create that many devices upfront.
1769 * This also becomes a hard limit. If max_loop is not specified,
1770 * the default isn't a hard limit (as before commit 85c50197716c
1771 * changed the default value from 0 for max_loop=0 reasons), just
1772 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
1773 * init time. Loop devices can be requested on-demand with the
1774 * /dev/loop-control interface, or be instantiated by accessing
1775 * a 'dead' device node.
1776 */
1777static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
1778
1779#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
1780static bool max_loop_specified;
1781
1782static int max_loop_param_set_int(const char *val,
1783				  const struct kernel_param *kp)
1784{
1785	int ret;
1786
1787	ret = param_set_int(val, kp);
1788	if (ret < 0)
1789		return ret;
1790
1791	max_loop_specified = true;
1792	return 0;
1793}
1794
1795static const struct kernel_param_ops max_loop_param_ops = {
1796	.set = max_loop_param_set_int,
1797	.get = param_get_int,
1798};
1799
1800module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
1801MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
1802#else
1803module_param(max_loop, int, 0444);
1804MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
1805#endif
1806
1807module_param(max_part, int, 0444);
1808MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
1809
1810static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH;
1811
1812static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p)
1813{
1814	int qd, ret;
1815
1816	ret = kstrtoint(s, 0, &qd);
1817	if (ret < 0)
1818		return ret;
1819	if (qd < 1)
1820		return -EINVAL;
1821	hw_queue_depth = qd;
1822	return 0;
1823}
1824
1825static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
1826	.set	= loop_set_hw_queue_depth,
1827	.get	= param_get_int,
1828};
1829
1830device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
1831MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH));
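/*
 * Illustrative module load using the parameters declared above (the values
 * are arbitrary examples):
 *
 *	modprobe loop max_loop=8 max_part=15 hw_queue_depth=256
 */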
1832
1833MODULE_LICENSE("GPL");
1834MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
1835
1836static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1837		const struct blk_mq_queue_data *bd)
1838{
1839	struct request *rq = bd->rq;
1840	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
1841	struct loop_device *lo = rq->q->queuedata;
1842
1843	blk_mq_start_request(rq);
1844
1845	if (lo->lo_state != Lo_bound)
1846		return BLK_STS_IOERR;
1847
1848	switch (req_op(rq)) {
1849	case REQ_OP_FLUSH:
1850	case REQ_OP_DISCARD:
1851	case REQ_OP_WRITE_ZEROES:
1852		cmd->use_aio = false;
1853		break;
1854	default:
1855		cmd->use_aio = lo->use_dio;
1856		break;
1857	}
1858
1859	/* always use the first bio's css */
1860	cmd->blkcg_css = NULL;
1861	cmd->memcg_css = NULL;
1862#ifdef CONFIG_BLK_CGROUP
1863	if (rq->bio) {
1864		cmd->blkcg_css = bio_blkcg_css(rq->bio);
1865#ifdef CONFIG_MEMCG
1866		if (cmd->blkcg_css) {
1867			cmd->memcg_css =
1868				cgroup_get_e_css(cmd->blkcg_css->cgroup,
1869						&memory_cgrp_subsys);
1870		}
1871#endif
1872	}
1873#endif
1874	loop_queue_work(lo, cmd);
1875
1876	return BLK_STS_OK;
1877}
1878
1879static void loop_handle_cmd(struct loop_cmd *cmd)
1880{
1881	struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
1882	struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
1883	struct request *rq = blk_mq_rq_from_pdu(cmd);
1884	const bool write = op_is_write(req_op(rq));
1885	struct loop_device *lo = rq->q->queuedata;
1886	int ret = 0;
1887	struct mem_cgroup *old_memcg = NULL;
1888	const bool use_aio = cmd->use_aio;
1889
1890	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
1891		ret = -EIO;
1892		goto failed;
1893	}
1894
1895	if (cmd_blkcg_css)
1896		kthread_associate_blkcg(cmd_blkcg_css);
1897	if (cmd_memcg_css)
1898		old_memcg = set_active_memcg(
1899			mem_cgroup_from_css(cmd_memcg_css));
1900
1901	/*
1902	 * do_req_filebacked() may call blk_mq_complete_request() synchronously
1903	 * or asynchronously if using aio. Hence, do not touch 'cmd' after
1904	 * do_req_filebacked() has returned unless we are sure that 'cmd' has
1905	 * not yet been completed.
1906	 */
1907	ret = do_req_filebacked(lo, rq);
1908
1909	if (cmd_blkcg_css)
1910		kthread_associate_blkcg(NULL);
1911
1912	if (cmd_memcg_css) {
1913		set_active_memcg(old_memcg);
1914		css_put(cmd_memcg_css);
1915	}
1916 failed:
1917	/* complete non-aio request */
1918	if (!use_aio || ret) {
1919		if (ret == -EOPNOTSUPP)
1920			cmd->ret = ret;
1921		else
1922			cmd->ret = ret ? -EIO : 0;
1923		if (likely(!blk_should_fake_timeout(rq->q)))
1924			blk_mq_complete_request(rq);
1925	}
1926}
1927
1928static void loop_process_work(struct loop_worker *worker,
1929			struct list_head *cmd_list, struct loop_device *lo)
1930{
1931	int orig_flags = current->flags;
1932	struct loop_cmd *cmd;
1933
1934	current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
1935	spin_lock_irq(&lo->lo_work_lock);
1936	while (!list_empty(cmd_list)) {
1937		cmd = container_of(
1938			cmd_list->next, struct loop_cmd, list_entry);
1939		list_del(cmd_list->next);
1940		spin_unlock_irq(&lo->lo_work_lock);
1941
1942		loop_handle_cmd(cmd);
1943		cond_resched();
1944
1945		spin_lock_irq(&lo->lo_work_lock);
1946	}
1947
1948	/*
1949	 * We only add to the idle list if there are no pending cmds
1950	 * *and* the worker will not run again, which ensures that it
1951	 * is safe to free any worker on the idle list.
1952	 */
1953	if (worker && !work_pending(&worker->work)) {
1954		worker->last_ran_at = jiffies;
1955		list_add_tail(&worker->idle_list, &lo->idle_worker_list);
1956		loop_set_timer(lo);
1957	}
1958	spin_unlock_irq(&lo->lo_work_lock);
1959	current->flags = orig_flags;
1960}
1961
1962static void loop_workfn(struct work_struct *work)
1963{
1964	struct loop_worker *worker =
1965		container_of(work, struct loop_worker, work);
1966	loop_process_work(worker, &worker->cmd_list, worker->lo);
1967}
1968
1969static void loop_rootcg_workfn(struct work_struct *work)
1970{
1971	struct loop_device *lo =
1972		container_of(work, struct loop_device, rootcg_work);
1973	loop_process_work(NULL, &lo->rootcg_cmd_list, lo);
1974}
1975
1976static const struct blk_mq_ops loop_mq_ops = {
1977	.queue_rq       = loop_queue_rq,
1978	.complete	= lo_complete_rq,
1979};
1980
1981static int loop_add(int i)
1982{
1983	struct queue_limits lim = {
1984		/*
1985		 * Random number picked from the historic block max_sectors cap.
1986		 */
1987		.max_hw_sectors		= 2560u,
1988	};
1989	struct loop_device *lo;
1990	struct gendisk *disk;
1991	int err;
1992
1993	err = -ENOMEM;
1994	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1995	if (!lo)
1996		goto out;
1997	lo->worker_tree = RB_ROOT;
1998	INIT_LIST_HEAD(&lo->idle_worker_list);
1999	timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE);
2000	lo->lo_state = Lo_unbound;
2001
2002	err = mutex_lock_killable(&loop_ctl_mutex);
2003	if (err)
2004		goto out_free_dev;
2005
2006	/* Allocate an id; if @i >= 0, we are requesting that specific id. */
2007	if (i >= 0) {
2008		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
2009		if (err == -ENOSPC)
2010			err = -EEXIST;
2011	} else {
2012		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
2013	}
2014	mutex_unlock(&loop_ctl_mutex);
2015	if (err < 0)
2016		goto out_free_dev;
2017	i = err;
2018
2019	lo->tag_set.ops = &loop_mq_ops;
2020	lo->tag_set.nr_hw_queues = 1;
2021	lo->tag_set.queue_depth = hw_queue_depth;
2022	lo->tag_set.numa_node = NUMA_NO_NODE;
2023	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
2024	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
2025		BLK_MQ_F_NO_SCHED_BY_DEFAULT;
2026	lo->tag_set.driver_data = lo;
2027
2028	err = blk_mq_alloc_tag_set(&lo->tag_set);
2029	if (err)
2030		goto out_free_idr;
2031
2032	disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, &lim, lo);
2033	if (IS_ERR(disk)) {
2034		err = PTR_ERR(disk);
2035		goto out_cleanup_tags;
2036	}
2037	lo->lo_queue = lo->lo_disk->queue;
2038
2039	/*
2040	 * By default, we do buffered I/O, so it doesn't make sense to enable
2041	 * merging: the I/O submitted to the backing file is handled page by
2042	 * page. For direct I/O mode, merging does help to dispatch bigger
2043	 * requests to the underlying disk, so it is enabled once direct I/O is.
2044	 */
2045	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
2046
2047	/*
2048	 * Disable partition scanning by default. The in-kernel partition
2049	 * scanning can be requested individually per-device during its
2050	 * setup. Userspace can always add and remove partitions from all
2051	 * devices. The needed partition minors are allocated from the
2052	 * extended minor space, the main loop device numbers will continue
2053	 * to match the loop minors, regardless of the number of partitions
2054	 * used.
2055	 *
2056	 * If max_part is given, partition scanning is globally enabled for
2057	 * all loop devices. The minors for the main loop devices will be
2058	 * multiples of max_part.
2059	 *
2060	 * Note: Global-for-all-devices, set-only-at-init, read-only module
2061	 * parameters like 'max_loop' and 'max_part' make things needlessly
2062	 * complicated, are too static, inflexible and may surprise
2063	 * userspace tools. Parameters like this in general should be avoided.
2064	 */
2065	if (!part_shift)
2066		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
2067	mutex_init(&lo->lo_mutex);
 
2068	lo->lo_number		= i;
2069	spin_lock_init(&lo->lo_lock);
2070	spin_lock_init(&lo->lo_work_lock);
2071	INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
2072	INIT_LIST_HEAD(&lo->rootcg_cmd_list);
2073	disk->major		= LOOP_MAJOR;
2074	disk->first_minor	= i << part_shift;
2075	disk->minors		= 1 << part_shift;
2076	disk->fops		= &lo_fops;
2077	disk->private_data	= lo;
2078	disk->queue		= lo->lo_queue;
2079	disk->events		= DISK_EVENT_MEDIA_CHANGE;
2080	disk->event_flags	= DISK_EVENT_FLAG_UEVENT;
2081	sprintf(disk->disk_name, "loop%d", i);
2082	/* Make this loop device reachable from pathname. */
2083	err = add_disk(disk);
2084	if (err)
2085		goto out_cleanup_disk;
2086
2087	/* Show this loop device. */
2088	mutex_lock(&loop_ctl_mutex);
2089	lo->idr_visible = true;
2090	mutex_unlock(&loop_ctl_mutex);
2091
2092	return i;
2093
2094out_cleanup_disk:
2095	put_disk(disk);
2096out_cleanup_tags:
2097	blk_mq_free_tag_set(&lo->tag_set);
2098out_free_idr:
2099	mutex_lock(&loop_ctl_mutex);
2100	idr_remove(&loop_index_idr, i);
2101	mutex_unlock(&loop_ctl_mutex);
2102out_free_dev:
2103	kfree(lo);
2104out:
2105	return err;
2106}
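/*
 * Example (illustrative): the two per-device defaults chosen in loop_add()
 * above (no merging while doing buffered I/O, partition scanning
 * suppressed) are normally changed from userspace once a backing file is
 * attached; losetup uses LOOP_SET_DIRECT_IO for --direct-io and requests a
 * partition scan with -P.
 *
 *	# losetup --direct-io=on -P /dev/loop0 disk.img
 *	# lsblk /dev/loop0
 */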
2107
2108static void loop_remove(struct loop_device *lo)
2109{
2110	/* Make this loop device unreachable from pathname. */
2111	del_gendisk(lo->lo_disk);
2112	blk_mq_free_tag_set(&lo->tag_set);
2113
2114	mutex_lock(&loop_ctl_mutex);
2115	idr_remove(&loop_index_idr, lo->lo_number);
2116	mutex_unlock(&loop_ctl_mutex);
2117
2118	put_disk(lo->lo_disk);
 
2119}
2120
2121#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
2122static void loop_probe(dev_t dev)
2123{
2124	int idx = MINOR(dev) >> part_shift;
 
2125
2126	if (max_loop_specified && max_loop && idx >= max_loop)
2127		return;
2128	loop_add(idx);
2129}
2130#else
2131#define loop_probe NULL
2132#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */
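/*
 * Example (illustrative): with CONFIG_BLOCK_LEGACY_AUTOLOAD, opening a
 * 'dead' device node triggers loop_probe() above and instantiates the
 * device on demand, as long as the index stays below a user-specified
 * max_loop.
 *
 *	# mknod /dev/loop9 b 7 9
 *	# losetup /dev/loop9 disk.img
 */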
2133
2134static int loop_control_remove(int idx)
2135{
2136	struct loop_device *lo;
2137	int ret;
2138
2139	if (idx < 0) {
2140		pr_warn_once("deleting an unspecified loop device is not supported.\n");
2141		return -EINVAL;
2142	}
2143
2144	/* Hide this loop device for serialization. */
2145	ret = mutex_lock_killable(&loop_ctl_mutex);
2146	if (ret)
2147		return ret;
2148	lo = idr_find(&loop_index_idr, idx);
2149	if (!lo || !lo->idr_visible)
2150		ret = -ENODEV;
2151	else
2152		lo->idr_visible = false;
2153	mutex_unlock(&loop_ctl_mutex);
2154	if (ret)
2155		return ret;
2156
2157	/* Check whether this loop device can be removed. */
2158	ret = mutex_lock_killable(&lo->lo_mutex);
2159	if (ret)
2160		goto mark_visible;
2161	if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) {
2162		mutex_unlock(&lo->lo_mutex);
2163		ret = -EBUSY;
2164		goto mark_visible;
2165	}
2166	/* Mark this loop device as no longer bound, but not quite unbound yet. */
2167	lo->lo_state = Lo_deleting;
2168	mutex_unlock(&lo->lo_mutex);
2169
2170	loop_remove(lo);
2171	return 0;
2172
2173mark_visible:
2174	/* Show this loop device again. */
2175	mutex_lock(&loop_ctl_mutex);
2176	lo->idr_visible = true;
2177	mutex_unlock(&loop_ctl_mutex);
2178	return ret;
2179}
2180
2181static int loop_control_get_free(int idx)
2182{
2183	struct loop_device *lo;
2184	int id, ret;
 
2185
2186	ret = mutex_lock_killable(&loop_ctl_mutex);
2187	if (ret)
2188		return ret;
2189	idr_for_each_entry(&loop_index_idr, lo, id) {
2190		/* Hitting a race results in creating a new loop device which is harmless. */
2191		if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
2192			goto found;
2193	}
2194	mutex_unlock(&loop_ctl_mutex);
2195	return loop_add(-1);
2196found:
2197	mutex_unlock(&loop_ctl_mutex);
2198	return id;
2199}
2200
2201static long loop_control_ioctl(struct file *file, unsigned int cmd,
2202			       unsigned long parm)
2203{
2204	switch (cmd) {
2205	case LOOP_CTL_ADD:
2206		return loop_add(parm);
2207	case LOOP_CTL_REMOVE:
2208		return loop_control_remove(parm);
2209	case LOOP_CTL_GET_FREE:
2210		return loop_control_get_free(parm);
2211	default:
2212		return -ENOSYS;
2213	}
2214}
2215
2216static const struct file_operations loop_ctl_fops = {
2217	.open		= nonseekable_open,
2218	.unlocked_ioctl	= loop_control_ioctl,
2219	.compat_ioctl	= loop_control_ioctl,
2220	.owner		= THIS_MODULE,
2221	.llseek		= noop_llseek,
2222};
2223
2224static struct miscdevice loop_misc = {
2225	.minor		= LOOP_CTRL_MINOR,
2226	.name		= "loop-control",
2227	.fops		= &loop_ctl_fops,
2228};
2229
2230MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
2231MODULE_ALIAS("devname:loop-control");
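/*
 * Example (illustrative userspace sketch, error handling omitted): the
 * /dev/loop-control node registered above is driven with the LOOP_CTL_*
 * ioctls handled by loop_control_ioctl().
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int ctl = open("/dev/loop-control", O_RDWR);
 *	int idx = ioctl(ctl, LOOP_CTL_GET_FREE);  // first unbound index, e.g. 0
 *	ioctl(ctl, LOOP_CTL_ADD, 7);              // fails with EEXIST if loop7 exists
 *	ioctl(ctl, LOOP_CTL_REMOVE, idx);         // fails with EBUSY while bound or open
 */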
2232
2233static int __init loop_init(void)
2234{
2235	int i;
2236	int err;
2237
2238	part_shift = 0;
2239	if (max_part > 0) {
2240		part_shift = fls(max_part);
2241
2242		/*
2243		 * Adjust max_part according to part_shift as it is exported
2244		 * to user space so that the user can work out the correct minor
2245		 * number when creating more devices.
2246		 *
2247		 * Note that -1 is required because partition 0 is reserved
2248		 * for the whole disk.
2249		 */
2250		max_part = (1UL << part_shift) - 1;
2251	}
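	/*
	 * Worked example (illustrative): max_part=10 gives part_shift =
	 * fls(10) = 4, so max_part is rounded up and re-exported as 15,
	 * and each loop device spans 16 minors (the whole disk plus 15
	 * partitions).
	 */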
2252
2253	if ((1UL << part_shift) > DISK_MAX_PARTS) {
2254		err = -EINVAL;
2255		goto err_out;
2256	}
2257
2258	if (max_loop > 1UL << (MINORBITS - part_shift)) {
2259		err = -EINVAL;
2260		goto err_out;
2261	}
2262
2263	err = misc_register(&loop_misc);
2264	if (err < 0)
2265		goto err_out;
2266
2267
2268	if (__register_blkdev(LOOP_MAJOR, "loop", loop_probe)) {
2269		err = -EIO;
2270		goto misc_out;
2271	}
2272
2273	/* pre-create number of devices given by config or max_loop */
2274	for (i = 0; i < max_loop; i++)
2275		loop_add(i);
2276
2277	printk(KERN_INFO "loop: module loaded\n");
2278	return 0;
2279
2280misc_out:
2281	misc_deregister(&loop_misc);
2282err_out:
2283	return err;
2284}
2285
2286static void __exit loop_exit(void)
2287{
2288	struct loop_device *lo;
2289	int id;
2290
2291	unregister_blkdev(LOOP_MAJOR, "loop");
2292	misc_deregister(&loop_misc);
 
2293
2294	/*
2295	 * There is no need to take loop_ctl_mutex here: nothing else can
2296	 * access loop_index_idr while this module is unloading (unless forced
2297	 * module unloading is requested). If the unload is not clean, there
2298	 * is no way to avoid a kernel crash anyway.
2299	 */
2300	idr_for_each_entry(&loop_index_idr, lo, id)
2301		loop_remove(lo);
2302
2303	idr_destroy(&loop_index_idr);
2304}
2305
2306module_init(loop_init);
2307module_exit(loop_exit);
2308
2309#ifndef MODULE
2310static int __init max_loop_setup(char *str)
2311{
2312	max_loop = simple_strtol(str, NULL, 0);
2313#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
2314	max_loop_specified = true;
2315#endif
2316	return 1;
2317}
2318
2319__setup("max_loop=", max_loop_setup);
2320#endif
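/*
 * Example (illustrative): when the driver is built in, the same limits are
 * set on the kernel command line; max_loop= is parsed by max_loop_setup()
 * above, while the module parameters take the "loop." prefix.
 *
 *	linux ... max_loop=16 loop.max_part=7 loop.hw_queue_depth=256
 */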
v3.15
 
   1/*
   2 *  linux/drivers/block/loop.c
   3 *
   4 *  Written by Theodore Ts'o, 3/29/93
   5 *
   6 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
   7 * permitted under the GNU General Public License.
   8 *
   9 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
  10 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
  11 *
  12 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
  13 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
  14 *
  15 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
  16 *
  17 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
  18 *
  19 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
  20 *
  21 * Loadable modules and other fixes by AK, 1998
  22 *
  23 * Make real block number available to downstream transfer functions, enables
  24 * CBC (and relatives) mode encryption requiring unique IVs per data block.
  25 * Reed H. Petty, rhp@draper.net
  26 *
  27 * Maximum number of loop devices now dynamic via max_loop module parameter.
  28 * Russell Kroll <rkroll@exploits.org> 19990701
  29 *
  30 * Maximum number of loop devices when compiled-in now selectable by passing
  31 * max_loop=<1-255> to the kernel on boot.
  32 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
  33 *
  34 * Completely rewrite request handling to be make_request_fn style and
  35 * non blocking, pushing work to a helper thread. Lots of fixes from
  36 * Al Viro too.
  37 * Jens Axboe <axboe@suse.de>, Nov 2000
  38 *
  39 * Support up to 256 loop devices
  40 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
  41 *
  42 * Support for falling back on the write file operation when the address space
  43 * operations write_begin is not available on the backing filesystem.
  44 * Anton Altaparmakov, 16 Feb 2005
  45 *
  46 * Still To Fix:
  47 * - Advisory locking is ignored here.
  48 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
  49 *
  50 */
  51
  52#include <linux/module.h>
  53#include <linux/moduleparam.h>
  54#include <linux/sched.h>
  55#include <linux/fs.h>
 
  56#include <linux/file.h>
  57#include <linux/stat.h>
  58#include <linux/errno.h>
  59#include <linux/major.h>
  60#include <linux/wait.h>
  61#include <linux/blkdev.h>
  62#include <linux/blkpg.h>
  63#include <linux/init.h>
  64#include <linux/swap.h>
  65#include <linux/slab.h>
  66#include <linux/compat.h>
  67#include <linux/suspend.h>
  68#include <linux/freezer.h>
  69#include <linux/mutex.h>
  70#include <linux/writeback.h>
  71#include <linux/completion.h>
  72#include <linux/highmem.h>
  73#include <linux/kthread.h>
  74#include <linux/splice.h>
  75#include <linux/sysfs.h>
  76#include <linux/miscdevice.h>
  77#include <linux/falloc.h>
  78#include "loop.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  79
  80#include <asm/uaccess.h>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  81
  82static DEFINE_IDR(loop_index_idr);
  83static DEFINE_MUTEX(loop_index_mutex);
 
  84
  85static int max_part;
  86static int part_shift;
  87
  88/*
  89 * Transfer functions
 
 
 
 
 
 
  90 */
  91static int transfer_none(struct loop_device *lo, int cmd,
  92			 struct page *raw_page, unsigned raw_off,
  93			 struct page *loop_page, unsigned loop_off,
  94			 int size, sector_t real_block)
  95{
  96	char *raw_buf = kmap_atomic(raw_page) + raw_off;
  97	char *loop_buf = kmap_atomic(loop_page) + loop_off;
  98
  99	if (cmd == READ)
 100		memcpy(loop_buf, raw_buf, size);
 101	else
 102		memcpy(raw_buf, loop_buf, size);
 103
 104	kunmap_atomic(loop_buf);
 105	kunmap_atomic(raw_buf);
 106	cond_resched();
 107	return 0;
 108}
 109
 110static int transfer_xor(struct loop_device *lo, int cmd,
 111			struct page *raw_page, unsigned raw_off,
 112			struct page *loop_page, unsigned loop_off,
 113			int size, sector_t real_block)
 114{
 115	char *raw_buf = kmap_atomic(raw_page) + raw_off;
 116	char *loop_buf = kmap_atomic(loop_page) + loop_off;
 117	char *in, *out, *key;
 118	int i, keysize;
 119
 120	if (cmd == READ) {
 121		in = raw_buf;
 122		out = loop_buf;
 123	} else {
 124		in = loop_buf;
 125		out = raw_buf;
 126	}
 127
 128	key = lo->lo_encrypt_key;
 129	keysize = lo->lo_encrypt_key_size;
 130	for (i = 0; i < size; i++)
 131		*out++ = *in++ ^ key[(i & 511) % keysize];
 132
 133	kunmap_atomic(loop_buf);
 134	kunmap_atomic(raw_buf);
 135	cond_resched();
 136	return 0;
 137}
 138
 139static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
 
 
 
 
 
 
 140{
 141	if (unlikely(info->lo_encrypt_key_size <= 0))
 142		return -EINVAL;
 143	return 0;
 144}
 145
 146static struct loop_func_table none_funcs = {
 147	.number = LO_CRYPT_NONE,
 148	.transfer = transfer_none,
 149}; 	
 150
 151static struct loop_func_table xor_funcs = {
 152	.number = LO_CRYPT_XOR,
 153	.transfer = transfer_xor,
 154	.init = xor_init
 155}; 	
 156
 157/* xfer_funcs[0] is special - its release function is never called */
 158static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
 159	&none_funcs,
 160	&xor_funcs
 161};
 162
 163static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
 164{
 165	loff_t loopsize;
 166
 167	/* Compute loopsize in bytes */
 168	loopsize = i_size_read(file->f_mapping->host);
 169	if (offset > 0)
 170		loopsize -= offset;
 171	/* offset is beyond i_size, weird but possible */
 172	if (loopsize < 0)
 173		return 0;
 174
 175	if (sizelimit > 0 && sizelimit < loopsize)
 176		loopsize = sizelimit;
 177	/*
 178	 * Unfortunately, if we want to do I/O on the device,
 179	 * the number of 512-byte sectors has to fit into a sector_t.
 180	 */
 181	return loopsize >> 9;
 182}
 183
 184static loff_t get_loop_size(struct loop_device *lo, struct file *file)
 185{
 186	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
 187}
 188
 189static int
 190figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
 
 
 
 
 
 191{
 192	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
 193	sector_t x = (sector_t)size;
 194	struct block_device *bdev = lo->lo_device;
 195
 196	if (unlikely((loff_t)x != size))
 197		return -EFBIG;
 198	if (lo->lo_offset != offset)
 199		lo->lo_offset = offset;
 200	if (lo->lo_sizelimit != sizelimit)
 201		lo->lo_sizelimit = sizelimit;
 202	set_capacity(lo->lo_disk, x);
 203	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
 204	/* let user-space know about the new size */
 205	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 206	return 0;
 207}
 208
 209static inline int
 210lo_do_transfer(struct loop_device *lo, int cmd,
 211	       struct page *rpage, unsigned roffs,
 212	       struct page *lpage, unsigned loffs,
 213	       int size, sector_t rblock)
 214{
 215	if (unlikely(!lo->transfer))
 216		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 217
 218	return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 219}
 220
 221/**
 222 * __do_lo_send_write - helper for writing data to a loop device
 
 
 223 *
 224 * This helper just factors out common code between do_lo_send_direct_write()
 225 * and do_lo_send_write().
 226 */
 227static int __do_lo_send_write(struct file *file,
 228		u8 *buf, const int len, loff_t pos)
 229{
 
 
 
 
 
 
 
 230	ssize_t bw;
 231	mm_segment_t old_fs = get_fs();
 232
 233	file_start_write(file);
 234	set_fs(get_ds());
 235	bw = file->f_op->write(file, buf, len, &pos);
 236	set_fs(old_fs);
 237	file_end_write(file);
 238	if (likely(bw == len))
 239		return 0;
 240	printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
 241			(unsigned long long)pos, len);
 
 
 242	if (bw >= 0)
 243		bw = -EIO;
 244	return bw;
 245}
 246
 247/**
 248 * do_lo_send_direct_write - helper for writing data to a loop device
 249 *
 250 * This is the fast, non-transforming version that does not need double
 251 * buffering.
 252 */
 253static int do_lo_send_direct_write(struct loop_device *lo,
 254		struct bio_vec *bvec, loff_t pos, struct page *page)
 255{
 256	ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
 257			kmap(bvec->bv_page) + bvec->bv_offset,
 258			bvec->bv_len, pos);
 259	kunmap(bvec->bv_page);
 260	cond_resched();
 261	return bw;
 262}
 263
 264/**
 265 * do_lo_send_write - helper for writing data to a loop device
 266 *
 267 * This is the slow, transforming version that needs to double buffer the
 268 * data as it cannot do the transformations in place without having direct
 269 * access to the destination pages of the backing file.
 270 */
 271static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
 272		loff_t pos, struct page *page)
 273{
 274	int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
 275			bvec->bv_offset, bvec->bv_len, pos >> 9);
 276	if (likely(!ret))
 277		return __do_lo_send_write(lo->lo_backing_file,
 278				page_address(page), bvec->bv_len,
 279				pos);
 280	printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
 281			"length %i.\n", (unsigned long long)pos, bvec->bv_len);
 282	if (ret > 0)
 283		ret = -EIO;
 284	return ret;
 285}
 286
 287static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 288{
 289	int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
 290			struct page *page);
 291	struct bio_vec bvec;
 292	struct bvec_iter iter;
 293	struct page *page = NULL;
 294	int ret = 0;
 295
 296	if (lo->transfer != transfer_none) {
 297		page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
 298		if (unlikely(!page))
 299			goto fail;
 300		kmap(page);
 301		do_lo_send = do_lo_send_write;
 302	} else {
 303		do_lo_send = do_lo_send_direct_write;
 304	}
 305
 306	bio_for_each_segment(bvec, bio, iter) {
 307		ret = do_lo_send(lo, &bvec, pos, page);
 308		if (ret < 0)
 309			break;
 310		pos += bvec.bv_len;
 311	}
 312	if (page) {
 313		kunmap(page);
 314		__free_page(page);
 315	}
 316out:
 317	return ret;
 318fail:
 319	printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
 320	ret = -ENOMEM;
 321	goto out;
 322}
 323
 324struct lo_read_data {
 325	struct loop_device *lo;
 326	struct page *page;
 327	unsigned offset;
 328	int bsize;
 329};
 
 
 
 
 
 
 
 330
 331static int
 332lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 333		struct splice_desc *sd)
 334{
 335	struct lo_read_data *p = sd->u.data;
 336	struct loop_device *lo = p->lo;
 337	struct page *page = buf->page;
 338	sector_t IV;
 339	int size;
 340
 341	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
 342							(buf->offset >> 9);
 343	size = sd->len;
 344	if (size > p->bsize)
 345		size = p->bsize;
 346
 347	if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
 348		printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
 349		       page->index);
 350		size = -EINVAL;
 
 351	}
 352
 353	flush_dcache_page(p->page);
 354
 355	if (size > 0)
 356		p->offset += size;
 357
 358	return size;
 359}
 360
 361static int
 362lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
 363{
 364	return __splice_from_pipe(pipe, sd, lo_splice_actor);
 365}
 366
 367static ssize_t
 368do_lo_receive(struct loop_device *lo,
 369	      struct bio_vec *bvec, int bsize, loff_t pos)
 370{
 371	struct lo_read_data cookie;
 372	struct splice_desc sd;
 373	struct file *file;
 374	ssize_t retval;
 375
 376	cookie.lo = lo;
 377	cookie.page = bvec->bv_page;
 378	cookie.offset = bvec->bv_offset;
 379	cookie.bsize = bsize;
 380
 381	sd.len = 0;
 382	sd.total_len = bvec->bv_len;
 383	sd.flags = 0;
 384	sd.pos = pos;
 385	sd.u.data = &cookie;
 386
 387	file = lo->lo_backing_file;
 388	retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
 389
 390	return retval;
 391}
 392
 393static int
 394lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 395{
 396	struct bio_vec bvec;
 397	struct bvec_iter iter;
 398	ssize_t s;
 399
 400	bio_for_each_segment(bvec, bio, iter) {
 401		s = do_lo_receive(lo, &bvec, bsize, pos);
 402		if (s < 0)
 403			return s;
 404
 405		if (s != bvec.bv_len) {
 406			zero_fill_bio(bio);
 407			break;
 408		}
 409		pos += bvec.bv_len;
 410	}
 411	return 0;
 412}
 413
 414static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 415{
 416	loff_t pos;
 417	int ret;
 418
 419	pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
 
 
 
 
 
 420
 421	if (bio_rw(bio) == WRITE) {
 422		struct file *file = lo->lo_backing_file;
 
 
 
 
 
 
 
 
 
 423
 424		if (bio->bi_rw & REQ_FLUSH) {
 425			ret = vfs_fsync(file, 0);
 426			if (unlikely(ret && ret != -EINVAL)) {
 427				ret = -EIO;
 428				goto out;
 429			}
 430		}
 
 
 
 
 
 431
 432		/*
 433		 * We use punch hole to reclaim the free space used by the
 434		 * image a.k.a. discard. However we do not support discard if
 435		 * encryption is enabled, because it may give an attacker
 436		 * useful information.
 437		 */
 438		if (bio->bi_rw & REQ_DISCARD) {
 439			struct file *file = lo->lo_backing_file;
 440			int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
 441
 442			if ((!file->f_op->fallocate) ||
 443			    lo->lo_encrypt_key_size) {
 444				ret = -EOPNOTSUPP;
 445				goto out;
 446			}
 447			ret = file->f_op->fallocate(file, mode, pos,
 448						    bio->bi_iter.bi_size);
 449			if (unlikely(ret && ret != -EINVAL &&
 450				     ret != -EOPNOTSUPP))
 451				ret = -EIO;
 452			goto out;
 453		}
 454
 455		ret = lo_send(lo, bio, pos);
 456
 457		if ((bio->bi_rw & REQ_FUA) && !ret) {
 458			ret = vfs_fsync(file, 0);
 459			if (unlikely(ret && ret != -EINVAL))
 460				ret = -EIO;
 461		}
 462	} else
 463		ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
 464
 465out:
 466	return ret;
 467}
 468
 469/*
 470 * Add bio to back of pending list
 471 */
 472static void loop_add_bio(struct loop_device *lo, struct bio *bio)
 473{
 474	lo->lo_bio_count++;
 475	bio_list_add(&lo->lo_bio_list, bio);
 476}
 477
 478/*
 479 * Grab first pending buffer
 480 */
 481static struct bio *loop_get_bio(struct loop_device *lo)
 482{
 483	lo->lo_bio_count--;
 484	return bio_list_pop(&lo->lo_bio_list);
 485}
 486
 487static void loop_make_request(struct request_queue *q, struct bio *old_bio)
 
 488{
 489	struct loop_device *lo = q->queuedata;
 490	int rw = bio_rw(old_bio);
 
 
 
 
 
 
 
 
 491
 492	if (rw == READA)
 493		rw = READ;
 494
 495	BUG_ON(!lo || (rw != READ && rw != WRITE));
 496
 497	spin_lock_irq(&lo->lo_lock);
 498	if (lo->lo_state != Lo_bound)
 499		goto out;
 500	if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
 501		goto out;
 502	if (lo->lo_bio_count >= q->nr_congestion_on)
 503		wait_event_lock_irq(lo->lo_req_wait,
 504				    lo->lo_bio_count < q->nr_congestion_off,
 505				    lo->lo_lock);
 506	loop_add_bio(lo, old_bio);
 507	wake_up(&lo->lo_event);
 508	spin_unlock_irq(&lo->lo_lock);
 509	return;
 510
 511out:
 512	spin_unlock_irq(&lo->lo_lock);
 513	bio_io_error(old_bio);
 514}
 515
 516struct switch_request {
 517	struct file *file;
 518	struct completion wait;
 519};
 520
 521static void do_loop_switch(struct loop_device *, struct switch_request *);
 522
 523static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
 524{
 525	if (unlikely(!bio->bi_bdev)) {
 526		do_loop_switch(lo, bio->bi_private);
 527		bio_put(bio);
 528	} else {
 529		int ret = do_bio_filebacked(lo, bio);
 530		bio_endio(bio, ret);
 
 
 
 
 
 531	}
 532}
 
 
 
 533
 534/*
 535 * worker thread that handles reads/writes to file backed loop devices,
 536 * to avoid blocking in our make_request_fn. it also does loop decrypting
 537 * on reads for block backed loop, as that is too heavy to do from
 538 * b_end_io context where irqs may be disabled.
 539 *
 540 * Loop explanation:  loop_clr_fd() sets lo_state to Lo_rundown before
 541 * calling kthread_stop().  Therefore once kthread_should_stop() is
 542 * true, make_request will not place any more requests.  Therefore
 543 * once kthread_should_stop() is true and lo_bio is NULL, we are
 544 * done with the loop.
 545 */
 546static int loop_thread(void *data)
 547{
 548	struct loop_device *lo = data;
 549	struct bio *bio;
 550
 551	set_user_nice(current, -20);
 
 
 
 552
 553	while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
 554
 555		wait_event_interruptible(lo->lo_event,
 556				!bio_list_empty(&lo->lo_bio_list) ||
 557				kthread_should_stop());
 
 558
 559		if (bio_list_empty(&lo->lo_bio_list))
 560			continue;
 561		spin_lock_irq(&lo->lo_lock);
 562		bio = loop_get_bio(lo);
 563		if (lo->lo_bio_count < lo->lo_queue->nr_congestion_off)
 564			wake_up(&lo->lo_req_wait);
 565		spin_unlock_irq(&lo->lo_lock);
 566
 567		BUG_ON(!bio);
 568		loop_handle_bio(lo, bio);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 569	}
 
 570
 571	return 0;
 
 
 
 572}
 573
 574/*
 575 * loop_switch performs the hard work of switching a backing store.
 576 * First it needs to flush existing IO, it does this by sending a magic
 577 * BIO down the pipe. The completion of this BIO does the actual switch.
 578 */
 579static int loop_switch(struct loop_device *lo, struct file *file)
 580{
 581	struct switch_request w;
 582	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
 583	if (!bio)
 584		return -ENOMEM;
 585	init_completion(&w.wait);
 586	w.file = file;
 587	bio->bi_private = &w;
 588	bio->bi_bdev = NULL;
 589	loop_make_request(lo->lo_queue, bio);
 590	wait_for_completion(&w.wait);
 591	return 0;
 592}
 593
 594/*
 595 * Helper to flush the IOs in loop, but keeping loop thread running
 596 */
 597static int loop_flush(struct loop_device *lo)
 598{
 599	/* loop not yet configured, no running thread, nothing to flush */
 600	if (!lo->lo_thread)
 601		return 0;
 602
 603	return loop_switch(lo, NULL);
 604}
 605
 606/*
 607 * Do the actual switch; called from the BIO completion routine
 608 */
 609static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
 610{
 611	struct file *file = p->file;
 612	struct file *old_file = lo->lo_backing_file;
 613	struct address_space *mapping;
 
 
 
 614
 615	/* if no new file, only flush of queued bios requested */
 616	if (!file)
 617		goto out;
 618
 619	mapping = file->f_mapping;
 620	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
 621	lo->lo_backing_file = file;
 622	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
 623		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
 624	lo->old_gfp_mask = mapping_gfp_mask(mapping);
 625	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 626out:
 627	complete(&p->wait);
 
 628}
 629
 630
 631/*
 632 * loop_change_fd switched the backing store of a loopback device to
 633 * a new file. This is useful for operating system installers to free up
 634 * the original file and in High Availability environments to switch to
 635 * an alternative location for the content in case of server meltdown.
 636 * This can only work if the loop device is used read-only, and if the
 637 * new backing store is the same size and type as the old backing store.
 638 */
 639static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 640			  unsigned int arg)
 641{
 642	struct file	*file, *old_file;
 643	struct inode	*inode;
 644	int		error;
 
 
 
 
 
 
 
 
 645
 
 
 
 
 646	error = -ENXIO;
 647	if (lo->lo_state != Lo_bound)
 648		goto out;
 649
 650	/* the loop device has to be read-only */
 651	error = -EINVAL;
 652	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
 653		goto out;
 654
 655	error = -EBADF;
 656	file = fget(arg);
 657	if (!file)
 658		goto out;
 659
 660	inode = file->f_mapping->host;
 661	old_file = lo->lo_backing_file;
 662
 663	error = -EINVAL;
 664
 665	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
 666		goto out_putf;
 667
 668	/* size of the new backing store needs to be the same */
 669	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
 670		goto out_putf;
 671
 672	/* and ... switch */
 673	error = loop_switch(lo, file);
 674	if (error)
 675		goto out_putf;
 
 
 
 
 
 
 
 
 676
 
 
 
 
 
 
 
 
 
 
 
 
 
 677	fput(old_file);
 678	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
 679		ioctl_by_bdev(bdev, BLKRRPART, 0);
 680	return 0;
 681
 682 out_putf:
 683	fput(file);
 684 out:
 
 685	return error;
 686}
 687
 688static inline int is_loop_device(struct file *file)
 689{
 690	struct inode *i = file->f_mapping->host;
 691
 692	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
 693}
 694
 695/* loop sysfs attributes */
 696
 697static ssize_t loop_attr_show(struct device *dev, char *page,
 698			      ssize_t (*callback)(struct loop_device *, char *))
 699{
 700	struct gendisk *disk = dev_to_disk(dev);
 701	struct loop_device *lo = disk->private_data;
 702
 703	return callback(lo, page);
 704}
 705
 706#define LOOP_ATTR_RO(_name)						\
 707static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
 708static ssize_t loop_attr_do_show_##_name(struct device *d,		\
 709				struct device_attribute *attr, char *b)	\
 710{									\
 711	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
 712}									\
 713static struct device_attribute loop_attr_##_name =			\
 714	__ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);
 715
 716static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
 717{
 718	ssize_t ret;
 719	char *p = NULL;
 720
 721	spin_lock_irq(&lo->lo_lock);
 722	if (lo->lo_backing_file)
 723		p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
 724	spin_unlock_irq(&lo->lo_lock);
 725
 726	if (IS_ERR_OR_NULL(p))
 727		ret = PTR_ERR(p);
 728	else {
 729		ret = strlen(p);
 730		memmove(buf, p, ret);
 731		buf[ret++] = '\n';
 732		buf[ret] = 0;
 733	}
 734
 735	return ret;
 736}
 737
 738static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
 739{
 740	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
 741}
 742
 743static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
 744{
 745	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
 746}
 747
 748static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
 749{
 750	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
 751
 752	return sprintf(buf, "%s\n", autoclear ? "1" : "0");
 753}
 754
 755static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
 756{
 757	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
 758
 759	return sprintf(buf, "%s\n", partscan ? "1" : "0");
 
 
 
 
 
 
 
 760}
 761
 762LOOP_ATTR_RO(backing_file);
 763LOOP_ATTR_RO(offset);
 764LOOP_ATTR_RO(sizelimit);
 765LOOP_ATTR_RO(autoclear);
 766LOOP_ATTR_RO(partscan);
 
 767
 768static struct attribute *loop_attrs[] = {
 769	&loop_attr_backing_file.attr,
 770	&loop_attr_offset.attr,
 771	&loop_attr_sizelimit.attr,
 772	&loop_attr_autoclear.attr,
 773	&loop_attr_partscan.attr,
 
 774	NULL,
 775};
 776
 777static struct attribute_group loop_attribute_group = {
 778	.name = "loop",
 779	.attrs= loop_attrs,
 780};
 781
 782static int loop_sysfs_init(struct loop_device *lo)
 783{
 784	return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
 785				  &loop_attribute_group);
 786}
 787
 788static void loop_sysfs_exit(struct loop_device *lo)
 789{
 790	sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
 791			   &loop_attribute_group);
 
 792}
 793
 794static void loop_config_discard(struct loop_device *lo)
 
 795{
 796	struct file *file = lo->lo_backing_file;
 797	struct inode *inode = file->f_mapping->host;
 798	struct request_queue *q = lo->lo_queue;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 799
 800	/*
 801	 * We use punch hole to reclaim the free space used by the
 802	 * image a.k.a. discard. However we do not support discard if
 803	 * encryption is enabled, because it may give an attacker
 804	 * useful information.
 805	 */
 806	if ((!file->f_op->fallocate) ||
 807	    lo->lo_encrypt_key_size) {
 808		q->limits.discard_granularity = 0;
 809		q->limits.discard_alignment = 0;
 810		q->limits.max_discard_sectors = 0;
 811		q->limits.discard_zeroes_data = 0;
 812		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
 813		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 814	}
 815
 816	q->limits.discard_granularity = inode->i_sb->s_blocksize;
 817	q->limits.discard_alignment = 0;
 818	q->limits.max_discard_sectors = UINT_MAX >> 9;
 819	q->limits.discard_zeroes_data = 1;
 820	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 821}
 822
 823static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 824		       struct block_device *bdev, unsigned int arg)
 
 825{
 826	struct file	*file, *f;
 827	struct inode	*inode;
 828	struct address_space *mapping;
 829	unsigned lo_blocksize;
 830	int		lo_flags = 0;
 831	int		error;
 832	loff_t		size;
 
 
 
 
 
 833
 834	/* This is safe, since we have a reference from open(). */
 835	__module_get(THIS_MODULE);
 836
 837	error = -EBADF;
 838	file = fget(arg);
 839	if (!file)
 840		goto out;
 
 
 
 
 
 
 
 
 
 841
 842	error = -EBUSY;
 843	if (lo->lo_state != Lo_unbound)
 844		goto out_putf;
 845
 846	/* Avoid recursion */
 847	f = file;
 848	while (is_loop_device(f)) {
 849		struct loop_device *l;
 850
 851		if (f->f_mapping->host->i_bdev == bdev)
 852			goto out_putf;
 853
 854		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
 855		if (l->lo_state == Lo_unbound) {
 856			error = -EINVAL;
 857			goto out_putf;
 858		}
 859		f = l->lo_backing_file;
 860	}
 861
 862	mapping = file->f_mapping;
 863	inode = mapping->host;
 864
 865	error = -EINVAL;
 866	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
 867		goto out_putf;
 
 868
 869	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
 870	    !file->f_op->write)
 871		lo_flags |= LO_FLAGS_READ_ONLY;
 
 
 872
 873	lo_blocksize = S_ISBLK(inode->i_mode) ?
 874		inode->i_bdev->bd_block_size : PAGE_SIZE;
 
 875
 876	error = -EFBIG;
 877	size = get_loop_size(lo, file);
 878	if ((loff_t)(sector_t)size != size)
 879		goto out_putf;
 
 
 
 
 
 
 
 
 
 880
 881	error = 0;
 
 882
 883	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
 884
 885	lo->lo_blocksize = lo_blocksize;
 886	lo->lo_device = bdev;
 887	lo->lo_flags = lo_flags;
 888	lo->lo_backing_file = file;
 889	lo->transfer = transfer_none;
 890	lo->ioctl = NULL;
 891	lo->lo_sizelimit = 0;
 892	lo->lo_bio_count = 0;
 893	lo->old_gfp_mask = mapping_gfp_mask(mapping);
 894	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 895
 896	bio_list_init(&lo->lo_bio_list);
 
 
 
 
 
 
 
 
 
 897
 898	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 899		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
 
 900
 901	set_capacity(lo->lo_disk, size);
 902	bd_set_size(bdev, size << 9);
 903	loop_sysfs_init(lo);
 904	/* let user-space know about the new size */
 905	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 906
 907	set_blocksize(bdev, lo_blocksize);
 
 
 
 
 908
 909	lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
 910						lo->lo_number);
 911	if (IS_ERR(lo->lo_thread)) {
 912		error = PTR_ERR(lo->lo_thread);
 913		goto out_clr;
 914	}
 915	lo->lo_state = Lo_bound;
 916	wake_up_process(lo->lo_thread);
 917	if (part_shift)
 918		lo->lo_flags |= LO_FLAGS_PARTSCAN;
 919	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
 920		ioctl_by_bdev(bdev, BLKRRPART, 0);
 
 
 
 
 
 
 
 
 
 
 
 921
 922	/* Grab the block_device to prevent its destruction after we
 923	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
 924	 */
 925	bdgrab(bdev);
 926	return 0;
 927
 928out_clr:
 929	loop_sysfs_exit(lo);
 930	lo->lo_thread = NULL;
 931	lo->lo_device = NULL;
 932	lo->lo_backing_file = NULL;
 933	lo->lo_flags = 0;
 934	set_capacity(lo->lo_disk, 0);
 935	invalidate_bdev(bdev);
 936	bd_set_size(bdev, 0);
 937	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 938	mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
 939	lo->lo_state = Lo_unbound;
 940 out_putf:
 941	fput(file);
 942 out:
 943	/* This is safe: open() is still holding a reference. */
 944	module_put(THIS_MODULE);
 945	return error;
 946}
 947
 948static int
 949loop_release_xfer(struct loop_device *lo)
 950{
 951	int err = 0;
 952	struct loop_func_table *xfer = lo->lo_encryption;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 953
 954	if (xfer) {
 955		if (xfer->release)
 956			err = xfer->release(lo);
 957		lo->transfer = NULL;
 958		lo->lo_encryption = NULL;
 959		module_put(xfer->owner);
 960	}
 961	return err;
 962}
 
 
 
 
 
 963
 964static int
 965loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
 966	       const struct loop_info64 *i)
 967{
 968	int err = 0;
 969
 970	if (xfer) {
 971		struct module *owner = xfer->owner;
 972
 973		if (!try_module_get(owner))
 974			return -EINVAL;
 975		if (xfer->init)
 976			err = xfer->init(lo, i);
 
 
 
 
 
 
 
 
 
 977		if (err)
 978			module_put(owner);
 979		else
 980			lo->lo_encryption = xfer;
 981	}
 982	return err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 983}
 984
 985static int loop_clr_fd(struct loop_device *lo)
 986{
 987	struct file *filp = lo->lo_backing_file;
 988	gfp_t gfp = lo->old_gfp_mask;
 989	struct block_device *bdev = lo->lo_device;
 990
 991	if (lo->lo_state != Lo_bound)
 
 
 
 
 
 
 
 
 
 
 
 
 
 992		return -ENXIO;
 993
 994	/*
 995	 * If we've explicitly asked to tear down the loop device,
 996	 * and it has an elevated reference count, set it for auto-teardown when
 997	 * the last reference goes away. This stops $!~#$@ udev from
 998	 * preventing teardown because it decided that it needs to run blkid on
 999	 * the loopback device whenever they appear. xfstests is notorious for
1000	 * failing tests because blkid via udev races with a losetup
1001	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
1002	 * command to fail with EBUSY.
1003	 */
1004	if (lo->lo_refcnt > 1) {
1005		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
1006		mutex_unlock(&lo->lo_ctl_mutex);
1007		return 0;
1008	}
1009
1010	if (filp == NULL)
1011		return -EINVAL;
1012
1013	spin_lock_irq(&lo->lo_lock);
1014	lo->lo_state = Lo_rundown;
1015	spin_unlock_irq(&lo->lo_lock);
1016
1017	kthread_stop(lo->lo_thread);
1018
1019	spin_lock_irq(&lo->lo_lock);
1020	lo->lo_backing_file = NULL;
1021	spin_unlock_irq(&lo->lo_lock);
1022
1023	loop_release_xfer(lo);
1024	lo->transfer = NULL;
1025	lo->ioctl = NULL;
1026	lo->lo_device = NULL;
1027	lo->lo_encryption = NULL;
1028	lo->lo_offset = 0;
1029	lo->lo_sizelimit = 0;
1030	lo->lo_encrypt_key_size = 0;
1031	lo->lo_thread = NULL;
1032	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
1033	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
1034	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
1035	if (bdev) {
1036		bdput(bdev);
1037		invalidate_bdev(bdev);
1038	}
1039	set_capacity(lo->lo_disk, 0);
1040	loop_sysfs_exit(lo);
1041	if (bdev) {
1042		bd_set_size(bdev, 0);
1043		/* let user-space know about this change */
1044		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
1045	}
1046	mapping_set_gfp_mask(filp->f_mapping, gfp);
1047	lo->lo_state = Lo_unbound;
1048	/* This is safe: open() is still holding a reference. */
1049	module_put(THIS_MODULE);
1050	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
1051		ioctl_by_bdev(bdev, BLKRRPART, 0);
1052	lo->lo_flags = 0;
1053	if (!part_shift)
1054		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
1055	mutex_unlock(&lo->lo_ctl_mutex);
1056	/*
1057	 * Need not hold lo_ctl_mutex to fput backing file.
1058	 * Calling fput holding lo_ctl_mutex triggers a circular
1059	 * lock dependency possibility warning as fput can take
1060	 * bd_mutex which is usually taken before lo_ctl_mutex.
1061	 */
1062	fput(filp);
1063	return 0;
1064}
1065
1066static int
1067loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1068{
1069	int err;
1070	struct loop_func_table *xfer;
1071	kuid_t uid = current_uid();
1072
1073	if (lo->lo_encrypt_key_size &&
1074	    !uid_eq(lo->lo_key_owner, uid) &&
1075	    !capable(CAP_SYS_ADMIN))
1076		return -EPERM;
1077	if (lo->lo_state != Lo_bound)
1078		return -ENXIO;
1079	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
1080		return -EINVAL;
1081
1082	err = loop_release_xfer(lo);
1083	if (err)
1084		return err;
1085
1086	if (info->lo_encrypt_type) {
1087		unsigned int type = info->lo_encrypt_type;
1088
1089		if (type >= MAX_LO_CRYPT)
1090			return -EINVAL;
1091		xfer = xfer_funcs[type];
1092		if (xfer == NULL)
1093			return -EINVAL;
1094	} else
1095		xfer = NULL;
1096
1097	err = loop_init_xfer(lo, xfer, info);
1098	if (err)
1099		return err;
 
 
 
 
1100
1101	if (lo->lo_offset != info->lo_offset ||
1102	    lo->lo_sizelimit != info->lo_sizelimit)
1103		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
1104			return -EFBIG;
 
 
1105
1106	loop_config_discard(lo);
 
1107
1108	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
1109	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
1110	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
1111	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
1112
1113	if (!xfer)
1114		xfer = &none_funcs;
1115	lo->transfer = xfer->transfer;
1116	lo->ioctl = xfer->ioctl;
1117
1118	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
1119	     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
1120		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
1121
1122	if ((info->lo_flags & LO_FLAGS_PARTSCAN) &&
1123	     !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
1124		lo->lo_flags |= LO_FLAGS_PARTSCAN;
1125		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
1126		ioctl_by_bdev(lo->lo_device, BLKRRPART, 0);
1127	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1128
1129	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
1130	lo->lo_init[0] = info->lo_init[0];
1131	lo->lo_init[1] = info->lo_init[1];
1132	if (info->lo_encrypt_key_size) {
1133		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
1134		       info->lo_encrypt_key_size);
1135		lo->lo_key_owner = uid;
1136	}	
1137
1138	return 0;
1139}
1140
1141static int
1142loop_get_status(struct loop_device *lo, struct loop_info64 *info)
1143{
1144	struct file *file = lo->lo_backing_file;
1145	struct kstat stat;
1146	int error;
1147
1148	if (lo->lo_state != Lo_bound)
 
 
 
 
1149		return -ENXIO;
1150	error = vfs_getattr(&file->f_path, &stat);
1151	if (error)
1152		return error;
1153	memset(info, 0, sizeof(*info));
1154	info->lo_number = lo->lo_number;
1155	info->lo_device = huge_encode_dev(stat.dev);
1156	info->lo_inode = stat.ino;
1157	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
1158	info->lo_offset = lo->lo_offset;
1159	info->lo_sizelimit = lo->lo_sizelimit;
1160	info->lo_flags = lo->lo_flags;
1161	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1162	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
1163	info->lo_encrypt_type =
1164		lo->lo_encryption ? lo->lo_encryption->number : 0;
1165	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
1166		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
1167		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
1168		       lo->lo_encrypt_key_size);
 
 
 
1169	}
1170	return 0;
 
1171}
1172
1173static void
1174loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
1175{
1176	memset(info64, 0, sizeof(*info64));
1177	info64->lo_number = info->lo_number;
1178	info64->lo_device = info->lo_device;
1179	info64->lo_inode = info->lo_inode;
1180	info64->lo_rdevice = info->lo_rdevice;
1181	info64->lo_offset = info->lo_offset;
1182	info64->lo_sizelimit = 0;
1183	info64->lo_encrypt_type = info->lo_encrypt_type;
1184	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
1185	info64->lo_flags = info->lo_flags;
1186	info64->lo_init[0] = info->lo_init[0];
1187	info64->lo_init[1] = info->lo_init[1];
1188	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1189		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
1190	else
1191		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
1192	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
1193}
1194
1195static int
1196loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
1197{
1198	memset(info, 0, sizeof(*info));
1199	info->lo_number = info64->lo_number;
1200	info->lo_device = info64->lo_device;
1201	info->lo_inode = info64->lo_inode;
1202	info->lo_rdevice = info64->lo_rdevice;
1203	info->lo_offset = info64->lo_offset;
1204	info->lo_encrypt_type = info64->lo_encrypt_type;
1205	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
1206	info->lo_flags = info64->lo_flags;
1207	info->lo_init[0] = info64->lo_init[0];
1208	info->lo_init[1] = info64->lo_init[1];
1209	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1210		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1211	else
1212		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
1213	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1214
1215	/* error in case values were truncated */
1216	if (info->lo_device != info64->lo_device ||
1217	    info->lo_rdevice != info64->lo_rdevice ||
1218	    info->lo_inode != info64->lo_inode ||
1219	    info->lo_offset != info64->lo_offset)
1220		return -EOVERFLOW;
1221
1222	return 0;
1223}
1224
1225static int
1226loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1227{
1228	struct loop_info info;
1229	struct loop_info64 info64;
1230
1231	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
1232		return -EFAULT;
1233	loop_info64_from_old(&info, &info64);
1234	return loop_set_status(lo, &info64);
1235}
1236
1237static int
1238loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1239{
1240	struct loop_info64 info64;
1241
1242	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
1243		return -EFAULT;
1244	return loop_set_status(lo, &info64);
1245}
1246
1247static int
1248loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1249	struct loop_info info;
1250	struct loop_info64 info64;
1251	int err = 0;
1252
1253	if (!arg)
1254		err = -EINVAL;
1255	if (!err)
1256		err = loop_get_status(lo, &info64);
1257	if (!err)
1258		err = loop_info64_to_old(&info64, &info);
1259	if (!err && copy_to_user(arg, &info, sizeof(info)))
1260		err = -EFAULT;
1261
1262	return err;
1263}
1264
1265static int
1266loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1267	struct loop_info64 info64;
1268	int err = 0;
1269
1270	if (!arg)
1271		err = -EINVAL;
1272	if (!err)
1273		err = loop_get_status(lo, &info64);
1274	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1275		err = -EFAULT;
1276
1277	return err;
1278}
1279
1280static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
1281{
 
 
1282	if (unlikely(lo->lo_state != Lo_bound))
1283		return -ENXIO;
1284
1285	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1286}
1287
1288static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1289	unsigned int cmd, unsigned long arg)
1290{
1291	struct loop_device *lo = bdev->bd_disk->private_data;
 
1292	int err;
1293
1294	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
1295	switch (cmd) {
1296	case LOOP_SET_FD:
1297		err = loop_set_fd(lo, mode, bdev, arg);
1298		break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1299	case LOOP_CHANGE_FD:
1300		err = loop_change_fd(lo, bdev, arg);
1301		break;
1302	case LOOP_CLR_FD:
1303		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
1304		err = loop_clr_fd(lo);
1305		if (!err)
1306			goto out_unlocked;
1307		break;
1308	case LOOP_SET_STATUS:
1309		err = -EPERM;
1310		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1311			err = loop_set_status_old(lo,
1312					(struct loop_info __user *)arg);
1313		break;
1314	case LOOP_GET_STATUS:
1315		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
1316		break;
1317	case LOOP_SET_STATUS64:
1318		err = -EPERM;
1319		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1320			err = loop_set_status64(lo,
1321					(struct loop_info64 __user *) arg);
1322		break;
1323	case LOOP_GET_STATUS64:
1324		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
1325		break;
1326	case LOOP_SET_CAPACITY:
1327		err = -EPERM;
1328		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1329			err = loop_set_capacity(lo, bdev);
 
 
 
 
1330		break;
1331	default:
1332		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1333	}
1334	mutex_unlock(&lo->lo_ctl_mutex);
1335
1336out_unlocked:
1337	return err;
1338}
1339
1340#ifdef CONFIG_COMPAT
1341struct compat_loop_info {
1342	compat_int_t	lo_number;      /* ioctl r/o */
1343	compat_dev_t	lo_device;      /* ioctl r/o */
1344	compat_ulong_t	lo_inode;       /* ioctl r/o */
1345	compat_dev_t	lo_rdevice;     /* ioctl r/o */
1346	compat_int_t	lo_offset;
1347	compat_int_t	lo_encrypt_type;
1348	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
1349	compat_int_t	lo_flags;       /* ioctl r/o */
1350	char		lo_name[LO_NAME_SIZE];
1351	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
1352	compat_ulong_t	lo_init[2];
1353	char		reserved[4];
1354};
1355
1356/*
1357 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
1358 * - noinlined to reduce stack space usage in main part of driver
1359 */
1360static noinline int
1361loop_info64_from_compat(const struct compat_loop_info __user *arg,
1362			struct loop_info64 *info64)
1363{
1364	struct compat_loop_info info;
1365
1366	if (copy_from_user(&info, arg, sizeof(info)))
1367		return -EFAULT;
1368
1369	memset(info64, 0, sizeof(*info64));
1370	info64->lo_number = info.lo_number;
1371	info64->lo_device = info.lo_device;
1372	info64->lo_inode = info.lo_inode;
1373	info64->lo_rdevice = info.lo_rdevice;
1374	info64->lo_offset = info.lo_offset;
1375	info64->lo_sizelimit = 0;
1376	info64->lo_encrypt_type = info.lo_encrypt_type;
1377	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
1378	info64->lo_flags = info.lo_flags;
1379	info64->lo_init[0] = info.lo_init[0];
1380	info64->lo_init[1] = info.lo_init[1];
1381	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1382		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
1383	else
1384		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
1385	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
1386	return 0;
1387}
1388
1389/*
1390 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
1391 * - noinlined to reduce stack space usage in main part of driver
1392 */
1393static noinline int
1394loop_info64_to_compat(const struct loop_info64 *info64,
1395		      struct compat_loop_info __user *arg)
1396{
1397	struct compat_loop_info info;
1398
1399	memset(&info, 0, sizeof(info));
1400	info.lo_number = info64->lo_number;
1401	info.lo_device = info64->lo_device;
1402	info.lo_inode = info64->lo_inode;
1403	info.lo_rdevice = info64->lo_rdevice;
1404	info.lo_offset = info64->lo_offset;
1405	info.lo_encrypt_type = info64->lo_encrypt_type;
1406	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
1407	info.lo_flags = info64->lo_flags;
1408	info.lo_init[0] = info64->lo_init[0];
1409	info.lo_init[1] = info64->lo_init[1];
1410	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1411		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1412	else
1413		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
1414	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1415
1416	/* error in case values were truncated */
1417	if (info.lo_device != info64->lo_device ||
1418	    info.lo_rdevice != info64->lo_rdevice ||
1419	    info.lo_inode != info64->lo_inode ||
1420	    info.lo_offset != info64->lo_offset ||
1421	    info.lo_init[0] != info64->lo_init[0] ||
1422	    info.lo_init[1] != info64->lo_init[1])
1423		return -EOVERFLOW;
1424
1425	if (copy_to_user(arg, &info, sizeof(info)))
1426		return -EFAULT;
1427	return 0;
1428}
1429
1430static int
1431loop_set_status_compat(struct loop_device *lo,
1432		       const struct compat_loop_info __user *arg)
1433{
1434	struct loop_info64 info64;
1435	int ret;
1436
1437	ret = loop_info64_from_compat(arg, &info64);
1438	if (ret < 0)
1439		return ret;
1440	return loop_set_status(lo, &info64);
1441}
1442
1443static int
1444loop_get_status_compat(struct loop_device *lo,
1445		       struct compat_loop_info __user *arg)
1446{
1447	struct loop_info64 info64;
1448	int err = 0;
1449
1450	if (!arg)
1451		err = -EINVAL;
1452	if (!err)
1453		err = loop_get_status(lo, &info64);
1454	if (!err)
1455		err = loop_info64_to_compat(&info64, arg);
1456	return err;
1457}
1458
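/*
 * Handle ioctls issued by 32-bit userspace on a 64-bit kernel: translate
 * the compat loop_info layout for LOOP_{SET,GET}_STATUS, convert pointer
 * arguments with compat_ptr() where needed, and forward everything else
 * to lo_ioctl().
 */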
1459static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1460			   unsigned int cmd, unsigned long arg)
1461{
1462	struct loop_device *lo = bdev->bd_disk->private_data;
1463	int err;
1464
1465	switch (cmd) {
1466	case LOOP_SET_STATUS:
1467		mutex_lock(&lo->lo_ctl_mutex);
1468		err = loop_set_status_compat(
1469			lo, (const struct compat_loop_info __user *) arg);
1470		mutex_unlock(&lo->lo_ctl_mutex);
1471		break;
1472	case LOOP_GET_STATUS:
1473		mutex_lock(&lo->lo_ctl_mutex);
1474		err = loop_get_status_compat(
1475			lo, (struct compat_loop_info __user *) arg);
1476		mutex_unlock(&lo->lo_ctl_mutex);
1477		break;
1478	case LOOP_SET_CAPACITY:
1479	case LOOP_CLR_FD:
1480	case LOOP_GET_STATUS64:
1481	case LOOP_SET_STATUS64:
1482		arg = (unsigned long) compat_ptr(arg);
		/* fall through */
1483	case LOOP_SET_FD:
1484	case LOOP_CHANGE_FD:
1485		err = lo_ioctl(bdev, mode, cmd, arg);
1486		break;
1487	default:
1488		err = -ENOIOCTLCMD;
1489		break;
1490	}
1491	return err;
1492}
1493#endif
1494
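/*
 * Opening a loop device only takes a reference on it; attaching a backing
 * file is done separately through LOOP_SET_FD and related ioctls.
 */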
1495static int lo_open(struct block_device *bdev, fmode_t mode)
1496{
1497	struct loop_device *lo;
1498	int err = 0;
1499
1500	mutex_lock(&loop_index_mutex);
1501	lo = bdev->bd_disk->private_data;
1502	if (!lo) {
1503		err = -ENXIO;
1504		goto out;
1505	}
1506
1507	mutex_lock(&lo->lo_ctl_mutex);
1508	lo->lo_refcnt++;
1509	mutex_unlock(&lo->lo_ctl_mutex);
1510out:
1511	mutex_unlock(&loop_index_mutex);
1512	return err;
1513}
1514
1515static void lo_release(struct gendisk *disk, fmode_t mode)
1516{
1517	struct loop_device *lo = disk->private_data;
1518	int err;
1519
1520	mutex_lock(&lo->lo_ctl_mutex);
1521
1522	if (--lo->lo_refcnt)
1523		goto out;
1524
1525	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
1526		/*
1527		 * In autoclear mode, stop the loop thread
1528		 * and remove configuration after last close.
1529		 */
1530		err = loop_clr_fd(lo);
1531		if (!err)
1532			return;
1533	} else {
1534		/*
1535		 * Otherwise keep thread (if running) and config,
1536		 * but flush possible ongoing bios in thread.
1537		 */
1538		loop_flush(lo);
1539	}
1540
1541out:
1542	mutex_unlock(&lo->lo_ctl_mutex);
1543}
1544
1545static const struct block_device_operations lo_fops = {
1546	.owner =	THIS_MODULE,
1547	.open =		lo_open,
1548	.release =	lo_release,
1549	.ioctl =	lo_ioctl,
1550#ifdef CONFIG_COMPAT
1551	.compat_ioctl =	lo_compat_ioctl,
1552#endif
1553};
1554
1555/*
1556 * And now the module code and kernel interface.
1557 */
1558static int max_loop;
1559module_param(max_loop, int, S_IRUGO);
1560MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
1561module_param(max_part, int, S_IRUGO);
1562MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
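/*
 * A minimal usage sketch (assuming the driver is built as a module and the
 * distribution creates the device nodes via udev):
 *
 *	modprobe loop max_loop=8 max_part=15
 *
 * pre-creates loop0..loop7 and reserves minor space for up to 15
 * partitions per device.
 */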
1563MODULE_LICENSE("GPL");
1564MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
1565
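/*
 * Register a transfer (encryption) module in the xfer_funcs[] table.  The
 * slot is chosen by funcs->number; registering an already-used or
 * out-of-range slot fails with -EINVAL.
 */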
1566int loop_register_transfer(struct loop_func_table *funcs)
1567{
1568	unsigned int n = funcs->number;
1569
1570	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
1571		return -EINVAL;
1572	xfer_funcs[n] = funcs;
1573	return 0;
1574}
1575
1576static int unregister_transfer_cb(int id, void *ptr, void *data)
1577{
1578	struct loop_device *lo = ptr;
1579	struct loop_func_table *xfer = data;
1580
1581	mutex_lock(&lo->lo_ctl_mutex);
1582	if (lo->lo_encryption == xfer)
1583		loop_release_xfer(lo);
1584	mutex_unlock(&lo->lo_ctl_mutex);
1585	return 0;
1586}
1587
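/*
 * Unregister a transfer module: clear its xfer_funcs[] slot and, via
 * unregister_transfer_cb(), release it from any loop device still bound
 * to it.
 */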
1588int loop_unregister_transfer(int number)
1589{
1590	unsigned int n = number;
1591	struct loop_func_table *xfer;
1592
1593	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1594		return -EINVAL;
1595
1596	xfer_funcs[n] = NULL;
1597	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
1598	return 0;
1599}
1600
1601EXPORT_SYMBOL(loop_register_transfer);
1602EXPORT_SYMBOL(loop_unregister_transfer);
1603
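/*
 * Allocate and register a new loop device.  If @i >= 0 that specific index
 * is requested (-EEXIST if already taken), otherwise the first free index
 * is used.  Returns the device number on success or a negative errno.
 */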
1604static int loop_add(struct loop_device **l, int i)
1605{
1606	struct loop_device *lo;
1607	struct gendisk *disk;
1608	int err;
1609
1610	err = -ENOMEM;
1611	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1612	if (!lo)
1613		goto out;
1614
1615	lo->lo_state = Lo_unbound;
1616
1617	/* allocate id, if @id >= 0, we're requesting that specific id */
1618	if (i >= 0) {
1619		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
1620		if (err == -ENOSPC)
1621			err = -EEXIST;
1622	} else {
1623		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
1624	}
1625	if (err < 0)
1626		goto out_free_dev;
1627	i = err;
1628
1629	err = -ENOMEM;
1630	lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1631	if (!lo->lo_queue)
1632		goto out_free_idr;
1633
1634	/*
1635	 * Set the queue's make_request_fn: bios go through loop_make_request().
1636	 */
1637	blk_queue_make_request(lo->lo_queue, loop_make_request);
1638	lo->lo_queue->queuedata = lo;
1639
1640	disk = lo->lo_disk = alloc_disk(1 << part_shift);
1641	if (!disk)
1642		goto out_free_queue;
1643
1644	/*
1645	 * Disable partition scanning by default. The in-kernel partition
1646	 * scanning can be requested individually per-device during its
1647	 * setup. Userspace can always add and remove partitions from all
1648	 * devices. The needed partition minors are allocated from the
1649	 * extended minor space, the main loop device numbers will continue
1650	 * to match the loop minors, regardless of the number of partitions
1651	 * used.
1652	 *
1653	 * If max_part is given, partition scanning is globally enabled for
1654	 * all loop devices. The minors for the main loop devices will be
1655	 * multiples of max_part.
1656	 *
1657	 * Note: Global-for-all-devices, set-only-at-init, read-only module
1658	 * parameters like 'max_loop' and 'max_part' make things needlessly
1659	 * complicated, too static and inflexible, and may surprise
1660	 * userspace tools. Parameters like this in general should be avoided.
1661	 */
1662	if (!part_shift)
1663		disk->flags |= GENHD_FL_NO_PART_SCAN;
1664	disk->flags |= GENHD_FL_EXT_DEVT;
1665	mutex_init(&lo->lo_ctl_mutex);
1666	lo->lo_number		= i;
1667	lo->lo_thread		= NULL;
1668	init_waitqueue_head(&lo->lo_event);
1669	init_waitqueue_head(&lo->lo_req_wait);
1670	spin_lock_init(&lo->lo_lock);
1671	disk->major		= LOOP_MAJOR;
1672	disk->first_minor	= i << part_shift;
1673	disk->fops		= &lo_fops;
1674	disk->private_data	= lo;
1675	disk->queue		= lo->lo_queue;
1676	sprintf(disk->disk_name, "loop%d", i);
1677	add_disk(disk);
1678	*l = lo;
1679	return lo->lo_number;
1680
1681out_free_queue:
1682	blk_cleanup_queue(lo->lo_queue);
1683out_free_idr:
1684	idr_remove(&loop_index_idr, i);
1685out_free_dev:
1686	kfree(lo);
1687out:
1688	return err;
1689}
1690
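/*
 * Tear down a loop device: remove its gendisk, clean up the request queue
 * and free the structure.  loop_remove() itself does not touch
 * loop_index_idr; the caller handles the idr entry.
 */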
1691static void loop_remove(struct loop_device *lo)
1692{
1693	del_gendisk(lo->lo_disk);
1694	blk_cleanup_queue(lo->lo_queue);
1695	put_disk(lo->lo_disk);
1696	kfree(lo);
1697}
1698
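/* idr_for_each() callback: stop at the first still-unbound loop device. */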
1699static int find_free_cb(int id, void *ptr, void *data)
1700{
1701	struct loop_device *lo = ptr;
1702	struct loop_device **l = data;
1703
1704	if (lo->lo_state == Lo_unbound) {
1705		*l = lo;
1706		return 1;
1707	}
1708	return 0;
1709}
1710
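/*
 * Look up a loop device: with @i >= 0 return the device with that index,
 * with @i < 0 return the first unbound device.  On success *l is set and
 * the device number is returned, otherwise -ENODEV.
 */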
1711static int loop_lookup(struct loop_device **l, int i)
1712{
1713	struct loop_device *lo;
1714	int ret = -ENODEV;
1715
1716	if (i < 0) {
1717		int err;
1718
1719		err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
1720		if (err == 1) {
1721			*l = lo;
1722			ret = lo->lo_number;
1723		}
1724		goto out;
1725	}
1726
1727	/* lookup and return a specific i */
1728	lo = idr_find(&loop_index_idr, i);
1729	if (lo) {
1730		*l = lo;
1731		ret = lo->lo_number;
1732	}
1733out:
1734	return ret;
1735}
1736
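/*
 * blk_register_region() probe routine: called when an unclaimed loop minor
 * is first accessed, so that opening a "dead" device node instantiates the
 * corresponding loop device on demand.
 */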
1737static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1738{
1739	struct loop_device *lo;
1740	struct kobject *kobj;
1741	int err;
1742
1743	mutex_lock(&loop_index_mutex);
1744	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
1745	if (err < 0)
1746		err = loop_add(&lo, MINOR(dev) >> part_shift);
1747	if (err < 0)
1748		kobj = NULL;
1749	else
1750		kobj = get_disk(lo->lo_disk);
1751	mutex_unlock(&loop_index_mutex);
1752
1753	*part = 0;
1754	return kobj;
1755}
1756
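/*
 * ioctl handler for /dev/loop-control: LOOP_CTL_ADD and LOOP_CTL_REMOVE
 * create or destroy the device with the given index (removal requires an
 * unbound, unreferenced device), and LOOP_CTL_GET_FREE returns the first
 * unbound device, creating one if necessary.
 *
 * Minimal userspace sketch (assumes the usual open/ioctl headers and a
 * readable /dev/loop-control node):
 *
 *	int fd = open("/dev/loop-control", O_RDWR);
 *	int nr = ioctl(fd, LOOP_CTL_GET_FREE);	// e.g. 0 -> /dev/loop0
 *	// the backing file is then attached with LOOP_SET_FD on /dev/loop<nr>
 */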
1757static long loop_control_ioctl(struct file *file, unsigned int cmd,
1758			       unsigned long parm)
1759{
1760	struct loop_device *lo;
1761	int ret = -ENOSYS;
1762
1763	mutex_lock(&loop_index_mutex);
1764	switch (cmd) {
1765	case LOOP_CTL_ADD:
1766		ret = loop_lookup(&lo, parm);
1767		if (ret >= 0) {
1768			ret = -EEXIST;
1769			break;
1770		}
1771		ret = loop_add(&lo, parm);
1772		break;
1773	case LOOP_CTL_REMOVE:
1774		ret = loop_lookup(&lo, parm);
1775		if (ret < 0)
1776			break;
1777		mutex_lock(&lo->lo_ctl_mutex);
1778		if (lo->lo_state != Lo_unbound) {
1779			ret = -EBUSY;
1780			mutex_unlock(&lo->lo_ctl_mutex);
1781			break;
1782		}
1783		if (lo->lo_refcnt > 0) {
1784			ret = -EBUSY;
1785			mutex_unlock(&lo->lo_ctl_mutex);
1786			break;
1787		}
1788		lo->lo_disk->private_data = NULL;
1789		mutex_unlock(&lo->lo_ctl_mutex);
1790		idr_remove(&loop_index_idr, lo->lo_number);
1791		loop_remove(lo);
1792		break;
1793	case LOOP_CTL_GET_FREE:
1794		ret = loop_lookup(&lo, -1);
1795		if (ret >= 0)
1796			break;
1797		ret = loop_add(&lo, -1);
1798	}
1799	mutex_unlock(&loop_index_mutex);
1800
1801	return ret;
1802}
1803
1804static const struct file_operations loop_ctl_fops = {
1805	.open		= nonseekable_open,
1806	.unlocked_ioctl	= loop_control_ioctl,
1807	.compat_ioctl	= loop_control_ioctl,
1808	.owner		= THIS_MODULE,
1809	.llseek		= noop_llseek,
1810};
1811
1812static struct miscdevice loop_misc = {
1813	.minor		= LOOP_CTRL_MINOR,
1814	.name		= "loop-control",
1815	.fops		= &loop_ctl_fops,
1816};
1817
1818MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
1819MODULE_ALIAS("devname:loop-control");
1820
1821static int __init loop_init(void)
1822{
1823	int i, nr;
1824	unsigned long range;
1825	struct loop_device *lo;
1826	int err;
1827
1828	err = misc_register(&loop_misc);
1829	if (err < 0)
1830		return err;
1831
1832	part_shift = 0;
1833	if (max_part > 0) {
1834		part_shift = fls(max_part);
1835
1836		/*
1837		 * Adjust max_part according to part_shift as it is exported
1838		 * to user space, so that users can work out the correct minor
1839		 * numbers when they want to create more devices.
1840		 *
1841		 * Note that -1 is required because partition 0 is reserved
1842		 * for the whole disk.
1843		 */
1844		max_part = (1UL << part_shift) - 1;
1845	}
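	/*
	 * Example: max_part=15 gives part_shift = fls(15) = 4, so every loop
	 * device spans 16 minors (the whole disk plus up to 15 partitions)
	 * and max_part is rounded to (1 << 4) - 1 = 15.
	 */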
1846
1847	if ((1UL << part_shift) > DISK_MAX_PARTS) {
1848		err = -EINVAL;
1849		goto misc_out;
1850	}
1851
1852	if (max_loop > 1UL << (MINORBITS - part_shift)) {
1853		err = -EINVAL;
1854		goto misc_out;
1855	}
1856
1857	/*
1858	 * If max_loop is specified, create that many devices upfront.
1859	 * This also becomes a hard limit. If max_loop is not specified,
1860	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
1861	 * init time. Loop devices can be requested on-demand with the
1862	 * /dev/loop-control interface, or be instantiated by accessing
1863	 * a 'dead' device node.
1864	 */
1865	if (max_loop) {
1866		nr = max_loop;
1867		range = max_loop << part_shift;
1868	} else {
1869		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
1870		range = 1UL << MINORBITS;
1871	}
1872
1873	if (register_blkdev(LOOP_MAJOR, "loop")) {
1874		err = -EIO;
1875		goto misc_out;
1876	}
1877
1878	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
1879				  THIS_MODULE, loop_probe, NULL, NULL);
1880
1881	/* pre-create number of devices given by config or max_loop */
1882	mutex_lock(&loop_index_mutex);
1883	for (i = 0; i < nr; i++)
1884		loop_add(&lo, i);
1885	mutex_unlock(&loop_index_mutex);
1886
1887	printk(KERN_INFO "loop: module loaded\n");
1888	return 0;
1889
1890misc_out:
1891	misc_deregister(&loop_misc);
1892	return err;
1893}
1894
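/* idr_for_each() callback used at module exit to destroy every device. */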
1895static int loop_exit_cb(int id, void *ptr, void *data)
1896{
1897	struct loop_device *lo = ptr;
1898
1899	loop_remove(lo);
1900	return 0;
1901}
1902
1903static void __exit loop_exit(void)
1904{
1905	unsigned long range;
1906
1907	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
1908
1909	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
1910	idr_destroy(&loop_index_idr);
1911
1912	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
1913	unregister_blkdev(LOOP_MAJOR, "loop");
1914
1915	misc_deregister(&loop_misc);
1916}
1917
1918module_init(loop_init);
1919module_exit(loop_exit);
1920
1921#ifndef MODULE
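/* Parse "max_loop=N" on the kernel command line when the driver is built in. */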
1922static int __init max_loop_setup(char *str)
1923{
1924	max_loop = simple_strtol(str, NULL, 0);
1925	return 1;
1926}
1927
1928__setup("max_loop=", max_loop_setup);
1929#endif