   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright 1993 by Theodore Ts'o.
   4 */
   5#include <linux/module.h>
   6#include <linux/moduleparam.h>
   7#include <linux/sched.h>
   8#include <linux/fs.h>
   9#include <linux/pagemap.h>
  10#include <linux/file.h>
  11#include <linux/stat.h>
  12#include <linux/errno.h>
  13#include <linux/major.h>
  14#include <linux/wait.h>
  15#include <linux/blkpg.h>
  16#include <linux/init.h>
  17#include <linux/swap.h>
  18#include <linux/slab.h>
  19#include <linux/compat.h>
  20#include <linux/suspend.h>
  21#include <linux/freezer.h>
  22#include <linux/mutex.h>
  23#include <linux/writeback.h>
  24#include <linux/completion.h>
  25#include <linux/highmem.h>
  26#include <linux/splice.h>
  27#include <linux/sysfs.h>
  28#include <linux/miscdevice.h>
  29#include <linux/falloc.h>
  30#include <linux/uio.h>
  31#include <linux/ioprio.h>
  32#include <linux/blk-cgroup.h>
  33#include <linux/sched/mm.h>
  34#include <linux/statfs.h>
  35#include <linux/uaccess.h>
  36#include <linux/blk-mq.h>
  37#include <linux/spinlock.h>
  38#include <uapi/linux/loop.h>
  39
  40/* Possible states of device */
  41enum {
  42	Lo_unbound,
  43	Lo_bound,
  44	Lo_rundown,
  45	Lo_deleting,
  46};
  47
  48struct loop_func_table;
  49
  50struct loop_device {
  51	int		lo_number;
  52	loff_t		lo_offset;
  53	loff_t		lo_sizelimit;
  54	int		lo_flags;
  55	char		lo_file_name[LO_NAME_SIZE];
  56
  57	struct file *	lo_backing_file;
  58	struct block_device *lo_device;
  59
  60	gfp_t		old_gfp_mask;
  61
  62	spinlock_t		lo_lock;
  63	int			lo_state;
  64	spinlock_t              lo_work_lock;
  65	struct workqueue_struct *workqueue;
  66	struct work_struct      rootcg_work;
  67	struct list_head        rootcg_cmd_list;
  68	struct list_head        idle_worker_list;
  69	struct rb_root          worker_tree;
  70	struct timer_list       timer;
  71	bool			use_dio;
  72	bool			sysfs_inited;
  73
  74	struct request_queue	*lo_queue;
  75	struct blk_mq_tag_set	tag_set;
  76	struct gendisk		*lo_disk;
  77	struct mutex		lo_mutex;
  78	bool			idr_visible;
  79};
  80
  81struct loop_cmd {
  82	struct list_head list_entry;
  83	bool use_aio; /* use AIO interface to handle I/O */
  84	atomic_t ref; /* only for aio */
  85	long ret;
  86	struct kiocb iocb;
  87	struct bio_vec *bvec;
  88	struct cgroup_subsys_state *blkcg_css;
  89	struct cgroup_subsys_state *memcg_css;
  90};
  91
  92#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
  93#define LOOP_DEFAULT_HW_Q_DEPTH 128
  94
  95static DEFINE_IDR(loop_index_idr);
  96static DEFINE_MUTEX(loop_ctl_mutex);
  97static DEFINE_MUTEX(loop_validate_mutex);
  98
  99/**
 100 * loop_global_lock_killable() - take locks for safe loop_validate_file() test
 101 *
 102 * @lo: struct loop_device
 103 * @global: true if @lo is about to bind another "struct loop_device", false otherwise
 104 *
 105 * Returns 0 on success, -EINTR otherwise.
 106 *
 107 * Since loop_validate_file() traverses other "struct loop_device" instances
 108 * when is_loop_device() is true, we need a global lock to serialize concurrent
 109 * loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
 110 */
 111static int loop_global_lock_killable(struct loop_device *lo, bool global)
 112{
 113	int err;
 114
 115	if (global) {
 116		err = mutex_lock_killable(&loop_validate_mutex);
 117		if (err)
 118			return err;
 119	}
 120	err = mutex_lock_killable(&lo->lo_mutex);
 121	if (err && global)
 122		mutex_unlock(&loop_validate_mutex);
 123	return err;
 124}
 125
 126/**
 127 * loop_global_unlock() - release locks taken by loop_global_lock_killable()
 128 *
 129 * @lo: struct loop_device
 130 * @global: true if @lo was about to bind another "struct loop_device", false otherwise
 131 */
 132static void loop_global_unlock(struct loop_device *lo, bool global)
 133{
 134	mutex_unlock(&lo->lo_mutex);
 135	if (global)
 136		mutex_unlock(&loop_validate_mutex);
 137}
 138
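/*
 * A minimal usage sketch (not part of this file): how the lock pair
 * brackets a critical section. @global is true whenever the operation may
 * bind one loop device on top of another, so that loop_validate_file()
 * can walk the chain safely.
 */
#if 0
static int example_critical_section(struct loop_device *lo, bool global)
{
	int err = loop_global_lock_killable(lo, global);

	if (err)
		return err;	/* -EINTR: fatal signal while waiting */
	/* ... inspect or rebind lo->lo_backing_file here ... */
	loop_global_unlock(lo, global);
	return 0;
}
#endif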
 139static int max_part;
 140static int part_shift;
 141
 142static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
 143{
 144	loff_t loopsize;
 145
 146	/* Compute loopsize in bytes */
 147	loopsize = i_size_read(file->f_mapping->host);
 148	if (offset > 0)
 149		loopsize -= offset;
 150	/* offset is beyond i_size, weird but possible */
 151	if (loopsize < 0)
 152		return 0;
 153
 154	if (sizelimit > 0 && sizelimit < loopsize)
 155		loopsize = sizelimit;
 156	/*
 157	 * Unfortunately, if we want to do I/O on the device,
 158	 * the number of 512-byte sectors has to fit into a sector_t.
 159	 */
 160	return loopsize >> 9;
 161}
 162
 163static loff_t get_loop_size(struct loop_device *lo, struct file *file)
 164{
 165	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
 166}
 167
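/*
 * A minimal userspace sketch (not part of this file) of the same size
 * computation, assuming a regular backing file; it mirrors get_size()
 * above, including the final shift to 512-byte sectors.
 */
#if 0
#include <sys/stat.h>

static long long loop_sectors(const char *path, long long offset,
			      long long sizelimit)
{
	struct stat st;
	long long loopsize;

	if (stat(path, &st) != 0)
		return -1;
	loopsize = st.st_size;
	if (offset > 0)
		loopsize -= offset;
	if (loopsize < 0)	/* offset beyond EOF, weird but possible */
		return 0;
	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	return loopsize >> 9;	/* 512-byte sectors */
}
#endif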
 168/*
 169 * We support direct I/O only if lo_offset is aligned with the logical I/O size
 170 * of backing device, and the logical block size of loop is bigger than that of
 171 * the backing device.
 172 */
 173static bool lo_bdev_can_use_dio(struct loop_device *lo,
 174		struct block_device *backing_bdev)
 175{
 176	unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
 177
 178	if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
 179		return false;
 180	if (lo->lo_offset & (sb_bsize - 1))
 181		return false;
 182	return true;
 183}
 184
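/*
 * A quick sketch (not part of this file) of the same two checks with
 * sample numbers: given a 4096-byte backing logical block size, an
 * lo_offset of 8192 passes (8192 & 4095 == 0) while 512 fails, and a
 * loop logical block size smaller than 4096 is rejected outright.
 */
#if 0
static bool example_can_use_dio(unsigned short backing_bsize,
				unsigned short loop_bsize, loff_t lo_offset)
{
	if (loop_bsize < backing_bsize)
		return false;
	if (lo_offset & (backing_bsize - 1))
		return false;
	return true;
}
#endif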
 185static void __loop_update_dio(struct loop_device *lo, bool dio)
 186{
 187	struct file *file = lo->lo_backing_file;
 188	struct inode *inode = file->f_mapping->host;
 189	struct block_device *backing_bdev = NULL;
 190	bool use_dio;
 191
 192	if (S_ISBLK(inode->i_mode))
 193		backing_bdev = I_BDEV(inode);
 194	else if (inode->i_sb->s_bdev)
 195		backing_bdev = inode->i_sb->s_bdev;
 196
 197	use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
 198		(!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
 199
 200	if (lo->use_dio == use_dio)
 201		return;
 202
 203	/* flush dirty pages before changing direct IO */
 204	vfs_fsync(file, 0);
 205
 206	/*
 207	 * LO_FLAGS_DIRECT_IO is handled like LO_FLAGS_READ_ONLY: both are
 208	 * set by the kernel, and losetup picks them up via
 209	 * ioctl(LOOP_GET_STATUS).
 210	 */
 211	if (lo->lo_state == Lo_bound)
 212		blk_mq_freeze_queue(lo->lo_queue);
 213	lo->use_dio = use_dio;
 214	if (use_dio) {
 215		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 216		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
 217	} else {
 218		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 219		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
 220	}
 221	if (lo->lo_state == Lo_bound)
 222		blk_mq_unfreeze_queue(lo->lo_queue);
 223}
 224
 225/**
 226 * loop_set_size() - sets device size and notifies userspace
 227 * @lo: struct loop_device to set the size for
 228 * @size: new size of the loop device
 229 *
 230 * Callers must validate that the size passed into this function fits into
 231 * a sector_t, e.g. using loop_validate_size().
 232 */
 233static void loop_set_size(struct loop_device *lo, loff_t size)
 234{
 235	if (!set_capacity_and_notify(lo->lo_disk, size))
 236		kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
 237}
 238
 239static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
 240{
 241	struct iov_iter i;
 242	ssize_t bw;
 243
 244	iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
 245
 246	bw = vfs_iter_write(file, &i, ppos, 0);
 247
 248	if (likely(bw == bvec->bv_len))
 249		return 0;
 250
 251	printk_ratelimited(KERN_ERR
 252		"loop: Write error at byte offset %llu, length %i.\n",
 253		(unsigned long long)*ppos, bvec->bv_len);
 254	if (bw >= 0)
 255		bw = -EIO;
 256	return bw;
 257}
 258
 259static int lo_write_simple(struct loop_device *lo, struct request *rq,
 260		loff_t pos)
 261{
 262	struct bio_vec bvec;
 263	struct req_iterator iter;
 264	int ret = 0;
 265
 266	rq_for_each_segment(bvec, rq, iter) {
 267		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
 268		if (ret < 0)
 269			break;
 270		cond_resched();
 271	}
 272
 273	return ret;
 274}
 275
 276static int lo_read_simple(struct loop_device *lo, struct request *rq,
 277		loff_t pos)
 278{
 279	struct bio_vec bvec;
 280	struct req_iterator iter;
 281	struct iov_iter i;
 282	ssize_t len;
 283
 284	rq_for_each_segment(bvec, rq, iter) {
 285		iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
 286		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
 287		if (len < 0)
 288			return len;
 289
 290		flush_dcache_page(bvec.bv_page);
 291
 292		if (len != bvec.bv_len) {
 293			struct bio *bio;
 294
 295			__rq_for_each_bio(bio, rq)
 296				zero_fill_bio(bio);
 297			break;
 298		}
 299		cond_resched();
 300	}
 301
 302	return 0;
 303}
 304
 305static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
 306			int mode)
 307{
 308	/*
 309	 * We use fallocate to manipulate the space mappings used by the image
 310	 * a.k.a. discard/zerorange.
 311	 */
 312	struct file *file = lo->lo_backing_file;
 313	int ret;
 314
 315	mode |= FALLOC_FL_KEEP_SIZE;
 316
 317	if (!bdev_max_discard_sectors(lo->lo_device))
 318		return -EOPNOTSUPP;
 319
 320	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
 321	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
 322		return -EIO;
 323	return ret;
 324}
 325
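/*
 * A userspace sketch (not part of this file) of what lo_fallocate() does
 * to the backing file for REQ_OP_DISCARD: FALLOC_FL_KEEP_SIZE is always
 * added, so the file length never changes.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>

static int punch_hole(int fd, off_t pos, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 pos, len);
}
#endif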
 326static int lo_req_flush(struct loop_device *lo, struct request *rq)
 327{
 328	int ret = vfs_fsync(lo->lo_backing_file, 0);
 329	if (unlikely(ret && ret != -EINVAL))
 330		ret = -EIO;
 331
 332	return ret;
 333}
 334
 335static void lo_complete_rq(struct request *rq)
 336{
 337	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 338	blk_status_t ret = BLK_STS_OK;
 339
 340	if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
 341	    req_op(rq) != REQ_OP_READ) {
 342		if (cmd->ret < 0)
 343			ret = errno_to_blk_status(cmd->ret);
 344		goto end_io;
 345	}
 346
 347	/*
 348	 * Short READ - if we got some data, advance our request and
 349	 * retry it. If we got no data, end the rest with EIO.
 350	 */
 351	if (cmd->ret) {
 352		blk_update_request(rq, BLK_STS_OK, cmd->ret);
 353		cmd->ret = 0;
 354		blk_mq_requeue_request(rq, true);
 355	} else {
 356		if (cmd->use_aio) {
 357			struct bio *bio = rq->bio;
 358
 359			while (bio) {
 360				zero_fill_bio(bio);
 361				bio = bio->bi_next;
 362			}
 363		}
 364		ret = BLK_STS_IOERR;
 365end_io:
 366		blk_mq_end_request(rq, ret);
 367	}
 368}
 369
 370static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
 371{
 372	struct request *rq = blk_mq_rq_from_pdu(cmd);
 373
 374	if (!atomic_dec_and_test(&cmd->ref))
 375		return;
 376	kfree(cmd->bvec);
 377	cmd->bvec = NULL;
 378	if (likely(!blk_should_fake_timeout(rq->q)))
 379		blk_mq_complete_request(rq);
 380}
 381
 382static void lo_rw_aio_complete(struct kiocb *iocb, long ret)
 383{
 384	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
 385
 386	cmd->ret = ret;
 387	lo_rw_aio_do_completion(cmd);
 388}
 389
 390static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 391		     loff_t pos, int rw)
 392{
 393	struct iov_iter iter;
 394	struct req_iterator rq_iter;
 395	struct bio_vec *bvec;
 396	struct request *rq = blk_mq_rq_from_pdu(cmd);
 397	struct bio *bio = rq->bio;
 398	struct file *file = lo->lo_backing_file;
 399	struct bio_vec tmp;
 400	unsigned int offset;
 401	int nr_bvec = 0;
 402	int ret;
 403
 404	rq_for_each_bvec(tmp, rq, rq_iter)
 405		nr_bvec++;
 406
 407	if (rq->bio != rq->biotail) {
 408
 409		bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
 410				     GFP_NOIO);
 411		if (!bvec)
 412			return -EIO;
 413		cmd->bvec = bvec;
 414
 415		/*
 416		 * The bios of the request may be started from the middle of
 417		 * the 'bvec' because of bio splitting, so we can't directly
 418		 * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec
 419		 * API will take care of all details for us.
 420		 */
 421		rq_for_each_bvec(tmp, rq, rq_iter) {
 422			*bvec = tmp;
 423			bvec++;
 424		}
 425		bvec = cmd->bvec;
 426		offset = 0;
 427	} else {
 428		/*
 429		 * Same here, this bio may be started from the middle of the
 430		 * 'bvec' because of bio splitting, so offset from the bvec
 431		 * must be passed to iov iterator
 432		 */
 433		offset = bio->bi_iter.bi_bvec_done;
 434		bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
 435	}
 436	atomic_set(&cmd->ref, 2);
 437
 438	iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
 439	iter.iov_offset = offset;
 440
 441	cmd->iocb.ki_pos = pos;
 442	cmd->iocb.ki_filp = file;
 443	cmd->iocb.ki_complete = lo_rw_aio_complete;
 444	cmd->iocb.ki_flags = IOCB_DIRECT;
 445	cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
 446
 447	if (rw == ITER_SOURCE)
 448		ret = call_write_iter(file, &cmd->iocb, &iter);
 449	else
 450		ret = call_read_iter(file, &cmd->iocb, &iter);
 451
 452	lo_rw_aio_do_completion(cmd);
 453
 454	if (ret != -EIOCBQUEUED)
 455		lo_rw_aio_complete(&cmd->iocb, ret);
 456	return 0;
 457}
 458
 459static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 460{
 461	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 462	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
 463
 464	/*
 465	 * lo_write_simple and lo_read_simple could in principle be
 466	 * replaced by an io-submit style function like lo_rw_aio(). One
 467	 * blocker is that lo_read_simple() needs to call flush_dcache_page()
 468	 * after each page is written from the kernel, and that is hard to
 469	 * handle in an io-submit style function which submits all segments
 470	 * of the request at one time. Direct read I/O doesn't need to
 471	 * run flush_dcache_page().
 472	 */
 473	switch (req_op(rq)) {
 474	case REQ_OP_FLUSH:
 475		return lo_req_flush(lo, rq);
 476	case REQ_OP_WRITE_ZEROES:
 477		/*
 478		 * If the caller doesn't want deallocation, call zeroout to
 479		 * write zeroes to the range.  Otherwise, punch them out.
 480		 */
 481		return lo_fallocate(lo, rq, pos,
 482			(rq->cmd_flags & REQ_NOUNMAP) ?
 483				FALLOC_FL_ZERO_RANGE :
 484				FALLOC_FL_PUNCH_HOLE);
 485	case REQ_OP_DISCARD:
 486		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
 487	case REQ_OP_WRITE:
 488		if (cmd->use_aio)
 489			return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
 490		else
 491			return lo_write_simple(lo, rq, pos);
 492	case REQ_OP_READ:
 493		if (cmd->use_aio)
 494			return lo_rw_aio(lo, cmd, pos, ITER_DEST);
 495		else
 496			return lo_read_simple(lo, rq, pos);
 497	default:
 498		WARN_ON_ONCE(1);
 499		return -EIO;
 500	}
 501}
 502
 503static inline void loop_update_dio(struct loop_device *lo)
 504{
 505	__loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
 506				lo->use_dio);
 507}
 508
 509static void loop_reread_partitions(struct loop_device *lo)
 510{
 511	int rc;
 512
 513	mutex_lock(&lo->lo_disk->open_mutex);
 514	rc = bdev_disk_changed(lo->lo_disk, false);
 515	mutex_unlock(&lo->lo_disk->open_mutex);
 516	if (rc)
 517		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
 518			__func__, lo->lo_number, lo->lo_file_name, rc);
 519}
 520
 521static inline int is_loop_device(struct file *file)
 522{
 523	struct inode *i = file->f_mapping->host;
 524
 525	return i && S_ISBLK(i->i_mode) && imajor(i) == LOOP_MAJOR;
 526}
 527
 528static int loop_validate_file(struct file *file, struct block_device *bdev)
 529{
 530	struct inode	*inode = file->f_mapping->host;
 531	struct file	*f = file;
 532
 533	/* Avoid recursion */
 534	while (is_loop_device(f)) {
 535		struct loop_device *l;
 536
 537		lockdep_assert_held(&loop_validate_mutex);
 538		if (f->f_mapping->host->i_rdev == bdev->bd_dev)
 539			return -EBADF;
 540
 541		l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
 542		if (l->lo_state != Lo_bound)
 543			return -EINVAL;
 544		/* Order wrt setting lo->lo_backing_file in loop_configure(). */
 545		rmb();
 546		f = l->lo_backing_file;
 547	}
 548	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
 549		return -EINVAL;
 550	return 0;
 551}
 552
 553/*
 554 * loop_change_fd switches the backing store of a loopback device to
 555 * a new file. This is useful for operating system installers to free up
 556 * the original file and in High Availability environments to switch to
 557 * an alternative location for the content in case of server meltdown.
 558 * This can only work if the loop device is used read-only, and if the
 559 * new backing store is the same size and type as the old backing store.
 560 */
 561static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 562			  unsigned int arg)
 563{
 564	struct file *file = fget(arg);
 565	struct file *old_file;
 566	int error;
 567	bool partscan;
 568	bool is_loop;
 569
 570	if (!file)
 571		return -EBADF;
 572
 573	/* suppress uevents while reconfiguring the device */
 574	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
 575
 576	is_loop = is_loop_device(file);
 577	error = loop_global_lock_killable(lo, is_loop);
 578	if (error)
 579		goto out_putf;
 580	error = -ENXIO;
 581	if (lo->lo_state != Lo_bound)
 582		goto out_err;
 583
 584	/* the loop device has to be read-only */
 585	error = -EINVAL;
 586	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
 587		goto out_err;
 588
 589	error = loop_validate_file(file, bdev);
 590	if (error)
 591		goto out_err;
 592
 593	old_file = lo->lo_backing_file;
 594
 595	error = -EINVAL;
 596
 597	/* size of the new backing store needs to be the same */
 598	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
 599		goto out_err;
 600
 601	/* and ... switch */
 602	disk_force_media_change(lo->lo_disk);
 603	blk_mq_freeze_queue(lo->lo_queue);
 604	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
 605	lo->lo_backing_file = file;
 606	lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
 607	mapping_set_gfp_mask(file->f_mapping,
 608			     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 609	loop_update_dio(lo);
 610	blk_mq_unfreeze_queue(lo->lo_queue);
 611	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
 612	loop_global_unlock(lo, is_loop);
 613
 614	/*
 615	 * Flush loop_validate_file() before fput(), for l->lo_backing_file
 616	 * might be pointing at old_file which might be the last reference.
 617	 */
 618	if (!is_loop) {
 619		mutex_lock(&loop_validate_mutex);
 620		mutex_unlock(&loop_validate_mutex);
 621	}
 622	/*
 623	 * We must drop file reference outside of lo_mutex as dropping
 624	 * the file ref can take open_mutex which creates circular locking
 625	 * dependency.
 626	 */
 627	fput(old_file);
 628	if (partscan)
 629		loop_reread_partitions(lo);
 630
 631	error = 0;
 632done:
 633	/* enable and uncork uevent now that we are done */
 634	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
 635	return error;
 636
 637out_err:
 638	loop_global_unlock(lo, is_loop);
 639out_putf:
 640	fput(file);
 641	goto done;
 642}
 643
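/*
 * A userspace sketch (not part of this file) of driving LOOP_CHANGE_FD:
 * the ioctl argument is the new file descriptor itself. The device must
 * be bound read-only, and "new.img" (a hypothetical path) must match the
 * old backing file's size, or the call fails.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static int change_backing(const char *loopdev)
{
	int lfd = open(loopdev, O_RDWR);
	int ffd = open("new.img", O_RDONLY);

	if (lfd < 0 || ffd < 0)
		return -1;
	return ioctl(lfd, LOOP_CHANGE_FD, ffd);
}
#endif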
 644/* loop sysfs attributes */
 645
 646static ssize_t loop_attr_show(struct device *dev, char *page,
 647			      ssize_t (*callback)(struct loop_device *, char *))
 648{
 649	struct gendisk *disk = dev_to_disk(dev);
 650	struct loop_device *lo = disk->private_data;
 651
 652	return callback(lo, page);
 653}
 654
 655#define LOOP_ATTR_RO(_name)						\
 656static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
 657static ssize_t loop_attr_do_show_##_name(struct device *d,		\
 658				struct device_attribute *attr, char *b)	\
 659{									\
 660	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
 661}									\
 662static struct device_attribute loop_attr_##_name =			\
 663	__ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);
 664
 665static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
 666{
 667	ssize_t ret;
 668	char *p = NULL;
 669
 670	spin_lock_irq(&lo->lo_lock);
 671	if (lo->lo_backing_file)
 672		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
 673	spin_unlock_irq(&lo->lo_lock);
 674
 675	if (IS_ERR_OR_NULL(p))
 676		ret = PTR_ERR(p);
 677	else {
 678		ret = strlen(p);
 679		memmove(buf, p, ret);
 680		buf[ret++] = '\n';
 681		buf[ret] = 0;
 682	}
 683
 684	return ret;
 685}
 686
 687static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
 688{
 689	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
 690}
 691
 692static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
 693{
 694	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
 695}
 696
 697static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
 698{
 699	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
 700
 701	return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0");
 702}
 703
 704static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
 705{
 706	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
 707
 708	return sysfs_emit(buf, "%s\n", partscan ? "1" : "0");
 709}
 710
 711static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
 712{
 713	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
 714
 715	return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
 716}
 717
 718LOOP_ATTR_RO(backing_file);
 719LOOP_ATTR_RO(offset);
 720LOOP_ATTR_RO(sizelimit);
 721LOOP_ATTR_RO(autoclear);
 722LOOP_ATTR_RO(partscan);
 723LOOP_ATTR_RO(dio);
 724
 725static struct attribute *loop_attrs[] = {
 726	&loop_attr_backing_file.attr,
 727	&loop_attr_offset.attr,
 728	&loop_attr_sizelimit.attr,
 729	&loop_attr_autoclear.attr,
 730	&loop_attr_partscan.attr,
 731	&loop_attr_dio.attr,
 732	NULL,
 733};
 734
 735static struct attribute_group loop_attribute_group = {
 736	.name = "loop",
 737	.attrs = loop_attrs,
 738};
 739
 740static void loop_sysfs_init(struct loop_device *lo)
 741{
 742	lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
 743						&loop_attribute_group);
 744}
 745
 746static void loop_sysfs_exit(struct loop_device *lo)
 747{
 748	if (lo->sysfs_inited)
 749		sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
 750				   &loop_attribute_group);
 751}
 752
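/*
 * Because the attribute group is named "loop", these read-only files
 * appear under /sys/block/loopN/loop/. A small sketch (not part of this
 * file) reading one of them:
 */
#if 0
#include <stdio.h>

static void print_backing_file(int minor)
{
	char path[64], buf[4096];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/block/loop%d/loop/backing_file", minor);
	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("loop%d -> %s", minor, buf);
		fclose(f);
	}
}
#endif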
 753static void loop_config_discard(struct loop_device *lo)
 754{
 755	struct file *file = lo->lo_backing_file;
 756	struct inode *inode = file->f_mapping->host;
 757	struct request_queue *q = lo->lo_queue;
 758	u32 granularity, max_discard_sectors;
 759
 760	/*
 761	 * If the backing device is a block device, mirror its zeroing
 762	 * capability. Set the discard sectors to the block device's zeroing
 763	 * capabilities because loop discards result in blkdev_issue_zeroout(),
 764	 * not blkdev_issue_discard(). This maintains consistent behavior with
 765	 * file-backed loop devices: discarded regions read back as zero.
 766	 */
 767	if (S_ISBLK(inode->i_mode)) {
 768		struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
 769
 770		max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
 771		granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
 772			queue_physical_block_size(backingq);
 773
 774	/*
 775	 * We use punch hole to reclaim the free space used by the
 776	 * image a.k.a. discard.
 777	 */
 778	} else if (!file->f_op->fallocate) {
 779		max_discard_sectors = 0;
 780		granularity = 0;
 781
 782	} else {
 783		struct kstatfs sbuf;
 784
 785		max_discard_sectors = UINT_MAX >> 9;
 786		if (!vfs_statfs(&file->f_path, &sbuf))
 787			granularity = sbuf.f_bsize;
 788		else
 789			max_discard_sectors = 0;
 790	}
 791
 792	if (max_discard_sectors) {
 793		q->limits.discard_granularity = granularity;
 794		blk_queue_max_discard_sectors(q, max_discard_sectors);
 795		blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
 796	} else {
 797		q->limits.discard_granularity = 0;
 798		blk_queue_max_discard_sectors(q, 0);
 799		blk_queue_max_write_zeroes_sectors(q, 0);
 800	}
 801}
 802
 803struct loop_worker {
 804	struct rb_node rb_node;
 805	struct work_struct work;
 806	struct list_head cmd_list;
 807	struct list_head idle_list;
 808	struct loop_device *lo;
 809	struct cgroup_subsys_state *blkcg_css;
 810	unsigned long last_ran_at;
 811};
 812
 813static void loop_workfn(struct work_struct *work);
 814
 815#ifdef CONFIG_BLK_CGROUP
 816static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
 817{
 818	return !css || css == blkcg_root_css;
 819}
 820#else
 821static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
 822{
 823	return !css;
 824}
 825#endif
 826
 827static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
 828{
 829	struct rb_node **node, *parent = NULL;
 830	struct loop_worker *cur_worker, *worker = NULL;
 831	struct work_struct *work;
 832	struct list_head *cmd_list;
 833
 834	spin_lock_irq(&lo->lo_work_lock);
 835
 836	if (queue_on_root_worker(cmd->blkcg_css))
 837		goto queue_work;
 838
 839	node = &lo->worker_tree.rb_node;
 840
 841	while (*node) {
 842		parent = *node;
 843		cur_worker = container_of(*node, struct loop_worker, rb_node);
 844		if (cur_worker->blkcg_css == cmd->blkcg_css) {
 845			worker = cur_worker;
 846			break;
 847		} else if ((long)cur_worker->blkcg_css < (long)cmd->blkcg_css) {
 848			node = &(*node)->rb_left;
 849		} else {
 850			node = &(*node)->rb_right;
 851		}
 852	}
 853	if (worker)
 854		goto queue_work;
 855
 856	worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
 857	/*
 858	 * In the event we cannot allocate a worker, just queue on the
 859	 * rootcg worker and issue the I/O as the rootcg.
 860	 */
 861	if (!worker) {
 862		cmd->blkcg_css = NULL;
 863		if (cmd->memcg_css)
 864			css_put(cmd->memcg_css);
 865		cmd->memcg_css = NULL;
 866		goto queue_work;
 867	}
 868
 869	worker->blkcg_css = cmd->blkcg_css;
 870	css_get(worker->blkcg_css);
 871	INIT_WORK(&worker->work, loop_workfn);
 872	INIT_LIST_HEAD(&worker->cmd_list);
 873	INIT_LIST_HEAD(&worker->idle_list);
 874	worker->lo = lo;
 875	rb_link_node(&worker->rb_node, parent, node);
 876	rb_insert_color(&worker->rb_node, &lo->worker_tree);
 877queue_work:
 878	if (worker) {
 879		/*
 880		 * We need to remove from the idle list here while
 881		 * holding the lock so that the idle timer doesn't
 882		 * free the worker
 883		 */
 884		if (!list_empty(&worker->idle_list))
 885			list_del_init(&worker->idle_list);
 886		work = &worker->work;
 887		cmd_list = &worker->cmd_list;
 888	} else {
 889		work = &lo->rootcg_work;
 890		cmd_list = &lo->rootcg_cmd_list;
 891	}
 892	list_add_tail(&cmd->list_entry, cmd_list);
 893	queue_work(lo->workqueue, work);
 894	spin_unlock_irq(&lo->lo_work_lock);
 895}
 896
 897static void loop_set_timer(struct loop_device *lo)
 898{
 899	timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT);
 900}
 901
 902static void loop_free_idle_workers(struct loop_device *lo, bool delete_all)
 903{
 904	struct loop_worker *pos, *worker;
 905
 906	spin_lock_irq(&lo->lo_work_lock);
 907	list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
 908				idle_list) {
 909		if (!delete_all &&
 910		    time_is_after_jiffies(worker->last_ran_at +
 911					  LOOP_IDLE_WORKER_TIMEOUT))
 912			break;
 913		list_del(&worker->idle_list);
 914		rb_erase(&worker->rb_node, &lo->worker_tree);
 915		css_put(worker->blkcg_css);
 916		kfree(worker);
 917	}
 918	if (!list_empty(&lo->idle_worker_list))
 919		loop_set_timer(lo);
 920	spin_unlock_irq(&lo->lo_work_lock);
 921}
 922
 923static void loop_free_idle_workers_timer(struct timer_list *timer)
 924{
 925	struct loop_device *lo = container_of(timer, struct loop_device, timer);
 926
 927	return loop_free_idle_workers(lo, false);
 928}
 929
 930static void loop_update_rotational(struct loop_device *lo)
 931{
 932	struct file *file = lo->lo_backing_file;
 933	struct inode *file_inode = file->f_mapping->host;
 934	struct block_device *file_bdev = file_inode->i_sb->s_bdev;
 935	struct request_queue *q = lo->lo_queue;
 936	bool nonrot = true;
 937
 938	/* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
 939	if (file_bdev)
 940		nonrot = bdev_nonrot(file_bdev);
 941
 942	if (nonrot)
 943		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 944	else
 945		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
 946}
 947
 948/**
 949 * loop_set_status_from_info - configure device from loop_info
 950 * @lo: struct loop_device to configure
 951 * @info: struct loop_info64 to configure the device with
 952 *
 953 * Configures the loop device parameters according to the passed
 954 * in loop_info64 configuration.
 955 */
 956static int
 957loop_set_status_from_info(struct loop_device *lo,
 958			  const struct loop_info64 *info)
 959{
 960	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
 961		return -EINVAL;
 962
 963	switch (info->lo_encrypt_type) {
 964	case LO_CRYPT_NONE:
 965		break;
 966	case LO_CRYPT_XOR:
 967		pr_warn("support for the xor transformation has been removed.\n");
 968		return -EINVAL;
 969	case LO_CRYPT_CRYPTOAPI:
 970		pr_warn("support for cryptoloop has been removed.  Use dm-crypt instead.\n");
 971		return -EINVAL;
 972	default:
 973		return -EINVAL;
 974	}
 975
 976	/* Avoid assigning overflow values */
 977	if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
 978		return -EOVERFLOW;
 979
 980	lo->lo_offset = info->lo_offset;
 981	lo->lo_sizelimit = info->lo_sizelimit;
 982
 983	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
 984	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
 985	lo->lo_flags = info->lo_flags;
 986	return 0;
 987}
 988
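/*
 * A sketch (not part of this file) of a loop_info64 this function would
 * accept: skip a hypothetical 1 MiB header and bind read-only with
 * partition scanning. Userspace passes it via LOOP_SET_STATUS64 or as
 * the "info" member of struct loop_config.
 */
#if 0
#include <string.h>
#include <linux/loop.h>

static void example_fill_info(struct loop_info64 *info)
{
	memset(info, 0, sizeof(*info));
	info->lo_offset = 1 << 20;	/* start 1 MiB into the file */
	info->lo_flags = LO_FLAGS_READ_ONLY | LO_FLAGS_PARTSCAN;
	strncpy((char *)info->lo_file_name, "disk.img", LO_NAME_SIZE - 1);
}
#endif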
 989static int loop_configure(struct loop_device *lo, blk_mode_t mode,
 990			  struct block_device *bdev,
 991			  const struct loop_config *config)
 992{
 993	struct file *file = fget(config->fd);
 994	struct inode *inode;
 995	struct address_space *mapping;
 996	int error;
 997	loff_t size;
 998	bool partscan;
 999	unsigned short bsize;
1000	bool is_loop;
1001
1002	if (!file)
1003		return -EBADF;
1004	is_loop = is_loop_device(file);
1005
1006	/* This is safe, since we have a reference from open(). */
1007	__module_get(THIS_MODULE);
1008
1009	/*
1010	 * If we don't hold an exclusive handle for the device, upgrade to
1011	 * one here to avoid changing the device under its exclusive owner.
1012	 */
1013	if (!(mode & BLK_OPEN_EXCL)) {
1014		error = bd_prepare_to_claim(bdev, loop_configure, NULL);
1015		if (error)
1016			goto out_putf;
1017	}
1018
1019	error = loop_global_lock_killable(lo, is_loop);
1020	if (error)
1021		goto out_bdev;
1022
1023	error = -EBUSY;
1024	if (lo->lo_state != Lo_unbound)
1025		goto out_unlock;
1026
1027	error = loop_validate_file(file, bdev);
1028	if (error)
1029		goto out_unlock;
1030
1031	mapping = file->f_mapping;
1032	inode = mapping->host;
1033
1034	if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
1035		error = -EINVAL;
1036		goto out_unlock;
1037	}
1038
1039	if (config->block_size) {
1040		error = blk_validate_block_size(config->block_size);
1041		if (error)
1042			goto out_unlock;
1043	}
1044
1045	error = loop_set_status_from_info(lo, &config->info);
1046	if (error)
1047		goto out_unlock;
1048
1049	if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
1050	    !file->f_op->write_iter)
1051		lo->lo_flags |= LO_FLAGS_READ_ONLY;
1052
1053	if (!lo->workqueue) {
1054		lo->workqueue = alloc_workqueue("loop%d",
1055						WQ_UNBOUND | WQ_FREEZABLE,
1056						0, lo->lo_number);
1057		if (!lo->workqueue) {
1058			error = -ENOMEM;
1059			goto out_unlock;
1060		}
1061	}
1062
1063	/* suppress uevents while reconfiguring the device */
1064	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
1065
1066	disk_force_media_change(lo->lo_disk);
1067	set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
1068
1069	lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
1070	lo->lo_device = bdev;
1071	lo->lo_backing_file = file;
1072	lo->old_gfp_mask = mapping_gfp_mask(mapping);
1073	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
1074
1075	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
1076		blk_queue_write_cache(lo->lo_queue, true, false);
1077
1078	if (config->block_size)
1079		bsize = config->block_size;
1080	else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
1081		/* In case of direct I/O, match underlying block size */
1082		bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
1083	else
1084		bsize = 512;
1085
1086	blk_queue_logical_block_size(lo->lo_queue, bsize);
1087	blk_queue_physical_block_size(lo->lo_queue, bsize);
1088	blk_queue_io_min(lo->lo_queue, bsize);
1089
1090	loop_config_discard(lo);
1091	loop_update_rotational(lo);
1092	loop_update_dio(lo);
1093	loop_sysfs_init(lo);
1094
1095	size = get_loop_size(lo, file);
1096	loop_set_size(lo, size);
1097
1098	/* Order wrt reading lo_state in loop_validate_file(). */
1099	wmb();
1100
1101	lo->lo_state = Lo_bound;
1102	if (part_shift)
1103		lo->lo_flags |= LO_FLAGS_PARTSCAN;
1104	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
1105	if (partscan)
1106		clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
1107
1108	/* enable and uncork uevent now that we are done */
1109	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
1110
1111	loop_global_unlock(lo, is_loop);
1112	if (partscan)
1113		loop_reread_partitions(lo);
1114
1115	if (!(mode & BLK_OPEN_EXCL))
1116		bd_abort_claiming(bdev, loop_configure);
1117
1118	return 0;
1119
1120out_unlock:
1121	loop_global_unlock(lo, is_loop);
1122out_bdev:
1123	if (!(mode & BLK_OPEN_EXCL))
1124		bd_abort_claiming(bdev, loop_configure);
1125out_putf:
1126	fput(file);
1127	/* This is safe: open() is still holding a reference. */
1128	module_put(THIS_MODULE);
1129	return error;
1130}
1131
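/*
 * A userspace sketch (not part of this file) of the LOOP_CONFIGURE path
 * above. "/dev/loop0" and "disk.img" are hypothetical; real code would
 * first ask /dev/loop-control (LOOP_CTL_GET_FREE) for a free minor.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static int bind_loop(void)
{
	struct loop_config cfg;
	int lfd = open("/dev/loop0", O_RDWR);
	int ffd = open("disk.img", O_RDWR);

	if (lfd < 0 || ffd < 0)
		return -1;
	memset(&cfg, 0, sizeof(cfg));
	cfg.fd = ffd;
	cfg.block_size = 4096;			/* optional; 0 = default */
	cfg.info.lo_flags = LO_FLAGS_DIRECT_IO;	/* request direct I/O */
	return ioctl(lfd, LOOP_CONFIGURE, &cfg);
}
#endif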
1132static void __loop_clr_fd(struct loop_device *lo, bool release)
1133{
1134	struct file *filp;
1135	gfp_t gfp = lo->old_gfp_mask;
1136
1137	if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
1138		blk_queue_write_cache(lo->lo_queue, false, false);
1139
1140	/*
1141	 * Freeze the request queue when unbinding on a live file descriptor and
1142	 * thus an open device.  When called from ->release we are guaranteed
1143	 * that there is no I/O in progress already.
1144	 */
1145	if (!release)
1146		blk_mq_freeze_queue(lo->lo_queue);
1147
1148	spin_lock_irq(&lo->lo_lock);
1149	filp = lo->lo_backing_file;
1150	lo->lo_backing_file = NULL;
1151	spin_unlock_irq(&lo->lo_lock);
1152
1153	lo->lo_device = NULL;
1154	lo->lo_offset = 0;
1155	lo->lo_sizelimit = 0;
1156	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
1157	blk_queue_logical_block_size(lo->lo_queue, 512);
1158	blk_queue_physical_block_size(lo->lo_queue, 512);
1159	blk_queue_io_min(lo->lo_queue, 512);
1160	invalidate_disk(lo->lo_disk);
1161	loop_sysfs_exit(lo);
1162	/* let user-space know about this change */
1163	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
1164	mapping_set_gfp_mask(filp->f_mapping, gfp);
1165	/* This is safe: open() is still holding a reference. */
1166	module_put(THIS_MODULE);
1167	if (!release)
1168		blk_mq_unfreeze_queue(lo->lo_queue);
1169
1170	disk_force_media_change(lo->lo_disk);
1171
1172	if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
1173		int err;
1174
1175		/*
1176		 * open_mutex is already held in the release path, so don't
1177		 * acquire it again when this function is called from there.
1178		 *
1179		 * If the reread partition isn't from release path, lo_refcnt
1180		 * must be at least one and it can only become zero when the
1181		 * current holder is released.
1182		 */
1183		if (!release)
1184			mutex_lock(&lo->lo_disk->open_mutex);
1185		err = bdev_disk_changed(lo->lo_disk, false);
1186		if (!release)
1187			mutex_unlock(&lo->lo_disk->open_mutex);
1188		if (err)
1189			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
1190				__func__, lo->lo_number, err);
1191		/* Device is gone, no point in returning error */
1192	}
1193
1194	/*
1195	 * lo->lo_state is set to Lo_unbound here after above partscan has
1196	 * finished. There cannot be anybody else entering __loop_clr_fd() as
1197	 * Lo_rundown state protects us from all the other places trying to
1198	 * change the 'lo' device.
1199	 */
1200	lo->lo_flags = 0;
1201	if (!part_shift)
1202		set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
1203	mutex_lock(&lo->lo_mutex);
1204	lo->lo_state = Lo_unbound;
1205	mutex_unlock(&lo->lo_mutex);
1206
1207	/*
1208	 * Need not hold lo_mutex to fput backing file. Calling fput holding
1209	 * lo_mutex triggers a circular lock dependency possibility warning as
1210	 * fput can take open_mutex which is usually taken before lo_mutex.
1211	 */
1212	fput(filp);
1213}
1214
1215static int loop_clr_fd(struct loop_device *lo)
1216{
1217	int err;
1218
1219	/*
1220	 * Since lo_ioctl() is called without locks held, it is possible that
1221	 * loop_configure()/loop_change_fd() and loop_clr_fd() run in parallel.
1222	 *
1223	 * Therefore, use global lock when setting Lo_rundown state in order to
1224	 * make sure that loop_validate_file() will fail if the "struct file"
1225	 * which loop_configure()/loop_change_fd() found via fget() was this
1226	 * loop device.
1227	 */
1228	err = loop_global_lock_killable(lo, true);
1229	if (err)
1230		return err;
1231	if (lo->lo_state != Lo_bound) {
1232		loop_global_unlock(lo, true);
1233		return -ENXIO;
1234	}
1235	/*
1236	 * If we've explicitly asked to tear down the loop device,
1237	 * and it has an elevated reference count, set it for auto-teardown when
1238	 * the last reference goes away. This stops $!~#$@ udev from
1239	 * preventing teardown because it decided that it needs to run blkid on
1240	 * the loopback device whenever they appear. xfstests is notorious for
1241	 * failing tests because blkid via udev races with a losetup
1242	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
1243	 * command to fail with EBUSY.
1244	 */
1245	if (disk_openers(lo->lo_disk) > 1) {
1246		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
1247		loop_global_unlock(lo, true);
1248		return 0;
1249	}
1250	lo->lo_state = Lo_rundown;
1251	loop_global_unlock(lo, true);
1252
1253	__loop_clr_fd(lo, false);
1254	return 0;
1255}
1256
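/*
 * A userspace sketch (not part of this file) of LOOP_CLR_FD. As the
 * comment above explains, if other openers exist the ioctl merely arms
 * LO_FLAGS_AUTOCLEAR and still returns 0; teardown happens on the last
 * close.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static int detach_loop(const char *loopdev)
{
	int lfd = open(loopdev, O_RDWR);

	if (lfd < 0)
		return -1;
	return ioctl(lfd, LOOP_CLR_FD, 0);
}
#endif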
1257static int
1258loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1259{
1260	int err;
1261	int prev_lo_flags;
1262	bool partscan = false;
1263	bool size_changed = false;
1264
1265	err = mutex_lock_killable(&lo->lo_mutex);
1266	if (err)
1267		return err;
1268	if (lo->lo_state != Lo_bound) {
1269		err = -ENXIO;
1270		goto out_unlock;
1271	}
1272
1273	if (lo->lo_offset != info->lo_offset ||
1274	    lo->lo_sizelimit != info->lo_sizelimit) {
1275		size_changed = true;
1276		sync_blockdev(lo->lo_device);
1277		invalidate_bdev(lo->lo_device);
1278	}
1279
 1280	/* I/O needs to be drained while the settings change */
1281	blk_mq_freeze_queue(lo->lo_queue);
1282
1283	prev_lo_flags = lo->lo_flags;
1284
1285	err = loop_set_status_from_info(lo, info);
1286	if (err)
1287		goto out_unfreeze;
1288
1289	/* Mask out flags that can't be set using LOOP_SET_STATUS. */
1290	lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
1291	/* For those flags, use the previous values instead */
1292	lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
1293	/* For flags that can't be cleared, use previous values too */
1294	lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
1295
1296	if (size_changed) {
1297		loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
1298					   lo->lo_backing_file);
1299		loop_set_size(lo, new_size);
1300	}
1301
1302	/* update dio if lo_offset or transfer is changed */
1303	__loop_update_dio(lo, lo->use_dio);
1304
1305out_unfreeze:
1306	blk_mq_unfreeze_queue(lo->lo_queue);
1307
1308	if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
1309	     !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
1310		clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
1311		partscan = true;
1312	}
1313out_unlock:
1314	mutex_unlock(&lo->lo_mutex);
1315	if (partscan)
1316		loop_reread_partitions(lo);
1317
1318	return err;
1319}
1320
1321static int
1322loop_get_status(struct loop_device *lo, struct loop_info64 *info)
1323{
1324	struct path path;
1325	struct kstat stat;
1326	int ret;
1327
1328	ret = mutex_lock_killable(&lo->lo_mutex);
1329	if (ret)
1330		return ret;
1331	if (lo->lo_state != Lo_bound) {
1332		mutex_unlock(&lo->lo_mutex);
1333		return -ENXIO;
1334	}
1335
1336	memset(info, 0, sizeof(*info));
1337	info->lo_number = lo->lo_number;
1338	info->lo_offset = lo->lo_offset;
1339	info->lo_sizelimit = lo->lo_sizelimit;
1340	info->lo_flags = lo->lo_flags;
1341	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1342
1343	/* Drop lo_mutex while we call into the filesystem. */
1344	path = lo->lo_backing_file->f_path;
1345	path_get(&path);
1346	mutex_unlock(&lo->lo_mutex);
1347	ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
1348	if (!ret) {
1349		info->lo_device = huge_encode_dev(stat.dev);
1350		info->lo_inode = stat.ino;
1351		info->lo_rdevice = huge_encode_dev(stat.rdev);
1352	}
1353	path_put(&path);
1354	return ret;
1355}
1356
1357static void
1358loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
1359{
1360	memset(info64, 0, sizeof(*info64));
1361	info64->lo_number = info->lo_number;
1362	info64->lo_device = info->lo_device;
1363	info64->lo_inode = info->lo_inode;
1364	info64->lo_rdevice = info->lo_rdevice;
1365	info64->lo_offset = info->lo_offset;
1366	info64->lo_sizelimit = 0;
1367	info64->lo_flags = info->lo_flags;
1368	memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
1369}
1370
1371static int
1372loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
1373{
1374	memset(info, 0, sizeof(*info));
1375	info->lo_number = info64->lo_number;
1376	info->lo_device = info64->lo_device;
1377	info->lo_inode = info64->lo_inode;
1378	info->lo_rdevice = info64->lo_rdevice;
1379	info->lo_offset = info64->lo_offset;
1380	info->lo_flags = info64->lo_flags;
1381	memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
1382
1383	/* error in case values were truncated */
1384	if (info->lo_device != info64->lo_device ||
1385	    info->lo_rdevice != info64->lo_rdevice ||
1386	    info->lo_inode != info64->lo_inode ||
1387	    info->lo_offset != info64->lo_offset)
1388		return -EOVERFLOW;
1389
1390	return 0;
1391}
1392
1393static int
1394loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1395{
1396	struct loop_info info;
1397	struct loop_info64 info64;
1398
1399	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
1400		return -EFAULT;
1401	loop_info64_from_old(&info, &info64);
1402	return loop_set_status(lo, &info64);
1403}
1404
1405static int
1406loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1407{
1408	struct loop_info64 info64;
1409
1410	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
1411		return -EFAULT;
1412	return loop_set_status(lo, &info64);
1413}
1414
1415static int
1416loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1417	struct loop_info info;
1418	struct loop_info64 info64;
1419	int err;
1420
1421	if (!arg)
1422		return -EINVAL;
1423	err = loop_get_status(lo, &info64);
1424	if (!err)
1425		err = loop_info64_to_old(&info64, &info);
1426	if (!err && copy_to_user(arg, &info, sizeof(info)))
1427		err = -EFAULT;
1428
1429	return err;
1430}
1431
1432static int
1433loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1434	struct loop_info64 info64;
1435	int err;
1436
1437	if (!arg)
1438		return -EINVAL;
1439	err = loop_get_status(lo, &info64);
1440	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1441		err = -EFAULT;
1442
1443	return err;
1444}
1445
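/*
 * A userspace sketch (not part of this file) of reading the status back
 * through the 64-bit interface:
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static void show_status(const char *loopdev)
{
	struct loop_info64 info;
	int lfd = open(loopdev, O_RDONLY);

	if (lfd >= 0 && ioctl(lfd, LOOP_GET_STATUS64, &info) == 0)
		printf("offset=%llu flags=0x%x file=%s\n",
		       (unsigned long long)info.lo_offset,
		       (unsigned int)info.lo_flags,
		       (const char *)info.lo_file_name);
}
#endif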
1446static int loop_set_capacity(struct loop_device *lo)
1447{
1448	loff_t size;
1449
1450	if (unlikely(lo->lo_state != Lo_bound))
1451		return -ENXIO;
1452
1453	size = get_loop_size(lo, lo->lo_backing_file);
1454	loop_set_size(lo, size);
1455
1456	return 0;
1457}
1458
1459static int loop_set_dio(struct loop_device *lo, unsigned long arg)
1460{
1461	int error = -ENXIO;
1462	if (lo->lo_state != Lo_bound)
1463		goto out;
1464
1465	__loop_update_dio(lo, !!arg);
1466	if (lo->use_dio == !!arg)
1467		return 0;
1468	error = -EINVAL;
1469 out:
1470	return error;
1471}
1472
1473static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
1474{
1475	int err = 0;
1476
1477	if (lo->lo_state != Lo_bound)
1478		return -ENXIO;
1479
1480	err = blk_validate_block_size(arg);
1481	if (err)
1482		return err;
1483
1484	if (lo->lo_queue->limits.logical_block_size == arg)
1485		return 0;
1486
1487	sync_blockdev(lo->lo_device);
1488	invalidate_bdev(lo->lo_device);
1489
1490	blk_mq_freeze_queue(lo->lo_queue);
1491	blk_queue_logical_block_size(lo->lo_queue, arg);
1492	blk_queue_physical_block_size(lo->lo_queue, arg);
1493	blk_queue_io_min(lo->lo_queue, arg);
1494	loop_update_dio(lo);
1495	blk_mq_unfreeze_queue(lo->lo_queue);
1496
1497	return err;
1498}
1499
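/*
 * A userspace sketch (not part of this file) of LOOP_SET_BLOCK_SIZE.
 * blk_validate_block_size() restricts the value to a power of two
 * between 512 and the page size; anything else fails with -EINVAL.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static int set_loop_block_size(const char *loopdev, unsigned long bsize)
{
	int lfd = open(loopdev, O_RDWR);

	if (lfd < 0)
		return -1;
	return ioctl(lfd, LOOP_SET_BLOCK_SIZE, bsize);
}
#endif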
1500static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
1501			   unsigned long arg)
1502{
1503	int err;
1504
1505	err = mutex_lock_killable(&lo->lo_mutex);
1506	if (err)
1507		return err;
1508	switch (cmd) {
1509	case LOOP_SET_CAPACITY:
1510		err = loop_set_capacity(lo);
1511		break;
1512	case LOOP_SET_DIRECT_IO:
1513		err = loop_set_dio(lo, arg);
1514		break;
1515	case LOOP_SET_BLOCK_SIZE:
1516		err = loop_set_block_size(lo, arg);
1517		break;
1518	default:
1519		err = -EINVAL;
1520	}
1521	mutex_unlock(&lo->lo_mutex);
1522	return err;
1523}
1524
1525static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
1526	unsigned int cmd, unsigned long arg)
1527{
1528	struct loop_device *lo = bdev->bd_disk->private_data;
1529	void __user *argp = (void __user *) arg;
1530	int err;
1531
1532	switch (cmd) {
1533	case LOOP_SET_FD: {
1534		/*
1535		 * Legacy case - pass in a zeroed out struct loop_config with
 1536		 * only the file descriptor set, which corresponds with the
1537		 * default parameters we'd have used otherwise.
1538		 */
1539		struct loop_config config;
1540
1541		memset(&config, 0, sizeof(config));
1542		config.fd = arg;
1543
1544		return loop_configure(lo, mode, bdev, &config);
1545	}
1546	case LOOP_CONFIGURE: {
1547		struct loop_config config;
1548
1549		if (copy_from_user(&config, argp, sizeof(config)))
1550			return -EFAULT;
1551
1552		return loop_configure(lo, mode, bdev, &config);
1553	}
1554	case LOOP_CHANGE_FD:
1555		return loop_change_fd(lo, bdev, arg);
1556	case LOOP_CLR_FD:
1557		return loop_clr_fd(lo);
1558	case LOOP_SET_STATUS:
1559		err = -EPERM;
1560		if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
1561			err = loop_set_status_old(lo, argp);
1562		break;
1563	case LOOP_GET_STATUS:
1564		return loop_get_status_old(lo, argp);
1565	case LOOP_SET_STATUS64:
1566		err = -EPERM;
1567		if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
1568			err = loop_set_status64(lo, argp);
1569		break;
1570	case LOOP_GET_STATUS64:
1571		return loop_get_status64(lo, argp);
1572	case LOOP_SET_CAPACITY:
1573	case LOOP_SET_DIRECT_IO:
1574	case LOOP_SET_BLOCK_SIZE:
1575		if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
1576			return -EPERM;
1577		fallthrough;
1578	default:
1579		err = lo_simple_ioctl(lo, cmd, arg);
1580		break;
1581	}
1582
1583	return err;
1584}
1585
1586#ifdef CONFIG_COMPAT
1587struct compat_loop_info {
1588	compat_int_t	lo_number;      /* ioctl r/o */
1589	compat_dev_t	lo_device;      /* ioctl r/o */
1590	compat_ulong_t	lo_inode;       /* ioctl r/o */
1591	compat_dev_t	lo_rdevice;     /* ioctl r/o */
1592	compat_int_t	lo_offset;
1593	compat_int_t	lo_encrypt_type;        /* obsolete, ignored */
1594	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
1595	compat_int_t	lo_flags;       /* ioctl r/o */
1596	char		lo_name[LO_NAME_SIZE];
1597	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
1598	compat_ulong_t	lo_init[2];
1599	char		reserved[4];
1600};
1601
1602/*
1603 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
1604 * - noinlined to reduce stack space usage in main part of driver
1605 */
1606static noinline int
1607loop_info64_from_compat(const struct compat_loop_info __user *arg,
1608			struct loop_info64 *info64)
1609{
1610	struct compat_loop_info info;
1611
1612	if (copy_from_user(&info, arg, sizeof(info)))
1613		return -EFAULT;
1614
1615	memset(info64, 0, sizeof(*info64));
1616	info64->lo_number = info.lo_number;
1617	info64->lo_device = info.lo_device;
1618	info64->lo_inode = info.lo_inode;
1619	info64->lo_rdevice = info.lo_rdevice;
1620	info64->lo_offset = info.lo_offset;
1621	info64->lo_sizelimit = 0;
1622	info64->lo_flags = info.lo_flags;
1623	memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
1624	return 0;
1625}
1626
1627/*
1628 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
1629 * - noinlined to reduce stack space usage in main part of driver
1630 */
1631static noinline int
1632loop_info64_to_compat(const struct loop_info64 *info64,
1633		      struct compat_loop_info __user *arg)
1634{
1635	struct compat_loop_info info;
1636
1637	memset(&info, 0, sizeof(info));
1638	info.lo_number = info64->lo_number;
1639	info.lo_device = info64->lo_device;
1640	info.lo_inode = info64->lo_inode;
1641	info.lo_rdevice = info64->lo_rdevice;
1642	info.lo_offset = info64->lo_offset;
1643	info.lo_flags = info64->lo_flags;
1644	memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
1645
1646	/* error in case values were truncated */
1647	if (info.lo_device != info64->lo_device ||
1648	    info.lo_rdevice != info64->lo_rdevice ||
1649	    info.lo_inode != info64->lo_inode ||
1650	    info.lo_offset != info64->lo_offset)
1651		return -EOVERFLOW;
1652
1653	if (copy_to_user(arg, &info, sizeof(info)))
1654		return -EFAULT;
1655	return 0;
1656}
1657
1658static int
1659loop_set_status_compat(struct loop_device *lo,
1660		       const struct compat_loop_info __user *arg)
1661{
1662	struct loop_info64 info64;
1663	int ret;
1664
1665	ret = loop_info64_from_compat(arg, &info64);
1666	if (ret < 0)
1667		return ret;
1668	return loop_set_status(lo, &info64);
1669}
1670
1671static int
1672loop_get_status_compat(struct loop_device *lo,
1673		       struct compat_loop_info __user *arg)
1674{
1675	struct loop_info64 info64;
1676	int err;
1677
1678	if (!arg)
1679		return -EINVAL;
1680	err = loop_get_status(lo, &info64);
1681	if (!err)
1682		err = loop_info64_to_compat(&info64, arg);
1683	return err;
1684}
1685
1686static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
1687			   unsigned int cmd, unsigned long arg)
1688{
1689	struct loop_device *lo = bdev->bd_disk->private_data;
1690	int err;
1691
 1692	switch (cmd) {
1693	case LOOP_SET_STATUS:
1694		err = loop_set_status_compat(lo,
1695			     (const struct compat_loop_info __user *)arg);
1696		break;
1697	case LOOP_GET_STATUS:
1698		err = loop_get_status_compat(lo,
1699				     (struct compat_loop_info __user *)arg);
1700		break;
1701	case LOOP_SET_CAPACITY:
1702	case LOOP_CLR_FD:
1703	case LOOP_GET_STATUS64:
1704	case LOOP_SET_STATUS64:
1705	case LOOP_CONFIGURE:
1706		arg = (unsigned long) compat_ptr(arg);
1707		fallthrough;
1708	case LOOP_SET_FD:
1709	case LOOP_CHANGE_FD:
1710	case LOOP_SET_BLOCK_SIZE:
1711	case LOOP_SET_DIRECT_IO:
1712		err = lo_ioctl(bdev, mode, cmd, arg);
1713		break;
1714	default:
1715		err = -ENOIOCTLCMD;
1716		break;
1717	}
1718	return err;
1719}
1720#endif
1721
1722static void lo_release(struct gendisk *disk)
1723{
1724	struct loop_device *lo = disk->private_data;
1725
1726	if (disk_openers(disk) > 0)
1727		return;
1728
1729	mutex_lock(&lo->lo_mutex);
1730	if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) {
1731		lo->lo_state = Lo_rundown;
1732		mutex_unlock(&lo->lo_mutex);
1733		/*
1734		 * In autoclear mode, stop the loop thread
1735		 * and remove configuration after last close.
1736		 */
1737		__loop_clr_fd(lo, true);
1738		return;
1739	}
1740	mutex_unlock(&lo->lo_mutex);
1741}
1742
1743static void lo_free_disk(struct gendisk *disk)
1744{
1745	struct loop_device *lo = disk->private_data;
1746
1747	if (lo->workqueue)
1748		destroy_workqueue(lo->workqueue);
1749	loop_free_idle_workers(lo, true);
1750	timer_shutdown_sync(&lo->timer);
1751	mutex_destroy(&lo->lo_mutex);
1752	kfree(lo);
1753}
1754
1755static const struct block_device_operations lo_fops = {
1756	.owner =	THIS_MODULE,
1757	.release =	lo_release,
1758	.ioctl =	lo_ioctl,
1759#ifdef CONFIG_COMPAT
1760	.compat_ioctl =	lo_compat_ioctl,
1761#endif
1762	.free_disk =	lo_free_disk,
1763};
1764
1765/*
1766 * And now the modules code and kernel interface.
1767 */
1768
1769/*
1770 * If max_loop is specified, create that many devices upfront.
1771 * This also becomes a hard limit. If max_loop is not specified,
1772 * the default isn't a hard limit (as before commit 85c50197716c
1773 * changed the default value from 0 for max_loop=0 reasons), just
1774 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
1775 * init time. Loop devices can be requested on-demand with the
1776 * /dev/loop-control interface, or be instantiated by accessing
1777 * a 'dead' device node.
1778 */
1779static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
1780
1781#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
1782static bool max_loop_specified;
1783
1784static int max_loop_param_set_int(const char *val,
1785				  const struct kernel_param *kp)
1786{
1787	int ret;
1788
1789	ret = param_set_int(val, kp);
1790	if (ret < 0)
1791		return ret;
1792
1793	max_loop_specified = true;
1794	return 0;
1795}
1796
1797static const struct kernel_param_ops max_loop_param_ops = {
1798	.set = max_loop_param_set_int,
1799	.get = param_get_int,
1800};
1801
1802module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
1803MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
1804#else
1805module_param(max_loop, int, 0444);
1806MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
1807#endif
1808
1809module_param(max_part, int, 0444);
1810MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
1811
1812static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH;
1813
1814static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p)
1815{
1816	int qd, ret;
1817
1818	ret = kstrtoint(s, 0, &qd);
1819	if (ret < 0)
1820		return ret;
1821	if (qd < 1)
1822		return -EINVAL;
1823	hw_queue_depth = qd;
1824	return 0;
1825}
1826
1827static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
1828	.set	= loop_set_hw_queue_depth,
1829	.get	= param_get_int,
1830};
1831
1832device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
1833MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH));
1834
1835MODULE_LICENSE("GPL");
1836MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
1837
1838static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1839		const struct blk_mq_queue_data *bd)
1840{
1841	struct request *rq = bd->rq;
1842	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
1843	struct loop_device *lo = rq->q->queuedata;
1844
1845	blk_mq_start_request(rq);
1846
1847	if (lo->lo_state != Lo_bound)
1848		return BLK_STS_IOERR;
1849
1850	switch (req_op(rq)) {
1851	case REQ_OP_FLUSH:
1852	case REQ_OP_DISCARD:
1853	case REQ_OP_WRITE_ZEROES:
1854		cmd->use_aio = false;
1855		break;
1856	default:
1857		cmd->use_aio = lo->use_dio;
1858		break;
1859	}
1860
1861	/* always use the first bio's css */
1862	cmd->blkcg_css = NULL;
1863	cmd->memcg_css = NULL;
1864#ifdef CONFIG_BLK_CGROUP
1865	if (rq->bio) {
1866		cmd->blkcg_css = bio_blkcg_css(rq->bio);
1867#ifdef CONFIG_MEMCG
1868		if (cmd->blkcg_css) {
1869			cmd->memcg_css =
1870				cgroup_get_e_css(cmd->blkcg_css->cgroup,
1871						&memory_cgrp_subsys);
1872		}
1873#endif
1874	}
1875#endif
1876	loop_queue_work(lo, cmd);
1877
1878	return BLK_STS_OK;
1879}
1880
1881static void loop_handle_cmd(struct loop_cmd *cmd)
1882{
1883	struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
1884	struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
1885	struct request *rq = blk_mq_rq_from_pdu(cmd);
1886	const bool write = op_is_write(req_op(rq));
1887	struct loop_device *lo = rq->q->queuedata;
1888	int ret = 0;
1889	struct mem_cgroup *old_memcg = NULL;
1890	const bool use_aio = cmd->use_aio;
1891
1892	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
1893		ret = -EIO;
1894		goto failed;
1895	}
1896
1897	if (cmd_blkcg_css)
1898		kthread_associate_blkcg(cmd_blkcg_css);
1899	if (cmd_memcg_css)
1900		old_memcg = set_active_memcg(
1901			mem_cgroup_from_css(cmd_memcg_css));
1902
1903	/*
1904	 * do_req_filebacked() may call blk_mq_complete_request() synchronously
1905	 * or asynchronously if using aio. Hence, do not touch 'cmd' after
1906	 * do_req_filebacked() has returned unless we are sure that 'cmd' has
1907	 * not yet been completed.
1908	 */
1909	ret = do_req_filebacked(lo, rq);
1910
1911	if (cmd_blkcg_css)
1912		kthread_associate_blkcg(NULL);
1913
1914	if (cmd_memcg_css) {
1915		set_active_memcg(old_memcg);
1916		css_put(cmd_memcg_css);
1917	}
1918 failed:
1919	/* complete non-aio request */
1920	if (!use_aio || ret) {
1921		if (ret == -EOPNOTSUPP)
1922			cmd->ret = ret;
1923		else
1924			cmd->ret = ret ? -EIO : 0;
1925		if (likely(!blk_should_fake_timeout(rq->q)))
1926			blk_mq_complete_request(rq);
1927	}
1928}
1929
1930static void loop_process_work(struct loop_worker *worker,
1931			struct list_head *cmd_list, struct loop_device *lo)
1932{
1933	int orig_flags = current->flags;
1934	struct loop_cmd *cmd;
1935
1936	current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
1937	spin_lock_irq(&lo->lo_work_lock);
1938	while (!list_empty(cmd_list)) {
1939		cmd = container_of(
1940			cmd_list->next, struct loop_cmd, list_entry);
1941		list_del(cmd_list->next);
1942		spin_unlock_irq(&lo->lo_work_lock);
1943
1944		loop_handle_cmd(cmd);
1945		cond_resched();
1946
1947		spin_lock_irq(&lo->lo_work_lock);
1948	}
1949
1950	/*
1951	 * We only add to the idle list if there are no pending cmds
1952	 * *and* the worker will not run again, which ensures that it
1953	 * is safe to free any worker on the idle list.
1954	 */
1955	if (worker && !work_pending(&worker->work)) {
1956		worker->last_ran_at = jiffies;
1957		list_add_tail(&worker->idle_list, &lo->idle_worker_list);
1958		loop_set_timer(lo);
1959	}
1960	spin_unlock_irq(&lo->lo_work_lock);
1961	current->flags = orig_flags;
1962}
1963
1964static void loop_workfn(struct work_struct *work)
1965{
1966	struct loop_worker *worker =
1967		container_of(work, struct loop_worker, work);
1968	loop_process_work(worker, &worker->cmd_list, worker->lo);
1969}
1970
1971static void loop_rootcg_workfn(struct work_struct *work)
1972{
1973	struct loop_device *lo =
1974		container_of(work, struct loop_device, rootcg_work);
1975	loop_process_work(NULL, &lo->rootcg_cmd_list, lo);
1976}
1977
1978static const struct blk_mq_ops loop_mq_ops = {
1979	.queue_rq       = loop_queue_rq,
1980	.complete	= lo_complete_rq,
1981};
1982
1983static int loop_add(int i)
1984{
1985	struct loop_device *lo;
1986	struct gendisk *disk;
1987	int err;
1988
1989	err = -ENOMEM;
1990	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1991	if (!lo)
1992		goto out;
1993	lo->worker_tree = RB_ROOT;
1994	INIT_LIST_HEAD(&lo->idle_worker_list);
1995	timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE);
1996	lo->lo_state = Lo_unbound;
1997
1998	err = mutex_lock_killable(&loop_ctl_mutex);
1999	if (err)
2000		goto out_free_dev;
2001
2002	/* Allocate an id. If @i >= 0, we're requesting that specific id. */
2003	if (i >= 0) {
2004		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
2005		if (err == -ENOSPC)
2006			err = -EEXIST;
2007	} else {
2008		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
2009	}
2010	mutex_unlock(&loop_ctl_mutex);
2011	if (err < 0)
2012		goto out_free_dev;
2013	i = err;
2014
2015	lo->tag_set.ops = &loop_mq_ops;
2016	lo->tag_set.nr_hw_queues = 1;
2017	lo->tag_set.queue_depth = hw_queue_depth;
2018	lo->tag_set.numa_node = NUMA_NO_NODE;
2019	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
2020	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
2021		BLK_MQ_F_NO_SCHED_BY_DEFAULT;
2022	lo->tag_set.driver_data = lo;
2023
2024	err = blk_mq_alloc_tag_set(&lo->tag_set);
2025	if (err)
2026		goto out_free_idr;
2027
2028	disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, lo);
2029	if (IS_ERR(disk)) {
2030		err = PTR_ERR(disk);
2031		goto out_cleanup_tags;
2032	}
2033	lo->lo_queue = lo->lo_disk->queue;
2034
2035	/* arbitrary value picked from the historic block max_sectors cap */
2036	blk_queue_max_hw_sectors(lo->lo_queue, 2560u);
2037
2038	/*
2039	 * By default, we do buffered I/O, so it doesn't make sense to enable
2040	 * merging because the I/O submitted to the backing file is handled
2041	 * page by page. For direct I/O, merging does help to dispatch bigger
2042	 * requests to the underlying disk; we enable merging once direct I/O is enabled.
2043	 */
2044	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
2045
2046	/*
2047	 * Disable partition scanning by default. The in-kernel partition
2048	 * scanning can be requested individually per-device during its
2049	 * setup. Userspace can always add and remove partitions from all
2050	 * devices. The needed partition minors are allocated from the
2051	 * extended minor space, the main loop device numbers will continue
2052	 * to match the loop minors, regardless of the number of partitions
2053	 * used.
2054	 *
2055	 * If max_part is given, partition scanning is globally enabled for
2056	 * all loop devices. The minors for the main loop devices will be
2057	 * multiples of max_part.
2058	 *
2059	 * Note: Global-for-all-devices, set-only-at-init, read-only module
2060	 * parameters like 'max_loop' and 'max_part' make things needlessly
2061	 * complicated, are too static and inflexible, and may surprise
2062	 * userspace tools. Parameters like this should be avoided in general.
2063	 */
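	/*
	 * Worked example (illustrative): max_part=15 gives part_shift =
	 * fls(15) = 4, so loop0 owns minors 0..15 (the whole disk plus
	 * loop0p1..loop0p15) and loop1 starts at minor 1 << 4 = 16.
	 */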
2064	if (!part_shift)
2065		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
2066	mutex_init(&lo->lo_mutex);
2067	lo->lo_number		= i;
2068	spin_lock_init(&lo->lo_lock);
2069	spin_lock_init(&lo->lo_work_lock);
2070	INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
2071	INIT_LIST_HEAD(&lo->rootcg_cmd_list);
2072	disk->major		= LOOP_MAJOR;
2073	disk->first_minor	= i << part_shift;
2074	disk->minors		= 1 << part_shift;
2075	disk->fops		= &lo_fops;
2076	disk->private_data	= lo;
2077	disk->queue		= lo->lo_queue;
2078	disk->events		= DISK_EVENT_MEDIA_CHANGE;
2079	disk->event_flags	= DISK_EVENT_FLAG_UEVENT;
2080	sprintf(disk->disk_name, "loop%d", i);
2081	/* Make this loop device reachable from pathname. */
2082	err = add_disk(disk);
2083	if (err)
2084		goto out_cleanup_disk;
2085
2086	/* Show this loop device. */
2087	mutex_lock(&loop_ctl_mutex);
2088	lo->idr_visible = true;
2089	mutex_unlock(&loop_ctl_mutex);
2090
2091	return i;
2092
2093out_cleanup_disk:
2094	put_disk(disk);
2095out_cleanup_tags:
2096	blk_mq_free_tag_set(&lo->tag_set);
2097out_free_idr:
2098	mutex_lock(&loop_ctl_mutex);
2099	idr_remove(&loop_index_idr, i);
2100	mutex_unlock(&loop_ctl_mutex);
2101out_free_dev:
2102	kfree(lo);
2103out:
2104	return err;
2105}
2106
2107static void loop_remove(struct loop_device *lo)
2108{
2109	/* Make this loop device unreachable from pathname. */
2110	del_gendisk(lo->lo_disk);
2111	blk_mq_free_tag_set(&lo->tag_set);
2112
2113	mutex_lock(&loop_ctl_mutex);
2114	idr_remove(&loop_index_idr, lo->lo_number);
2115	mutex_unlock(&loop_ctl_mutex);
2116
2117	put_disk(lo->lo_disk);
2118}
2119
2120#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
2121static void loop_probe(dev_t dev)
2122{
2123	int idx = MINOR(dev) >> part_shift;
2124
2125	if (max_loop_specified && max_loop && idx >= max_loop)
2126		return;
2127	loop_add(idx);
2128}
2129#else
2130#define loop_probe NULL
2131#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */
2132
2133static int loop_control_remove(int idx)
2134{
2135	struct loop_device *lo;
2136	int ret;
2137
2138	if (idx < 0) {
2139		pr_warn_once("deleting an unspecified loop device is not supported.\n");
2140		return -EINVAL;
2141	}
2142
2143	/* Hide this loop device for serialization. */
2144	ret = mutex_lock_killable(&loop_ctl_mutex);
2145	if (ret)
2146		return ret;
2147	lo = idr_find(&loop_index_idr, idx);
2148	if (!lo || !lo->idr_visible)
2149		ret = -ENODEV;
2150	else
2151		lo->idr_visible = false;
2152	mutex_unlock(&loop_ctl_mutex);
2153	if (ret)
2154		return ret;
2155
2156	/* Check whether this loop device can be removed. */
2157	ret = mutex_lock_killable(&lo->lo_mutex);
2158	if (ret)
2159		goto mark_visible;
2160	if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) {
2161		mutex_unlock(&lo->lo_mutex);
2162		ret = -EBUSY;
2163		goto mark_visible;
2164	}
2165	/* Mark this loop device as no more bound, but not quite unbound yet */
2166	lo->lo_state = Lo_deleting;
2167	mutex_unlock(&lo->lo_mutex);
2168
2169	loop_remove(lo);
2170	return 0;
2171
2172mark_visible:
2173	/* Show this loop device again. */
2174	mutex_lock(&loop_ctl_mutex);
2175	lo->idr_visible = true;
2176	mutex_unlock(&loop_ctl_mutex);
2177	return ret;
2178}
2179
2180static int loop_control_get_free(int idx)
2181{
2182	struct loop_device *lo;
2183	int id, ret;
2184
2185	ret = mutex_lock_killable(&loop_ctl_mutex);
2186	if (ret)
2187		return ret;
2188	idr_for_each_entry(&loop_index_idr, lo, id) {
2189		/* Hitting a race results in creating a new loop device which is harmless. */
2190		if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
2191			goto found;
2192	}
2193	mutex_unlock(&loop_ctl_mutex);
2194	return loop_add(-1);
2195found:
2196	mutex_unlock(&loop_ctl_mutex);
2197	return id;
2198}
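/*
 * Illustrative userspace sketch (not part of this driver): asking
 * /dev/loop-control for the first free device.  On success the ioctl
 * returns the device number found by loop_control_get_free() above.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int main(void)
 *	{
 *		int ctl = open("/dev/loop-control", O_RDWR);
 *		int nr;
 *
 *		if (ctl < 0)
 *			return 1;
 *		nr = ioctl(ctl, LOOP_CTL_GET_FREE);	// e.g. 0 -> /dev/loop0
 *		if (nr >= 0)
 *			printf("first free device: /dev/loop%d\n", nr);
 *		return nr < 0;
 *	}
 */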
2199
2200static long loop_control_ioctl(struct file *file, unsigned int cmd,
2201			       unsigned long parm)
2202{
2203	switch (cmd) {
2204	case LOOP_CTL_ADD:
2205		return loop_add(parm);
2206	case LOOP_CTL_REMOVE:
2207		return loop_control_remove(parm);
2208	case LOOP_CTL_GET_FREE:
2209		return loop_control_get_free(parm);
2210	default:
2211		return -ENOSYS;
2212	}
2213}
2214
2215static const struct file_operations loop_ctl_fops = {
2216	.open		= nonseekable_open,
2217	.unlocked_ioctl	= loop_control_ioctl,
2218	.compat_ioctl	= loop_control_ioctl,
2219	.owner		= THIS_MODULE,
2220	.llseek		= noop_llseek,
2221};
2222
2223static struct miscdevice loop_misc = {
2224	.minor		= LOOP_CTRL_MINOR,
2225	.name		= "loop-control",
2226	.fops		= &loop_ctl_fops,
2227};
2228
2229MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
2230MODULE_ALIAS("devname:loop-control");
2231
2232static int __init loop_init(void)
2233{
2234	int i;
2235	int err;
2236
2237	part_shift = 0;
2238	if (max_part > 0) {
2239		part_shift = fls(max_part);
2240
2241		/*
2242		 * Adjust max_part according to part_shift as it is exported
2243		 * to user space so that the user can work out the correct
2244		 * minor number when creating more devices.
2245		 *
2246		 * Note that -1 is required because partition 0 is reserved
2247		 * for the whole disk.
2248		 */
2249		max_part = (1UL << part_shift) - 1;
2250	}
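	/*
	 * Worked example (illustrative): booting with max_part=7 gives
	 * part_shift = fls(7) = 3 and max_part = (1 << 3) - 1 = 7, i.e.
	 * eight minors per device: the whole disk plus partitions 1..7.
	 */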
2251
2252	if ((1UL << part_shift) > DISK_MAX_PARTS) {
2253		err = -EINVAL;
2254		goto err_out;
2255	}
2256
2257	if (max_loop > 1UL << (MINORBITS - part_shift)) {
2258		err = -EINVAL;
2259		goto err_out;
2260	}
2261
2262	err = misc_register(&loop_misc);
2263	if (err < 0)
2264		goto err_out;
2265
2266
2267	if (__register_blkdev(LOOP_MAJOR, "loop", loop_probe)) {
2268		err = -EIO;
2269		goto misc_out;
2270	}
2271
2272	/* pre-create number of devices given by config or max_loop */
2273	for (i = 0; i < max_loop; i++)
2274		loop_add(i);
2275
2276	printk(KERN_INFO "loop: module loaded\n");
2277	return 0;
2278
2279misc_out:
2280	misc_deregister(&loop_misc);
2281err_out:
2282	return err;
2283}
2284
2285static void __exit loop_exit(void)
2286{
2287	struct loop_device *lo;
2288	int id;
2289
2290	unregister_blkdev(LOOP_MAJOR, "loop");
2291	misc_deregister(&loop_misc);
2292
2293	/*
2294	 * There is no need to use loop_ctl_mutex here, for nobody else can
2295	 * access loop_index_idr when this module is unloading (unless forced
2296	 * module unloading is requested). If this is not a clean unloading,
2297	 * we have no means of avoiding a kernel crash.
2298	 */
2299	idr_for_each_entry(&loop_index_idr, lo, id)
2300		loop_remove(lo);
2301
2302	idr_destroy(&loop_index_idr);
2303}
2304
2305module_init(loop_init);
2306module_exit(loop_exit);
2307
2308#ifndef MODULE
2309static int __init max_loop_setup(char *str)
2310{
2311	max_loop = simple_strtol(str, NULL, 0);
2312#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
2313	max_loop_specified = true;
2314#endif
2315	return 1;
2316}
2317
2318__setup("max_loop=", max_loop_setup);
2319#endif
v3.1
 
   1/*
   2 *  linux/drivers/block/loop.c
   3 *
   4 *  Written by Theodore Ts'o, 3/29/93
   5 *
   6 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
   7 * permitted under the GNU General Public License.
   8 *
   9 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
  10 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
  11 *
  12 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
  13 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
  14 *
  15 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
  16 *
  17 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
  18 *
  19 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
  20 *
  21 * Loadable modules and other fixes by AK, 1998
  22 *
  23 * Make real block number available to downstream transfer functions, enables
  24 * CBC (and relatives) mode encryption requiring unique IVs per data block.
  25 * Reed H. Petty, rhp@draper.net
  26 *
  27 * Maximum number of loop devices now dynamic via max_loop module parameter.
  28 * Russell Kroll <rkroll@exploits.org> 19990701
  29 *
  30 * Maximum number of loop devices when compiled-in now selectable by passing
  31 * max_loop=<1-255> to the kernel on boot.
  32 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
  33 *
  34 * Completely rewrite request handling to be make_request_fn style and
  35 * non blocking, pushing work to a helper thread. Lots of fixes from
  36 * Al Viro too.
  37 * Jens Axboe <axboe@suse.de>, Nov 2000
  38 *
  39 * Support up to 256 loop devices
  40 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
  41 *
  42 * Support for falling back on the write file operation when the address space
  43 * operations write_begin is not available on the backing filesystem.
  44 * Anton Altaparmakov, 16 Feb 2005
  45 *
  46 * Still To Fix:
  47 * - Advisory locking is ignored here.
  48 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
  49 *
  50 */
  51
  52#include <linux/module.h>
  53#include <linux/moduleparam.h>
  54#include <linux/sched.h>
  55#include <linux/fs.h>
  56#include <linux/file.h>
  57#include <linux/stat.h>
  58#include <linux/errno.h>
  59#include <linux/major.h>
  60#include <linux/wait.h>
  61#include <linux/blkdev.h>
  62#include <linux/blkpg.h>
  63#include <linux/init.h>
  64#include <linux/swap.h>
  65#include <linux/slab.h>
  66#include <linux/loop.h>
  67#include <linux/compat.h>
  68#include <linux/suspend.h>
  69#include <linux/freezer.h>
  70#include <linux/mutex.h>
  71#include <linux/writeback.h>
  72#include <linux/buffer_head.h>		/* for invalidate_bdev() */
  73#include <linux/completion.h>
  74#include <linux/highmem.h>
  75#include <linux/kthread.h>
  76#include <linux/splice.h>
  77#include <linux/sysfs.h>
  78#include <linux/miscdevice.h>
  79#include <asm/uaccess.h>
  80
  81static DEFINE_IDR(loop_index_idr);
  82static DEFINE_MUTEX(loop_index_mutex);
  83
  84static int max_part;
  85static int part_shift;
  86
  87/*
  88 * Transfer functions
  89 */
  90static int transfer_none(struct loop_device *lo, int cmd,
  91			 struct page *raw_page, unsigned raw_off,
  92			 struct page *loop_page, unsigned loop_off,
  93			 int size, sector_t real_block)
  94{
  95	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
  96	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
  97
  98	if (cmd == READ)
  99		memcpy(loop_buf, raw_buf, size);
 100	else
 101		memcpy(raw_buf, loop_buf, size);
 102
 103	kunmap_atomic(loop_buf, KM_USER1);
 104	kunmap_atomic(raw_buf, KM_USER0);
 105	cond_resched();
 106	return 0;
 107}
 108
 109static int transfer_xor(struct loop_device *lo, int cmd,
 110			struct page *raw_page, unsigned raw_off,
 111			struct page *loop_page, unsigned loop_off,
 112			int size, sector_t real_block)
 113{
 114	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
 115	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
 116	char *in, *out, *key;
 117	int i, keysize;
 118
 119	if (cmd == READ) {
 120		in = raw_buf;
 121		out = loop_buf;
 122	} else {
 123		in = loop_buf;
 124		out = raw_buf;
 125	}
 126
 127	key = lo->lo_encrypt_key;
 128	keysize = lo->lo_encrypt_key_size;
 129	for (i = 0; i < size; i++)
 130		*out++ = *in++ ^ key[(i & 511) % keysize];
 131
 132	kunmap_atomic(loop_buf, KM_USER1);
 133	kunmap_atomic(raw_buf, KM_USER0);
 134	cond_resched();
 135	return 0;
 136}
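/*
 * Illustrative note (not part of the driver): the XOR transfer is its
 * own inverse, so data written through transfer_xor() and read back
 * through it is restored unchanged: (c ^ key[i]) ^ key[i] == c.
 */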
 137
 138static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
 139{
 140	if (unlikely(info->lo_encrypt_key_size <= 0))
 141		return -EINVAL;
 142	return 0;
 143}
 144
 145static struct loop_func_table none_funcs = {
 146	.number = LO_CRYPT_NONE,
 147	.transfer = transfer_none,
 148};
 149
 150static struct loop_func_table xor_funcs = {
 151	.number = LO_CRYPT_XOR,
 152	.transfer = transfer_xor,
 153	.init = xor_init
 154};
 155
 156/* xfer_funcs[0] is special - its release function is never called */
 157static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
 158	&none_funcs,
 159	&xor_funcs
 160};
 161
 162static loff_t get_loop_size(struct loop_device *lo, struct file *file)
 163{
 164	loff_t size, offset, loopsize;
 165
 166	/* Compute loopsize in bytes */
 167	size = i_size_read(file->f_mapping->host);
 168	offset = lo->lo_offset;
 169	loopsize = size - offset;
 170	if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
 171		loopsize = lo->lo_sizelimit;
 172
 173	/*
 174	 * Unfortunately, if we want to do I/O on the device,
 175	 * the number of 512-byte sectors has to fit into a sector_t.
 176	 */
 177	return loopsize >> 9;
 178}
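/*
 * Worked example (illustrative): a 1 GiB backing file with lo_offset =
 * 4096 and no lo_sizelimit yields (1073741824 - 4096) >> 9 = 2097144
 * usable 512-byte sectors.
 */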
 179
 180static int
 181figure_loop_size(struct loop_device *lo)
 182{
 183	loff_t size = get_loop_size(lo, lo->lo_backing_file);
 184	sector_t x = (sector_t)size;
 185
 186	if (unlikely((loff_t)x != size))
 187		return -EFBIG;
 188
 189	set_capacity(lo->lo_disk, x);
 190	return 0;
 191}
 192
 193static inline int
 194lo_do_transfer(struct loop_device *lo, int cmd,
 195	       struct page *rpage, unsigned roffs,
 196	       struct page *lpage, unsigned loffs,
 197	       int size, sector_t rblock)
 198{
 199	if (unlikely(!lo->transfer))
 200		return 0;
 201
 202	return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
 203}
 204
 205/**
 206 * do_lo_send_aops - helper for writing data to a loop device
 207 *
 208 * This is the fast version for backing filesystems which implement the address
 209 * space operations write_begin and write_end.
 210 */
 211static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
 212		loff_t pos, struct page *unused)
 213{
 214	struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
 215	struct address_space *mapping = file->f_mapping;
 216	pgoff_t index;
 217	unsigned offset, bv_offs;
 218	int len, ret;
 219
 220	mutex_lock(&mapping->host->i_mutex);
 221	index = pos >> PAGE_CACHE_SHIFT;
 222	offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
 223	bv_offs = bvec->bv_offset;
 224	len = bvec->bv_len;
 225	while (len > 0) {
 226		sector_t IV;
 227		unsigned size, copied;
 228		int transfer_result;
 229		struct page *page;
 230		void *fsdata;
 231
 232		IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
 233		size = PAGE_CACHE_SIZE - offset;
 234		if (size > len)
 235			size = len;
 236
 237		ret = pagecache_write_begin(file, mapping, pos, size, 0,
 238							&page, &fsdata);
 239		if (ret)
 240			goto fail;
 241
 242		file_update_time(file);
 243
 244		transfer_result = lo_do_transfer(lo, WRITE, page, offset,
 245				bvec->bv_page, bv_offs, size, IV);
 246		copied = size;
 247		if (unlikely(transfer_result))
 248			copied = 0;
 249
 250		ret = pagecache_write_end(file, mapping, pos, size, copied,
 251							page, fsdata);
 252		if (ret < 0 || ret != copied)
 253			goto fail;
 254
 255		if (unlikely(transfer_result))
 256			goto fail;
 257
 258		bv_offs += copied;
 259		len -= copied;
 260		offset = 0;
 261		index++;
 262		pos += copied;
 263	}
 264	ret = 0;
 265out:
 266	mutex_unlock(&mapping->host->i_mutex);
 267	return ret;
 268fail:
 269	ret = -1;
 270	goto out;
 271}
 272
 273/**
 274 * __do_lo_send_write - helper for writing data to a loop device
 275 *
 276 * This helper just factors out common code between do_lo_send_direct_write()
 277 * and do_lo_send_write().
 278 */
 279static int __do_lo_send_write(struct file *file,
 280		u8 *buf, const int len, loff_t pos)
 281{
 282	ssize_t bw;
 283	mm_segment_t old_fs = get_fs();
 284
 285	set_fs(get_ds());
 286	bw = file->f_op->write(file, buf, len, &pos);
 287	set_fs(old_fs);
 288	if (likely(bw == len))
 289		return 0;
 290	printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
 291			(unsigned long long)pos, len);
 292	if (bw >= 0)
 293		bw = -EIO;
 294	return bw;
 295}
 296
 297/**
 298 * do_lo_send_direct_write - helper for writing data to a loop device
 299 *
 300 * This is the fast, non-transforming version for backing filesystems which do
 301 * not implement the address space operations write_begin and write_end.
 302 * It uses the write file operation which should be present on all writeable
 303 * filesystems.
 304 */
 305static int do_lo_send_direct_write(struct loop_device *lo,
 306		struct bio_vec *bvec, loff_t pos, struct page *page)
 307{
 308	ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
 309			kmap(bvec->bv_page) + bvec->bv_offset,
 310			bvec->bv_len, pos);
 311	kunmap(bvec->bv_page);
 312	cond_resched();
 313	return bw;
 314}
 315
 316/**
 317 * do_lo_send_write - helper for writing data to a loop device
 318 *
 319 * This is the slow, transforming version for filesystems which do not
 320 * implement the address space operations write_begin and write_end.  It
 321 * uses the write file operation which should be present on all writeable
 322 * filesystems.
 323 *
 324 * Using fops->write is slower than using aops->{prepare,commit}_write in the
 325 * transforming case because we need to double buffer the data as we cannot do
 326 * the transformations in place as we do not have direct access to the
 327 * destination pages of the backing file.
 328 */
 329static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
 330		loff_t pos, struct page *page)
 331{
 332	int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
 333			bvec->bv_offset, bvec->bv_len, pos >> 9);
 334	if (likely(!ret))
 335		return __do_lo_send_write(lo->lo_backing_file,
 336				page_address(page), bvec->bv_len,
 337				pos);
 338	printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
 339			"length %i.\n", (unsigned long long)pos, bvec->bv_len);
 340	if (ret > 0)
 341		ret = -EIO;
 342	return ret;
 343}
 344
 345static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 346{
 347	int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
 348			struct page *page);
 349	struct bio_vec *bvec;
 350	struct page *page = NULL;
 351	int i, ret = 0;
 352
 353	do_lo_send = do_lo_send_aops;
 354	if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) {
 355		do_lo_send = do_lo_send_direct_write;
 356		if (lo->transfer != transfer_none) {
 357			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
 358			if (unlikely(!page))
 359				goto fail;
 360			kmap(page);
 361			do_lo_send = do_lo_send_write;
 362		}
 363	}
 364	bio_for_each_segment(bvec, bio, i) {
 365		ret = do_lo_send(lo, bvec, pos, page);
 366		if (ret < 0)
 367			break;
 368		pos += bvec->bv_len;
 369	}
 370	if (page) {
 371		kunmap(page);
 372		__free_page(page);
 373	}
 374out:
 375	return ret;
 376fail:
 377	printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
 378	ret = -ENOMEM;
 379	goto out;
 380}
 381
 382struct lo_read_data {
 383	struct loop_device *lo;
 384	struct page *page;
 385	unsigned offset;
 386	int bsize;
 387};
 388
 389static int
 390lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 391		struct splice_desc *sd)
 392{
 393	struct lo_read_data *p = sd->u.data;
 394	struct loop_device *lo = p->lo;
 395	struct page *page = buf->page;
 396	sector_t IV;
 397	int size;
 398
 399	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
 400							(buf->offset >> 9);
 401	size = sd->len;
 402	if (size > p->bsize)
 403		size = p->bsize;
 404
 405	if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
 406		printk(KERN_ERR "loop: transfer error block %ld\n",
 407		       page->index);
 408		size = -EINVAL;
 409	}
 410
 411	flush_dcache_page(p->page);
 412
 413	if (size > 0)
 414		p->offset += size;
 415
 416	return size;
 417}
 418
 419static int
 420lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
 421{
 422	return __splice_from_pipe(pipe, sd, lo_splice_actor);
 423}
 424
 425static int
 426do_lo_receive(struct loop_device *lo,
 427	      struct bio_vec *bvec, int bsize, loff_t pos)
 428{
 429	struct lo_read_data cookie;
 430	struct splice_desc sd;
 431	struct file *file;
 432	long retval;
 433
 434	cookie.lo = lo;
 435	cookie.page = bvec->bv_page;
 436	cookie.offset = bvec->bv_offset;
 437	cookie.bsize = bsize;
 438
 439	sd.len = 0;
 440	sd.total_len = bvec->bv_len;
 441	sd.flags = 0;
 442	sd.pos = pos;
 443	sd.u.data = &cookie;
 444
 445	file = lo->lo_backing_file;
 446	retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
 447
 448	if (retval < 0)
 449		return retval;
 450
 451	return 0;
 452}
 453
 454static int
 455lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 456{
 457	struct bio_vec *bvec;
 458	int i, ret = 0;
 459
 460	bio_for_each_segment(bvec, bio, i) {
 461		ret = do_lo_receive(lo, bvec, bsize, pos);
 462		if (ret < 0)
 463			break;
 464		pos += bvec->bv_len;
 465	}
 466	return ret;
 467}
 468
 469static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 470{
 471	loff_t pos;
 472	int ret;
 473
 474	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 475
 476	if (bio_rw(bio) == WRITE) {
 477		struct file *file = lo->lo_backing_file;
 478
 479		if (bio->bi_rw & REQ_FLUSH) {
 480			ret = vfs_fsync(file, 0);
 481			if (unlikely(ret && ret != -EINVAL)) {
 482				ret = -EIO;
 483				goto out;
 484			}
 485		}
 486
 487		ret = lo_send(lo, bio, pos);
 488
 489		if ((bio->bi_rw & REQ_FUA) && !ret) {
 490			ret = vfs_fsync(file, 0);
 491			if (unlikely(ret && ret != -EINVAL))
 492				ret = -EIO;
 493		}
 494	} else
 495		ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
 496
 497out:
 498	return ret;
 499}
 500
 501/*
 502 * Add bio to back of pending list
 503 */
 504static void loop_add_bio(struct loop_device *lo, struct bio *bio)
 505{
 506	bio_list_add(&lo->lo_bio_list, bio);
 507}
 508
 509/*
 510 * Grab first pending buffer
 511 */
 512static struct bio *loop_get_bio(struct loop_device *lo)
 513{
 514	return bio_list_pop(&lo->lo_bio_list);
 515}
 516
 517static int loop_make_request(struct request_queue *q, struct bio *old_bio)
 518{
 519	struct loop_device *lo = q->queuedata;
 520	int rw = bio_rw(old_bio);
 521
 522	if (rw == READA)
 523		rw = READ;
 524
 525	BUG_ON(!lo || (rw != READ && rw != WRITE));
 526
 527	spin_lock_irq(&lo->lo_lock);
 528	if (lo->lo_state != Lo_bound)
 529		goto out;
 530	if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
 531		goto out;
 532	loop_add_bio(lo, old_bio);
 533	wake_up(&lo->lo_event);
 534	spin_unlock_irq(&lo->lo_lock);
 535	return 0;
 536
 537out:
 538	spin_unlock_irq(&lo->lo_lock);
 539	bio_io_error(old_bio);
 540	return 0;
 541}
 542
 543struct switch_request {
 544	struct file *file;
 545	struct completion wait;
 546};
 547
 548static void do_loop_switch(struct loop_device *, struct switch_request *);
 549
 550static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
 551{
 552	if (unlikely(!bio->bi_bdev)) {
 553		do_loop_switch(lo, bio->bi_private);
 554		bio_put(bio);
 555	} else {
 556		int ret = do_bio_filebacked(lo, bio);
 557		bio_endio(bio, ret);
 558	}
 559}
 560
 561/*
 562 * worker thread that handles reads/writes to file backed loop devices,
 563 * to avoid blocking in our make_request_fn. It also does loop decrypting
 564 * on reads for block backed loop, as that is too heavy to do from
 565 * b_end_io context where irqs may be disabled.
 566 *
 567 * Loop explanation:  loop_clr_fd() sets lo_state to Lo_rundown before
 568 * calling kthread_stop().  Therefore once kthread_should_stop() is
 569 * true, make_request will not place any more requests.  Thus, once
 570 * kthread_should_stop() is true and lo_bio_list is empty, we are
 571 * done with the loop.
 572 */
 573static int loop_thread(void *data)
 574{
 575	struct loop_device *lo = data;
 576	struct bio *bio;
 577
 578	set_user_nice(current, -20);
 579
 580	while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
 581
 582		wait_event_interruptible(lo->lo_event,
 583				!bio_list_empty(&lo->lo_bio_list) ||
 584				kthread_should_stop());
 585
 586		if (bio_list_empty(&lo->lo_bio_list))
 587			continue;
 588		spin_lock_irq(&lo->lo_lock);
 589		bio = loop_get_bio(lo);
 590		spin_unlock_irq(&lo->lo_lock);
 591
 592		BUG_ON(!bio);
 593		loop_handle_bio(lo, bio);
 594	}
 595
 596	return 0;
 597}
 598
 599/*
 600 * loop_switch performs the hard work of switching a backing store.
 601 * First it needs to flush existing IO, it does this by sending a magic
 602 * BIO down the pipe. The completion of this BIO does the actual switch.
 603 */
 604static int loop_switch(struct loop_device *lo, struct file *file)
 605{
 606	struct switch_request w;
 607	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
 608	if (!bio)
 609		return -ENOMEM;
 610	init_completion(&w.wait);
 611	w.file = file;
 612	bio->bi_private = &w;
 613	bio->bi_bdev = NULL;
 614	loop_make_request(lo->lo_queue, bio);
 615	wait_for_completion(&w.wait);
 616	return 0;
 617}
 618
 619/*
 620 * Helper to flush the IOs in loop, but keeping loop thread running
 621 */
 622static int loop_flush(struct loop_device *lo)
 623{
 624	/* loop not yet configured, no running thread, nothing to flush */
 625	if (!lo->lo_thread)
 626		return 0;
 627
 628	return loop_switch(lo, NULL);
 629}
 630
 631/*
 632 * Do the actual switch; called from the BIO completion routine
 633 */
 634static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
 635{
 636	struct file *file = p->file;
 637	struct file *old_file = lo->lo_backing_file;
 638	struct address_space *mapping;
 639
 640	/* if no new file, only flush of queued bios requested */
 641	if (!file)
 642		goto out;
 643
 644	mapping = file->f_mapping;
 645	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
 646	lo->lo_backing_file = file;
 647	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
 648		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
 649	lo->old_gfp_mask = mapping_gfp_mask(mapping);
 650	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 651out:
 652	complete(&p->wait);
 653}
 654
 655
 656/*
 657 * loop_change_fd switches the backing store of a loopback device to
 658 * a new file. This is useful for operating system installers to free up
 659 * the original file and in High Availability environments to switch to
 660 * an alternative location for the content in case of server meltdown.
 661 * This can only work if the loop device is used read-only, and if the
 662 * new backing store is the same size and type as the old backing store.
 663 */
 664static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 665			  unsigned int arg)
 666{
 667	struct file	*file, *old_file;
 668	struct inode	*inode;
 669	int		error;
 670
 671	error = -ENXIO;
 672	if (lo->lo_state != Lo_bound)
 673		goto out;
 674
 675	/* the loop device has to be read-only */
 676	error = -EINVAL;
 677	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
 678		goto out;
 679
 680	error = -EBADF;
 681	file = fget(arg);
 682	if (!file)
 683		goto out;
 684
 685	inode = file->f_mapping->host;
 686	old_file = lo->lo_backing_file;
 687
 688	error = -EINVAL;
 689
 690	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
 691		goto out_putf;
 692
 693	/* size of the new backing store needs to be the same */
 694	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
 695		goto out_putf;
 696
 697	/* and ... switch */
 698	error = loop_switch(lo, file);
 699	if (error)
 700		goto out_putf;
 701
 702	fput(old_file);
 703	if (max_part > 0)
 704		ioctl_by_bdev(bdev, BLKRRPART, 0);
 705	return 0;
 706
 707 out_putf:
 708	fput(file);
 709 out:
 710	return error;
 711}
 712
 713static inline int is_loop_device(struct file *file)
 714{
 715	struct inode *i = file->f_mapping->host;
 716
 717	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
 718}
 719
 720/* loop sysfs attributes */
 721
 722static ssize_t loop_attr_show(struct device *dev, char *page,
 723			      ssize_t (*callback)(struct loop_device *, char *))
 724{
 725	struct gendisk *disk = dev_to_disk(dev);
 726	struct loop_device *lo = disk->private_data;
 727
 728	return callback(lo, page);
 729}
 730
 731#define LOOP_ATTR_RO(_name)						\
 732static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
 733static ssize_t loop_attr_do_show_##_name(struct device *d,		\
 734				struct device_attribute *attr, char *b)	\
 735{									\
 736	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
 737}									\
 738static struct device_attribute loop_attr_##_name =			\
 739	__ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);
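/*
 * Illustrative expansion (not compiled): LOOP_ATTR_RO(offset) emits
 *
 *	static ssize_t loop_attr_offset_show(struct loop_device *, char *);
 *	static ssize_t loop_attr_do_show_offset(struct device *d,
 *				struct device_attribute *attr, char *b)
 *	{
 *		return loop_attr_show(d, b, loop_attr_offset_show);
 *	}
 *	static struct device_attribute loop_attr_offset =
 *		__ATTR(offset, S_IRUGO, loop_attr_do_show_offset, NULL);
 */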
 740
 741static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
 742{
 743	ssize_t ret;
 744	char *p = NULL;
 745
 746	spin_lock_irq(&lo->lo_lock);
 747	if (lo->lo_backing_file)
 748		p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
 749	spin_unlock_irq(&lo->lo_lock);
 750
 751	if (IS_ERR_OR_NULL(p))
 752		ret = PTR_ERR(p);
 753	else {
 754		ret = strlen(p);
 755		memmove(buf, p, ret);
 756		buf[ret++] = '\n';
 757		buf[ret] = 0;
 758	}
 759
 760	return ret;
 761}
 762
 763static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
 764{
 765	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
 766}
 767
 768static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
 769{
 770	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
 771}
 772
 773static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
 774{
 775	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
 776
 777	return sprintf(buf, "%s\n", autoclear ? "1" : "0");
 778}
 779
 780LOOP_ATTR_RO(backing_file);
 781LOOP_ATTR_RO(offset);
 782LOOP_ATTR_RO(sizelimit);
 783LOOP_ATTR_RO(autoclear);
 784
 785static struct attribute *loop_attrs[] = {
 786	&loop_attr_backing_file.attr,
 787	&loop_attr_offset.attr,
 788	&loop_attr_sizelimit.attr,
 789	&loop_attr_autoclear.attr,
 790	NULL,
 791};
 792
 793static struct attribute_group loop_attribute_group = {
 794	.name = "loop",
 795	.attrs = loop_attrs,
 796};
 797
 798static int loop_sysfs_init(struct loop_device *lo)
 799{
 800	return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
 801				  &loop_attribute_group);
 802}
 803
 804static void loop_sysfs_exit(struct loop_device *lo)
 805{
 806	sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
 807			   &loop_attribute_group);
 808}
 809
 810static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 811		       struct block_device *bdev, unsigned int arg)
 812{
 813	struct file	*file, *f;
 814	struct inode	*inode;
 815	struct address_space *mapping;
 816	unsigned lo_blocksize;
 817	int		lo_flags = 0;
 818	int		error;
 819	loff_t		size;
 820
 821	/* This is safe, since we have a reference from open(). */
 822	__module_get(THIS_MODULE);
 823
 824	error = -EBADF;
 825	file = fget(arg);
 826	if (!file)
 827		goto out;
 828
 829	error = -EBUSY;
 830	if (lo->lo_state != Lo_unbound)
 831		goto out_putf;
 832
 833	/* Avoid recursion */
 834	f = file;
 835	while (is_loop_device(f)) {
 836		struct loop_device *l;
 837
 838		if (f->f_mapping->host->i_bdev == bdev)
 839			goto out_putf;
 840
 841		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
 842		if (l->lo_state == Lo_unbound) {
 843			error = -EINVAL;
 844			goto out_putf;
 845		}
 846		f = l->lo_backing_file;
 847	}
 848
 849	mapping = file->f_mapping;
 850	inode = mapping->host;
 851
 852	if (!(file->f_mode & FMODE_WRITE))
 853		lo_flags |= LO_FLAGS_READ_ONLY;
 854
 855	error = -EINVAL;
 856	if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 857		const struct address_space_operations *aops = mapping->a_ops;
 858
 859		if (aops->write_begin)
 860			lo_flags |= LO_FLAGS_USE_AOPS;
 861		if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
 862			lo_flags |= LO_FLAGS_READ_ONLY;
 863
 864		lo_blocksize = S_ISBLK(inode->i_mode) ?
 865			inode->i_bdev->bd_block_size : PAGE_SIZE;
 866
 867		error = 0;
 868	} else {
 869		goto out_putf;
 870	}
 871
 872	size = get_loop_size(lo, file);
 873
 874	if ((loff_t)(sector_t)size != size) {
 875		error = -EFBIG;
 876		goto out_putf;
 877	}
 878
 879	if (!(mode & FMODE_WRITE))
 880		lo_flags |= LO_FLAGS_READ_ONLY;
 881
 882	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
 883
 884	lo->lo_blocksize = lo_blocksize;
 885	lo->lo_device = bdev;
 886	lo->lo_flags = lo_flags;
 887	lo->lo_backing_file = file;
 888	lo->transfer = transfer_none;
 889	lo->ioctl = NULL;
 890	lo->lo_sizelimit = 0;
 891	lo->old_gfp_mask = mapping_gfp_mask(mapping);
 892	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 893
 894	bio_list_init(&lo->lo_bio_list);
 895
 896	/*
 897	 * set queue make_request_fn, and add limits based on lower level
 898	 * device
 899	 */
 900	blk_queue_make_request(lo->lo_queue, loop_make_request);
 901	lo->lo_queue->queuedata = lo;
 902
 903	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 904		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
 905
 906	set_capacity(lo->lo_disk, size);
 907	bd_set_size(bdev, size << 9);
 908	loop_sysfs_init(lo);
 909	/* let user-space know about the new size */
 910	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 911
 912	set_blocksize(bdev, lo_blocksize);
 913
 914	lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
 915						lo->lo_number);
 916	if (IS_ERR(lo->lo_thread)) {
 917		error = PTR_ERR(lo->lo_thread);
 918		goto out_clr;
 919	}
 920	lo->lo_state = Lo_bound;
 921	wake_up_process(lo->lo_thread);
 922	if (max_part > 0)
 923		ioctl_by_bdev(bdev, BLKRRPART, 0);
 924	return 0;
 925
 926out_clr:
 927	loop_sysfs_exit(lo);
 928	lo->lo_thread = NULL;
 929	lo->lo_device = NULL;
 930	lo->lo_backing_file = NULL;
 931	lo->lo_flags = 0;
 932	set_capacity(lo->lo_disk, 0);
 933	invalidate_bdev(bdev);
 934	bd_set_size(bdev, 0);
 935	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 936	mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
 937	lo->lo_state = Lo_unbound;
 938 out_putf:
 939	fput(file);
 940 out:
 941	/* This is safe: open() is still holding a reference. */
 942	module_put(THIS_MODULE);
 943	return error;
 944}
 945
 946static int
 947loop_release_xfer(struct loop_device *lo)
 948{
 949	int err = 0;
 950	struct loop_func_table *xfer = lo->lo_encryption;
 951
 952	if (xfer) {
 953		if (xfer->release)
 954			err = xfer->release(lo);
 955		lo->transfer = NULL;
 956		lo->lo_encryption = NULL;
 957		module_put(xfer->owner);
 958	}
 959	return err;
 960}
 961
 962static int
 963loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
 964	       const struct loop_info64 *i)
 965{
 966	int err = 0;
 967
 968	if (xfer) {
 969		struct module *owner = xfer->owner;
 970
 971		if (!try_module_get(owner))
 972			return -EINVAL;
 973		if (xfer->init)
 974			err = xfer->init(lo, i);
 975		if (err)
 976			module_put(owner);
 977		else
 978			lo->lo_encryption = xfer;
 979	}
 980	return err;
 981}
 982
 983static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 984{
 985	struct file *filp = lo->lo_backing_file;
 986	gfp_t gfp = lo->old_gfp_mask;
 987
 988	if (lo->lo_state != Lo_bound)
 989		return -ENXIO;
 990
 991	if (lo->lo_refcnt > 1)	/* we needed one fd for the ioctl */
 992		return -EBUSY;
 993
 994	if (filp == NULL)
 995		return -EINVAL;
 996
 997	spin_lock_irq(&lo->lo_lock);
 998	lo->lo_state = Lo_rundown;
 999	spin_unlock_irq(&lo->lo_lock);
1000
1001	kthread_stop(lo->lo_thread);
1002
1003	spin_lock_irq(&lo->lo_lock);
1004	lo->lo_backing_file = NULL;
1005	spin_unlock_irq(&lo->lo_lock);
1006
1007	loop_release_xfer(lo);
1008	lo->transfer = NULL;
1009	lo->ioctl = NULL;
1010	lo->lo_device = NULL;
1011	lo->lo_encryption = NULL;
1012	lo->lo_offset = 0;
1013	lo->lo_sizelimit = 0;
1014	lo->lo_encrypt_key_size = 0;
1015	lo->lo_flags = 0;
1016	lo->lo_thread = NULL;
1017	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
1018	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
1019	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
1020	if (bdev)
1021		invalidate_bdev(bdev);
1022	set_capacity(lo->lo_disk, 0);
1023	loop_sysfs_exit(lo);
1024	if (bdev) {
1025		bd_set_size(bdev, 0);
1026		/* let user-space know about this change */
1027		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
1028	}
1029	mapping_set_gfp_mask(filp->f_mapping, gfp);
1030	lo->lo_state = Lo_unbound;
1031	/* This is safe: open() is still holding a reference. */
1032	module_put(THIS_MODULE);
1033	if (max_part > 0 && bdev)
1034		ioctl_by_bdev(bdev, BLKRRPART, 0);
1035	mutex_unlock(&lo->lo_ctl_mutex);
1036	/*
1037	 * Need not hold lo_ctl_mutex to fput backing file.
1038	 * Calling fput holding lo_ctl_mutex triggers a circular
1039	 * lock dependency possibility warning as fput can take
1040	 * bd_mutex which is usually taken before lo_ctl_mutex.
1041	 */
1042	fput(filp);
1043	return 0;
1044}
1045
1046static int
1047loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1048{
1049	int err;
1050	struct loop_func_table *xfer;
1051	uid_t uid = current_uid();
1052
1053	if (lo->lo_encrypt_key_size &&
1054	    lo->lo_key_owner != uid &&
1055	    !capable(CAP_SYS_ADMIN))
1056		return -EPERM;
1057	if (lo->lo_state != Lo_bound)
1058		return -ENXIO;
1059	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
1060		return -EINVAL;
1061
1062	err = loop_release_xfer(lo);
1063	if (err)
1064		return err;
1065
1066	if (info->lo_encrypt_type) {
1067		unsigned int type = info->lo_encrypt_type;
1068
1069		if (type >= MAX_LO_CRYPT)
1070			return -EINVAL;
1071		xfer = xfer_funcs[type];
1072		if (xfer == NULL)
1073			return -EINVAL;
1074	} else
1075		xfer = NULL;
1076
1077	err = loop_init_xfer(lo, xfer, info);
1078	if (err)
1079		return err;
1080
1081	if (lo->lo_offset != info->lo_offset ||
1082	    lo->lo_sizelimit != info->lo_sizelimit) {
1083		lo->lo_offset = info->lo_offset;
1084		lo->lo_sizelimit = info->lo_sizelimit;
1085		if (figure_loop_size(lo))
1086			return -EFBIG;
1087	}
1088
1089	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
1090	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
1091	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
1092	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
1093
1094	if (!xfer)
1095		xfer = &none_funcs;
1096	lo->transfer = xfer->transfer;
1097	lo->ioctl = xfer->ioctl;
1098
1099	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
1100	     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
1101		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
1102
1103	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
1104	lo->lo_init[0] = info->lo_init[0];
1105	lo->lo_init[1] = info->lo_init[1];
1106	if (info->lo_encrypt_key_size) {
1107		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
1108		       info->lo_encrypt_key_size);
1109		lo->lo_key_owner = uid;
1110	}
1111
1112	return 0;
1113}
1114
1115static int
1116loop_get_status(struct loop_device *lo, struct loop_info64 *info)
1117{
1118	struct file *file = lo->lo_backing_file;
1119	struct kstat stat;
1120	int error;
1121
1122	if (lo->lo_state != Lo_bound)
1123		return -ENXIO;
1124	error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
1125	if (error)
1126		return error;
1127	memset(info, 0, sizeof(*info));
1128	info->lo_number = lo->lo_number;
1129	info->lo_device = huge_encode_dev(stat.dev);
1130	info->lo_inode = stat.ino;
1131	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
1132	info->lo_offset = lo->lo_offset;
1133	info->lo_sizelimit = lo->lo_sizelimit;
1134	info->lo_flags = lo->lo_flags;
1135	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1136	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
1137	info->lo_encrypt_type =
1138		lo->lo_encryption ? lo->lo_encryption->number : 0;
1139	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
1140		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
1141		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
1142		       lo->lo_encrypt_key_size);
1143	}
1144	return 0;
1145}
1146
1147static void
1148loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
1149{
1150	memset(info64, 0, sizeof(*info64));
1151	info64->lo_number = info->lo_number;
1152	info64->lo_device = info->lo_device;
1153	info64->lo_inode = info->lo_inode;
1154	info64->lo_rdevice = info->lo_rdevice;
1155	info64->lo_offset = info->lo_offset;
1156	info64->lo_sizelimit = 0;
1157	info64->lo_encrypt_type = info->lo_encrypt_type;
1158	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
1159	info64->lo_flags = info->lo_flags;
1160	info64->lo_init[0] = info->lo_init[0];
1161	info64->lo_init[1] = info->lo_init[1];
1162	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1163		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
1164	else
1165		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
1166	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
1167}
1168
1169static int
1170loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
1171{
1172	memset(info, 0, sizeof(*info));
1173	info->lo_number = info64->lo_number;
1174	info->lo_device = info64->lo_device;
1175	info->lo_inode = info64->lo_inode;
1176	info->lo_rdevice = info64->lo_rdevice;
1177	info->lo_offset = info64->lo_offset;
1178	info->lo_encrypt_type = info64->lo_encrypt_type;
1179	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
1180	info->lo_flags = info64->lo_flags;
1181	info->lo_init[0] = info64->lo_init[0];
1182	info->lo_init[1] = info64->lo_init[1];
1183	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1184		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1185	else
1186		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
1187	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1188
1189	/* error in case values were truncated */
1190	if (info->lo_device != info64->lo_device ||
1191	    info->lo_rdevice != info64->lo_rdevice ||
1192	    info->lo_inode != info64->lo_inode ||
1193	    info->lo_offset != info64->lo_offset)
1194		return -EOVERFLOW;
1195
1196	return 0;
1197}
1198
1199static int
1200loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1201{
1202	struct loop_info info;
1203	struct loop_info64 info64;
1204
1205	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
1206		return -EFAULT;
1207	loop_info64_from_old(&info, &info64);
1208	return loop_set_status(lo, &info64);
1209}
1210
1211static int
1212loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1213{
1214	struct loop_info64 info64;
1215
1216	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
1217		return -EFAULT;
1218	return loop_set_status(lo, &info64);
1219}
1220
1221static int
1222loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1223	struct loop_info info;
1224	struct loop_info64 info64;
1225	int err = 0;
1226
1227	if (!arg)
1228		err = -EINVAL;
1229	if (!err)
1230		err = loop_get_status(lo, &info64);
1231	if (!err)
1232		err = loop_info64_to_old(&info64, &info);
1233	if (!err && copy_to_user(arg, &info, sizeof(info)))
1234		err = -EFAULT;
1235
1236	return err;
1237}
1238
1239static int
1240loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1241	struct loop_info64 info64;
1242	int err = 0;
1243
1244	if (!arg)
1245		err = -EINVAL;
1246	if (!err)
1247		err = loop_get_status(lo, &info64);
1248	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1249		err = -EFAULT;
1250
1251	return err;
1252}
1253
1254static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
1255{
1256	int err;
1257	sector_t sec;
1258	loff_t sz;
1259
1260	err = -ENXIO;
1261	if (unlikely(lo->lo_state != Lo_bound))
1262		goto out;
1263	err = figure_loop_size(lo);
1264	if (unlikely(err))
1265		goto out;
1266	sec = get_capacity(lo->lo_disk);
1267	/* the width of sector_t may be narrow for bit-shift */
1268	sz = sec;
1269	sz <<= 9;
1270	mutex_lock(&bdev->bd_mutex);
1271	bd_set_size(bdev, sz);
1272	/* let user-space know about the new size */
1273	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
1274	mutex_unlock(&bdev->bd_mutex);
1275
 
 
 
 
1276 out:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1277	return err;
1278}
1279
1280static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1281	unsigned int cmd, unsigned long arg)
1282{
1283	struct loop_device *lo = bdev->bd_disk->private_data;
1284	int err;
1285
1286	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
1287	switch (cmd) {
1288	case LOOP_SET_FD:
1289		err = loop_set_fd(lo, mode, bdev, arg);
1290		break;
1291	case LOOP_CHANGE_FD:
1292		err = loop_change_fd(lo, bdev, arg);
1293		break;
1294	case LOOP_CLR_FD:
1295		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
1296		err = loop_clr_fd(lo, bdev);
1297		if (!err)
1298			goto out_unlocked;
1299		break;
1300	case LOOP_SET_STATUS:
1301		err = loop_set_status_old(lo, (struct loop_info __user *) arg);
1302		break;
1303	case LOOP_GET_STATUS:
1304		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
1305		break;
1306	case LOOP_SET_STATUS64:
1307		err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
1308		break;
1309	case LOOP_GET_STATUS64:
1310		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
1311		break;
1312	case LOOP_SET_CAPACITY:
1313		err = -EPERM;
1314		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1315			err = loop_set_capacity(lo, bdev);
1316		break;
1317	default:
1318		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1319	}
1320	mutex_unlock(&lo->lo_ctl_mutex);
1321
1322out_unlocked:
1323	return err;
1324}
1325
1326#ifdef CONFIG_COMPAT
1327struct compat_loop_info {
1328	compat_int_t	lo_number;      /* ioctl r/o */
1329	compat_dev_t	lo_device;      /* ioctl r/o */
1330	compat_ulong_t	lo_inode;       /* ioctl r/o */
1331	compat_dev_t	lo_rdevice;     /* ioctl r/o */
1332	compat_int_t	lo_offset;
1333	compat_int_t	lo_encrypt_type;
1334	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
1335	compat_int_t	lo_flags;       /* ioctl r/o */
1336	char		lo_name[LO_NAME_SIZE];
1337	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
1338	compat_ulong_t	lo_init[2];
1339	char		reserved[4];
1340};
1341
1342/*
1343 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
1344 * - noinlined to reduce stack space usage in main part of driver
1345 */
1346static noinline int
1347loop_info64_from_compat(const struct compat_loop_info __user *arg,
1348			struct loop_info64 *info64)
1349{
1350	struct compat_loop_info info;
1351
1352	if (copy_from_user(&info, arg, sizeof(info)))
1353		return -EFAULT;
1354
1355	memset(info64, 0, sizeof(*info64));
1356	info64->lo_number = info.lo_number;
1357	info64->lo_device = info.lo_device;
1358	info64->lo_inode = info.lo_inode;
1359	info64->lo_rdevice = info.lo_rdevice;
1360	info64->lo_offset = info.lo_offset;
1361	info64->lo_sizelimit = 0;
1362	info64->lo_encrypt_type = info.lo_encrypt_type;
1363	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
1364	info64->lo_flags = info.lo_flags;
1365	info64->lo_init[0] = info.lo_init[0];
1366	info64->lo_init[1] = info.lo_init[1];
1367	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1368		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
1369	else
1370		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
1371	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
1372	return 0;
1373}
1374
1375/*
1376 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
1377 * - noinlined to reduce stack space usage in main part of driver
1378 */
1379static noinline int
1380loop_info64_to_compat(const struct loop_info64 *info64,
1381		      struct compat_loop_info __user *arg)
1382{
1383	struct compat_loop_info info;
1384
1385	memset(&info, 0, sizeof(info));
1386	info.lo_number = info64->lo_number;
1387	info.lo_device = info64->lo_device;
1388	info.lo_inode = info64->lo_inode;
1389	info.lo_rdevice = info64->lo_rdevice;
1390	info.lo_offset = info64->lo_offset;
1391	info.lo_encrypt_type = info64->lo_encrypt_type;
1392	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
1393	info.lo_flags = info64->lo_flags;
1394	info.lo_init[0] = info64->lo_init[0];
1395	info.lo_init[1] = info64->lo_init[1];
1396	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1397		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1398	else
1399		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
1400	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1401
1402	/* error in case values were truncated */
1403	if (info.lo_device != info64->lo_device ||
1404	    info.lo_rdevice != info64->lo_rdevice ||
1405	    info.lo_inode != info64->lo_inode ||
1406	    info.lo_offset != info64->lo_offset ||
1407	    info.lo_init[0] != info64->lo_init[0] ||
1408	    info.lo_init[1] != info64->lo_init[1])
1409		return -EOVERFLOW;
1410
1411	if (copy_to_user(arg, &info, sizeof(info)))
1412		return -EFAULT;
1413	return 0;
1414}
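/*
 * Worked example (illustrative): an lo_offset of 4 GiB (0x100000000)
 * does not fit in the 32-bit compat_int_t field, so the truncation
 * check above returns -EOVERFLOW instead of silently reporting a wrong
 * offset to 32-bit userspace.
 */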
1415
static int
loop_set_status_compat(struct loop_device *lo,
		       const struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int ret;

	ret = loop_info64_from_compat(arg, &info64);
	if (ret < 0)
		return ret;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_compat(struct loop_device *lo,
		       struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_compat(&info64, arg);
	return err;
}

static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	switch (cmd) {
	case LOOP_SET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_set_status_compat(
			lo, (const struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_GET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_get_status_compat(
			lo, (struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_SET_CAPACITY:
	case LOOP_CLR_FD:
	case LOOP_GET_STATUS64:
	case LOOP_SET_STATUS64:
		arg = (unsigned long) compat_ptr(arg);
		/* fall through: forward to the native ioctl handler */
	case LOOP_SET_FD:
	case LOOP_CHANGE_FD:
		err = lo_ioctl(bdev, mode, cmd, arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
#endif

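/*
 * Illustrative sketch (not part of the driver): how the compat path above
 * is reached.  A 32-bit program running on a 64-bit kernel issues the
 * legacy ioctl with the 32-bit struct loop_info from <linux/loop.h>:
 *
 *	struct loop_info li;
 *	int fd = open("/dev/loop0", O_RDWR);
 *
 *	if (ioctl(fd, LOOP_GET_STATUS, &li) == 0)
 *		printf("offset=%ld\n", (long)li.lo_offset);
 *
 * The kernel routes this through lo_compat_ioctl(), which translates
 * between struct compat_loop_info and struct loop_info64.
 */
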
static int lo_open(struct block_device *bdev, fmode_t mode)
{
	struct loop_device *lo;
	int err = 0;

	mutex_lock(&loop_index_mutex);
	lo = bdev->bd_disk->private_data;
	if (!lo) {
		err = -ENXIO;
		goto out;
	}

	mutex_lock(&lo->lo_ctl_mutex);
	lo->lo_refcnt++;
	mutex_unlock(&lo->lo_ctl_mutex);
out:
	mutex_unlock(&loop_index_mutex);
	return err;
}

static int lo_release(struct gendisk *disk, fmode_t mode)
{
	struct loop_device *lo = disk->private_data;
	int err;

	mutex_lock(&lo->lo_ctl_mutex);

	if (--lo->lo_refcnt)
		goto out;

	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
		/*
		 * In autoclear mode, stop the loop thread
		 * and remove configuration after last close.
		 */
		err = loop_clr_fd(lo, NULL);
		if (!err)
			goto out_unlocked;
	} else {
		/*
		 * Otherwise keep thread (if running) and config,
		 * but flush possible ongoing bios in thread.
		 */
		loop_flush(lo);
	}

out:
	mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
	return 0;
}

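/*
 * Illustrative sketch (not part of the driver): LO_FLAGS_AUTOCLEAR is what
 * makes "mount -o loop" devices disappear on their own.  Userspace sets
 * the flag when configuring the device; loop_fd below is a hypothetical
 * descriptor for an already-bound /dev/loopN:
 *
 *	struct loop_info64 info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.lo_flags = LO_FLAGS_AUTOCLEAR;
 *	ioctl(loop_fd, LOOP_SET_STATUS64, &info);
 *
 * Once set, the lo_release() path above calls loop_clr_fd() on the last
 * close instead of waiting for an explicit LOOP_CLR_FD.
 */
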
static const struct block_device_operations lo_fops = {
	.owner =	THIS_MODULE,
	.open =		lo_open,
	.release =	lo_release,
	.ioctl =	lo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	lo_compat_ioctl,
#endif
};

/*
 * And now the module code and kernel interface.
 */
static int max_loop;
module_param(max_loop, int, S_IRUGO);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);

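/*
 * Typical usage of the two module parameters above when loading the
 * driver (the values are only examples):
 *
 *	modprobe loop max_loop=8 max_part=15
 *
 * This pre-creates /dev/loop0../dev/loop7 as a hard limit and reserves
 * enough minor numbers for up to 15 partitions per device
 * (e.g. /dev/loop0p1../dev/loop0p15).
 */
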
int loop_register_transfer(struct loop_func_table *funcs)
{
	unsigned int n = funcs->number;

	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
		return -EINVAL;
	xfer_funcs[n] = funcs;
	return 0;
}

static int unregister_transfer_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_func_table *xfer = data;

	mutex_lock(&lo->lo_ctl_mutex);
	if (lo->lo_encryption == xfer)
		loop_release_xfer(lo);
	mutex_unlock(&lo->lo_ctl_mutex);
	return 0;
}

int loop_unregister_transfer(int number)
{
	unsigned int n = number;
	struct loop_func_table *xfer;

	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
		return -EINVAL;

	xfer_funcs[n] = NULL;
	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
	return 0;
}

EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);

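/*
 * Illustrative sketch (not part of the driver): the rough shape of a
 * transfer module built on the two exports above, loosely modelled on the
 * old cryptoloop module.  The callback names are hypothetical; the field
 * names follow struct loop_func_table in <linux/loop.h>:
 *
 *	static struct loop_func_table example_funcs = {
 *		.number   = LO_CRYPT_CRYPTOAPI,
 *		.init     = example_init,
 *		.release  = example_release,
 *		.transfer = example_transfer,
 *		.owner    = THIS_MODULE,
 *	};
 *
 * Such a module calls loop_register_transfer(&example_funcs) from its
 * init function and loop_unregister_transfer(LO_CRYPT_CRYPTOAPI) on exit;
 * unregister_transfer_cb() above then detaches the transfer from any
 * device still using it.
 */
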
static int loop_add(struct loop_device **l, int i)
{
	struct loop_device *lo;
	struct gendisk *disk;
	int err;

	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
	if (!lo) {
		err = -ENOMEM;
		goto out;
	}

	/* idr_pre_get() returns 0 on allocation failure, not a -errno */
	if (!idr_pre_get(&loop_index_idr, GFP_KERNEL)) {
		err = -ENOMEM;
		goto out_free_dev;
	}

	if (i >= 0) {
		int m;

		/* create specific i in the index */
		err = idr_get_new_above(&loop_index_idr, lo, i, &m);
		if (err >= 0 && i != m) {
			idr_remove(&loop_index_idr, m);
			err = -EEXIST;
		}
	} else if (i == -1) {
		int m;

		/* get next free nr */
		err = idr_get_new(&loop_index_idr, lo, &m);
		if (err >= 0)
			i = m;
	} else {
		err = -EINVAL;
	}
	if (err < 0)
		goto out_free_dev;

	err = -ENOMEM;
	lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
	if (!lo->lo_queue)
		goto out_free_idr;

	disk = lo->lo_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;

	mutex_init(&lo->lo_ctl_mutex);
	lo->lo_number		= i;
	lo->lo_thread		= NULL;
	init_waitqueue_head(&lo->lo_event);
	spin_lock_init(&lo->lo_lock);
	disk->major		= LOOP_MAJOR;
	disk->first_minor	= i << part_shift;
	disk->fops		= &lo_fops;
	disk->private_data	= lo;
	disk->queue		= lo->lo_queue;
	sprintf(disk->disk_name, "loop%d", i);
	add_disk(disk);
	*l = lo;
	return lo->lo_number;

out_free_queue:
	blk_cleanup_queue(lo->lo_queue);
out_free_idr:
	idr_remove(&loop_index_idr, i);
out_free_dev:
	kfree(lo);
out:
	return err;
}

static void loop_remove(struct loop_device *lo)
{
	del_gendisk(lo->lo_disk);
	blk_cleanup_queue(lo->lo_queue);
	put_disk(lo->lo_disk);
	kfree(lo);
}

static int find_free_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_device **l = data;

	if (lo->lo_state == Lo_unbound) {
		*l = lo;
		return 1;
	}
	return 0;
}

static int loop_lookup(struct loop_device **l, int i)
{
	struct loop_device *lo;
	int ret = -ENODEV;

	if (i < 0) {
		int err;

		/* find any unbound device; find_free_cb() stops the walk */
		err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
		if (err == 1) {
			*l = lo;
			ret = lo->lo_number;
		}
		goto out;
	}

	/* lookup and return a specific i */
	lo = idr_find(&loop_index_idr, i);
	if (lo) {
		*l = lo;
		ret = lo->lo_number;
	}
out:
	return ret;
}

static struct kobject *loop_probe(dev_t dev, int *part, void *data)
{
	struct loop_device *lo;
	struct kobject *kobj;
	int err;

	mutex_lock(&loop_index_mutex);
	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		err = loop_add(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		kobj = ERR_PTR(err);
	else
		kobj = get_disk(lo->lo_disk);
	mutex_unlock(&loop_index_mutex);

	*part = 0;
	return kobj;
}

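/*
 * Illustrative note: loop_probe() backs the "dead device node" path
 * mentioned in loop_init() below.  Opening a node whose minor has no
 * device yet, e.g. (as root, assuming no loop25 exists and LOOP_MAJOR
 * is 7):
 *
 *	mknod /dev/loop25 b 7 25
 *	exec 3</dev/loop25
 *
 * lands here via the block region registered for LOOP_MAJOR, and the
 * lookup-then-add sequence creates the device on demand.
 */
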
static long loop_control_ioctl(struct file *file, unsigned int cmd,
			       unsigned long parm)
{
	struct loop_device *lo;
	int ret = -ENOSYS;

	mutex_lock(&loop_index_mutex);
	switch (cmd) {
	case LOOP_CTL_ADD:
		ret = loop_lookup(&lo, parm);
		if (ret >= 0) {
			ret = -EEXIST;
			break;
		}
		ret = loop_add(&lo, parm);
		break;
	case LOOP_CTL_REMOVE:
		ret = loop_lookup(&lo, parm);
		if (ret < 0)
			break;
		mutex_lock(&lo->lo_ctl_mutex);
		if (lo->lo_state != Lo_unbound) {
			ret = -EBUSY;
			mutex_unlock(&lo->lo_ctl_mutex);
			break;
		}
		if (lo->lo_refcnt > 0) {
			ret = -EBUSY;
			mutex_unlock(&lo->lo_ctl_mutex);
			break;
		}
		lo->lo_disk->private_data = NULL;
		mutex_unlock(&lo->lo_ctl_mutex);
		idr_remove(&loop_index_idr, lo->lo_number);
		loop_remove(lo);
		break;
	case LOOP_CTL_GET_FREE:
		ret = loop_lookup(&lo, -1);
		if (ret >= 0)
			break;
		ret = loop_add(&lo, -1);
	}
	mutex_unlock(&loop_index_mutex);

	return ret;
}

static const struct file_operations loop_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= loop_control_ioctl,
	.compat_ioctl	= loop_control_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice loop_misc = {
	.minor		= LOOP_CTRL_MINOR,
	.name		= "loop-control",
	.fops		= &loop_ctl_fops,
};

MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
MODULE_ALIAS("devname:loop-control");

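/*
 * Illustrative sketch (not part of the driver): typical userspace use of
 * /dev/loop-control, as done by util-linux losetup:
 *
 *	int ctl = open("/dev/loop-control", O_RDWR);
 *	int nr  = ioctl(ctl, LOOP_CTL_GET_FREE);	// e.g. returns 3
 *	char name[32];
 *
 *	snprintf(name, sizeof(name), "/dev/loop%d", nr);
 *	// open name and configure it with LOOP_SET_FD / LOOP_SET_STATUS64
 *
 * LOOP_CTL_ADD and LOOP_CTL_REMOVE take an explicit index instead and
 * fail with -EEXIST / -EBUSY as handled in loop_control_ioctl() above.
 */
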
static int __init loop_init(void)
{
	int i, nr;
	unsigned long range;
	struct loop_device *lo;
	int err;

	err = misc_register(&loop_misc);
	if (err < 0)
		return err;

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can decide on the correct minor
		 * numbers if they want to create more devices.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS) {
		err = -EINVAL;
		goto err_out;
	}

	if (max_loop > 1UL << (MINORBITS - part_shift)) {
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * If max_loop is specified, create that many devices upfront.
	 * This also becomes a hard limit. If max_loop is not specified,
	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
	 * init time. Loop devices can be requested on-demand with the
	 * /dev/loop-control interface, or be instantiated by accessing
	 * a 'dead' device node.
	 */
	if (max_loop) {
		nr = max_loop;
		range = max_loop << part_shift;
	} else {
		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
		range = 1UL << MINORBITS;
	}

	if (register_blkdev(LOOP_MAJOR, "loop")) {
		err = -EIO;
		goto err_out;
	}

	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
				  THIS_MODULE, loop_probe, NULL, NULL);

	/* pre-create the number of devices given by config or max_loop */
	mutex_lock(&loop_index_mutex);
	for (i = 0; i < nr; i++)
		loop_add(&lo, i);
	mutex_unlock(&loop_index_mutex);

	printk(KERN_INFO "loop: module loaded\n");
	return 0;

err_out:
	misc_deregister(&loop_misc);
	return err;
}

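/*
 * Worked example of the minor-number layout computed above: with
 * max_part=15, fls(15) = 4, so part_shift = 4 and each device owns
 * 16 minors (partition 0 is the whole disk, 1..15 are partitions).
 * With max_loop=8 the registered region then spans
 * range = 8 << 4 = 128 minors; with max_loop=0 the full 1 << MINORBITS
 * space is claimed so that devices can be created on demand.
 */
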
static int loop_exit_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;

	loop_remove(lo);
	return 0;
}

static void __exit loop_exit(void)
{
	unsigned long range;

	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;

	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
	idr_remove_all(&loop_index_idr);
	idr_destroy(&loop_index_idr);

	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
	unregister_blkdev(LOOP_MAJOR, "loop");

	misc_deregister(&loop_misc);
}

module_init(loop_init);
module_exit(loop_exit);

#ifndef MODULE
static int __init max_loop_setup(char *str)
{
	max_loop = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("max_loop=", max_loop_setup);
#endif
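
/*
 * With the driver built in, the same limit is set on the kernel command
 * line, e.g.:
 *
 *	linux ... max_loop=8
 *
 * simple_strtol() accepts decimal, octal (leading 0) or hex (leading 0x)
 * here because the base argument is 0.
 */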