v3.1
   1/*
   2 *  linux/drivers/block/loop.c
   3 *
   4 *  Written by Theodore Ts'o, 3/29/93
   5 *
   6 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
   7 * permitted under the GNU General Public License.
   8 *
   9 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
  10 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
  11 *
  12 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
  13 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
  14 *
  15 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
  16 *
  17 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
  18 *
  19 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
  20 *
  21 * Loadable modules and other fixes by AK, 1998
  22 *
  23 * Make real block number available to downstream transfer functions, enables
  24 * CBC (and relatives) mode encryption requiring unique IVs per data block.
  25 * Reed H. Petty, rhp@draper.net
  26 *
  27 * Maximum number of loop devices now dynamic via max_loop module parameter.
  28 * Russell Kroll <rkroll@exploits.org> 19990701
  29 *
  30 * Maximum number of loop devices when compiled-in now selectable by passing
  31 * max_loop=<1-255> to the kernel on boot.
  32 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
  33 *
  34 * Completely rewrite request handling to be make_request_fn style and
  35 * non blocking, pushing work to a helper thread. Lots of fixes from
  36 * Al Viro too.
  37 * Jens Axboe <axboe@suse.de>, Nov 2000
  38 *
  39 * Support up to 256 loop devices
  40 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
  41 *
  42 * Support for falling back on the write file operation when the address space
  43 * operations write_begin is not available on the backing filesystem.
  44 * Anton Altaparmakov, 16 Feb 2005
  45 *
  46 * Still To Fix:
  47 * - Advisory locking is ignored here.
  48 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
  49 *
  50 */
  51
  52#include <linux/module.h>
  53#include <linux/moduleparam.h>
  54#include <linux/sched.h>
  55#include <linux/fs.h>
  56#include <linux/file.h>
  57#include <linux/stat.h>
  58#include <linux/errno.h>
  59#include <linux/major.h>
  60#include <linux/wait.h>
  61#include <linux/blkdev.h>
  62#include <linux/blkpg.h>
  63#include <linux/init.h>
  64#include <linux/swap.h>
  65#include <linux/slab.h>
  66#include <linux/loop.h>
  67#include <linux/compat.h>
  68#include <linux/suspend.h>
  69#include <linux/freezer.h>
  70#include <linux/mutex.h>
  71#include <linux/writeback.h>
  72#include <linux/buffer_head.h>		/* for invalidate_bdev() */
  73#include <linux/completion.h>
  74#include <linux/highmem.h>
  75#include <linux/kthread.h>
  76#include <linux/splice.h>
  77#include <linux/sysfs.h>
  78#include <linux/miscdevice.h>
  79#include <asm/uaccess.h>
  80
  81static DEFINE_IDR(loop_index_idr);
  82static DEFINE_MUTEX(loop_index_mutex);
  83
  84static int max_part;
  85static int part_shift;
  86
  87/*
  88 * Transfer functions
  89 */
  90static int transfer_none(struct loop_device *lo, int cmd,
  91			 struct page *raw_page, unsigned raw_off,
  92			 struct page *loop_page, unsigned loop_off,
  93			 int size, sector_t real_block)
  94{
  95	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
  96	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
  97
  98	if (cmd == READ)
  99		memcpy(loop_buf, raw_buf, size);
 100	else
 101		memcpy(raw_buf, loop_buf, size);
 102
 103	kunmap_atomic(loop_buf, KM_USER1);
 104	kunmap_atomic(raw_buf, KM_USER0);
 105	cond_resched();
 106	return 0;
 107}
 108
 109static int transfer_xor(struct loop_device *lo, int cmd,
 110			struct page *raw_page, unsigned raw_off,
 111			struct page *loop_page, unsigned loop_off,
 112			int size, sector_t real_block)
 113{
 114	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
 115	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
 116	char *in, *out, *key;
 117	int i, keysize;
 118
 119	if (cmd == READ) {
 120		in = raw_buf;
 121		out = loop_buf;
 122	} else {
 123		in = loop_buf;
 124		out = raw_buf;
 125	}
 126
 127	key = lo->lo_encrypt_key;
 128	keysize = lo->lo_encrypt_key_size;
 129	for (i = 0; i < size; i++)
 130		*out++ = *in++ ^ key[(i & 511) % keysize];
 131
 132	kunmap_atomic(loop_buf, KM_USER1);
 133	kunmap_atomic(raw_buf, KM_USER0);
 134	cond_resched();
 135	return 0;
 136}
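
The XOR transform above is an involution: applying it twice with the same key restores the original bytes, which is why a single routine serves both READ and WRITE. A minimal userspace sketch of the same keystream indexing (illustrative only, not part of the driver):

	#include <stdio.h>

	/* Same indexing as transfer_xor(): the (i & 511) mask restarts the
	 * keystream at every 512-byte boundary. */
	static void xor_buf(char *buf, int size, const char *key, int keysize)
	{
		int i;

		for (i = 0; i < size; i++)
			buf[i] ^= key[(i & 511) % keysize];
	}

	int main(void)
	{
		char data[] = "loop device payload";
		const char key[] = "secret";

		xor_buf(data, sizeof(data) - 1, key, sizeof(key) - 1); /* "encrypt" */
		xor_buf(data, sizeof(data) - 1, key, sizeof(key) - 1); /* "decrypt" */
		printf("%s\n", data); /* prints the original text */
		return 0;
	}
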
 137
 138static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
 139{
 140	if (unlikely(info->lo_encrypt_key_size <= 0))
 141		return -EINVAL;
 142	return 0;
 143}
 144
 145static struct loop_func_table none_funcs = {
 146	.number = LO_CRYPT_NONE,
 147	.transfer = transfer_none,
 148}; 	
 149
 150static struct loop_func_table xor_funcs = {
 151	.number = LO_CRYPT_XOR,
 152	.transfer = transfer_xor,
 153	.init = xor_init
 154}; 	
 155
 156/* xfer_funcs[0] is special - its release function is never called */
 157static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
 158	&none_funcs,
 159	&xor_funcs
 160};
 161
 162static loff_t get_loop_size(struct loop_device *lo, struct file *file)
 163{
 164	loff_t size, offset, loopsize;
 165
 166	/* Compute loopsize in bytes */
 167	size = i_size_read(file->f_mapping->host);
 168	offset = lo->lo_offset;
 169	loopsize = size - offset;
 170	if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
 171		loopsize = lo->lo_sizelimit;
 172
 173	/*
 174	 * Unfortunately, if we want to do I/O on the device,
 175	 * the number of 512-byte sectors has to fit into a sector_t.
 176	 */
 177	return loopsize >> 9;
 178}
 179
 180static int
 181figure_loop_size(struct loop_device *lo)
 182{
 183	loff_t size = get_loop_size(lo, lo->lo_backing_file);
 184	sector_t x = (sector_t)size;
 185
 186	if (unlikely((loff_t)x != size))
 187		return -EFBIG;
 188
 189	set_capacity(lo->lo_disk, x);
 190	return 0;					
 191}
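
Worked example: a 1 GiB backing file with lo_offset = 4096 and no sizelimit yields (2^30 - 4096) >> 9 = 2097144 sectors, which easily fits in sector_t. A standalone sketch of the same arithmetic (values are illustrative):

	#include <stdio.h>

	/* Mirrors get_loop_size(): bytes past the offset, optionally clamped
	 * by sizelimit, expressed as 512-byte sectors. */
	static long long loop_sectors(long long size, long long offset,
				      long long sizelimit)
	{
		long long loopsize = size - offset;

		if (sizelimit > 0 && sizelimit < loopsize)
			loopsize = sizelimit;
		return loopsize >> 9;
	}

	int main(void)
	{
		printf("%lld\n", loop_sectors(1LL << 30, 4096, 0)); /* 2097144 */
		return 0;
	}
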
 192
 193static inline int
 194lo_do_transfer(struct loop_device *lo, int cmd,
 195	       struct page *rpage, unsigned roffs,
 196	       struct page *lpage, unsigned loffs,
 197	       int size, sector_t rblock)
 198{
 199	if (unlikely(!lo->transfer))
 200		return 0;
 201
 202	return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
 203}
 204
 205/**
 206 * do_lo_send_aops - helper for writing data to a loop device
 207 *
 208 * This is the fast version for backing filesystems which implement the address
 209 * space operations write_begin and write_end.
 210 */
 211static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
 212		loff_t pos, struct page *unused)
 213{
 214	struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
 215	struct address_space *mapping = file->f_mapping;
 216	pgoff_t index;
 217	unsigned offset, bv_offs;
 218	int len, ret;
 219
 220	mutex_lock(&mapping->host->i_mutex);
 221	index = pos >> PAGE_CACHE_SHIFT;
 222	offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
 223	bv_offs = bvec->bv_offset;
 224	len = bvec->bv_len;
 225	while (len > 0) {
 226		sector_t IV;
 227		unsigned size, copied;
 228		int transfer_result;
 229		struct page *page;
 230		void *fsdata;
 231
 232		IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
 233		size = PAGE_CACHE_SIZE - offset;
 234		if (size > len)
 235			size = len;
 236
 237		ret = pagecache_write_begin(file, mapping, pos, size, 0,
 238							&page, &fsdata);
 239		if (ret)
 240			goto fail;
 241
 242		file_update_time(file);
 243
 244		transfer_result = lo_do_transfer(lo, WRITE, page, offset,
 245				bvec->bv_page, bv_offs, size, IV);
 246		copied = size;
 247		if (unlikely(transfer_result))
 248			copied = 0;
 249
 250		ret = pagecache_write_end(file, mapping, pos, size, copied,
 251							page, fsdata);
 252		if (ret < 0 || ret != copied)
 253			goto fail;
 254
 255		if (unlikely(transfer_result))
 256			goto fail;
 257
 258		bv_offs += copied;
 259		len -= copied;
 260		offset = 0;
 261		index++;
 262		pos += copied;
 263	}
 264	ret = 0;
 265out:
 266	mutex_unlock(&mapping->host->i_mutex);
 267	return ret;
 268fail:
 269	ret = -1;
 270	goto out;
 271}
 272
 273/**
 274 * __do_lo_send_write - helper for writing data to a loop device
 275 *
 276 * This helper just factors out common code between do_lo_send_direct_write()
 277 * and do_lo_send_write().
 278 */
 279static int __do_lo_send_write(struct file *file,
 280		u8 *buf, const int len, loff_t pos)
 281{
 282	ssize_t bw;
 283	mm_segment_t old_fs = get_fs();
 284
 285	set_fs(get_ds());
 286	bw = file->f_op->write(file, buf, len, &pos);
 287	set_fs(old_fs);
 288	if (likely(bw == len))
 289		return 0;
 290	printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
 291			(unsigned long long)pos, len);
 292	if (bw >= 0)
 293		bw = -EIO;
 294	return bw;
 295}
 296
 297/**
 298 * do_lo_send_direct_write - helper for writing data to a loop device
 299 *
 300 * This is the fast, non-transforming version for backing filesystems which do
 301 * not implement the address space operations write_begin and write_end.
 302 * It uses the write file operation which should be present on all writeable
 303 * filesystems.
 304 */
 305static int do_lo_send_direct_write(struct loop_device *lo,
 306		struct bio_vec *bvec, loff_t pos, struct page *page)
 307{
 308	ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
 309			kmap(bvec->bv_page) + bvec->bv_offset,
 310			bvec->bv_len, pos);
 311	kunmap(bvec->bv_page);
 312	cond_resched();
 313	return bw;
 314}
 315
 316/**
 317 * do_lo_send_write - helper for writing data to a loop device
 318 *
 319 * This is the slow, transforming version for filesystems which do not
 320 * implement the address space operations write_begin and write_end.  It
 321 * uses the write file operation which should be present on all writeable
 322 * filesystems.
 323 *
 324 * Using fops->write is slower than using aops->{prepare,commit}_write in the
 325 * transforming case because we need to double buffer the data as we cannot do
 326 * the transformations in place as we do not have direct access to the
 327 * destination pages of the backing file.
 328 */
 329static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
 330		loff_t pos, struct page *page)
 331{
 332	int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
 333			bvec->bv_offset, bvec->bv_len, pos >> 9);
 334	if (likely(!ret))
 335		return __do_lo_send_write(lo->lo_backing_file,
 336				page_address(page), bvec->bv_len,
 337				pos);
 338	printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
 339			"length %i.\n", (unsigned long long)pos, bvec->bv_len);
 340	if (ret > 0)
 341		ret = -EIO;
 342	return ret;
 343}
 344
 345static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 346{
 347	int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
 348			struct page *page);
 349	struct bio_vec *bvec;
 350	struct page *page = NULL;
 351	int i, ret = 0;
 352
 353	do_lo_send = do_lo_send_aops;
 354	if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) {
 355		do_lo_send = do_lo_send_direct_write;
 356		if (lo->transfer != transfer_none) {
 357			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
 358			if (unlikely(!page))
 359				goto fail;
 360			kmap(page);
 361			do_lo_send = do_lo_send_write;
 362		}
 363	}
 364	bio_for_each_segment(bvec, bio, i) {
 365		ret = do_lo_send(lo, bvec, pos, page);
 366		if (ret < 0)
 367			break;
 368		pos += bvec->bv_len;
 369	}
 370	if (page) {
 371		kunmap(page);
 372		__free_page(page);
 373	}
 374out:
 375	return ret;
 376fail:
 377	printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
 378	ret = -ENOMEM;
 379	goto out;
 380}
 381
 382struct lo_read_data {
 383	struct loop_device *lo;
 384	struct page *page;
 385	unsigned offset;
 386	int bsize;
 387};
 388
 389static int
 390lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 391		struct splice_desc *sd)
 392{
 393	struct lo_read_data *p = sd->u.data;
 394	struct loop_device *lo = p->lo;
 395	struct page *page = buf->page;
 396	sector_t IV;
 397	int size;
 398
 399	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
 400							(buf->offset >> 9);
 401	size = sd->len;
 402	if (size > p->bsize)
 403		size = p->bsize;
 404
 405	if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
 406		printk(KERN_ERR "loop: transfer error block %ld\n",
 407		       page->index);
 408		size = -EINVAL;
 409	}
 410
 411	flush_dcache_page(p->page);
 412
 413	if (size > 0)
 414		p->offset += size;
 415
 416	return size;
 417}
 418
 419static int
 420lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
 421{
 422	return __splice_from_pipe(pipe, sd, lo_splice_actor);
 423}
 424
 425static int
 426do_lo_receive(struct loop_device *lo,
 427	      struct bio_vec *bvec, int bsize, loff_t pos)
 428{
 429	struct lo_read_data cookie;
 430	struct splice_desc sd;
 431	struct file *file;
 432	long retval;
 433
 434	cookie.lo = lo;
 435	cookie.page = bvec->bv_page;
 436	cookie.offset = bvec->bv_offset;
 437	cookie.bsize = bsize;
 438
 439	sd.len = 0;
 440	sd.total_len = bvec->bv_len;
 441	sd.flags = 0;
 442	sd.pos = pos;
 443	sd.u.data = &cookie;
 444
 445	file = lo->lo_backing_file;
 446	retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
 447
 448	if (retval < 0)
 449		return retval;
 450
 451	return 0;
 452}
 453
 454static int
 455lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 456{
 457	struct bio_vec *bvec;
 458	int i, ret = 0;
 459
 460	bio_for_each_segment(bvec, bio, i) {
 461		ret = do_lo_receive(lo, bvec, bsize, pos);
 462		if (ret < 0)
 463			break;
 464		pos += bvec->bv_len;
 465	}
 466	return ret;
 467}
 468
 469static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 470{
 471	loff_t pos;
 472	int ret;
 473
 474	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 475
 476	if (bio_rw(bio) == WRITE) {
 477		struct file *file = lo->lo_backing_file;
 478
 479		if (bio->bi_rw & REQ_FLUSH) {
 480			ret = vfs_fsync(file, 0);
 481			if (unlikely(ret && ret != -EINVAL)) {
 482				ret = -EIO;
 483				goto out;
 484			}
 485		}
 486
 487		ret = lo_send(lo, bio, pos);
 488
 489		if ((bio->bi_rw & REQ_FUA) && !ret) {
 490			ret = vfs_fsync(file, 0);
 491			if (unlikely(ret && ret != -EINVAL))
 492				ret = -EIO;
 493		}
 494	} else
 495		ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
 496
 497out:
 498	return ret;
 499}
 500
 501/*
 502 * Add bio to back of pending list
 503 */
 504static void loop_add_bio(struct loop_device *lo, struct bio *bio)
 505{
 506	bio_list_add(&lo->lo_bio_list, bio);
 507}
 508
 509/*
 510 * Grab first pending buffer
 511 */
 512static struct bio *loop_get_bio(struct loop_device *lo)
 513{
 514	return bio_list_pop(&lo->lo_bio_list);
 515}
 516
 517static int loop_make_request(struct request_queue *q, struct bio *old_bio)
 518{
 519	struct loop_device *lo = q->queuedata;
 520	int rw = bio_rw(old_bio);
 521
 522	if (rw == READA)
 523		rw = READ;
 524
 525	BUG_ON(!lo || (rw != READ && rw != WRITE));
 526
 527	spin_lock_irq(&lo->lo_lock);
 528	if (lo->lo_state != Lo_bound)
 529		goto out;
 530	if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
 531		goto out;
 532	loop_add_bio(lo, old_bio);
 533	wake_up(&lo->lo_event);
 534	spin_unlock_irq(&lo->lo_lock);
 535	return 0;
 536
 537out:
 538	spin_unlock_irq(&lo->lo_lock);
 539	bio_io_error(old_bio);
 540	return 0;
 541}
 542
 543struct switch_request {
 544	struct file *file;
 545	struct completion wait;
 546};
 547
 548static void do_loop_switch(struct loop_device *, struct switch_request *);
 549
 550static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
 551{
 552	if (unlikely(!bio->bi_bdev)) {
 553		do_loop_switch(lo, bio->bi_private);
 554		bio_put(bio);
 555	} else {
 556		int ret = do_bio_filebacked(lo, bio);
 557		bio_endio(bio, ret);
 558	}
 559}
 560
 561/*
 562 * worker thread that handles reads/writes to file backed loop devices,
 563 * to avoid blocking in our make_request_fn. it also does loop decrypting
 564 * on reads for block backed loop, as that is too heavy to do from
 565 * b_end_io context where irqs may be disabled.
 566 *
 567 * Loop explanation:  loop_clr_fd() sets lo_state to Lo_rundown before
 568 * calling kthread_stop().  Therefore once kthread_should_stop() is
 569 * true, make_request will not place any more requests.  Therefore
 570 * once kthread_should_stop() is true and lo_bio is NULL, we are
 571 * done with the loop.
 572 */
 573static int loop_thread(void *data)
 574{
 575	struct loop_device *lo = data;
 576	struct bio *bio;
 577
 578	set_user_nice(current, -20);
 579
 580	while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
 581
 582		wait_event_interruptible(lo->lo_event,
 583				!bio_list_empty(&lo->lo_bio_list) ||
 584				kthread_should_stop());
 585
 586		if (bio_list_empty(&lo->lo_bio_list))
 587			continue;
 588		spin_lock_irq(&lo->lo_lock);
 589		bio = loop_get_bio(lo);
 590		spin_unlock_irq(&lo->lo_lock);
 591
 592		BUG_ON(!bio);
 593		loop_handle_bio(lo, bio);
 594	}
 595
 596	return 0;
 597}
 598
 599/*
 600 * loop_switch performs the hard work of switching a backing store.
 601 * First it needs to flush existing IO, it does this by sending a magic
 602 * BIO down the pipe. The completion of this BIO does the actual switch.
 603 */
 604static int loop_switch(struct loop_device *lo, struct file *file)
 605{
 606	struct switch_request w;
 607	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
 608	if (!bio)
 609		return -ENOMEM;
 610	init_completion(&w.wait);
 611	w.file = file;
 612	bio->bi_private = &w;
 613	bio->bi_bdev = NULL;
 614	loop_make_request(lo->lo_queue, bio);
 615	wait_for_completion(&w.wait);
 616	return 0;
 617}
 618
 619/*
 620 * Helper to flush the IOs in loop, but keeping loop thread running
 621 */
 622static int loop_flush(struct loop_device *lo)
 623{
 624	/* loop not yet configured, no running thread, nothing to flush */
 625	if (!lo->lo_thread)
 626		return 0;
 627
 628	return loop_switch(lo, NULL);
 629}
 630
 631/*
 632 * Do the actual switch; called from the BIO completion routine
 633 */
 634static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
 635{
 636	struct file *file = p->file;
 637	struct file *old_file = lo->lo_backing_file;
 638	struct address_space *mapping;
 639
 640	/* if no new file, only flush of queued bios requested */
 641	if (!file)
 642		goto out;
 643
 644	mapping = file->f_mapping;
 645	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
 646	lo->lo_backing_file = file;
 647	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
 648		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
 649	lo->old_gfp_mask = mapping_gfp_mask(mapping);
 650	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 651out:
 652	complete(&p->wait);
 653}
 654
 655
 656/*
  657 * loop_change_fd switches the backing store of a loopback device to
 658 * a new file. This is useful for operating system installers to free up
 659 * the original file and in High Availability environments to switch to
 660 * an alternative location for the content in case of server meltdown.
 661 * This can only work if the loop device is used read-only, and if the
 662 * new backing store is the same size and type as the old backing store.
 663 */
 664static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 665			  unsigned int arg)
 666{
 667	struct file	*file, *old_file;
 668	struct inode	*inode;
 669	int		error;
 670
 671	error = -ENXIO;
 672	if (lo->lo_state != Lo_bound)
 673		goto out;
 674
 675	/* the loop device has to be read-only */
 676	error = -EINVAL;
 677	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
 678		goto out;
 679
 680	error = -EBADF;
 681	file = fget(arg);
 682	if (!file)
 683		goto out;
 684
 685	inode = file->f_mapping->host;
 686	old_file = lo->lo_backing_file;
 687
 688	error = -EINVAL;
 689
 690	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
 691		goto out_putf;
 692
 693	/* size of the new backing store needs to be the same */
 694	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
 695		goto out_putf;
 696
 697	/* and ... switch */
 698	error = loop_switch(lo, file);
 699	if (error)
 700		goto out_putf;
 701
 702	fput(old_file);
 703	if (max_part > 0)
 704		ioctl_by_bdev(bdev, BLKRRPART, 0);
 705	return 0;
 706
 707 out_putf:
 708	fput(file);
 709 out:
 710	return error;
 711}
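
From userspace, the switch is driven by the LOOP_CHANGE_FD ioctl; per the checks above, the device must be bound read-only and the replacement file must have the same effective size. A minimal sketch (paths are illustrative, error handling elided):

	#include <fcntl.h>
	#include <linux/loop.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int loopfd = open("/dev/loop0", O_RDWR);     /* bound read-only */
		int newfd = open("/tmp/copy.img", O_RDONLY); /* same-size replica */

		ioctl(loopfd, LOOP_CHANGE_FD, newfd); /* lands in loop_change_fd() */
		close(newfd); /* safe: the driver took its own reference via fget() */
		close(loopfd);
		return 0;
	}
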
 712
 713static inline int is_loop_device(struct file *file)
 714{
 715	struct inode *i = file->f_mapping->host;
 716
 717	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
 718}
 719
 720/* loop sysfs attributes */
 721
 722static ssize_t loop_attr_show(struct device *dev, char *page,
 723			      ssize_t (*callback)(struct loop_device *, char *))
 724{
 725	struct gendisk *disk = dev_to_disk(dev);
 726	struct loop_device *lo = disk->private_data;
 727
 728	return callback(lo, page);
 729}
 730
 731#define LOOP_ATTR_RO(_name)						\
 732static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
 733static ssize_t loop_attr_do_show_##_name(struct device *d,		\
 734				struct device_attribute *attr, char *b)	\
 735{									\
 736	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
 737}									\
 738static struct device_attribute loop_attr_##_name =			\
 739	__ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);
 740
 741static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
 742{
 743	ssize_t ret;
 744	char *p = NULL;
 745
 746	spin_lock_irq(&lo->lo_lock);
 747	if (lo->lo_backing_file)
 748		p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
 749	spin_unlock_irq(&lo->lo_lock);
 750
 751	if (IS_ERR_OR_NULL(p))
 752		ret = PTR_ERR(p);
 753	else {
 754		ret = strlen(p);
 755		memmove(buf, p, ret);
 756		buf[ret++] = '\n';
 757		buf[ret] = 0;
 758	}
 759
 760	return ret;
 761}
 762
 763static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
 764{
 765	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
 766}
 767
 768static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
 769{
 770	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
 771}
 772
 773static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
 774{
 775	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
 776
 777	return sprintf(buf, "%s\n", autoclear ? "1" : "0");
 778}
 779
 780LOOP_ATTR_RO(backing_file);
 781LOOP_ATTR_RO(offset);
 782LOOP_ATTR_RO(sizelimit);
 783LOOP_ATTR_RO(autoclear);
 784
 785static struct attribute *loop_attrs[] = {
 786	&loop_attr_backing_file.attr,
 787	&loop_attr_offset.attr,
 788	&loop_attr_sizelimit.attr,
 789	&loop_attr_autoclear.attr,
 790	NULL,
 791};
 792
 793static struct attribute_group loop_attribute_group = {
 794	.name = "loop",
 795	.attrs= loop_attrs,
 796};
 797
 798static int loop_sysfs_init(struct loop_device *lo)
 799{
 800	return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
 801				  &loop_attribute_group);
 802}
 803
 804static void loop_sysfs_exit(struct loop_device *lo)
 805{
 806	sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
 807			   &loop_attribute_group);
 808}
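
The attribute group above surfaces as /sys/block/loopN/loop/; each attribute is read with ordinary file I/O. A minimal sketch reading backing_file (loop0 is illustrative):

	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		FILE *f = fopen("/sys/block/loop0/loop/backing_file", "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("backing file: %s", buf); /* show routine appends '\n' */
		if (f)
			fclose(f);
		return 0;
	}
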
 809
 810static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 811		       struct block_device *bdev, unsigned int arg)
 812{
 813	struct file	*file, *f;
 814	struct inode	*inode;
 815	struct address_space *mapping;
 816	unsigned lo_blocksize;
 817	int		lo_flags = 0;
 818	int		error;
 819	loff_t		size;
 820
 821	/* This is safe, since we have a reference from open(). */
 822	__module_get(THIS_MODULE);
 823
 824	error = -EBADF;
 825	file = fget(arg);
 826	if (!file)
 827		goto out;
 828
 829	error = -EBUSY;
 830	if (lo->lo_state != Lo_unbound)
 831		goto out_putf;
 832
 833	/* Avoid recursion */
 834	f = file;
 835	while (is_loop_device(f)) {
 836		struct loop_device *l;
 837
 838		if (f->f_mapping->host->i_bdev == bdev)
 839			goto out_putf;
 840
 841		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
 842		if (l->lo_state == Lo_unbound) {
 843			error = -EINVAL;
 844			goto out_putf;
 845		}
 846		f = l->lo_backing_file;
 847	}
 848
 849	mapping = file->f_mapping;
 850	inode = mapping->host;
 851
 852	if (!(file->f_mode & FMODE_WRITE))
 853		lo_flags |= LO_FLAGS_READ_ONLY;
 854
 855	error = -EINVAL;
 856	if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 857		const struct address_space_operations *aops = mapping->a_ops;
 858
 859		if (aops->write_begin)
 860			lo_flags |= LO_FLAGS_USE_AOPS;
 861		if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
 862			lo_flags |= LO_FLAGS_READ_ONLY;
 863
 864		lo_blocksize = S_ISBLK(inode->i_mode) ?
 865			inode->i_bdev->bd_block_size : PAGE_SIZE;
 866
 867		error = 0;
 868	} else {
 869		goto out_putf;
 870	}
 871
 872	size = get_loop_size(lo, file);
 873
 874	if ((loff_t)(sector_t)size != size) {
 875		error = -EFBIG;
 876		goto out_putf;
 877	}
 878
 879	if (!(mode & FMODE_WRITE))
 880		lo_flags |= LO_FLAGS_READ_ONLY;
 881
 882	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
 883
 884	lo->lo_blocksize = lo_blocksize;
 885	lo->lo_device = bdev;
 886	lo->lo_flags = lo_flags;
 887	lo->lo_backing_file = file;
 888	lo->transfer = transfer_none;
 889	lo->ioctl = NULL;
 890	lo->lo_sizelimit = 0;
 891	lo->old_gfp_mask = mapping_gfp_mask(mapping);
 892	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 893
 894	bio_list_init(&lo->lo_bio_list);
 895
 896	/*
 897	 * set queue make_request_fn, and add limits based on lower level
 898	 * device
 899	 */
 900	blk_queue_make_request(lo->lo_queue, loop_make_request);
 901	lo->lo_queue->queuedata = lo;
 902
 903	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 904		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
 905
 906	set_capacity(lo->lo_disk, size);
 907	bd_set_size(bdev, size << 9);
 908	loop_sysfs_init(lo);
 909	/* let user-space know about the new size */
 910	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 911
 912	set_blocksize(bdev, lo_blocksize);
 913
 914	lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
 915						lo->lo_number);
 916	if (IS_ERR(lo->lo_thread)) {
 917		error = PTR_ERR(lo->lo_thread);
 918		goto out_clr;
 919	}
 920	lo->lo_state = Lo_bound;
 921	wake_up_process(lo->lo_thread);
 922	if (max_part > 0)
 923		ioctl_by_bdev(bdev, BLKRRPART, 0);
 924	return 0;
 925
 926out_clr:
 927	loop_sysfs_exit(lo);
 928	lo->lo_thread = NULL;
 929	lo->lo_device = NULL;
 930	lo->lo_backing_file = NULL;
 931	lo->lo_flags = 0;
 932	set_capacity(lo->lo_disk, 0);
 933	invalidate_bdev(bdev);
 934	bd_set_size(bdev, 0);
 935	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 936	mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
 937	lo->lo_state = Lo_unbound;
 938 out_putf:
 939	fput(file);
 940 out:
 941	/* This is safe: open() is still holding a reference. */
 942	module_put(THIS_MODULE);
 943	return error;
 944}
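
Binding from userspace is two opens plus one ioctl. A minimal sketch, assuming /dev/loop0 is unbound and /tmp/backing.img is a regular file (error handling elided):

	#include <fcntl.h>
	#include <linux/loop.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int loopfd = open("/dev/loop0", O_RDWR);
		int filefd = open("/tmp/backing.img", O_RDWR);

		/* Lands in loop_set_fd() above; /dev/loop0 now mirrors the file. */
		ioctl(loopfd, LOOP_SET_FD, filefd);

		close(filefd); /* safe: loop_set_fd() holds its own fget() reference */
		close(loopfd);
		return 0;
	}
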
 945
 946static int
 947loop_release_xfer(struct loop_device *lo)
 948{
 949	int err = 0;
 950	struct loop_func_table *xfer = lo->lo_encryption;
 951
 952	if (xfer) {
 953		if (xfer->release)
 954			err = xfer->release(lo);
 955		lo->transfer = NULL;
 956		lo->lo_encryption = NULL;
 957		module_put(xfer->owner);
 958	}
 959	return err;
 960}
 961
 962static int
 963loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
 964	       const struct loop_info64 *i)
 965{
 966	int err = 0;
 967
 968	if (xfer) {
 969		struct module *owner = xfer->owner;
 970
 971		if (!try_module_get(owner))
 972			return -EINVAL;
 973		if (xfer->init)
 974			err = xfer->init(lo, i);
 975		if (err)
 976			module_put(owner);
 977		else
 978			lo->lo_encryption = xfer;
 979	}
 980	return err;
 981}
 982
 983static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 984{
 985	struct file *filp = lo->lo_backing_file;
 986	gfp_t gfp = lo->old_gfp_mask;
 987
 988	if (lo->lo_state != Lo_bound)
 989		return -ENXIO;
 990
 991	if (lo->lo_refcnt > 1)	/* we needed one fd for the ioctl */
 992		return -EBUSY;
 993
 994	if (filp == NULL)
 995		return -EINVAL;
 996
 997	spin_lock_irq(&lo->lo_lock);
 998	lo->lo_state = Lo_rundown;
 999	spin_unlock_irq(&lo->lo_lock);
1000
1001	kthread_stop(lo->lo_thread);
1002
1003	spin_lock_irq(&lo->lo_lock);
1004	lo->lo_backing_file = NULL;
1005	spin_unlock_irq(&lo->lo_lock);
1006
1007	loop_release_xfer(lo);
1008	lo->transfer = NULL;
1009	lo->ioctl = NULL;
1010	lo->lo_device = NULL;
1011	lo->lo_encryption = NULL;
1012	lo->lo_offset = 0;
1013	lo->lo_sizelimit = 0;
1014	lo->lo_encrypt_key_size = 0;
1015	lo->lo_flags = 0;
1016	lo->lo_thread = NULL;
1017	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
1018	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
1019	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
1020	if (bdev)
1021		invalidate_bdev(bdev);
1022	set_capacity(lo->lo_disk, 0);
1023	loop_sysfs_exit(lo);
1024	if (bdev) {
1025		bd_set_size(bdev, 0);
1026		/* let user-space know about this change */
1027		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
1028	}
1029	mapping_set_gfp_mask(filp->f_mapping, gfp);
1030	lo->lo_state = Lo_unbound;
1031	/* This is safe: open() is still holding a reference. */
1032	module_put(THIS_MODULE);
1033	if (max_part > 0 && bdev)
1034		ioctl_by_bdev(bdev, BLKRRPART, 0);
1035	mutex_unlock(&lo->lo_ctl_mutex);
1036	/*
1037	 * Need not hold lo_ctl_mutex to fput backing file.
1038	 * Calling fput holding lo_ctl_mutex triggers a circular
1039	 * lock dependency possibility warning as fput can take
1040	 * bd_mutex which is usually taken before lo_ctl_mutex.
1041	 */
1042	fput(filp);
1043	return 0;
1044}
1045
1046static int
1047loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1048{
1049	int err;
1050	struct loop_func_table *xfer;
1051	uid_t uid = current_uid();
1052
1053	if (lo->lo_encrypt_key_size &&
1054	    lo->lo_key_owner != uid &&
1055	    !capable(CAP_SYS_ADMIN))
1056		return -EPERM;
1057	if (lo->lo_state != Lo_bound)
1058		return -ENXIO;
1059	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
1060		return -EINVAL;
1061
1062	err = loop_release_xfer(lo);
1063	if (err)
1064		return err;
1065
1066	if (info->lo_encrypt_type) {
1067		unsigned int type = info->lo_encrypt_type;
1068
1069		if (type >= MAX_LO_CRYPT)
1070			return -EINVAL;
1071		xfer = xfer_funcs[type];
1072		if (xfer == NULL)
1073			return -EINVAL;
1074	} else
1075		xfer = NULL;
1076
1077	err = loop_init_xfer(lo, xfer, info);
1078	if (err)
1079		return err;
1080
1081	if (lo->lo_offset != info->lo_offset ||
1082	    lo->lo_sizelimit != info->lo_sizelimit) {
1083		lo->lo_offset = info->lo_offset;
1084		lo->lo_sizelimit = info->lo_sizelimit;
1085		if (figure_loop_size(lo))
1086			return -EFBIG;
1087	}
1088
1089	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
1090	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
1091	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
1092	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
1093
1094	if (!xfer)
1095		xfer = &none_funcs;
1096	lo->transfer = xfer->transfer;
1097	lo->ioctl = xfer->ioctl;
1098
1099	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
1100	     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
1101		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
1102
1103	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
1104	lo->lo_init[0] = info->lo_init[0];
1105	lo->lo_init[1] = info->lo_init[1];
1106	if (info->lo_encrypt_key_size) {
1107		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
1108		       info->lo_encrypt_key_size);
1109		lo->lo_key_owner = uid;
1110	}	
1111
1112	return 0;
1113}
1114
1115static int
1116loop_get_status(struct loop_device *lo, struct loop_info64 *info)
1117{
1118	struct file *file = lo->lo_backing_file;
1119	struct kstat stat;
1120	int error;
1121
1122	if (lo->lo_state != Lo_bound)
1123		return -ENXIO;
1124	error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
1125	if (error)
1126		return error;
1127	memset(info, 0, sizeof(*info));
1128	info->lo_number = lo->lo_number;
1129	info->lo_device = huge_encode_dev(stat.dev);
1130	info->lo_inode = stat.ino;
1131	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
1132	info->lo_offset = lo->lo_offset;
1133	info->lo_sizelimit = lo->lo_sizelimit;
1134	info->lo_flags = lo->lo_flags;
1135	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1136	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
1137	info->lo_encrypt_type =
1138		lo->lo_encryption ? lo->lo_encryption->number : 0;
1139	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
1140		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
1141		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
1142		       lo->lo_encrypt_key_size);
1143	}
1144	return 0;
1145}
1146
1147static void
1148loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
1149{
1150	memset(info64, 0, sizeof(*info64));
1151	info64->lo_number = info->lo_number;
1152	info64->lo_device = info->lo_device;
1153	info64->lo_inode = info->lo_inode;
1154	info64->lo_rdevice = info->lo_rdevice;
1155	info64->lo_offset = info->lo_offset;
1156	info64->lo_sizelimit = 0;
1157	info64->lo_encrypt_type = info->lo_encrypt_type;
1158	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
1159	info64->lo_flags = info->lo_flags;
1160	info64->lo_init[0] = info->lo_init[0];
1161	info64->lo_init[1] = info->lo_init[1];
1162	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1163		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
1164	else
1165		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
1166	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
1167}
1168
1169static int
1170loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
1171{
1172	memset(info, 0, sizeof(*info));
1173	info->lo_number = info64->lo_number;
1174	info->lo_device = info64->lo_device;
1175	info->lo_inode = info64->lo_inode;
1176	info->lo_rdevice = info64->lo_rdevice;
1177	info->lo_offset = info64->lo_offset;
1178	info->lo_encrypt_type = info64->lo_encrypt_type;
1179	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
1180	info->lo_flags = info64->lo_flags;
1181	info->lo_init[0] = info64->lo_init[0];
1182	info->lo_init[1] = info64->lo_init[1];
1183	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1184		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1185	else
1186		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
1187	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1188
1189	/* error in case values were truncated */
1190	if (info->lo_device != info64->lo_device ||
1191	    info->lo_rdevice != info64->lo_rdevice ||
1192	    info->lo_inode != info64->lo_inode ||
1193	    info->lo_offset != info64->lo_offset)
1194		return -EOVERFLOW;
1195
1196	return 0;
1197}
1198
1199static int
1200loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1201{
1202	struct loop_info info;
1203	struct loop_info64 info64;
1204
1205	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
1206		return -EFAULT;
1207	loop_info64_from_old(&info, &info64);
1208	return loop_set_status(lo, &info64);
1209}
1210
1211static int
1212loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1213{
1214	struct loop_info64 info64;
1215
1216	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
1217		return -EFAULT;
1218	return loop_set_status(lo, &info64);
1219}
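
Offset and size limit reach loop_set_status() through the same structure userspace fills in; zeroed fields clear the corresponding settings. A hedged sketch mapping a region that skips a 1 MiB header (values are illustrative):

	#include <fcntl.h>
	#include <linux/loop.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		struct loop_info64 info;
		int loopfd = open("/dev/loop0", O_RDWR); /* assumed already bound */

		memset(&info, 0, sizeof(info));
		info.lo_offset = 1 << 20;     /* skip a 1 MiB header */
		info.lo_sizelimit = 16 << 20; /* expose only 16 MiB */
		ioctl(loopfd, LOOP_SET_STATUS64, &info); /* -> loop_set_status64() */
		close(loopfd);
		return 0;
	}
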
1220
1221static int
1222loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1223	struct loop_info info;
1224	struct loop_info64 info64;
1225	int err = 0;
1226
1227	if (!arg)
1228		err = -EINVAL;
1229	if (!err)
1230		err = loop_get_status(lo, &info64);
1231	if (!err)
1232		err = loop_info64_to_old(&info64, &info);
1233	if (!err && copy_to_user(arg, &info, sizeof(info)))
1234		err = -EFAULT;
1235
1236	return err;
1237}
1238
1239static int
1240loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1241	struct loop_info64 info64;
1242	int err = 0;
1243
1244	if (!arg)
1245		err = -EINVAL;
1246	if (!err)
1247		err = loop_get_status(lo, &info64);
1248	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1249		err = -EFAULT;
1250
1251	return err;
1252}
1253
1254static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
1255{
1256	int err;
1257	sector_t sec;
1258	loff_t sz;
1259
1260	err = -ENXIO;
1261	if (unlikely(lo->lo_state != Lo_bound))
1262		goto out;
1263	err = figure_loop_size(lo);
1264	if (unlikely(err))
1265		goto out;
1266	sec = get_capacity(lo->lo_disk);
1267	/* the width of sector_t may be narrow for bit-shift */
1268	sz = sec;
1269	sz <<= 9;
1270	mutex_lock(&bdev->bd_mutex);
1271	bd_set_size(bdev, sz);
1272	/* let user-space know about the new size */
1273	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
1274	mutex_unlock(&bdev->bd_mutex);
1275
1276 out:
1277	return err;
1278}
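
Userspace triggers this after growing the backing file; the ioctl argument is ignored. A minimal sketch (paths and sizes are illustrative):

	#include <fcntl.h>
	#include <linux/loop.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int filefd = open("/tmp/backing.img", O_RDWR);
		int loopfd = open("/dev/loop0", O_RDWR);

		ftruncate(filefd, 2LL << 30);        /* grow the file to 2 GiB */
		ioctl(loopfd, LOOP_SET_CAPACITY, 0); /* -> loop_set_capacity() */
		close(filefd);
		close(loopfd);
		return 0;
	}
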
1279
1280static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1281	unsigned int cmd, unsigned long arg)
1282{
1283	struct loop_device *lo = bdev->bd_disk->private_data;
1284	int err;
1285
1286	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
1287	switch (cmd) {
1288	case LOOP_SET_FD:
1289		err = loop_set_fd(lo, mode, bdev, arg);
1290		break;
1291	case LOOP_CHANGE_FD:
1292		err = loop_change_fd(lo, bdev, arg);
1293		break;
1294	case LOOP_CLR_FD:
1295		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
1296		err = loop_clr_fd(lo, bdev);
1297		if (!err)
1298			goto out_unlocked;
1299		break;
1300	case LOOP_SET_STATUS:
1301		err = loop_set_status_old(lo, (struct loop_info __user *) arg);
1302		break;
1303	case LOOP_GET_STATUS:
1304		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
1305		break;
1306	case LOOP_SET_STATUS64:
1307		err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
1308		break;
1309	case LOOP_GET_STATUS64:
1310		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
1311		break;
1312	case LOOP_SET_CAPACITY:
1313		err = -EPERM;
1314		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1315			err = loop_set_capacity(lo, bdev);
1316		break;
1317	default:
1318		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1319	}
1320	mutex_unlock(&lo->lo_ctl_mutex);
1321
1322out_unlocked:
1323	return err;
1324}
1325
1326#ifdef CONFIG_COMPAT
1327struct compat_loop_info {
1328	compat_int_t	lo_number;      /* ioctl r/o */
1329	compat_dev_t	lo_device;      /* ioctl r/o */
1330	compat_ulong_t	lo_inode;       /* ioctl r/o */
1331	compat_dev_t	lo_rdevice;     /* ioctl r/o */
1332	compat_int_t	lo_offset;
1333	compat_int_t	lo_encrypt_type;
1334	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
1335	compat_int_t	lo_flags;       /* ioctl r/o */
1336	char		lo_name[LO_NAME_SIZE];
1337	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
1338	compat_ulong_t	lo_init[2];
1339	char		reserved[4];
1340};
1341
1342/*
1343 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
1344 * - noinlined to reduce stack space usage in main part of driver
1345 */
1346static noinline int
1347loop_info64_from_compat(const struct compat_loop_info __user *arg,
1348			struct loop_info64 *info64)
1349{
1350	struct compat_loop_info info;
1351
1352	if (copy_from_user(&info, arg, sizeof(info)))
1353		return -EFAULT;
1354
1355	memset(info64, 0, sizeof(*info64));
1356	info64->lo_number = info.lo_number;
1357	info64->lo_device = info.lo_device;
1358	info64->lo_inode = info.lo_inode;
1359	info64->lo_rdevice = info.lo_rdevice;
1360	info64->lo_offset = info.lo_offset;
1361	info64->lo_sizelimit = 0;
1362	info64->lo_encrypt_type = info.lo_encrypt_type;
1363	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
1364	info64->lo_flags = info.lo_flags;
1365	info64->lo_init[0] = info.lo_init[0];
1366	info64->lo_init[1] = info.lo_init[1];
1367	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1368		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
1369	else
1370		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
1371	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
1372	return 0;
1373}
1374
1375/*
1376 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
1377 * - noinlined to reduce stack space usage in main part of driver
1378 */
1379static noinline int
1380loop_info64_to_compat(const struct loop_info64 *info64,
1381		      struct compat_loop_info __user *arg)
1382{
1383	struct compat_loop_info info;
1384
1385	memset(&info, 0, sizeof(info));
1386	info.lo_number = info64->lo_number;
1387	info.lo_device = info64->lo_device;
1388	info.lo_inode = info64->lo_inode;
1389	info.lo_rdevice = info64->lo_rdevice;
1390	info.lo_offset = info64->lo_offset;
1391	info.lo_encrypt_type = info64->lo_encrypt_type;
1392	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
1393	info.lo_flags = info64->lo_flags;
1394	info.lo_init[0] = info64->lo_init[0];
1395	info.lo_init[1] = info64->lo_init[1];
1396	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1397		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1398	else
1399		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
1400	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1401
1402	/* error in case values were truncated */
1403	if (info.lo_device != info64->lo_device ||
1404	    info.lo_rdevice != info64->lo_rdevice ||
1405	    info.lo_inode != info64->lo_inode ||
1406	    info.lo_offset != info64->lo_offset ||
1407	    info.lo_init[0] != info64->lo_init[0] ||
1408	    info.lo_init[1] != info64->lo_init[1])
1409		return -EOVERFLOW;
1410
1411	if (copy_to_user(arg, &info, sizeof(info)))
1412		return -EFAULT;
1413	return 0;
1414}
1415
1416static int
1417loop_set_status_compat(struct loop_device *lo,
1418		       const struct compat_loop_info __user *arg)
1419{
1420	struct loop_info64 info64;
1421	int ret;
1422
1423	ret = loop_info64_from_compat(arg, &info64);
1424	if (ret < 0)
1425		return ret;
1426	return loop_set_status(lo, &info64);
1427}
1428
1429static int
1430loop_get_status_compat(struct loop_device *lo,
1431		       struct compat_loop_info __user *arg)
1432{
1433	struct loop_info64 info64;
1434	int err = 0;
1435
1436	if (!arg)
1437		err = -EINVAL;
1438	if (!err)
1439		err = loop_get_status(lo, &info64);
1440	if (!err)
1441		err = loop_info64_to_compat(&info64, arg);
1442	return err;
1443}
1444
1445static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1446			   unsigned int cmd, unsigned long arg)
1447{
1448	struct loop_device *lo = bdev->bd_disk->private_data;
1449	int err;
1450
1451	switch(cmd) {
1452	case LOOP_SET_STATUS:
1453		mutex_lock(&lo->lo_ctl_mutex);
1454		err = loop_set_status_compat(
1455			lo, (const struct compat_loop_info __user *) arg);
1456		mutex_unlock(&lo->lo_ctl_mutex);
1457		break;
1458	case LOOP_GET_STATUS:
1459		mutex_lock(&lo->lo_ctl_mutex);
1460		err = loop_get_status_compat(
1461			lo, (struct compat_loop_info __user *) arg);
1462		mutex_unlock(&lo->lo_ctl_mutex);
1463		break;
1464	case LOOP_SET_CAPACITY:
1465	case LOOP_CLR_FD:
1466	case LOOP_GET_STATUS64:
1467	case LOOP_SET_STATUS64:
1468		arg = (unsigned long) compat_ptr(arg);
1469	case LOOP_SET_FD:
1470	case LOOP_CHANGE_FD:
1471		err = lo_ioctl(bdev, mode, cmd, arg);
1472		break;
1473	default:
1474		err = -ENOIOCTLCMD;
1475		break;
1476	}
1477	return err;
1478}
1479#endif
1480
1481static int lo_open(struct block_device *bdev, fmode_t mode)
1482{
1483	struct loop_device *lo;
1484	int err = 0;
1485
1486	mutex_lock(&loop_index_mutex);
1487	lo = bdev->bd_disk->private_data;
1488	if (!lo) {
1489		err = -ENXIO;
1490		goto out;
1491	}
1492
1493	mutex_lock(&lo->lo_ctl_mutex);
1494	lo->lo_refcnt++;
1495	mutex_unlock(&lo->lo_ctl_mutex);
1496out:
1497	mutex_unlock(&loop_index_mutex);
1498	return err;
1499}
1500
1501static int lo_release(struct gendisk *disk, fmode_t mode)
1502{
1503	struct loop_device *lo = disk->private_data;
1504	int err;
1505
1506	mutex_lock(&lo->lo_ctl_mutex);
1507
1508	if (--lo->lo_refcnt)
1509		goto out;
1510
1511	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
1512		/*
1513		 * In autoclear mode, stop the loop thread
1514		 * and remove configuration after last close.
1515		 */
1516		err = loop_clr_fd(lo, NULL);
1517		if (!err)
1518			goto out_unlocked;
1519	} else {
1520		/*
1521		 * Otherwise keep thread (if running) and config,
1522		 * but flush possible ongoing bios in thread.
1523		 */
1524		loop_flush(lo);
1525	}
1526
1527out:
1528	mutex_unlock(&lo->lo_ctl_mutex);
1529out_unlocked:
1530	return 0;
1531}
1532
1533static const struct block_device_operations lo_fops = {
1534	.owner =	THIS_MODULE,
1535	.open =		lo_open,
1536	.release =	lo_release,
1537	.ioctl =	lo_ioctl,
1538#ifdef CONFIG_COMPAT
1539	.compat_ioctl =	lo_compat_ioctl,
1540#endif
1541};
1542
1543/*
1544 * And now the modules code and kernel interface.
1545 */
1546static int max_loop;
1547module_param(max_loop, int, S_IRUGO);
1548MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
1549module_param(max_part, int, S_IRUGO);
1550MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
1551MODULE_LICENSE("GPL");
1552MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
1553
1554int loop_register_transfer(struct loop_func_table *funcs)
1555{
1556	unsigned int n = funcs->number;
1557
1558	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
1559		return -EINVAL;
1560	xfer_funcs[n] = funcs;
1561	return 0;
1562}
1563
1564static int unregister_transfer_cb(int id, void *ptr, void *data)
1565{
1566	struct loop_device *lo = ptr;
1567	struct loop_func_table *xfer = data;
1568
1569	mutex_lock(&lo->lo_ctl_mutex);
1570	if (lo->lo_encryption == xfer)
1571		loop_release_xfer(lo);
1572	mutex_unlock(&lo->lo_ctl_mutex);
1573	return 0;
1574}
1575
1576int loop_unregister_transfer(int number)
1577{
1578	unsigned int n = number;
1579	struct loop_func_table *xfer;
1580
1581	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1582		return -EINVAL;
1583
1584	xfer_funcs[n] = NULL;
1585	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
1586	return 0;
1587}
1588
1589EXPORT_SYMBOL(loop_register_transfer);
1590EXPORT_SYMBOL(loop_unregister_transfer);
1591
1592static int loop_add(struct loop_device **l, int i)
1593{
1594	struct loop_device *lo;
1595	struct gendisk *disk;
1596	int err;
1597
1598	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1599	if (!lo) {
1600		err = -ENOMEM;
1601		goto out;
1602	}
1603
1604	err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
1605	if (err < 0)
1606		goto out_free_dev;
1607
1608	if (i >= 0) {
1609		int m;
1610
1611		/* create specific i in the index */
1612		err = idr_get_new_above(&loop_index_idr, lo, i, &m);
1613		if (err >= 0 && i != m) {
1614			idr_remove(&loop_index_idr, m);
1615			err = -EEXIST;
1616		}
1617	} else if (i == -1) {
1618		int m;
1619
1620		/* get next free nr */
1621		err = idr_get_new(&loop_index_idr, lo, &m);
1622		if (err >= 0)
1623			i = m;
1624	} else {
1625		err = -EINVAL;
1626	}
1627	if (err < 0)
1628		goto out_free_dev;
1629
1630	lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1631	if (!lo->lo_queue)
1632		goto out_free_dev;
1633
1634	disk = lo->lo_disk = alloc_disk(1 << part_shift);
1635	if (!disk)
1636		goto out_free_queue;
1637
1638	mutex_init(&lo->lo_ctl_mutex);
1639	lo->lo_number		= i;
1640	lo->lo_thread		= NULL;
1641	init_waitqueue_head(&lo->lo_event);
1642	spin_lock_init(&lo->lo_lock);
1643	disk->major		= LOOP_MAJOR;
1644	disk->first_minor	= i << part_shift;
1645	disk->fops		= &lo_fops;
1646	disk->private_data	= lo;
1647	disk->queue		= lo->lo_queue;
1648	sprintf(disk->disk_name, "loop%d", i);
1649	add_disk(disk);
1650	*l = lo;
1651	return lo->lo_number;
1652
1653out_free_queue:
1654	blk_cleanup_queue(lo->lo_queue);
1655out_free_dev:
1656	kfree(lo);
1657out:
1658	return err;
1659}
1660
1661static void loop_remove(struct loop_device *lo)
1662{
1663	del_gendisk(lo->lo_disk);
1664	blk_cleanup_queue(lo->lo_queue);
1665	put_disk(lo->lo_disk);
1666	kfree(lo);
1667}
1668
1669static int find_free_cb(int id, void *ptr, void *data)
1670{
1671	struct loop_device *lo = ptr;
1672	struct loop_device **l = data;
1673
1674	if (lo->lo_state == Lo_unbound) {
1675		*l = lo;
1676		return 1;
1677	}
1678	return 0;
1679}
1680
1681static int loop_lookup(struct loop_device **l, int i)
1682{
1683	struct loop_device *lo;
1684	int ret = -ENODEV;
1685
1686	if (i < 0) {
1687		int err;
1688
1689		err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
1690		if (err == 1) {
1691			*l = lo;
1692			ret = lo->lo_number;
1693		}
1694		goto out;
1695	}
1696
1697	/* lookup and return a specific i */
1698	lo = idr_find(&loop_index_idr, i);
1699	if (lo) {
1700		*l = lo;
1701		ret = lo->lo_number;
1702	}
1703out:
1704	return ret;
1705}
1706
1707static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1708{
1709	struct loop_device *lo;
1710	struct kobject *kobj;
1711	int err;
1712
1713	mutex_lock(&loop_index_mutex);
1714	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
1715	if (err < 0)
1716		err = loop_add(&lo, MINOR(dev) >> part_shift);
1717	if (err < 0)
1718		kobj = ERR_PTR(err);
1719	else
1720		kobj = get_disk(lo->lo_disk);
1721	mutex_unlock(&loop_index_mutex);
1722
1723	*part = 0;
1724	return kobj;
1725}
1726
1727static long loop_control_ioctl(struct file *file, unsigned int cmd,
1728			       unsigned long parm)
1729{
1730	struct loop_device *lo;
1731	int ret = -ENOSYS;
1732
1733	mutex_lock(&loop_index_mutex);
1734	switch (cmd) {
1735	case LOOP_CTL_ADD:
1736		ret = loop_lookup(&lo, parm);
1737		if (ret >= 0) {
1738			ret = -EEXIST;
1739			break;
1740		}
1741		ret = loop_add(&lo, parm);
1742		break;
1743	case LOOP_CTL_REMOVE:
1744		ret = loop_lookup(&lo, parm);
1745		if (ret < 0)
1746			break;
1747		mutex_lock(&lo->lo_ctl_mutex);
1748		if (lo->lo_state != Lo_unbound) {
1749			ret = -EBUSY;
1750			mutex_unlock(&lo->lo_ctl_mutex);
1751			break;
1752		}
1753		if (lo->lo_refcnt > 0) {
1754			ret = -EBUSY;
1755			mutex_unlock(&lo->lo_ctl_mutex);
1756			break;
1757		}
1758		lo->lo_disk->private_data = NULL;
1759		mutex_unlock(&lo->lo_ctl_mutex);
1760		idr_remove(&loop_index_idr, lo->lo_number);
1761		loop_remove(lo);
1762		break;
1763	case LOOP_CTL_GET_FREE:
1764		ret = loop_lookup(&lo, -1);
1765		if (ret >= 0)
1766			break;
1767		ret = loop_add(&lo, -1);
1768	}
1769	mutex_unlock(&loop_index_mutex);
1770
1771	return ret;
1772}
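
This is the interface behind losetup-style dynamic allocation. A minimal userspace sketch asking for the first unbound device (error handling elided):

	#include <fcntl.h>
	#include <linux/loop.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int ctlfd = open("/dev/loop-control", O_RDWR);
		int nr = ioctl(ctlfd, LOOP_CTL_GET_FREE, 0); /* -> loop_lookup()/loop_add() */

		if (nr >= 0)
			printf("/dev/loop%d\n", nr); /* e.g. "/dev/loop3" */
		close(ctlfd);
		return 0;
	}
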
1773
1774static const struct file_operations loop_ctl_fops = {
1775	.open		= nonseekable_open,
1776	.unlocked_ioctl	= loop_control_ioctl,
1777	.compat_ioctl	= loop_control_ioctl,
1778	.owner		= THIS_MODULE,
1779	.llseek		= noop_llseek,
1780};
1781
1782static struct miscdevice loop_misc = {
1783	.minor		= LOOP_CTRL_MINOR,
1784	.name		= "loop-control",
1785	.fops		= &loop_ctl_fops,
1786};
1787
1788MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
1789MODULE_ALIAS("devname:loop-control");
1790
1791static int __init loop_init(void)
1792{
1793	int i, nr;
1794	unsigned long range;
1795	struct loop_device *lo;
1796	int err;
1797
1798	err = misc_register(&loop_misc);
1799	if (err < 0)
1800		return err;
1801
1802	part_shift = 0;
1803	if (max_part > 0) {
1804		part_shift = fls(max_part);
1805
1806		/*
1807		 * Adjust max_part according to part_shift as it is exported
 1808		 * to user space so that the user can decide the correct minor
 1809		 * number if they want to create more devices.
1810		 *
1811		 * Note that -1 is required because partition 0 is reserved
1812		 * for the whole disk.
1813		 */
1814		max_part = (1UL << part_shift) - 1;
1815	}
1816
1817	if ((1UL << part_shift) > DISK_MAX_PARTS)
1818		return -EINVAL;
1819
1820	if (max_loop > 1UL << (MINORBITS - part_shift))
1821		return -EINVAL;
1822
1823	/*
1824	 * If max_loop is specified, create that many devices upfront.
1825	 * This also becomes a hard limit. If max_loop is not specified,
1826	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
1827	 * init time. Loop devices can be requested on-demand with the
1828	 * /dev/loop-control interface, or be instantiated by accessing
1829	 * a 'dead' device node.
1830	 */
1831	if (max_loop) {
1832		nr = max_loop;
1833		range = max_loop << part_shift;
1834	} else {
1835		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
1836		range = 1UL << MINORBITS;
1837	}
1838
1839	if (register_blkdev(LOOP_MAJOR, "loop"))
1840		return -EIO;
1841
1842	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
1843				  THIS_MODULE, loop_probe, NULL, NULL);
1844
1845	/* pre-create number of devices given by config or max_loop */
1846	mutex_lock(&loop_index_mutex);
1847	for (i = 0; i < nr; i++)
1848		loop_add(&lo, i);
1849	mutex_unlock(&loop_index_mutex);
1850
1851	printk(KERN_INFO "loop: module loaded\n");
1852	return 0;
1853}
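
To make the part_shift arithmetic above concrete: max_part=7 gives part_shift = fls(7) = 3, max_part is rewritten to (1 << 3) - 1 = 7, and loopN then owns minors N*8 through N*8+7. A standalone check, where fls_ is a userspace stand-in for the kernel's fls():

	#include <stdio.h>

	/* Userspace stand-in for the kernel's fls(): 1-based position of the
	 * most significant set bit; 0 when x == 0. */
	static int fls_(unsigned int x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		int max_part = 7;
		int part_shift = fls_(max_part);  /* 3 */

		max_part = (1 << part_shift) - 1; /* 7 */
		printf("loop3 first minor = %d\n", 3 << part_shift); /* 24 */
		return 0;
	}
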
1854
1855static int loop_exit_cb(int id, void *ptr, void *data)
1856{
1857	struct loop_device *lo = ptr;
1858
1859	loop_remove(lo);
1860	return 0;
1861}
1862
1863static void __exit loop_exit(void)
1864{
1865	unsigned long range;
1866
1867	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
1868
1869	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
1870	idr_remove_all(&loop_index_idr);
1871	idr_destroy(&loop_index_idr);
1872
1873	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
1874	unregister_blkdev(LOOP_MAJOR, "loop");
1875
1876	misc_deregister(&loop_misc);
1877}
1878
1879module_init(loop_init);
1880module_exit(loop_exit);
1881
1882#ifndef MODULE
1883static int __init max_loop_setup(char *str)
1884{
1885	max_loop = simple_strtol(str, NULL, 0);
1886	return 1;
1887}
1888
1889__setup("max_loop=", max_loop_setup);
1890#endif
v5.9
   1/*
   2 *  linux/drivers/block/loop.c
   3 *
   4 *  Written by Theodore Ts'o, 3/29/93
   5 *
   6 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
   7 * permitted under the GNU General Public License.
   8 *
   9 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
  10 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
  11 *
  12 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
  13 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
  14 *
  15 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
  16 *
  17 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
  18 *
  19 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
  20 *
  21 * Loadable modules and other fixes by AK, 1998
  22 *
  23 * Make real block number available to downstream transfer functions, enables
  24 * CBC (and relatives) mode encryption requiring unique IVs per data block.
  25 * Reed H. Petty, rhp@draper.net
  26 *
  27 * Maximum number of loop devices now dynamic via max_loop module parameter.
  28 * Russell Kroll <rkroll@exploits.org> 19990701
  29 *
  30 * Maximum number of loop devices when compiled-in now selectable by passing
  31 * max_loop=<1-255> to the kernel on boot.
  32 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
  33 *
  34 * Completely rewrite request handling to be make_request_fn style and
  35 * non blocking, pushing work to a helper thread. Lots of fixes from
  36 * Al Viro too.
  37 * Jens Axboe <axboe@suse.de>, Nov 2000
  38 *
  39 * Support up to 256 loop devices
  40 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
  41 *
  42 * Support for falling back on the write file operation when the address space
  43 * operations write_begin is not available on the backing filesystem.
  44 * Anton Altaparmakov, 16 Feb 2005
  45 *
  46 * Still To Fix:
  47 * - Advisory locking is ignored here.
  48 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
  49 *
  50 */
  51
  52#include <linux/module.h>
  53#include <linux/moduleparam.h>
  54#include <linux/sched.h>
  55#include <linux/fs.h>
  56#include <linux/file.h>
  57#include <linux/stat.h>
  58#include <linux/errno.h>
  59#include <linux/major.h>
  60#include <linux/wait.h>
  61#include <linux/blkdev.h>
  62#include <linux/blkpg.h>
  63#include <linux/init.h>
  64#include <linux/swap.h>
  65#include <linux/slab.h>
  66#include <linux/compat.h>
  67#include <linux/suspend.h>
  68#include <linux/freezer.h>
  69#include <linux/mutex.h>
  70#include <linux/writeback.h>
  71#include <linux/completion.h>
  72#include <linux/highmem.h>
  73#include <linux/kthread.h>
  74#include <linux/splice.h>
  75#include <linux/sysfs.h>
  76#include <linux/miscdevice.h>
  77#include <linux/falloc.h>
  78#include <linux/uio.h>
  79#include <linux/ioprio.h>
  80#include <linux/blk-cgroup.h>
  81
  82#include "loop.h"
  83
  84#include <linux/uaccess.h>
  85
  86static DEFINE_IDR(loop_index_idr);
  87static DEFINE_MUTEX(loop_ctl_mutex);
  88
  89static int max_part;
  90static int part_shift;
  91
  92static int transfer_xor(struct loop_device *lo, int cmd,
  93			struct page *raw_page, unsigned raw_off,
  94			struct page *loop_page, unsigned loop_off,
  95			int size, sector_t real_block)
  96{
  97	char *raw_buf = kmap_atomic(raw_page) + raw_off;
  98	char *loop_buf = kmap_atomic(loop_page) + loop_off;
  99	char *in, *out, *key;
 100	int i, keysize;
 101
 102	if (cmd == READ) {
 103		in = raw_buf;
 104		out = loop_buf;
 105	} else {
 106		in = loop_buf;
 107		out = raw_buf;
 108	}
 109
 110	key = lo->lo_encrypt_key;
 111	keysize = lo->lo_encrypt_key_size;
 112	for (i = 0; i < size; i++)
 113		*out++ = *in++ ^ key[(i & 511) % keysize];
 114
 115	kunmap_atomic(loop_buf);
 116	kunmap_atomic(raw_buf);
 117	cond_resched();
 118	return 0;
 119}
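/*
 * Illustrative: for keysize = 4, byte i of each segment is XORed with
 * key[(i & 511) % 4]; the (i & 511) mask simply wraps the running index
 * at 512-byte boundaries before it is reduced modulo the key size.
 */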
 120
 121static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
 122{
 123	if (unlikely(info->lo_encrypt_key_size <= 0))
 124		return -EINVAL;
 125	return 0;
 126}
 127
 128static struct loop_func_table none_funcs = {
 129	.number = LO_CRYPT_NONE,
 130};
 131
 132static struct loop_func_table xor_funcs = {
 133	.number = LO_CRYPT_XOR,
 134	.transfer = transfer_xor,
 135	.init = xor_init
 136};
 137
 138/* xfer_funcs[0] is special - its release function is never called */
 139static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
 140	&none_funcs,
 141	&xor_funcs
 142};
 143
 144static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
 145{
 146	loff_t loopsize;
 147
 148	/* Compute loopsize in bytes */
 149	loopsize = i_size_read(file->f_mapping->host);
 150	if (offset > 0)
 151		loopsize -= offset;
 152	/* offset is beyond i_size, weird but possible */
 153	if (loopsize < 0)
 154		return 0;
 155
 156	if (sizelimit > 0 && sizelimit < loopsize)
 157		loopsize = sizelimit;
 158	/*
 159	 * Unfortunately, if we want to do I/O on the device,
 160	 * the number of 512-byte sectors has to fit into a sector_t.
 161	 */
 162	return loopsize >> 9;
 163}
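/*
 * Example (illustrative): a 1 GiB backing file with offset = 4096 and
 * no sizelimit yields (1073741824 - 4096) >> 9 = 2097144 sectors.
 */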
 164
 165static loff_t get_loop_size(struct loop_device *lo, struct file *file)
 166{
 167	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
 168}
 169
 170static void __loop_update_dio(struct loop_device *lo, bool dio)
 171{
 172	struct file *file = lo->lo_backing_file;
 173	struct address_space *mapping = file->f_mapping;
 174	struct inode *inode = mapping->host;
 175	unsigned short sb_bsize = 0;
 176	unsigned dio_align = 0;
 177	bool use_dio;
 178
 179	if (inode->i_sb->s_bdev) {
 180		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
 181		dio_align = sb_bsize - 1;
 182	}
 183
 184	/*
 185	 * We support direct I/O only if lo_offset is aligned with the
 186	 * logical block size of the backing device, the logical block
 187	 * size of the loop device is at least as large as the backing
 188	 * device's, and no transfer transformation is set.
 189	 *
 190	 * TODO: the above condition may be relaxed in the future, and
 191	 * direct I/O may then be switched at runtime, since most
 192	 * requests in sane applications should be PAGE_SIZE aligned.
 193	 */
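	/*
	 * Worked example (illustrative): with sb_bsize = 4096,
	 * dio_align = 0xfff, so any lo_offset that is not a multiple of
	 * 4096 fails the (lo_offset & dio_align) test below and forces
	 * use_dio to false.
	 */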
 194	if (dio) {
 195		if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
 196				!(lo->lo_offset & dio_align) &&
 197				mapping->a_ops->direct_IO &&
 198				!lo->transfer)
 199			use_dio = true;
 200		else
 201			use_dio = false;
 202	} else {
 203		use_dio = false;
 204	}
 205
 206	if (lo->use_dio == use_dio)
 207		return;
 208
 209	/* flush dirty pages before changing direct IO */
 210	vfs_fsync(file, 0);
 211
 212	/*
 213	 * The LO_FLAGS_DIRECT_IO flag is handled in the same way as
 214	 * LO_FLAGS_READ_ONLY: both are set from the kernel, and losetup
 215	 * picks them up via ioctl(LOOP_GET_STATUS).
 216	 */
 217	if (lo->lo_state == Lo_bound)
 218		blk_mq_freeze_queue(lo->lo_queue);
 219	lo->use_dio = use_dio;
 220	if (use_dio) {
 221		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 222		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
 223	} else {
 224		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 225		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
 226	}
 227	if (lo->lo_state == Lo_bound)
 228		blk_mq_unfreeze_queue(lo->lo_queue);
 229}
 230
 231/**
 232 * loop_validate_block_size() - validates the passed-in block size
 233 * @bsize: size to validate
 234 */
 235static int
 236loop_validate_block_size(unsigned short bsize)
 237{
 238	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
 239		return -EINVAL;
 240
 241	return 0;
 242}
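/*
 * Illustrative: on a system with a 4 KiB PAGE_SIZE this accepts 512,
 * 1024, 2048 and 4096, and rejects e.g. 256 (below 512) or 3072 (not a
 * power of two).
 */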
 243
 244/**
 245 * loop_set_size() - sets device size and notifies userspace
 246 * @lo: struct loop_device to set the size for
 247 * @size: new size of the loop device
 248 *
 249 * Callers must validate that the size passed into this function fits into
 250 * a sector_t, e.g. using loop_validate_size().
 251 */
 252static void loop_set_size(struct loop_device *lo, loff_t size)
 253{
 254	struct block_device *bdev = lo->lo_device;
 255
 256	bd_set_size(bdev, size << SECTOR_SHIFT);
 257
 258	set_capacity_revalidate_and_notify(lo->lo_disk, size, false);
 259}
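/*
 * Example (illustrative): size = 2097152 sectors resizes the block
 * device to 2097152 << 9 bytes = 1 GiB and notifies userspace of the
 * new capacity.
 */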
 260
 261static inline int
 262lo_do_transfer(struct loop_device *lo, int cmd,
 263	       struct page *rpage, unsigned roffs,
 264	       struct page *lpage, unsigned loffs,
 265	       int size, sector_t rblock)
 266{
 267	int ret;
 268
 269	ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
 270	if (likely(!ret))
 271		return 0;
 272
 273	printk_ratelimited(KERN_ERR
 274		"loop: Transfer error at byte offset %llu, length %i.\n",
 275		(unsigned long long)rblock << 9, size);
 276	return ret;
 277}
 278
 279static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
 280{
 281	struct iov_iter i;
 282	ssize_t bw;
 283
 284	iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
 285
 286	file_start_write(file);
 287	bw = vfs_iter_write(file, &i, ppos, 0);
 288	file_end_write(file);
 289
 290	if (likely(bw == bvec->bv_len))
 291		return 0;
 292
 293	printk_ratelimited(KERN_ERR
 294		"loop: Write error at byte offset %llu, length %i.\n",
 295		(unsigned long long)*ppos, bvec->bv_len);
 296	if (bw >= 0)
 297		bw = -EIO;
 298	return bw;
 299}
 300
 301static int lo_write_simple(struct loop_device *lo, struct request *rq,
 302		loff_t pos)
 303{
 304	struct bio_vec bvec;
 305	struct req_iterator iter;
 306	int ret = 0;
 307
 308	rq_for_each_segment(bvec, rq, iter) {
 309		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
 310		if (ret < 0)
 311			break;
 312		cond_resched();
 313	}
 314
 315	return ret;
 316}
 317
 318/*
 319 * This is the slow, transforming version that needs to double buffer the
 320 * data as it cannot do the transformations in place without having direct
 321 * access to the destination pages of the backing file.
 322 */
 323static int lo_write_transfer(struct loop_device *lo, struct request *rq,
 324		loff_t pos)
 325{
 326	struct bio_vec bvec, b;
 327	struct req_iterator iter;
 328	struct page *page;
 329	int ret = 0;
 330
 331	page = alloc_page(GFP_NOIO);
 332	if (unlikely(!page))
 333		return -ENOMEM;
 334
 335	rq_for_each_segment(bvec, rq, iter) {
 336		ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
 337			bvec.bv_offset, bvec.bv_len, pos >> 9);
 338		if (unlikely(ret))
 339			break;
 340
 341		b.bv_page = page;
 342		b.bv_offset = 0;
 343		b.bv_len = bvec.bv_len;
 344		ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
 345		if (ret < 0)
 346			break;
 347	}
 348
 349	__free_page(page);
 350	return ret;
 351}
 352
 353static int lo_read_simple(struct loop_device *lo, struct request *rq,
 354		loff_t pos)
 355{
 356	struct bio_vec bvec;
 357	struct req_iterator iter;
 358	struct iov_iter i;
 359	ssize_t len;
 360
 361	rq_for_each_segment(bvec, rq, iter) {
 362		iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
 363		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
 364		if (len < 0)
 365			return len;
 366
 367		flush_dcache_page(bvec.bv_page);
 368
 369		if (len != bvec.bv_len) {
 370			struct bio *bio;
 371
 372			__rq_for_each_bio(bio, rq)
 373				zero_fill_bio(bio);
 374			break;
 375		}
 376		cond_resched();
 377	}
 378
 379	return 0;
 380}
 381
 382static int lo_read_transfer(struct loop_device *lo, struct request *rq,
 383		loff_t pos)
 384{
 385	struct bio_vec bvec, b;
 386	struct req_iterator iter;
 387	struct iov_iter i;
 388	struct page *page;
 389	ssize_t len;
 390	int ret = 0;
 391
 392	page = alloc_page(GFP_NOIO);
 393	if (unlikely(!page))
 394		return -ENOMEM;
 395
 396	rq_for_each_segment(bvec, rq, iter) {
 397		loff_t offset = pos;
 398
 399		b.bv_page = page;
 400		b.bv_offset = 0;
 401		b.bv_len = bvec.bv_len;
 402
 403		iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
 404		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
 405		if (len < 0) {
 406			ret = len;
 407			goto out_free_page;
 408		}
 409
 410		ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
 411			bvec.bv_offset, len, offset >> 9);
 412		if (ret)
 413			goto out_free_page;
 414
 415		flush_dcache_page(bvec.bv_page);
 416
 417		if (len != bvec.bv_len) {
 418			struct bio *bio;
 419
 420			__rq_for_each_bio(bio, rq)
 421				zero_fill_bio(bio);
 422			break;
 423		}
 424	}
 425
 426	ret = 0;
 427out_free_page:
 428	__free_page(page);
 429	return ret;
 430}
 431
 432static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
 433			int mode)
 434{
 435	/*
 436	 * We use fallocate to manipulate the space mappings used by the image
 437	 * a.k.a. discard/zerorange. However, we do not support this if
 438	 * encryption is enabled, because it may give an attacker useful
 439	 * information.
 440	 */
 441	struct file *file = lo->lo_backing_file;
 442	struct request_queue *q = lo->lo_queue;
 443	int ret;
 444
 445	mode |= FALLOC_FL_KEEP_SIZE;
 446
 447	if (!blk_queue_discard(q)) {
 448		ret = -EOPNOTSUPP;
 449		goto out;
 450	}
 451
 452	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
 453	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
 454		ret = -EIO;
 455 out:
 456	return ret;
 457}
 458
 459static int lo_req_flush(struct loop_device *lo, struct request *rq)
 460{
 461	struct file *file = lo->lo_backing_file;
 462	int ret = vfs_fsync(file, 0);
 463	if (unlikely(ret && ret != -EINVAL))
 464		ret = -EIO;
 465
 466	return ret;
 467}
 468
 469static void lo_complete_rq(struct request *rq)
 470{
 471	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 472	blk_status_t ret = BLK_STS_OK;
 473
 474	if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
 475	    req_op(rq) != REQ_OP_READ) {
 476		if (cmd->ret < 0)
 477			ret = errno_to_blk_status(cmd->ret);
 478		goto end_io;
 479	}
 480
 481	/*
 482	 * Short READ - if we got some data, advance our request and
 483	 * retry it. If we got no data, end the rest with EIO.
 484	 */
 485	if (cmd->ret) {
 486		blk_update_request(rq, BLK_STS_OK, cmd->ret);
 487		cmd->ret = 0;
 488		blk_mq_requeue_request(rq, true);
 489	} else {
 490		if (cmd->use_aio) {
 491			struct bio *bio = rq->bio;
 492
 493			while (bio) {
 494				zero_fill_bio(bio);
 495				bio = bio->bi_next;
 496			}
 497		}
 498		ret = BLK_STS_IOERR;
 499end_io:
 500		blk_mq_end_request(rq, ret);
 501	}
 502}
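/*
 * Illustrative: an aio READ that asked for 65536 bytes but returned
 * 4096 is advanced by 4096 and requeued for the remainder; a 0-byte
 * return zero-fills the remaining bios and ends the request with an
 * I/O error.
 */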
 503
 504static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
 505{
 506	struct request *rq = blk_mq_rq_from_pdu(cmd);
 507
 508	if (!atomic_dec_and_test(&cmd->ref))
 509		return;
 510	kfree(cmd->bvec);
 511	cmd->bvec = NULL;
 512	if (likely(!blk_should_fake_timeout(rq->q)))
 513		blk_mq_complete_request(rq);
 514}
 515
 516static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
 517{
 518	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
 519
 520	if (cmd->css)
 521		css_put(cmd->css);
 522	cmd->ret = ret;
 523	lo_rw_aio_do_completion(cmd);
 524}
 525
 526static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 527		     loff_t pos, bool rw)
 528{
 529	struct iov_iter iter;
 530	struct req_iterator rq_iter;
 531	struct bio_vec *bvec;
 532	struct request *rq = blk_mq_rq_from_pdu(cmd);
 533	struct bio *bio = rq->bio;
 534	struct file *file = lo->lo_backing_file;
 535	struct bio_vec tmp;
 536	unsigned int offset;
 537	int nr_bvec = 0;
 538	int ret;
 539
 540	rq_for_each_bvec(tmp, rq, rq_iter)
 541		nr_bvec++;
 542
 543	if (rq->bio != rq->biotail) {
 544
 545		bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
 546				     GFP_NOIO);
 547		if (!bvec)
 548			return -EIO;
 549		cmd->bvec = bvec;
 550
 551		/*
 552		 * The bios of the request may be started from the middle of
 553		 * the 'bvec' because of bio splitting, so we can't directly
 554	 * copy bio->bi_io_vec to the new bvec. The rq_for_each_bvec
 555		 * API will take care of all details for us.
 556		 */
 557		rq_for_each_bvec(tmp, rq, rq_iter) {
 558			*bvec = tmp;
 559			bvec++;
 560		}
 561		bvec = cmd->bvec;
 562		offset = 0;
 563	} else {
 564		/*
 565		 * Same here, this bio may be started from the middle of the
 566	 * 'bvec' because of bio splitting, so the offset into the bvec
 567	 * must be passed to the iov iterator.
 568		 */
 569		offset = bio->bi_iter.bi_bvec_done;
 570		bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
 571	}
 572	atomic_set(&cmd->ref, 2);
 573
 574	iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
 575	iter.iov_offset = offset;
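	/*
	 * Illustrative: if this bio was produced by splitting a larger
	 * one mid-bvec, bi_iter.bi_bvec_done holds the bytes already
	 * consumed from the current bvec; carrying it into iov_offset
	 * keeps the iterator at the first byte this request still owns.
	 */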
 576
 577	cmd->iocb.ki_pos = pos;
 578	cmd->iocb.ki_filp = file;
 579	cmd->iocb.ki_complete = lo_rw_aio_complete;
 580	cmd->iocb.ki_flags = IOCB_DIRECT;
 581	cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
 582	if (cmd->css)
 583		kthread_associate_blkcg(cmd->css);
 584
 585	if (rw == WRITE)
 586		ret = call_write_iter(file, &cmd->iocb, &iter);
 587	else
 588		ret = call_read_iter(file, &cmd->iocb, &iter);
 589
 590	lo_rw_aio_do_completion(cmd);
 591	kthread_associate_blkcg(NULL);
 592
 593	if (ret != -EIOCBQUEUED)
 594		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
 595	return 0;
 596}
 597
 598static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 599{
 600	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 601	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
 602
 603	/*
 604	 * lo_write_simple and lo_read_simple could have been covered
 605	 * by an io-submit-style function like lo_rw_aio(); one blocker
 606	 * is that lo_read_simple() needs to call flush_dcache_page()
 607	 * after a page is written by the kernel, and that isn't easy to
 608	 * handle in an io-submit-style function, which submits all
 609	 * segments of the request at once. Direct read I/O doesn't need
 610	 * to run flush_dcache_page() at all.
 611	 */
 612	switch (req_op(rq)) {
 613	case REQ_OP_FLUSH:
 614		return lo_req_flush(lo, rq);
 615	case REQ_OP_WRITE_ZEROES:
 616		/*
 617		 * If the caller doesn't want deallocation, call zeroout to
 618		 * write zeroes to the range.  Otherwise, punch it out.
 619		 */
 620		return lo_fallocate(lo, rq, pos,
 621			(rq->cmd_flags & REQ_NOUNMAP) ?
 622				FALLOC_FL_ZERO_RANGE :
 623				FALLOC_FL_PUNCH_HOLE);
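		/*
		 * Illustrative: a BLKZEROOUT ioctl from userspace is
		 * expected to arrive here with REQ_NOUNMAP set, so the
		 * range is zeroed in place rather than hole-punched.
		 */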
 624	case REQ_OP_DISCARD:
 625		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
 626	case REQ_OP_WRITE:
 627		if (lo->transfer)
 628			return lo_write_transfer(lo, rq, pos);
 629		else if (cmd->use_aio)
 630			return lo_rw_aio(lo, cmd, pos, WRITE);
 631		else
 632			return lo_write_simple(lo, rq, pos);
 633	case REQ_OP_READ:
 634		if (lo->transfer)
 635			return lo_read_transfer(lo, rq, pos);
 636		else if (cmd->use_aio)
 637			return lo_rw_aio(lo, cmd, pos, READ);
 638		else
 639			return lo_read_simple(lo, rq, pos);
 640	default:
 641		WARN_ON_ONCE(1);
 642		return -EIO;
 643	}
 644}
 645
 646static inline void loop_update_dio(struct loop_device *lo)
 647{
 648	__loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
 649				lo->use_dio);
 650}
 651
 652static void loop_reread_partitions(struct loop_device *lo,
 653				   struct block_device *bdev)
 654{
 655	int rc;
 656
 657	mutex_lock(&bdev->bd_mutex);
 658	rc = bdev_disk_changed(bdev, false);
 659	mutex_unlock(&bdev->bd_mutex);
 660	if (rc)
 661		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
 662			__func__, lo->lo_number, lo->lo_file_name, rc);
 663}
 664
 665static inline int is_loop_device(struct file *file)
 666{
 667	struct inode *i = file->f_mapping->host;
 668
 669	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
 670}
 671
 672static int loop_validate_file(struct file *file, struct block_device *bdev)
 673{
 674	struct inode	*inode = file->f_mapping->host;
 675	struct file	*f = file;
 676
 677	/* Avoid recursion */
 678	while (is_loop_device(f)) {
 679		struct loop_device *l;
 680
 681		if (f->f_mapping->host->i_bdev == bdev)
 682			return -EBADF;
 683
 684		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
 685		if (l->lo_state != Lo_bound) {
 686			return -EINVAL;
 687		}
 688		f = l->lo_backing_file;
 689	}
 690	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
 691		return -EINVAL;
 692	return 0;
 693}
 694
 695/*
 696 * loop_change_fd switches the backing store of a loopback device to
 697 * a new file. This is useful for operating system installers to free up
 698 * the original file and in High Availability environments to switch to
 699 * an alternative location for the content in case of server meltdown.
 700 * This can only work if the loop device is used read-only, and if the
 701 * new backing store is the same size and type as the old backing store.
 702 */
 703static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 704			  unsigned int arg)
 705{
 706	struct file	*file = NULL, *old_file;
 707	int		error;
 708	bool		partscan;
 709
 710	error = mutex_lock_killable(&loop_ctl_mutex);
 711	if (error)
 712		return error;
 713	error = -ENXIO;
 714	if (lo->lo_state != Lo_bound)
 715		goto out_err;
 716
 717	/* the loop device has to be read-only */
 718	error = -EINVAL;
 719	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
 720		goto out_err;
 721
 722	error = -EBADF;
 723	file = fget(arg);
 724	if (!file)
 725		goto out_err;
 726
 727	error = loop_validate_file(file, bdev);
 728	if (error)
 729		goto out_err;
 730
 731	old_file = lo->lo_backing_file;
 732
 733	error = -EINVAL;
 734
 735	/* size of the new backing store needs to be the same */
 736	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
 737		goto out_err;
 738
 739	/* and ... switch */
 740	blk_mq_freeze_queue(lo->lo_queue);
 741	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
 742	lo->lo_backing_file = file;
 743	lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
 744	mapping_set_gfp_mask(file->f_mapping,
 745			     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 746	loop_update_dio(lo);
 747	blk_mq_unfreeze_queue(lo->lo_queue);
 748	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
 749	mutex_unlock(&loop_ctl_mutex);
 750	/*
 751	 * We must drop file reference outside of loop_ctl_mutex as dropping
 752	 * the file ref can take bd_mutex which creates circular locking
 753	 * dependency.
 754	 */
 755	fput(old_file);
 756	if (partscan)
 757		loop_reread_partitions(lo, bdev);
 758	return 0;
 759
 760out_err:
 761	mutex_unlock(&loop_ctl_mutex);
 762	if (file)
 763		fput(file);
 764	return error;
 765}
 766
 767/* loop sysfs attributes */
 768
 769static ssize_t loop_attr_show(struct device *dev, char *page,
 770			      ssize_t (*callback)(struct loop_device *, char *))
 771{
 772	struct gendisk *disk = dev_to_disk(dev);
 773	struct loop_device *lo = disk->private_data;
 774
 775	return callback(lo, page);
 776}
 777
 778#define LOOP_ATTR_RO(_name)						\
 779static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
 780static ssize_t loop_attr_do_show_##_name(struct device *d,		\
 781				struct device_attribute *attr, char *b)	\
 782{									\
 783	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
 784}									\
 785static struct device_attribute loop_attr_##_name =			\
 786	__ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);
 787
 788static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
 789{
 790	ssize_t ret;
 791	char *p = NULL;
 792
 793	spin_lock_irq(&lo->lo_lock);
 794	if (lo->lo_backing_file)
 795		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
 796	spin_unlock_irq(&lo->lo_lock);
 797
 798	if (IS_ERR_OR_NULL(p))
 799		ret = PTR_ERR(p);
 800	else {
 801		ret = strlen(p);
 802		memmove(buf, p, ret);
 803		buf[ret++] = '\n';
 804		buf[ret] = 0;
 805	}
 806
 807	return ret;
 808}
 809
 810static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
 811{
 812	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
 813}
 814
 815static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
 816{
 817	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
 818}
 819
 820static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
 821{
 822	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
 823
 824	return sprintf(buf, "%s\n", autoclear ? "1" : "0");
 825}
 826
 827static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
 828{
 829	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
 830
 831	return sprintf(buf, "%s\n", partscan ? "1" : "0");
 832}
 833
 834static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
 835{
 836	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
 837
 838	return sprintf(buf, "%s\n", dio ? "1" : "0");
 839}
 840
 841LOOP_ATTR_RO(backing_file);
 842LOOP_ATTR_RO(offset);
 843LOOP_ATTR_RO(sizelimit);
 844LOOP_ATTR_RO(autoclear);
 845LOOP_ATTR_RO(partscan);
 846LOOP_ATTR_RO(dio);
 847
 848static struct attribute *loop_attrs[] = {
 849	&loop_attr_backing_file.attr,
 850	&loop_attr_offset.attr,
 851	&loop_attr_sizelimit.attr,
 852	&loop_attr_autoclear.attr,
 853	&loop_attr_partscan.attr,
 854	&loop_attr_dio.attr,
 855	NULL,
 856};
 857
 858static struct attribute_group loop_attribute_group = {
 859	.name = "loop",
 860	.attrs = loop_attrs,
 861};
 862
 863static void loop_sysfs_init(struct loop_device *lo)
 864{
 865	lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
 866						&loop_attribute_group);
 867}
 868
 869static void loop_sysfs_exit(struct loop_device *lo)
 870{
 871	if (lo->sysfs_inited)
 872		sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
 873				   &loop_attribute_group);
 874}
 875
 876static void loop_config_discard(struct loop_device *lo)
 877{
 878	struct file *file = lo->lo_backing_file;
 879	struct inode *inode = file->f_mapping->host;
 880	struct request_queue *q = lo->lo_queue;
 881	u32 granularity, max_discard_sectors;
 882
 883	/*
 884	 * If the backing device is a block device, mirror its zeroing
 885	 * capability. Set the discard sectors to the block device's zeroing
 886	 * capabilities because loop discards result in blkdev_issue_zeroout(),
 887	 * not blkdev_issue_discard(). This maintains consistent behavior with
 888	 * file-backed loop devices: discarded regions read back as zero.
 889	 */
 890	if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
 891		struct request_queue *backingq;
 892
 893		backingq = bdev_get_queue(inode->i_bdev);
 894
 895		max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
 896		granularity = backingq->limits.discard_granularity ?:
 897			queue_physical_block_size(backingq);
 898
 899	/*
 900	 * We use punch hole to reclaim the free space used by the
 901	 * image a.k.a. discard. However, we do not support discard if
 902	 * encryption is enabled, because it may give an attacker
 903	 * useful information.
 904	 */
 905	} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
 906		max_discard_sectors = 0;
 907		granularity = 0;
 908
 909	} else {
 910		max_discard_sectors = UINT_MAX >> 9;
 911		granularity = inode->i_sb->s_blocksize;
 912	}
 913
 914	if (max_discard_sectors) {
 915		q->limits.discard_granularity = granularity;
 916		blk_queue_max_discard_sectors(q, max_discard_sectors);
 917		blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
 918		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 919	} else {
 920		q->limits.discard_granularity = 0;
 921		blk_queue_max_discard_sectors(q, 0);
 922		blk_queue_max_write_zeroes_sectors(q, 0);
 923		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 924	}
 925	q->limits.discard_alignment = 0;
 926}
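/*
 * Example (illustrative): an unencrypted loop device backed by a file
 * on ext4 typically ends up with discard_granularity = s_blocksize
 * (often 4096) and max_discard_sectors = UINT_MAX >> 9.
 */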
 927
 928static void loop_unprepare_queue(struct loop_device *lo)
 929{
 930	kthread_flush_worker(&lo->worker);
 931	kthread_stop(lo->worker_task);
 932}
 933
 934static int loop_kthread_worker_fn(void *worker_ptr)
 935{
 936	current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
 937	return kthread_worker_fn(worker_ptr);
 938}
 939
 940static int loop_prepare_queue(struct loop_device *lo)
 941{
 942	kthread_init_worker(&lo->worker);
 943	lo->worker_task = kthread_run(loop_kthread_worker_fn,
 944			&lo->worker, "loop%d", lo->lo_number);
 945	if (IS_ERR(lo->worker_task))
 946		return -ENOMEM;
 947	set_user_nice(lo->worker_task, MIN_NICE);
 948	return 0;
 949}
 950
 951static void loop_update_rotational(struct loop_device *lo)
 952{
 953	struct file *file = lo->lo_backing_file;
 954	struct inode *file_inode = file->f_mapping->host;
 955	struct block_device *file_bdev = file_inode->i_sb->s_bdev;
 956	struct request_queue *q = lo->lo_queue;
 957	bool nonrot = true;
 958
 959	/* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
 960	if (file_bdev)
 961		nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));
 962
 963	if (nonrot)
 964		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 965	else
 966		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
 967}
 968
 969static int
 970loop_release_xfer(struct loop_device *lo)
 971{
 972	int err = 0;
 973	struct loop_func_table *xfer = lo->lo_encryption;
 974
 975	if (xfer) {
 976		if (xfer->release)
 977			err = xfer->release(lo);
 978		lo->transfer = NULL;
 979		lo->lo_encryption = NULL;
 980		module_put(xfer->owner);
 981	}
 982	return err;
 983}
 984
 985static int
 986loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
 987	       const struct loop_info64 *i)
 988{
 989	int err = 0;
 990
 991	if (xfer) {
 992		struct module *owner = xfer->owner;
 993
 994		if (!try_module_get(owner))
 995			return -EINVAL;
 996		if (xfer->init)
 997			err = xfer->init(lo, i);
 998		if (err)
 999			module_put(owner);
1000		else
1001			lo->lo_encryption = xfer;
1002	}
1003	return err;
1004}
1005
1006/**
1007 * loop_set_status_from_info - configure device from loop_info
1008 * @lo: struct loop_device to configure
1009 * @info: struct loop_info64 to configure the device with
1010 *
1011 * Configures the loop device parameters according to the passed
1012 * in loop_info64 configuration.
1013 */
1014static int
1015loop_set_status_from_info(struct loop_device *lo,
1016			  const struct loop_info64 *info)
1017{
1018	int err;
1019	struct loop_func_table *xfer;
1020	kuid_t uid = current_uid();
1021
1022	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
1023		return -EINVAL;
1024
1025	err = loop_release_xfer(lo);
1026	if (err)
1027		return err;
1028
1029	if (info->lo_encrypt_type) {
1030		unsigned int type = info->lo_encrypt_type;
1031
1032		if (type >= MAX_LO_CRYPT)
1033			return -EINVAL;
1034		xfer = xfer_funcs[type];
1035		if (xfer == NULL)
1036			return -EINVAL;
1037	} else
1038		xfer = NULL;
1039
1040	err = loop_init_xfer(lo, xfer, info);
1041	if (err)
1042		return err;
1043
1044	lo->lo_offset = info->lo_offset;
1045	lo->lo_sizelimit = info->lo_sizelimit;
1046	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
1047	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
1048	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
1049	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
1050
1051	if (!xfer)
1052		xfer = &none_funcs;
1053	lo->transfer = xfer->transfer;
1054	lo->ioctl = xfer->ioctl;
1055
1056	lo->lo_flags = info->lo_flags;
1057
1058	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
1059	lo->lo_init[0] = info->lo_init[0];
1060	lo->lo_init[1] = info->lo_init[1];
1061	if (info->lo_encrypt_key_size) {
1062		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
1063		       info->lo_encrypt_key_size);
1064		lo->lo_key_owner = uid;
1065	}
1066
1067	return 0;
1068}
1069
1070static int loop_configure(struct loop_device *lo, fmode_t mode,
1071			  struct block_device *bdev,
1072			  const struct loop_config *config)
1073{
1074	struct file	*file;
1075	struct inode	*inode;
1076	struct address_space *mapping;
1077	struct block_device *claimed_bdev = NULL;
1078	int		error;
1079	loff_t		size;
1080	bool		partscan;
1081	unsigned short  bsize;
1082
1083	/* This is safe, since we have a reference from open(). */
1084	__module_get(THIS_MODULE);
1085
1086	error = -EBADF;
1087	file = fget(config->fd);
1088	if (!file)
1089		goto out;
1090
1091	/*
1092	 * If we don't hold an exclusive handle for the device, upgrade to
1093	 * one here to avoid changing the device under its exclusive owner.
1094	 */
1095	if (!(mode & FMODE_EXCL)) {
1096		claimed_bdev = bdev->bd_contains;
1097		error = bd_prepare_to_claim(bdev, claimed_bdev, loop_configure);
1098		if (error)
1099			goto out_putf;
1100	}
1101
1102	error = mutex_lock_killable(&loop_ctl_mutex);
1103	if (error)
1104		goto out_bdev;
1105
1106	error = -EBUSY;
1107	if (lo->lo_state != Lo_unbound)
1108		goto out_unlock;
1109
1110	error = loop_validate_file(file, bdev);
1111	if (error)
1112		goto out_unlock;
1113
1114	mapping = file->f_mapping;
1115	inode = mapping->host;
1116
1117	if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
1118		error = -EINVAL;
1119		goto out_unlock;
1120	}
1121
1122	if (config->block_size) {
1123		error = loop_validate_block_size(config->block_size);
1124		if (error)
1125			goto out_unlock;
1126	}
1127
1128	error = loop_set_status_from_info(lo, &config->info);
1129	if (error)
1130		goto out_unlock;
1131
1132	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
1133	    !file->f_op->write_iter)
1134		lo->lo_flags |= LO_FLAGS_READ_ONLY;
1135
1136	error = loop_prepare_queue(lo);
1137	if (error)
1138		goto out_unlock;
1139
1140	set_device_ro(bdev, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
1141
1142	lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
1143	lo->lo_device = bdev;
1144	lo->lo_backing_file = file;
1145	lo->old_gfp_mask = mapping_gfp_mask(mapping);
1146	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
1147
1148	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
1149		blk_queue_write_cache(lo->lo_queue, true, false);
1150
1151	if (config->block_size)
1152		bsize = config->block_size;
1153	else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
1154		/* In case of direct I/O, match underlying block size */
1155		bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
1156	else
1157		bsize = 512;
1158
1159	blk_queue_logical_block_size(lo->lo_queue, bsize);
1160	blk_queue_physical_block_size(lo->lo_queue, bsize);
1161	blk_queue_io_min(lo->lo_queue, bsize);
1162
1163	loop_update_rotational(lo);
1164	loop_update_dio(lo);
1165	loop_sysfs_init(lo);
1166
1167	size = get_loop_size(lo, file);
1168	loop_set_size(lo, size);
1169
1170	set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
1171		      block_size(inode->i_bdev) : PAGE_SIZE);
1172
1173	lo->lo_state = Lo_bound;
1174	if (part_shift)
1175		lo->lo_flags |= LO_FLAGS_PARTSCAN;
1176	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
1177	if (partscan)
1178		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
1179
1180	/* Grab the block_device to prevent its destruction after we
1181	 * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
1182	 */
1183	bdgrab(bdev);
1184	mutex_unlock(&loop_ctl_mutex);
1185	if (partscan)
1186		loop_reread_partitions(lo, bdev);
1187	if (claimed_bdev)
1188		bd_abort_claiming(bdev, claimed_bdev, loop_configure);
1189	return 0;
1190
1191out_unlock:
1192	mutex_unlock(&loop_ctl_mutex);
1193out_bdev:
1194	if (claimed_bdev)
1195		bd_abort_claiming(bdev, claimed_bdev, loop_configure);
1196out_putf:
1197	fput(file);
1198out:
1199	/* This is safe: open() is still holding a reference. */
1200	module_put(THIS_MODULE);
1201	return error;
1202}
1203
1204static int __loop_clr_fd(struct loop_device *lo, bool release)
1205{
1206	struct file *filp = NULL;
1207	gfp_t gfp = lo->old_gfp_mask;
1208	struct block_device *bdev = lo->lo_device;
1209	int err = 0;
1210	bool partscan = false;
1211	int lo_number;
1212
1213	mutex_lock(&loop_ctl_mutex);
1214	if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
1215		err = -ENXIO;
1216		goto out_unlock;
1217	}
1218
1219	filp = lo->lo_backing_file;
1220	if (filp == NULL) {
1221		err = -EINVAL;
1222		goto out_unlock;
1223	}
1224
1225	/* freeze request queue during the transition */
1226	blk_mq_freeze_queue(lo->lo_queue);
1227
1228	spin_lock_irq(&lo->lo_lock);
1229	lo->lo_backing_file = NULL;
1230	spin_unlock_irq(&lo->lo_lock);
1231
1232	loop_release_xfer(lo);
1233	lo->transfer = NULL;
1234	lo->ioctl = NULL;
1235	lo->lo_device = NULL;
1236	lo->lo_encryption = NULL;
1237	lo->lo_offset = 0;
1238	lo->lo_sizelimit = 0;
1239	lo->lo_encrypt_key_size = 0;
1240	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
1241	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
1242	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
1243	blk_queue_logical_block_size(lo->lo_queue, 512);
1244	blk_queue_physical_block_size(lo->lo_queue, 512);
1245	blk_queue_io_min(lo->lo_queue, 512);
1246	if (bdev) {
1247		bdput(bdev);
1248		invalidate_bdev(bdev);
1249		bdev->bd_inode->i_mapping->wb_err = 0;
1250	}
1251	set_capacity(lo->lo_disk, 0);
1252	loop_sysfs_exit(lo);
1253	if (bdev) {
1254		bd_set_size(bdev, 0);
1255		/* let user-space know about this change */
1256		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
1257	}
1258	mapping_set_gfp_mask(filp->f_mapping, gfp);
1259	/* This is safe: open() is still holding a reference. */
1260	module_put(THIS_MODULE);
1261	blk_mq_unfreeze_queue(lo->lo_queue);
1262
1263	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
1264	lo_number = lo->lo_number;
1265	loop_unprepare_queue(lo);
1266out_unlock:
1267	mutex_unlock(&loop_ctl_mutex);
1268	if (partscan) {
1269		/*
1270		 * bd_mutex has been held already in release path, so don't
1271		 * acquire it if this function is called in such case.
1272		 *
1273		 * If the partition reread isn't from the release path, lo_refcnt
1274		 * must be at least one and can only become zero when the
1275		 * current holder is released.
1276		 */
1277		if (!release)
1278			mutex_lock(&bdev->bd_mutex);
1279		err = bdev_disk_changed(bdev, false);
1280		if (!release)
1281			mutex_unlock(&bdev->bd_mutex);
1282		if (err)
1283			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
1284				__func__, lo_number, err);
1285		/* Device is gone, no point in returning error */
1286		err = 0;
1287	}
1288
1289	/*
1290	 * lo->lo_state is set to Lo_unbound here after the above partscan
1291	 * has finished.
1292	 *
1293	 * There cannot be anybody else entering __loop_clr_fd() as
1294	 * lo->lo_backing_file is already cleared and Lo_rundown state
1295	 * protects us from all the other places trying to change the 'lo'
1296	 * device.
1297	 */
1298	mutex_lock(&loop_ctl_mutex);
1299	lo->lo_flags = 0;
1300	if (!part_shift)
1301		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
1302	lo->lo_state = Lo_unbound;
1303	mutex_unlock(&loop_ctl_mutex);
1304
1305	/*
1306	 * Need not hold loop_ctl_mutex to fput backing file.
1307	 * Calling fput holding loop_ctl_mutex triggers a circular
1308	 * lock dependency possibility warning as fput can take
1309	 * bd_mutex which is usually taken before loop_ctl_mutex.
1310	 */
1311	if (filp)
1312		fput(filp);
1313	return err;
1314}
1315
1316static int loop_clr_fd(struct loop_device *lo)
1317{
1318	int err;
1319
1320	err = mutex_lock_killable(&loop_ctl_mutex);
1321	if (err)
1322		return err;
1323	if (lo->lo_state != Lo_bound) {
1324		mutex_unlock(&loop_ctl_mutex);
1325		return -ENXIO;
1326	}
1327	/*
1328	 * If we've explicitly asked to tear down the loop device,
1329	 * and it has an elevated reference count, set it for auto-teardown when
1330	 * the last reference goes away. This stops $!~#$@ udev from
1331	 * preventing teardown because it decided that it needs to run blkid on
1332		 * the loopback device whenever one appears. xfstests is notorious for
1333		 * failing tests because blkid via udev races with a "losetup
1334		 * <dev>; mkfs; losetup -d <dev>" sequence, causing the losetup -d
1335		 * command to fail with EBUSY.
1336	 */
1337	if (atomic_read(&lo->lo_refcnt) > 1) {
1338		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
1339		mutex_unlock(&loop_ctl_mutex);
1340		return 0;
1341	}
1342	lo->lo_state = Lo_rundown;
1343	mutex_unlock(&loop_ctl_mutex);
1344
1345	return __loop_clr_fd(lo, false);
1346}
1347
1348static int
1349loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1350{
1351	int err;
1352	struct block_device *bdev;
1353	kuid_t uid = current_uid();
1354	int prev_lo_flags;
1355	bool partscan = false;
1356	bool size_changed = false;
1357
1358	err = mutex_lock_killable(&loop_ctl_mutex);
1359	if (err)
1360		return err;
1361	if (lo->lo_encrypt_key_size &&
1362	    !uid_eq(lo->lo_key_owner, uid) &&
1363	    !capable(CAP_SYS_ADMIN)) {
1364		err = -EPERM;
1365		goto out_unlock;
1366	}
1367	if (lo->lo_state != Lo_bound) {
1368		err = -ENXIO;
1369		goto out_unlock;
1370	}
1371
1372	if (lo->lo_offset != info->lo_offset ||
1373	    lo->lo_sizelimit != info->lo_sizelimit) {
1374		size_changed = true;
1375		sync_blockdev(lo->lo_device);
1376		invalidate_bdev(lo->lo_device);
1377	}
1378
1379	/* I/O needs to be drained during the transfer transition */
1380	blk_mq_freeze_queue(lo->lo_queue);
1381
1382	if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
1383		/* If any pages were dirtied after invalidate_bdev(), try again */
1384		err = -EAGAIN;
1385		pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
1386			__func__, lo->lo_number, lo->lo_file_name,
1387			lo->lo_device->bd_inode->i_mapping->nrpages);
1388		goto out_unfreeze;
1389	}
1390
1391	prev_lo_flags = lo->lo_flags;
1392
1393	err = loop_set_status_from_info(lo, info);
1394	if (err)
1395		goto out_unfreeze;
1396
1397	/* Mask out flags that can't be set using LOOP_SET_STATUS. */
1398	lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
1399	/* For those flags, use the previous values instead */
1400	lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
1401	/* For flags that can't be cleared, use previous values too */
1402	lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
1403
1404	if (size_changed) {
1405		loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
1406					   lo->lo_backing_file);
1407		loop_set_size(lo, new_size);
1408	}
1409
1410	loop_config_discard(lo);
1411
1412	/* update dio if lo_offset or transfer is changed */
1413	__loop_update_dio(lo, lo->use_dio);
1414
1415out_unfreeze:
1416	blk_mq_unfreeze_queue(lo->lo_queue);
1417
1418	if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
1419	     !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
1420		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
1421		bdev = lo->lo_device;
1422		partscan = true;
1423	}
1424out_unlock:
1425	mutex_unlock(&loop_ctl_mutex);
1426	if (partscan)
1427		loop_reread_partitions(lo, bdev);
1428
1429	return err;
1430}
1431
1432static int
1433loop_get_status(struct loop_device *lo, struct loop_info64 *info)
1434{
1435	struct path path;
1436	struct kstat stat;
1437	int ret;
1438
1439	ret = mutex_lock_killable(&loop_ctl_mutex);
1440	if (ret)
1441		return ret;
1442	if (lo->lo_state != Lo_bound) {
1443		mutex_unlock(&loop_ctl_mutex);
1444		return -ENXIO;
1445	}
1446
1447	memset(info, 0, sizeof(*info));
1448	info->lo_number = lo->lo_number;
1449	info->lo_offset = lo->lo_offset;
1450	info->lo_sizelimit = lo->lo_sizelimit;
1451	info->lo_flags = lo->lo_flags;
1452	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1453	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
1454	info->lo_encrypt_type =
1455		lo->lo_encryption ? lo->lo_encryption->number : 0;
1456	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
1457		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
1458		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
1459		       lo->lo_encrypt_key_size);
1460	}
1461
1462	/* Drop loop_ctl_mutex while we call into the filesystem. */
1463	path = lo->lo_backing_file->f_path;
1464	path_get(&path);
1465	mutex_unlock(&loop_ctl_mutex);
1466	ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
1467	if (!ret) {
1468		info->lo_device = huge_encode_dev(stat.dev);
1469		info->lo_inode = stat.ino;
1470		info->lo_rdevice = huge_encode_dev(stat.rdev);
1471	}
1472	path_put(&path);
1473	return ret;
1474}
1475
1476static void
1477loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
1478{
1479	memset(info64, 0, sizeof(*info64));
1480	info64->lo_number = info->lo_number;
1481	info64->lo_device = info->lo_device;
1482	info64->lo_inode = info->lo_inode;
1483	info64->lo_rdevice = info->lo_rdevice;
1484	info64->lo_offset = info->lo_offset;
1485	info64->lo_sizelimit = 0;
1486	info64->lo_encrypt_type = info->lo_encrypt_type;
1487	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
1488	info64->lo_flags = info->lo_flags;
1489	info64->lo_init[0] = info->lo_init[0];
1490	info64->lo_init[1] = info->lo_init[1];
1491	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1492		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
1493	else
1494		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
1495	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
1496}
1497
1498static int
1499loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
1500{
1501	memset(info, 0, sizeof(*info));
1502	info->lo_number = info64->lo_number;
1503	info->lo_device = info64->lo_device;
1504	info->lo_inode = info64->lo_inode;
1505	info->lo_rdevice = info64->lo_rdevice;
1506	info->lo_offset = info64->lo_offset;
1507	info->lo_encrypt_type = info64->lo_encrypt_type;
1508	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
1509	info->lo_flags = info64->lo_flags;
1510	info->lo_init[0] = info64->lo_init[0];
1511	info->lo_init[1] = info64->lo_init[1];
1512	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1513		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1514	else
1515		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
1516	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1517
1518	/* error in case values were truncated */
1519	if (info->lo_device != info64->lo_device ||
1520	    info->lo_rdevice != info64->lo_rdevice ||
1521	    info->lo_inode != info64->lo_inode ||
1522	    info->lo_offset != info64->lo_offset)
1523		return -EOVERFLOW;
1524
1525	return 0;
1526}
1527
1528static int
1529loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1530{
1531	struct loop_info info;
1532	struct loop_info64 info64;
1533
1534	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
1535		return -EFAULT;
1536	loop_info64_from_old(&info, &info64);
1537	return loop_set_status(lo, &info64);
1538}
1539
1540static int
1541loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1542{
1543	struct loop_info64 info64;
1544
1545	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
1546		return -EFAULT;
1547	return loop_set_status(lo, &info64);
1548}
1549
1550static int
1551loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1552	struct loop_info info;
1553	struct loop_info64 info64;
1554	int err;
1555
1556	if (!arg)
1557		return -EINVAL;
1558	err = loop_get_status(lo, &info64);
1559	if (!err)
1560		err = loop_info64_to_old(&info64, &info);
1561	if (!err && copy_to_user(arg, &info, sizeof(info)))
1562		err = -EFAULT;
1563
1564	return err;
1565}
1566
1567static int
1568loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1569	struct loop_info64 info64;
1570	int err;
1571
1572	if (!arg)
1573		return -EINVAL;
1574	err = loop_get_status(lo, &info64);
1575	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1576		err = -EFAULT;
1577
1578	return err;
1579}
1580
1581static int loop_set_capacity(struct loop_device *lo)
1582{
1583	loff_t size;
1584
1585	if (unlikely(lo->lo_state != Lo_bound))
1586		return -ENXIO;
1587
1588	size = get_loop_size(lo, lo->lo_backing_file);
1589	loop_set_size(lo, size);
1590
1591	return 0;
1592}
1593
1594static int loop_set_dio(struct loop_device *lo, unsigned long arg)
1595{
1596	int error = -ENXIO;
1597	if (lo->lo_state != Lo_bound)
1598		goto out;
1599
1600	__loop_update_dio(lo, !!arg);
1601	if (lo->use_dio == !!arg)
1602		return 0;
1603	error = -EINVAL;
1604 out:
1605	return error;
1606}
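/*
 * Illustrative: losetup --direct-io=on issues LOOP_SET_DIRECT_IO with
 * arg = 1; the call only succeeds if __loop_update_dio() could really
 * enable direct I/O, i.e. the alignment and no-transfer conditions
 * above hold.
 */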
1607
1608static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
1609{
1610	int err = 0;
1611
1612	if (lo->lo_state != Lo_bound)
1613		return -ENXIO;
1614
1615	err = loop_validate_block_size(arg);
1616	if (err)
1617		return err;
1618
1619	if (lo->lo_queue->limits.logical_block_size == arg)
1620		return 0;
1621
1622	sync_blockdev(lo->lo_device);
1623	invalidate_bdev(lo->lo_device);
1624
1625	blk_mq_freeze_queue(lo->lo_queue);
1626
1627	/* invalidate_bdev should have truncated all the pages */
1628	if (lo->lo_device->bd_inode->i_mapping->nrpages) {
1629		err = -EAGAIN;
1630		pr_warn("%s: loop%d (%s) still has dirty pages (nrpages=%lu)\n",
1631			__func__, lo->lo_number, lo->lo_file_name,
1632			lo->lo_device->bd_inode->i_mapping->nrpages);
1633		goto out_unfreeze;
1634	}
1635
1636	blk_queue_logical_block_size(lo->lo_queue, arg);
1637	blk_queue_physical_block_size(lo->lo_queue, arg);
1638	blk_queue_io_min(lo->lo_queue, arg);
1639	loop_update_dio(lo);
1640out_unfreeze:
1641	blk_mq_unfreeze_queue(lo->lo_queue);
1642
1643	return err;
1644}
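/*
 * Illustrative: losetup --sector-size 4096 reaches this path via
 * LOOP_SET_BLOCK_SIZE; the queue is frozen so the logical/physical
 * block sizes change with no I/O in flight.
 */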
1645
1646static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
1647			   unsigned long arg)
1648{
1649	int err;
1650
1651	err = mutex_lock_killable(&loop_ctl_mutex);
1652	if (err)
1653		return err;
1654	switch (cmd) {
1655	case LOOP_SET_CAPACITY:
1656		err = loop_set_capacity(lo);
1657		break;
1658	case LOOP_SET_DIRECT_IO:
1659		err = loop_set_dio(lo, arg);
1660		break;
1661	case LOOP_SET_BLOCK_SIZE:
1662		err = loop_set_block_size(lo, arg);
1663		break;
1664	default:
1665		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1666	}
1667	mutex_unlock(&loop_ctl_mutex);
1668	return err;
1669}
1670
1671static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1672	unsigned int cmd, unsigned long arg)
1673{
1674	struct loop_device *lo = bdev->bd_disk->private_data;
1675	void __user *argp = (void __user *) arg;
1676	int err;
1677
1678	switch (cmd) {
1679	case LOOP_SET_FD: {
1680		/*
1681		 * Legacy case - pass in a zeroed out struct loop_config with
1682		 * only the file descriptor set, which corresponds to the
1683		 * default parameters we'd have used otherwise.
1684		 */
1685		struct loop_config config;
1686
1687		memset(&config, 0, sizeof(config));
1688		config.fd = arg;
1689
1690		return loop_configure(lo, mode, bdev, &config);
1691	}
1692	case LOOP_CONFIGURE: {
1693		struct loop_config config;
1694
1695		if (copy_from_user(&config, argp, sizeof(config)))
1696			return -EFAULT;
1697
1698		return loop_configure(lo, mode, bdev, &config);
1699	}
1700	case LOOP_CHANGE_FD:
1701		return loop_change_fd(lo, bdev, arg);
1702	case LOOP_CLR_FD:
1703		return loop_clr_fd(lo);
1704	case LOOP_SET_STATUS:
1705		err = -EPERM;
1706		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
1707			err = loop_set_status_old(lo, argp);
1708		}
1709		break;
1710	case LOOP_GET_STATUS:
1711		return loop_get_status_old(lo, argp);
1712	case LOOP_SET_STATUS64:
1713		err = -EPERM;
1714		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
1715			err = loop_set_status64(lo, argp);
1716		}
1717		break;
1718	case LOOP_GET_STATUS64:
1719		return loop_get_status64(lo, argp);
1720	case LOOP_SET_CAPACITY:
1721	case LOOP_SET_DIRECT_IO:
1722	case LOOP_SET_BLOCK_SIZE:
1723		if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
1724			return -EPERM;
1725		fallthrough;
1726	default:
1727		err = lo_simple_ioctl(lo, cmd, arg);
1728		break;
1729	}
1730
1731	return err;
1732}
1733
1734#ifdef CONFIG_COMPAT
1735struct compat_loop_info {
1736	compat_int_t	lo_number;      /* ioctl r/o */
1737	compat_dev_t	lo_device;      /* ioctl r/o */
1738	compat_ulong_t	lo_inode;       /* ioctl r/o */
1739	compat_dev_t	lo_rdevice;     /* ioctl r/o */
1740	compat_int_t	lo_offset;
1741	compat_int_t	lo_encrypt_type;
1742	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
1743	compat_int_t	lo_flags;       /* ioctl r/o */
1744	char		lo_name[LO_NAME_SIZE];
1745	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
1746	compat_ulong_t	lo_init[2];
1747	char		reserved[4];
1748};
1749
1750/*
1751 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
1752 * - noinlined to reduce stack space usage in main part of driver
1753 */
1754static noinline int
1755loop_info64_from_compat(const struct compat_loop_info __user *arg,
1756			struct loop_info64 *info64)
1757{
1758	struct compat_loop_info info;
1759
1760	if (copy_from_user(&info, arg, sizeof(info)))
1761		return -EFAULT;
1762
1763	memset(info64, 0, sizeof(*info64));
1764	info64->lo_number = info.lo_number;
1765	info64->lo_device = info.lo_device;
1766	info64->lo_inode = info.lo_inode;
1767	info64->lo_rdevice = info.lo_rdevice;
1768	info64->lo_offset = info.lo_offset;
1769	info64->lo_sizelimit = 0;
1770	info64->lo_encrypt_type = info.lo_encrypt_type;
1771	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
1772	info64->lo_flags = info.lo_flags;
1773	info64->lo_init[0] = info.lo_init[0];
1774	info64->lo_init[1] = info.lo_init[1];
1775	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1776		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
1777	else
1778		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
1779	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
1780	return 0;
1781}
1782
1783/*
1784 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
1785 * - noinlined to reduce stack space usage in main part of driver
1786 */
1787static noinline int
1788loop_info64_to_compat(const struct loop_info64 *info64,
1789		      struct compat_loop_info __user *arg)
1790{
1791	struct compat_loop_info info;
1792
1793	memset(&info, 0, sizeof(info));
1794	info.lo_number = info64->lo_number;
1795	info.lo_device = info64->lo_device;
1796	info.lo_inode = info64->lo_inode;
1797	info.lo_rdevice = info64->lo_rdevice;
1798	info.lo_offset = info64->lo_offset;
1799	info.lo_encrypt_type = info64->lo_encrypt_type;
1800	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
1801	info.lo_flags = info64->lo_flags;
1802	info.lo_init[0] = info64->lo_init[0];
1803	info.lo_init[1] = info64->lo_init[1];
1804	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1805		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1806	else
1807		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
1808	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1809
1810	/* error in case values were truncated */
1811	if (info.lo_device != info64->lo_device ||
1812	    info.lo_rdevice != info64->lo_rdevice ||
1813	    info.lo_inode != info64->lo_inode ||
1814	    info.lo_offset != info64->lo_offset ||
1815	    info.lo_init[0] != info64->lo_init[0] ||
1816	    info.lo_init[1] != info64->lo_init[1])
1817		return -EOVERFLOW;
1818
1819	if (copy_to_user(arg, &info, sizeof(info)))
1820		return -EFAULT;
1821	return 0;
1822}
1823
1824static int
1825loop_set_status_compat(struct loop_device *lo,
1826		       const struct compat_loop_info __user *arg)
1827{
1828	struct loop_info64 info64;
1829	int ret;
1830
1831	ret = loop_info64_from_compat(arg, &info64);
1832	if (ret < 0)
1833		return ret;
1834	return loop_set_status(lo, &info64);
1835}
1836
1837static int
1838loop_get_status_compat(struct loop_device *lo,
1839		       struct compat_loop_info __user *arg)
1840{
1841	struct loop_info64 info64;
1842	int err;
1843
1844	if (!arg)
1845		return -EINVAL;
1846	err = loop_get_status(lo, &info64);
1847	if (!err)
1848		err = loop_info64_to_compat(&info64, arg);
1849	return err;
1850}
1851
1852static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1853			   unsigned int cmd, unsigned long arg)
1854{
1855	struct loop_device *lo = bdev->bd_disk->private_data;
1856	int err;
1857
1858	switch(cmd) {
1859	case LOOP_SET_STATUS:
1860		err = loop_set_status_compat(lo,
1861			     (const struct compat_loop_info __user *)arg);
1862		break;
1863	case LOOP_GET_STATUS:
1864		err = loop_get_status_compat(lo,
1865				     (struct compat_loop_info __user *)arg);
1866		break;
1867	case LOOP_SET_CAPACITY:
1868	case LOOP_CLR_FD:
1869	case LOOP_GET_STATUS64:
1870	case LOOP_SET_STATUS64:
1871	case LOOP_CONFIGURE:
1872		arg = (unsigned long) compat_ptr(arg);
1873		fallthrough;
1874	case LOOP_SET_FD:
1875	case LOOP_CHANGE_FD:
1876	case LOOP_SET_BLOCK_SIZE:
1877	case LOOP_SET_DIRECT_IO:
1878		err = lo_ioctl(bdev, mode, cmd, arg);
1879		break;
1880	default:
1881		err = -ENOIOCTLCMD;
1882		break;
1883	}
1884	return err;
1885}
1886#endif
1887
1888static int lo_open(struct block_device *bdev, fmode_t mode)
1889{
1890	struct loop_device *lo;
1891	int err;
1892
1893	err = mutex_lock_killable(&loop_ctl_mutex);
1894	if (err)
1895		return err;
1896	lo = bdev->bd_disk->private_data;
1897	if (!lo) {
1898		err = -ENXIO;
1899		goto out;
1900	}
1901
1902	atomic_inc(&lo->lo_refcnt);
1903out:
1904	mutex_unlock(&loop_ctl_mutex);
1905	return err;
1906}
1907
1908static void lo_release(struct gendisk *disk, fmode_t mode)
1909{
1910	struct loop_device *lo;
1911
1912	mutex_lock(&loop_ctl_mutex);
1913	lo = disk->private_data;
1914	if (atomic_dec_return(&lo->lo_refcnt))
1915		goto out_unlock;
1916
1917	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
1918		if (lo->lo_state != Lo_bound)
1919			goto out_unlock;
1920		lo->lo_state = Lo_rundown;
1921		mutex_unlock(&loop_ctl_mutex);
1922		/*
1923		 * In autoclear mode, stop the loop thread
1924		 * and remove configuration after last close.
1925		 */
1926		__loop_clr_fd(lo, true);
1927		return;
1928	} else if (lo->lo_state == Lo_bound) {
1929		/*
1930		 * Otherwise keep thread (if running) and config,
1931		 * but flush possible ongoing bios in thread.
1932		 */
1933		blk_mq_freeze_queue(lo->lo_queue);
1934		blk_mq_unfreeze_queue(lo->lo_queue);
1935	}
1936
1937out_unlock:
1938	mutex_unlock(&loop_ctl_mutex);
1939}
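
/*
 * A minimal userspace sketch of the autoclear path handled above: once
 * LO_FLAGS_AUTOCLEAR is set, the final close tears the device down, so
 * no explicit LOOP_CLR_FD is needed (paths are examples, error handling
 * elided):
 *
 *	struct loop_info64 info = { 0 };
 *	int lfd = open("/dev/loop0", O_RDWR);
 *	int bfd = open("backing.img", O_RDWR);
 *
 *	ioctl(lfd, LOOP_SET_FD, bfd);
 *	info.lo_flags = LO_FLAGS_AUTOCLEAR;
 *	ioctl(lfd, LOOP_SET_STATUS64, &info);
 *	close(lfd);	// last close => __loop_clr_fd() runs
 */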
1940
1941static const struct block_device_operations lo_fops = {
1942	.owner =	THIS_MODULE,
1943	.open =		lo_open,
1944	.release =	lo_release,
1945	.ioctl =	lo_ioctl,
1946#ifdef CONFIG_COMPAT
1947	.compat_ioctl =	lo_compat_ioctl,
1948#endif
1949};
1950
1951/*
1952 * And now the module code and kernel interface.
1953 */
1954static int max_loop;
1955module_param(max_loop, int, 0444);
1956MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
1957module_param(max_part, int, 0444);
1958MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
1959MODULE_LICENSE("GPL");
1960MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
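
/*
 * Example module load using the parameters above (values illustrative):
 *
 *	modprobe loop max_loop=8 max_part=7
 *
 * pre-creates loop0..loop7 and reserves minors for 7 partitions per
 * device.  Both parameters are mode 0444, i.e. visible but read-only at
 * runtime; when the driver is built in they are given on the kernel
 * command line instead (loop.max_part=7, and max_loop=8 via the legacy
 * __setup() hook at the bottom of this file).
 */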
1961
1962int loop_register_transfer(struct loop_func_table *funcs)
1963{
1964	unsigned int n = funcs->number;
1965
1966	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
1967		return -EINVAL;
1968	xfer_funcs[n] = funcs;
1969	return 0;
1970}
1971
1972static int unregister_transfer_cb(int id, void *ptr, void *data)
1973{
1974	struct loop_device *lo = ptr;
1975	struct loop_func_table *xfer = data;
1976
1977	mutex_lock(&loop_ctl_mutex);
1978	if (lo->lo_encryption == xfer)
1979		loop_release_xfer(lo);
1980	mutex_unlock(&loop_ctl_mutex);
1981	return 0;
1982}
1983
1984int loop_unregister_transfer(int number)
1985{
1986	unsigned int n = number;
1987	struct loop_func_table *xfer;
1988
1989	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1990		return -EINVAL;
1991
1992	xfer_funcs[n] = NULL;
1993	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
1994	return 0;
1995}
1996
1997EXPORT_SYMBOL(loop_register_transfer);
1998EXPORT_SYMBOL(loop_unregister_transfer);
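
/*
 * A minimal sketch of how an external module uses the two exports above
 * (cryptoloop does essentially this; the example_* names are
 * placeholders and the transfer callback is assumed defined elsewhere):
 *
 *	static struct loop_func_table example_funcs = {
 *		.number   = LO_CRYPT_CRYPTOAPI,	// slot in xfer_funcs[]
 *		.transfer = example_transfer,
 *		.owner    = THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return loop_register_transfer(&example_funcs);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI))
 *			pr_warn("example: unregister failed\n");
 *	}
 */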
1999
2000static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
2001		const struct blk_mq_queue_data *bd)
2002{
2003	struct request *rq = bd->rq;
2004	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
2005	struct loop_device *lo = rq->q->queuedata;
2006
2007	blk_mq_start_request(rq);
2008
2009	if (lo->lo_state != Lo_bound)
2010		return BLK_STS_IOERR;
2011
2012	switch (req_op(rq)) {
2013	case REQ_OP_FLUSH:
2014	case REQ_OP_DISCARD:
2015	case REQ_OP_WRITE_ZEROES:
2016		cmd->use_aio = false;
2017		break;
2018	default:
2019		cmd->use_aio = lo->use_dio;
2020		break;
2021	}
2022
2023	/* always use the first bio's css */
2024#ifdef CONFIG_BLK_CGROUP
2025	if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
2026		cmd->css = &bio_blkcg(rq->bio)->css;
2027		css_get(cmd->css);
2028	} else
2029#endif
2030		cmd->css = NULL;
2031	kthread_queue_work(&lo->worker, &cmd->work);
2032
2033	return BLK_STS_OK;
2034}
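
/*
 * Note on the css capture above: the actual backing-file I/O is issued
 * from a worker kthread, not from the task that submitted the bio, so
 * the blk-cgroup association is taken from the first bio and carried in
 * the command; the aio path then attaches the worker to that cgroup so
 * the I/O stays accounted to the original submitter.
 */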
2035
2036static void loop_handle_cmd(struct loop_cmd *cmd)
2037{
2038	struct request *rq = blk_mq_rq_from_pdu(cmd);
2039	const bool write = op_is_write(req_op(rq));
2040	struct loop_device *lo = rq->q->queuedata;
2041	int ret = 0;
2042
2043	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
2044		ret = -EIO;
2045		goto failed;
2046	}
2047
2048	ret = do_req_filebacked(lo, rq);
2049 failed:
2050	/* complete non-aio request */
2051	if (!cmd->use_aio || ret) {
2052		if (ret == -EOPNOTSUPP)
2053			cmd->ret = ret;
2054		else
2055			cmd->ret = ret ? -EIO : 0;
2056		if (likely(!blk_should_fake_timeout(rq->q)))
2057			blk_mq_complete_request(rq);
2058	}
2059}
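
/*
 * Note on the completion split above: aio (direct I/O) requests are
 * completed later from the backing file's completion callback once the
 * I/O actually finishes; only synchronous requests, and requests that
 * failed before submission, are completed right here.
 */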
2060
2061static void loop_queue_work(struct kthread_work *work)
2062{
2063	struct loop_cmd *cmd =
2064		container_of(work, struct loop_cmd, work);
2065
2066	loop_handle_cmd(cmd);
2067}
2068
2069static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
2070		unsigned int hctx_idx, unsigned int numa_node)
2071{
2072	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
2073
2074	kthread_init_work(&cmd->work, loop_queue_work);
2075	return 0;
2076}
2077
2078static const struct blk_mq_ops loop_mq_ops = {
2079	.queue_rq       = loop_queue_rq,
2080	.init_request	= loop_init_request,
2081	.complete	= lo_complete_rq,
2082};
2083
2084static int loop_add(struct loop_device **l, int i)
2085{
2086	struct loop_device *lo;
2087	struct gendisk *disk;
2088	int err;
2089
2090	err = -ENOMEM;
2091	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
2092	if (!lo)
2093		goto out;
2094
2095	lo->lo_state = Lo_unbound;
2096
2097	/* allocate id; if @i >= 0, we're requesting that specific id */
2098	if (i >= 0) {
2099		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
2100		if (err == -ENOSPC)
2101			err = -EEXIST;
2102	} else {
2103		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
2104	}
2105	if (err < 0)
2106		goto out_free_dev;
2107	i = err;
2108
2109	err = -ENOMEM;
2110	lo->tag_set.ops = &loop_mq_ops;
2111	lo->tag_set.nr_hw_queues = 1;
2112	lo->tag_set.queue_depth = 128;
2113	lo->tag_set.numa_node = NUMA_NO_NODE;
2114	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
2115	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
2116	lo->tag_set.driver_data = lo;
2117
2118	err = blk_mq_alloc_tag_set(&lo->tag_set);
2119	if (err)
2120		goto out_free_idr;
2121
2122	lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
2123	if (IS_ERR(lo->lo_queue)) {
2124		err = PTR_ERR(lo->lo_queue);
2125		goto out_cleanup_tags;
2126	}
2127	lo->lo_queue->queuedata = lo;
2128
2129	blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
2130
2131	/*
2132	 * By default, we do buffered I/O, so it doesn't make sense to enable
2133	 * merging: the I/O submitted to the backing file is handled page by
2134	 * page. For direct I/O, merging does help to dispatch bigger requests
2135	 * to the underlying disk. We enable merging once direct I/O is enabled.
2136	 */
2137	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
2138
2139	err = -ENOMEM;
2140	disk = lo->lo_disk = alloc_disk(1 << part_shift);
2141	if (!disk)
2142		goto out_free_queue;
2143
2144	/*
2145	 * Disable partition scanning by default. The in-kernel partition
2146	 * scanning can be requested individually per-device during its
2147	 * setup. Userspace can always add and remove partitions from all
2148	 * devices. The needed partition minors are allocated from the
2149	 * extended minor space, the main loop device numbers will continue
2150	 * to match the loop minors, regardless of the number of partitions
2151	 * used.
2152	 *
2153	 * If max_part is given, partition scanning is globally enabled for
2154	 * all loop devices. The minors for the main loop devices will be
2155	 * multiples of max_part.
2156	 *
2157	 * Note: Global-for-all-devices, set-only-at-init, read-only module
2158	 * parameters like 'max_loop' and 'max_part' make things needlessly
2159	 * complicated, are too static and inflexible, and may surprise
2160	 * userspace tools. Parameters like this should be avoided in general.
2161	 */
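	/*
	 * Worked example (numbers illustrative): max_part=7 gives
	 * part_shift=3, so loopN gets first_minor N << 3: loop0 is
	 * minor 0, loop1 is minor 8, and loop1's first partition
	 * (loop1p1) sits at minor 9.  With part_shift == 0, minors map
	 * 1:1 to loop device numbers and partition minors come from the
	 * extended devt space only.
	 */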
2162	if (!part_shift)
2163		disk->flags |= GENHD_FL_NO_PART_SCAN;
2164	disk->flags |= GENHD_FL_EXT_DEVT;
2165	atomic_set(&lo->lo_refcnt, 0);
2166	lo->lo_number		= i;
2167	spin_lock_init(&lo->lo_lock);
2168	disk->major		= LOOP_MAJOR;
2169	disk->first_minor	= i << part_shift;
2170	disk->fops		= &lo_fops;
2171	disk->private_data	= lo;
2172	disk->queue		= lo->lo_queue;
2173	sprintf(disk->disk_name, "loop%d", i);
2174	add_disk(disk);
2175	*l = lo;
2176	return lo->lo_number;
2177
2178out_free_queue:
2179	blk_cleanup_queue(lo->lo_queue);
2180out_cleanup_tags:
2181	blk_mq_free_tag_set(&lo->tag_set);
2182out_free_idr:
2183	idr_remove(&loop_index_idr, i);
2184out_free_dev:
2185	kfree(lo);
2186out:
2187	return err;
2188}
2189
2190static void loop_remove(struct loop_device *lo)
2191{
2192	del_gendisk(lo->lo_disk);
2193	blk_cleanup_queue(lo->lo_queue);
2194	blk_mq_free_tag_set(&lo->tag_set);
2195	put_disk(lo->lo_disk);
2196	kfree(lo);
2197}
2198
2199static int find_free_cb(int id, void *ptr, void *data)
2200{
2201	struct loop_device *lo = ptr;
2202	struct loop_device **l = data;
2203
2204	if (lo->lo_state == Lo_unbound) {
2205		*l = lo;
2206		return 1;
2207	}
2208	return 0;
2209}
2210
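/*
 * Look up a loop device: a negative @i means "find the first unbound
 * device".  On success, *l is set and the device number is returned;
 * otherwise -ENODEV.
 */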
2211static int loop_lookup(struct loop_device **l, int i)
2212{
2213	struct loop_device *lo;
2214	int ret = -ENODEV;
2215
2216	if (i < 0) {
2217		int err;
2218
2219		err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
2220		if (err == 1) {
2221			*l = lo;
2222			ret = lo->lo_number;
2223		}
2224		goto out;
2225	}
2226
2227	/* lookup and return a specific i */
2228	lo = idr_find(&loop_index_idr, i);
2229	if (lo) {
2230		*l = lo;
2231		ret = lo->lo_number;
2232	}
2233out:
2234	return ret;
2235}
2236
2237static struct kobject *loop_probe(dev_t dev, int *part, void *data)
2238{
2239	struct loop_device *lo;
2240	struct kobject *kobj;
2241	int err;
2242
2243	mutex_lock(&loop_ctl_mutex);
2244	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
2245	if (err < 0)
2246		err = loop_add(&lo, MINOR(dev) >> part_shift);
2247	if (err < 0)
2248		kobj = NULL;
2249	else
2250		kobj = get_disk_and_module(lo->lo_disk);
2251	mutex_unlock(&loop_ctl_mutex);
2252
2253	*part = 0;
2254	return kobj;
2255}
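
/*
 * The probe hook above is what lets a 'dead' device node come alive on
 * first access: opening a minor that has no device yet ends up in
 * loop_probe(), which creates it on the fly.  E.g. (illustrative,
 * assuming part_shift == 0; LOOP_MAJOR is 7):
 *
 *	mknod /dev/loop23 b 7 23
 *	losetup /dev/loop23 img		# the open() instantiates loop23
 */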
2256
2257static long loop_control_ioctl(struct file *file, unsigned int cmd,
2258			       unsigned long parm)
2259{
2260	struct loop_device *lo;
2261	int ret;
2262
2263	ret = mutex_lock_killable(&loop_ctl_mutex);
2264	if (ret)
2265		return ret;
2266
2267	ret = -ENOSYS;
2268	switch (cmd) {
2269	case LOOP_CTL_ADD:
2270		ret = loop_lookup(&lo, parm);
2271		if (ret >= 0) {
2272			ret = -EEXIST;
2273			break;
2274		}
2275		ret = loop_add(&lo, parm);
2276		break;
2277	case LOOP_CTL_REMOVE:
2278		ret = loop_lookup(&lo, parm);
2279		if (ret < 0)
2280			break;
2281		if (lo->lo_state != Lo_unbound) {
2282			ret = -EBUSY;
2283			break;
2284		}
2285		if (atomic_read(&lo->lo_refcnt) > 0) {
2286			ret = -EBUSY;
2287			break;
2288		}
2289		lo->lo_disk->private_data = NULL;
2290		idr_remove(&loop_index_idr, lo->lo_number);
2291		loop_remove(lo);
2292		break;
2293	case LOOP_CTL_GET_FREE:
2294		ret = loop_lookup(&lo, -1);
2295		if (ret >= 0)
2296			break;
2297		ret = loop_add(&lo, -1);
2298	}
2299	mutex_unlock(&loop_ctl_mutex);
2300
2301	return ret;
2302}
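
/*
 * A minimal userspace sketch against the control node handled above
 * (error handling elided; LOOP_CTL_* come from <linux/loop.h>):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int ctl = open("/dev/loop-control", O_RDWR);
 *	int nr  = ioctl(ctl, LOOP_CTL_GET_FREE);  // first unbound device
 *
 *	ioctl(ctl, LOOP_CTL_ADD, 42);	  // create loop42, -EEXIST if taken
 *	ioctl(ctl, LOOP_CTL_REMOVE, 42);  // remove it (must be unbound/idle)
 */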
2303
2304static const struct file_operations loop_ctl_fops = {
2305	.open		= nonseekable_open,
2306	.unlocked_ioctl	= loop_control_ioctl,
2307	.compat_ioctl	= loop_control_ioctl,
2308	.owner		= THIS_MODULE,
2309	.llseek		= noop_llseek,
2310};
2311
2312static struct miscdevice loop_misc = {
2313	.minor		= LOOP_CTRL_MINOR,
2314	.name		= "loop-control",
2315	.fops		= &loop_ctl_fops,
2316};
2317
2318MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
2319MODULE_ALIAS("devname:loop-control");
2320
2321static int __init loop_init(void)
2322{
2323	int i, nr;
2324	unsigned long range;
2325	struct loop_device *lo;
2326	int err;
2327
2328	part_shift = 0;
2329	if (max_part > 0) {
2330		part_shift = fls(max_part);
2331
2332		/*
2333		 * Adjust max_part according to part_shift as it is exported
2334		 * to user space so that the user can work out the correct
2335		 * minor numbers when creating more devices.
2336		 *
2337		 * Note that -1 is required because partition 0 is reserved
2338		 * for the whole disk.
2339		 */
2340		max_part = (1UL << part_shift) - 1;
2341	}
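
	/*
	 * Worked example of the rounding above (values illustrative):
	 * max_part=7 -> fls(7)=3 -> part_shift=3 -> max_part=(1<<3)-1=7,
	 * max_part=8 -> fls(8)=4 -> part_shift=4 -> max_part=15.
	 * A request that is not 2^n - 1 is effectively rounded up so
	 * that device minors stay aligned to 1 << part_shift.
	 */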
2342
2343	if ((1UL << part_shift) > DISK_MAX_PARTS) {
2344		err = -EINVAL;
2345		goto err_out;
2346	}
2347
2348	if (max_loop > 1UL << (MINORBITS - part_shift)) {
2349		err = -EINVAL;
2350		goto err_out;
2351	}
2352
2353	/*
2354	 * If max_loop is specified, create that many devices upfront.
2355	 * This also becomes a hard limit. If max_loop is not specified,
2356	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
2357	 * init time. Loop devices can be requested on-demand with the
2358	 * /dev/loop-control interface, or be instantiated by accessing
2359	 * a 'dead' device node.
2360	 */
2361	if (max_loop) {
2362		nr = max_loop;
2363		range = max_loop << part_shift;
2364	} else {
2365		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
2366		range = 1UL << MINORBITS;
2367	}
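
	/*
	 * E.g. (illustrative): max_loop=8 with part_shift=3 gives nr=8
	 * and range=8 << 3 = 64 reserved minors; with max_loop unset,
	 * only CONFIG_BLK_DEV_LOOP_MIN_COUNT devices are pre-created but
	 * the whole minor range is claimed so loop_probe() covers any
	 * minor.
	 */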
2368
2369	err = misc_register(&loop_misc);
2370	if (err < 0)
2371		goto err_out;
2372
2373
2374	if (register_blkdev(LOOP_MAJOR, "loop")) {
2375		err = -EIO;
2376		goto misc_out;
2377	}
2378
2379	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
2380				  THIS_MODULE, loop_probe, NULL, NULL);
2381
2382	/* pre-create the number of devices given by config or max_loop */
2383	mutex_lock(&loop_ctl_mutex);
2384	for (i = 0; i < nr; i++)
2385		loop_add(&lo, i);
2386	mutex_unlock(&loop_ctl_mutex);
2387
2388	printk(KERN_INFO "loop: module loaded\n");
2389	return 0;
2390
2391misc_out:
2392	misc_deregister(&loop_misc);
2393err_out:
2394	return err;
2395}
2396
2397static int loop_exit_cb(int id, void *ptr, void *data)
2398{
2399	struct loop_device *lo = ptr;
2400
2401	loop_remove(lo);
2402	return 0;
2403}
2404
2405static void __exit loop_exit(void)
2406{
2407	unsigned long range;
2408
2409	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
2410
2411	mutex_lock(&loop_ctl_mutex);
2412
2413	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
2414	idr_destroy(&loop_index_idr);
2415
2416	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
2417	unregister_blkdev(LOOP_MAJOR, "loop");
2418
2419	misc_deregister(&loop_misc);
2420
2421	mutex_unlock(&loop_ctl_mutex);
2422}
2423
2424module_init(loop_init);
2425module_exit(loop_exit);
2426
2427#ifndef MODULE
2428static int __init max_loop_setup(char *str)
2429{
2430	max_loop = simple_strtol(str, NULL, 0);
2431	return 1;
2432}
2433
2434__setup("max_loop=", max_loop_setup);
2435#endif
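
/*
 * With the driver built in, the boot parameter registered above goes
 * directly on the kernel command line, e.g. (illustrative):
 *
 *	max_loop=16
 *
 * which pre-creates loop0..loop15 and makes 16 the hard device limit.
 */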