target_core_iblock.c (Linux v6.13.7)
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/pr.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>
#include <linux/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"
#include "target_core_pr.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

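/*
 * An iblock_dev embeds its struct se_device, so the generic dev pointer
 * handed to us by the target core converts back with container_of().
 */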
static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}

static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
				   GFP_KERNEL);
	if (!ib_dev->ibd_plug)
		goto free_dev;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;

free_dev:
	kfree(ib_dev);
	return NULL;
}

static bool iblock_configure_unmap(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	return target_configure_unmap_from_queue(&dev->dev_attrib,
						 ib_dev->ibd_bd);
}

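/*
 * Open the block device named by the udev_path= parameter and derive the
 * exported attributes (block size, max sectors, queue depth, write-same
 * length) from its request queue, then probe for T10 PI support.
 */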
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct file *bdev_file;
	struct block_device *bd;
	struct blk_integrity *bi;
	blk_mode_t mode = BLK_OPEN_READ;
	unsigned int max_write_zeroes_sectors;
	int ret;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	if (!ib_dev->ibd_readonly)
		mode |= BLK_OPEN_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
					NULL);
	if (IS_ERR(bdev_file)) {
		ret = PTR_ERR(bdev_file);
		goto out_free_bioset;
	}
	ib_dev->ibd_bdev_file = bdev_file;
	ib_dev->ibd_bd = bd = file_bdev(bdev_file);

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = mult_frac(queue_max_hw_sectors(q),
			SECTOR_SIZE,
			dev->dev_attrib.hw_block_size);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
	if (max_write_zeroes_sectors)
		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
	else
		dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (bdev_nonrot(bd))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (!bi)
		return 0;

	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_IP:
		pr_err("IBLOCK export of blk_integrity: %s not supported\n",
			blk_integrity_profile_name(bi));
		ret = -ENOSYS;
		goto out_blkdev_put;
	case BLK_INTEGRITY_CSUM_CRC:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		else
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		break;
	default:
		break;
	}

	if (dev->dev_attrib.pi_prot_type) {
		struct bio_set *bs = &ib_dev->ibd_bio_set;

		if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
			pr_err("Unable to allocate bioset for PI\n");
			ret = -ENOMEM;
			goto out_blkdev_put;
		}
		pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
			 &bs->bio_integrity_pool);
	}

	dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	return 0;

out_blkdev_put:
	fput(ib_dev->ibd_bdev_file);
out_free_bioset:
	bioset_exit(&ib_dev->ibd_bio_set);
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev->ibd_plug);
	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bdev_file)
		fput(ib_dev->ibd_bdev_file);
	bioset_exit(&ib_dev->ibd_bio_set);
}

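/*
 * Per-cpu bio plugging: the target core calls plug_device/unplug_device
 * around command submission so that bios queued from the same CPU are
 * batched into the block layer in one go.
 */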
static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
	struct iblock_dev_plug *ib_dev_plug;

	/*
	 * Each se_device has per-cpu work this can be run from. We
	 * shouldn't have multiple threads on the same CPU calling this
	 * at the same time.
	 */
	ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
		return NULL;

	blk_start_plug(&ib_dev_plug->blk_plug);
	return &ib_dev_plug->se_plug;
}

static void iblock_unplug_device(struct se_dev_plug *se_plug)
{
	struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
					struct iblock_dev_plug, se_plug);

	blk_finish_plug(&ib_dev_plug->blk_plug);
	clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
}

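/*
 * Report the last addressable LBA. When the exported block_size differs
 * from the backing device's logical block size, rescale by the power-of-two
 * ratio between the two (both are always 512, 1024, 2048 or 4096 bytes).
 */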
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	u32 block_size = bdev_logical_block_size(ib_dev->ibd_bd);
	unsigned long long blocks_long =
		div_u64(bdev_nr_bytes(ib_dev->ibd_bd), block_size) - 1;

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

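/*
 * Called once per completed bio; the last completion (ibr->pending hitting
 * zero) translates the aggregate bio status into a SAM status for the
 * initiator.
 */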
static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!refcount_dec_and_test(&ibr->pending))
		return;

	if (blk_status == BLK_STS_RESV_CONFLICT)
		status = SAM_STAT_RESERVATION_CONFLICT;
	else if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;
	blk_status_t blk_status = bio->bi_status;

	if (bio->bi_status) {
		pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd, blk_status);
}

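/* Allocate a bio from the per-device bioset and point it at this command. */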
static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
				  blk_opf_t opf)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
			       GFP_NOIO, &ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}

static void iblock_submit_bios(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;
	/*
	 * The block layer handles nested plugs, so just plug/unplug to handle
	 * fabric drivers that don't support batching and multi-bio commands.
	 */
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_status)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

	if (cmd) {
		if (bio->bi_status)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
			GFP_KERNEL);
	bio->bi_end_io = iblock_end_io_flush;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(bio);
	return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct se_device *dev = cmd->se_dev;
	int ret;

	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

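/*
 * WRITE_SAME fast path: if the single-block payload is all zeros, the whole
 * range can be handed to blkdev_issue_zeroout() instead of building bios.
 */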
static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *sg = &cmd->t_data_sg[0];
	unsigned char *buf, *not_zero;
	int ret;

	buf = kmap(sg_page(sg)) + sg->offset;
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	/*
	 * Fall back to the iblock_execute_write_same() slow path if the
	 * incoming WRITE_SAME payload does not contain zeros.
	 */
	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
	kunmap(sg_page(sg));

	if (not_zero)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = blkdev_issue_zeroout(bdev,
				target_to_linux_sector(dev, cmd->t_task_lba),
				target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd)),
				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
	if (ret)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

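/*
 * Emulate WRITE_SAME by resubmitting the one-block payload for every sector
 * in the range, chaining new bios whenever bio_add_page() fills one up.
 */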
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (!cmd->t_data_nents)
		return TCM_INVALID_CDB_FIELD;

	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	if (bdev_write_zeroes_sectors(bdev)) {
		if (!iblock_execute_zero_out(bdev, cmd))
			return 0;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sectors -= sg->length >> SECTOR_SHIFT;
	}

	iblock_submit_bios(&list);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

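/*
 * Device parameters arrive as a comma/newline separated configfs control
 * string, e.g. (device path and configfs names illustrative only):
 *
 *   echo "udev_path=/dev/sdb,readonly=1" > \
 *       /sys/kernel/config/target/core/iblock_0/my_dev/control
 */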
static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %pg", bd);
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
			"CLAIMED: IBLOCK");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

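/*
 * Attach a bio_integrity_payload carrying the command's protection
 * information (T10 PI) to @bio, consuming pages from the protection
 * scatterlist iterator.
 */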
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
		 struct sg_mapping_iter *miter)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
				  (bi->interval_exp - SECTOR_SHIFT));

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	resid = bio_integrity_bytes(bi, bio_sectors(bio));
	while (resid > 0 && sg_miter_next(miter)) {

		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (rc != len) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
			  miter->page, len, offset_in_page(miter->addr));

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}

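/*
 * Main READ/WRITE path: translate the command's scatterlist into one or
 * more bios, attach PI metadata when configured, and submit in batches of
 * IBLOCK_MAX_BIO_PER_TASK.
 */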
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	blk_opf_t opf;
	unsigned bio_cnt;
	int i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int miter_dir;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

		/*
		 * Set bits to indicate WRITE_ODIRECT so we are not throttled
		 * by WBT.
		 */
		opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
		/*
		 * Force writethrough using REQ_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		miter_dir = SG_MITER_TO_SG;
		if (bdev_fua(ib_dev->ibd_bd)) {
			if (cmd->se_cmd_flags & SCF_FUA)
				opf |= REQ_FUA;
			else if (!bdev_write_cache(ib_dev->ibd_bd))
				opf |= REQ_FUA;
		}
	} else {
		opf = REQ_OP_READ;
		miter_dir = SG_MITER_FROM_SG;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		refcount_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd, BLK_STS_OK);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 2);
	bio_cnt = 1;

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
			       miter_dir);

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
				if (rc)
					goto fail_put_bios;
			}

			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list);
	iblock_complete_cmd(cmd, BLK_STS_OK);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

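/*
 * PERSISTENT RESERVE OUT passthrough: map each SCSI service action onto the
 * corresponding block layer pr_ops callback of the underlying device.
 */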
static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
					    u64 sa_key, u8 type, bool aptpl)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bdev = ib_dev->ibd_bd;
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	int ret;

	if (!ops) {
		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	switch (sa) {
	case PRO_REGISTER:
	case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
		if (!ops->pr_register) {
			pr_err("block device does not support pr_register.\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}

		/* The block layer pr ops always enables aptpl */
		if (!aptpl)
			pr_info("APTPL not set by initiator, but will be used.\n");

		ret = ops->pr_register(bdev, key, sa_key,
				sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY);
		break;
	case PRO_RESERVE:
		if (!ops->pr_reserve) {
			pr_err("block_device does not support pr_reserve.\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}

		ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0);
		break;
	case PRO_CLEAR:
		if (!ops->pr_clear) {
			pr_err("block_device does not support pr_clear.\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}

		ret = ops->pr_clear(bdev, key);
		break;
	case PRO_PREEMPT:
	case PRO_PREEMPT_AND_ABORT:
		if (!ops->pr_preempt) {
			pr_err("block_device does not support pr_preempt.\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}

		ret = ops->pr_preempt(bdev, key, sa_key,
				      scsi_pr_type_to_block(type),
				      sa == PRO_PREEMPT_AND_ABORT);
		break;
	case PRO_RELEASE:
		if (!ops->pr_release) {
			pr_err("block_device does not support pr_release.\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}

		ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type));
		break;
	default:
		pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	if (!ret)
		return TCM_NO_SENSE;
	else if (ret == PR_STS_RESERVATION_CONFLICT)
		return TCM_RESERVATION_CONFLICT;
	else
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static void iblock_pr_report_caps(unsigned char *param_data)
{
	u16 len = 8;

	put_unaligned_be16(len, &param_data[0]);
	/*
	 * When using the pr_ops passthrough method we only support exporting
	 * the device through one target port because from the backend module
	 * level we can't see the target port config. As a result we only
	 * support registration directly from the I_T nexus the cmd is sent
	 * through and do not set ATP_C here.
	 *
	 * The block layer pr_ops do not support passing in initiators so
	 * we don't set SIP_C here.
	 */
	/* PTPL_C: Persistence across Target Power Loss bit */
	param_data[2] |= 0x01;
	/*
	 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
	 * set the TMV: Task Mask Valid bit.
	 */
	param_data[3] |= 0x80;
	/*
	 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
	 */
	param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */
	/*
	 * PTPL_A: Persistence across Target Power Loss Active bit. The block
	 * layer pr ops always enables this so report it active.
	 */
	param_data[3] |= 0x01;
	/*
	 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
	 */
	param_data[4] |= 0x80; /* PR_TYPE_WRITE_EXCLUSIVE_ALLREG */
	param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
	param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
	param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
	param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
	param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
}

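/*
 * PERSISTENT RESERVE IN / READ KEYS: fetch the registered keys via
 * pr_read_keys, growing the buffer until all paths fit, and marshal them
 * into big-endian parameter data.
 */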
static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd,
					  unsigned char *param_data)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bdev = ib_dev->ibd_bd;
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	int i, len, paths, data_offset;
	struct pr_keys *keys;
	sense_reason_t ret;

	if (!ops) {
		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	if (!ops->pr_read_keys) {
		pr_err("Block device does not support read_keys.\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * We don't know what's under us, but dm-multipath will register every
	 * path with the same key, so start off with enough space for 16 paths,
	 * which is not a lot of memory and should normally be enough.
	 */
	paths = 16;
retry:
	len = 8 * paths;
	keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL);
	if (!keys)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	keys->num_keys = paths;
	if (!ops->pr_read_keys(bdev, keys)) {
		if (keys->num_keys > paths) {
			kfree(keys);
			paths *= 2;
			goto retry;
		}
	} else {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto free_keys;
	}

	ret = TCM_NO_SENSE;

	put_unaligned_be32(keys->generation, &param_data[0]);
	if (!keys->num_keys) {
		put_unaligned_be32(0, &param_data[4]);
		goto free_keys;
	}

	put_unaligned_be32(8 * keys->num_keys, &param_data[4]);

	data_offset = 8;
	for (i = 0; i < keys->num_keys; i++) {
		if (data_offset + 8 > cmd->data_length)
			break;

		put_unaligned_be64(keys->keys[i], &param_data[data_offset]);
		data_offset += 8;
	}

free_keys:
	kfree(keys);
	return ret;
}

static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd,
						 unsigned char *param_data)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bdev = ib_dev->ibd_bd;
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_held_reservation rsv = { };

	if (!ops) {
		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	if (!ops->pr_read_reservation) {
		pr_err("Block device does not support read_reservation.\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	if (ops->pr_read_reservation(bdev, &rsv))
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	put_unaligned_be32(rsv.generation, &param_data[0]);
	if (!block_pr_type_to_scsi(rsv.type)) {
		put_unaligned_be32(0, &param_data[4]);
		return TCM_NO_SENSE;
	}

	put_unaligned_be32(16, &param_data[4]);

	if (cmd->data_length < 16)
		return TCM_NO_SENSE;
	put_unaligned_be64(rsv.key, &param_data[8]);

	if (cmd->data_length < 22)
		return TCM_NO_SENSE;
	param_data[21] = block_pr_type_to_scsi(rsv.type);

	return TCM_NO_SENSE;
}

static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa,
					   unsigned char *param_data)
{
	sense_reason_t ret = TCM_NO_SENSE;

	switch (sa) {
	case PRI_REPORT_CAPABILITIES:
		iblock_pr_report_caps(param_data);
		break;
	case PRI_READ_KEYS:
		ret = iblock_pr_read_keys(cmd, param_data);
		break;
	case PRI_READ_RESERVATION:
		ret = iblock_pr_read_reservation(cmd, param_data);
		break;
	default:
		pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return ret;
}

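/*
 * The attribute getters below surface the backing device's queue limits
 * (alignment offset, physical/logical block ratio, minimum and optimal I/O
 * sizes) for READ CAPACITY(16) and Block Limits VPD reporting.
 */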
static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	unsigned int logs_per_phys =
		bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct exec_cmd_ops iblock_exec_cmd_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
	.execute_pr_out		= iblock_execute_pr_out,
	.execute_pr_in		= iblock_execute_pr_in,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
}

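/*
 * Backend ops vector registered with the target core; configfs operations
 * and I/O dispatch for "iblock" devices funnel through these callbacks.
 */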
static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR,
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.destroy_device		= iblock_destroy_device,
	.free_device		= iblock_free_device,
	.configure_unmap	= iblock_configure_unmap,
	.plug_device		= iblock_plug_device,
	.unplug_device		= iblock_unplug_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);