v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*******************************************************************************
  3 * Filename:  target_core_iblock.c
  4 *
  5 * This file contains the Storage Engine  <-> Linux BlockIO transport
  6 * specific functions.
  7 *
  8 * (c) Copyright 2003-2013 Datera, Inc.
  9 *
 10 * Nicholas A. Bellinger <nab@kernel.org>
 11 *
 12 ******************************************************************************/
 13
 14#include <linux/string.h>
 15#include <linux/parser.h>
 16#include <linux/timer.h>
 17#include <linux/fs.h>
 18#include <linux/blkdev.h>
 19#include <linux/slab.h>
 20#include <linux/spinlock.h>
 21#include <linux/bio.h>
 22#include <linux/genhd.h>
 23#include <linux/file.h>
 24#include <linux/module.h>
 25#include <scsi/scsi_proto.h>
 26#include <asm/unaligned.h>
 27
 28#include <target/target_core_base.h>
 29#include <target/target_core_backend.h>
 30
 31#include "target_core_iblock.h"
 32
 33#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
 34#define IBLOCK_BIO_POOL_SIZE	128
 35
 36static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
 37{
 38	return container_of(dev, struct iblock_dev, dev);
 39}
 40
 41
 42static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
 43{
 44	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
 45		" Generic Target Core Stack %s\n", hba->hba_id,
 46		IBLOCK_VERSION, TARGET_CORE_VERSION);
 47	return 0;
 48}
 49
 50static void iblock_detach_hba(struct se_hba *hba)
 51{
 52}
 53
 54static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
 55{
 56	struct iblock_dev *ib_dev = NULL;
 57
 58	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
 59	if (!ib_dev) {
 60		pr_err("Unable to allocate struct iblock_dev\n");
 61		return NULL;
 62	}
 63
 64	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);
 65
 66	return &ib_dev->dev;
 67}
 68
 69static int iblock_configure_device(struct se_device *dev)
 70{
 71	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 72	struct request_queue *q;
 73	struct block_device *bd = NULL;
 74	struct blk_integrity *bi;
 75	fmode_t mode;
 76	unsigned int max_write_zeroes_sectors;
 77	int ret = -ENOMEM;
 78
 79	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
 80		pr_err("Missing udev_path= parameters for IBLOCK\n");
 81		return -EINVAL;
 82	}
 83
 84	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 85	if (ret) {
 86		pr_err("IBLOCK: Unable to create bioset\n");
 87		goto out;
 88	}
 89
 90	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
 91			ib_dev->ibd_udev_path);
 92
 93	mode = FMODE_READ|FMODE_EXCL;
 94	if (!ib_dev->ibd_readonly)
 95		mode |= FMODE_WRITE;
 96	else
 97		dev->dev_flags |= DF_READ_ONLY;
 98
 99	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
100	if (IS_ERR(bd)) {
101		ret = PTR_ERR(bd);
102		goto out_free_bioset;
103	}
104	ib_dev->ibd_bd = bd;
105
106	q = bdev_get_queue(bd);
107
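	/*
	 * Derive the SCSI-visible hardware limits from the backing
	 * device's request queue.
	 */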
108	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
109	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
110	dev->dev_attrib.hw_queue_depth = q->nr_requests;
111
112	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
113		pr_debug("IBLOCK: BLOCK Discard support available,"
114			 " disabled by default\n");
115
116	/*
117	 * Enable write same emulation for IBLOCK and use 0xFFFF as
118	 * the smaller WRITE_SAME(10) only has a two-byte block count.
119	 */
120	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
121	if (max_write_zeroes_sectors)
122		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
123	else
124		dev->dev_attrib.max_write_same_len = 0xFFFF;
125
126	if (blk_queue_nonrot(q))
127		dev->dev_attrib.is_nonrot = 1;
128
129	bi = bdev_get_integrity(bd);
130	if (bi) {
131		struct bio_set *bs = &ib_dev->ibd_bio_set;
132
133		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
134		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
135			pr_err("IBLOCK export of blk_integrity: %s not"
136			       " supported\n", bi->profile->name);
137			ret = -ENOSYS;
138			goto out_blkdev_put;
139		}
140
141		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
142			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
143		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
144			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
145		}
146
147		if (dev->dev_attrib.pi_prot_type) {
148			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
149				pr_err("Unable to allocate bioset for PI\n");
150				ret = -ENOMEM;
151				goto out_blkdev_put;
152			}
153			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
154				 &bs->bio_integrity_pool);
155		}
156		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
157	}
158
159	return 0;
160
161out_blkdev_put:
162	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
163out_free_bioset:
164	bioset_exit(&ib_dev->ibd_bio_set);
165out:
166	return ret;
167}
168
169static void iblock_dev_call_rcu(struct rcu_head *p)
170{
171	struct se_device *dev = container_of(p, struct se_device, rcu_head);
172	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
173
174	kfree(ib_dev);
175}
176
177static void iblock_free_device(struct se_device *dev)
178{
179	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
180}
181
182static void iblock_destroy_device(struct se_device *dev)
183{
184	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
185
186	if (ib_dev->ibd_bd != NULL)
187		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
188	bioset_exit(&ib_dev->ibd_bio_set);
189}
190
191static unsigned long long iblock_emulate_read_cap_with_block_size(
192	struct se_device *dev,
193	struct block_device *bd,
194	struct request_queue *q)
195{
196	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
197					bdev_logical_block_size(bd)) - 1);
198	u32 block_size = bdev_logical_block_size(bd);
199
200	if (block_size == dev->dev_attrib.block_size)
201		return blocks_long;
202
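	/*
	 * Capacity was computed in the backing device's logical block size;
	 * rescale the last-LBA count when the exported block size differs.
	 * Each shift is one power-of-two step in the size ratio.
	 */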
203	switch (block_size) {
204	case 4096:
205		switch (dev->dev_attrib.block_size) {
206		case 2048:
207			blocks_long <<= 1;
208			break;
209		case 1024:
210			blocks_long <<= 2;
211			break;
212		case 512:
213			blocks_long <<= 3;
214		default:
215			break;
216		}
217		break;
218	case 2048:
219		switch (dev->dev_attrib.block_size) {
220		case 4096:
221			blocks_long >>= 1;
222			break;
223		case 1024:
224			blocks_long <<= 1;
225			break;
226		case 512:
227			blocks_long <<= 2;
228			break;
229		default:
230			break;
231		}
232		break;
233	case 1024:
234		switch (dev->dev_attrib.block_size) {
235		case 4096:
236			blocks_long >>= 2;
237			break;
238		case 2048:
239			blocks_long >>= 1;
240			break;
241		case 512:
242			blocks_long <<= 1;
243			break;
244		default:
245			break;
246		}
247		break;
248	case 512:
249		switch (dev->dev_attrib.block_size) {
250		case 4096:
251			blocks_long >>= 3;
252			break;
253		case 2048:
254			blocks_long >>= 2;
255			break;
256		case 1024:
257			blocks_long >>= 1;
258			break;
259		default:
260			break;
261		}
262		break;
263	default:
264		break;
265	}
266
267	return blocks_long;
268}
269
270static void iblock_complete_cmd(struct se_cmd *cmd)
271{
272	struct iblock_req *ibr = cmd->priv;
273	u8 status;
274
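	/*
	 * Only the final reference drop (last bio or the submit path)
	 * completes the command.
	 */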
275	if (!refcount_dec_and_test(&ibr->pending))
276		return;
277
278	if (atomic_read(&ibr->ib_bio_err_cnt))
279		status = SAM_STAT_CHECK_CONDITION;
280	else
281		status = SAM_STAT_GOOD;
282
283	target_complete_cmd(cmd, status);
284	kfree(ibr);
285}
286
287static void iblock_bio_done(struct bio *bio)
288{
289	struct se_cmd *cmd = bio->bi_private;
290	struct iblock_req *ibr = cmd->priv;
291
292	if (bio->bi_status) {
293		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
294		/*
295		 * Bump the ib_bio_err_cnt and release bio.
296		 */
297		atomic_inc(&ibr->ib_bio_err_cnt);
298		smp_mb__after_atomic();
299	}
300
301	bio_put(bio);
302
303	iblock_complete_cmd(cmd);
304}
305
306static struct bio *
307iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
308	       int op_flags)
309{
310	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
311	struct bio *bio;
312
313	/*
314	 * Only allocate as many vector entries as the bio code allows us to,
315	 * we'll loop later on until we have handled the whole request.
316	 */
317	if (sg_num > BIO_MAX_PAGES)
318		sg_num = BIO_MAX_PAGES;
319
320	bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
321	if (!bio) {
322		pr_err("Unable to allocate memory for bio\n");
323		return NULL;
324	}
325
326	bio_set_dev(bio, ib_dev->ibd_bd);
327	bio->bi_private = cmd;
328	bio->bi_end_io = &iblock_bio_done;
329	bio->bi_iter.bi_sector = lba;
330	bio_set_op_attrs(bio, op, op_flags);
331
332	return bio;
333}
334
335static void iblock_submit_bios(struct bio_list *list)
336{
337	struct blk_plug plug;
338	struct bio *bio;
339
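	/*
	 * Plug/unplug around submission so multi-bio commands are batched
	 * by the block layer.
	 */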
340	blk_start_plug(&plug);
341	while ((bio = bio_list_pop(list)))
342		submit_bio(bio);
343	blk_finish_plug(&plug);
344}
345
346static void iblock_end_io_flush(struct bio *bio)
347{
348	struct se_cmd *cmd = bio->bi_private;
349
350	if (bio->bi_status)
351		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
352
353	if (cmd) {
354		if (bio->bi_status)
355			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
356		else
357			target_complete_cmd(cmd, SAM_STAT_GOOD);
358	}
359
360	bio_put(bio);
361}
362
363/*
364 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
365 * always flush the whole cache.
366 */
367static sense_reason_t
368iblock_execute_sync_cache(struct se_cmd *cmd)
369{
370	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
371	int immed = (cmd->t_task_cdb[1] & 0x2);
372	struct bio *bio;
373
374	/*
375	 * If the Immediate bit is set, queue up the GOOD response
376	 * for this SYNCHRONIZE_CACHE op.
377	 */
378	if (immed)
379		target_complete_cmd(cmd, SAM_STAT_GOOD);
380
381	bio = bio_alloc(GFP_KERNEL, 0);
382	bio->bi_end_io = iblock_end_io_flush;
383	bio_set_dev(bio, ib_dev->ibd_bd);
384	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
385	if (!immed)
386		bio->bi_private = cmd;
387	submit_bio(bio);
388	return 0;
389}
390
391static sense_reason_t
392iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
393{
394	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
395	struct se_device *dev = cmd->se_dev;
396	int ret;
397
398	ret = blkdev_issue_discard(bdev,
399				   target_to_linux_sector(dev, lba),
400				   target_to_linux_sector(dev,  nolb),
401				   GFP_KERNEL, 0);
402	if (ret < 0) {
403		pr_err("blkdev_issue_discard() failed: %d\n", ret);
404		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
405	}
406
407	return 0;
408}
409
410static sense_reason_t
411iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
412{
413	struct se_device *dev = cmd->se_dev;
414	struct scatterlist *sg = &cmd->t_data_sg[0];
415	unsigned char *buf, *not_zero;
416	int ret;
417
418	buf = kmap(sg_page(sg)) + sg->offset;
419	if (!buf)
420		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
421	/*
422	 * Fall back to block_execute_write_same() slow-path if
423	 * incoming WRITE_SAME payload does not contain zeros.
424	 */
425	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
426	kunmap(sg_page(sg));
427
428	if (not_zero)
429		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
430
431	ret = blkdev_issue_zeroout(bdev,
432				target_to_linux_sector(dev, cmd->t_task_lba),
433				target_to_linux_sector(dev,
434					sbc_get_write_same_sectors(cmd)),
435				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
436	if (ret)
437		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
438
439	target_complete_cmd(cmd, GOOD);
440	return 0;
441}
442
443static sense_reason_t
444iblock_execute_write_same(struct se_cmd *cmd)
445{
446	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
447	struct iblock_req *ibr;
448	struct scatterlist *sg;
449	struct bio *bio;
450	struct bio_list list;
451	struct se_device *dev = cmd->se_dev;
452	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
453	sector_t sectors = target_to_linux_sector(dev,
454					sbc_get_write_same_sectors(cmd));
455
456	if (cmd->prot_op) {
457		pr_err("WRITE_SAME: Protection information with IBLOCK"
458		       " backends not supported\n");
459		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
460	}
461	sg = &cmd->t_data_sg[0];
462
463	if (cmd->t_data_nents > 1 ||
464	    sg->length != cmd->se_dev->dev_attrib.block_size) {
465		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
466			" block_size: %u\n", cmd->t_data_nents, sg->length,
467			cmd->se_dev->dev_attrib.block_size);
468		return TCM_INVALID_CDB_FIELD;
469	}
470
471	if (bdev_write_zeroes_sectors(bdev)) {
472		if (!iblock_execute_zero_out(bdev, cmd))
473			return 0;
474	}
475
476	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
477	if (!ibr)
478		goto fail;
479	cmd->priv = ibr;
480
481	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
482	if (!bio)
483		goto fail_free_ibr;
484
485	bio_list_init(&list);
486	bio_list_add(&list, bio);
487
488	refcount_set(&ibr->pending, 1);
489
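	/* Re-add the one-block payload until all WRITE_SAME sectors are mapped. */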
490	while (sectors) {
491		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
492				!= sg->length) {
493
494			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
495					     0);
496			if (!bio)
497				goto fail_put_bios;
498
499			refcount_inc(&ibr->pending);
500			bio_list_add(&list, bio);
501		}
502
503		/* Always in 512 byte units for Linux/Block */
504		block_lba += sg->length >> SECTOR_SHIFT;
505		sectors -= sg->length >> SECTOR_SHIFT;
506	}
507
508	iblock_submit_bios(&list);
509	return 0;
510
511fail_put_bios:
512	while ((bio = bio_list_pop(&list)))
513		bio_put(bio);
514fail_free_ibr:
515	kfree(ibr);
516fail:
517	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
518}
519
520enum {
521	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
522};
523
524static match_table_t tokens = {
525	{Opt_udev_path, "udev_path=%s"},
526	{Opt_readonly, "readonly=%d"},
527	{Opt_force, "force=%d"},
528	{Opt_err, NULL}
529};
530
531static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
532		const char *page, ssize_t count)
533{
534	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
535	char *orig, *ptr, *arg_p, *opts;
536	substring_t args[MAX_OPT_ARGS];
537	int ret = 0, token;
538	unsigned long tmp_readonly;
539
540	opts = kstrdup(page, GFP_KERNEL);
541	if (!opts)
542		return -ENOMEM;
543
544	orig = opts;
545
546	while ((ptr = strsep(&opts, ",\n")) != NULL) {
547		if (!*ptr)
548			continue;
549
550		token = match_token(ptr, tokens, args);
551		switch (token) {
552		case Opt_udev_path:
553			if (ib_dev->ibd_bd) {
554				pr_err("Unable to set udev_path= while"
555					" ib_dev->ibd_bd exists\n");
556				ret = -EEXIST;
557				goto out;
558			}
559			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
560				SE_UDEV_PATH_LEN) == 0) {
561				ret = -EINVAL;
562				break;
563			}
564			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
565					ib_dev->ibd_udev_path);
566			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
567			break;
568		case Opt_readonly:
569			arg_p = match_strdup(&args[0]);
570			if (!arg_p) {
571				ret = -ENOMEM;
572				break;
573			}
574			ret = kstrtoul(arg_p, 0, &tmp_readonly);
575			kfree(arg_p);
576			if (ret < 0) {
577				pr_err("kstrtoul() failed for"
578						" readonly=\n");
579				goto out;
580			}
581			ib_dev->ibd_readonly = tmp_readonly;
582			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
583			break;
584		case Opt_force:
585			break;
586		default:
587			break;
588		}
589	}
590
591out:
592	kfree(orig);
593	return (!ret) ? count : ret;
594}
595
596static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
597{
598	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
599	struct block_device *bd = ib_dev->ibd_bd;
600	char buf[BDEVNAME_SIZE];
601	ssize_t bl = 0;
602
603	if (bd)
604		bl += sprintf(b + bl, "iBlock device: %s",
605				bdevname(bd, buf));
606	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
607		bl += sprintf(b + bl, "  UDEV PATH: %s",
608				ib_dev->ibd_udev_path);
609	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);
610
611	bl += sprintf(b + bl, "        ");
612	if (bd) {
613		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
614			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
615			"" : (bd->bd_holder == ib_dev) ?
616			"CLAIMED: IBLOCK" : "CLAIMED: OS");
617	} else {
618		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
619	}
620
621	return bl;
622}
623
624static int
625iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
626		 struct sg_mapping_iter *miter)
627{
628	struct se_device *dev = cmd->se_dev;
629	struct blk_integrity *bi;
630	struct bio_integrity_payload *bip;
631	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
632	int rc;
633	size_t resid, len;
634
635	bi = bdev_get_integrity(ib_dev->ibd_bd);
636	if (!bi) {
637		pr_err("Unable to locate bio_integrity\n");
638		return -ENODEV;
639	}
640
641	bip = bio_integrity_alloc(bio, GFP_NOIO,
642			min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
643	if (IS_ERR(bip)) {
644		pr_err("Unable to allocate bio_integrity_payload\n");
645		return PTR_ERR(bip);
646	}
647
648	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
649	/* virtual start sector must be in integrity interval units */
650	bip_set_seed(bip, bio->bi_iter.bi_sector >>
651				  (bi->interval_exp - SECTOR_SHIFT));
652
653	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
654		 (unsigned long long)bip->bip_iter.bi_sector);
655
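	/* Attach protection pages until the BIP covers bip_iter.bi_size bytes. */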
656	resid = bip->bip_iter.bi_size;
657	while (resid > 0 && sg_miter_next(miter)) {
658
659		len = min_t(size_t, miter->length, resid);
660		rc = bio_integrity_add_page(bio, miter->page, len,
661					    offset_in_page(miter->addr));
662		if (rc != len) {
663			pr_err("bio_integrity_add_page() failed; %d\n", rc);
664			sg_miter_stop(miter);
665			return -ENOMEM;
666		}
667
668		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
669			  miter->page, len, offset_in_page(miter->addr));
670
671		resid -= len;
672		if (len < miter->length)
673			miter->consumed -= miter->length - len;
674	}
675	sg_miter_stop(miter);
676
677	return 0;
678}
679
680static sense_reason_t
681iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
682		  enum dma_data_direction data_direction)
683{
684	struct se_device *dev = cmd->se_dev;
685	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
686	struct iblock_req *ibr;
687	struct bio *bio;
688	struct bio_list list;
689	struct scatterlist *sg;
690	u32 sg_num = sgl_nents;
691	unsigned bio_cnt;
692	int i, rc, op, op_flags = 0;
693	struct sg_mapping_iter prot_miter;
694
695	if (data_direction == DMA_TO_DEVICE) {
696		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
697		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
698		/*
699		 * Force writethrough using REQ_FUA if a volatile write cache
700		 * is not enabled, or if initiator set the Force Unit Access bit.
701		 */
702		op = REQ_OP_WRITE;
703		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
704			if (cmd->se_cmd_flags & SCF_FUA)
705				op_flags = REQ_FUA;
706			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
707				op_flags = REQ_FUA;
708		}
709	} else {
710		op = REQ_OP_READ;
711	}
712
713	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
714	if (!ibr)
715		goto fail;
716	cmd->priv = ibr;
717
718	if (!sgl_nents) {
719		refcount_set(&ibr->pending, 1);
720		iblock_complete_cmd(cmd);
721		return 0;
722	}
723
724	bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
725	if (!bio)
726		goto fail_free_ibr;
727
728	bio_list_init(&list);
729	bio_list_add(&list, bio);
730
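	/* One reference for the first bio, one held by this submit path. */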
731	refcount_set(&ibr->pending, 2);
732	bio_cnt = 1;
733
734	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
735		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
736			       op == REQ_OP_READ ? SG_MITER_FROM_SG :
737						   SG_MITER_TO_SG);
738
739	for_each_sg(sgl, sg, sgl_nents, i) {
740		/*
741		 * XXX: if the length the device accepts is shorter than the
742	 *	length of the S/G list entry this will cause an
743		 *	endless loop.  Better hope no driver uses huge pages.
744		 */
745		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
746				!= sg->length) {
747			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
748				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
749				if (rc)
750					goto fail_put_bios;
751			}
752
753			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
754				iblock_submit_bios(&list);
755				bio_cnt = 0;
756			}
757
758			bio = iblock_get_bio(cmd, block_lba, sg_num, op,
759					     op_flags);
760			if (!bio)
761				goto fail_put_bios;
762
763			refcount_inc(&ibr->pending);
764			bio_list_add(&list, bio);
765			bio_cnt++;
766		}
767
768		/* Always in 512 byte units for Linux/Block */
769		block_lba += sg->length >> SECTOR_SHIFT;
770		sg_num--;
771	}
772
773	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
774		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
775		if (rc)
776			goto fail_put_bios;
777	}
778
779	iblock_submit_bios(&list);
780	iblock_complete_cmd(cmd);
781	return 0;
782
783fail_put_bios:
784	while ((bio = bio_list_pop(&list)))
785		bio_put(bio);
786fail_free_ibr:
787	kfree(ibr);
788fail:
789	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
790}
791
792static sector_t iblock_get_blocks(struct se_device *dev)
793{
794	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
795	struct block_device *bd = ib_dev->ibd_bd;
796	struct request_queue *q = bdev_get_queue(bd);
797
798	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
799}
800
801static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
802{
803	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
804	struct block_device *bd = ib_dev->ibd_bd;
805	int ret;
806
807	ret = bdev_alignment_offset(bd);
808	if (ret == -1)
809		return 0;
810
811	/* convert offset-bytes to offset-lbas */
812	return ret / bdev_logical_block_size(bd);
813}
814
815static unsigned int iblock_get_lbppbe(struct se_device *dev)
816{
817	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
818	struct block_device *bd = ib_dev->ibd_bd;
819	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
820
821	return ilog2(logs_per_phys);
822}
823
824static unsigned int iblock_get_io_min(struct se_device *dev)
825{
826	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
827	struct block_device *bd = ib_dev->ibd_bd;
828
829	return bdev_io_min(bd);
830}
831
832static unsigned int iblock_get_io_opt(struct se_device *dev)
833{
834	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
835	struct block_device *bd = ib_dev->ibd_bd;
836
837	return bdev_io_opt(bd);
838}
839
840static struct sbc_ops iblock_sbc_ops = {
841	.execute_rw		= iblock_execute_rw,
842	.execute_sync_cache	= iblock_execute_sync_cache,
843	.execute_write_same	= iblock_execute_write_same,
844	.execute_unmap		= iblock_execute_unmap,
845};
846
847static sense_reason_t
848iblock_parse_cdb(struct se_cmd *cmd)
849{
850	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
851}
852
853static bool iblock_get_write_cache(struct se_device *dev)
854{
855	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
856	struct block_device *bd = ib_dev->ibd_bd;
857	struct request_queue *q = bdev_get_queue(bd);
858
859	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
860}
861
862static const struct target_backend_ops iblock_ops = {
863	.name			= "iblock",
864	.inquiry_prod		= "IBLOCK",
865	.inquiry_rev		= IBLOCK_VERSION,
866	.owner			= THIS_MODULE,
867	.attach_hba		= iblock_attach_hba,
868	.detach_hba		= iblock_detach_hba,
869	.alloc_device		= iblock_alloc_device,
870	.configure_device	= iblock_configure_device,
871	.destroy_device		= iblock_destroy_device,
872	.free_device		= iblock_free_device,
873	.parse_cdb		= iblock_parse_cdb,
874	.set_configfs_dev_params = iblock_set_configfs_dev_params,
875	.show_configfs_dev_params = iblock_show_configfs_dev_params,
876	.get_device_type	= sbc_get_device_type,
877	.get_blocks		= iblock_get_blocks,
878	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
879	.get_lbppbe		= iblock_get_lbppbe,
880	.get_io_min		= iblock_get_io_min,
881	.get_io_opt		= iblock_get_io_opt,
882	.get_write_cache	= iblock_get_write_cache,
883	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
884};
885
886static int __init iblock_module_init(void)
887{
888	return transport_backend_register(&iblock_ops);
889}
890
891static void __exit iblock_module_exit(void)
892{
893	target_backend_unregister(&iblock_ops);
894}
895
896MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
897MODULE_AUTHOR("nab@Linux-iSCSI.org");
898MODULE_LICENSE("GPL");
899
900module_init(iblock_module_init);
901module_exit(iblock_module_exit);
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*******************************************************************************
   3 * Filename:  target_core_iblock.c
   4 *
   5 * This file contains the Storage Engine  <-> Linux BlockIO transport
   6 * specific functions.
   7 *
   8 * (c) Copyright 2003-2013 Datera, Inc.
   9 *
  10 * Nicholas A. Bellinger <nab@kernel.org>
  11 *
  12 ******************************************************************************/
  13
  14#include <linux/string.h>
  15#include <linux/parser.h>
  16#include <linux/timer.h>
  17#include <linux/fs.h>
  18#include <linux/blkdev.h>
  19#include <linux/blk-integrity.h>
  20#include <linux/slab.h>
  21#include <linux/spinlock.h>
  22#include <linux/bio.h>
  23#include <linux/file.h>
  24#include <linux/module.h>
  25#include <linux/scatterlist.h>
  26#include <linux/pr.h>
  27#include <scsi/scsi_proto.h>
  28#include <scsi/scsi_common.h>
  29#include <asm/unaligned.h>
  30
  31#include <target/target_core_base.h>
  32#include <target/target_core_backend.h>
  33
  34#include "target_core_iblock.h"
  35#include "target_core_pr.h"
  36
  37#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
  38#define IBLOCK_BIO_POOL_SIZE	128
  39
  40static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
  41{
  42	return container_of(dev, struct iblock_dev, dev);
  43}
  44
  45
  46static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
  47{
  48	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
  49		" Generic Target Core Stack %s\n", hba->hba_id,
  50		IBLOCK_VERSION, TARGET_CORE_VERSION);
  51	return 0;
  52}
  53
  54static void iblock_detach_hba(struct se_hba *hba)
  55{
  56}
  57
  58static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
  59{
  60	struct iblock_dev *ib_dev = NULL;
  61
  62	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
  63	if (!ib_dev) {
  64		pr_err("Unable to allocate struct iblock_dev\n");
  65		return NULL;
  66	}
  67
  68	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
  69				   GFP_KERNEL);
  70	if (!ib_dev->ibd_plug)
  71		goto free_dev;
  72
  73	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);
  74
  75	return &ib_dev->dev;
  76
  77free_dev:
  78	kfree(ib_dev);
  79	return NULL;
  80}
  81
  82static bool iblock_configure_unmap(struct se_device *dev)
  83{
  84	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
  85
  86	return target_configure_unmap_from_queue(&dev->dev_attrib,
  87						 ib_dev->ibd_bd);
  88}
  89
  90static int iblock_configure_device(struct se_device *dev)
  91{
  92	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
  93	struct request_queue *q;
  94	struct file *bdev_file;
  95	struct block_device *bd;
  96	struct blk_integrity *bi;
  97	blk_mode_t mode = BLK_OPEN_READ;
  98	unsigned int max_write_zeroes_sectors;
  99	int ret;
 100
 101	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
 102		pr_err("Missing udev_path= parameters for IBLOCK\n");
 103		return -EINVAL;
 104	}
 105
 106	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 107	if (ret) {
 108		pr_err("IBLOCK: Unable to create bioset\n");
 109		goto out;
 110	}
 111
 112	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
 113			ib_dev->ibd_udev_path);
 114
 115	if (!ib_dev->ibd_readonly)
 116		mode |= BLK_OPEN_WRITE;
 117	else
 118		dev->dev_flags |= DF_READ_ONLY;
 119
 120	bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
 121					NULL);
 122	if (IS_ERR(bdev_file)) {
 123		ret = PTR_ERR(bdev_file);
 124		goto out_free_bioset;
 125	}
 126	ib_dev->ibd_bdev_file = bdev_file;
 127	ib_dev->ibd_bd = bd = file_bdev(bdev_file);
 128
 129	q = bdev_get_queue(bd);
 130
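	/*
	 * Derive the SCSI-visible limits from the backing device;
	 * hw_max_sectors is converted from 512-byte queue sectors into
	 * hw_block_size units.
	 */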
 131	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
 132	dev->dev_attrib.hw_max_sectors = mult_frac(queue_max_hw_sectors(q),
 133			SECTOR_SIZE,
 134			dev->dev_attrib.hw_block_size);
 135	dev->dev_attrib.hw_queue_depth = q->nr_requests;
 136
 137	/*
 138	 * Enable write same emulation for IBLOCK and use 0xFFFF as
 139	 * the smaller WRITE_SAME(10) only has a two-byte block count.
 140	 */
 141	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
 142	if (max_write_zeroes_sectors)
 143		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
 144	else
 145		dev->dev_attrib.max_write_same_len = 0xFFFF;
 146
 147	if (bdev_nonrot(bd))
 148		dev->dev_attrib.is_nonrot = 1;
 149
 150	bi = bdev_get_integrity(bd);
 151	if (bi) {
 152		struct bio_set *bs = &ib_dev->ibd_bio_set;
 153
 154		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
 155		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
 156			pr_err("IBLOCK export of blk_integrity: %s not"
 157			       " supported\n", bi->profile->name);
 158			ret = -ENOSYS;
 159			goto out_blkdev_put;
 160		}
 161
 162		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
 163			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
 164		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
 165			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
 166		}
 167
 168		if (dev->dev_attrib.pi_prot_type) {
 169			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
 170				pr_err("Unable to allocate bioset for PI\n");
 171				ret = -ENOMEM;
 172				goto out_blkdev_put;
 173			}
 174			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
 175				 &bs->bio_integrity_pool);
 176		}
 177		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
 178	}
 179
 180	return 0;
 181
 182out_blkdev_put:
 183	fput(ib_dev->ibd_bdev_file);
 184out_free_bioset:
 185	bioset_exit(&ib_dev->ibd_bio_set);
 186out:
 187	return ret;
 188}
 189
 190static void iblock_dev_call_rcu(struct rcu_head *p)
 191{
 192	struct se_device *dev = container_of(p, struct se_device, rcu_head);
 193	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 194
 195	kfree(ib_dev->ibd_plug);
 196	kfree(ib_dev);
 197}
 198
 199static void iblock_free_device(struct se_device *dev)
 200{
 201	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
 202}
 203
 204static void iblock_destroy_device(struct se_device *dev)
 205{
 206	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 207
 208	if (ib_dev->ibd_bdev_file)
 209		fput(ib_dev->ibd_bdev_file);
 210	bioset_exit(&ib_dev->ibd_bio_set);
 211}
 212
 213static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
 214{
 215	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
 216	struct iblock_dev_plug *ib_dev_plug;
 217
 218	/*
 219	 * Each se_device has a per-cpu plug that this can run from. We
 220	 * shouldn't have multiple threads on the same cpu calling this
 221	 * at the same time.
 222	 */
 223	ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
 224	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
 225		return NULL;
 226
 227	blk_start_plug(&ib_dev_plug->blk_plug);
 228	return &ib_dev_plug->se_plug;
 229}
 230
 231static void iblock_unplug_device(struct se_dev_plug *se_plug)
 232{
 233	struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
 234					struct iblock_dev_plug, se_plug);
 235
 236	blk_finish_plug(&ib_dev_plug->blk_plug);
 237	clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
 238}
 239
 240static sector_t iblock_get_blocks(struct se_device *dev)
 241{
 242	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 243	u32 block_size = bdev_logical_block_size(ib_dev->ibd_bd);
 244	unsigned long long blocks_long =
 245		div_u64(bdev_nr_bytes(ib_dev->ibd_bd), block_size) - 1;
 246
 247	if (block_size == dev->dev_attrib.block_size)
 248		return blocks_long;
 249
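	/*
	 * Capacity was computed in the backing device's logical block size;
	 * rescale the last-LBA count when the exported block size differs.
	 * Each shift is one power-of-two step in the size ratio.
	 */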
 250	switch (block_size) {
 251	case 4096:
 252		switch (dev->dev_attrib.block_size) {
 253		case 2048:
 254			blocks_long <<= 1;
 255			break;
 256		case 1024:
 257			blocks_long <<= 2;
 258			break;
 259		case 512:
 260			blocks_long <<= 3;
 261			break;
 262		default:
 263			break;
 264		}
 265		break;
 266	case 2048:
 267		switch (dev->dev_attrib.block_size) {
 268		case 4096:
 269			blocks_long >>= 1;
 270			break;
 271		case 1024:
 272			blocks_long <<= 1;
 273			break;
 274		case 512:
 275			blocks_long <<= 2;
 276			break;
 277		default:
 278			break;
 279		}
 280		break;
 281	case 1024:
 282		switch (dev->dev_attrib.block_size) {
 283		case 4096:
 284			blocks_long >>= 2;
 285			break;
 286		case 2048:
 287			blocks_long >>= 1;
 288			break;
 289		case 512:
 290			blocks_long <<= 1;
 291			break;
 292		default:
 293			break;
 294		}
 295		break;
 296	case 512:
 297		switch (dev->dev_attrib.block_size) {
 298		case 4096:
 299			blocks_long >>= 3;
 300			break;
 301		case 2048:
 302			blocks_long >>= 2;
 303			break;
 304		case 1024:
 305			blocks_long >>= 1;
 306			break;
 307		default:
 308			break;
 309		}
 310		break;
 311	default:
 312		break;
 313	}
 314
 315	return blocks_long;
 316}
 317
 318static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status)
 319{
 320	struct iblock_req *ibr = cmd->priv;
 321	u8 status;
 322
 323	if (!refcount_dec_and_test(&ibr->pending))
 324		return;
 325
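	/* A block layer reservation conflict maps to SCSI RESERVATION CONFLICT. */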
 326	if (blk_status == BLK_STS_RESV_CONFLICT)
 327		status = SAM_STAT_RESERVATION_CONFLICT;
 328	else if (atomic_read(&ibr->ib_bio_err_cnt))
 329		status = SAM_STAT_CHECK_CONDITION;
 330	else
 331		status = SAM_STAT_GOOD;
 332
 333	target_complete_cmd(cmd, status);
 334	kfree(ibr);
 335}
 336
 337static void iblock_bio_done(struct bio *bio)
 338{
 339	struct se_cmd *cmd = bio->bi_private;
 340	struct iblock_req *ibr = cmd->priv;
 341	blk_status_t blk_status = bio->bi_status;
 342
 343	if (bio->bi_status) {
 344		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
 345		/*
 346		 * Bump the ib_bio_err_cnt and release bio.
 347		 */
 348		atomic_inc(&ibr->ib_bio_err_cnt);
 349		smp_mb__after_atomic();
 350	}
 351
 352	bio_put(bio);
 353
 354	iblock_complete_cmd(cmd, blk_status);
 355}
 356
 357static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
 358				  blk_opf_t opf)
 359{
 360	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
 361	struct bio *bio;
 362
 363	/*
 364	 * Only allocate as many vector entries as the bio code allows us to,
 365	 * we'll loop later on until we have handled the whole request.
 366	 */
 367	bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
 368			       GFP_NOIO, &ib_dev->ibd_bio_set);
 369	if (!bio) {
 370		pr_err("Unable to allocate memory for bio\n");
 371		return NULL;
 372	}
 373
 374	bio->bi_private = cmd;
 375	bio->bi_end_io = &iblock_bio_done;
 376	bio->bi_iter.bi_sector = lba;
 377
 378	return bio;
 379}
 380
 381static void iblock_submit_bios(struct bio_list *list)
 382{
 383	struct blk_plug plug;
 384	struct bio *bio;
 385	/*
 386	 * The block layer handles nested plugs, so just plug/unplug to handle
 387	 * fabric drivers that didn't support batching and multi bio cmds.
 388	 */
 389	blk_start_plug(&plug);
 390	while ((bio = bio_list_pop(list)))
 391		submit_bio(bio);
 392	blk_finish_plug(&plug);
 393}
 394
 395static void iblock_end_io_flush(struct bio *bio)
 396{
 397	struct se_cmd *cmd = bio->bi_private;
 398
 399	if (bio->bi_status)
 400		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
 401
 402	if (cmd) {
 403		if (bio->bi_status)
 404			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 405		else
 406			target_complete_cmd(cmd, SAM_STAT_GOOD);
 407	}
 408
 409	bio_put(bio);
 410}
 411
 412/*
 413 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 414 * always flush the whole cache.
 415 */
 416static sense_reason_t
 417iblock_execute_sync_cache(struct se_cmd *cmd)
 418{
 419	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
 420	int immed = (cmd->t_task_cdb[1] & 0x2);
 421	struct bio *bio;
 422
 423	/*
 424	 * If the Immediate bit is set, queue up the GOOD response
 425	 * for this SYNCHRONIZE_CACHE op.
 426	 */
 427	if (immed)
 428		target_complete_cmd(cmd, SAM_STAT_GOOD);
 429
 430	bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
 431			GFP_KERNEL);
 432	bio->bi_end_io = iblock_end_io_flush;
 433	if (!immed)
 434		bio->bi_private = cmd;
 435	submit_bio(bio);
 436	return 0;
 437}
 438
 439static sense_reason_t
 440iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 441{
 442	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
 443	struct se_device *dev = cmd->se_dev;
 444	int ret;
 445
 446	ret = blkdev_issue_discard(bdev,
 447				   target_to_linux_sector(dev, lba),
 448				   target_to_linux_sector(dev,  nolb),
 449				   GFP_KERNEL);
 450	if (ret < 0) {
 451		pr_err("blkdev_issue_discard() failed: %d\n", ret);
 452		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 453	}
 454
 455	return 0;
 456}
 457
 458static sense_reason_t
 459iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
 460{
 461	struct se_device *dev = cmd->se_dev;
 462	struct scatterlist *sg = &cmd->t_data_sg[0];
 463	unsigned char *buf, *not_zero;
 464	int ret;
 465
 466	buf = kmap(sg_page(sg)) + sg->offset;
 467	if (!buf)
 468		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 469	/*
 470	 * Fall back to block_execute_write_same() slow-path if
 471	 * incoming WRITE_SAME payload does not contain zeros.
 472	 */
 473	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
 474	kunmap(sg_page(sg));
 475
 476	if (not_zero)
 477		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 478
 479	ret = blkdev_issue_zeroout(bdev,
 480				target_to_linux_sector(dev, cmd->t_task_lba),
 481				target_to_linux_sector(dev,
 482					sbc_get_write_same_sectors(cmd)),
 483				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
 484	if (ret)
 485		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 486
 487	target_complete_cmd(cmd, SAM_STAT_GOOD);
 488	return 0;
 489}
 490
 491static sense_reason_t
 492iblock_execute_write_same(struct se_cmd *cmd)
 493{
 494	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
 495	struct iblock_req *ibr;
 496	struct scatterlist *sg;
 497	struct bio *bio;
 498	struct bio_list list;
 499	struct se_device *dev = cmd->se_dev;
 500	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
 501	sector_t sectors = target_to_linux_sector(dev,
 502					sbc_get_write_same_sectors(cmd));
 503
 504	if (cmd->prot_op) {
 505		pr_err("WRITE_SAME: Protection information with IBLOCK"
 506		       " backends not supported\n");
 507		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 508	}
 509
 510	if (!cmd->t_data_nents)
 511		return TCM_INVALID_CDB_FIELD;
 512
 513	sg = &cmd->t_data_sg[0];
 514
 515	if (cmd->t_data_nents > 1 ||
 516	    sg->length != cmd->se_dev->dev_attrib.block_size) {
 517		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
 518			" block_size: %u\n", cmd->t_data_nents, sg->length,
 519			cmd->se_dev->dev_attrib.block_size);
 520		return TCM_INVALID_CDB_FIELD;
 521	}
 522
 523	if (bdev_write_zeroes_sectors(bdev)) {
 524		if (!iblock_execute_zero_out(bdev, cmd))
 525			return 0;
 526	}
 527
 528	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
 529	if (!ibr)
 530		goto fail;
 531	cmd->priv = ibr;
 532
 533	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
 534	if (!bio)
 535		goto fail_free_ibr;
 536
 537	bio_list_init(&list);
 538	bio_list_add(&list, bio);
 539
 540	refcount_set(&ibr->pending, 1);
 541
 542	while (sectors) {
 543		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
 544				!= sg->length) {
 545
 546			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
 547			if (!bio)
 548				goto fail_put_bios;
 549
 550			refcount_inc(&ibr->pending);
 551			bio_list_add(&list, bio);
 552		}
 553
 554		/* Always in 512 byte units for Linux/Block */
 555		block_lba += sg->length >> SECTOR_SHIFT;
 556		sectors -= sg->length >> SECTOR_SHIFT;
 557	}
 558
 559	iblock_submit_bios(&list);
 560	return 0;
 561
 562fail_put_bios:
 563	while ((bio = bio_list_pop(&list)))
 564		bio_put(bio);
 565fail_free_ibr:
 566	kfree(ibr);
 567fail:
 568	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 569}
 570
 571enum {
 572	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
 573};
 574
 575static match_table_t tokens = {
 576	{Opt_udev_path, "udev_path=%s"},
 577	{Opt_readonly, "readonly=%d"},
 578	{Opt_force, "force=%d"},
 579	{Opt_err, NULL}
 580};
 581
 582static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
 583		const char *page, ssize_t count)
 584{
 585	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 586	char *orig, *ptr, *arg_p, *opts;
 587	substring_t args[MAX_OPT_ARGS];
 588	int ret = 0, token;
 589	unsigned long tmp_readonly;
 590
 591	opts = kstrdup(page, GFP_KERNEL);
 592	if (!opts)
 593		return -ENOMEM;
 594
 595	orig = opts;
 596
 597	while ((ptr = strsep(&opts, ",\n")) != NULL) {
 598		if (!*ptr)
 599			continue;
 600
 601		token = match_token(ptr, tokens, args);
 602		switch (token) {
 603		case Opt_udev_path:
 604			if (ib_dev->ibd_bd) {
 605				pr_err("Unable to set udev_path= while"
 606					" ib_dev->ibd_bd exists\n");
 607				ret = -EEXIST;
 608				goto out;
 609			}
 610			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
 611				SE_UDEV_PATH_LEN) == 0) {
 612				ret = -EINVAL;
 613				break;
 614			}
 615			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
 616					ib_dev->ibd_udev_path);
 617			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
 618			break;
 619		case Opt_readonly:
 620			arg_p = match_strdup(&args[0]);
 621			if (!arg_p) {
 622				ret = -ENOMEM;
 623				break;
 624			}
 625			ret = kstrtoul(arg_p, 0, &tmp_readonly);
 626			kfree(arg_p);
 627			if (ret < 0) {
 628				pr_err("kstrtoul() failed for"
 629						" readonly=\n");
 630				goto out;
 631			}
 632			ib_dev->ibd_readonly = tmp_readonly;
 633			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
 634			break;
 635		case Opt_force:
 636			break;
 637		default:
 638			break;
 639		}
 640	}
 641
 642out:
 643	kfree(orig);
 644	return (!ret) ? count : ret;
 645}
 646
 647static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 648{
 649	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 650	struct block_device *bd = ib_dev->ibd_bd;
 651	ssize_t bl = 0;
 652
 653	if (bd)
 654		bl += sprintf(b + bl, "iBlock device: %pg", bd);
 655	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
 656		bl += sprintf(b + bl, "  UDEV PATH: %s",
 657				ib_dev->ibd_udev_path);
 658	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);
 659
 660	bl += sprintf(b + bl, "        ");
 661	if (bd) {
 662		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
 663			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
 664			"CLAIMED: IBLOCK");
 665	} else {
 666		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
 667	}
 668
 669	return bl;
 670}
 671
 672static int
 673iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
 674		 struct sg_mapping_iter *miter)
 675{
 676	struct se_device *dev = cmd->se_dev;
 677	struct blk_integrity *bi;
 678	struct bio_integrity_payload *bip;
 679	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 680	int rc;
 681	size_t resid, len;
 682
 683	bi = bdev_get_integrity(ib_dev->ibd_bd);
 684	if (!bi) {
 685		pr_err("Unable to locate bio_integrity\n");
 686		return -ENODEV;
 687	}
 688
 689	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
 690	if (IS_ERR(bip)) {
 691		pr_err("Unable to allocate bio_integrity_payload\n");
 692		return PTR_ERR(bip);
 693	}
 694
 695	/* virtual start sector must be in integrity interval units */
 696	bip_set_seed(bip, bio->bi_iter.bi_sector >>
 697				  (bi->interval_exp - SECTOR_SHIFT));
 698
 699	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
 700		 (unsigned long long)bip->bip_iter.bi_sector);
 701
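	/* Attach protection pages until bio_integrity_bytes() are covered. */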
 702	resid = bio_integrity_bytes(bi, bio_sectors(bio));
 703	while (resid > 0 && sg_miter_next(miter)) {
 704
 705		len = min_t(size_t, miter->length, resid);
 706		rc = bio_integrity_add_page(bio, miter->page, len,
 707					    offset_in_page(miter->addr));
 708		if (rc != len) {
 709			pr_err("bio_integrity_add_page() failed; %d\n", rc);
 710			sg_miter_stop(miter);
 711			return -ENOMEM;
 712		}
 713
 714		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
 715			  miter->page, len, offset_in_page(miter->addr));
 716
 717		resid -= len;
 718		if (len < miter->length)
 719			miter->consumed -= miter->length - len;
 720	}
 721	sg_miter_stop(miter);
 722
 723	return 0;
 724}
 725
 726static sense_reason_t
 727iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 728		  enum dma_data_direction data_direction)
 729{
 730	struct se_device *dev = cmd->se_dev;
 731	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
 732	struct iblock_req *ibr;
 733	struct bio *bio;
 734	struct bio_list list;
 735	struct scatterlist *sg;
 736	u32 sg_num = sgl_nents;
 737	blk_opf_t opf;
 738	unsigned bio_cnt;
 739	int i, rc;
 740	struct sg_mapping_iter prot_miter;
 741	unsigned int miter_dir;
 742
 743	if (data_direction == DMA_TO_DEVICE) {
 744		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 745
 746		/*
 747		 * Set bits to indicate WRITE_ODIRECT so we are not throttled
 748		 * by WBT.
 749		 */
 750		opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
 751		/*
 752		 * Force writethrough using REQ_FUA if a volatile write cache
 753		 * is not enabled, or if initiator set the Force Unit Access bit.
 754		 */
 755		miter_dir = SG_MITER_TO_SG;
 756		if (bdev_fua(ib_dev->ibd_bd)) {
 757			if (cmd->se_cmd_flags & SCF_FUA)
 758				opf |= REQ_FUA;
 759			else if (!bdev_write_cache(ib_dev->ibd_bd))
 760				opf |= REQ_FUA;
 761		}
 762	} else {
 763		opf = REQ_OP_READ;
 764		miter_dir = SG_MITER_FROM_SG;
 765	}
 766
 767	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
 768	if (!ibr)
 769		goto fail;
 770	cmd->priv = ibr;
 771
 772	if (!sgl_nents) {
 773		refcount_set(&ibr->pending, 1);
 774		iblock_complete_cmd(cmd, BLK_STS_OK);
 775		return 0;
 776	}
 777
 778	bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
 779	if (!bio)
 780		goto fail_free_ibr;
 781
 782	bio_list_init(&list);
 783	bio_list_add(&list, bio);
 784
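	/* One reference for the first bio, one held by this submit path. */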
 785	refcount_set(&ibr->pending, 2);
 786	bio_cnt = 1;
 787
 788	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
 789		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
 790			       miter_dir);
 791
 792	for_each_sg(sgl, sg, sgl_nents, i) {
 793		/*
 794		 * XXX: if the length the device accepts is shorter than the
 795	 *	length of the S/G list entry this will cause an
 796		 *	endless loop.  Better hope no driver uses huge pages.
 797		 */
 798		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
 799				!= sg->length) {
 800			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
 801				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
 802				if (rc)
 803					goto fail_put_bios;
 804			}
 805
 806			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
 807				iblock_submit_bios(&list);
 808				bio_cnt = 0;
 809			}
 810
 811			bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
 812			if (!bio)
 813				goto fail_put_bios;
 814
 815			refcount_inc(&ibr->pending);
 816			bio_list_add(&list, bio);
 817			bio_cnt++;
 818		}
 819
 820		/* Always in 512 byte units for Linux/Block */
 821		block_lba += sg->length >> SECTOR_SHIFT;
 822		sg_num--;
 823	}
 824
 825	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
 826		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
 827		if (rc)
 828			goto fail_put_bios;
 829	}
 830
 831	iblock_submit_bios(&list);
 832	iblock_complete_cmd(cmd, BLK_STS_OK);
 833	return 0;
 834
 835fail_put_bios:
 836	while ((bio = bio_list_pop(&list)))
 837		bio_put(bio);
 838fail_free_ibr:
 839	kfree(ibr);
 840fail:
 841	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 842}
 843
 844static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
 845					    u64 sa_key, u8 type, bool aptpl)
 846{
 847	struct se_device *dev = cmd->se_dev;
 848	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 849	struct block_device *bdev = ib_dev->ibd_bd;
 850	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
 851	int ret;
 852
 853	if (!ops) {
 854		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
 855		return TCM_UNSUPPORTED_SCSI_OPCODE;
 856	}
 857
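	/*
	 * Dispatch the PERSISTENT RESERVE OUT service action to the
	 * matching block layer pr_ops callback on the backing device.
	 */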
 858	switch (sa) {
 859	case PRO_REGISTER:
 860	case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
 861		if (!ops->pr_register) {
 862			pr_err("block device does not support pr_register.\n");
 863			return TCM_UNSUPPORTED_SCSI_OPCODE;
 864		}
 865
 866		/* The block layer pr ops always enables aptpl */
 867		if (!aptpl)
 868			pr_info("APTPL not set by initiator, but will be used.\n");
 869
 870		ret = ops->pr_register(bdev, key, sa_key,
 871				sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY);
 872		break;
 873	case PRO_RESERVE:
 874		if (!ops->pr_reserve) {
 875			pr_err("block_device does not support pr_reserve.\n");
 876			return TCM_UNSUPPORTED_SCSI_OPCODE;
 877		}
 878
 879		ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0);
 880		break;
 881	case PRO_CLEAR:
 882		if (!ops->pr_clear) {
 883			pr_err("block_device does not support pr_clear.\n");
 884			return TCM_UNSUPPORTED_SCSI_OPCODE;
 885		}
 886
 887		ret = ops->pr_clear(bdev, key);
 888		break;
 889	case PRO_PREEMPT:
 890	case PRO_PREEMPT_AND_ABORT:
 891		if (!ops->pr_preempt) {
 892			pr_err("block_device does not support pr_preempt.\n");
 893			return TCM_UNSUPPORTED_SCSI_OPCODE;
 894		}
 895
 896		ret = ops->pr_preempt(bdev, key, sa_key,
 897				      scsi_pr_type_to_block(type),
 898				      sa == PRO_PREEMPT_AND_ABORT);
 899		break;
 900	case PRO_RELEASE:
 901		if (!ops->pr_release) {
 902			pr_err("block_device does not support pr_release.\n");
 903			return TCM_UNSUPPORTED_SCSI_OPCODE;
 904		}
 905
 906		ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type));
 907		break;
 908	default:
 909		pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa);
 910		return TCM_UNSUPPORTED_SCSI_OPCODE;
 911	}
 912
 913	if (!ret)
 914		return TCM_NO_SENSE;
 915	else if (ret == PR_STS_RESERVATION_CONFLICT)
 916		return TCM_RESERVATION_CONFLICT;
 917	else
 918		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 919}
 920
 921static void iblock_pr_report_caps(unsigned char *param_data)
 922{
 923	u16 len = 8;
 924
 925	put_unaligned_be16(len, &param_data[0]);
 926	/*
 927	 * When using the pr_ops passthrough method we only support exporting
 928	 * the device through one target port because from the backend module
 929	 * level we can't see the target port config. As a result we only
 930	 * support registration directly from the I_T nexus the cmd is sent
 931	 * through and do not set ATP_C here.
 932	 *
 933	 * The block layer pr_ops do not support passing in initiators so
 934	 * we don't set SIP_C here.
 935	 */
 936	/* PTPL_C: Persistence across Target Power Loss bit */
 937	param_data[2] |= 0x01;
 938	/*
 939	 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
 940	 * set the TMV: Task Mask Valid bit.
 941	 */
 942	param_data[3] |= 0x80;
 943	/*
 944	 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
 945	 */
 946	param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */
 947	/*
 948	 * PTPL_A: Persistence across Target Power Loss Active bit. The block
 949	 * layer pr ops always enables this so report it active.
 950	 */
 951	param_data[3] |= 0x01;
 952	/*
 953	 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
 954	 */
 955	param_data[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
 956	param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
 957	param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
 958	param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
 959	param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
 960	param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
 961}
 962
 963static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd,
 964					  unsigned char *param_data)
 965{
 966	struct se_device *dev = cmd->se_dev;
 967	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 968	struct block_device *bdev = ib_dev->ibd_bd;
 969	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
 970	int i, len, paths, data_offset;
 971	struct pr_keys *keys;
 972	sense_reason_t ret;
 973
 974	if (!ops) {
 975		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
 976		return TCM_UNSUPPORTED_SCSI_OPCODE;
 977	}
 978
 979	if (!ops->pr_read_keys) {
 980		pr_err("Block device does not support read_keys.\n");
 981		return TCM_UNSUPPORTED_SCSI_OPCODE;
 982	}
 983
 984	/*
 985	 * We don't know what's under us, but dm-multipath will register every
 986	 * path with the same key, so start off with enough space for 16 paths,
 987	 * which is not a lot of memory and should normally be enough.
 988	 */
 989	paths = 16;
 990retry:
 991	len = 8 * paths;
 992	keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL);
 993	if (!keys)
 994		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 995
 996	keys->num_keys = paths;
 997	if (!ops->pr_read_keys(bdev, keys)) {
 998		if (keys->num_keys > paths) {
 999			kfree(keys);
1000			paths *= 2;
1001			goto retry;
1002		}
1003	} else {
1004		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1005		goto free_keys;
1006	}
1007
1008	ret = TCM_NO_SENSE;
1009
1010	put_unaligned_be32(keys->generation, &param_data[0]);
1011	if (!keys->num_keys) {
1012		put_unaligned_be32(0, &param_data[4]);
1013		goto free_keys;
1014	}
1015
1016	put_unaligned_be32(8 * keys->num_keys, &param_data[4]);
1017
1018	data_offset = 8;
1019	for (i = 0; i < keys->num_keys; i++) {
1020		if (data_offset + 8 > cmd->data_length)
1021			break;
1022
1023		put_unaligned_be64(keys->keys[i], &param_data[data_offset]);
1024		data_offset += 8;
1025	}
1026
1027free_keys:
1028	kfree(keys);
1029	return ret;
1030}
1031
1032static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd,
1033						 unsigned char *param_data)
1034{
1035	struct se_device *dev = cmd->se_dev;
1036	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
1037	struct block_device *bdev = ib_dev->ibd_bd;
1038	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
1039	struct pr_held_reservation rsv = { };
1040
1041	if (!ops) {
1042		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
1043		return TCM_UNSUPPORTED_SCSI_OPCODE;
1044	}
1045
1046	if (!ops->pr_read_reservation) {
1047		pr_err("Block device does not support read_reservation.\n");
1048		return TCM_UNSUPPORTED_SCSI_OPCODE;
1049	}
1050
1051	if (ops->pr_read_reservation(bdev, &rsv))
1052		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1053
1054	put_unaligned_be32(rsv.generation, &param_data[0]);
1055	if (!block_pr_type_to_scsi(rsv.type)) {
1056		put_unaligned_be32(0, &param_data[4]);
1057		return TCM_NO_SENSE;
1058	}
1059
1060	put_unaligned_be32(16, &param_data[4]);
1061
1062	if (cmd->data_length < 16)
1063		return TCM_NO_SENSE;
1064	put_unaligned_be64(rsv.key, &param_data[8]);
1065
1066	if (cmd->data_length < 22)
1067		return TCM_NO_SENSE;
1068	param_data[21] = block_pr_type_to_scsi(rsv.type);
1069
1070	return TCM_NO_SENSE;
1071}
1072
1073static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa,
1074					   unsigned char *param_data)
1075{
1076	sense_reason_t ret = TCM_NO_SENSE;
1077
1078	switch (sa) {
1079	case PRI_REPORT_CAPABILITIES:
1080		iblock_pr_report_caps(param_data);
1081		break;
1082	case PRI_READ_KEYS:
1083		ret = iblock_pr_read_keys(cmd, param_data);
1084		break;
1085	case PRI_READ_RESERVATION:
1086		ret = iblock_pr_read_reservation(cmd, param_data);
1087		break;
1088	default:
1089		pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa);
1090		return TCM_UNSUPPORTED_SCSI_OPCODE;
1091	}
1092
1093	return ret;
1094}
1095
1096static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
1097{
1098	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
1099	struct block_device *bd = ib_dev->ibd_bd;
1100	int ret;
1101
1102	ret = bdev_alignment_offset(bd);
1103	if (ret == -1)
1104		return 0;
1105
1106	/* convert offset-bytes to offset-lbas */
1107	return ret / bdev_logical_block_size(bd);
1108}
1109
1110static unsigned int iblock_get_lbppbe(struct se_device *dev)
1111{
1112	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
1113	struct block_device *bd = ib_dev->ibd_bd;
1114	unsigned int logs_per_phys =
1115		bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
1116
1117	return ilog2(logs_per_phys);
1118}
1119
1120static unsigned int iblock_get_io_min(struct se_device *dev)
1121{
1122	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
1123	struct block_device *bd = ib_dev->ibd_bd;
1124
1125	return bdev_io_min(bd);
1126}
1127
1128static unsigned int iblock_get_io_opt(struct se_device *dev)
1129{
1130	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
1131	struct block_device *bd = ib_dev->ibd_bd;
1132
1133	return bdev_io_opt(bd);
1134}
1135
1136static struct exec_cmd_ops iblock_exec_cmd_ops = {
1137	.execute_rw		= iblock_execute_rw,
1138	.execute_sync_cache	= iblock_execute_sync_cache,
1139	.execute_write_same	= iblock_execute_write_same,
1140	.execute_unmap		= iblock_execute_unmap,
1141	.execute_pr_out		= iblock_execute_pr_out,
1142	.execute_pr_in		= iblock_execute_pr_in,
1143};
1144
1145static sense_reason_t
1146iblock_parse_cdb(struct se_cmd *cmd)
1147{
1148	return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops);
1149}
1150
1151static bool iblock_get_write_cache(struct se_device *dev)
1152{
1153	return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
1154}
1155
1156static const struct target_backend_ops iblock_ops = {
1157	.name			= "iblock",
1158	.inquiry_prod		= "IBLOCK",
1159	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR,
1160	.inquiry_rev		= IBLOCK_VERSION,
1161	.owner			= THIS_MODULE,
1162	.attach_hba		= iblock_attach_hba,
1163	.detach_hba		= iblock_detach_hba,
1164	.alloc_device		= iblock_alloc_device,
1165	.configure_device	= iblock_configure_device,
1166	.destroy_device		= iblock_destroy_device,
1167	.free_device		= iblock_free_device,
1168	.configure_unmap	= iblock_configure_unmap,
1169	.plug_device		= iblock_plug_device,
1170	.unplug_device		= iblock_unplug_device,
1171	.parse_cdb		= iblock_parse_cdb,
1172	.set_configfs_dev_params = iblock_set_configfs_dev_params,
1173	.show_configfs_dev_params = iblock_show_configfs_dev_params,
1174	.get_device_type	= sbc_get_device_type,
1175	.get_blocks		= iblock_get_blocks,
1176	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
1177	.get_lbppbe		= iblock_get_lbppbe,
1178	.get_io_min		= iblock_get_io_min,
1179	.get_io_opt		= iblock_get_io_opt,
1180	.get_write_cache	= iblock_get_write_cache,
1181	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
1182};
1183
1184static int __init iblock_module_init(void)
1185{
1186	return transport_backend_register(&iblock_ops);
1187}
1188
1189static void __exit iblock_module_exit(void)
1190{
1191	target_backend_unregister(&iblock_ops);
1192}
1193
1194MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
1195MODULE_AUTHOR("nab@Linux-iSCSI.org");
1196MODULE_LICENSE("GPL");
1197
1198module_init(iblock_module_init);
1199module_exit(iblock_module_exit);