target_core_iblock.c (Linux v5.9)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*******************************************************************************
  3 * Filename:  target_core_iblock.c
  4 *
  5 * This file contains the Storage Engine  <-> Linux BlockIO transport
  6 * specific functions.
  7 *
  8 * (c) Copyright 2003-2013 Datera, Inc.
  9 *
 10 * Nicholas A. Bellinger <nab@kernel.org>
 11 *
 12 ******************************************************************************/
 13
 14#include <linux/string.h>
 15#include <linux/parser.h>
 16#include <linux/timer.h>
 17#include <linux/fs.h>
 18#include <linux/blkdev.h>
 19#include <linux/slab.h>
 20#include <linux/spinlock.h>
 21#include <linux/bio.h>
 22#include <linux/genhd.h>
 23#include <linux/file.h>
 24#include <linux/module.h>
 25#include <scsi/scsi_proto.h>
 26#include <asm/unaligned.h>
 27
 28#include <target/target_core_base.h>
 29#include <target/target_core_backend.h>
 30
 31#include "target_core_iblock.h"
 32
 33#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
 34#define IBLOCK_BIO_POOL_SIZE	128
 35
 36static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
 37{
 38	return container_of(dev, struct iblock_dev, dev);
 39}
 40
 41
 42static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
 43{
 44	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
 45		" Generic Target Core Stack %s\n", hba->hba_id,
 46		IBLOCK_VERSION, TARGET_CORE_VERSION);
 47	return 0;
 48}
 49
 50static void iblock_detach_hba(struct se_hba *hba)
 51{
 52}
 53
 54static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
 55{
 56	struct iblock_dev *ib_dev = NULL;
 57
 58	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
 59	if (!ib_dev) {
 60		pr_err("Unable to allocate struct iblock_dev\n");
 61		return NULL;
 62	}
 63
 64	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);
 65
 66	return &ib_dev->dev;
 67}
 68
 69static int iblock_configure_device(struct se_device *dev)
 70{
 71	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 72	struct request_queue *q;
 73	struct block_device *bd = NULL;
 74	struct blk_integrity *bi;
 75	fmode_t mode;
 76	unsigned int max_write_zeroes_sectors;
 77	int ret = -ENOMEM;
 78
 79	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
 80		pr_err("Missing udev_path= parameters for IBLOCK\n");
 81		return -EINVAL;
 82	}
 83
 84	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 85	if (ret) {
 86		pr_err("IBLOCK: Unable to create bioset\n");
 87		goto out;
 88	}
 89
 90	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
 91			ib_dev->ibd_udev_path);
 92
 93	mode = FMODE_READ|FMODE_EXCL;
 94	if (!ib_dev->ibd_readonly)
 95		mode |= FMODE_WRITE;
 96	else
 97		dev->dev_flags |= DF_READ_ONLY;
 98
 99	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
100	if (IS_ERR(bd)) {
101		ret = PTR_ERR(bd);
102		goto out_free_bioset;
103	}
104	ib_dev->ibd_bd = bd;
105
106	q = bdev_get_queue(bd);
107
108	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
109	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
110	dev->dev_attrib.hw_queue_depth = q->nr_requests;
111
112	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
113		pr_debug("IBLOCK: BLOCK Discard support available,"
114			 " disabled by default\n");
115
116	/*
117	 * Enable write same emulation for IBLOCK and use 0xFFFF as
118	 * the smaller WRITE_SAME(10) only has a two-byte block count.
119	 */
120	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
121	if (max_write_zeroes_sectors)
122		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
123	else
124		dev->dev_attrib.max_write_same_len = 0xFFFF;
125
126	if (blk_queue_nonrot(q))
127		dev->dev_attrib.is_nonrot = 1;
128
129	bi = bdev_get_integrity(bd);
130	if (bi) {
131		struct bio_set *bs = &ib_dev->ibd_bio_set;
132
133		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
134		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
135			pr_err("IBLOCK export of blk_integrity: %s not"
136			       " supported\n", bi->profile->name);
137			ret = -ENOSYS;
138			goto out_blkdev_put;
139		}
140
141		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
142			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
143		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
144			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
145		}
146
147		if (dev->dev_attrib.pi_prot_type) {
148			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
149				pr_err("Unable to allocate bioset for PI\n");
150				ret = -ENOMEM;
151				goto out_blkdev_put;
152			}
153			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
154				 &bs->bio_integrity_pool);
155		}
156		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
157	}
158
159	return 0;
160
161out_blkdev_put:
162	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
163out_free_bioset:
164	bioset_exit(&ib_dev->ibd_bio_set);
165out:
166	return ret;
167}
168
169static void iblock_dev_call_rcu(struct rcu_head *p)
170{
171	struct se_device *dev = container_of(p, struct se_device, rcu_head);
172	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
173
174	kfree(ib_dev);
175}
176
177static void iblock_free_device(struct se_device *dev)
178{
179	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
180}
181
182static void iblock_destroy_device(struct se_device *dev)
183{
184	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
185
186	if (ib_dev->ibd_bd != NULL)
187		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
188	bioset_exit(&ib_dev->ibd_bio_set);
189}
190
191static unsigned long long iblock_emulate_read_cap_with_block_size(
192	struct se_device *dev,
193	struct block_device *bd,
194	struct request_queue *q)
195{
196	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
197					bdev_logical_block_size(bd)) - 1);
198	u32 block_size = bdev_logical_block_size(bd);
199
200	if (block_size == dev->dev_attrib.block_size)
201		return blocks_long;
202
203	switch (block_size) {
204	case 4096:
205		switch (dev->dev_attrib.block_size) {
206		case 2048:
207			blocks_long <<= 1;
208			break;
209		case 1024:
210			blocks_long <<= 2;
211			break;
212		case 512:
213			blocks_long <<= 3;
214		default:
215			break;
216		}
217		break;
218	case 2048:
219		switch (dev->dev_attrib.block_size) {
220		case 4096:
221			blocks_long >>= 1;
222			break;
223		case 1024:
224			blocks_long <<= 1;
225			break;
226		case 512:
227			blocks_long <<= 2;
228			break;
229		default:
230			break;
231		}
232		break;
233	case 1024:
234		switch (dev->dev_attrib.block_size) {
235		case 4096:
236			blocks_long >>= 2;
237			break;
238		case 2048:
239			blocks_long >>= 1;
240			break;
241		case 512:
242			blocks_long <<= 1;
243			break;
244		default:
245			break;
246		}
247		break;
248	case 512:
249		switch (dev->dev_attrib.block_size) {
250		case 4096:
251			blocks_long >>= 3;
252			break;
253		case 2048:
254			blocks_long >>= 2;
255			break;
256		case 1024:
257			blocks_long >>= 1;
258			break;
259		default:
260			break;
261		}
262		break;
263	default:
264		break;
265	}
266
267	return blocks_long;
268}
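/*
 * A compact equivalent of the nested switch above, assuming both sizes
 * are powers of two (which logical block sizes always are): shift the
 * block count by the log2 of the ratio of the two sizes, e.g.
 *
 *	int shift = ilog2(block_size) - ilog2(dev->dev_attrib.block_size);
 *
 *	blocks_long = shift >= 0 ? blocks_long << shift
 *				 : blocks_long >> -shift;
 *
 * The switch form simply restricts the conversion to the
 * 512/1024/2048/4096 cases the code actually supports.
 */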
269
270static void iblock_complete_cmd(struct se_cmd *cmd)
271{
272	struct iblock_req *ibr = cmd->priv;
273	u8 status;
274
275	if (!refcount_dec_and_test(&ibr->pending))
276		return;
277
278	if (atomic_read(&ibr->ib_bio_err_cnt))
279		status = SAM_STAT_CHECK_CONDITION;
280	else
281		status = SAM_STAT_GOOD;
282
283	target_complete_cmd(cmd, status);
284	kfree(ibr);
285}
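/*
 * Reference counting note: ibr->pending is held once by the submitting
 * context and once per in-flight bio.  The submit path drops its own
 * reference through a final iblock_complete_cmd() call, and each bio
 * completion drops one via iblock_bio_done(); whoever reaches zero here
 * completes the SCSI command and frees the iblock_req.
 */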
286
287static void iblock_bio_done(struct bio *bio)
288{
289	struct se_cmd *cmd = bio->bi_private;
290	struct iblock_req *ibr = cmd->priv;
291
292	if (bio->bi_status) {
293		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
294		/*
295		 * Bump the ib_bio_err_cnt and release bio.
296		 */
297		atomic_inc(&ibr->ib_bio_err_cnt);
298		smp_mb__after_atomic();
299	}
300
301	bio_put(bio);
302
303	iblock_complete_cmd(cmd);
304}
305
306static struct bio *
307iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
308	       int op_flags)
309{
310	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
311	struct bio *bio;
312
313	/*
314	 * Only allocate as many vector entries as the bio code allows us to,
315	 * we'll loop later on until we have handled the whole request.
316	 */
317	if (sg_num > BIO_MAX_PAGES)
318		sg_num = BIO_MAX_PAGES;
319
320	bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
321	if (!bio) {
322		pr_err("Unable to allocate memory for bio\n");
323		return NULL;
324	}
325
326	bio_set_dev(bio, ib_dev->ibd_bd);
327	bio->bi_private = cmd;
328	bio->bi_end_io = &iblock_bio_done;
329	bio->bi_iter.bi_sector = lba;
330	bio_set_op_attrs(bio, op, op_flags);
331
332	return bio;
333}
334
335static void iblock_submit_bios(struct bio_list *list)
336{
337	struct blk_plug plug;
338	struct bio *bio;
339
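	/*
	 * The block layer handles nested plugs, so just plug/unplug to handle
	 * fabric drivers that didn't support batching and multi bio cmds.
	 */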
340	blk_start_plug(&plug);
341	while ((bio = bio_list_pop(list)))
342		submit_bio(bio);
343	blk_finish_plug(&plug);
344}
345
346static void iblock_end_io_flush(struct bio *bio)
347{
348	struct se_cmd *cmd = bio->bi_private;
349
350	if (bio->bi_status)
351		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
352
353	if (cmd) {
354		if (bio->bi_status)
355			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
356		else
357			target_complete_cmd(cmd, SAM_STAT_GOOD);
358	}
359
360	bio_put(bio);
361}
362
363/*
364 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
365 * always flush the whole cache.
366 */
367static sense_reason_t
368iblock_execute_sync_cache(struct se_cmd *cmd)
369{
370	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
371	int immed = (cmd->t_task_cdb[1] & 0x2);
372	struct bio *bio;
373
374	/*
375	 * If the Immediate bit is set, queue up the GOOD response
376	 * for this SYNCHRONIZE_CACHE op.
377	 */
378	if (immed)
379		target_complete_cmd(cmd, SAM_STAT_GOOD);
380
381	bio = bio_alloc(GFP_KERNEL, 0);
382	bio->bi_end_io = iblock_end_io_flush;
383	bio_set_dev(bio, ib_dev->ibd_bd);
384	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
385	if (!immed)
386		bio->bi_private = cmd;
387	submit_bio(bio);
388	return 0;
389}
390
391static sense_reason_t
392iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
393{
394	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
395	struct se_device *dev = cmd->se_dev;
396	int ret;
397
398	ret = blkdev_issue_discard(bdev,
399				   target_to_linux_sector(dev, lba),
400				   target_to_linux_sector(dev, nolb),
401				   GFP_KERNEL, 0);
402	if (ret < 0) {
403		pr_err("blkdev_issue_discard() failed: %d\n", ret);
404		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
405	}
406
407	return 0;
408}
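/*
 * Unit conversion note: target_to_linux_sector() (target_core_backend.h)
 * rescales an LBA from the backend's block size to the 512-byte sectors
 * the block layer expects, roughly:
 *
 *	lba << (ilog2(dev->dev_attrib.block_size) - SECTOR_SHIFT)
 */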
409
410static sense_reason_t
411iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
412{
413	struct se_device *dev = cmd->se_dev;
414	struct scatterlist *sg = &cmd->t_data_sg[0];
415	unsigned char *buf, *not_zero;
416	int ret;
417
418	buf = kmap(sg_page(sg)) + sg->offset;
419	if (!buf)
420		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
421	/*
422	 * Fall back to block_execute_write_same() slow-path if
423	 * incoming WRITE_SAME payload does not contain zeros.
424	 */
425	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
426	kunmap(sg_page(sg));
427
428	if (not_zero)
429		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
430
431	ret = blkdev_issue_zeroout(bdev,
432				target_to_linux_sector(dev, cmd->t_task_lba),
433				target_to_linux_sector(dev,
434					sbc_get_write_same_sectors(cmd)),
435				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
436	if (ret)
437		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
438
439	target_complete_cmd(cmd, GOOD);
440	return 0;
441}
442
443static sense_reason_t
444iblock_execute_write_same(struct se_cmd *cmd)
445{
446	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
447	struct iblock_req *ibr;
448	struct scatterlist *sg;
449	struct bio *bio;
450	struct bio_list list;
451	struct se_device *dev = cmd->se_dev;
452	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
453	sector_t sectors = target_to_linux_sector(dev,
454					sbc_get_write_same_sectors(cmd));
455
456	if (cmd->prot_op) {
457		pr_err("WRITE_SAME: Protection information with IBLOCK"
458		       " backends not supported\n");
459		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
460	}
461	sg = &cmd->t_data_sg[0];
462
463	if (cmd->t_data_nents > 1 ||
464	    sg->length != cmd->se_dev->dev_attrib.block_size) {
465		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
466			" block_size: %u\n", cmd->t_data_nents, sg->length,
467			cmd->se_dev->dev_attrib.block_size);
468		return TCM_INVALID_CDB_FIELD;
469	}
470
471	if (bdev_write_zeroes_sectors(bdev)) {
472		if (!iblock_execute_zero_out(bdev, cmd))
473			return 0;
474	}
475
476	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
477	if (!ibr)
478		goto fail;
479	cmd->priv = ibr;
480
481	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
482	if (!bio)
483		goto fail_free_ibr;
484
485	bio_list_init(&list);
486	bio_list_add(&list, bio);
487
488	refcount_set(&ibr->pending, 1);
489
490	while (sectors) {
491		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
492				!= sg->length) {
493
494			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
495					     0);
496			if (!bio)
497				goto fail_put_bios;
498
499			refcount_inc(&ibr->pending);
500			bio_list_add(&list, bio);
501		}
502
503		/* Always in 512 byte units for Linux/Block */
504		block_lba += sg->length >> SECTOR_SHIFT;
505		sectors -= sg->length >> SECTOR_SHIFT;
506	}
507
508	iblock_submit_bios(&list);
509	return 0;
510
511fail_put_bios:
512	while ((bio = bio_list_pop(&list)))
513		bio_put(bio);
514fail_free_ibr:
515	kfree(ibr);
516fail:
517	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
518}
519
520enum {
521	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
522};
523
524static match_table_t tokens = {
525	{Opt_udev_path, "udev_path=%s"},
526	{Opt_readonly, "readonly=%d"},
527	{Opt_force, "force=%d"},
528	{Opt_err, NULL}
529};
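/*
 * These options arrive through the device's configfs "control" attribute.
 * An illustrative invocation from userspace (the HBA and device names are
 * whatever the administrator created) might look like:
 *
 *	echo "udev_path=/dev/sdb,readonly=0" > \
 *		/sys/kernel/config/target/core/iblock_0/mydev/control
 *	echo 1 > /sys/kernel/config/target/core/iblock_0/mydev/enable
 */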
530
531static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
532		const char *page, ssize_t count)
533{
534	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
535	char *orig, *ptr, *arg_p, *opts;
536	substring_t args[MAX_OPT_ARGS];
537	int ret = 0, token;
538	unsigned long tmp_readonly;
539
540	opts = kstrdup(page, GFP_KERNEL);
541	if (!opts)
542		return -ENOMEM;
543
544	orig = opts;
545
546	while ((ptr = strsep(&opts, ",\n")) != NULL) {
547		if (!*ptr)
548			continue;
549
550		token = match_token(ptr, tokens, args);
551		switch (token) {
552		case Opt_udev_path:
553			if (ib_dev->ibd_bd) {
554				pr_err("Unable to set udev_path= while"
555					" ib_dev->ibd_bd exists\n");
556				ret = -EEXIST;
557				goto out;
558			}
559			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
560				SE_UDEV_PATH_LEN) == 0) {
561				ret = -EINVAL;
562				break;
563			}
564			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
565					ib_dev->ibd_udev_path);
566			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
567			break;
568		case Opt_readonly:
569			arg_p = match_strdup(&args[0]);
570			if (!arg_p) {
571				ret = -ENOMEM;
572				break;
573			}
574			ret = kstrtoul(arg_p, 0, &tmp_readonly);
575			kfree(arg_p);
576			if (ret < 0) {
577				pr_err("kstrtoul() failed for"
578						" readonly=\n");
579				goto out;
580			}
581			ib_dev->ibd_readonly = tmp_readonly;
582			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
583			break;
584		case Opt_force:
585			break;
586		default:
587			break;
588		}
589	}
590
591out:
592	kfree(orig);
593	return (!ret) ? count : ret;
594}
595
596static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
597{
598	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
599	struct block_device *bd = ib_dev->ibd_bd;
600	char buf[BDEVNAME_SIZE];
601	ssize_t bl = 0;
602
603	if (bd)
604		bl += sprintf(b + bl, "iBlock device: %s",
605				bdevname(bd, buf));
606	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
607		bl += sprintf(b + bl, "  UDEV PATH: %s",
608				ib_dev->ibd_udev_path);
609	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);
610
611	bl += sprintf(b + bl, "        ");
612	if (bd) {
613		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
614			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
615			"" : (bd->bd_holder == ib_dev) ?
616			"CLAIMED: IBLOCK" : "CLAIMED: OS");
617	} else {
618		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
619	}
620
621	return bl;
622}
623
624static int
625iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
626		 struct sg_mapping_iter *miter)
627{
628	struct se_device *dev = cmd->se_dev;
629	struct blk_integrity *bi;
630	struct bio_integrity_payload *bip;
631	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
632	int rc;
633	size_t resid, len;
634
635	bi = bdev_get_integrity(ib_dev->ibd_bd);
636	if (!bi) {
637		pr_err("Unable to locate bio_integrity\n");
638		return -ENODEV;
639	}
640
641	bip = bio_integrity_alloc(bio, GFP_NOIO,
642			min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
643	if (IS_ERR(bip)) {
644		pr_err("Unable to allocate bio_integrity_payload\n");
645		return PTR_ERR(bip);
646	}
647
648	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
649	/* virtual start sector must be in integrity interval units */
650	bip_set_seed(bip, bio->bi_iter.bi_sector >>
651				  (bi->interval_exp - SECTOR_SHIFT));
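	/*
	 * Example: a 4096-byte integrity interval gives interval_exp == 12,
	 * so the 512-byte-unit sector above is scaled down by >> 3.
	 */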
652
653	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
654		 (unsigned long long)bip->bip_iter.bi_sector);
655
656	resid = bip->bip_iter.bi_size;
657	while (resid > 0 && sg_miter_next(miter)) {
658
659		len = min_t(size_t, miter->length, resid);
660		rc = bio_integrity_add_page(bio, miter->page, len,
661					    offset_in_page(miter->addr));
662		if (rc != len) {
663			pr_err("bio_integrity_add_page() failed; %d\n", rc);
664			sg_miter_stop(miter);
665			return -ENOMEM;
666		}
667
668		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
669			  miter->page, len, offset_in_page(miter->addr));
670
671		resid -= len;
672		if (len < miter->length)
673			miter->consumed -= miter->length - len;
674	}
675	sg_miter_stop(miter);
676
677	return 0;
678}
679
680static sense_reason_t
681iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
682		  enum dma_data_direction data_direction)
683{
684	struct se_device *dev = cmd->se_dev;
685	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
686	struct iblock_req *ibr;
687	struct bio *bio;
688	struct bio_list list;
689	struct scatterlist *sg;
690	u32 sg_num = sgl_nents;
691	unsigned bio_cnt;
692	int i, rc, op, op_flags = 0;
693	struct sg_mapping_iter prot_miter;
694
695	if (data_direction == DMA_TO_DEVICE) {
696		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
697		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
698		/*
699		 * Force writethrough using REQ_FUA if a volatile write cache
700		 * is not enabled, or if initiator set the Force Unit Access bit.
701		 */
702		op = REQ_OP_WRITE;
703		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
704			if (cmd->se_cmd_flags & SCF_FUA)
705				op_flags = REQ_FUA;
706			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
707				op_flags = REQ_FUA;
708		}
709	} else {
710		op = REQ_OP_READ;
711	}
712
713	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
714	if (!ibr)
715		goto fail;
716	cmd->priv = ibr;
717
718	if (!sgl_nents) {
719		refcount_set(&ibr->pending, 1);
720		iblock_complete_cmd(cmd);
721		return 0;
722	}
723
724	bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
725	if (!bio)
726		goto fail_free_ibr;
727
728	bio_list_init(&list);
729	bio_list_add(&list, bio);
730
731	refcount_set(&ibr->pending, 2);
732	bio_cnt = 1;
733
734	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
735		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
736			       op == REQ_OP_READ ? SG_MITER_FROM_SG :
737						   SG_MITER_TO_SG);
738
739	for_each_sg(sgl, sg, sgl_nents, i) {
740		/*
741		 * XXX: if the length the device accepts is shorter than the
742		 *	length of the S/G list entry this will cause and
743		 *	endless loop.  Better hope no driver uses huge pages.
744		 */
745		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
746				!= sg->length) {
747			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
748				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
749				if (rc)
750					goto fail_put_bios;
751			}
752
753			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
754				iblock_submit_bios(&list);
755				bio_cnt = 0;
756			}
757
758			bio = iblock_get_bio(cmd, block_lba, sg_num, op,
759					     op_flags);
760			if (!bio)
761				goto fail_put_bios;
762
763			refcount_inc(&ibr->pending);
764			bio_list_add(&list, bio);
765			bio_cnt++;
766		}
767
768		/* Always in 512 byte units for Linux/Block */
769		block_lba += sg->length >> SECTOR_SHIFT;
770		sg_num--;
771	}
772
773	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
774		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
775		if (rc)
776			goto fail_put_bios;
777	}
778
779	iblock_submit_bios(&list);
780	iblock_complete_cmd(cmd);
781	return 0;
782
783fail_put_bios:
784	while ((bio = bio_list_pop(&list)))
785		bio_put(bio);
786fail_free_ibr:
787	kfree(ibr);
788fail:
789	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
790}
791
792static sector_t iblock_get_blocks(struct se_device *dev)
793{
794	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
795	struct block_device *bd = ib_dev->ibd_bd;
796	struct request_queue *q = bdev_get_queue(bd);
797
798	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
799}
800
801static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
802{
803	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
804	struct block_device *bd = ib_dev->ibd_bd;
805	int ret;
806
807	ret = bdev_alignment_offset(bd);
808	if (ret == -1)
809		return 0;
810
811	/* convert offset-bytes to offset-lbas */
812	return ret / bdev_logical_block_size(bd);
813}
814
815static unsigned int iblock_get_lbppbe(struct se_device *dev)
816{
817	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
818	struct block_device *bd = ib_dev->ibd_bd;
819	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
820
821	return ilog2(logs_per_phys);
822}
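/*
 * Example: a 512e drive (4096-byte physical, 512-byte logical sectors)
 * gives logs_per_phys == 8, so the exponent reported above is 3.
 */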
823
824static unsigned int iblock_get_io_min(struct se_device *dev)
825{
826	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
827	struct block_device *bd = ib_dev->ibd_bd;
828
829	return bdev_io_min(bd);
830}
831
832static unsigned int iblock_get_io_opt(struct se_device *dev)
833{
834	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
835	struct block_device *bd = ib_dev->ibd_bd;
836
837	return bdev_io_opt(bd);
838}
839
840static struct sbc_ops iblock_sbc_ops = {
841	.execute_rw		= iblock_execute_rw,
842	.execute_sync_cache	= iblock_execute_sync_cache,
843	.execute_write_same	= iblock_execute_write_same,
844	.execute_unmap		= iblock_execute_unmap,
845};
846
847static sense_reason_t
848iblock_parse_cdb(struct se_cmd *cmd)
849{
850	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
851}
852
853static bool iblock_get_write_cache(struct se_device *dev)
854{
855	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
856	struct block_device *bd = ib_dev->ibd_bd;
857	struct request_queue *q = bdev_get_queue(bd);
858
859	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
860}
861
862static const struct target_backend_ops iblock_ops = {
863	.name			= "iblock",
864	.inquiry_prod		= "IBLOCK",
865	.inquiry_rev		= IBLOCK_VERSION,
866	.owner			= THIS_MODULE,
867	.attach_hba		= iblock_attach_hba,
868	.detach_hba		= iblock_detach_hba,
869	.alloc_device		= iblock_alloc_device,
870	.configure_device	= iblock_configure_device,
871	.destroy_device		= iblock_destroy_device,
872	.free_device		= iblock_free_device,
873	.parse_cdb		= iblock_parse_cdb,
874	.set_configfs_dev_params = iblock_set_configfs_dev_params,
875	.show_configfs_dev_params = iblock_show_configfs_dev_params,
876	.get_device_type	= sbc_get_device_type,
877	.get_blocks		= iblock_get_blocks,
878	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
879	.get_lbppbe		= iblock_get_lbppbe,
880	.get_io_min		= iblock_get_io_min,
881	.get_io_opt		= iblock_get_io_opt,
882	.get_write_cache	= iblock_get_write_cache,
883	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
884};
885
886static int __init iblock_module_init(void)
887{
888	return transport_backend_register(&iblock_ops);
889}
890
891static void __exit iblock_module_exit(void)
892{
893	target_backend_unregister(&iblock_ops);
894}
895
896MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
897MODULE_AUTHOR("nab@Linux-iSCSI.org");
898MODULE_LICENSE("GPL");
899
900module_init(iblock_module_init);
901module_exit(iblock_module_exit);
target_core_iblock.c (Linux v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*******************************************************************************
  3 * Filename:  target_core_iblock.c
  4 *
  5 * This file contains the Storage Engine  <-> Linux BlockIO transport
  6 * specific functions.
  7 *
  8 * (c) Copyright 2003-2013 Datera, Inc.
  9 *
 10 * Nicholas A. Bellinger <nab@kernel.org>
 11 *
 12 ******************************************************************************/
 13
 14#include <linux/string.h>
 15#include <linux/parser.h>
 16#include <linux/timer.h>
 17#include <linux/fs.h>
 18#include <linux/blkdev.h>
 19#include <linux/slab.h>
 20#include <linux/spinlock.h>
 21#include <linux/bio.h>
 22#include <linux/genhd.h>
 23#include <linux/file.h>
 24#include <linux/module.h>
 25#include <scsi/scsi_proto.h>
 26#include <asm/unaligned.h>
 27
 28#include <target/target_core_base.h>
 29#include <target/target_core_backend.h>
 30
 31#include "target_core_iblock.h"
 32
 33#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
 34#define IBLOCK_BIO_POOL_SIZE	128
 35
 36static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
 37{
 38	return container_of(dev, struct iblock_dev, dev);
 39}
 40
 41
 42static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
 43{
 44	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
 45		" Generic Target Core Stack %s\n", hba->hba_id,
 46		IBLOCK_VERSION, TARGET_CORE_VERSION);
 47	return 0;
 48}
 49
 50static void iblock_detach_hba(struct se_hba *hba)
 51{
 52}
 53
 54static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
 55{
 56	struct iblock_dev *ib_dev = NULL;
 57
 58	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
 59	if (!ib_dev) {
 60		pr_err("Unable to allocate struct iblock_dev\n");
 61		return NULL;
 62	}
 63
 64	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
 65				   GFP_KERNEL);
 66	if (!ib_dev->ibd_plug)
 67		goto free_dev;
 68
 69	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);
 70
 71	return &ib_dev->dev;
 72
 73free_dev:
 74	kfree(ib_dev);
 75	return NULL;
 76}
 77
 78static int iblock_configure_device(struct se_device *dev)
 79{
 80	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 81	struct request_queue *q;
 82	struct block_device *bd = NULL;
 83	struct blk_integrity *bi;
 84	fmode_t mode;
 85	unsigned int max_write_zeroes_sectors;
 86	int ret = -ENOMEM;
 87
 88	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
 89		pr_err("Missing udev_path= parameters for IBLOCK\n");
 90		return -EINVAL;
 91	}
 92
 93	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 94	if (ret) {
 95		pr_err("IBLOCK: Unable to create bioset\n");
 96		goto out;
 97	}
 98
 99	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
100			ib_dev->ibd_udev_path);
101
102	mode = FMODE_READ|FMODE_EXCL;
103	if (!ib_dev->ibd_readonly)
104		mode |= FMODE_WRITE;
105	else
106		dev->dev_flags |= DF_READ_ONLY;
107
108	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
109	if (IS_ERR(bd)) {
110		ret = PTR_ERR(bd);
111		goto out_free_bioset;
112	}
113	ib_dev->ibd_bd = bd;
114
115	q = bdev_get_queue(bd);
116
117	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
118	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
119	dev->dev_attrib.hw_queue_depth = q->nr_requests;
120
121	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
122		pr_debug("IBLOCK: BLOCK Discard support available,"
123			 " disabled by default\n");
124
125	/*
126	 * Enable write same emulation for IBLOCK and use 0xFFFF as
127	 * the smaller WRITE_SAME(10) only has a two-byte block count.
128	 */
129	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
130	if (max_write_zeroes_sectors)
131		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
132	else
133		dev->dev_attrib.max_write_same_len = 0xFFFF;
134
135	if (blk_queue_nonrot(q))
136		dev->dev_attrib.is_nonrot = 1;
137
138	bi = bdev_get_integrity(bd);
139	if (bi) {
140		struct bio_set *bs = &ib_dev->ibd_bio_set;
141
142		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
143		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
144			pr_err("IBLOCK export of blk_integrity: %s not"
145			       " supported\n", bi->profile->name);
146			ret = -ENOSYS;
147			goto out_blkdev_put;
148		}
149
150		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
151			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
152		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
153			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
154		}
155
156		if (dev->dev_attrib.pi_prot_type) {
157			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
158				pr_err("Unable to allocate bioset for PI\n");
159				ret = -ENOMEM;
160				goto out_blkdev_put;
161			}
162			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
163				 &bs->bio_integrity_pool);
164		}
165		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
166	}
167
168	return 0;
169
170out_blkdev_put:
171	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
172out_free_bioset:
173	bioset_exit(&ib_dev->ibd_bio_set);
174out:
175	return ret;
176}
177
178static void iblock_dev_call_rcu(struct rcu_head *p)
179{
180	struct se_device *dev = container_of(p, struct se_device, rcu_head);
181	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
182
183	kfree(ib_dev->ibd_plug);
184	kfree(ib_dev);
185}
186
187static void iblock_free_device(struct se_device *dev)
188{
189	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
190}
191
192static void iblock_destroy_device(struct se_device *dev)
193{
194	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
195
196	if (ib_dev->ibd_bd != NULL)
197		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
198	bioset_exit(&ib_dev->ibd_bio_set);
199}
200
201static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
202{
203	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
204	struct iblock_dev_plug *ib_dev_plug;
205
206	/*
207	 * Each se_device has a per cpu work this can be run from. We
208	 * shouldn't have multiple threads on the same cpu calling this
209	 * at the same time.
210	 */
211	ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
212	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
213		return NULL;
214
215	blk_start_plug(&ib_dev_plug->blk_plug);
216	return &ib_dev_plug->se_plug;
217}
218
219static void iblock_unplug_device(struct se_dev_plug *se_plug)
220{
221	struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
222					struct iblock_dev_plug, se_plug);
223
224	blk_finish_plug(&ib_dev_plug->blk_plug);
225	clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
226}
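/*
 * Rough usage sketch (assumed caller behaviour, not shown in this file):
 * the target core takes the per-cpu plug before pushing a batch of
 * commands at the backend and releases it afterwards, e.g.
 *
 *	struct se_dev_plug *se_plug = se_dev->transport->plug_device(se_dev);
 *
 *	...queue several commands, each submitting its bios...
 *
 *	if (se_plug)
 *		se_dev->transport->unplug_device(se_plug);
 */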
227
228static unsigned long long iblock_emulate_read_cap_with_block_size(
229	struct se_device *dev,
230	struct block_device *bd,
231	struct request_queue *q)
232{
233	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
234					bdev_logical_block_size(bd)) - 1);
235	u32 block_size = bdev_logical_block_size(bd);
236
237	if (block_size == dev->dev_attrib.block_size)
238		return blocks_long;
239
240	switch (block_size) {
241	case 4096:
242		switch (dev->dev_attrib.block_size) {
243		case 2048:
244			blocks_long <<= 1;
245			break;
246		case 1024:
247			blocks_long <<= 2;
248			break;
249		case 512:
250			blocks_long <<= 3;
251			break;
252		default:
253			break;
254		}
255		break;
256	case 2048:
257		switch (dev->dev_attrib.block_size) {
258		case 4096:
259			blocks_long >>= 1;
260			break;
261		case 1024:
262			blocks_long <<= 1;
263			break;
264		case 512:
265			blocks_long <<= 2;
266			break;
267		default:
268			break;
269		}
270		break;
271	case 1024:
272		switch (dev->dev_attrib.block_size) {
273		case 4096:
274			blocks_long >>= 2;
275			break;
276		case 2048:
277			blocks_long >>= 1;
278			break;
279		case 512:
280			blocks_long <<= 1;
281			break;
282		default:
283			break;
284		}
285		break;
286	case 512:
287		switch (dev->dev_attrib.block_size) {
288		case 4096:
289			blocks_long >>= 3;
290			break;
291		case 2048:
292			blocks_long >>= 2;
293			break;
294		case 1024:
295			blocks_long >>= 1;
296			break;
297		default:
298			break;
299		}
300		break;
301	default:
302		break;
303	}
304
305	return blocks_long;
306}
307
308static void iblock_complete_cmd(struct se_cmd *cmd)
309{
310	struct iblock_req *ibr = cmd->priv;
311	u8 status;
312
313	if (!refcount_dec_and_test(&ibr->pending))
314		return;
315
316	if (atomic_read(&ibr->ib_bio_err_cnt))
317		status = SAM_STAT_CHECK_CONDITION;
318	else
319		status = SAM_STAT_GOOD;
320
321	target_complete_cmd(cmd, status);
322	kfree(ibr);
323}
324
325static void iblock_bio_done(struct bio *bio)
326{
327	struct se_cmd *cmd = bio->bi_private;
328	struct iblock_req *ibr = cmd->priv;
329
330	if (bio->bi_status) {
331		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
332		/*
333		 * Bump the ib_bio_err_cnt and release bio.
334		 */
335		atomic_inc(&ibr->ib_bio_err_cnt);
336		smp_mb__after_atomic();
337	}
338
339	bio_put(bio);
340
341	iblock_complete_cmd(cmd);
342}
343
344static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
345				  unsigned int opf)
346{
347	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
348	struct bio *bio;
349
350	/*
351	 * Only allocate as many vector entries as the bio code allows us to,
352	 * we'll loop later on until we have handled the whole request.
353	 */
354	bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
355				&ib_dev->ibd_bio_set);
356	if (!bio) {
357		pr_err("Unable to allocate memory for bio\n");
358		return NULL;
359	}
360
361	bio_set_dev(bio, ib_dev->ibd_bd);
362	bio->bi_private = cmd;
363	bio->bi_end_io = &iblock_bio_done;
364	bio->bi_iter.bi_sector = lba;
365	bio->bi_opf = opf;
366
367	return bio;
368}
369
370static void iblock_submit_bios(struct bio_list *list)
371{
372	struct blk_plug plug;
373	struct bio *bio;
374	/*
375	 * The block layer handles nested plugs, so just plug/unplug to handle
376	 * fabric drivers that didn't support batching and multi bio cmds.
377	 */
378	blk_start_plug(&plug);
379	while ((bio = bio_list_pop(list)))
380		submit_bio(bio);
381	blk_finish_plug(&plug);
382}
383
384static void iblock_end_io_flush(struct bio *bio)
385{
386	struct se_cmd *cmd = bio->bi_private;
387
388	if (bio->bi_status)
389		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
390
391	if (cmd) {
392		if (bio->bi_status)
393			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
394		else
395			target_complete_cmd(cmd, SAM_STAT_GOOD);
396	}
397
398	bio_put(bio);
399}
400
401/*
402 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
403 * always flush the whole cache.
404 */
405static sense_reason_t
406iblock_execute_sync_cache(struct se_cmd *cmd)
407{
408	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
409	int immed = (cmd->t_task_cdb[1] & 0x2);
410	struct bio *bio;
411
412	/*
413	 * If the Immediate bit is set, queue up the GOOD response
414	 * for this SYNCHRONIZE_CACHE op.
415	 */
416	if (immed)
417		target_complete_cmd(cmd, SAM_STAT_GOOD);
418
419	bio = bio_alloc(GFP_KERNEL, 0);
420	bio->bi_end_io = iblock_end_io_flush;
421	bio_set_dev(bio, ib_dev->ibd_bd);
422	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
423	if (!immed)
424		bio->bi_private = cmd;
425	submit_bio(bio);
426	return 0;
427}
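/*
 * Note on the IMMED path above: when the command was already completed,
 * bi_private stays NULL, so iblock_end_io_flush() only logs a failed
 * flush instead of completing the command a second time.
 */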
428
429static sense_reason_t
430iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
431{
432	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
433	struct se_device *dev = cmd->se_dev;
434	int ret;
435
436	ret = blkdev_issue_discard(bdev,
437				   target_to_linux_sector(dev, lba),
438				   target_to_linux_sector(dev, nolb),
439				   GFP_KERNEL, 0);
440	if (ret < 0) {
441		pr_err("blkdev_issue_discard() failed: %d\n", ret);
442		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
443	}
444
445	return 0;
446}
447
448static sense_reason_t
449iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
450{
451	struct se_device *dev = cmd->se_dev;
452	struct scatterlist *sg = &cmd->t_data_sg[0];
453	unsigned char *buf, *not_zero;
454	int ret;
455
456	buf = kmap(sg_page(sg)) + sg->offset;
457	if (!buf)
458		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
459	/*
460	 * Fall back to block_execute_write_same() slow-path if
461	 * incoming WRITE_SAME payload does not contain zeros.
462	 */
463	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
464	kunmap(sg_page(sg));
465
466	if (not_zero)
467		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
468
469	ret = blkdev_issue_zeroout(bdev,
470				target_to_linux_sector(dev, cmd->t_task_lba),
471				target_to_linux_sector(dev,
472					sbc_get_write_same_sectors(cmd)),
473				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
474	if (ret)
475		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
476
477	target_complete_cmd(cmd, SAM_STAT_GOOD);
478	return 0;
479}
480
481static sense_reason_t
482iblock_execute_write_same(struct se_cmd *cmd)
483{
484	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
485	struct iblock_req *ibr;
486	struct scatterlist *sg;
487	struct bio *bio;
488	struct bio_list list;
489	struct se_device *dev = cmd->se_dev;
490	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
491	sector_t sectors = target_to_linux_sector(dev,
492					sbc_get_write_same_sectors(cmd));
493
494	if (cmd->prot_op) {
495		pr_err("WRITE_SAME: Protection information with IBLOCK"
496		       " backends not supported\n");
497		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
498	}
499	sg = &cmd->t_data_sg[0];
500
501	if (cmd->t_data_nents > 1 ||
502	    sg->length != cmd->se_dev->dev_attrib.block_size) {
503		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
504			" block_size: %u\n", cmd->t_data_nents, sg->length,
505			cmd->se_dev->dev_attrib.block_size);
506		return TCM_INVALID_CDB_FIELD;
507	}
508
509	if (bdev_write_zeroes_sectors(bdev)) {
510		if (!iblock_execute_zero_out(bdev, cmd))
511			return 0;
512	}
513
514	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
515	if (!ibr)
516		goto fail;
517	cmd->priv = ibr;
518
519	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
520	if (!bio)
521		goto fail_free_ibr;
522
523	bio_list_init(&list);
524	bio_list_add(&list, bio);
525
526	refcount_set(&ibr->pending, 1);
527
528	while (sectors) {
529		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
530				!= sg->length) {
531
532			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
533			if (!bio)
534				goto fail_put_bios;
535
536			refcount_inc(&ibr->pending);
537			bio_list_add(&list, bio);
538		}
539
540		/* Always in 512 byte units for Linux/Block */
541		block_lba += sg->length >> SECTOR_SHIFT;
542		sectors -= sg->length >> SECTOR_SHIFT;
543	}
544
545	iblock_submit_bios(&list);
546	return 0;
547
548fail_put_bios:
549	while ((bio = bio_list_pop(&list)))
550		bio_put(bio);
551fail_free_ibr:
552	kfree(ibr);
553fail:
554	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
555}
556
557enum {
558	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
559};
560
561static match_table_t tokens = {
562	{Opt_udev_path, "udev_path=%s"},
563	{Opt_readonly, "readonly=%d"},
564	{Opt_force, "force=%d"},
565	{Opt_err, NULL}
566};
567
568static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
569		const char *page, ssize_t count)
570{
571	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
572	char *orig, *ptr, *arg_p, *opts;
573	substring_t args[MAX_OPT_ARGS];
574	int ret = 0, token;
575	unsigned long tmp_readonly;
576
577	opts = kstrdup(page, GFP_KERNEL);
578	if (!opts)
579		return -ENOMEM;
580
581	orig = opts;
582
583	while ((ptr = strsep(&opts, ",\n")) != NULL) {
584		if (!*ptr)
585			continue;
586
587		token = match_token(ptr, tokens, args);
588		switch (token) {
589		case Opt_udev_path:
590			if (ib_dev->ibd_bd) {
591				pr_err("Unable to set udev_path= while"
592					" ib_dev->ibd_bd exists\n");
593				ret = -EEXIST;
594				goto out;
595			}
596			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
597				SE_UDEV_PATH_LEN) == 0) {
598				ret = -EINVAL;
599				break;
600			}
601			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
602					ib_dev->ibd_udev_path);
603			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
604			break;
605		case Opt_readonly:
606			arg_p = match_strdup(&args[0]);
607			if (!arg_p) {
608				ret = -ENOMEM;
609				break;
610			}
611			ret = kstrtoul(arg_p, 0, &tmp_readonly);
612			kfree(arg_p);
613			if (ret < 0) {
614				pr_err("kstrtoul() failed for"
615						" readonly=\n");
616				goto out;
617			}
618			ib_dev->ibd_readonly = tmp_readonly;
619			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
620			break;
621		case Opt_force:
622			break;
623		default:
624			break;
625		}
626	}
627
628out:
629	kfree(orig);
630	return (!ret) ? count : ret;
631}
632
633static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
634{
635	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
636	struct block_device *bd = ib_dev->ibd_bd;
637	char buf[BDEVNAME_SIZE];
638	ssize_t bl = 0;
639
640	if (bd)
641		bl += sprintf(b + bl, "iBlock device: %s",
642				bdevname(bd, buf));
643	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
644		bl += sprintf(b + bl, "  UDEV PATH: %s",
645				ib_dev->ibd_udev_path);
646	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);
647
648	bl += sprintf(b + bl, "        ");
649	if (bd) {
650		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
651			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
652			"CLAIMED: IBLOCK");
653	} else {
654		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
655	}
656
657	return bl;
658}
659
660static int
661iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
662		 struct sg_mapping_iter *miter)
663{
664	struct se_device *dev = cmd->se_dev;
665	struct blk_integrity *bi;
666	struct bio_integrity_payload *bip;
667	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
668	int rc;
669	size_t resid, len;
670
671	bi = bdev_get_integrity(ib_dev->ibd_bd);
672	if (!bi) {
673		pr_err("Unable to locate bio_integrity\n");
674		return -ENODEV;
675	}
676
677	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
678	if (IS_ERR(bip)) {
679		pr_err("Unable to allocate bio_integrity_payload\n");
680		return PTR_ERR(bip);
681	}
682
683	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
684	/* virtual start sector must be in integrity interval units */
685	bip_set_seed(bip, bio->bi_iter.bi_sector >>
686				  (bi->interval_exp - SECTOR_SHIFT));
687
688	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
689		 (unsigned long long)bip->bip_iter.bi_sector);
690
691	resid = bip->bip_iter.bi_size;
692	while (resid > 0 && sg_miter_next(miter)) {
693
694		len = min_t(size_t, miter->length, resid);
695		rc = bio_integrity_add_page(bio, miter->page, len,
696					    offset_in_page(miter->addr));
697		if (rc != len) {
698			pr_err("bio_integrity_add_page() failed; %d\n", rc);
699			sg_miter_stop(miter);
700			return -ENOMEM;
701		}
702
703		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
704			  miter->page, len, offset_in_page(miter->addr));
705
706		resid -= len;
707		if (len < miter->length)
708			miter->consumed -= miter->length - len;
709	}
710	sg_miter_stop(miter);
711
712	return 0;
713}
714
715static sense_reason_t
716iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
717		  enum dma_data_direction data_direction)
718{
719	struct se_device *dev = cmd->se_dev;
720	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
721	struct iblock_req *ibr;
722	struct bio *bio;
723	struct bio_list list;
724	struct scatterlist *sg;
725	u32 sg_num = sgl_nents;
726	unsigned int opf;
727	unsigned bio_cnt;
728	int i, rc;
729	struct sg_mapping_iter prot_miter;
730	unsigned int miter_dir;
731
732	if (data_direction == DMA_TO_DEVICE) {
733		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
734		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
735		/*
736		 * Force writethrough using REQ_FUA if a volatile write cache
737		 * is not enabled, or if initiator set the Force Unit Access bit.
738		 */
739		opf = REQ_OP_WRITE;
740		miter_dir = SG_MITER_TO_SG;
741		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
742			if (cmd->se_cmd_flags & SCF_FUA)
743				opf |= REQ_FUA;
744			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
745				opf |= REQ_FUA;
746		}
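		/*
		 * Net effect: REQ_FUA is set only when the queue supports
		 * FUA and either the initiator asked for Force Unit Access
		 * or there is no volatile write cache to rely on.
		 */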
747	} else {
748		opf = REQ_OP_READ;
749		miter_dir = SG_MITER_FROM_SG;
750	}
751
752	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
753	if (!ibr)
754		goto fail;
755	cmd->priv = ibr;
756
757	if (!sgl_nents) {
758		refcount_set(&ibr->pending, 1);
759		iblock_complete_cmd(cmd);
760		return 0;
761	}
762
763	bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
764	if (!bio)
765		goto fail_free_ibr;
766
767	bio_list_init(&list);
768	bio_list_add(&list, bio);
769
770	refcount_set(&ibr->pending, 2);
771	bio_cnt = 1;
772
773	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
774		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
775			       miter_dir);
776
777	for_each_sg(sgl, sg, sgl_nents, i) {
778		/*
779		 * XXX: if the length the device accepts is shorter than the
780 *	length of the S/G list entry this will cause an
781		 *	endless loop.  Better hope no driver uses huge pages.
782		 */
783		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
784				!= sg->length) {
785			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
786				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
787				if (rc)
788					goto fail_put_bios;
789			}
790
791			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
792				iblock_submit_bios(&list);
793				bio_cnt = 0;
794			}
795
796			bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
797			if (!bio)
798				goto fail_put_bios;
799
800			refcount_inc(&ibr->pending);
801			bio_list_add(&list, bio);
802			bio_cnt++;
803		}
804
805		/* Always in 512 byte units for Linux/Block */
806		block_lba += sg->length >> SECTOR_SHIFT;
807		sg_num--;
808	}
809
810	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
811		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
812		if (rc)
813			goto fail_put_bios;
814	}
815
816	iblock_submit_bios(&list);
817	iblock_complete_cmd(cmd);
818	return 0;
819
820fail_put_bios:
821	while ((bio = bio_list_pop(&list)))
822		bio_put(bio);
823fail_free_ibr:
824	kfree(ibr);
825fail:
826	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
827}
828
829static sector_t iblock_get_blocks(struct se_device *dev)
830{
831	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
832	struct block_device *bd = ib_dev->ibd_bd;
833	struct request_queue *q = bdev_get_queue(bd);
834
835	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
836}
837
838static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
839{
840	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
841	struct block_device *bd = ib_dev->ibd_bd;
842	int ret;
843
844	ret = bdev_alignment_offset(bd);
845	if (ret == -1)
846		return 0;
847
848	/* convert offset-bytes to offset-lbas */
849	return ret / bdev_logical_block_size(bd);
850}
851
852static unsigned int iblock_get_lbppbe(struct se_device *dev)
853{
854	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
855	struct block_device *bd = ib_dev->ibd_bd;
856	unsigned int logs_per_phys =
857		bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
858
859	return ilog2(logs_per_phys);
860}
861
862static unsigned int iblock_get_io_min(struct se_device *dev)
863{
864	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
865	struct block_device *bd = ib_dev->ibd_bd;
866
867	return bdev_io_min(bd);
868}
869
870static unsigned int iblock_get_io_opt(struct se_device *dev)
871{
872	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
873	struct block_device *bd = ib_dev->ibd_bd;
874
875	return bdev_io_opt(bd);
876}
877
878static struct sbc_ops iblock_sbc_ops = {
879	.execute_rw		= iblock_execute_rw,
880	.execute_sync_cache	= iblock_execute_sync_cache,
881	.execute_write_same	= iblock_execute_write_same,
882	.execute_unmap		= iblock_execute_unmap,
883};
884
885static sense_reason_t
886iblock_parse_cdb(struct se_cmd *cmd)
887{
888	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
889}
890
891static bool iblock_get_write_cache(struct se_device *dev)
892{
893	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
894	struct block_device *bd = ib_dev->ibd_bd;
895	struct request_queue *q = bdev_get_queue(bd);
896
897	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
898}
899
900static const struct target_backend_ops iblock_ops = {
901	.name			= "iblock",
902	.inquiry_prod		= "IBLOCK",
903	.inquiry_rev		= IBLOCK_VERSION,
904	.owner			= THIS_MODULE,
905	.attach_hba		= iblock_attach_hba,
906	.detach_hba		= iblock_detach_hba,
907	.alloc_device		= iblock_alloc_device,
908	.configure_device	= iblock_configure_device,
909	.destroy_device		= iblock_destroy_device,
910	.free_device		= iblock_free_device,
911	.plug_device		= iblock_plug_device,
912	.unplug_device		= iblock_unplug_device,
913	.parse_cdb		= iblock_parse_cdb,
914	.set_configfs_dev_params = iblock_set_configfs_dev_params,
915	.show_configfs_dev_params = iblock_show_configfs_dev_params,
916	.get_device_type	= sbc_get_device_type,
917	.get_blocks		= iblock_get_blocks,
918	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
919	.get_lbppbe		= iblock_get_lbppbe,
920	.get_io_min		= iblock_get_io_min,
921	.get_io_opt		= iblock_get_io_opt,
922	.get_write_cache	= iblock_get_write_cache,
923	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
924};
925
926static int __init iblock_module_init(void)
927{
928	return transport_backend_register(&iblock_ops);
929}
930
931static void __exit iblock_module_exit(void)
932{
933	target_backend_unregister(&iblock_ops);
934}
935
936MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
937MODULE_AUTHOR("nab@Linux-iSCSI.org");
938MODULE_LICENSE("GPL");
939
940module_init(iblock_module_init);
941module_exit(iblock_module_exit);