v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine  <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

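/*
 * struct se_device is embedded inside struct iblock_dev, so the target
 * core can hand back the generic se_device and the backend recovers its
 * private state with container_of().
 */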
static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

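/*
 * Open and exclusively claim the block device named by udev_path= (with
 * ib_dev as the holder), then mirror its queue limits into the se_device
 * attributes.  Only the T10 DIF CRC integrity profiles are exported;
 * the IP-checksum profiles are rejected below.
 */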
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	unsigned int max_write_zeroes_sectors;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
	if (max_write_zeroes_sectors)
		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
	else
		dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = &ib_dev->ibd_bio_set;

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->profile->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 &bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_exit(&ib_dev->ibd_bio_set);
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	bioset_exit(&ib_dev->ibd_bio_set);
}

static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

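/*
 * Completion path.  A command may fan out into several bios; ibr->pending
 * counts them (plus one reference held by the submitter), and only the
 * final drop reports SAM status back to the target core.  Any bio error
 * is latched in ib_bio_err_cnt and turns the result into CHECK CONDITION.
 */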
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!refcount_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (bio->bi_status) {
		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

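/*
 * All data bios are allocated out of the per-device bioset set up in
 * iblock_configure_device(); the mempool behind it plus GFP_NOIO keeps
 * the submission path making forward progress under memory pressure.
 */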
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
	       int op_flags)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio_set_dev(bio, ib_dev->ibd_bd);
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;
	bio_set_op_attrs(bio, op, op_flags);

	return bio;
}

static void iblock_submit_bios(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_status)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

	if (cmd) {
		if (bio->bi_status)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio_set_dev(bio, ib_dev->ibd_bd);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(bio);
	return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct se_device *dev = cmd->se_dev;
	int ret;

	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

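/*
 * Fast path for WRITE SAME whose single-block payload is all zeros:
 * rather than cloning that block into a bio per LBA, hand the whole
 * range to blkdev_issue_zeroout() and let the block layer / device
 * zero it.  A non-zero payload falls back to the bio-based loop below.
 */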
static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *sg = &cmd->t_data_sg[0];
	unsigned char *buf, *not_zero;
	int ret;

	buf = kmap(sg_page(sg)) + sg->offset;
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	/*
	 * Fall back to block_execute_write_same() slow-path if
	 * incoming WRITE_SAME payload does not contain zeros.
	 */
	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
	kunmap(sg_page(sg));

	if (not_zero)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = blkdev_issue_zeroout(bdev,
				target_to_linux_sector(dev, cmd->t_task_lba),
				target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd)),
				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
	if (ret)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, GOOD);
	return 0;
}

static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	if (bdev_write_zeroes_sectors(bdev)) {
		if (!iblock_execute_zero_out(bdev, cmd))
			return 0;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
					     0);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sectors -= sg->length >> SECTOR_SHIFT;
	}

	iblock_submit_bios(&list);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

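/*
 * Backstore parameters arrive through configfs as "key=value" pairs.
 * A hypothetical sketch of driving this by hand (targetcli normally does
 * this for you; the device name and udev path below are illustrative only):
 *
 *   mkdir -p /sys/kernel/config/target/core/iblock_0/my_disk
 *   echo "udev_path=/dev/sdb,readonly=0" > \
 *       /sys/kernel/config/target/core/iblock_0/my_disk/control
 *   echo 1 > /sys/kernel/config/target/core/iblock_0/my_disk/enable
 */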
enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

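/*
 * Attach a bio_integrity_payload carrying the command's protection
 * information to one bio.  The caller owns the sg_mapping_iter and it
 * persists across calls, so when a command spans several bios each one
 * picks up exactly the PI bytes that belong to its data sectors.
 */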
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
		 struct sg_mapping_iter *miter)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO,
			min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
				  (bi->interval_exp - SECTOR_SHIFT));

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	resid = bip->bip_iter.bi_size;
	while (resid > 0 && sg_miter_next(miter)) {

		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (rc != len) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
			  miter->page, len, offset_in_page(miter->addr));

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}

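/*
 * Main READ/WRITE path.  ibr->pending starts at 2: one reference for the
 * first bio and one held by this function so the command cannot complete
 * while further bios are still being chained; the iblock_complete_cmd()
 * call at the end drops the extra reference.  For writes, REQ_FUA is set
 * when the queue supports FUA and either the initiator sent the SCSI FUA
 * bit or there is no volatile write cache to rely on.
 */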
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	unsigned bio_cnt;
	int i, rc, op, op_flags = 0;
	struct sg_mapping_iter prot_miter;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using REQ_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		op = REQ_OP_WRITE;
		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
			if (cmd->se_cmd_flags & SCF_FUA)
				op_flags = REQ_FUA;
			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
				op_flags = REQ_FUA;
		}
	} else {
		op = REQ_OP_READ;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		refcount_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 2);
	bio_cnt = 1;

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
			       op == REQ_OP_READ ? SG_MITER_FROM_SG :
						   SG_MITER_TO_SG);

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
				if (rc)
					goto fail_put_bios;
			}

			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num, op,
					     op_flags);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}

static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.destroy_device		= iblock_destroy_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);
v4.6
 
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine  <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
					      dev->dev_attrib.hw_block_size))
		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = ib_dev->ibd_bio_set;

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->profile->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);

	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (bio->bi_error) {
		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_error);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}

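/*
 * In this v4.6 tree the data direction and flags still travel in the
 * rw argument of submit_bio(); the v5.9 copy of this file above encodes
 * them in bio->bi_opf via bio_set_op_attrs() instead.
 */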
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_error)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);

	if (cmd) {
		if (bio->bi_error)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct se_device *dev = cmd->se_dev;
	int ret;

	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

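/*
 * This v4.6 tree pushes WRITE SAME down as a real WRITE SAME request
 * via blkdev_issue_write_same(); the v5.9 copy of this file above
 * replaced it with the zero-detecting blkdev_issue_zeroout() path.
 */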
static sense_reason_t
iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *sg = &cmd->t_data_sg[0];
	struct page *page = NULL;
	int ret;

	if (sg->offset) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return TCM_OUT_OF_RESOURCES;
		sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page),
				  dev->dev_attrib.block_size);
	}

	ret = blkdev_issue_write_same(bdev,
				target_to_linux_sector(dev, cmd->t_task_lba),
				target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd)),
				GFP_KERNEL, page ? page : sg_page(sg));
	if (page)
		__free_page(page);
	if (ret)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, GOOD);
	return 0;
}

static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	if (bdev_write_same(bdev))
		return iblock_execute_write_same_direct(bdev, cmd);

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct scatterlist *sg;
	int i, rc;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
			 dev->prot_length;
	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {

		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
					    sg->offset);
		if (rc != sg->length) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %d offset: %d\n",
			 sg_page(sg), sg->length, sg->offset);
	}

	return 0;
}

static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	struct iblock_req *ibr;
	struct bio *bio, *bio_start;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	unsigned bio_cnt;
	int rw = 0;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using WRITE_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		if (q->flush_flags & REQ_FUA) {
			if (cmd->se_cmd_flags & SCF_FUA)
				rw = WRITE_FUA;
			else if (!(q->flush_flags & REQ_FLUSH))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else {
			rw = WRITE;
		}
	} else {
		rw = READ;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_start = bio;
	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		int rc = iblock_alloc_bip(cmd, bio_start);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return q->flush_flags & REQ_FLUSH;
}

static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);