v3.1: drivers/target/target_core_file.c
  1/*******************************************************************************
  2 * Filename:  target_core_file.c
  3 *
  4 * This file contains the Storage Engine <-> FILEIO transport specific functions
  5 *
  6 * Copyright (c) 2005 PyX Technologies, Inc.
  7 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
  8 * Copyright (c) 2007-2010 Rising Tide Systems
  9 * Copyright (c) 2008-2010 Linux-iSCSI.org
 10 *
 11 * Nicholas A. Bellinger <nab@kernel.org>
 12 *
 13 * This program is free software; you can redistribute it and/or modify
 14 * it under the terms of the GNU General Public License as published by
 15 * the Free Software Foundation; either version 2 of the License, or
 16 * (at your option) any later version.
 17 *
 18 * This program is distributed in the hope that it will be useful,
 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 21 * GNU General Public License for more details.
 22 *
 23 * You should have received a copy of the GNU General Public License
 24 * along with this program; if not, write to the Free Software
 25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 26 *
 27 ******************************************************************************/
 28
 29#include <linux/version.h>
 30#include <linux/string.h>
 31#include <linux/parser.h>
 32#include <linux/timer.h>
 33#include <linux/blkdev.h>
 34#include <linux/slab.h>
 35#include <linux/spinlock.h>
 36#include <scsi/scsi.h>
 37#include <scsi/scsi_host.h>
 38
 39#include <target/target_core_base.h>
 40#include <target/target_core_device.h>
 41#include <target/target_core_transport.h>
 42
 43#include "target_core_file.h"
 44
 45static struct se_subsystem_api fileio_template;
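/*
 * Forward declaration: the template itself is defined at the bottom of
 * this file, but fd_create_virtdevice() needs its address when
 * registering a device via transport_add_device_to_core_hba().
 */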
 46
 47/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
 48 *
 49 *
 50 */
 51static int fd_attach_hba(struct se_hba *hba, u32 host_id)
 52{
 53	struct fd_host *fd_host;
 54
 55	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
 56	if (!fd_host) {
 57		pr_err("Unable to allocate memory for struct fd_host\n");
 58		return -ENOMEM;
 59	}
 60
 61	fd_host->fd_host_id = host_id;
 62
 63	hba->hba_ptr = fd_host;
 64
 65	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
 66		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
 67		TARGET_CORE_MOD_VERSION);
 68	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
 69		" MaxSectors: %u\n",
 70		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
 71
 72	return 0;
 73}
 74
 75static void fd_detach_hba(struct se_hba *hba)
 76{
 77	struct fd_host *fd_host = hba->hba_ptr;
 78
 79	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
 80		" Target Core\n", hba->hba_id, fd_host->fd_host_id);
 81
 82	kfree(fd_host);
 83	hba->hba_ptr = NULL;
 84}
 85
 86static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
 87{
 88	struct fd_dev *fd_dev;
 89	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
 90
 91	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
 92	if (!fd_dev) {
 93		pr_err("Unable to allocate memory for struct fd_dev\n");
 94		return NULL;
 95	}
 96
 97	fd_dev->fd_host = fd_host;
 98
 99	pr_debug("FILEIO: Allocated fd_dev for %s\n", name);
100
101	return fd_dev;
102}
103
104/*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
105 *
106 *
107 */
108static struct se_device *fd_create_virtdevice(
109	struct se_hba *hba,
110	struct se_subsystem_dev *se_dev,
111	void *p)
112{
113	char *dev_p = NULL;
114	struct se_device *dev;
115	struct se_dev_limits dev_limits;
116	struct queue_limits *limits;
117	struct fd_dev *fd_dev = (struct fd_dev *) p;
118	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
119	mm_segment_t old_fs;
120	struct file *file;
121	struct inode *inode = NULL;
122	int dev_flags = 0, flags, ret = -EINVAL;
123
124	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
125
126	old_fs = get_fs();
127	set_fs(get_ds());
128	dev_p = getname(fd_dev->fd_dev_name);
129	set_fs(old_fs);
130
131	if (IS_ERR(dev_p)) {
132		pr_err("getname(%s) failed: %ld\n",
133			fd_dev->fd_dev_name, PTR_ERR(dev_p));
134		ret = PTR_ERR(dev_p);
135		goto fail;
136	}
137#if 0
138	if (di->no_create_file)
139		flags = O_RDWR | O_LARGEFILE;
140	else
141		flags = O_RDWR | O_CREAT | O_LARGEFILE;
142#else
143	flags = O_RDWR | O_CREAT | O_LARGEFILE;
144#endif
145/*	flags |= O_DIRECT; */
146	/*
147	 * If fd_buffered_io=1 has not been set explicitly (the default),
148	 * use O_SYNC to force FILEIO writes to disk.
149	 */
150	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
151		flags |= O_SYNC;
152
153	file = filp_open(dev_p, flags, 0600);
154	if (IS_ERR(file)) {
155		pr_err("filp_open(%s) failed\n", dev_p);
156		ret = PTR_ERR(file);
157		goto fail;
158	}
159	if (!file || !file->f_dentry) {
160		pr_err("filp_open(%s) failed\n", dev_p);
161		goto fail;
162	}
163	fd_dev->fd_file = file;
164	/*
165	 * If using a block backend with this struct file, we extract
166	 * fd_dev->fd_[block,dev]_size from struct block_device.
167	 *
 168	 * Otherwise, we use the passed fd_dev_size= from configfs
169	 */
170	inode = file->f_mapping->host;
171	if (S_ISBLK(inode->i_mode)) {
172		struct request_queue *q;
173		/*
174		 * Setup the local scope queue_limits from struct request_queue->limits
175		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
176		 */
177		q = bdev_get_queue(inode->i_bdev);
178		limits = &dev_limits.limits;
179		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
180		limits->max_hw_sectors = queue_max_hw_sectors(q);
181		limits->max_sectors = queue_max_sectors(q);
182		/*
183		 * Determine the number of bytes from i_size_read() minus
184		 * one (1) logical sector from underlying struct block_device
185		 */
186		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
187		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
188				       fd_dev->fd_block_size);
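		/*
		 * Subtracting one logical block here means fd_get_blocks()
		 * (which divides fd_dev_size by the block size) reports the
		 * last addressable LBA, as READ CAPACITY expects.
		 */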
189
190		pr_debug("FILEIO: Using size: %llu bytes from struct"
191			" block_device blocks: %llu logical_block_size: %d\n",
192			fd_dev->fd_dev_size,
193			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
194			fd_dev->fd_block_size);
195	} else {
196		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
197			pr_err("FILEIO: Missing fd_dev_size="
198				" parameter, and no backing struct"
199				" block_device\n");
200			goto fail;
201		}
202
203		limits = &dev_limits.limits;
204		limits->logical_block_size = FD_BLOCKSIZE;
205		limits->max_hw_sectors = FD_MAX_SECTORS;
206		limits->max_sectors = FD_MAX_SECTORS;
207		fd_dev->fd_block_size = FD_BLOCKSIZE;
208	}
209
210	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
211	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
212
213	dev = transport_add_device_to_core_hba(hba, &fileio_template,
214				se_dev, dev_flags, fd_dev,
215				&dev_limits, "FILEIO", FD_VERSION);
216	if (!dev)
217		goto fail;
218
219	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
220	fd_dev->fd_queue_depth = dev->queue_depth;
221
222	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
223		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
224			fd_dev->fd_dev_name, fd_dev->fd_dev_size);
225
226	putname(dev_p);
227	return dev;
228fail:
229	if (fd_dev->fd_file) {
230		filp_close(fd_dev->fd_file, NULL);
231		fd_dev->fd_file = NULL;
232	}
233	putname(dev_p);
234	return ERR_PTR(ret);
235}
236
237/*	fd_free_device(): (Part of se_subsystem_api_t template)
238 *
239 *
240 */
241static void fd_free_device(void *p)
242{
243	struct fd_dev *fd_dev = (struct fd_dev *) p;
244
245	if (fd_dev->fd_file) {
246		filp_close(fd_dev->fd_file, NULL);
247		fd_dev->fd_file = NULL;
248	}
249
250	kfree(fd_dev);
251}
252
253static inline struct fd_request *FILE_REQ(struct se_task *task)
254{
255	return container_of(task, struct fd_request, fd_task);
256}
257
258
259static struct se_task *
260fd_alloc_task(unsigned char *cdb)
261{
262	struct fd_request *fd_req;
263
264	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
265	if (!fd_req) {
266		pr_err("Unable to allocate struct fd_request\n");
267		return NULL;
268	}
269
270	return &fd_req->fd_task;
271}
272
273static int fd_do_readv(struct se_task *task)
274{
275	struct fd_request *req = FILE_REQ(task);
276	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
277	struct file *fd = dev->fd_file;
278	struct scatterlist *sg = task->task_sg;
279	struct iovec *iov;
280	mm_segment_t old_fs;
281	loff_t pos = (task->task_lba *
282		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
283	int ret = 0, i;
284
285	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
286	if (!iov) {
287		pr_err("Unable to allocate fd_do_readv iov[]\n");
288		return -ENOMEM;
289	}
290
291	for (i = 0; i < task->task_sg_nents; i++) {
292		iov[i].iov_len = sg[i].length;
293		iov[i].iov_base = sg_virt(&sg[i]);
294	}
295
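	/*
	 * vfs_readv() normally requires user-space iovec pointers;
	 * temporarily raising the address limit with set_fs(get_ds())
	 * lets it accept the kernel-space iovecs built above. The old
	 * limit is restored immediately after the call.
	 */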
296	old_fs = get_fs();
297	set_fs(get_ds());
298	ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
299	set_fs(old_fs);
300
301	kfree(iov);
302	/*
303	 * Return zeros and GOOD status even if the READ did not return
304	 * the expected virt_size for struct file w/o a backing struct
305	 * block_device.
306	 */
307	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
308		if (ret < 0 || ret != task->task_size) {
309			pr_err("vfs_readv() returned %d,"
310				" expecting %d for S_ISBLK\n", ret,
311				(int)task->task_size);
312			return (ret < 0 ? ret : -EINVAL);
313		}
314	} else {
315		if (ret < 0) {
316			pr_err("vfs_readv() returned %d for non"
317				" S_ISBLK\n", ret);
318			return ret;
319		}
320	}
321
322	return 1;
323}
324
325static int fd_do_writev(struct se_task *task)
326{
327	struct fd_request *req = FILE_REQ(task);
328	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
329	struct file *fd = dev->fd_file;
330	struct scatterlist *sg = task->task_sg;
331	struct iovec *iov;
332	mm_segment_t old_fs;
333	loff_t pos = (task->task_lba *
334		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
335	int ret, i = 0;
336
337	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
338	if (!iov) {
339		pr_err("Unable to allocate fd_do_writev iov[]\n");
340		return -ENOMEM;
341	}
342
343	for (i = 0; i < task->task_sg_nents; i++) {
344		iov[i].iov_len = sg[i].length;
345		iov[i].iov_base = sg_virt(&sg[i]);
346	}
347
348	old_fs = get_fs();
349	set_fs(get_ds());
350	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
351	set_fs(old_fs);
352
353	kfree(iov);
354
355	if (ret < 0 || ret != task->task_size) {
356		pr_err("vfs_writev() returned %d\n", ret);
357		return (ret < 0 ? ret : -EINVAL);
358	}
359
360	return 1;
361}
362
363static void fd_emulate_sync_cache(struct se_task *task)
364{
365	struct se_cmd *cmd = task->task_se_cmd;
366	struct se_device *dev = cmd->se_dev;
367	struct fd_dev *fd_dev = dev->dev_ptr;
368	int immed = (cmd->t_task_cdb[1] & 0x2);
369	loff_t start, end;
370	int ret;
371
372	/*
373	 * If the Immediate bit is set, queue up the GOOD response
374	 * for this SYNCHRONIZE_CACHE op
375	 */
376	if (immed)
377		transport_complete_sync_cache(cmd, 1);
378
379	/*
380	 * Determine if we will be flushing the entire device.
381	 */
382	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
383		start = 0;
384		end = LLONG_MAX;
385	} else {
386		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
387		if (cmd->data_length)
388			end = start + cmd->data_length;
389		else
390			end = LLONG_MAX;
391	}
392
393	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
394	if (ret != 0)
395		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
396
397	if (!immed)
398		transport_complete_sync_cache(cmd, ret == 0);
399}
400
401/*
402 * Tell TCM Core that we are capable of WriteCache emulation for
403 * an underlying struct se_device.
404 */
405static int fd_emulated_write_cache(struct se_device *dev)
406{
407	return 1;
408}
409
410static int fd_emulated_dpo(struct se_device *dev)
411{
412	return 0;
413}
414/*
415 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
416 * for TYPE_DISK.
417 */
418static int fd_emulated_fua_write(struct se_device *dev)
419{
420	return 1;
421}
422
423static int fd_emulated_fua_read(struct se_device *dev)
424{
425	return 0;
426}
427
428/*
429 * WRITE Force Unit Access (FUA) emulation on a per struct se_task
 430 * LBA range basis.
431 */
432static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
433{
434	struct se_device *dev = cmd->se_dev;
435	struct fd_dev *fd_dev = dev->dev_ptr;
436	loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
437	loff_t end = start + task->task_size;
438	int ret;
439
440	pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
441			task->task_lba, task->task_size);
442
443	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
444	if (ret != 0)
445		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
446}
447
448static int fd_do_task(struct se_task *task)
449{
450	struct se_cmd *cmd = task->task_se_cmd;
451	struct se_device *dev = cmd->se_dev;
452	int ret = 0;
453
454	/*
455	 * Call vectorized fileio functions to map struct scatterlist
456	 * physical memory addresses to struct iovec virtual memory.
457	 */
458	if (task->task_data_direction == DMA_FROM_DEVICE) {
459		ret = fd_do_readv(task);
460	} else {
461		ret = fd_do_writev(task);
462
463		if (ret > 0 &&
464		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
465		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
466		    cmd->t_tasks_fua) {
467			/*
468			 * We might need to be a bit smarter here
469			 * and return some sense data to let the initiator
 470			 * know the FUA WRITE cache sync failed?
471			 */
472			fd_emulate_write_fua(cmd, task);
473		}
474
475	}
476
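	/*
	 * A positive return from fd_do_readv()/fd_do_writev() means the
	 * I/O completed synchronously, so the task is finished with GOOD
	 * status here before the command is reported as sent.
	 */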
477	if (ret < 0)
478		return ret;
479	if (ret) {
480		task->task_scsi_status = GOOD;
481		transport_complete_task(task, 1);
482	}
483	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
484}
485
486/*	fd_free_task(): (Part of se_subsystem_api_t template)
487 *
488 *
489 */
490static void fd_free_task(struct se_task *task)
491{
492	struct fd_request *req = FILE_REQ(task);
493
494	kfree(req);
495}
496
497enum {
498	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
499};
500
501static match_table_t tokens = {
502	{Opt_fd_dev_name, "fd_dev_name=%s"},
503	{Opt_fd_dev_size, "fd_dev_size=%s"},
504	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
505	{Opt_err, NULL}
506};
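/*
 * These options arrive as a single comma-separated string written to a
 * FILEIO device's configfs "control" attribute, e.g. (illustrative
 * paths, depending on the local configfs layout):
 *
 *   echo "fd_dev_name=/srv/disk0.img,fd_dev_size=4294967296" > \
 *       /sys/kernel/config/target/core/fileio_0/disk0/control
 */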
507
508static ssize_t fd_set_configfs_dev_params(
509	struct se_hba *hba,
510	struct se_subsystem_dev *se_dev,
511	const char *page, ssize_t count)
512{
513	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
514	char *orig, *ptr, *arg_p, *opts;
515	substring_t args[MAX_OPT_ARGS];
516	int ret = 0, arg, token;
517
518	opts = kstrdup(page, GFP_KERNEL);
519	if (!opts)
520		return -ENOMEM;
521
522	orig = opts;
523
524	while ((ptr = strsep(&opts, ",")) != NULL) {
525		if (!*ptr)
526			continue;
527
528		token = match_token(ptr, tokens, args);
529		switch (token) {
530		case Opt_fd_dev_name:
531			arg_p = match_strdup(&args[0]);
532			if (!arg_p) {
533				ret = -ENOMEM;
534				break;
535			}
536			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
537					"%s", arg_p);
538			kfree(arg_p);
539			pr_debug("FILEIO: Referencing Path: %s\n",
540					fd_dev->fd_dev_name);
541			fd_dev->fbd_flags |= FBDF_HAS_PATH;
542			break;
543		case Opt_fd_dev_size:
544			arg_p = match_strdup(&args[0]);
545			if (!arg_p) {
546				ret = -ENOMEM;
547				break;
548			}
549			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
550			kfree(arg_p);
551			if (ret < 0) {
552				pr_err("strict_strtoull() failed for"
553						" fd_dev_size=\n");
554				goto out;
555			}
556			pr_debug("FILEIO: Referencing Size: %llu"
557					" bytes\n", fd_dev->fd_dev_size);
558			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
559			break;
560		case Opt_fd_buffered_io:
561			match_int(args, &arg);
562			if (arg != 1) {
563				pr_err("bogus fd_buffered_io=%d value\n", arg);
564				ret = -EINVAL;
565				goto out;
566			}
567
568			pr_debug("FILEIO: Using buffered I/O"
569				" operations for struct fd_dev\n");
570
571			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
572			break;
573		default:
574			break;
575		}
576	}
577
578out:
579	kfree(orig);
580	return (!ret) ? count : ret;
581}
582
583static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
584{
585	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
586
587	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
588		pr_err("Missing fd_dev_name=\n");
589		return -EINVAL;
590	}
591
592	return 0;
593}
594
595static ssize_t fd_show_configfs_dev_params(
596	struct se_hba *hba,
597	struct se_subsystem_dev *se_dev,
598	char *b)
599{
600	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
601	ssize_t bl = 0;
602
603	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
604	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
605		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
606		(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
607		"Buffered" : "Synchronous");
608	return bl;
609}
610
611/*	fd_get_cdb(): (Part of se_subsystem_api_t template)
612 *
613 *
614 */
615static unsigned char *fd_get_cdb(struct se_task *task)
616{
617	struct fd_request *req = FILE_REQ(task);
618
619	return req->fd_scsi_cdb;
620}
621
622/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
623 *
624 *
625 */
626static u32 fd_get_device_rev(struct se_device *dev)
627{
628	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
629}
630
631/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
632 *
633 *
634 */
635static u32 fd_get_device_type(struct se_device *dev)
636{
637	return TYPE_DISK;
638}
639
640static sector_t fd_get_blocks(struct se_device *dev)
641{
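	/*
	 * For block-device backed files, fd_dev_size was set up in
	 * fd_create_virtdevice() as (i_size - one logical block), so this
	 * division yields the last addressable LBA.
	 */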
642	struct fd_dev *fd_dev = dev->dev_ptr;
643	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
644			dev->se_sub_dev->se_dev_attrib.block_size);
645
646	return blocks_long;
647}
648
649static struct se_subsystem_api fileio_template = {
650	.name			= "fileio",
651	.owner			= THIS_MODULE,
652	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
653	.attach_hba		= fd_attach_hba,
654	.detach_hba		= fd_detach_hba,
655	.allocate_virtdevice	= fd_allocate_virtdevice,
656	.create_virtdevice	= fd_create_virtdevice,
657	.free_device		= fd_free_device,
658	.dpo_emulated		= fd_emulated_dpo,
659	.fua_write_emulated	= fd_emulated_fua_write,
660	.fua_read_emulated	= fd_emulated_fua_read,
661	.write_cache_emulated	= fd_emulated_write_cache,
662	.alloc_task		= fd_alloc_task,
663	.do_task		= fd_do_task,
664	.do_sync_cache		= fd_emulate_sync_cache,
665	.free_task		= fd_free_task,
666	.check_configfs_dev_params = fd_check_configfs_dev_params,
667	.set_configfs_dev_params = fd_set_configfs_dev_params,
668	.show_configfs_dev_params = fd_show_configfs_dev_params,
669	.get_cdb		= fd_get_cdb,
670	.get_device_rev		= fd_get_device_rev,
671	.get_device_type	= fd_get_device_type,
672	.get_blocks		= fd_get_blocks,
673};
674
675static int __init fileio_module_init(void)
676{
677	return transport_subsystem_register(&fileio_template);
678}
679
680static void fileio_module_exit(void)
681{
682	transport_subsystem_release(&fileio_template);
683}
684
685MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
686MODULE_AUTHOR("nab@Linux-iSCSI.org");
687MODULE_LICENSE("GPL");
688
689module_init(fileio_module_init);
690module_exit(fileio_module_exit);
v4.6: drivers/target/target_core_file.c
  1/*******************************************************************************
  2 * Filename:  target_core_file.c
  3 *
  4 * This file contains the Storage Engine <-> FILEIO transport specific functions
  5 *
  6 * (c) Copyright 2005-2013 Datera, Inc.
  7 *
  8 * Nicholas A. Bellinger <nab@kernel.org>
  9 *
 10 * This program is free software; you can redistribute it and/or modify
 11 * it under the terms of the GNU General Public License as published by
 12 * the Free Software Foundation; either version 2 of the License, or
 13 * (at your option) any later version.
 14 *
 15 * This program is distributed in the hope that it will be useful,
 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 18 * GNU General Public License for more details.
 19 *
 20 * You should have received a copy of the GNU General Public License
 21 * along with this program; if not, write to the Free Software
 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 23 *
 24 ******************************************************************************/
 25
 26#include <linux/string.h>
 27#include <linux/parser.h>
 28#include <linux/timer.h>
 29#include <linux/blkdev.h>
 30#include <linux/slab.h>
 31#include <linux/spinlock.h>
 32#include <linux/module.h>
 33#include <linux/vmalloc.h>
 34#include <linux/falloc.h>
 35#include <scsi/scsi_proto.h>
 36#include <asm/unaligned.h>
 37
 38#include <target/target_core_base.h>
 39#include <target/target_core_backend.h>
 40
 41#include "target_core_file.h"
 42
 43static inline struct fd_dev *FD_DEV(struct se_device *dev)
 44{
 45	return container_of(dev, struct fd_dev, dev);
 46}
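/*
 * In this version the generic struct se_device is embedded inside
 * struct fd_dev, so container_of() recovers the FILEIO private data
 * without the dev_ptr indirection used by older versions of this driver.
 */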
 47
 48static int fd_attach_hba(struct se_hba *hba, u32 host_id)
 49{
 50	struct fd_host *fd_host;
 51
 52	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
 53	if (!fd_host) {
 54		pr_err("Unable to allocate memory for struct fd_host\n");
 55		return -ENOMEM;
 56	}
 57
 58	fd_host->fd_host_id = host_id;
 59
 60	hba->hba_ptr = fd_host;
 61
 62	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
 63		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
 64		TARGET_CORE_VERSION);
 65	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
 66		hba->hba_id, fd_host->fd_host_id);
 67
 68	return 0;
 69}
 70
 71static void fd_detach_hba(struct se_hba *hba)
 72{
 73	struct fd_host *fd_host = hba->hba_ptr;
 74
 75	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
 76		" Target Core\n", hba->hba_id, fd_host->fd_host_id);
 77
 78	kfree(fd_host);
 79	hba->hba_ptr = NULL;
 80}
 81
 82static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
 83{
 84	struct fd_dev *fd_dev;
 85	struct fd_host *fd_host = hba->hba_ptr;
 86
 87	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
 88	if (!fd_dev) {
 89		pr_err("Unable to allocate memory for struct fd_dev\n");
 90		return NULL;
 91	}
 92
 93	fd_dev->fd_host = fd_host;
 94
 95	pr_debug("FILEIO: Allocated fd_dev for %s\n", name);
 96
 97	return &fd_dev->dev;
 98}
 99
100static int fd_configure_device(struct se_device *dev)
101{
102	struct fd_dev *fd_dev = FD_DEV(dev);
103	struct fd_host *fd_host = dev->se_hba->hba_ptr;
104	struct file *file;
105	struct inode *inode = NULL;
106	int flags, ret = -EINVAL;
107
108	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
109		pr_err("Missing fd_dev_name=\n");
110		return -EINVAL;
111	}
112
113	/*
114	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
115	 * of pure timestamp updates.
116	 */
117	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
118
119	/*
120	 * Optionally allow fd_buffered_io=1 to be enabled for people
121	 * who want to use the fs buffer cache as a WriteCache mechanism.
122	 *
123	 * This means that in event of a hard failure, there is a risk
124	 * of silent data-loss if the SCSI client has *not* performed a
125	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
126	 * to write-out the entire device cache.
127	 */
128	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
129		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
130		flags &= ~O_DSYNC;
131	}
132
133	file = filp_open(fd_dev->fd_dev_name, flags, 0600);
134	if (IS_ERR(file)) {
135		pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
136		ret = PTR_ERR(file);
137		goto fail;
138	}
139	fd_dev->fd_file = file;
140	/*
141	 * If using a block backend with this struct file, we extract
142	 * fd_dev->fd_[block,dev]_size from struct block_device.
143	 *
144	 * Otherwise, we use the passed fd_dev_size= from configfs
145	 */
146	inode = file->f_mapping->host;
147	if (S_ISBLK(inode->i_mode)) {
148		struct request_queue *q = bdev_get_queue(inode->i_bdev);
149		unsigned long long dev_size;
150
151		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
152		/*
153		 * Determine the number of bytes from i_size_read() minus
154		 * one (1) logical sector from underlying struct block_device
155		 */
156		dev_size = (i_size_read(file->f_mapping->host) -
157				       fd_dev->fd_block_size);
158
159		pr_debug("FILEIO: Using size: %llu bytes from struct"
160			" block_device blocks: %llu logical_block_size: %d\n",
161			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
162			fd_dev->fd_block_size);
163
164		if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
165						      fd_dev->fd_block_size))
166			pr_debug("FILEIO: BLOCK Discard support available,"
167				 " disabled by default\n");
168		/*
169		 * Enable write same emulation for FILEIO and use 0xFFFF as
170		 * the smaller WRITE_SAME(10) only has a two-byte block count.
171		 */
172		dev->dev_attrib.max_write_same_len = 0xFFFF;
173
174		if (blk_queue_nonrot(q))
175			dev->dev_attrib.is_nonrot = 1;
176	} else {
177		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
178			pr_err("FILEIO: Missing fd_dev_size="
179				" parameter, and no backing struct"
180				" block_device\n");
181			goto fail;
182		}
183
184		fd_dev->fd_block_size = FD_BLOCKSIZE;
185		/*
186		 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
187		 */
188		dev->dev_attrib.max_unmap_lba_count = 0x2000;
189		/*
190		 * Currently hardcoded to 1 in Linux/SCSI code..
191		 */
192		dev->dev_attrib.max_unmap_block_desc_count = 1;
193		dev->dev_attrib.unmap_granularity = 1;
194		dev->dev_attrib.unmap_granularity_alignment = 0;
195
196		/*
197		 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
198		 * based upon struct iovec limit for vfs_writev()
199		 */
200		dev->dev_attrib.max_write_same_len = 0x1000;
201	}
202
203	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
204	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
205	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
206	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
207
208	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
209		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
210			" with FDBD_HAS_BUFFERED_IO_WCE\n");
211		dev->dev_attrib.emulate_write_cache = 1;
212	}
213
214	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
215	fd_dev->fd_queue_depth = dev->queue_depth;
216
217	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
218		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
219			fd_dev->fd_dev_name, fd_dev->fd_dev_size);
220
221	return 0;
222fail:
223	if (fd_dev->fd_file) {
224		filp_close(fd_dev->fd_file, NULL);
225		fd_dev->fd_file = NULL;
226	}
227	return ret;
228}
229
230static void fd_dev_call_rcu(struct rcu_head *p)
231{
232	struct se_device *dev = container_of(p, struct se_device, rcu_head);
233	struct fd_dev *fd_dev = FD_DEV(dev);
234
235	kfree(fd_dev);
236}
237
238static void fd_free_device(struct se_device *dev)
239{
240	struct fd_dev *fd_dev = FD_DEV(dev);
241
242	if (fd_dev->fd_file) {
243		filp_close(fd_dev->fd_file, NULL);
244		fd_dev->fd_file = NULL;
245	}
246	call_rcu(&dev->rcu_head, fd_dev_call_rcu);
247}
248
249static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
250		    u32 block_size, struct scatterlist *sgl,
251		    u32 sgl_nents, u32 data_length, int is_write)
252{
253	struct scatterlist *sg;
254	struct iov_iter iter;
255	struct bio_vec *bvec;
256	ssize_t len = 0;
257	loff_t pos = (cmd->t_task_lba * block_size);
258	int ret = 0, i;
259
260	bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
261	if (!bvec) {
262		pr_err("Unable to allocate fd_do_readv iov[]\n");
263		return -ENOMEM;
264	}
265
266	for_each_sg(sgl, sg, sgl_nents, i) {
267		bvec[i].bv_page = sg_page(sg);
268		bvec[i].bv_len = sg->length;
269		bvec[i].bv_offset = sg->offset;
270
271		len += sg->length;
272	}
273
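	/*
	 * Unlike the older vfs_readv()/vfs_writev() path, a bio_vec-backed
	 * iov_iter references the scatterlist pages directly, so no
	 * set_fs() address-limit manipulation is needed here.
	 */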
274	iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
275	if (is_write)
276		ret = vfs_iter_write(fd, &iter, &pos);
277	else
278		ret = vfs_iter_read(fd, &iter, &pos);
279
280	kfree(bvec);
281
282	if (is_write) {
283		if (ret < 0 || ret != data_length) {
284			pr_err("%s() write returned %d\n", __func__, ret);
285			return (ret < 0 ? ret : -EINVAL);
286		}
287	} else {
288		/*
289		 * Return zeros and GOOD status even if the READ did not return
290		 * the expected virt_size for struct file w/o a backing struct
291		 * block_device.
292		 */
293		if (S_ISBLK(file_inode(fd)->i_mode)) {
294			if (ret < 0 || ret != data_length) {
295				pr_err("%s() returned %d, expecting %u for "
296						"S_ISBLK\n", __func__, ret,
297						data_length);
298				return (ret < 0 ? ret : -EINVAL);
299			}
300		} else {
301			if (ret < 0) {
302				pr_err("%s() returned %d for non S_ISBLK\n",
303						__func__, ret);
304				return ret;
305			}
306		}
307	}
308	return 1;
309}
310
311static sense_reason_t
312fd_execute_sync_cache(struct se_cmd *cmd)
313{
314	struct se_device *dev = cmd->se_dev;
315	struct fd_dev *fd_dev = FD_DEV(dev);
316	int immed = (cmd->t_task_cdb[1] & 0x2);
317	loff_t start, end;
318	int ret;
319
320	/*
321	 * If the Immediate bit is set, queue up the GOOD response
322	 * for this SYNCHRONIZE_CACHE op
323	 */
324	if (immed)
325		target_complete_cmd(cmd, SAM_STAT_GOOD);
326
327	/*
328	 * Determine if we will be flushing the entire device.
329	 */
330	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
331		start = 0;
332		end = LLONG_MAX;
333	} else {
334		start = cmd->t_task_lba * dev->dev_attrib.block_size;
335		if (cmd->data_length)
336			end = start + cmd->data_length - 1;
337		else
338			end = LLONG_MAX;
339	}
340
341	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
342	if (ret != 0)
343		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
344
345	if (immed)
346		return 0;
347
348	if (ret)
349		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
350	else
351		target_complete_cmd(cmd, SAM_STAT_GOOD);
352
353	return 0;
354}
355
356static sense_reason_t
357fd_execute_write_same(struct se_cmd *cmd)
358{
359	struct se_device *se_dev = cmd->se_dev;
360	struct fd_dev *fd_dev = FD_DEV(se_dev);
361	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
362	sector_t nolb = sbc_get_write_same_sectors(cmd);
363	struct iov_iter iter;
364	struct bio_vec *bvec;
365	unsigned int len = 0, i;
366	ssize_t ret;
367
368	if (!nolb) {
369		target_complete_cmd(cmd, SAM_STAT_GOOD);
370		return 0;
371	}
372	if (cmd->prot_op) {
373		pr_err("WRITE_SAME: Protection information with FILEIO"
374		       " backends not supported\n");
375		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
376	}
377
378	if (cmd->t_data_nents > 1 ||
379	    cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
380		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
381			" block_size: %u\n",
382			cmd->t_data_nents,
383			cmd->t_data_sg[0].length,
384			cmd->se_dev->dev_attrib.block_size);
385		return TCM_INVALID_CDB_FIELD;
386	}
387
388	bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
389	if (!bvec)
390		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
391
392	for (i = 0; i < nolb; i++) {
393		bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
394		bvec[i].bv_len = cmd->t_data_sg[0].length;
395		bvec[i].bv_offset = cmd->t_data_sg[0].offset;
396
397		len += se_dev->dev_attrib.block_size;
398	}
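	/*
	 * Every bio_vec entry points at the same single-block payload, so
	 * the single vfs_iter_write() below replicates that block across
	 * the whole WRITE_SAME LBA range.
	 */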
399
400	iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len);
401	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos);
402
403	kfree(bvec);
404	if (ret < 0 || ret != len) {
405		pr_err("vfs_iter_write() returned %zd for write same\n", ret);
406		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
407	}
408
409	target_complete_cmd(cmd, SAM_STAT_GOOD);
410	return 0;
411}
412
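/*
 * Fill a range of the separate "<fd_dev_name>.protection" file (opened
 * by fd_init_prot() below) with the given pattern; se_dev->prot_length
 * bytes of protection information are stored per logical block.
 */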
413static int
414fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
415		void *buf, size_t bufsize)
416{
417	struct fd_dev *fd_dev = FD_DEV(se_dev);
418	struct file *prot_fd = fd_dev->fd_prot_file;
419	sector_t prot_length, prot;
420	loff_t pos = lba * se_dev->prot_length;
421
422	if (!prot_fd) {
423		pr_err("Unable to locate fd_dev->fd_prot_file\n");
424		return -ENODEV;
425	}
426
427	prot_length = nolb * se_dev->prot_length;
428
429	for (prot = 0; prot < prot_length;) {
430		sector_t len = min_t(sector_t, bufsize, prot_length - prot);
431		ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
432
433		if (ret != len) {
434			pr_err("vfs_write to prot file failed: %zd\n", ret);
435			return ret < 0 ? ret : -ENODEV;
436		}
437		prot += ret;
438	}
439
440	return 0;
441}
442
443static int
444fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
445{
446	void *buf;
447	int rc;
448
449	buf = (void *)__get_free_page(GFP_KERNEL);
450	if (!buf) {
451		pr_err("Unable to allocate FILEIO prot buf\n");
452		return -ENOMEM;
453	}
454	memset(buf, 0xff, PAGE_SIZE);
455
456	rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
457
458	free_page((unsigned long)buf);
459
460	return rc;
461}
462
463static sense_reason_t
464fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
465{
466	struct file *file = FD_DEV(cmd->se_dev)->fd_file;
467	struct inode *inode = file->f_mapping->host;
468	int ret;
469
470	if (cmd->se_dev->dev_attrib.pi_prot_type) {
471		ret = fd_do_prot_unmap(cmd, lba, nolb);
472		if (ret)
473			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
474	}
475
476	if (S_ISBLK(inode->i_mode)) {
477		/* The backend is block device, use discard */
478		struct block_device *bdev = inode->i_bdev;
479		struct se_device *dev = cmd->se_dev;
480
481		ret = blkdev_issue_discard(bdev,
482					   target_to_linux_sector(dev, lba),
483					   target_to_linux_sector(dev,  nolb),
484					   GFP_KERNEL, 0);
485		if (ret < 0) {
486			pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
487				ret);
488			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
489		}
490	} else {
491		/* The backend is normal file, use fallocate */
492		struct se_device *se_dev = cmd->se_dev;
493		loff_t pos = lba * se_dev->dev_attrib.block_size;
494		unsigned int len = nolb * se_dev->dev_attrib.block_size;
495		int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
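		/*
		 * Punching a hole deallocates the byte range while
		 * FALLOC_FL_KEEP_SIZE preserves the file size, matching
		 * SCSI UNMAP semantics on a plain file backend.
		 */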
496
497		if (!file->f_op->fallocate)
498			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
499
500		ret = file->f_op->fallocate(file, mode, pos, len);
501		if (ret < 0) {
502			pr_warn("FILEIO: fallocate() failed: %d\n", ret);
503			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
504		}
505	}
506
507	return 0;
508}
509
510static sense_reason_t
511fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
512	      enum dma_data_direction data_direction)
513{
514	struct se_device *dev = cmd->se_dev;
515	struct fd_dev *fd_dev = FD_DEV(dev);
516	struct file *file = fd_dev->fd_file;
517	struct file *pfile = fd_dev->fd_prot_file;
518	sense_reason_t rc;
519	int ret = 0;
520	/*
521	 * We are currently limited by the number of iovecs (2048) per
522	 * single vfs_[writev,readv] call.
523	 */
524	if (cmd->data_length > FD_MAX_BYTES) {
525		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
526		       " FD_MAX_BYTES: %u iovec count limitation\n",
527			cmd->data_length, FD_MAX_BYTES);
528		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
529	}
530	/*
531	 * Call vectorized fileio functions to map struct scatterlist
532	 * physical memory addresses to struct iovec virtual memory.
533	 */
534	if (data_direction == DMA_FROM_DEVICE) {
535		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
536			ret = fd_do_rw(cmd, pfile, dev->prot_length,
537				       cmd->t_prot_sg, cmd->t_prot_nents,
538				       cmd->prot_length, 0);
539			if (ret < 0)
540				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
541		}
542
543		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
544			       sgl, sgl_nents, cmd->data_length, 0);
545
546		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
547			u32 sectors = cmd->data_length >>
548					ilog2(dev->dev_attrib.block_size);
549
550			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
551					    0, cmd->t_prot_sg, 0);
552			if (rc)
553				return rc;
554		}
555	} else {
556		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
557			u32 sectors = cmd->data_length >>
558					ilog2(dev->dev_attrib.block_size);
559
560			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
561					    0, cmd->t_prot_sg, 0);
562			if (rc)
563				return rc;
564		}
565
566		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
567			       sgl, sgl_nents, cmd->data_length, 1);
568		/*
569		 * Perform implicit vfs_fsync_range() for fd_do_rw() write ops
570		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
571		 * Allow this to happen independent of WCE=0 setting.
572		 */
573		if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
574			loff_t start = cmd->t_task_lba *
575				dev->dev_attrib.block_size;
576			loff_t end;
577
578			if (cmd->data_length)
579				end = start + cmd->data_length - 1;
580			else
581				end = LLONG_MAX;
582
583			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
584		}
585
586		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
587			ret = fd_do_rw(cmd, pfile, dev->prot_length,
588				       cmd->t_prot_sg, cmd->t_prot_nents,
589				       cmd->prot_length, 1);
590			if (ret < 0)
591				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
592		}
593	}
594
595	if (ret < 0)
596		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
597
598	if (ret)
599		target_complete_cmd(cmd, SAM_STAT_GOOD);
600	return 0;
601}
602
603enum {
604	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
605};
606
607static match_table_t tokens = {
608	{Opt_fd_dev_name, "fd_dev_name=%s"},
609	{Opt_fd_dev_size, "fd_dev_size=%s"},
610	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
611	{Opt_err, NULL}
612};
613
614static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
615		const char *page, ssize_t count)
616{
617	struct fd_dev *fd_dev = FD_DEV(dev);
618	char *orig, *ptr, *arg_p, *opts;
619	substring_t args[MAX_OPT_ARGS];
620	int ret = 0, arg, token;
621
622	opts = kstrdup(page, GFP_KERNEL);
623	if (!opts)
624		return -ENOMEM;
625
626	orig = opts;
627
628	while ((ptr = strsep(&opts, ",\n")) != NULL) {
629		if (!*ptr)
630			continue;
631
632		token = match_token(ptr, tokens, args);
633		switch (token) {
634		case Opt_fd_dev_name:
635			if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
636				FD_MAX_DEV_NAME) == 0) {
637				ret = -EINVAL;
638				break;
639			}
640			pr_debug("FILEIO: Referencing Path: %s\n",
641					fd_dev->fd_dev_name);
642			fd_dev->fbd_flags |= FBDF_HAS_PATH;
643			break;
644		case Opt_fd_dev_size:
645			arg_p = match_strdup(&args[0]);
646			if (!arg_p) {
647				ret = -ENOMEM;
648				break;
649			}
650			ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
651			kfree(arg_p);
652			if (ret < 0) {
653				pr_err("kstrtoull() failed for"
654						" fd_dev_size=\n");
655				goto out;
656			}
657			pr_debug("FILEIO: Referencing Size: %llu"
658					" bytes\n", fd_dev->fd_dev_size);
659			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
660			break;
661		case Opt_fd_buffered_io:
662			ret = match_int(args, &arg);
663			if (ret)
664				goto out;
665			if (arg != 1) {
666				pr_err("bogus fd_buffered_io=%d value\n", arg);
667				ret = -EINVAL;
668				goto out;
669			}
670
671			pr_debug("FILEIO: Using buffered I/O"
672				" operations for struct fd_dev\n");
673
674			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
675			break;
676		default:
677			break;
678		}
679	}
680
681out:
682	kfree(orig);
683	return (!ret) ? count : ret;
684}
685
686static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
687{
688	struct fd_dev *fd_dev = FD_DEV(dev);
689	ssize_t bl = 0;
690
691	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
692	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
693		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
694		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
695		"Buffered-WCE" : "O_DSYNC");
696	return bl;
697}
698
699static sector_t fd_get_blocks(struct se_device *dev)
700{
701	struct fd_dev *fd_dev = FD_DEV(dev);
702	struct file *f = fd_dev->fd_file;
703	struct inode *i = f->f_mapping->host;
704	unsigned long long dev_size;
705	/*
706	 * When using a file that references an underlying struct block_device,
707	 * ensure dev_size is always based on the current inode size in order
708	 * to handle underlying block_device resize operations.
709	 */
710	if (S_ISBLK(i->i_mode))
711		dev_size = i_size_read(i);
712	else
713		dev_size = fd_dev->fd_dev_size;
714
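	/*
	 * One logical block is subtracted before dividing so the result
	 * is the last addressable LBA, the value READ CAPACITY reports.
	 */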
715	return div_u64(dev_size - dev->dev_attrib.block_size,
716		       dev->dev_attrib.block_size);
717}
718
719static int fd_init_prot(struct se_device *dev)
720{
721	struct fd_dev *fd_dev = FD_DEV(dev);
722	struct file *prot_file, *file = fd_dev->fd_file;
723	struct inode *inode;
724	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
725	char buf[FD_MAX_DEV_PROT_NAME];
726
727	if (!file) {
728		pr_err("Unable to locate fd_dev->fd_file\n");
729		return -ENODEV;
730	}
731
732	inode = file->f_mapping->host;
733	if (S_ISBLK(inode->i_mode)) {
734		pr_err("FILEIO Protection emulation only supported on"
735		       " !S_ISBLK\n");
736		return -ENOSYS;
737	}
738
739	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
740		flags &= ~O_DSYNC;
741
742	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
743		 fd_dev->fd_dev_name);
744
745	prot_file = filp_open(buf, flags, 0600);
746	if (IS_ERR(prot_file)) {
747		pr_err("filp_open(%s) failed\n", buf);
748		ret = PTR_ERR(prot_file);
749		return ret;
750	}
751	fd_dev->fd_prot_file = prot_file;
752
753	return 0;
754}
755
756static int fd_format_prot(struct se_device *dev)
757{
758	unsigned char *buf;
759	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
760	int ret;
761
762	if (!dev->dev_attrib.pi_prot_type) {
763		pr_err("Unable to format_prot while pi_prot_type == 0\n");
764		return -ENODEV;
765	}
766
767	buf = vzalloc(unit_size);
768	if (!buf) {
769		pr_err("Unable to allocate FILEIO prot buf\n");
770		return -ENOMEM;
771	}
772
773	pr_debug("Using FILEIO prot_length: %llu\n",
774		 (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
775					dev->prot_length);
776
777	memset(buf, 0xff, unit_size);
778	ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
779			      buf, unit_size);
780	vfree(buf);
781	return ret;
782}
783
784static void fd_free_prot(struct se_device *dev)
785{
786	struct fd_dev *fd_dev = FD_DEV(dev);
787
788	if (!fd_dev->fd_prot_file)
789		return;
790
791	filp_close(fd_dev->fd_prot_file, NULL);
792	fd_dev->fd_prot_file = NULL;
793}
794
795static struct sbc_ops fd_sbc_ops = {
796	.execute_rw		= fd_execute_rw,
797	.execute_sync_cache	= fd_execute_sync_cache,
798	.execute_write_same	= fd_execute_write_same,
799	.execute_unmap		= fd_execute_unmap,
800};
801
802static sense_reason_t
803fd_parse_cdb(struct se_cmd *cmd)
804{
805	return sbc_parse_cdb(cmd, &fd_sbc_ops);
806}
807
808static const struct target_backend_ops fileio_ops = {
809	.name			= "fileio",
810	.inquiry_prod		= "FILEIO",
811	.inquiry_rev		= FD_VERSION,
812	.owner			= THIS_MODULE,
813	.attach_hba		= fd_attach_hba,
814	.detach_hba		= fd_detach_hba,
815	.alloc_device		= fd_alloc_device,
816	.configure_device	= fd_configure_device,
817	.free_device		= fd_free_device,
818	.parse_cdb		= fd_parse_cdb,
819	.set_configfs_dev_params = fd_set_configfs_dev_params,
820	.show_configfs_dev_params = fd_show_configfs_dev_params,
821	.get_device_type	= sbc_get_device_type,
822	.get_blocks		= fd_get_blocks,
823	.init_prot		= fd_init_prot,
824	.format_prot		= fd_format_prot,
825	.free_prot		= fd_free_prot,
826	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
827};
828
829static int __init fileio_module_init(void)
830{
831	return transport_backend_register(&fileio_ops);
832}
833
834static void __exit fileio_module_exit(void)
835{
836	target_backend_unregister(&fileio_ops);
837}
838
839MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
840MODULE_AUTHOR("nab@Linux-iSCSI.org");
841MODULE_LICENSE("GPL");
842
843module_init(fileio_module_init);
844module_exit(fileio_module_exit);