Linux v3.1: drivers/target/target_core_rd.c
  1/*******************************************************************************
  2 * Filename:  target_core_rd.c
  3 *
  4 * This file contains the Storage Engine <-> Ramdisk transport
  5 * specific functions.
  6 *
  7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
  8 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
  9 * Copyright (c) 2007-2010 Rising Tide Systems
 10 * Copyright (c) 2008-2010 Linux-iSCSI.org
 11 *
 12 * Nicholas A. Bellinger <nab@kernel.org>
 13 *
 14 * This program is free software; you can redistribute it and/or modify
 15 * it under the terms of the GNU General Public License as published by
 16 * the Free Software Foundation; either version 2 of the License, or
 17 * (at your option) any later version.
 18 *
 19 * This program is distributed in the hope that it will be useful,
 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 22 * GNU General Public License for more details.
 23 *
 24 * You should have received a copy of the GNU General Public License
 25 * along with this program; if not, write to the Free Software
 26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 27 *
 28 ******************************************************************************/
 29
 30#include <linux/version.h>
 31#include <linux/string.h>
 32#include <linux/parser.h>
 33#include <linux/timer.h>
 34#include <linux/blkdev.h>
 35#include <linux/slab.h>
 36#include <linux/spinlock.h>
 37#include <scsi/scsi.h>
 38#include <scsi/scsi_host.h>
 39
 40#include <target/target_core_base.h>
 41#include <target/target_core_device.h>
 42#include <target/target_core_transport.h>
 43#include <target/target_core_fabric_ops.h>
 44
 45#include "target_core_rd.h"
 46
 47static struct se_subsystem_api rd_mcp_template;
 48
 49/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
  50 *	Allocate a struct rd_host for this HBA and hang it off
  51 *	hba->hba_ptr.
 52 */
 53static int rd_attach_hba(struct se_hba *hba, u32 host_id)
 54{
 55	struct rd_host *rd_host;
 56
 57	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
 58	if (!rd_host) {
 59		pr_err("Unable to allocate memory for struct rd_host\n");
 60		return -ENOMEM;
 61	}
 62
 63	rd_host->rd_host_id = host_id;
 64
 65	hba->hba_ptr = rd_host;
 66
 67	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
 68		" Generic Target Core Stack %s\n", hba->hba_id,
 69		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
 70	pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
 71		" MaxSectors: %u\n", hba->hba_id,
 72		rd_host->rd_host_id, RD_MAX_SECTORS);
 73
 74	return 0;
 75}
 76
 77static void rd_detach_hba(struct se_hba *hba)
 78{
 79	struct rd_host *rd_host = hba->hba_ptr;
 80
 81	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
 82		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
 83
 84	kfree(rd_host);
 85	hba->hba_ptr = NULL;
 86}
 87
 88/*	rd_release_device_space():
  89 *	Free every backing page and scatterlist table allocated for the
  90 *	ramdisk device, then clear the table array bookkeeping.
 91 */
 92static void rd_release_device_space(struct rd_dev *rd_dev)
 93{
 94	u32 i, j, page_count = 0, sg_per_table;
 95	struct rd_dev_sg_table *sg_table;
 96	struct page *pg;
 97	struct scatterlist *sg;
 98
 99	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
100		return;
101
102	sg_table = rd_dev->sg_table_array;
103
104	for (i = 0; i < rd_dev->sg_table_count; i++) {
105		sg = sg_table[i].sg_table;
106		sg_per_table = sg_table[i].rd_sg_count;
107
108		for (j = 0; j < sg_per_table; j++) {
109			pg = sg_page(&sg[j]);
110			if (pg) {
111				__free_page(pg);
112				page_count++;
113			}
114		}
115
116		kfree(sg);
117	}
118
119	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
120		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
121		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
122		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
123
124	kfree(sg_table);
125	rd_dev->sg_table_array = NULL;
126	rd_dev->sg_table_count = 0;
127}
128
129
130/*	rd_build_device_space():
 131 *	Allocate the scatterlist tables and backing pages that make up the
 132 *	rd_page_count pages of ramdisk device space.
133 */
134static int rd_build_device_space(struct rd_dev *rd_dev)
135{
136	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
137	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
138				sizeof(struct scatterlist));
139	struct rd_dev_sg_table *sg_table;
140	struct page *pg;
141	struct scatterlist *sg;
142
143	if (rd_dev->rd_page_count <= 0) {
144		pr_err("Illegal page count: %u for Ramdisk device\n",
145			rd_dev->rd_page_count);
146		return -EINVAL;
147	}
148	total_sg_needed = rd_dev->rd_page_count;
149
150	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
151
152	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
153	if (!sg_table) {
154		pr_err("Unable to allocate memory for Ramdisk"
155			" scatterlist tables\n");
156		return -ENOMEM;
157	}
158
159	rd_dev->sg_table_array = sg_table;
160	rd_dev->sg_table_count = sg_tables;
161
162	while (total_sg_needed) {
163		sg_per_table = (total_sg_needed > max_sg_per_table) ?
164			max_sg_per_table : total_sg_needed;
165
166		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
167				GFP_KERNEL);
168		if (!sg) {
169			pr_err("Unable to allocate scatterlist array"
170				" for struct rd_dev\n");
171			return -ENOMEM;
172		}
173
174		sg_init_table(sg, sg_per_table);
175
176		sg_table[i].sg_table = sg;
177		sg_table[i].rd_sg_count = sg_per_table;
178		sg_table[i].page_start_offset = page_offset;
179		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
180						- 1;
181
182		for (j = 0; j < sg_per_table; j++) {
183			pg = alloc_pages(GFP_KERNEL, 0);
184			if (!pg) {
185				pr_err("Unable to allocate scatterlist"
186					" pages for struct rd_dev_sg_table\n");
187				return -ENOMEM;
188			}
189			sg_assign_page(&sg[j], pg);
190			sg[j].length = PAGE_SIZE;
191		}
192
193		page_offset += sg_per_table;
194		total_sg_needed -= sg_per_table;
195	}
196
197	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
198		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
199		rd_dev->rd_dev_id, rd_dev->rd_page_count,
200		rd_dev->sg_table_count);
201
202	return 0;
203}
204
205static void *rd_allocate_virtdevice(
206	struct se_hba *hba,
207	const char *name,
208	int rd_direct)
209{
210	struct rd_dev *rd_dev;
211	struct rd_host *rd_host = hba->hba_ptr;
212
213	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
214	if (!rd_dev) {
215		pr_err("Unable to allocate memory for struct rd_dev\n");
216		return NULL;
217	}
218
219	rd_dev->rd_host = rd_host;
220	rd_dev->rd_direct = rd_direct;
221
222	return rd_dev;
223}
224
225static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
226{
227	return rd_allocate_virtdevice(hba, name, 0);
228}
229
230/*	rd_create_virtdevice():
 231 *	Build the ramdisk device space and register the device with the
 232 *	generic target core.
233 */
234static struct se_device *rd_create_virtdevice(
235	struct se_hba *hba,
236	struct se_subsystem_dev *se_dev,
237	void *p,
238	int rd_direct)
239{
240	struct se_device *dev;
241	struct se_dev_limits dev_limits;
242	struct rd_dev *rd_dev = p;
243	struct rd_host *rd_host = hba->hba_ptr;
244	int dev_flags = 0, ret;
245	char prod[16], rev[4];
246
247	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
248
249	ret = rd_build_device_space(rd_dev);
250	if (ret < 0)
251		goto fail;
252
253	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
254	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
255						RD_MCP_VERSION);
256
257	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
258	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
259	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
260	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
261	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
262
263	dev = transport_add_device_to_core_hba(hba,
264			&rd_mcp_template, se_dev, dev_flags, rd_dev,
265			&dev_limits, prod, rev);
266	if (!dev)
267		goto fail;
268
269	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
270	rd_dev->rd_queue_depth = dev->queue_depth;
271
272	pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
273		" %u pages in %u tables, %lu total bytes\n",
274		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
275		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
276		rd_dev->sg_table_count,
277		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
278
279	return dev;
280
281fail:
282	rd_release_device_space(rd_dev);
283	return ERR_PTR(ret);
284}
285
286static struct se_device *rd_MEMCPY_create_virtdevice(
287	struct se_hba *hba,
288	struct se_subsystem_dev *se_dev,
289	void *p)
290{
291	return rd_create_virtdevice(hba, se_dev, p, 0);
292}
293
294/*	rd_free_device(): (Part of se_subsystem_api_t template)
 295 *	Release the device space and free the struct rd_dev itself.
 296 *
297 */
298static void rd_free_device(void *p)
299{
300	struct rd_dev *rd_dev = p;
301
302	rd_release_device_space(rd_dev);
303	kfree(rd_dev);
304}
305
306static inline struct rd_request *RD_REQ(struct se_task *task)
307{
308	return container_of(task, struct rd_request, rd_task);
309}
310
311static struct se_task *
312rd_alloc_task(unsigned char *cdb)
313{
314	struct rd_request *rd_req;
315
316	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
317	if (!rd_req) {
318		pr_err("Unable to allocate struct rd_request\n");
319		return NULL;
320	}
321
322	return &rd_req->rd_task;
323}
324
325/*	rd_get_sg_table():
 326 *	Return the scatterlist table whose page range covers @page, or
 327 *	NULL if no table contains it.
328 */
329static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
330{
331	u32 i;
332	struct rd_dev_sg_table *sg_table;
333
334	for (i = 0; i < rd_dev->sg_table_count; i++) {
335		sg_table = &rd_dev->sg_table_array[i];
336		if ((sg_table->page_start_offset <= page) &&
337		    (sg_table->page_end_offset >= page))
338			return sg_table;
339	}
340
341	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
342			page);
343
344	return NULL;
345}
346
347/*	rd_MEMCPY_read():
 348 *	Copy data for a READ from the ramdisk backing pages into the
 349 *	task's scatterlist.
350 */
351static int rd_MEMCPY_read(struct rd_request *req)
352{
353	struct se_task *task = &req->rd_task;
354	struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
355	struct rd_dev_sg_table *table;
356	struct scatterlist *sg_d, *sg_s;
357	void *dst, *src;
358	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
359	u32 length, page_end = 0, table_sg_end;
360	u32 rd_offset = req->rd_offset;
361
362	table = rd_get_sg_table(dev, req->rd_page);
363	if (!table)
364		return -EINVAL;
365
366	table_sg_end = (table->page_end_offset - req->rd_page);
367	sg_d = task->task_sg;
368	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
369
370	pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
371		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
372		req->rd_page, req->rd_offset);
373
374	src_offset = rd_offset;
375
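	/*
	 * Walk the destination (task) and source (ramdisk) scatterlists in
	 * parallel, copying whichever of the two current entries has fewer
	 * bytes remaining, and refetch the sg table once rd_page crosses
	 * table->page_end_offset.
	 */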
376	while (req->rd_size) {
377		if ((sg_d[i].length - dst_offset) <
378		    (sg_s[j].length - src_offset)) {
379			length = (sg_d[i].length - dst_offset);
380
381			pr_debug("Step 1 - sg_d[%d]: %p length: %d"
382				" offset: %u sg_s[%d].length: %u\n", i,
383				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
384				sg_s[j].length);
385			pr_debug("Step 1 - length: %u dst_offset: %u"
386				" src_offset: %u\n", length, dst_offset,
387				src_offset);
388
389			if (length > req->rd_size)
390				length = req->rd_size;
391
392			dst = sg_virt(&sg_d[i++]) + dst_offset;
393			BUG_ON(!dst);
394
395			src = sg_virt(&sg_s[j]) + src_offset;
396			BUG_ON(!src);
397
398			dst_offset = 0;
399			src_offset = length;
400			page_end = 0;
401		} else {
402			length = (sg_s[j].length - src_offset);
403
404			pr_debug("Step 2 - sg_d[%d]: %p length: %d"
405				" offset: %u sg_s[%d].length: %u\n", i,
406				&sg_d[i], sg_d[i].length, sg_d[i].offset,
407				j, sg_s[j].length);
408			pr_debug("Step 2 - length: %u dst_offset: %u"
409				" src_offset: %u\n", length, dst_offset,
410				src_offset);
411
412			if (length > req->rd_size)
413				length = req->rd_size;
414
415			dst = sg_virt(&sg_d[i]) + dst_offset;
416			BUG_ON(!dst);
417
418			if (sg_d[i].length == length) {
419				i++;
420				dst_offset = 0;
421			} else
422				dst_offset = length;
423
424			src = sg_virt(&sg_s[j++]) + src_offset;
425			BUG_ON(!src);
426
427			src_offset = 0;
428			page_end = 1;
429		}
430
431		memcpy(dst, src, length);
432
433		pr_debug("page: %u, remaining size: %u, length: %u,"
434			" i: %u, j: %u\n", req->rd_page,
435			(req->rd_size - length), length, i, j);
436
437		req->rd_size -= length;
438		if (!req->rd_size)
439			return 0;
440
441		if (!page_end)
442			continue;
443
444		if (++req->rd_page <= table->page_end_offset) {
445			pr_debug("page: %u in same page table\n",
446				req->rd_page);
447			continue;
448		}
449
450		pr_debug("getting new page table for page: %u\n",
451				req->rd_page);
452
453		table = rd_get_sg_table(dev, req->rd_page);
454		if (!table)
455			return -EINVAL;
456
457		sg_s = &table->sg_table[j = 0];
458	}
459
460	return 0;
461}
462
463/*	rd_MEMCPY_write():
 464 *	Copy data for a WRITE from the task's scatterlist into the
 465 *	ramdisk backing pages.
466 */
467static int rd_MEMCPY_write(struct rd_request *req)
468{
469	struct se_task *task = &req->rd_task;
470	struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
471	struct rd_dev_sg_table *table;
472	struct scatterlist *sg_d, *sg_s;
473	void *dst, *src;
474	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
475	u32 length, page_end = 0, table_sg_end;
476	u32 rd_offset = req->rd_offset;
477
478	table = rd_get_sg_table(dev, req->rd_page);
479	if (!table)
480		return -EINVAL;
481
482	table_sg_end = (table->page_end_offset - req->rd_page);
483	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
484	sg_s = task->task_sg;
485
486	pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
487		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
488		req->rd_page, req->rd_offset);
489
490	dst_offset = rd_offset;
491
492	while (req->rd_size) {
493		if ((sg_s[i].length - src_offset) <
494		    (sg_d[j].length - dst_offset)) {
495			length = (sg_s[i].length - src_offset);
496
497			pr_debug("Step 1 - sg_s[%d]: %p length: %d"
498				" offset: %d sg_d[%d].length: %u\n", i,
499				&sg_s[i], sg_s[i].length, sg_s[i].offset,
500				j, sg_d[j].length);
501			pr_debug("Step 1 - length: %u src_offset: %u"
502				" dst_offset: %u\n", length, src_offset,
503				dst_offset);
504
505			if (length > req->rd_size)
506				length = req->rd_size;
507
508			src = sg_virt(&sg_s[i++]) + src_offset;
509			BUG_ON(!src);
510
511			dst = sg_virt(&sg_d[j]) + dst_offset;
512			BUG_ON(!dst);
513
514			src_offset = 0;
515			dst_offset = length;
516			page_end = 0;
517		} else {
518			length = (sg_d[j].length - dst_offset);
519
520			pr_debug("Step 2 - sg_s[%d]: %p length: %d"
521				" offset: %d sg_d[%d].length: %u\n", i,
522				&sg_s[i], sg_s[i].length, sg_s[i].offset,
523				j, sg_d[j].length);
524			pr_debug("Step 2 - length: %u src_offset: %u"
525				" dst_offset: %u\n", length, src_offset,
526				dst_offset);
527
528			if (length > req->rd_size)
529				length = req->rd_size;
530
531			src = sg_virt(&sg_s[i]) + src_offset;
532			BUG_ON(!src);
533
534			if (sg_s[i].length == length) {
535				i++;
536				src_offset = 0;
537			} else
538				src_offset = length;
539
540			dst = sg_virt(&sg_d[j++]) + dst_offset;
541			BUG_ON(!dst);
542
543			dst_offset = 0;
544			page_end = 1;
545		}
546
547		memcpy(dst, src, length);
548
549		pr_debug("page: %u, remaining size: %u, length: %u,"
550			" i: %u, j: %u\n", req->rd_page,
551			(req->rd_size - length), length, i, j);
552
553		req->rd_size -= length;
554		if (!req->rd_size)
555			return 0;
556
557		if (!page_end)
558			continue;
559
560		if (++req->rd_page <= table->page_end_offset) {
561			pr_debug("page: %u in same page table\n",
562				req->rd_page);
563			continue;
564		}
565
566		pr_debug("getting new page table for page: %u\n",
567				req->rd_page);
568
569		table = rd_get_sg_table(dev, req->rd_page);
570		if (!table)
571			return -EINVAL;
572
573		sg_d = &table->sg_table[j = 0];
574	}
575
576	return 0;
577}
578
579/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
 580 *	Translate the task LBA into a ramdisk page/offset and dispatch to
 581 *	rd_MEMCPY_read() or rd_MEMCPY_write().
582 */
583static int rd_MEMCPY_do_task(struct se_task *task)
584{
585	struct se_device *dev = task->se_dev;
586	struct rd_request *req = RD_REQ(task);
587	unsigned long long lba;
588	int ret;
589
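	/*
	 * Translate the starting LBA into a ramdisk page index and a byte
	 * offset within that page (block_size is assumed to evenly divide
	 * PAGE_SIZE).
	 */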
590	req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
591	lba = task->task_lba;
592	req->rd_offset = (do_div(lba,
593			  (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
594			   dev->se_sub_dev->se_dev_attrib.block_size;
595	req->rd_size = task->task_size;
596
597	if (task->task_data_direction == DMA_FROM_DEVICE)
598		ret = rd_MEMCPY_read(req);
599	else
600		ret = rd_MEMCPY_write(req);
601
602	if (ret != 0)
603		return ret;
604
605	task->task_scsi_status = GOOD;
606	transport_complete_task(task, 1);
607
608	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
609}
610
611/*	rd_free_task(): (Part of se_subsystem_api_t template)
 612 *	Free the struct rd_request that embeds this task.
 613 *
614 */
615static void rd_free_task(struct se_task *task)
616{
617	kfree(RD_REQ(task));
618}
619
620enum {
621	Opt_rd_pages, Opt_err
622};
623
624static match_table_t tokens = {
625	{Opt_rd_pages, "rd_pages=%d"},
626	{Opt_err, NULL}
627};
628
629static ssize_t rd_set_configfs_dev_params(
630	struct se_hba *hba,
631	struct se_subsystem_dev *se_dev,
632	const char *page,
633	ssize_t count)
634{
635	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
636	char *orig, *ptr, *opts;
637	substring_t args[MAX_OPT_ARGS];
638	int ret = 0, arg, token;
639
640	opts = kstrdup(page, GFP_KERNEL);
641	if (!opts)
642		return -ENOMEM;
643
644	orig = opts;
645
646	while ((ptr = strsep(&opts, ",")) != NULL) {
647		if (!*ptr)
648			continue;
649
650		token = match_token(ptr, tokens, args);
651		switch (token) {
652		case Opt_rd_pages:
653			match_int(args, &arg);
654			rd_dev->rd_page_count = arg;
655			pr_debug("RAMDISK: Referencing Page"
656				" Count: %u\n", rd_dev->rd_page_count);
657			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
658			break;
659		default:
660			break;
661		}
662	}
663
664	kfree(orig);
665	return (!ret) ? count : ret;
666}
667
668static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
669{
670	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
671
672	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
673		pr_debug("Missing rd_pages= parameter\n");
674		return -EINVAL;
675	}
676
677	return 0;
678}
679
680static ssize_t rd_show_configfs_dev_params(
681	struct se_hba *hba,
682	struct se_subsystem_dev *se_dev,
683	char *b)
684{
685	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
686	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
687			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
688			"rd_direct" : "rd_mcp");
689	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
690			"  SG_table_count: %u\n", rd_dev->rd_page_count,
691			PAGE_SIZE, rd_dev->sg_table_count);
692	return bl;
693}
694
695/*	rd_get_cdb(): (Part of se_subsystem_api_t template)
 696 *	Return the SCSI CDB stored in the struct rd_request for this task.
 697 *
698 */
699static unsigned char *rd_get_cdb(struct se_task *task)
700{
701	struct rd_request *req = RD_REQ(task);
702
703	return req->rd_scsi_cdb;
704}
705
706static u32 rd_get_device_rev(struct se_device *dev)
707{
708	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
709}
710
711static u32 rd_get_device_type(struct se_device *dev)
712{
713	return TYPE_DISK;
714}
715
716static sector_t rd_get_blocks(struct se_device *dev)
717{
718	struct rd_dev *rd_dev = dev->dev_ptr;
719	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
720			dev->se_sub_dev->se_dev_attrib.block_size) - 1;
721
722	return blocks_long;
723}
724
725static struct se_subsystem_api rd_mcp_template = {
726	.name			= "rd_mcp",
727	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
728	.attach_hba		= rd_attach_hba,
729	.detach_hba		= rd_detach_hba,
730	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
731	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
732	.free_device		= rd_free_device,
733	.alloc_task		= rd_alloc_task,
734	.do_task		= rd_MEMCPY_do_task,
735	.free_task		= rd_free_task,
736	.check_configfs_dev_params = rd_check_configfs_dev_params,
737	.set_configfs_dev_params = rd_set_configfs_dev_params,
738	.show_configfs_dev_params = rd_show_configfs_dev_params,
739	.get_cdb		= rd_get_cdb,
740	.get_device_rev		= rd_get_device_rev,
741	.get_device_type	= rd_get_device_type,
742	.get_blocks		= rd_get_blocks,
743};
744
745int __init rd_module_init(void)
746{
747	int ret;
748
749	ret = transport_subsystem_register(&rd_mcp_template);
750	if (ret < 0) {
751		return ret;
752	}
753
754	return 0;
755}
756
757void rd_module_exit(void)
758{
759	transport_subsystem_release(&rd_mcp_template);
760}
Linux v3.15: drivers/target/target_core_rd.c
  1/*******************************************************************************
  2 * Filename:  target_core_rd.c
  3 *
  4 * This file contains the Storage Engine <-> Ramdisk transport
  5 * specific functions.
  6 *
  7 * (c) Copyright 2003-2013 Datera, Inc.
  8 *
  9 * Nicholas A. Bellinger <nab@kernel.org>
 10 *
 11 * This program is free software; you can redistribute it and/or modify
 12 * it under the terms of the GNU General Public License as published by
 13 * the Free Software Foundation; either version 2 of the License, or
 14 * (at your option) any later version.
 15 *
 16 * This program is distributed in the hope that it will be useful,
 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 19 * GNU General Public License for more details.
 20 *
 21 * You should have received a copy of the GNU General Public License
 22 * along with this program; if not, write to the Free Software
 23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 24 *
 25 ******************************************************************************/
 26
 27#include <linux/string.h>
 28#include <linux/parser.h>
 29#include <linux/timer.h>
 30#include <linux/slab.h>
 31#include <linux/spinlock.h>
 32#include <scsi/scsi.h>
 33#include <scsi/scsi_host.h>
 34
 35#include <target/target_core_base.h>
 36#include <target/target_core_backend.h>
 37
 38#include "target_core_rd.h"
 39
 40static inline struct rd_dev *RD_DEV(struct se_device *dev)
 41{
 42	return container_of(dev, struct rd_dev, dev);
 43}
 44
 45/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
  46 *	Allocate a struct rd_host for this HBA and hang it off
  47 *	hba->hba_ptr.
 48 */
 49static int rd_attach_hba(struct se_hba *hba, u32 host_id)
 50{
 51	struct rd_host *rd_host;
 52
 53	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
 54	if (!rd_host) {
 55		pr_err("Unable to allocate memory for struct rd_host\n");
 56		return -ENOMEM;
 57	}
 58
 59	rd_host->rd_host_id = host_id;
 60
 61	hba->hba_ptr = rd_host;
 62
 63	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
 64		" Generic Target Core Stack %s\n", hba->hba_id,
 65		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
 66
 67	return 0;
 68}
 69
 70static void rd_detach_hba(struct se_hba *hba)
 71{
 72	struct rd_host *rd_host = hba->hba_ptr;
 73
 74	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
 75		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
 76
 77	kfree(rd_host);
 78	hba->hba_ptr = NULL;
 79}
 80
 81static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
 82				 u32 sg_table_count)
 83{
 84	struct page *pg;
 85	struct scatterlist *sg;
 86	u32 i, j, page_count = 0, sg_per_table;
 87
 88	for (i = 0; i < sg_table_count; i++) {
 89		sg = sg_table[i].sg_table;
 90		sg_per_table = sg_table[i].rd_sg_count;
 91
 92		for (j = 0; j < sg_per_table; j++) {
 93			pg = sg_page(&sg[j]);
 94			if (pg) {
 95				__free_page(pg);
 96				page_count++;
 97			}
 98		}
 99		kfree(sg);
100	}
101
102	kfree(sg_table);
103	return page_count;
104}
105
106static void rd_release_device_space(struct rd_dev *rd_dev)
107{
108	u32 page_count;
109
110	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
111		return;
112
113	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
114					  rd_dev->sg_table_count);
115
116	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
117		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
118		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
119		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
120
121	rd_dev->sg_table_array = NULL;
122	rd_dev->sg_table_count = 0;
123}
124
125
126/*	rd_allocate_sgl_table():
127 *
128 *	Allocate scatterlists and backing pages, each filled with init_payload.
129 */
130static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
131				 u32 total_sg_needed, unsigned char init_payload)
132{
133	u32 i = 0, j, page_offset = 0, sg_per_table;
134	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
135				sizeof(struct scatterlist));
136	struct page *pg;
137	struct scatterlist *sg;
138	unsigned char *p;
139
140	while (total_sg_needed) {
141		sg_per_table = (total_sg_needed > max_sg_per_table) ?
142			max_sg_per_table : total_sg_needed;
143
144		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
145				GFP_KERNEL);
146		if (!sg) {
147			pr_err("Unable to allocate scatterlist array"
148				" for struct rd_dev\n");
149			return -ENOMEM;
150		}
151
152		sg_init_table(sg, sg_per_table);
153
154		sg_table[i].sg_table = sg;
155		sg_table[i].rd_sg_count = sg_per_table;
156		sg_table[i].page_start_offset = page_offset;
157		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
158						- 1;
159
160		for (j = 0; j < sg_per_table; j++) {
161			pg = alloc_pages(GFP_KERNEL, 0);
162			if (!pg) {
163				pr_err("Unable to allocate scatterlist"
164					" pages for struct rd_dev_sg_table\n");
165				return -ENOMEM;
166			}
167			sg_assign_page(&sg[j], pg);
168			sg[j].length = PAGE_SIZE;
169
170			p = kmap(pg);
171			memset(p, init_payload, PAGE_SIZE);
172			kunmap(pg);
173		}
174
175		page_offset += sg_per_table;
176		total_sg_needed -= sg_per_table;
177	}
178
179	return 0;
180}
181
182static int rd_build_device_space(struct rd_dev *rd_dev)
183{
184	struct rd_dev_sg_table *sg_table;
185	u32 sg_tables, total_sg_needed;
186	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
187				sizeof(struct scatterlist));
188	int rc;
189
190	if (rd_dev->rd_page_count <= 0) {
191		pr_err("Illegal page count: %u for Ramdisk device\n",
192		       rd_dev->rd_page_count);
193		return -EINVAL;
194	}
195
196	/* Don't need backing pages for NULLIO */
197	if (rd_dev->rd_flags & RDF_NULLIO)
198		return 0;
199
200	total_sg_needed = rd_dev->rd_page_count;
201
202	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
203
204	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
205	if (!sg_table) {
206		pr_err("Unable to allocate memory for Ramdisk"
207		       " scatterlist tables\n");
208		return -ENOMEM;
209	}
210
211	rd_dev->sg_table_array = sg_table;
212	rd_dev->sg_table_count = sg_tables;
213
214	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
215	if (rc)
216		return rc;
217
218	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
219		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
220		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
221		 rd_dev->sg_table_count);
222
223	return 0;
224}
225
226static void rd_release_prot_space(struct rd_dev *rd_dev)
227{
228	u32 page_count;
229
230	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
231		return;
232
233	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
234					  rd_dev->sg_prot_count);
235
236	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
237		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
238		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
239		 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
240
241	rd_dev->sg_prot_array = NULL;
242	rd_dev->sg_prot_count = 0;
243}
244
245static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
246{
247	struct rd_dev_sg_table *sg_table;
248	u32 total_sg_needed, sg_tables;
249	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
250				sizeof(struct scatterlist));
251	int rc;
252
253	if (rd_dev->rd_flags & RDF_NULLIO)
254		return 0;
255	/*
256	 * prot_length=8byte dif data
257	 * tot sg needed = rd_page_count * (PGSZ/block_size) *
258	 * 		   (prot_length/block_size) + pad
259	 * PGSZ canceled each other.
260	 */
261	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
262
263	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
264
265	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
266	if (!sg_table) {
267		pr_err("Unable to allocate memory for Ramdisk protection"
268		       " scatterlist tables\n");
269		return -ENOMEM;
270	}
271
272	rd_dev->sg_prot_array = sg_table;
273	rd_dev->sg_prot_count = sg_tables;
274
275	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
276	if (rc)
277		return rc;
278
279	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
280		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
281		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);
282
283	return 0;
284}
285
286static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
287{
288	struct rd_dev *rd_dev;
289	struct rd_host *rd_host = hba->hba_ptr;
290
291	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
292	if (!rd_dev) {
293		pr_err("Unable to allocate memory for struct rd_dev\n");
294		return NULL;
295	}
296
297	rd_dev->rd_host = rd_host;
298
299	return &rd_dev->dev;
300}
301
302static int rd_configure_device(struct se_device *dev)
303{
304	struct rd_dev *rd_dev = RD_DEV(dev);
305	struct rd_host *rd_host = dev->se_hba->hba_ptr;
306	int ret;
307
308	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
309		pr_debug("Missing rd_pages= parameter\n");
310		return -EINVAL;
311	}
312
313	ret = rd_build_device_space(rd_dev);
314	if (ret < 0)
315		goto fail;
316
317	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
318	dev->dev_attrib.hw_max_sectors = UINT_MAX;
319	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
320
321	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
322
323	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
324		" %u pages in %u tables, %lu total bytes\n",
325		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
326		rd_dev->sg_table_count,
327		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
328
329	return 0;
330
331fail:
332	rd_release_device_space(rd_dev);
333	return ret;
334}
335
336static void rd_free_device(struct se_device *dev)
337{
338	struct rd_dev *rd_dev = RD_DEV(dev);
339
340	rd_release_device_space(rd_dev);
341	kfree(rd_dev);
342}
343
344static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
345{
346	struct rd_dev_sg_table *sg_table;
347	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
348				sizeof(struct scatterlist));
349
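	/*
	 * Tables cover contiguous page ranges of sg_per_table pages each,
	 * so the owning table can be indexed directly instead of searched.
	 */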
350	i = page / sg_per_table;
351	if (i < rd_dev->sg_table_count) {
352		sg_table = &rd_dev->sg_table_array[i];
353		if ((sg_table->page_start_offset <= page) &&
354		    (sg_table->page_end_offset >= page))
355			return sg_table;
356	}
357
358	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
359			page);
360
361	return NULL;
362}
363
364static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
365{
366	struct rd_dev_sg_table *sg_table;
367	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
368				sizeof(struct scatterlist));
369
370	i = page / sg_per_table;
371	if (i < rd_dev->sg_prot_count) {
372		sg_table = &rd_dev->sg_prot_array[i];
373		if ((sg_table->page_start_offset <= page) &&
374		     (sg_table->page_end_offset >= page))
375			return sg_table;
376	}
377
378	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
379			page);
380
381	return NULL;
382}
383
384static sense_reason_t
385rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
386	      enum dma_data_direction data_direction)
387{
388	struct se_device *se_dev = cmd->se_dev;
389	struct rd_dev *dev = RD_DEV(se_dev);
390	struct rd_dev_sg_table *table;
391	struct scatterlist *rd_sg;
392	struct sg_mapping_iter m;
393	u32 rd_offset;
394	u32 rd_size;
395	u32 rd_page;
396	u32 src_len;
397	u64 tmp;
398	sense_reason_t rc;
399
400	if (dev->rd_flags & RDF_NULLIO) {
401		target_complete_cmd(cmd, SAM_STAT_GOOD);
402		return 0;
403	}
404
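	/* Convert the starting LBA into a backing page number (quotient)
	 * and a byte offset within that page (remainder from do_div). */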
405	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
406	rd_offset = do_div(tmp, PAGE_SIZE);
407	rd_page = tmp;
408	rd_size = cmd->data_length;
409
410	table = rd_get_sg_table(dev, rd_page);
411	if (!table)
412		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
413
414	rd_sg = &table->sg_table[rd_page - table->page_start_offset];
415
416	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
417			dev->rd_dev_id,
418			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
419			cmd->t_task_lba, rd_size, rd_page, rd_offset);
420
421	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
422		struct rd_dev_sg_table *prot_table;
423		struct scatterlist *prot_sg;
424		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
425		u32 prot_offset, prot_page;
426
427		tmp = cmd->t_task_lba * se_dev->prot_length;
428		prot_offset = do_div(tmp, PAGE_SIZE);
429		prot_page = tmp;
430
431		prot_table = rd_get_prot_table(dev, prot_page);
432		if (!prot_table)
433			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
434
435		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
436
437		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
438					  prot_sg, prot_offset);
439		if (rc)
440			return rc;
441	}
442
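	/*
	 * Copy between the command's scatterlist (walked with sg_miter) and
	 * the ramdisk backing scatterlist, at most one backing page segment
	 * per iteration.
	 */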
443	src_len = PAGE_SIZE - rd_offset;
444	sg_miter_start(&m, sgl, sgl_nents,
445			data_direction == DMA_FROM_DEVICE ?
446				SG_MITER_TO_SG : SG_MITER_FROM_SG);
447	while (rd_size) {
448		u32 len;
449		void *rd_addr;
450
451		sg_miter_next(&m);
452		if (!(u32)m.length) {
453			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
454				 dev->rd_dev_id, m.addr, m.length);
455			sg_miter_stop(&m);
456			return TCM_INCORRECT_AMOUNT_OF_DATA;
457		}
458		len = min((u32)m.length, src_len);
459		if (len > rd_size) {
460			pr_debug("RD[%u]: size underrun page %d offset %d "
461				 "size %d\n", dev->rd_dev_id,
462				 rd_page, rd_offset, rd_size);
463			len = rd_size;
464		}
465		m.consumed = len;
466
467		rd_addr = sg_virt(rd_sg) + rd_offset;
468
469		if (data_direction == DMA_FROM_DEVICE)
470			memcpy(m.addr, rd_addr, len);
471		else
472			memcpy(rd_addr, m.addr, len);
473
474		rd_size -= len;
475		if (!rd_size)
476			continue;
477
478		src_len -= len;
479		if (src_len) {
480			rd_offset += len;
481			continue;
482		}
483
484		/* rd page completed, next one please */
485		rd_page++;
486		rd_offset = 0;
487		src_len = PAGE_SIZE;
488		if (rd_page <= table->page_end_offset) {
489			rd_sg++;
490			continue;
491		}
492
493		table = rd_get_sg_table(dev, rd_page);
494		if (!table) {
495			sg_miter_stop(&m);
496			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
497		}
498
499		/* since we increment, the first sg entry is correct */
500		rd_sg = table->sg_table;
501	}
502	sg_miter_stop(&m);
503
504	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
505		struct rd_dev_sg_table *prot_table;
506		struct scatterlist *prot_sg;
507		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
508		u32 prot_offset, prot_page;
509
510		tmp = cmd->t_task_lba * se_dev->prot_length;
511		prot_offset = do_div(tmp, PAGE_SIZE);
512		prot_page = tmp;
513
514		prot_table = rd_get_prot_table(dev, prot_page);
515		if (!prot_table)
516			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
517
518		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
519
520		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
521					 prot_sg, prot_offset);
522		if (rc)
523			return rc;
524	}
525
526	target_complete_cmd(cmd, SAM_STAT_GOOD);
527	return 0;
528}
529
530enum {
531	Opt_rd_pages, Opt_rd_nullio, Opt_err
532};
533
534static match_table_t tokens = {
535	{Opt_rd_pages, "rd_pages=%d"},
536	{Opt_rd_nullio, "rd_nullio=%d"},
537	{Opt_err, NULL}
538};
539
540static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
541		const char *page, ssize_t count)
542{
543	struct rd_dev *rd_dev = RD_DEV(dev);
544	char *orig, *ptr, *opts;
545	substring_t args[MAX_OPT_ARGS];
546	int ret = 0, arg, token;
547
548	opts = kstrdup(page, GFP_KERNEL);
549	if (!opts)
550		return -ENOMEM;
551
552	orig = opts;
553
554	while ((ptr = strsep(&opts, ",\n")) != NULL) {
555		if (!*ptr)
556			continue;
557
558		token = match_token(ptr, tokens, args);
559		switch (token) {
560		case Opt_rd_pages:
561			match_int(args, &arg);
562			rd_dev->rd_page_count = arg;
563			pr_debug("RAMDISK: Referencing Page"
564				" Count: %u\n", rd_dev->rd_page_count);
565			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
566			break;
567		case Opt_rd_nullio:
568			match_int(args, &arg);
569			if (arg != 1)
570				break;
571
572			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
573			rd_dev->rd_flags |= RDF_NULLIO;
574			break;
575		default:
576			break;
577		}
578	}
579
580	kfree(orig);
581	return (!ret) ? count : ret;
582}
583
584static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
585{
586	struct rd_dev *rd_dev = RD_DEV(dev);
587
588	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
589			rd_dev->rd_dev_id);
590	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
591			"  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
592			PAGE_SIZE, rd_dev->sg_table_count,
593			!!(rd_dev->rd_flags & RDF_NULLIO));
594	return bl;
595}
596
597static sector_t rd_get_blocks(struct se_device *dev)
598{
599	struct rd_dev *rd_dev = RD_DEV(dev);
600
601	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
602			dev->dev_attrib.block_size) - 1;
603
604	return blocks_long;
605}
606
607static int rd_init_prot(struct se_device *dev)
608{
609	struct rd_dev *rd_dev = RD_DEV(dev);
610
611	if (!dev->dev_attrib.pi_prot_type)
612		return 0;
613
614	return rd_build_prot_space(rd_dev, dev->prot_length,
615				   dev->dev_attrib.block_size);
616}
617
618static void rd_free_prot(struct se_device *dev)
619{
620	struct rd_dev *rd_dev = RD_DEV(dev);
621
622	rd_release_prot_space(rd_dev);
623}
624
625static struct sbc_ops rd_sbc_ops = {
626	.execute_rw		= rd_execute_rw,
627};
628
629static sense_reason_t
630rd_parse_cdb(struct se_cmd *cmd)
631{
632	return sbc_parse_cdb(cmd, &rd_sbc_ops);
633}
634
635static struct se_subsystem_api rd_mcp_template = {
636	.name			= "rd_mcp",
637	.inquiry_prod		= "RAMDISK-MCP",
638	.inquiry_rev		= RD_MCP_VERSION,
639	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
640	.attach_hba		= rd_attach_hba,
641	.detach_hba		= rd_detach_hba,
642	.alloc_device		= rd_alloc_device,
643	.configure_device	= rd_configure_device,
644	.free_device		= rd_free_device,
645	.parse_cdb		= rd_parse_cdb,
646	.set_configfs_dev_params = rd_set_configfs_dev_params,
647	.show_configfs_dev_params = rd_show_configfs_dev_params,
648	.get_device_type	= sbc_get_device_type,
649	.get_blocks		= rd_get_blocks,
650	.init_prot		= rd_init_prot,
651	.free_prot		= rd_free_prot,
652};
653
654int __init rd_module_init(void)
655{
656	int ret;
657
658	ret = transport_subsystem_register(&rd_mcp_template);
659	if (ret < 0) {
660		return ret;
661	}
662
663	return 0;
664}
665
666void rd_module_exit(void)
667{
668	transport_subsystem_release(&rd_mcp_template);
669}