target_core_rd.c, v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*******************************************************************************
  3 * Filename:  target_core_rd.c
  4 *
  5 * This file contains the Storage Engine <-> Ramdisk transport
  6 * specific functions.
  7 *
  8 * (c) Copyright 2003-2013 Datera, Inc.
  9 *
 10 * Nicholas A. Bellinger <nab@kernel.org>
 11 *
 12 ******************************************************************************/
 13
 14#include <linux/string.h>
 15#include <linux/parser.h>
 16#include <linux/highmem.h>
 17#include <linux/timer.h>
 18#include <linux/scatterlist.h>
 19#include <linux/slab.h>
 20#include <linux/spinlock.h>
 21#include <scsi/scsi_proto.h>
 22
 23#include <target/target_core_base.h>
 24#include <target/target_core_backend.h>
 25
 26#include "target_core_rd.h"
 27
 28static inline struct rd_dev *RD_DEV(struct se_device *dev)
 29{
 30	return container_of(dev, struct rd_dev, dev);
 31}
 32
 33static int rd_attach_hba(struct se_hba *hba, u32 host_id)
 34{
 35	struct rd_host *rd_host;
 36
 37	rd_host = kzalloc(sizeof(*rd_host), GFP_KERNEL);
 38	if (!rd_host)
 39		return -ENOMEM;
 40
 41	rd_host->rd_host_id = host_id;
 42
 43	hba->hba_ptr = rd_host;
 44
 45	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
 46		" Generic Target Core Stack %s\n", hba->hba_id,
 47		RD_HBA_VERSION, TARGET_CORE_VERSION);
 48
 49	return 0;
 50}
 51
 52static void rd_detach_hba(struct se_hba *hba)
 53{
 54	struct rd_host *rd_host = hba->hba_ptr;
 55
 56	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
 57		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
 58
 59	kfree(rd_host);
 60	hba->hba_ptr = NULL;
 61}
 62
 63static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
 64				 u32 sg_table_count)
 65{
 66	struct page *pg;
 67	struct scatterlist *sg;
 68	u32 i, j, page_count = 0, sg_per_table;
 69
 70	for (i = 0; i < sg_table_count; i++) {
 71		sg = sg_table[i].sg_table;
 72		sg_per_table = sg_table[i].rd_sg_count;
 73
 74		for (j = 0; j < sg_per_table; j++) {
 75			pg = sg_page(&sg[j]);
 76			if (pg) {
 77				__free_page(pg);
 78				page_count++;
 79			}
 80		}
 81		kfree(sg);
 82	}
 83
 84	kfree(sg_table);
 85	return page_count;
 86}
 87
 88static void rd_release_device_space(struct rd_dev *rd_dev)
 89{
 90	u32 page_count;
 91
 92	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
 93		return;
 94
 95	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
 96					  rd_dev->sg_table_count);
 97
 98	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
 99		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
100		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
101		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
102
103	rd_dev->sg_table_array = NULL;
104	rd_dev->sg_table_count = 0;
105}
106
107
108/*	rd_allocate_sgl_table():
109 *
110 *	Allocate and initialize the scatterlist tables and backing pages.
111 */
112static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
113				 u32 total_sg_needed, unsigned char init_payload)
114{
115	u32 i = 0, j, page_offset = 0, sg_per_table;
116	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
117				sizeof(struct scatterlist));
118	struct page *pg;
119	struct scatterlist *sg;
120	unsigned char *p;
121
122	while (total_sg_needed) {
123		unsigned int chain_entry = 0;
124
125		sg_per_table = (total_sg_needed > max_sg_per_table) ?
126			max_sg_per_table : total_sg_needed;
127
128		/*
129		 * Reserve extra element for chain entry
130		 */
131		if (sg_per_table < total_sg_needed)
132			chain_entry = 1;
133
134		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
135				GFP_KERNEL);
136		if (!sg)
137			return -ENOMEM;
138
139		sg_init_table(sg, sg_per_table + chain_entry);
140
141		if (i > 0) {
142			sg_chain(sg_table[i - 1].sg_table,
143				 max_sg_per_table + 1, sg);
144		}
145
146		sg_table[i].sg_table = sg;
147		sg_table[i].rd_sg_count = sg_per_table;
148		sg_table[i].page_start_offset = page_offset;
149		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
150						- 1;
151
152		for (j = 0; j < sg_per_table; j++) {
153			pg = alloc_pages(GFP_KERNEL, 0);
154			if (!pg) {
155				pr_err("Unable to allocate scatterlist"
156					" pages for struct rd_dev_sg_table\n");
157				return -ENOMEM;
158			}
159			sg_assign_page(&sg[j], pg);
160			sg[j].length = PAGE_SIZE;
161
162			p = kmap(pg);
163			memset(p, init_payload, PAGE_SIZE);
164			kunmap(pg);
165		}
166
167		page_offset += sg_per_table;
168		total_sg_needed -= sg_per_table;
169	}
170
171	return 0;
172}
173
174static int rd_build_device_space(struct rd_dev *rd_dev)
175{
176	struct rd_dev_sg_table *sg_table;
177	u32 sg_tables, total_sg_needed;
178	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
179				sizeof(struct scatterlist));
180	int rc;
181
182	if (rd_dev->rd_page_count <= 0) {
183		pr_err("Illegal page count: %u for Ramdisk device\n",
184		       rd_dev->rd_page_count);
185		return -EINVAL;
186	}
187
188	/* Don't need backing pages for NULLIO */
189	if (rd_dev->rd_flags & RDF_NULLIO)
190		return 0;
191
192	total_sg_needed = rd_dev->rd_page_count;
193
194	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
195	sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
196	if (!sg_table)
197		return -ENOMEM;
198
199	rd_dev->sg_table_array = sg_table;
200	rd_dev->sg_table_count = sg_tables;
201
202	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
203	if (rc)
204		return rc;
205
206	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
207		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
208		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
209		 rd_dev->sg_table_count);
210
211	return 0;
212}
213
214static void rd_release_prot_space(struct rd_dev *rd_dev)
215{
216	u32 page_count;
217
218	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
219		return;
220
221	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
222					  rd_dev->sg_prot_count);
223
224	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
225		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
226		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
227		 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
228
229	rd_dev->sg_prot_array = NULL;
230	rd_dev->sg_prot_count = 0;
231}
232
233static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
234{
235	struct rd_dev_sg_table *sg_table;
236	u32 total_sg_needed, sg_tables;
237	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
238				sizeof(struct scatterlist));
239	int rc;
240
241	if (rd_dev->rd_flags & RDF_NULLIO)
242		return 0;
243	/*
244	 * prot_length=8byte dif data
245	 * tot sg needed = rd_page_count * (PGSZ/block_size) *
246	 * 		   (prot_length/block_size) + pad
247	 * PGSZ canceled each other.
248	 */
249	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
250
251	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
252	sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
253	if (!sg_table)
254		return -ENOMEM;
255
256	rd_dev->sg_prot_array = sg_table;
257	rd_dev->sg_prot_count = sg_tables;
258
259	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
260	if (rc)
261		return rc;
262
263	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
264		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
265		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);
266
267	return 0;
268}
269
270static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
271{
272	struct rd_dev *rd_dev;
273	struct rd_host *rd_host = hba->hba_ptr;
274
275	rd_dev = kzalloc(sizeof(*rd_dev), GFP_KERNEL);
276	if (!rd_dev)
277		return NULL;
278
279	rd_dev->rd_host = rd_host;
280
281	return &rd_dev->dev;
282}
283
284static int rd_configure_device(struct se_device *dev)
285{
286	struct rd_dev *rd_dev = RD_DEV(dev);
287	struct rd_host *rd_host = dev->se_hba->hba_ptr;
288	int ret;
289
290	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
291		pr_debug("Missing rd_pages= parameter\n");
292		return -EINVAL;
293	}
294
295	ret = rd_build_device_space(rd_dev);
296	if (ret < 0)
297		goto fail;
298
299	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
300	dev->dev_attrib.hw_max_sectors = UINT_MAX;
301	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
302	dev->dev_attrib.is_nonrot = 1;
303
304	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
305
306	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
307		" %u pages in %u tables, %lu total bytes\n",
308		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
309		rd_dev->sg_table_count,
310		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
311
312	return 0;
313
314fail:
315	rd_release_device_space(rd_dev);
316	return ret;
317}
318
319static void rd_dev_call_rcu(struct rcu_head *p)
320{
321	struct se_device *dev = container_of(p, struct se_device, rcu_head);
322	struct rd_dev *rd_dev = RD_DEV(dev);
323
324	kfree(rd_dev);
325}
326
327static void rd_free_device(struct se_device *dev)
328{
329	call_rcu(&dev->rcu_head, rd_dev_call_rcu);
330}
331
332static void rd_destroy_device(struct se_device *dev)
333{
334	struct rd_dev *rd_dev = RD_DEV(dev);
335
336	rd_release_device_space(rd_dev);
337}
338
339static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
340{
341	struct rd_dev_sg_table *sg_table;
342	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
343				sizeof(struct scatterlist));
344
345	i = page / sg_per_table;
346	if (i < rd_dev->sg_table_count) {
347		sg_table = &rd_dev->sg_table_array[i];
348		if ((sg_table->page_start_offset <= page) &&
349		    (sg_table->page_end_offset >= page))
350			return sg_table;
351	}
352
353	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
354			page);
355
356	return NULL;
357}
358
359static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
360{
361	struct rd_dev_sg_table *sg_table;
362	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
363				sizeof(struct scatterlist));
364
365	i = page / sg_per_table;
366	if (i < rd_dev->sg_prot_count) {
367		sg_table = &rd_dev->sg_prot_array[i];
368		if ((sg_table->page_start_offset <= page) &&
369		     (sg_table->page_end_offset >= page))
370			return sg_table;
371	}
372
373	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
374			page);
375
376	return NULL;
377}
378
379static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
380{
381	struct se_device *se_dev = cmd->se_dev;
382	struct rd_dev *dev = RD_DEV(se_dev);
383	struct rd_dev_sg_table *prot_table;
384	struct scatterlist *prot_sg;
385	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
386	u32 prot_offset, prot_page;
387	u32 prot_npages __maybe_unused;
388	u64 tmp;
389	sense_reason_t rc = 0;
390
391	tmp = cmd->t_task_lba * se_dev->prot_length;
392	prot_offset = do_div(tmp, PAGE_SIZE);
393	prot_page = tmp;
394
395	prot_table = rd_get_prot_table(dev, prot_page);
396	if (!prot_table)
397		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
398
399	prot_sg = &prot_table->sg_table[prot_page -
400					prot_table->page_start_offset];
401
402	if (se_dev->dev_attrib.pi_prot_verify) {
403		if (is_read)
404			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
405					    prot_sg, prot_offset);
406		else
407			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
408					    cmd->t_prot_sg, 0);
409	}
410	if (!rc)
411		sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);
412
413	return rc;
414}
415
416static sense_reason_t
417rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
418	      enum dma_data_direction data_direction)
419{
420	struct se_device *se_dev = cmd->se_dev;
421	struct rd_dev *dev = RD_DEV(se_dev);
422	struct rd_dev_sg_table *table;
423	struct scatterlist *rd_sg;
424	struct sg_mapping_iter m;
425	u32 rd_offset;
426	u32 rd_size;
427	u32 rd_page;
428	u32 src_len;
429	u64 tmp;
430	sense_reason_t rc;
431
432	if (dev->rd_flags & RDF_NULLIO) {
433		target_complete_cmd(cmd, SAM_STAT_GOOD);
434		return 0;
435	}
436
437	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
438	rd_offset = do_div(tmp, PAGE_SIZE);
439	rd_page = tmp;
440	rd_size = cmd->data_length;
441
442	table = rd_get_sg_table(dev, rd_page);
443	if (!table)
444		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
445
446	rd_sg = &table->sg_table[rd_page - table->page_start_offset];
447
448	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
449			dev->rd_dev_id,
450			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
451			cmd->t_task_lba, rd_size, rd_page, rd_offset);
452
453	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
454	    data_direction == DMA_TO_DEVICE) {
455		rc = rd_do_prot_rw(cmd, false);
456		if (rc)
457			return rc;
458	}
459
460	src_len = PAGE_SIZE - rd_offset;
461	sg_miter_start(&m, sgl, sgl_nents,
462			data_direction == DMA_FROM_DEVICE ?
463				SG_MITER_TO_SG : SG_MITER_FROM_SG);
464	while (rd_size) {
465		u32 len;
466		void *rd_addr;
467
468		sg_miter_next(&m);
469		if (!(u32)m.length) {
470			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
471				 dev->rd_dev_id, m.addr, m.length);
472			sg_miter_stop(&m);
473			return TCM_INCORRECT_AMOUNT_OF_DATA;
474		}
475		len = min((u32)m.length, src_len);
476		if (len > rd_size) {
477			pr_debug("RD[%u]: size underrun page %d offset %d "
478				 "size %d\n", dev->rd_dev_id,
479				 rd_page, rd_offset, rd_size);
480			len = rd_size;
481		}
482		m.consumed = len;
483
484		rd_addr = sg_virt(rd_sg) + rd_offset;
485
486		if (data_direction == DMA_FROM_DEVICE)
487			memcpy(m.addr, rd_addr, len);
488		else
489			memcpy(rd_addr, m.addr, len);
490
491		rd_size -= len;
492		if (!rd_size)
493			continue;
494
495		src_len -= len;
496		if (src_len) {
497			rd_offset += len;
498			continue;
499		}
500
501		/* rd page completed, next one please */
502		rd_page++;
503		rd_offset = 0;
504		src_len = PAGE_SIZE;
505		if (rd_page <= table->page_end_offset) {
506			rd_sg++;
507			continue;
508		}
509
510		table = rd_get_sg_table(dev, rd_page);
511		if (!table) {
512			sg_miter_stop(&m);
513			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
514		}
515
516		/* since we increment, the first sg entry is correct */
517		rd_sg = table->sg_table;
518	}
519	sg_miter_stop(&m);
520
521	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
522	    data_direction == DMA_FROM_DEVICE) {
523		rc = rd_do_prot_rw(cmd, true);
524		if (rc)
525			return rc;
526	}
527
528	target_complete_cmd(cmd, SAM_STAT_GOOD);
529	return 0;
530}
531
532enum {
533	Opt_rd_pages, Opt_rd_nullio, Opt_err
534};
535
536static match_table_t tokens = {
537	{Opt_rd_pages, "rd_pages=%d"},
538	{Opt_rd_nullio, "rd_nullio=%d"},
539	{Opt_err, NULL}
540};
541
542static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
543		const char *page, ssize_t count)
544{
545	struct rd_dev *rd_dev = RD_DEV(dev);
546	char *orig, *ptr, *opts;
547	substring_t args[MAX_OPT_ARGS];
548	int arg, token;
549
550	opts = kstrdup(page, GFP_KERNEL);
551	if (!opts)
552		return -ENOMEM;
553
554	orig = opts;
555
556	while ((ptr = strsep(&opts, ",\n")) != NULL) {
557		if (!*ptr)
558			continue;
559
560		token = match_token(ptr, tokens, args);
561		switch (token) {
562		case Opt_rd_pages:
563			match_int(args, &arg);
564			rd_dev->rd_page_count = arg;
565			pr_debug("RAMDISK: Referencing Page"
566				" Count: %u\n", rd_dev->rd_page_count);
567			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
568			break;
569		case Opt_rd_nullio:
570			match_int(args, &arg);
571			if (arg != 1)
572				break;
573
574			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
575			rd_dev->rd_flags |= RDF_NULLIO;
576			break;
577		default:
578			break;
579		}
580	}
581
582	kfree(orig);
583	return count;
584}
585
586static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
587{
588	struct rd_dev *rd_dev = RD_DEV(dev);
589
590	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
591			rd_dev->rd_dev_id);
592	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
593			"  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
594			PAGE_SIZE, rd_dev->sg_table_count,
595			!!(rd_dev->rd_flags & RDF_NULLIO));
596	return bl;
597}
598
599static sector_t rd_get_blocks(struct se_device *dev)
600{
601	struct rd_dev *rd_dev = RD_DEV(dev);
602
603	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
604			dev->dev_attrib.block_size) - 1;
605
606	return blocks_long;
607}
608
609static int rd_init_prot(struct se_device *dev)
610{
611	struct rd_dev *rd_dev = RD_DEV(dev);
612
613	if (!dev->dev_attrib.pi_prot_type)
614		return 0;
615
616	return rd_build_prot_space(rd_dev, dev->prot_length,
617				   dev->dev_attrib.block_size);
618}
619
620static void rd_free_prot(struct se_device *dev)
621{
622	struct rd_dev *rd_dev = RD_DEV(dev);
623
624	rd_release_prot_space(rd_dev);
625}
626
627static struct sbc_ops rd_sbc_ops = {
628	.execute_rw		= rd_execute_rw,
629};
630
631static sense_reason_t
632rd_parse_cdb(struct se_cmd *cmd)
633{
634	return sbc_parse_cdb(cmd, &rd_sbc_ops);
635}
636
637static const struct target_backend_ops rd_mcp_ops = {
638	.name			= "rd_mcp",
639	.inquiry_prod		= "RAMDISK-MCP",
640	.inquiry_rev		= RD_MCP_VERSION,
641	.attach_hba		= rd_attach_hba,
642	.detach_hba		= rd_detach_hba,
643	.alloc_device		= rd_alloc_device,
644	.configure_device	= rd_configure_device,
645	.destroy_device		= rd_destroy_device,
646	.free_device		= rd_free_device,
647	.parse_cdb		= rd_parse_cdb,
648	.set_configfs_dev_params = rd_set_configfs_dev_params,
649	.show_configfs_dev_params = rd_show_configfs_dev_params,
650	.get_device_type	= sbc_get_device_type,
651	.get_blocks		= rd_get_blocks,
652	.init_prot		= rd_init_prot,
653	.free_prot		= rd_free_prot,
654	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
655};
656
657int __init rd_module_init(void)
658{
659	return transport_backend_register(&rd_mcp_ops);
660}
661
662void rd_module_exit(void)
663{
664	target_backend_unregister(&rd_mcp_ops);
665}
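For reference, the table lookup in rd_get_sg_table() and rd_get_prot_table() above is plain integer arithmetic: one table covers RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist) pages, and the table index for a page is the page number divided by that count. The standalone sketch below (userspace C, not kernel code; the 65536-byte RD_MAX_ALLOCATION_SIZE and the 32-byte scatterlist entry size are assumptions for illustration only) reproduces that arithmetic and the sg_tables sizing from rd_build_device_space():

/*
 * Standalone sketch (not kernel code): mirrors the page -> table index
 * arithmetic used by rd_get_sg_table().  The two constants below are
 * illustrative assumptions, not values read from the kernel headers.
 */
#include <stdio.h>
#include <stdint.h>

#define RD_MAX_ALLOCATION_SIZE	65536	/* assumed value of the header constant */
#define SG_ENTRY_SIZE		32	/* assumed sizeof(struct scatterlist) */

/* Return the sg_table index holding 'page', or -1 if out of range. */
static int rd_table_index(uint32_t page, uint32_t sg_table_count)
{
	uint32_t max_sg_per_table = RD_MAX_ALLOCATION_SIZE / SG_ENTRY_SIZE;
	uint32_t i = page / max_sg_per_table;

	return (i < sg_table_count) ? (int)i : -1;
}

int main(void)
{
	uint32_t max_sg_per_table = RD_MAX_ALLOCATION_SIZE / SG_ENTRY_SIZE;
	uint32_t rd_page_count = 5000;	/* e.g. rd_pages=5000 */

	/* Same sizing as rd_build_device_space(): 5000 / 2048 + 1 = 3 tables. */
	uint32_t sg_tables = rd_page_count / max_sg_per_table + 1;

	printf("max_sg_per_table=%u, sg_tables=%u\n", max_sg_per_table, sg_tables);
	printf("page 4096 -> table %d\n", rd_table_index(4096, sg_tables));
	return 0;
}

One design difference visible against the older v3.15 listing below: the v5.9 allocator reserves an extra slot per table and links consecutive tables with sg_chain(), while the v3.15 allocator leaves each table as an independent scatterlist array.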
target_core_rd.c, v3.15
 
  1/*******************************************************************************
  2 * Filename:  target_core_rd.c
  3 *
  4 * This file contains the Storage Engine <-> Ramdisk transport
  5 * specific functions.
  6 *
  7 * (c) Copyright 2003-2013 Datera, Inc.
  8 *
  9 * Nicholas A. Bellinger <nab@kernel.org>
 10 *
 11 * This program is free software; you can redistribute it and/or modify
 12 * it under the terms of the GNU General Public License as published by
 13 * the Free Software Foundation; either version 2 of the License, or
 14 * (at your option) any later version.
 15 *
 16 * This program is distributed in the hope that it will be useful,
 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 19 * GNU General Public License for more details.
 20 *
 21 * You should have received a copy of the GNU General Public License
 22 * along with this program; if not, write to the Free Software
 23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 24 *
 25 ******************************************************************************/
 26
 27#include <linux/string.h>
 28#include <linux/parser.h>
 29#include <linux/timer.h>
 30#include <linux/slab.h>
 31#include <linux/spinlock.h>
 32#include <scsi/scsi.h>
 33#include <scsi/scsi_host.h>
 34
 35#include <target/target_core_base.h>
 36#include <target/target_core_backend.h>
 37
 38#include "target_core_rd.h"
 39
 40static inline struct rd_dev *RD_DEV(struct se_device *dev)
 41{
 42	return container_of(dev, struct rd_dev, dev);
 43}
 44
 45/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 46 *
 47 *
 48 */
 49static int rd_attach_hba(struct se_hba *hba, u32 host_id)
 50{
 51	struct rd_host *rd_host;
 52
 53	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
 54	if (!rd_host) {
 55		pr_err("Unable to allocate memory for struct rd_host\n");
 56		return -ENOMEM;
 57	}
 58
 59	rd_host->rd_host_id = host_id;
 60
 61	hba->hba_ptr = rd_host;
 62
 63	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
 64		" Generic Target Core Stack %s\n", hba->hba_id,
 65		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
 66
 67	return 0;
 68}
 69
 70static void rd_detach_hba(struct se_hba *hba)
 71{
 72	struct rd_host *rd_host = hba->hba_ptr;
 73
 74	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
 75		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
 76
 77	kfree(rd_host);
 78	hba->hba_ptr = NULL;
 79}
 80
 81static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
 82				 u32 sg_table_count)
 83{
 84	struct page *pg;
 85	struct scatterlist *sg;
 86	u32 i, j, page_count = 0, sg_per_table;
 87
 88	for (i = 0; i < sg_table_count; i++) {
 89		sg = sg_table[i].sg_table;
 90		sg_per_table = sg_table[i].rd_sg_count;
 91
 92		for (j = 0; j < sg_per_table; j++) {
 93			pg = sg_page(&sg[j]);
 94			if (pg) {
 95				__free_page(pg);
 96				page_count++;
 97			}
 98		}
 99		kfree(sg);
100	}
101
102	kfree(sg_table);
103	return page_count;
104}
105
106static void rd_release_device_space(struct rd_dev *rd_dev)
107{
108	u32 page_count;
109
110	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
111		return;
112
113	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
114					  rd_dev->sg_table_count);
115
116	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
117		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
118		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
119		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
120
121	rd_dev->sg_table_array = NULL;
122	rd_dev->sg_table_count = 0;
123}
124
125
126/*	rd_allocate_sgl_table():
127 *
128 *	Allocate and initialize the scatterlist tables and backing pages.
129 */
130static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
131				 u32 total_sg_needed, unsigned char init_payload)
132{
133	u32 i = 0, j, page_offset = 0, sg_per_table;
134	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
135				sizeof(struct scatterlist));
136	struct page *pg;
137	struct scatterlist *sg;
138	unsigned char *p;
139
140	while (total_sg_needed) {
141		sg_per_table = (total_sg_needed > max_sg_per_table) ?
142			max_sg_per_table : total_sg_needed;
143
144		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
145				GFP_KERNEL);
146		if (!sg) {
147			pr_err("Unable to allocate scatterlist array"
148				" for struct rd_dev\n");
149			return -ENOMEM;
150		}
151
152		sg_init_table(sg, sg_per_table);
153
154		sg_table[i].sg_table = sg;
155		sg_table[i].rd_sg_count = sg_per_table;
156		sg_table[i].page_start_offset = page_offset;
157		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
158						- 1;
159
160		for (j = 0; j < sg_per_table; j++) {
161			pg = alloc_pages(GFP_KERNEL, 0);
162			if (!pg) {
163				pr_err("Unable to allocate scatterlist"
164					" pages for struct rd_dev_sg_table\n");
165				return -ENOMEM;
166			}
167			sg_assign_page(&sg[j], pg);
168			sg[j].length = PAGE_SIZE;
169
170			p = kmap(pg);
171			memset(p, init_payload, PAGE_SIZE);
172			kunmap(pg);
173		}
174
175		page_offset += sg_per_table;
176		total_sg_needed -= sg_per_table;
177	}
178
179	return 0;
180}
181
182static int rd_build_device_space(struct rd_dev *rd_dev)
183{
184	struct rd_dev_sg_table *sg_table;
185	u32 sg_tables, total_sg_needed;
186	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
187				sizeof(struct scatterlist));
188	int rc;
189
190	if (rd_dev->rd_page_count <= 0) {
191		pr_err("Illegal page count: %u for Ramdisk device\n",
192		       rd_dev->rd_page_count);
193		return -EINVAL;
194	}
195
196	/* Don't need backing pages for NULLIO */
197	if (rd_dev->rd_flags & RDF_NULLIO)
198		return 0;
199
200	total_sg_needed = rd_dev->rd_page_count;
201
202	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
203
204	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
205	if (!sg_table) {
206		pr_err("Unable to allocate memory for Ramdisk"
207		       " scatterlist tables\n");
208		return -ENOMEM;
209	}
210
211	rd_dev->sg_table_array = sg_table;
212	rd_dev->sg_table_count = sg_tables;
213
214	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
215	if (rc)
216		return rc;
217
218	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
219		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
220		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
221		 rd_dev->sg_table_count);
222
223	return 0;
224}
225
226static void rd_release_prot_space(struct rd_dev *rd_dev)
227{
228	u32 page_count;
229
230	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
231		return;
232
233	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
234					  rd_dev->sg_prot_count);
235
236	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
237		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
238		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
239		 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
240
241	rd_dev->sg_prot_array = NULL;
242	rd_dev->sg_prot_count = 0;
243}
244
245static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
246{
247	struct rd_dev_sg_table *sg_table;
248	u32 total_sg_needed, sg_tables;
249	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
250				sizeof(struct scatterlist));
251	int rc;
252
253	if (rd_dev->rd_flags & RDF_NULLIO)
254		return 0;
255	/*
256	 * prot_length=8byte dif data
257	 * tot sg needed = rd_page_count * (PGSZ/block_size) *
258	 * 		   (prot_length/block_size) + pad
259	 * PGSZ canceled each other.
260	 */
261	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
262
263	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
264
265	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
266	if (!sg_table) {
267		pr_err("Unable to allocate memory for Ramdisk protection"
268		       " scatterlist tables\n");
269		return -ENOMEM;
270	}
271
272	rd_dev->sg_prot_array = sg_table;
273	rd_dev->sg_prot_count = sg_tables;
274
275	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
276	if (rc)
277		return rc;
278
279	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
280		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
281		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);
282
283	return 0;
284}
285
286static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
287{
288	struct rd_dev *rd_dev;
289	struct rd_host *rd_host = hba->hba_ptr;
290
291	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
292	if (!rd_dev) {
293		pr_err("Unable to allocate memory for struct rd_dev\n");
294		return NULL;
295	}
296
297	rd_dev->rd_host = rd_host;
298
299	return &rd_dev->dev;
300}
301
302static int rd_configure_device(struct se_device *dev)
303{
304	struct rd_dev *rd_dev = RD_DEV(dev);
305	struct rd_host *rd_host = dev->se_hba->hba_ptr;
306	int ret;
307
308	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
309		pr_debug("Missing rd_pages= parameter\n");
310		return -EINVAL;
311	}
312
313	ret = rd_build_device_space(rd_dev);
314	if (ret < 0)
315		goto fail;
316
317	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
318	dev->dev_attrib.hw_max_sectors = UINT_MAX;
319	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
320
321	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
322
323	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
324		" %u pages in %u tables, %lu total bytes\n",
325		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
326		rd_dev->sg_table_count,
327		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
328
329	return 0;
330
331fail:
332	rd_release_device_space(rd_dev);
333	return ret;
334}
335
336static void rd_free_device(struct se_device *dev)
337{
338	struct rd_dev *rd_dev = RD_DEV(dev);
339
340	rd_release_device_space(rd_dev);
341	kfree(rd_dev);
342}
343
344static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
345{
346	struct rd_dev_sg_table *sg_table;
347	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
348				sizeof(struct scatterlist));
349
350	i = page / sg_per_table;
351	if (i < rd_dev->sg_table_count) {
352		sg_table = &rd_dev->sg_table_array[i];
353		if ((sg_table->page_start_offset <= page) &&
354		    (sg_table->page_end_offset >= page))
355			return sg_table;
356	}
357
358	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
359			page);
360
361	return NULL;
362}
363
364static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
365{
366	struct rd_dev_sg_table *sg_table;
367	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
368				sizeof(struct scatterlist));
369
370	i = page / sg_per_table;
371	if (i < rd_dev->sg_prot_count) {
372		sg_table = &rd_dev->sg_prot_array[i];
373		if ((sg_table->page_start_offset <= page) &&
374		     (sg_table->page_end_offset >= page))
375			return sg_table;
376	}
377
378	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
379			page);
380
381	return NULL;
382}
383
384static sense_reason_t
385rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
386	      enum dma_data_direction data_direction)
387{
388	struct se_device *se_dev = cmd->se_dev;
389	struct rd_dev *dev = RD_DEV(se_dev);
390	struct rd_dev_sg_table *table;
391	struct scatterlist *rd_sg;
392	struct sg_mapping_iter m;
393	u32 rd_offset;
394	u32 rd_size;
395	u32 rd_page;
396	u32 src_len;
397	u64 tmp;
398	sense_reason_t rc;
399
400	if (dev->rd_flags & RDF_NULLIO) {
401		target_complete_cmd(cmd, SAM_STAT_GOOD);
402		return 0;
403	}
404
405	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
406	rd_offset = do_div(tmp, PAGE_SIZE);
407	rd_page = tmp;
408	rd_size = cmd->data_length;
409
410	table = rd_get_sg_table(dev, rd_page);
411	if (!table)
412		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
413
414	rd_sg = &table->sg_table[rd_page - table->page_start_offset];
415
416	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
417			dev->rd_dev_id,
418			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
419			cmd->t_task_lba, rd_size, rd_page, rd_offset);
420
421	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
422		struct rd_dev_sg_table *prot_table;
423		struct scatterlist *prot_sg;
424		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
425		u32 prot_offset, prot_page;
426
427		tmp = cmd->t_task_lba * se_dev->prot_length;
428		prot_offset = do_div(tmp, PAGE_SIZE);
429		prot_page = tmp;
430
431		prot_table = rd_get_prot_table(dev, prot_page);
432		if (!prot_table)
433			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
434
435		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
436
437		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
438					  prot_sg, prot_offset);
439		if (rc)
440			return rc;
441	}
442
443	src_len = PAGE_SIZE - rd_offset;
444	sg_miter_start(&m, sgl, sgl_nents,
445			data_direction == DMA_FROM_DEVICE ?
446				SG_MITER_TO_SG : SG_MITER_FROM_SG);
447	while (rd_size) {
448		u32 len;
449		void *rd_addr;
450
451		sg_miter_next(&m);
452		if (!(u32)m.length) {
453			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
454				 dev->rd_dev_id, m.addr, m.length);
455			sg_miter_stop(&m);
456			return TCM_INCORRECT_AMOUNT_OF_DATA;
457		}
458		len = min((u32)m.length, src_len);
459		if (len > rd_size) {
460			pr_debug("RD[%u]: size underrun page %d offset %d "
461				 "size %d\n", dev->rd_dev_id,
462				 rd_page, rd_offset, rd_size);
463			len = rd_size;
464		}
465		m.consumed = len;
466
467		rd_addr = sg_virt(rd_sg) + rd_offset;
468
469		if (data_direction == DMA_FROM_DEVICE)
470			memcpy(m.addr, rd_addr, len);
471		else
472			memcpy(rd_addr, m.addr, len);
473
474		rd_size -= len;
475		if (!rd_size)
476			continue;
477
478		src_len -= len;
479		if (src_len) {
480			rd_offset += len;
481			continue;
482		}
483
484		/* rd page completed, next one please */
485		rd_page++;
486		rd_offset = 0;
487		src_len = PAGE_SIZE;
488		if (rd_page <= table->page_end_offset) {
489			rd_sg++;
490			continue;
491		}
492
493		table = rd_get_sg_table(dev, rd_page);
494		if (!table) {
495			sg_miter_stop(&m);
496			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
497		}
498
499		/* since we increment, the first sg entry is correct */
500		rd_sg = table->sg_table;
501	}
502	sg_miter_stop(&m);
503
504	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
505		struct rd_dev_sg_table *prot_table;
506		struct scatterlist *prot_sg;
507		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
508		u32 prot_offset, prot_page;
509
510		tmp = cmd->t_task_lba * se_dev->prot_length;
511		prot_offset = do_div(tmp, PAGE_SIZE);
512		prot_page = tmp;
513
514		prot_table = rd_get_prot_table(dev, prot_page);
515		if (!prot_table)
516			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
517
518		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
519
520		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
521					 prot_sg, prot_offset);
522		if (rc)
523			return rc;
524	}
525
526	target_complete_cmd(cmd, SAM_STAT_GOOD);
527	return 0;
528}
529
530enum {
531	Opt_rd_pages, Opt_rd_nullio, Opt_err
532};
533
534static match_table_t tokens = {
535	{Opt_rd_pages, "rd_pages=%d"},
536	{Opt_rd_nullio, "rd_nullio=%d"},
537	{Opt_err, NULL}
538};
539
540static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
541		const char *page, ssize_t count)
542{
543	struct rd_dev *rd_dev = RD_DEV(dev);
544	char *orig, *ptr, *opts;
545	substring_t args[MAX_OPT_ARGS];
546	int ret = 0, arg, token;
547
548	opts = kstrdup(page, GFP_KERNEL);
549	if (!opts)
550		return -ENOMEM;
551
552	orig = opts;
553
554	while ((ptr = strsep(&opts, ",\n")) != NULL) {
555		if (!*ptr)
556			continue;
557
558		token = match_token(ptr, tokens, args);
559		switch (token) {
560		case Opt_rd_pages:
561			match_int(args, &arg);
562			rd_dev->rd_page_count = arg;
563			pr_debug("RAMDISK: Referencing Page"
564				" Count: %u\n", rd_dev->rd_page_count);
565			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
566			break;
567		case Opt_rd_nullio:
568			match_int(args, &arg);
569			if (arg != 1)
570				break;
571
572			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
573			rd_dev->rd_flags |= RDF_NULLIO;
574			break;
575		default:
576			break;
577		}
578	}
579
580	kfree(orig);
581	return (!ret) ? count : ret;
582}
583
584static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
585{
586	struct rd_dev *rd_dev = RD_DEV(dev);
587
588	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
589			rd_dev->rd_dev_id);
590	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
591			"  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
592			PAGE_SIZE, rd_dev->sg_table_count,
593			!!(rd_dev->rd_flags & RDF_NULLIO));
594	return bl;
595}
596
597static sector_t rd_get_blocks(struct se_device *dev)
598{
599	struct rd_dev *rd_dev = RD_DEV(dev);
600
601	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
602			dev->dev_attrib.block_size) - 1;
603
604	return blocks_long;
605}
606
607static int rd_init_prot(struct se_device *dev)
608{
609	struct rd_dev *rd_dev = RD_DEV(dev);
610
611	if (!dev->dev_attrib.pi_prot_type)
612		return 0;
613
614	return rd_build_prot_space(rd_dev, dev->prot_length,
615				   dev->dev_attrib.block_size);
616}
617
618static void rd_free_prot(struct se_device *dev)
619{
620	struct rd_dev *rd_dev = RD_DEV(dev);
621
622	rd_release_prot_space(rd_dev);
623}
624
625static struct sbc_ops rd_sbc_ops = {
626	.execute_rw		= rd_execute_rw,
627};
628
629static sense_reason_t
630rd_parse_cdb(struct se_cmd *cmd)
631{
632	return sbc_parse_cdb(cmd, &rd_sbc_ops);
633}
634
635static struct se_subsystem_api rd_mcp_template = {
636	.name			= "rd_mcp",
637	.inquiry_prod		= "RAMDISK-MCP",
638	.inquiry_rev		= RD_MCP_VERSION,
639	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
640	.attach_hba		= rd_attach_hba,
641	.detach_hba		= rd_detach_hba,
642	.alloc_device		= rd_alloc_device,
643	.configure_device	= rd_configure_device,
644	.free_device		= rd_free_device,
645	.parse_cdb		= rd_parse_cdb,
646	.set_configfs_dev_params = rd_set_configfs_dev_params,
647	.show_configfs_dev_params = rd_show_configfs_dev_params,
648	.get_device_type	= sbc_get_device_type,
649	.get_blocks		= rd_get_blocks,
650	.init_prot		= rd_init_prot,
651	.free_prot		= rd_free_prot,
652};
653
654int __init rd_module_init(void)
655{
656	int ret;
657
658	ret = transport_subsystem_register(&rd_mcp_template);
659	if (ret < 0) {
660		return ret;
661	}
662
663	return 0;
664}
665
666void rd_module_exit(void)
667{
668	transport_subsystem_release(&rd_mcp_template);
669}
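The sizing comment inside rd_build_prot_space() ("prot_length=8byte dif data ... PGSZ canceled each other") is easiest to see with concrete numbers. A minimal sketch, assuming a 512-byte logical block size and an 8-byte DIF tuple per block (both illustrative values, not read from a device), evaluates total_sg_needed the same way the driver does:

/* Standalone sketch: protection-space sizing from rd_build_prot_space(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rd_page_count = 5000;	/* rd_pages= parameter (example) */
	int prot_length = 8;		/* assumed 8-byte DIF tuple per block */
	int block_size = 512;		/* assumed logical block size */

	/* Mirrors: total_sg_needed = (rd_page_count * prot_length / block_size) + 1;
	 * the PAGE_SIZE factors cancel, as the in-code comment notes. */
	uint32_t total_sg_needed = rd_page_count * prot_length / block_size + 1;

	printf("protection pages needed: %u\n", total_sg_needed);	/* prints 79 */
	return 0;
}

With a 4 KiB PAGE_SIZE those 5000 data pages hold 40000 blocks, whose 8-byte tuples occupy 312.5 KiB, so the formula rounds up to 79 protection pages, the last term ("+ pad") covering the partial page.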