// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/radix-tree.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/**
 * DOC: Userspace I/O
 * Userspace I/O
 * -------------
 *
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented in userspace.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap-ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */
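/*
 * Illustrative sketch (not compiled) of the userspace half of the ring
 * protocol described above. The mailbox and entry layouts come from
 * <linux/target_core_user.h>; handle_scsi_cmd() is a hypothetical
 * daemon-side handler, not part of this driver:
 *
 *	struct tcmu_mailbox *mb = map;	// mmap of the UIO device
 *
 *	while (mb->cmd_tail != mb->cmd_head) {
 *		struct tcmu_cmd_entry *ent =
 *			map + mb->cmdr_off + mb->cmd_tail;
 *
 *		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
 *			handle_scsi_cmd(map, ent);
 *			ent->rsp.scsi_status = SAM_STAT_GOOD;
 *		}
 *		// PAD (and unhandled TMR) entries are skipped by length
 *		mb->cmd_tail = (mb->cmd_tail +
 *				tcmu_hdr_get_len(ent->hdr.len_op)) %
 *			       mb->cmdr_size;
 *	}
 *	int one = 1;
 *	write(uio_fd, &one, sizeof(one)); // kick tcmu_irqcontrol()
 */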

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* For the cmd area, the size is fixed at 8MB */
#define CMDR_SIZE (8 * 1024 * 1024)

/*
 * For data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
 */
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_SHIFT PAGE_SHIFT
#define DATA_BLOCK_BITS_DEF (256 * 1024)

#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
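/*
 * Worked example, assuming 4K pages (DATA_BLOCK_SHIFT == 12): one MB is
 * 1 << (20 - 12) == 256 blocks, so TCMU_MBS_TO_BLOCKS(1024) gives the
 * 256K-block default above, i.e. a 1 GB data area per device.
 */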

/*
 * Default global number of data blocks (512K * PAGE_SIZE) above which
 * the unmap (block reclaim) thread will be started.
 */
#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)

static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;

static struct device *tcmu_root_device;

struct tcmu_hba {
        u32 host_id;
};

#define TCMU_CONFIG_LEN 256

static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
static LIST_HEAD(tcmu_nl_cmd_list);

struct tcmu_dev;

struct tcmu_nl_cmd {
        /* wake up thread waiting for reply */
        struct completion complete;
        struct list_head nl_list;
        struct tcmu_dev *udev;
        int cmd;
        int status;
};

struct tcmu_dev {
        struct list_head node;
        struct kref kref;

        struct se_device se_dev;

        char *name;
        struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
#define TCMU_DEV_BIT_TMR_NOTIFY 3
        unsigned long flags;

        struct uio_info uio_info;

        struct inode *inode;

        struct tcmu_mailbox *mb_addr;
        uint64_t dev_size;
        u32 cmdr_size;
        u32 cmdr_last_cleaned;
        /* Offset of data area from start of mb */
        /* Must add data_off and mb_addr to get the address */
        size_t data_off;
        size_t data_size;
        uint32_t max_blocks;
        size_t ring_size;

        struct mutex cmdr_lock;
        struct list_head qfull_queue;
        struct list_head tmr_queue;

        uint32_t dbi_max;
        uint32_t dbi_thresh;
        unsigned long *data_bitmap;
        struct radix_tree_root data_blocks;

        struct idr commands;

        struct timer_list cmd_timer;
        unsigned int cmd_time_out;
        struct list_head inflight_queue;

        struct timer_list qfull_timer;
        int qfull_time_out;

        struct list_head timedout_entry;

        struct tcmu_nl_cmd curr_nl_cmd;

        char dev_config[TCMU_CONFIG_LEN];

        int nl_reply_supported;
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
        struct se_cmd *se_cmd;
        struct tcmu_dev *tcmu_dev;
        struct list_head queue_entry;

        uint16_t cmd_id;

        /* Can't use se_cmd when cleaning up expired cmds, because if
           cmd has been completed then accessing se_cmd is off limits */
        uint32_t dbi_cnt;
        uint32_t dbi_cur;
        uint32_t *dbi;

        unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
        unsigned long flags;
};

struct tcmu_tmr {
        struct list_head queue_entry;

        uint8_t tmr_type;
        uint32_t tmr_cmd_cnt;
        int16_t tmr_cmd_ids[];
};

/*
 * To avoid deadlock, the mutex lock order should always be:
 *
 * mutex_lock(&root_udev_mutex);
 * ...
 * mutex_lock(&tcmu_dev->cmdr_lock);
 * mutex_unlock(&tcmu_dev->cmdr_lock);
 * ...
 * mutex_unlock(&root_udev_mutex);
 */
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);

static struct kmem_cache *tcmu_cmd_cache;

static atomic_t global_db_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;

static int tcmu_set_global_max_data_area(const char *str,
                const struct kernel_param *kp)
{
        int ret, max_area_mb;

        ret = kstrtoint(str, 10, &max_area_mb);
        if (ret)
                return -EINVAL;

        if (max_area_mb <= 0) {
                pr_err("global_max_data_area must be larger than 0.\n");
                return -EINVAL;
        }

        tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
        if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
                schedule_delayed_work(&tcmu_unmap_work, 0);
        else
                cancel_delayed_work_sync(&tcmu_unmap_work);

        return 0;
}

static int tcmu_get_global_max_data_area(char *buffer,
                const struct kernel_param *kp)
{
        return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
}

static const struct kernel_param_ops tcmu_global_max_data_area_op = {
        .set = tcmu_set_global_max_data_area,
        .get = tcmu_get_global_max_data_area,
};

module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
                S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(global_max_data_area_mb,
                 "Max MBs allowed to be allocated to all the tcmu devices' "
                 "data areas.");

static int tcmu_get_block_netlink(char *buffer,
                const struct kernel_param *kp)
{
        return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
                       "blocked" : "unblocked");
}

static int tcmu_set_block_netlink(const char *str,
                const struct kernel_param *kp)
{
        int ret;
        u8 val;

        ret = kstrtou8(str, 0, &val);
        if (ret < 0)
                return ret;

        if (val > 1) {
                pr_err("Invalid block netlink value %u\n", val);
                return -EINVAL;
        }

        tcmu_netlink_blocked = val;
        return 0;
}

static const struct kernel_param_ops tcmu_block_netlink_op = {
        .set = tcmu_set_block_netlink,
        .get = tcmu_get_block_netlink,
};

module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");

static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
{
        struct tcmu_dev *udev = nl_cmd->udev;

        if (!tcmu_netlink_blocked) {
                pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
                return -EBUSY;
        }

        if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
                pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
                nl_cmd->status = -EINTR;
                list_del(&nl_cmd->nl_list);
                complete(&nl_cmd->complete);
        }
        return 0;
}

static int tcmu_set_reset_netlink(const char *str,
                const struct kernel_param *kp)
{
        struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
        int ret;
        u8 val;

        ret = kstrtou8(str, 0, &val);
        if (ret < 0)
                return ret;

        if (val != 1) {
                pr_err("Invalid reset netlink value %u\n", val);
                return -EINVAL;
        }

        mutex_lock(&tcmu_nl_cmd_mutex);
        list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
                ret = tcmu_fail_netlink_cmd(nl_cmd);
                if (ret)
                        break;
        }
        mutex_unlock(&tcmu_nl_cmd_mutex);

        return ret;
}

static const struct kernel_param_ops tcmu_reset_netlink_op = {
        .set = tcmu_set_reset_netlink,
};

module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");

/* multicast group */
enum tcmu_multicast_groups {
        TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
        [TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
        [TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
        [TCMU_ATTR_MINOR] = { .type = NLA_U32 },
        [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
        [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
        [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};

static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
        struct tcmu_dev *udev = NULL;
        struct tcmu_nl_cmd *nl_cmd;
        int dev_id, rc, ret = 0;

        if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
            !info->attrs[TCMU_ATTR_DEVICE_ID]) {
                printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
                return -EINVAL;
        }

        dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
        rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);

        mutex_lock(&tcmu_nl_cmd_mutex);
        list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
                if (nl_cmd->udev->se_dev.dev_index == dev_id) {
                        udev = nl_cmd->udev;
                        break;
                }
        }

        if (!udev) {
                pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
                       completed_cmd, rc, dev_id);
                ret = -ENODEV;
                goto unlock;
        }
        list_del(&nl_cmd->nl_list);

        pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
                 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
                 nl_cmd->status);

        if (nl_cmd->cmd != completed_cmd) {
                pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
                       udev->name, completed_cmd, nl_cmd->cmd);
                ret = -EINVAL;
                goto unlock;
        }

        nl_cmd->status = rc;
        complete(&nl_cmd->complete);
unlock:
        mutex_unlock(&tcmu_nl_cmd_mutex);
        return ret;
}

static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
        return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
        return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
                                       struct genl_info *info)
{
        return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}

static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
        if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
                tcmu_kern_cmd_reply_supported =
                        nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
                printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
                       tcmu_kern_cmd_reply_supported);
        }

        return 0;
}

static const struct genl_ops tcmu_genl_ops[] = {
        {
                .cmd = TCMU_CMD_SET_FEATURES,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .flags = GENL_ADMIN_PERM,
                .doit = tcmu_genl_set_features,
        },
        {
                .cmd = TCMU_CMD_ADDED_DEVICE_DONE,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .flags = GENL_ADMIN_PERM,
                .doit = tcmu_genl_add_dev_done,
        },
        {
                .cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .flags = GENL_ADMIN_PERM,
                .doit = tcmu_genl_rm_dev_done,
        },
        {
                .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .flags = GENL_ADMIN_PERM,
                .doit = tcmu_genl_reconfig_dev_done,
        },
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
        .module = THIS_MODULE,
        .hdrsize = 0,
        .name = "TCM-USER",
        .version = 2,
        .maxattr = TCMU_ATTR_MAX,
        .policy = tcmu_attr_policy,
        .mcgrps = tcmu_mcgrps,
        .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
        .netnsok = true,
        .ops = tcmu_genl_ops,
        .n_ops = ARRAY_SIZE(tcmu_genl_ops),
};

#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
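/*
 * dbi_cur is a cursor into the command's dbi array: the setters above
 * record allocated data block indices while a command is set up, and
 * resetting the cursor lets the same array be walked again later (see
 * the tcmu_cmd_get_dbi() calls in scatter_data_area() and
 * gather_data_area() below).
 */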

static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
        struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
        uint32_t i;

        for (i = 0; i < len; i++)
                clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}

static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
                                        struct tcmu_cmd *tcmu_cmd)
{
        struct page *page;
        int ret, dbi;

        dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
        if (dbi == udev->dbi_thresh)
                return false;

        page = radix_tree_lookup(&udev->data_blocks, dbi);
        if (!page) {
                if (atomic_add_return(1, &global_db_count) >
                                tcmu_global_max_blocks)
                        schedule_delayed_work(&tcmu_unmap_work, 0);

                /* try to get new page from the mm */
                page = alloc_page(GFP_NOIO);
                if (!page)
                        goto err_alloc;

                ret = radix_tree_insert(&udev->data_blocks, dbi, page);
                if (ret)
                        goto err_insert;
        }

        if (dbi > udev->dbi_max)
                udev->dbi_max = dbi;

        set_bit(dbi, udev->data_bitmap);
        tcmu_cmd_set_dbi(tcmu_cmd, dbi);

        return true;
err_insert:
        __free_page(page);
err_alloc:
        atomic_dec(&global_db_count);
        return false;
}

static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
                                  struct tcmu_cmd *tcmu_cmd)
{
        int i;

        for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
                if (!tcmu_get_empty_block(udev, tcmu_cmd))
                        return false;
        }
        return true;
}

static inline struct page *
tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
        return radix_tree_lookup(&udev->data_blocks, dbi);
}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
        if (tcmu_cmd->se_cmd)
                tcmu_cmd->se_cmd->priv = NULL;
        kfree(tcmu_cmd->dbi);
        kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}

static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
{
        struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
        size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);

        if (se_cmd->se_cmd_flags & SCF_BIDI) {
                BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
                data_length += round_up(se_cmd->t_bidi_data_sg->length,
                                DATA_BLOCK_SIZE);
        }

        return data_length;
}

static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
{
        size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

        return data_length / DATA_BLOCK_SIZE;
}

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
        struct se_device *se_dev = se_cmd->se_dev;
        struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;

        tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
        if (!tcmu_cmd)
                return NULL;

        INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
        tcmu_cmd->se_cmd = se_cmd;
        tcmu_cmd->tcmu_dev = udev;

        tcmu_cmd_reset_dbi_cur(tcmu_cmd);
        tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
        tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
                                GFP_NOIO);
        if (!tcmu_cmd->dbi) {
                kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
                return NULL;
        }

        return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
        unsigned long offset = offset_in_page(vaddr);
        void *start = vaddr - offset;

        size = round_up(size + offset, PAGE_SIZE);

        while (size) {
                flush_dcache_page(vmalloc_to_page(start));
                start += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
        int diff = head - tail;

        if (diff >= 0)
                return diff;
        else
                return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
        /* Keep 1 byte unused or we can't tell full from empty */
        return (size - spc_used(head, tail, size) - 1);
}
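/*
 * Worked example for the two helpers above: with size == 8, head == 5
 * and tail == 2, spc_used() is 3 and spc_free() is 4; if head wraps to
 * 1 with tail still 2, spc_used() is 8 + (1 - 2) == 7 and spc_free()
 * is 0, i.e. the ring is full while one byte stays unused.
 */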

static inline size_t head_to_end(size_t head, size_t size)
{
        return size - head;
}

static inline void new_iov(struct iovec **iov, int *iov_cnt)
{
        struct iovec *iovec;

        if (*iov_cnt != 0)
                (*iov)++;
        (*iov_cnt)++;

        iovec = *iov;
        memset(iovec, 0, sizeof(struct iovec));
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
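/*
 * smp_store_release() ensures the entry contents written before the
 * head update are visible before the new head value itself; the
 * consumer of the index (userspace, for mb->cmd_head) is expected to
 * pair this with an acquire-ordered read. The double modulo keeps the
 * index in range even when head + used wraps past the ring size.
 */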

/* offset is relative to mb_addr */
static inline size_t get_block_offset_user(struct tcmu_dev *dev,
                int dbi, int remaining)
{
        return dev->data_off + dbi * DATA_BLOCK_SIZE +
                DATA_BLOCK_SIZE - remaining;
}

static inline size_t iov_tail(struct iovec *iov)
{
        return (size_t)iov->iov_base + iov->iov_len;
}
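/*
 * iov_tail() gives the ring offset just past the end of an iovec;
 * scatter_data_area() below compares it with the next block's offset
 * to decide whether the block can be merged into the current iovec
 * instead of starting a new one.
 */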

static void scatter_data_area(struct tcmu_dev *udev,
        struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
        unsigned int data_nents, struct iovec **iov,
        int *iov_cnt, bool copy_data)
{
        int i, dbi;
        int block_remaining = 0;
        void *from, *to = NULL;
        size_t copy_bytes, to_offset, offset;
        struct scatterlist *sg;
        struct page *page;

        for_each_sg(data_sg, sg, data_nents, i) {
                int sg_remaining = sg->length;
                from = kmap_atomic(sg_page(sg)) + sg->offset;
                while (sg_remaining > 0) {
                        if (block_remaining == 0) {
                                if (to) {
                                        flush_dcache_page(page);
                                        kunmap_atomic(to);
                                }

                                block_remaining = DATA_BLOCK_SIZE;
                                dbi = tcmu_cmd_get_dbi(tcmu_cmd);
                                page = tcmu_get_block_page(udev, dbi);
                                to = kmap_atomic(page);
                        }

                        /*
                         * Convert to the virtual offset of the ring data area.
                         */
                        to_offset = get_block_offset_user(udev, dbi,
                                        block_remaining);

                        /*
                         * The following code will gather and map the blocks
                         * to the same iovec when the blocks are all next to
                         * each other.
                         */
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
                        if (*iov_cnt != 0 &&
                            to_offset == iov_tail(*iov)) {
                                /*
                                 * Will append to the current iovec, because
                                 * the current block page is next to the
                                 * previous one.
                                 */
                                (*iov)->iov_len += copy_bytes;
                        } else {
                                /*
                                 * Will allocate a new iovec because we are
                                 * first time here or the current block page
                                 * is not next to the previous one.
                                 */
                                new_iov(iov, iov_cnt);
                                (*iov)->iov_base = (void __user *)to_offset;
                                (*iov)->iov_len = copy_bytes;
                        }

                        if (copy_data) {
                                offset = DATA_BLOCK_SIZE - block_remaining;
                                memcpy(to + offset,
                                       from + sg->length - sg_remaining,
                                       copy_bytes);
                        }

                        sg_remaining -= copy_bytes;
                        block_remaining -= copy_bytes;
                }
                kunmap_atomic(from - sg->offset);
        }

        if (to) {
                flush_dcache_page(page);
                kunmap_atomic(to);
        }
}

static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
                             bool bidi, uint32_t read_len)
{
        struct se_cmd *se_cmd = cmd->se_cmd;
        int i, dbi;
        int block_remaining = 0;
        void *from = NULL, *to;
        size_t copy_bytes, offset;
        struct scatterlist *sg, *data_sg;
        struct page *page;
        unsigned int data_nents;
        uint32_t count = 0;

        if (!bidi) {
                data_sg = se_cmd->t_data_sg;
                data_nents = se_cmd->t_data_nents;
        } else {

                /*
                 * For bidi case, the first count blocks are for Data-Out
                 * buffer blocks, and before gathering the Data-In buffer
                 * the Data-Out buffer blocks should be discarded.
                 */
                count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);

                data_sg = se_cmd->t_bidi_data_sg;
                data_nents = se_cmd->t_bidi_data_nents;
        }

        tcmu_cmd_set_dbi_cur(cmd, count);

        for_each_sg(data_sg, sg, data_nents, i) {
                int sg_remaining = sg->length;
                to = kmap_atomic(sg_page(sg)) + sg->offset;
                while (sg_remaining > 0 && read_len > 0) {
                        if (block_remaining == 0) {
                                if (from)
                                        kunmap_atomic(from);

                                block_remaining = DATA_BLOCK_SIZE;
                                dbi = tcmu_cmd_get_dbi(cmd);
                                page = tcmu_get_block_page(udev, dbi);
                                from = kmap_atomic(page);
                                flush_dcache_page(page);
                        }
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
                        if (read_len < copy_bytes)
                                copy_bytes = read_len;
                        offset = DATA_BLOCK_SIZE - block_remaining;
                        memcpy(to + sg->length - sg_remaining, from + offset,
                                        copy_bytes);

                        sg_remaining -= copy_bytes;
                        block_remaining -= copy_bytes;
                        read_len -= copy_bytes;
                }
                kunmap_atomic(to - sg->offset);
                if (read_len == 0)
                        break;
        }
        if (from)
                kunmap_atomic(from);
}

static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
        return thresh - bitmap_weight(bitmap, thresh);
}

/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data area.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
                size_t cmd_size, size_t data_needed)
{
        struct tcmu_mailbox *mb = udev->mb_addr;
        uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
                                / DATA_BLOCK_SIZE;
        size_t space, cmd_needed;
        u32 cmd_head;

        tcmu_flush_dcache_range(mb, sizeof(*mb));

        cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

        /*
         * If cmd end-of-ring space is too small then we need space for a PAD
         * entry plus the original cmd - cmds are internally contiguous.
         */
        if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
                cmd_needed = cmd_size;
        else
                cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

        space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
        if (space < cmd_needed) {
                pr_debug("no cmd space: %u %u %u\n", cmd_head,
                         udev->cmdr_last_cleaned, udev->cmdr_size);
                return false;
        }

        if (!data_needed)
                return true;

        /* try to check and get the data blocks as needed */
        space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
        if ((space * DATA_BLOCK_SIZE) < data_needed) {
                unsigned long blocks_left =
                                (udev->max_blocks - udev->dbi_thresh) + space;

                if (blocks_left < blocks_needed) {
                        pr_debug("no data space: only %lu available, but asked for %zu\n",
                                 blocks_left * DATA_BLOCK_SIZE,
                                 data_needed);
                        return false;
                }

                udev->dbi_thresh += blocks_needed;
                if (udev->dbi_thresh > udev->max_blocks)
                        udev->dbi_thresh = udev->max_blocks;
        }

        return tcmu_get_empty_blocks(udev, cmd);
}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
        return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
                        sizeof(struct tcmu_cmd_entry));
}
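/*
 * The max() guards the small-iov case: req and rsp share a union in
 * struct tcmu_cmd_entry, so an entry must never be shorter than the
 * full struct or the response (sense data in particular) would not
 * fit in the slot reserved on the ring.
 */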

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
                                           size_t base_command_size)
{
        struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
        size_t command_size;

        command_size = base_command_size +
                round_up(scsi_command_size(se_cmd->t_task_cdb),
                                TCMU_OP_ALIGN_SIZE);

        WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

        return command_size;
}

static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
                                 struct timer_list *timer)
{
        if (!tmo)
                return;

        tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
        if (!timer_pending(timer))
                mod_timer(timer, tcmu_cmd->deadline);

        pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
                 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}

static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
        struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
        unsigned int tmo;

        /*
         * For backwards compat: if qfull_time_out is not set, use
         * cmd_time_out, and if that's not set, use the default timeout.
         */
        if (!udev->qfull_time_out)
                return -ETIMEDOUT;
        else if (udev->qfull_time_out > 0)
                tmo = udev->qfull_time_out;
        else if (udev->cmd_time_out)
                tmo = udev->cmd_time_out;
        else
                tmo = TCMU_TIME_OUT;

        tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);

        list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
        pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
                 tcmu_cmd, udev->name);
        return 0;
}

static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
{
        struct tcmu_cmd_entry_hdr *hdr;
        struct tcmu_mailbox *mb = udev->mb_addr;
        uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

        /* Insert a PAD if end-of-ring space is too small */
        if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
                size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

                hdr = (void *) mb + CMDR_OFF + cmd_head;
                tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
                tcmu_hdr_set_len(&hdr->len_op, pad_size);
                hdr->cmd_id = 0; /* not used for PAD */
                hdr->kflags = 0;
                hdr->uflags = 0;
                tcmu_flush_dcache_range(hdr, sizeof(*hdr));

                UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
                tcmu_flush_dcache_range(mb, sizeof(*mb));

                cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
                WARN_ON(cmd_head != 0);
        }

        return cmd_head;
}

/**
 * queue_cmd_ring - queue cmd to ring or internally
 * @tcmu_cmd: cmd to queue
 * @scsi_err: TCM error code if failure (-1) returned.
 *
 * Returns:
 * -1 we cannot queue internally or to the ring.
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
{
        struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
        struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
        size_t base_command_size, command_size;
        struct tcmu_mailbox *mb = udev->mb_addr;
        struct tcmu_cmd_entry *entry;
        struct iovec *iov;
        int iov_cnt, cmd_id;
        uint32_t cmd_head;
        uint64_t cdb_off;
        bool copy_to_data_area;
        size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

        *scsi_err = TCM_NO_SENSE;

        if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
                *scsi_err = TCM_LUN_BUSY;
                return -1;
        }

        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
                *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return -1;
        }

        /*
         * Must be a certain minimum size for response sense info, but
         * also may be larger if the iov array is large.
         *
         * We prepare as many iovs as possible for potential uses here,
         * because it's expensive to tell how many regions are freed in
         * the bitmap & global data pool, as the size calculated here
         * will only be used to do the checks.
         *
         * The size will be recalculated later as actually needed to save
         * cmd area memory.
         */
        base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
        command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

        if (!list_empty(&udev->qfull_queue))
                goto queue;

        if ((command_size > (udev->cmdr_size / 2)) ||
            data_length > udev->data_size) {
                pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
                        "cmd ring/data area\n", command_size, data_length,
                        udev->cmdr_size, udev->data_size);
                *scsi_err = TCM_INVALID_CDB_FIELD;
                return -1;
        }

        if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
                /*
                 * Don't leave commands partially setup because the unmap
                 * thread might need the blocks to make forward progress.
                 */
                tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
                tcmu_cmd_reset_dbi_cur(tcmu_cmd);
                goto queue;
        }

        cmd_head = ring_insert_padding(udev, command_size);

        entry = (void *) mb + CMDR_OFF + cmd_head;
        memset(entry, 0, command_size);
        tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);

        /* Handle allocating space from the data area */
        tcmu_cmd_reset_dbi_cur(tcmu_cmd);
        iov = &entry->req.iov[0];
        iov_cnt = 0;
        copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
                || se_cmd->se_cmd_flags & SCF_BIDI);
        scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
                          se_cmd->t_data_nents, &iov, &iov_cnt,
                          copy_to_data_area);
        entry->req.iov_cnt = iov_cnt;

        /* Handle BIDI commands */
        iov_cnt = 0;
        if (se_cmd->se_cmd_flags & SCF_BIDI) {
                iov++;
                scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
                                  se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
                                  false);
        }
        entry->req.iov_bidi_cnt = iov_cnt;

        cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
        if (cmd_id < 0) {
                pr_err("tcmu: Could not allocate cmd id.\n");

                tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
                *scsi_err = TCM_OUT_OF_RESOURCES;
                return -1;
        }
        tcmu_cmd->cmd_id = cmd_id;

        pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
                 tcmu_cmd, udev->name);

        tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);

        entry->hdr.cmd_id = tcmu_cmd->cmd_id;

        /*
         * Recalculate the command's base size and total size according
         * to what is actually needed.
         */
        base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
                                                       entry->req.iov_bidi_cnt);
        command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

        tcmu_hdr_set_len(&entry->hdr.len_op, command_size);

        /* All offsets relative to mb_addr, not start of entry! */
        cdb_off = CMDR_OFF + cmd_head + base_command_size;
        memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
        entry->req.cdb_off = cdb_off;
        tcmu_flush_dcache_range(entry, command_size);

        UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
        tcmu_flush_dcache_range(mb, sizeof(*mb));

        list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);

        /* TODO: only if FLUSH and FUA? */
        uio_event_notify(&udev->uio_info);

        return 0;

queue:
        if (add_to_qfull_queue(tcmu_cmd)) {
                *scsi_err = TCM_OUT_OF_RESOURCES;
                return -1;
        }

        return 1;
}

/**
 * queue_tmr_ring - queue tmr info to ring or internally
 * @udev: related tcmu_dev
 * @tmr: tcmu_tmr containing tmr info to queue
 *
 * Returns:
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static int
queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
{
        struct tcmu_tmr_entry *entry;
        int cmd_size;
        int id_list_sz;
        struct tcmu_mailbox *mb = udev->mb_addr;
        uint32_t cmd_head;

        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
                goto out_free;

        id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt;
        cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);

        if (!list_empty(&udev->tmr_queue) ||
            !is_ring_space_avail(udev, NULL, cmd_size, 0)) {
                list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
                pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
                         tmr, udev->name);
                return 1;
        }

        cmd_head = ring_insert_padding(udev, cmd_size);

        entry = (void *)mb + CMDR_OFF + cmd_head;
        memset(entry, 0, cmd_size);
        tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
        tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
        entry->tmr_type = tmr->tmr_type;
        entry->cmd_cnt = tmr->tmr_cmd_cnt;
        memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz);
        tcmu_flush_dcache_range(entry, cmd_size);

        UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size);
        tcmu_flush_dcache_range(mb, sizeof(*mb));

        uio_event_notify(&udev->uio_info);

out_free:
        kfree(tmr);

        return 0;
}

static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
        struct se_device *se_dev = se_cmd->se_dev;
        struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;
        sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
        int ret = -1;

        tcmu_cmd = tcmu_alloc_cmd(se_cmd);
        if (!tcmu_cmd)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        mutex_lock(&udev->cmdr_lock);
        se_cmd->priv = tcmu_cmd;
        if (!(se_cmd->transport_state & CMD_T_ABORTED))
                ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
        if (ret < 0)
                tcmu_free_cmd(tcmu_cmd);
        mutex_unlock(&udev->cmdr_lock);
        return scsi_ret;
}

static void tcmu_set_next_deadline(struct list_head *queue,
                                   struct timer_list *timer)
{
        struct tcmu_cmd *cmd;

        if (!list_empty(queue)) {
                cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry);
                mod_timer(timer, cmd->deadline);
        } else
                del_timer(timer);
}

static int
tcmu_tmr_type(enum tcm_tmreq_table tmf)
{
        switch (tmf) {
        case TMR_ABORT_TASK: return TCMU_TMR_ABORT_TASK;
        case TMR_ABORT_TASK_SET: return TCMU_TMR_ABORT_TASK_SET;
        case TMR_CLEAR_ACA: return TCMU_TMR_CLEAR_ACA;
        case TMR_CLEAR_TASK_SET: return TCMU_TMR_CLEAR_TASK_SET;
        case TMR_LUN_RESET: return TCMU_TMR_LUN_RESET;
        case TMR_TARGET_WARM_RESET: return TCMU_TMR_TARGET_WARM_RESET;
        case TMR_TARGET_COLD_RESET: return TCMU_TMR_TARGET_COLD_RESET;
        case TMR_LUN_RESET_PRO: return TCMU_TMR_LUN_RESET_PRO;
        default: return TCMU_TMR_UNKNOWN;
        }
}

static void
tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
                struct list_head *cmd_list)
{
        int i = 0, cmd_cnt = 0;
        bool unqueued = false;
        uint16_t *cmd_ids = NULL;
        struct tcmu_cmd *cmd;
        struct se_cmd *se_cmd;
        struct tcmu_tmr *tmr;
        struct tcmu_dev *udev = TCMU_DEV(se_dev);

        mutex_lock(&udev->cmdr_lock);

        /* First we check for aborted commands in qfull_queue */
        list_for_each_entry(se_cmd, cmd_list, state_list) {
                i++;
                if (!se_cmd->priv)
                        continue;
                cmd = se_cmd->priv;
                /* Commands on qfull queue have no id yet */
                if (cmd->cmd_id) {
                        cmd_cnt++;
                        continue;
                }
                pr_debug("Removing aborted command %p from queue on dev %s.\n",
                         cmd, udev->name);

                list_del_init(&cmd->queue_entry);
                tcmu_free_cmd(cmd);
                target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
                unqueued = true;
        }
        if (unqueued)
                tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);

        if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags))
                goto unlock;

        pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
                 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);

        tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_KERNEL);
        if (!tmr)
                goto unlock;

        tmr->tmr_type = tcmu_tmr_type(tmf);
        tmr->tmr_cmd_cnt = cmd_cnt;

        if (cmd_cnt != 0) {
                cmd_cnt = 0;
                list_for_each_entry(se_cmd, cmd_list, state_list) {
                        if (!se_cmd->priv)
                                continue;
                        cmd = se_cmd->priv;
                        if (cmd->cmd_id)
                                tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id;
                }
        }

        queue_tmr_ring(udev, tmr);

unlock:
        mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
        struct se_cmd *se_cmd = cmd->se_cmd;
        struct tcmu_dev *udev = cmd->tcmu_dev;
        bool read_len_valid = false;
        uint32_t read_len;

        /*
         * cmd has been completed already from timeout, just reclaim
         * data area space and free cmd
         */
        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
                WARN_ON_ONCE(se_cmd);
                goto out;
        }

        list_del_init(&cmd->queue_entry);

        tcmu_cmd_reset_dbi_cur(cmd);

        if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
                pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
                        cmd->se_cmd);
                entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
                goto done;
        }

        read_len = se_cmd->data_length;
        if (se_cmd->data_direction == DMA_FROM_DEVICE &&
            (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
                read_len_valid = true;
                if (entry->rsp.read_len < read_len)
                        read_len = entry->rsp.read_len;
        }

        if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
                transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
                if (!read_len_valid)
                        goto done;
                else
                        se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
        }
        if (se_cmd->se_cmd_flags & SCF_BIDI) {
                /* Get Data-In buffer before clean up */
                gather_data_area(udev, cmd, true, read_len);
        } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
                gather_data_area(udev, cmd, false, read_len);
        } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
                /* TODO: */
        } else if (se_cmd->data_direction != DMA_NONE) {
                pr_warn("TCMU: data direction was %d!\n",
                        se_cmd->data_direction);
        }

done:
        if (read_len_valid) {
                pr_debug("read_len = %d\n", read_len);
                target_complete_cmd_with_length(cmd->se_cmd,
                                        entry->rsp.scsi_status, read_len);
        } else
                target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
        tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
        tcmu_free_cmd(cmd);
}

static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
{
        struct tcmu_tmr *tmr, *tmp;
        LIST_HEAD(tmrs);

        if (list_empty(&udev->tmr_queue))
                return 1;

        pr_debug("running %s's tmr queue\n", udev->name);

        list_splice_init(&udev->tmr_queue, &tmrs);

        list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) {
                list_del_init(&tmr->queue_entry);

                pr_debug("removing tmr %p on dev %s from queue\n",
                         tmr, udev->name);

                if (queue_tmr_ring(udev, tmr)) {
                        pr_debug("ran out of space during tmr queue run\n");
                        /*
                         * tmr was requeued, so just put all tmrs back in
                         * the queue
                         */
                        list_splice_tail(&tmrs, &udev->tmr_queue);
                        return 0;
                }
        }

        return 1;
}

static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
        struct tcmu_mailbox *mb;
        struct tcmu_cmd *cmd;
        bool free_space = false;

        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
                pr_err("ring broken, not handling completions\n");
                return 0;
        }

        mb = udev->mb_addr;
        tcmu_flush_dcache_range(mb, sizeof(*mb));

        while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

                struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;

                /*
                 * Flush max. up to end of cmd ring since current entry might
                 * be a padding that is shorter than sizeof(*entry)
                 */
                size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
                                               udev->cmdr_size);
                tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
                                        ring_left : sizeof(*entry));

                free_space = true;

                if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD ||
                    tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) {
                        UPDATE_HEAD(udev->cmdr_last_cleaned,
                                    tcmu_hdr_get_len(entry->hdr.len_op),
                                    udev->cmdr_size);
                        continue;
                }
                WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

                cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
                if (!cmd) {
                        pr_err("cmd_id %u not found, ring is broken\n",
                               entry->hdr.cmd_id);
                        set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
                        break;
                }

                tcmu_handle_completion(cmd, entry);

                UPDATE_HEAD(udev->cmdr_last_cleaned,
                            tcmu_hdr_get_len(entry->hdr.len_op),
                            udev->cmdr_size);
        }
        if (free_space)
                free_space = tcmu_run_tmr_queue(udev);

        if (atomic_read(&global_db_count) > tcmu_global_max_blocks &&
            idr_is_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
                /*
                 * Allocated blocks exceeded global block limit, currently no
                 * more pending or waiting commands so try to reclaim blocks.
                 */
                schedule_delayed_work(&tcmu_unmap_work, 0);
        }
        if (udev->cmd_time_out)
                tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);

        return free_space;
}

static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
        struct se_cmd *se_cmd;

        if (!time_after_eq(jiffies, cmd->deadline))
                return;

        set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
        list_del_init(&cmd->queue_entry);
        se_cmd = cmd->se_cmd;
        se_cmd->priv = NULL;
        cmd->se_cmd = NULL;

        pr_debug("Timing out inflight cmd %u on dev %s.\n",
                 cmd->cmd_id, cmd->tcmu_dev->name);

        target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
}

static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
{
        struct se_cmd *se_cmd;

        if (!time_after_eq(jiffies, cmd->deadline))
                return;

        pr_debug("Timing out queued cmd %p on dev %s.\n",
                 cmd, cmd->tcmu_dev->name);

        list_del_init(&cmd->queue_entry);
        se_cmd = cmd->se_cmd;
        tcmu_free_cmd(cmd);

        target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}

static void tcmu_device_timedout(struct tcmu_dev *udev)
{
        spin_lock(&timed_out_udevs_lock);
        if (list_empty(&udev->timedout_entry))
                list_add_tail(&udev->timedout_entry, &timed_out_udevs);
        spin_unlock(&timed_out_udevs_lock);

        schedule_delayed_work(&tcmu_unmap_work, 0);
}

static void tcmu_cmd_timedout(struct timer_list *t)
{
        struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);

        pr_debug("%s cmd timeout has expired\n", udev->name);
        tcmu_device_timedout(udev);
}

static void tcmu_qfull_timedout(struct timer_list *t)
{
        struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);

        pr_debug("%s qfull timeout has expired\n", udev->name);
        tcmu_device_timedout(udev);
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct tcmu_hba *tcmu_hba;

        tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
        if (!tcmu_hba)
                return -ENOMEM;

        tcmu_hba->host_id = host_id;
        hba->hba_ptr = tcmu_hba;

        return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
        kfree(hba->hba_ptr);
        hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
        struct tcmu_dev *udev;

        udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
        if (!udev)
                return NULL;
        kref_init(&udev->kref);

        udev->name = kstrdup(name, GFP_KERNEL);
        if (!udev->name) {
                kfree(udev);
                return NULL;
        }

        udev->hba = hba;
        udev->cmd_time_out = TCMU_TIME_OUT;
        udev->qfull_time_out = -1;

        udev->max_blocks = DATA_BLOCK_BITS_DEF;
        mutex_init(&udev->cmdr_lock);

        INIT_LIST_HEAD(&udev->node);
        INIT_LIST_HEAD(&udev->timedout_entry);
        INIT_LIST_HEAD(&udev->qfull_queue);
        INIT_LIST_HEAD(&udev->tmr_queue);
        INIT_LIST_HEAD(&udev->inflight_queue);
        idr_init(&udev->commands);

        timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
        timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);

        INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);

        return &udev->se_dev;
}

static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
        struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
        LIST_HEAD(cmds);
        sense_reason_t scsi_ret;
        int ret;

        if (list_empty(&udev->qfull_queue))
                return;

        pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);

        list_splice_init(&udev->qfull_queue, &cmds);

        list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
                list_del_init(&tcmu_cmd->queue_entry);

                pr_debug("removing cmd %p on dev %s from queue\n",
                         tcmu_cmd, udev->name);

                if (fail) {
                        /*
                         * We were not able to even start the command, so
                         * fail with busy to allow a retry in case runner
                         * was only temporarily down. If the device is being
                         * removed then LIO core will do the right thing and
                         * fail the retry.
                         */
                        target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
                        tcmu_free_cmd(tcmu_cmd);
                        continue;
                }

                ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
                if (ret < 0) {
                        pr_debug("cmd %p on dev %s failed with %u\n",
                                 tcmu_cmd, udev->name, scsi_ret);
                        /*
                         * Ignore scsi_ret for now. target_complete_cmd
                         * drops it.
                         */
                        target_complete_cmd(tcmu_cmd->se_cmd,
                                            SAM_STAT_CHECK_CONDITION);
                        tcmu_free_cmd(tcmu_cmd);
                } else if (ret > 0) {
                        pr_debug("ran out of space during cmdr queue run\n");
                        /*
                         * cmd was requeued, so just put all cmds back in
                         * the queue
                         */
                        list_splice_tail(&cmds, &udev->qfull_queue);
                        break;
                }
        }

        tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

        mutex_lock(&udev->cmdr_lock);
        if (tcmu_handle_completions(udev))
                run_qfull_queue(udev, false);
        mutex_unlock(&udev->cmdr_lock);

        return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
        struct tcmu_dev *udev = vma->vm_private_data;
        struct uio_info *info = &udev->uio_info;

        if (vma->vm_pgoff < MAX_UIO_MAPS) {
                if (info->mem[vma->vm_pgoff].size == 0)
                        return -1;
                return (int)vma->vm_pgoff;
        }
        return -1;
}

static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
        struct page *page;

        mutex_lock(&udev->cmdr_lock);
        page = tcmu_get_block_page(udev, dbi);
        if (likely(page)) {
                mutex_unlock(&udev->cmdr_lock);
                return page;
        }

        /*
         * Userspace messed up and passed in an address not in the
         * data iov passed to it.
         */
1683 pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
1684 dbi, udev->name);
1685 page = NULL;
1686 mutex_unlock(&udev->cmdr_lock);
1687
1688 return page;
1689}
1690
1691static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
1692{
1693 struct tcmu_dev *udev = vmf->vma->vm_private_data;
1694 struct uio_info *info = &udev->uio_info;
1695 struct page *page;
1696 unsigned long offset;
1697 void *addr;
1698
1699 int mi = tcmu_find_mem_index(vmf->vma);
1700 if (mi < 0)
1701 return VM_FAULT_SIGBUS;
1702
1703 /*
1704 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
1705 * to use mem[N].
1706 */
1707 offset = (vmf->pgoff - mi) << PAGE_SHIFT;
1708
1709 if (offset < udev->data_off) {
1710 /* For the vmalloc()ed cmd area pages */
1711 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
1712 page = vmalloc_to_page(addr);
1713 } else {
1714 uint32_t dbi;
1715
1716 /* For the dynamically growing data area pages */
1717 dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
1718 page = tcmu_try_get_block_page(udev, dbi);
1719 if (!page)
1720 return VM_FAULT_SIGBUS;
1721 }
1722
1723 get_page(page);
1724 vmf->page = page;
1725 return 0;
1726}
1727
1728static const struct vm_operations_struct tcmu_vm_ops = {
1729 .fault = tcmu_vma_fault,
1730};
1731
1732static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1733{
1734 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1735
1736 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1737 vma->vm_ops = &tcmu_vm_ops;
1738
1739 vma->vm_private_data = udev;
1740
1741 /* Ensure the mmap is exactly the right size */
1742 if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
1743 return -EINVAL;
1744
1745 return 0;
1746}
1747
1748static int tcmu_open(struct uio_info *info, struct inode *inode)
1749{
1750 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1751
1752 /* O_EXCL not supported for char devs, so fake it? */
1753 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1754 return -EBUSY;
1755
1756 udev->inode = inode;
1757 kref_get(&udev->kref);
1758
1759 pr_debug("open\n");
1760
1761 return 0;
1762}
1763
1764static void tcmu_dev_call_rcu(struct rcu_head *p)
1765{
1766 struct se_device *dev = container_of(p, struct se_device, rcu_head);
1767 struct tcmu_dev *udev = TCMU_DEV(dev);
1768
1769 kfree(udev->uio_info.name);
1770 kfree(udev->name);
1771 kfree(udev);
1772}
1773
1774static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1775{
1776 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1777 kmem_cache_free(tcmu_cmd_cache, cmd);
1778 return 0;
1779 }
1780 return -EINVAL;
1781}
1782
1783static void tcmu_blocks_release(struct radix_tree_root *blocks,
1784 int start, int end)
1785{
1786 int i;
1787 struct page *page;
1788
1789 for (i = start; i < end; i++) {
1790 page = radix_tree_delete(blocks, i);
1791 if (page) {
1792 __free_page(page);
1793 atomic_dec(&global_db_count);
1794 }
1795 }
1796}
1797
1798static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
1799{
1800 struct tcmu_tmr *tmr, *tmp;
1801
1802 list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
1803 list_del_init(&tmr->queue_entry);
1804 kfree(tmr);
1805 }
1806}
1807
1808static void tcmu_dev_kref_release(struct kref *kref)
1809{
1810 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
1811 struct se_device *dev = &udev->se_dev;
1812 struct tcmu_cmd *cmd;
1813 bool all_expired = true;
1814 int i;
1815
1816 vfree(udev->mb_addr);
1817 udev->mb_addr = NULL;
1818
1819 spin_lock_bh(&timed_out_udevs_lock);
1820 if (!list_empty(&udev->timedout_entry))
1821 list_del(&udev->timedout_entry);
1822 spin_unlock_bh(&timed_out_udevs_lock);
1823
1824 /* Upper layer should drain all requests before calling this */
1825 mutex_lock(&udev->cmdr_lock);
1826 idr_for_each_entry(&udev->commands, cmd, i) {
1827 if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1828 all_expired = false;
1829 }
1830 /* There can be left over TMR cmds. Remove them. */
1831 tcmu_remove_all_queued_tmr(udev);
1832 if (!list_empty(&udev->qfull_queue))
1833 all_expired = false;
1834 idr_destroy(&udev->commands);
1835 WARN_ON(!all_expired);
1836
1837 tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
1838 bitmap_free(udev->data_bitmap);
1839 mutex_unlock(&udev->cmdr_lock);
1840
1841 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1842}
1843
1844static int tcmu_release(struct uio_info *info, struct inode *inode)
1845{
1846 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1847
1848 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1849
1850 pr_debug("close\n");
1851 /* release ref from open */
1852 kref_put(&udev->kref, tcmu_dev_kref_release);
1853 return 0;
1854}
1855
1856static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1857{
1858 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1859
1860 if (!tcmu_kern_cmd_reply_supported)
1861 return 0;
1862
1863 if (udev->nl_reply_supported <= 0)
1864 return 0;
1865
1866 mutex_lock(&tcmu_nl_cmd_mutex);
1867
1868 if (tcmu_netlink_blocked) {
1869 mutex_unlock(&tcmu_nl_cmd_mutex);
1870 pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
1871 udev->name);
1872 return -EAGAIN;
1873 }
1874
1875 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
1876 mutex_unlock(&tcmu_nl_cmd_mutex);
1877 pr_warn("netlink cmd %d already executing on %s\n",
1878 nl_cmd->cmd, udev->name);
1879 return -EBUSY;
1880 }
1881
1882 memset(nl_cmd, 0, sizeof(*nl_cmd));
1883 nl_cmd->cmd = cmd;
1884 nl_cmd->udev = udev;
1885 init_completion(&nl_cmd->complete);
1886 INIT_LIST_HEAD(&nl_cmd->nl_list);
1887
1888 list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
1889
1890 mutex_unlock(&tcmu_nl_cmd_mutex);
1891 return 0;
1892}
1893
1894static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
1895{
1896 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1897
1898 if (!tcmu_kern_cmd_reply_supported)
1899 return;
1900
1901 if (udev->nl_reply_supported <= 0)
1902 return;
1903
1904 mutex_lock(&tcmu_nl_cmd_mutex);
1905
1906 list_del(&nl_cmd->nl_list);
1907 memset(nl_cmd, 0, sizeof(*nl_cmd));
1908
1909 mutex_unlock(&tcmu_nl_cmd_mutex);
1910}
1911
1912static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
1913{
1914 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1915 int ret;
1916
1917 if (!tcmu_kern_cmd_reply_supported)
1918 return 0;
1919
1920 if (udev->nl_reply_supported <= 0)
1921 return 0;
1922
1923 pr_debug("sleeping for nl reply\n");
1924 wait_for_completion(&nl_cmd->complete);
1925
1926 mutex_lock(&tcmu_nl_cmd_mutex);
1927 nl_cmd->cmd = TCMU_CMD_UNSPEC;
1928 ret = nl_cmd->status;
1929 mutex_unlock(&tcmu_nl_cmd_mutex);
1930
1931 return ret;
1932}
1933
1934static int tcmu_netlink_event_init(struct tcmu_dev *udev,
1935 enum tcmu_genl_cmd cmd,
1936 struct sk_buff **buf, void **hdr)
1937{
1938 struct sk_buff *skb;
1939 void *msg_header;
1940 int ret = -ENOMEM;
1941
1942 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1943 if (!skb)
1944 return ret;
1945
1946 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
1947 if (!msg_header)
1948 goto free_skb;
1949
1950 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
1951 if (ret < 0)
1952 goto free_skb;
1953
1954 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
1955 if (ret < 0)
1956 goto free_skb;
1957
1958 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
1959 if (ret < 0)
1960 goto free_skb;
1961
1962 *buf = skb;
1963 *hdr = msg_header;
1964 return ret;
1965
1966free_skb:
1967 nlmsg_free(skb);
1968 return ret;
1969}
1970
1971static int tcmu_netlink_event_send(struct tcmu_dev *udev,
1972 enum tcmu_genl_cmd cmd,
1973 struct sk_buff *skb, void *msg_header)
1974{
1975 int ret;
1976
1977 genlmsg_end(skb, msg_header);
1978
1979 ret = tcmu_init_genl_cmd_reply(udev, cmd);
1980 if (ret) {
1981 nlmsg_free(skb);
1982 return ret;
1983 }
1984
1985 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1986 TCMU_MCGRP_CONFIG, GFP_KERNEL);
1987
1988 /* Wait during an add as the listener may not be up yet */
1989 if (ret == 0 ||
1990 (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
1991 return tcmu_wait_genl_cmd_reply(udev);
1992 else
1993 tcmu_destroy_genl_cmd_reply(udev);
1994
1995 return ret;
1996}
1997
1998static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
1999{
2000 struct sk_buff *skb = NULL;
2001 void *msg_header = NULL;
2002 int ret = 0;
2003
2004 ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
2005 &msg_header);
2006 if (ret < 0)
2007 return ret;
2008 return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
2009 msg_header);
2010}
2011
2012static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
2013{
2014 struct sk_buff *skb = NULL;
2015 void *msg_header = NULL;
2016 int ret = 0;
2017
2018 ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
2019 &skb, &msg_header);
2020 if (ret < 0)
2021 return ret;
2022 return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
2023 skb, msg_header);
2024}
2025
2026static int tcmu_update_uio_info(struct tcmu_dev *udev)
2027{
2028 struct tcmu_hba *hba = udev->hba->hba_ptr;
2029 struct uio_info *info;
2030 char *str;
2031
2032 info = &udev->uio_info;
2033
2034 if (udev->dev_config[0])
2035 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
2036 udev->name, udev->dev_config);
2037 else
2038 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
2039 udev->name);
2040 if (!str)
2041 return -ENOMEM;
2042
2043	/* Free the old string if it exists (kfree handles NULL) */
2044 kfree(info->name);
2045 info->name = str;
2046
2047 return 0;
2048}
2049
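/*
 * Set up the shared memory region backing the uio device: the mailbox
 * and command ring fill the first CMDR_SIZE bytes, followed by a data
 * area of max_blocks blocks of DATA_BLOCK_SIZE each.
 */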
2050static int tcmu_configure_device(struct se_device *dev)
2051{
2052 struct tcmu_dev *udev = TCMU_DEV(dev);
2053 struct uio_info *info;
2054 struct tcmu_mailbox *mb;
2055 int ret = 0;
2056
2057 ret = tcmu_update_uio_info(udev);
2058 if (ret)
2059 return ret;
2060
2061 info = &udev->uio_info;
2062
2063 mutex_lock(&udev->cmdr_lock);
2064 udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
2065 mutex_unlock(&udev->cmdr_lock);
2066 if (!udev->data_bitmap) {
2067 ret = -ENOMEM;
2068 goto err_bitmap_alloc;
2069 }
2070
2071 udev->mb_addr = vzalloc(CMDR_SIZE);
2072 if (!udev->mb_addr) {
2073 ret = -ENOMEM;
2074 goto err_vzalloc;
2075 }
2076
2077	/* The mailbox occupies the first part of the CMDR space */
2078 udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
2079 udev->data_off = CMDR_SIZE;
2080 udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
2081 udev->dbi_thresh = 0; /* Default in Idle state */
2082
2083 /* Initialise the mailbox of the ring buffer */
2084 mb = udev->mb_addr;
2085 mb->version = TCMU_MAILBOX_VERSION;
2086 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
2087 TCMU_MAILBOX_FLAG_CAP_READ_LEN |
2088 TCMU_MAILBOX_FLAG_CAP_TMR;
2089 mb->cmdr_off = CMDR_OFF;
2090 mb->cmdr_size = udev->cmdr_size;
2091
2092 WARN_ON(!PAGE_ALIGNED(udev->data_off));
2093 WARN_ON(udev->data_size % PAGE_SIZE);
2094 WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
2095
2096 info->version = __stringify(TCMU_MAILBOX_VERSION);
2097
2098 info->mem[0].name = "tcm-user command & data buffer";
2099 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
2100 info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
2101 info->mem[0].memtype = UIO_MEM_NONE;
2102
2103 info->irqcontrol = tcmu_irqcontrol;
2104 info->irq = UIO_IRQ_CUSTOM;
2105
2106 info->mmap = tcmu_mmap;
2107 info->open = tcmu_open;
2108 info->release = tcmu_release;
2109
2110 ret = uio_register_device(tcmu_root_device, info);
2111 if (ret)
2112 goto err_register;
2113
2114	/* User can set hw_block_size before enabling the device */
2115 if (dev->dev_attrib.hw_block_size == 0)
2116 dev->dev_attrib.hw_block_size = 512;
2117 /* Other attributes can be configured in userspace */
2118 if (!dev->dev_attrib.hw_max_sectors)
2119 dev->dev_attrib.hw_max_sectors = 128;
2120 if (!dev->dev_attrib.emulate_write_cache)
2121 dev->dev_attrib.emulate_write_cache = 0;
2122 dev->dev_attrib.hw_queue_depth = 128;
2123
2124	/* If the user didn't explicitly disable netlink reply support,
2125	 * use the module-scope setting.
2126	 */
2127 if (udev->nl_reply_supported >= 0)
2128 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
2129
2130 /*
2131	 * Get a ref in case userspace does a close on the uio device before
2132 * LIO has initiated tcmu_free_device.
2133 */
2134 kref_get(&udev->kref);
2135
2136 ret = tcmu_send_dev_add_event(udev);
2137 if (ret)
2138 goto err_netlink;
2139
2140 mutex_lock(&root_udev_mutex);
2141 list_add(&udev->node, &root_udev);
2142 mutex_unlock(&root_udev_mutex);
2143
2144 return 0;
2145
2146err_netlink:
2147 kref_put(&udev->kref, tcmu_dev_kref_release);
2148 uio_unregister_device(&udev->uio_info);
2149err_register:
2150 vfree(udev->mb_addr);
2151 udev->mb_addr = NULL;
2152err_vzalloc:
2153 bitmap_free(udev->data_bitmap);
2154 udev->data_bitmap = NULL;
2155err_bitmap_alloc:
2156 kfree(info->name);
2157 info->name = NULL;
2158
2159 return ret;
2160}
2161
2162static void tcmu_free_device(struct se_device *dev)
2163{
2164 struct tcmu_dev *udev = TCMU_DEV(dev);
2165
2166 /* release ref from init */
2167 kref_put(&udev->kref, tcmu_dev_kref_release);
2168}
2169
2170static void tcmu_destroy_device(struct se_device *dev)
2171{
2172 struct tcmu_dev *udev = TCMU_DEV(dev);
2173
2174 del_timer_sync(&udev->cmd_timer);
2175 del_timer_sync(&udev->qfull_timer);
2176
2177 mutex_lock(&root_udev_mutex);
2178 list_del(&udev->node);
2179 mutex_unlock(&root_udev_mutex);
2180
2181 tcmu_send_dev_remove_event(udev);
2182
2183 uio_unregister_device(&udev->uio_info);
2184
2185 /* release ref from configure */
2186 kref_put(&udev->kref, tcmu_dev_kref_release);
2187}
2188
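/*
 * While TCMU_DEV_BIT_BLOCKED is set, new commands are failed with
 * TCM_LUN_BUSY instead of being queued to the ring, so userspace can
 * recover (e.g. reset the ring) without racing against new I/O.
 */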
2189static void tcmu_unblock_dev(struct tcmu_dev *udev)
2190{
2191 mutex_lock(&udev->cmdr_lock);
2192 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
2193 mutex_unlock(&udev->cmdr_lock);
2194}
2195
2196static void tcmu_block_dev(struct tcmu_dev *udev)
2197{
2198 mutex_lock(&udev->cmdr_lock);
2199
2200 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2201 goto unlock;
2202
2203 /* complete IO that has executed successfully */
2204 tcmu_handle_completions(udev);
2205 /* fail IO waiting to be queued */
2206 run_qfull_queue(udev, true);
2207
2208unlock:
2209 mutex_unlock(&udev->cmdr_lock);
2210}
2211
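/*
 * Return the ring to a clean initial state. Outstanding commands are
 * completed with BUSY for err_level 1 (userspace never started them,
 * so they are retryable) or CHECK CONDITION for err_level 2 (hard
 * failure).
 */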
2212static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2213{
2214 struct tcmu_mailbox *mb;
2215 struct tcmu_cmd *cmd;
2216 int i;
2217
2218 mutex_lock(&udev->cmdr_lock);
2219
2220 idr_for_each_entry(&udev->commands, cmd, i) {
2221 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
2222 cmd->cmd_id, udev->name,
2223 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
2224
2225 idr_remove(&udev->commands, i);
2226 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
2227 WARN_ON(!cmd->se_cmd);
2228 list_del_init(&cmd->queue_entry);
2229 if (err_level == 1) {
2230 /*
2231 * Userspace was not able to start the
2232 * command or it is retryable.
2233 */
2234 target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
2235 } else {
2236 /* hard failure */
2237 target_complete_cmd(cmd->se_cmd,
2238 SAM_STAT_CHECK_CONDITION);
2239 }
2240 }
2241 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
2242 tcmu_free_cmd(cmd);
2243 }
2244
2245 mb = udev->mb_addr;
2246 tcmu_flush_dcache_range(mb, sizeof(*mb));
2247 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
2248 mb->cmd_tail, mb->cmd_head);
2249
2250 udev->cmdr_last_cleaned = 0;
2251 mb->cmd_tail = 0;
2252 mb->cmd_head = 0;
2253 tcmu_flush_dcache_range(mb, sizeof(*mb));
2254 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
2255
2256 del_timer(&udev->cmd_timer);
2257
2258 /*
2259	 * The ring is now empty, and the qfull queue never contains
2260	 * aborted commands, so TMRs in the TMR queue do not reference
2261	 * relevant cmd_ids. After a ring reset userspace should do a
2262	 * fresh start, so even a LUN RESET message is no longer
2263	 * relevant. Therefore remove all TMRs from the qfull queue.
2264 */
2265 tcmu_remove_all_queued_tmr(udev);
2266
2267 run_qfull_queue(udev, false);
2268
2269 mutex_unlock(&udev->cmdr_lock);
2270}
2271
2272enum {
2273 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
2274 Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
2275};
2276
2277static match_table_t tokens = {
2278 {Opt_dev_config, "dev_config=%s"},
2279 {Opt_dev_size, "dev_size=%s"},
2280 {Opt_hw_block_size, "hw_block_size=%d"},
2281 {Opt_hw_max_sectors, "hw_max_sectors=%d"},
2282 {Opt_nl_reply_supported, "nl_reply_supported=%d"},
2283 {Opt_max_data_area_mb, "max_data_area_mb=%d"},
2284 {Opt_err, NULL}
2285};
2286
2287static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
2288{
2289 int val, ret;
2290
2291 ret = match_int(arg, &val);
2292 if (ret < 0) {
2293 pr_err("match_int() failed for dev attrib. Error %d.\n",
2294 ret);
2295 return ret;
2296 }
2297
2298 if (val <= 0) {
2299 pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
2300 val);
2301 return -EINVAL;
2302 }
2303 *dev_attrib = val;
2304 return 0;
2305}
2306
2307static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
2308{
2309 int val, ret;
2310
2311 ret = match_int(arg, &val);
2312 if (ret < 0) {
2313 pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
2314 ret);
2315 return ret;
2316 }
2317
2318 if (val <= 0) {
2319		pr_err("Invalid max_data_area_mb %d. Must be greater than zero.\n", val);
2320 return -EINVAL;
2321 }
2322
2323 mutex_lock(&udev->cmdr_lock);
2324 if (udev->data_bitmap) {
2325		pr_err("Cannot set max_data_area_mb after the device has been configured.\n");
2326 ret = -EINVAL;
2327 goto unlock;
2328 }
2329
2330 udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
2331 if (udev->max_blocks > tcmu_global_max_blocks) {
2332 pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
2333 val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
2334 udev->max_blocks = tcmu_global_max_blocks;
2335 }
2336
2337unlock:
2338 mutex_unlock(&udev->cmdr_lock);
2339 return ret;
2340}
2341
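/*
 * Parse the comma/newline-separated options written to the device's
 * configfs control file, for example (names and values here are only
 * illustrative):
 *
 *   echo "dev_config=foo/bar,dev_size=1073741824" > \
 *       /sys/kernel/config/target/core/user_0/udev0/control
 */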
2342static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
2343 const char *page, ssize_t count)
2344{
2345 struct tcmu_dev *udev = TCMU_DEV(dev);
2346 char *orig, *ptr, *opts;
2347 substring_t args[MAX_OPT_ARGS];
2348 int ret = 0, token;
2349
2350 opts = kstrdup(page, GFP_KERNEL);
2351 if (!opts)
2352 return -ENOMEM;
2353
2354 orig = opts;
2355
2356 while ((ptr = strsep(&opts, ",\n")) != NULL) {
2357 if (!*ptr)
2358 continue;
2359
2360 token = match_token(ptr, tokens, args);
2361 switch (token) {
2362 case Opt_dev_config:
2363 if (match_strlcpy(udev->dev_config, &args[0],
2364 TCMU_CONFIG_LEN) == 0) {
2365 ret = -EINVAL;
2366 break;
2367 }
2368 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
2369 break;
2370 case Opt_dev_size:
2371 ret = match_u64(&args[0], &udev->dev_size);
2372 if (ret < 0)
2373 pr_err("match_u64() failed for dev_size=. Error %d.\n",
2374 ret);
2375 break;
2376 case Opt_hw_block_size:
2377 ret = tcmu_set_dev_attrib(&args[0],
2378 &(dev->dev_attrib.hw_block_size));
2379 break;
2380 case Opt_hw_max_sectors:
2381 ret = tcmu_set_dev_attrib(&args[0],
2382 &(dev->dev_attrib.hw_max_sectors));
2383 break;
2384 case Opt_nl_reply_supported:
2385 ret = match_int(&args[0], &udev->nl_reply_supported);
2386 if (ret < 0)
2387 pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
2388 ret);
2389 break;
2390 case Opt_max_data_area_mb:
2391 ret = tcmu_set_max_blocks_param(udev, &args[0]);
2392 break;
2393 default:
2394 break;
2395 }
2396
2397 if (ret)
2398 break;
2399 }
2400
2401 kfree(orig);
2402 return (!ret) ? count : ret;
2403}
2404
2405static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
2406{
2407 struct tcmu_dev *udev = TCMU_DEV(dev);
2408 ssize_t bl = 0;
2409
2410 bl = sprintf(b + bl, "Config: %s ",
2411 udev->dev_config[0] ? udev->dev_config : "NULL");
2412 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
2413 bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
2414 TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2415
2416 return bl;
2417}
2418
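/*
 * Report capacity as the index of the last addressable block,
 * i.e. dev_size / block_size - 1.
 */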
2419static sector_t tcmu_get_blocks(struct se_device *dev)
2420{
2421 struct tcmu_dev *udev = TCMU_DEV(dev);
2422
2423 return div_u64(udev->dev_size - dev->dev_attrib.block_size,
2424 dev->dev_attrib.block_size);
2425}
2426
2427static sense_reason_t
2428tcmu_parse_cdb(struct se_cmd *cmd)
2429{
2430 return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
2431}
2432
2433static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
2434{
2435 struct se_dev_attrib *da = container_of(to_config_group(item),
2436 struct se_dev_attrib, da_group);
2437 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2438
2439 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
2440}
2441
2442static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
2443 size_t count)
2444{
2445 struct se_dev_attrib *da = container_of(to_config_group(item),
2446 struct se_dev_attrib, da_group);
2447 struct tcmu_dev *udev = container_of(da->da_dev,
2448 struct tcmu_dev, se_dev);
2449 u32 val;
2450 int ret;
2451
2452 if (da->da_dev->export_count) {
2453 pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
2454 return -EINVAL;
2455 }
2456
2457 ret = kstrtou32(page, 0, &val);
2458 if (ret < 0)
2459 return ret;
2460
2461 udev->cmd_time_out = val * MSEC_PER_SEC;
2462 return count;
2463}
2464CONFIGFS_ATTR(tcmu_, cmd_time_out);
2465
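/*
 * qfull_time_out: > 0 is a timeout in seconds, 0 makes commands fail
 * immediately when the ring is full, and -1 falls back to cmd_time_out
 * (or the default timeout if that is unset).
 */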
2466static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
2467{
2468 struct se_dev_attrib *da = container_of(to_config_group(item),
2469 struct se_dev_attrib, da_group);
2470 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2471
2472 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
2473 udev->qfull_time_out :
2474 udev->qfull_time_out / MSEC_PER_SEC);
2475}
2476
2477static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
2478 const char *page, size_t count)
2479{
2480 struct se_dev_attrib *da = container_of(to_config_group(item),
2481 struct se_dev_attrib, da_group);
2482 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2483 s32 val;
2484 int ret;
2485
2486 ret = kstrtos32(page, 0, &val);
2487 if (ret < 0)
2488 return ret;
2489
2490 if (val >= 0) {
2491 udev->qfull_time_out = val * MSEC_PER_SEC;
2492 } else if (val == -1) {
2493 udev->qfull_time_out = val;
2494 } else {
2495		pr_err("Invalid qfull timeout value %d\n", val);
2496 return -EINVAL;
2497 }
2498 return count;
2499}
2500CONFIGFS_ATTR(tcmu_, qfull_time_out);
2501
2502static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
2503{
2504 struct se_dev_attrib *da = container_of(to_config_group(item),
2505 struct se_dev_attrib, da_group);
2506 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2507
2508 return snprintf(page, PAGE_SIZE, "%u\n",
2509 TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2510}
2511CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
2512
2513static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
2514{
2515 struct se_dev_attrib *da = container_of(to_config_group(item),
2516 struct se_dev_attrib, da_group);
2517 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2518
2519 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
2520}
2521
2522static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
2523 const char *reconfig_data)
2524{
2525 struct sk_buff *skb = NULL;
2526 void *msg_header = NULL;
2527 int ret = 0;
2528
2529 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2530 &skb, &msg_header);
2531 if (ret < 0)
2532 return ret;
2533 ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
2534 if (ret < 0) {
2535 nlmsg_free(skb);
2536 return ret;
2537 }
2538 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2539 skb, msg_header);
2540}
2541
2543static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
2544 size_t count)
2545{
2546 struct se_dev_attrib *da = container_of(to_config_group(item),
2547 struct se_dev_attrib, da_group);
2548 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2549 int ret, len;
2550
2551 len = strlen(page);
2552 if (!len || len > TCMU_CONFIG_LEN - 1)
2553 return -EINVAL;
2554
2555 /* Check if device has been configured before */
2556 if (target_dev_configured(&udev->se_dev)) {
2557 ret = tcmu_send_dev_config_event(udev, page);
2558 if (ret) {
2559 pr_err("Unable to reconfigure device\n");
2560 return ret;
2561 }
2562 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2563
2564 ret = tcmu_update_uio_info(udev);
2565 if (ret)
2566 return ret;
2567 return count;
2568 }
2569 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2570
2571 return count;
2572}
2573CONFIGFS_ATTR(tcmu_, dev_config);
2574
2575static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
2576{
2577 struct se_dev_attrib *da = container_of(to_config_group(item),
2578 struct se_dev_attrib, da_group);
2579 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2580
2581 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
2582}
2583
2584static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
2585{
2586 struct sk_buff *skb = NULL;
2587 void *msg_header = NULL;
2588 int ret = 0;
2589
2590 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2591 &skb, &msg_header);
2592 if (ret < 0)
2593 return ret;
2594 ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
2595 size, TCMU_ATTR_PAD);
2596 if (ret < 0) {
2597 nlmsg_free(skb);
2598 return ret;
2599 }
2600 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2601 skb, msg_header);
2602}
2603
2604static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
2605 size_t count)
2606{
2607 struct se_dev_attrib *da = container_of(to_config_group(item),
2608 struct se_dev_attrib, da_group);
2609 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2610 u64 val;
2611 int ret;
2612
2613 ret = kstrtou64(page, 0, &val);
2614 if (ret < 0)
2615 return ret;
2616
2617 /* Check if device has been configured before */
2618 if (target_dev_configured(&udev->se_dev)) {
2619 ret = tcmu_send_dev_size_event(udev, val);
2620 if (ret) {
2621 pr_err("Unable to reconfigure device\n");
2622 return ret;
2623 }
2624 }
2625 udev->dev_size = val;
2626 return count;
2627}
2628CONFIGFS_ATTR(tcmu_, dev_size);
2629
2630static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
2631 char *page)
2632{
2633 struct se_dev_attrib *da = container_of(to_config_group(item),
2634 struct se_dev_attrib, da_group);
2635 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2636
2637 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
2638}
2639
2640static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
2641 const char *page, size_t count)
2642{
2643 struct se_dev_attrib *da = container_of(to_config_group(item),
2644 struct se_dev_attrib, da_group);
2645 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2646 s8 val;
2647 int ret;
2648
2649 ret = kstrtos8(page, 0, &val);
2650 if (ret < 0)
2651 return ret;
2652
2653 udev->nl_reply_supported = val;
2654 return count;
2655}
2656CONFIGFS_ATTR(tcmu_, nl_reply_supported);
2657
2658static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
2659 char *page)
2660{
2661 struct se_dev_attrib *da = container_of(to_config_group(item),
2662 struct se_dev_attrib, da_group);
2663
2664 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
2665}
2666
2667static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
2668{
2669 struct sk_buff *skb = NULL;
2670 void *msg_header = NULL;
2671 int ret = 0;
2672
2673 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2674 &skb, &msg_header);
2675 if (ret < 0)
2676 return ret;
2677 ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
2678 if (ret < 0) {
2679 nlmsg_free(skb);
2680 return ret;
2681 }
2682 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2683 skb, msg_header);
2684}
2685
2686static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
2687 const char *page, size_t count)
2688{
2689 struct se_dev_attrib *da = container_of(to_config_group(item),
2690 struct se_dev_attrib, da_group);
2691 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2692 u8 val;
2693 int ret;
2694
2695 ret = kstrtou8(page, 0, &val);
2696 if (ret < 0)
2697 return ret;
2698
2699 /* Check if device has been configured before */
2700 if (target_dev_configured(&udev->se_dev)) {
2701 ret = tcmu_send_emulate_write_cache(udev, val);
2702 if (ret) {
2703 pr_err("Unable to reconfigure device\n");
2704 return ret;
2705 }
2706 }
2707
2708 da->emulate_write_cache = val;
2709 return count;
2710}
2711CONFIGFS_ATTR(tcmu_, emulate_write_cache);
2712
2713static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page)
2714{
2715 struct se_dev_attrib *da = container_of(to_config_group(item),
2716 struct se_dev_attrib, da_group);
2717 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2718
2719 return snprintf(page, PAGE_SIZE, "%i\n",
2720 test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags));
2721}
2722
2723static ssize_t tcmu_tmr_notification_store(struct config_item *item,
2724 const char *page, size_t count)
2725{
2726 struct se_dev_attrib *da = container_of(to_config_group(item),
2727 struct se_dev_attrib, da_group);
2728 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2729 u8 val;
2730 int ret;
2731
2732 ret = kstrtou8(page, 0, &val);
2733 if (ret < 0)
2734 return ret;
2735 if (val > 1)
2736 return -EINVAL;
2737
2738 if (val)
2739 set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
2740 else
2741 clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
2742 return count;
2743}
2744CONFIGFS_ATTR(tcmu_, tmr_notification);
2745
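/*
 * The block_dev action file: writing 1 blocks the device and fails any
 * queued commands, writing 0 unblocks it. Example (configfs path is
 * illustrative):
 *
 *   echo 1 > /sys/kernel/config/target/core/user_0/udev0/action/block_dev
 */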
2746static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
2747{
2748 struct se_device *se_dev = container_of(to_config_group(item),
2749 struct se_device,
2750 dev_action_group);
2751 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2752
2753 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2754 return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
2755 else
2756 return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
2757}
2758
2759static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
2760 size_t count)
2761{
2762 struct se_device *se_dev = container_of(to_config_group(item),
2763 struct se_device,
2764 dev_action_group);
2765 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2766 u8 val;
2767 int ret;
2768
2769 if (!target_dev_configured(&udev->se_dev)) {
2770 pr_err("Device is not configured.\n");
2771 return -EINVAL;
2772 }
2773
2774 ret = kstrtou8(page, 0, &val);
2775 if (ret < 0)
2776 return ret;
2777
2778 if (val > 1) {
2779 pr_err("Invalid block value %d\n", val);
2780 return -EINVAL;
2781 }
2782
2783 if (!val)
2784 tcmu_unblock_dev(udev);
2785 else
2786 tcmu_block_dev(udev);
2787 return count;
2788}
2789CONFIGFS_ATTR(tcmu_, block_dev);
2790
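/*
 * The reset_ring action file accepts 1 (retryable: complete ring
 * commands with BUSY) or 2 (hard failure: complete them with CHECK
 * CONDITION). Example (configfs path is illustrative):
 *
 *   echo 1 > /sys/kernel/config/target/core/user_0/udev0/action/reset_ring
 */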
2791static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
2792 size_t count)
2793{
2794 struct se_device *se_dev = container_of(to_config_group(item),
2795 struct se_device,
2796 dev_action_group);
2797 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2798 u8 val;
2799 int ret;
2800
2801 if (!target_dev_configured(&udev->se_dev)) {
2802 pr_err("Device is not configured.\n");
2803 return -EINVAL;
2804 }
2805
2806 ret = kstrtou8(page, 0, &val);
2807 if (ret < 0)
2808 return ret;
2809
2810 if (val != 1 && val != 2) {
2811 pr_err("Invalid reset ring value %d\n", val);
2812 return -EINVAL;
2813 }
2814
2815 tcmu_reset_ring(udev, val);
2816 return count;
2817}
2818CONFIGFS_ATTR_WO(tcmu_, reset_ring);
2819
2820static struct configfs_attribute *tcmu_attrib_attrs[] = {
2821 &tcmu_attr_cmd_time_out,
2822 &tcmu_attr_qfull_time_out,
2823 &tcmu_attr_max_data_area_mb,
2824 &tcmu_attr_dev_config,
2825 &tcmu_attr_dev_size,
2826 &tcmu_attr_emulate_write_cache,
2827 &tcmu_attr_tmr_notification,
2828 &tcmu_attr_nl_reply_supported,
2829 NULL,
2830};
2831
2832static struct configfs_attribute **tcmu_attrs;
2833
2834static struct configfs_attribute *tcmu_action_attrs[] = {
2835 &tcmu_attr_block_dev,
2836 &tcmu_attr_reset_ring,
2837 NULL,
2838};
2839
2840static struct target_backend_ops tcmu_ops = {
2841 .name = "user",
2842 .owner = THIS_MODULE,
2843 .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
2844 .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
2845 TRANSPORT_FLAG_PASSTHROUGH_ALUA,
2846 .attach_hba = tcmu_attach_hba,
2847 .detach_hba = tcmu_detach_hba,
2848 .alloc_device = tcmu_alloc_device,
2849 .configure_device = tcmu_configure_device,
2850 .destroy_device = tcmu_destroy_device,
2851 .free_device = tcmu_free_device,
2852 .parse_cdb = tcmu_parse_cdb,
2853 .tmr_notify = tcmu_tmr_notify,
2854 .set_configfs_dev_params = tcmu_set_configfs_dev_params,
2855 .show_configfs_dev_params = tcmu_show_configfs_dev_params,
2856 .get_device_type = sbc_get_device_type,
2857 .get_blocks = tcmu_get_blocks,
2858 .tb_dev_action_attrs = tcmu_action_attrs,
2859};
2860
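/*
 * Reclaim data area pages when the global block count has exceeded the
 * configured limit: shrink each device's dbi_thresh, unmap the freed
 * range from userspace and release the backing pages.
 */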
2861static void find_free_blocks(void)
2862{
2863 struct tcmu_dev *udev;
2864 loff_t off;
2865 u32 start, end, block, total_freed = 0;
2866
2867 if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
2868 return;
2869
2870 mutex_lock(&root_udev_mutex);
2871 list_for_each_entry(udev, &root_udev, node) {
2872 mutex_lock(&udev->cmdr_lock);
2873
2874 if (!target_dev_configured(&udev->se_dev)) {
2875 mutex_unlock(&udev->cmdr_lock);
2876 continue;
2877 }
2878
2879 /* Try to complete the finished commands first */
2880 if (tcmu_handle_completions(udev))
2881 run_qfull_queue(udev, false);
2882
2883		/* Skip udevs that are idle */
2884 if (!udev->dbi_thresh) {
2885 mutex_unlock(&udev->cmdr_lock);
2886 continue;
2887 }
2888
2889 end = udev->dbi_max + 1;
2890 block = find_last_bit(udev->data_bitmap, end);
2891 if (block == udev->dbi_max) {
2892 /*
2893			 * The last bit is dbi_max, so it is not possible
2894			 * to reclaim any blocks.
2895 */
2896 mutex_unlock(&udev->cmdr_lock);
2897 continue;
2898 } else if (block == end) {
2899			/* The current udev will go to the idle state */
2900 udev->dbi_thresh = start = 0;
2901 udev->dbi_max = 0;
2902 } else {
2903 udev->dbi_thresh = start = block + 1;
2904 udev->dbi_max = block;
2905 }
2906
2907		/* Unmap the data area from off to the end of the mapping */
2908 off = udev->data_off + start * DATA_BLOCK_SIZE;
2909 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
2910
2911 /* Release the block pages */
2912 tcmu_blocks_release(&udev->data_blocks, start, end);
2913 mutex_unlock(&udev->cmdr_lock);
2914
2915 total_freed += end - start;
2916 pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
2917 total_freed, udev->name);
2918 }
2919 mutex_unlock(&root_udev_mutex);
2920
2921 if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
2922 schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
2923}
2924
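/*
 * Expire timed-out commands on the inflight and qfull queues and
 * re-arm the per-device timers for the next deadline.
 */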
2925static void check_timedout_devices(void)
2926{
2927 struct tcmu_dev *udev, *tmp_dev;
2928 struct tcmu_cmd *cmd, *tmp_cmd;
2929 LIST_HEAD(devs);
2930
2931 spin_lock_bh(&timed_out_udevs_lock);
2932 list_splice_init(&timed_out_udevs, &devs);
2933
2934 list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
2935 list_del_init(&udev->timedout_entry);
2936 spin_unlock_bh(&timed_out_udevs_lock);
2937
2938 mutex_lock(&udev->cmdr_lock);
2939
2940 /*
2941		 * If cmd_time_out is disabled but qfull is set, the deadline
2942		 * will only reflect the qfull timeout. Ignore it here.
2943 */
2944 if (udev->cmd_time_out) {
2945 list_for_each_entry_safe(cmd, tmp_cmd,
2946 &udev->inflight_queue,
2947 queue_entry) {
2948 tcmu_check_expired_ring_cmd(cmd);
2949 }
2950 tcmu_set_next_deadline(&udev->inflight_queue,
2951 &udev->cmd_timer);
2952 }
2953 list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
2954 queue_entry) {
2955 tcmu_check_expired_queue_cmd(cmd);
2956 }
2957 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
2958
2959 mutex_unlock(&udev->cmdr_lock);
2960
2961 spin_lock_bh(&timed_out_udevs_lock);
2962 }
2963
2964 spin_unlock_bh(&timed_out_udevs_lock);
2965}
2966
2967static void tcmu_unmap_work_fn(struct work_struct *work)
2968{
2969 check_timedout_devices();
2970 find_free_blocks();
2971}
2972
2973static int __init tcmu_module_init(void)
2974{
2975 int ret, i, k, len = 0;
2976
2977 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
2978
2979 INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
2980
2981 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
2982 sizeof(struct tcmu_cmd),
2983 __alignof__(struct tcmu_cmd),
2984 0, NULL);
2985 if (!tcmu_cmd_cache)
2986 return -ENOMEM;
2987
2988 tcmu_root_device = root_device_register("tcm_user");
2989 if (IS_ERR(tcmu_root_device)) {
2990 ret = PTR_ERR(tcmu_root_device);
2991 goto out_free_cache;
2992 }
2993
2994 ret = genl_register_family(&tcmu_genl_family);
2995	if (ret < 0)
2996		goto out_unreg_device;
2998
2999 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
3000 len += sizeof(struct configfs_attribute *);
3001 for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
3002 len += sizeof(struct configfs_attribute *);
3003 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
3004 len += sizeof(struct configfs_attribute *);
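	/* Extra slot for the terminating NULL entry. */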
3005 len += sizeof(struct configfs_attribute *);
3006
3007 tcmu_attrs = kzalloc(len, GFP_KERNEL);
3008 if (!tcmu_attrs) {
3009 ret = -ENOMEM;
3010 goto out_unreg_genl;
3011 }
3012
3013 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
3014 tcmu_attrs[i] = passthrough_attrib_attrs[i];
3015 for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
3016 tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
3017 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
3018 tcmu_attrs[i++] = tcmu_attrib_attrs[k];
3019 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
3020
3021 ret = transport_backend_register(&tcmu_ops);
3022 if (ret)
3023 goto out_attrs;
3024
3025 return 0;
3026
3027out_attrs:
3028 kfree(tcmu_attrs);
3029out_unreg_genl:
3030 genl_unregister_family(&tcmu_genl_family);
3031out_unreg_device:
3032 root_device_unregister(tcmu_root_device);
3033out_free_cache:
3034 kmem_cache_destroy(tcmu_cmd_cache);
3035
3036 return ret;
3037}
3038
3039static void __exit tcmu_module_exit(void)
3040{
3041 cancel_delayed_work_sync(&tcmu_unmap_work);
3042 target_backend_unregister(&tcmu_ops);
3043 kfree(tcmu_attrs);
3044 genl_unregister_family(&tcmu_genl_family);
3045 root_device_unregister(tcmu_root_device);
3046 kmem_cache_destroy(tcmu_cmd_cache);
3047}
3048
3049MODULE_DESCRIPTION("TCM USER subsystem plugin");
3050MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
3051MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
3052MODULE_LICENSE("GPL");
3053
3054module_init(tcmu_module_init);
3055module_exit(tcmu_module_exit);
119#define TCMU_DEV_BIT_BROKEN 1
120#define TCMU_DEV_BIT_BLOCKED 2
121 unsigned long flags;
122
123 struct uio_info uio_info;
124
125 struct inode *inode;
126
127 struct tcmu_mailbox *mb_addr;
128 uint64_t dev_size;
129 u32 cmdr_size;
130 u32 cmdr_last_cleaned;
131 /* Offset of data area from start of mb */
132 /* Must add data_off and mb_addr to get the address */
133 size_t data_off;
134 size_t data_size;
135 uint32_t max_blocks;
136 size_t ring_size;
137
138 struct mutex cmdr_lock;
139 struct list_head qfull_queue;
140
141 uint32_t dbi_max;
142 uint32_t dbi_thresh;
143 unsigned long *data_bitmap;
144 struct radix_tree_root data_blocks;
145
146 struct idr commands;
147
148 struct timer_list cmd_timer;
149 unsigned int cmd_time_out;
150 struct list_head inflight_queue;
151
152 struct timer_list qfull_timer;
153 int qfull_time_out;
154
155 struct list_head timedout_entry;
156
157 struct tcmu_nl_cmd curr_nl_cmd;
158
159 char dev_config[TCMU_CONFIG_LEN];
160
161 int nl_reply_supported;
162};
163
164#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
165
166#define CMDR_OFF sizeof(struct tcmu_mailbox)
167
168struct tcmu_cmd {
169 struct se_cmd *se_cmd;
170 struct tcmu_dev *tcmu_dev;
171 struct list_head queue_entry;
172
173 uint16_t cmd_id;
174
175 /* Can't use se_cmd when cleaning up expired cmds, because if
176 cmd has been completed then accessing se_cmd is off limits */
177 uint32_t dbi_cnt;
178 uint32_t dbi_cur;
179 uint32_t *dbi;
180
181 unsigned long deadline;
182
183#define TCMU_CMD_BIT_EXPIRED 0
184#define TCMU_CMD_BIT_INFLIGHT 1
185 unsigned long flags;
186};
187/*
188 * To avoid dead lock the mutex lock order should always be:
189 *
190 * mutex_lock(&root_udev_mutex);
191 * ...
192 * mutex_lock(&tcmu_dev->cmdr_lock);
193 * mutex_unlock(&tcmu_dev->cmdr_lock);
194 * ...
195 * mutex_unlock(&root_udev_mutex);
196 */
197static DEFINE_MUTEX(root_udev_mutex);
198static LIST_HEAD(root_udev);
199
200static DEFINE_SPINLOCK(timed_out_udevs_lock);
201static LIST_HEAD(timed_out_udevs);
202
203static struct kmem_cache *tcmu_cmd_cache;
204
205static atomic_t global_db_count = ATOMIC_INIT(0);
206static struct delayed_work tcmu_unmap_work;
207static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
208
209static int tcmu_set_global_max_data_area(const char *str,
210 const struct kernel_param *kp)
211{
212 int ret, max_area_mb;
213
214 ret = kstrtoint(str, 10, &max_area_mb);
215 if (ret)
216 return -EINVAL;
217
218 if (max_area_mb <= 0) {
219 pr_err("global_max_data_area must be larger than 0.\n");
220 return -EINVAL;
221 }
222
223 tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
224 if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
225 schedule_delayed_work(&tcmu_unmap_work, 0);
226 else
227 cancel_delayed_work_sync(&tcmu_unmap_work);
228
229 return 0;
230}
231
232static int tcmu_get_global_max_data_area(char *buffer,
233 const struct kernel_param *kp)
234{
235 return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
236}
237
238static const struct kernel_param_ops tcmu_global_max_data_area_op = {
239 .set = tcmu_set_global_max_data_area,
240 .get = tcmu_get_global_max_data_area,
241};
242
243module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
244 S_IWUSR | S_IRUGO);
245MODULE_PARM_DESC(global_max_data_area_mb,
246 "Max MBs allowed to be allocated to all the tcmu device's "
247 "data areas.");
248
249static int tcmu_get_block_netlink(char *buffer,
250 const struct kernel_param *kp)
251{
252 return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
253 "blocked" : "unblocked");
254}
255
256static int tcmu_set_block_netlink(const char *str,
257 const struct kernel_param *kp)
258{
259 int ret;
260 u8 val;
261
262 ret = kstrtou8(str, 0, &val);
263 if (ret < 0)
264 return ret;
265
266 if (val > 1) {
267 pr_err("Invalid block netlink value %u\n", val);
268 return -EINVAL;
269 }
270
271 tcmu_netlink_blocked = val;
272 return 0;
273}
274
275static const struct kernel_param_ops tcmu_block_netlink_op = {
276 .set = tcmu_set_block_netlink,
277 .get = tcmu_get_block_netlink,
278};
279
280module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
281MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
282
283static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
284{
285 struct tcmu_dev *udev = nl_cmd->udev;
286
287 if (!tcmu_netlink_blocked) {
288 pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
289 return -EBUSY;
290 }
291
292 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
293 pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
294 nl_cmd->status = -EINTR;
295 list_del(&nl_cmd->nl_list);
296 complete(&nl_cmd->complete);
297 }
298 return 0;
299}
300
301static int tcmu_set_reset_netlink(const char *str,
302 const struct kernel_param *kp)
303{
304 struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
305 int ret;
306 u8 val;
307
308 ret = kstrtou8(str, 0, &val);
309 if (ret < 0)
310 return ret;
311
312 if (val != 1) {
313 pr_err("Invalid reset netlink value %u\n", val);
314 return -EINVAL;
315 }
316
317 mutex_lock(&tcmu_nl_cmd_mutex);
318 list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
319 ret = tcmu_fail_netlink_cmd(nl_cmd);
320 if (ret)
321 break;
322 }
323 mutex_unlock(&tcmu_nl_cmd_mutex);
324
325 return ret;
326}
327
328static const struct kernel_param_ops tcmu_reset_netlink_op = {
329 .set = tcmu_set_reset_netlink,
330};
331
332module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
333MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
334
335/* multicast group */
336enum tcmu_multicast_groups {
337 TCMU_MCGRP_CONFIG,
338};
339
340static const struct genl_multicast_group tcmu_mcgrps[] = {
341 [TCMU_MCGRP_CONFIG] = { .name = "config", },
342};
343
344static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
345 [TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
346 [TCMU_ATTR_MINOR] = { .type = NLA_U32 },
347 [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
348 [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
349 [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
350};
351
352static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
353{
354 struct tcmu_dev *udev = NULL;
355 struct tcmu_nl_cmd *nl_cmd;
356 int dev_id, rc, ret = 0;
357
358 if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
359 !info->attrs[TCMU_ATTR_DEVICE_ID]) {
360 printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
361 return -EINVAL;
362 }
363
364 dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
365 rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
366
367 mutex_lock(&tcmu_nl_cmd_mutex);
368 list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
369 if (nl_cmd->udev->se_dev.dev_index == dev_id) {
370 udev = nl_cmd->udev;
371 break;
372 }
373 }
374
375 if (!udev) {
376 pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
377 completed_cmd, rc, dev_id);
378 ret = -ENODEV;
379 goto unlock;
380 }
381 list_del(&nl_cmd->nl_list);
382
383 pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
384 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
385 nl_cmd->status);
386
387 if (nl_cmd->cmd != completed_cmd) {
388 pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
389 udev->name, completed_cmd, nl_cmd->cmd);
390 ret = -EINVAL;
391 goto unlock;
392 }
393
394 nl_cmd->status = rc;
395 complete(&nl_cmd->complete);
396unlock:
397 mutex_unlock(&tcmu_nl_cmd_mutex);
398 return ret;
399}
400
401static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
402{
403 return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
404}
405
406static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
407{
408 return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
409}
410
411static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
412 struct genl_info *info)
413{
414 return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
415}
416
417static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
418{
419 if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
420 tcmu_kern_cmd_reply_supported =
421 nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
422 printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
423 tcmu_kern_cmd_reply_supported);
424 }
425
426 return 0;
427}
428
429static const struct genl_ops tcmu_genl_ops[] = {
430 {
431 .cmd = TCMU_CMD_SET_FEATURES,
432 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
433 .flags = GENL_ADMIN_PERM,
434 .doit = tcmu_genl_set_features,
435 },
436 {
437 .cmd = TCMU_CMD_ADDED_DEVICE_DONE,
438 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
439 .flags = GENL_ADMIN_PERM,
440 .doit = tcmu_genl_add_dev_done,
441 },
442 {
443 .cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
444 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
445 .flags = GENL_ADMIN_PERM,
446 .doit = tcmu_genl_rm_dev_done,
447 },
448 {
449 .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
450 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
451 .flags = GENL_ADMIN_PERM,
452 .doit = tcmu_genl_reconfig_dev_done,
453 },
454};
455
456/* Our generic netlink family */
457static struct genl_family tcmu_genl_family __ro_after_init = {
458 .module = THIS_MODULE,
459 .hdrsize = 0,
460 .name = "TCM-USER",
461 .version = 2,
462 .maxattr = TCMU_ATTR_MAX,
463 .policy = tcmu_attr_policy,
464 .mcgrps = tcmu_mcgrps,
465 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
466 .netnsok = true,
467 .ops = tcmu_genl_ops,
468 .n_ops = ARRAY_SIZE(tcmu_genl_ops),
469};
470
471#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
472#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
473#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
474#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
475
476static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
477{
478 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
479 uint32_t i;
480
481 for (i = 0; i < len; i++)
482 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
483}
484
485static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
486 struct tcmu_cmd *tcmu_cmd)
487{
488 struct page *page;
489 int ret, dbi;
490
491 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
492 if (dbi == udev->dbi_thresh)
493 return false;
494
495 page = radix_tree_lookup(&udev->data_blocks, dbi);
496 if (!page) {
497 if (atomic_add_return(1, &global_db_count) >
498 tcmu_global_max_blocks)
499 schedule_delayed_work(&tcmu_unmap_work, 0);
500
501 /* try to get new page from the mm */
502 page = alloc_page(GFP_KERNEL);
503 if (!page)
504 goto err_alloc;
505
506 ret = radix_tree_insert(&udev->data_blocks, dbi, page);
507 if (ret)
508 goto err_insert;
509 }
510
511 if (dbi > udev->dbi_max)
512 udev->dbi_max = dbi;
513
514 set_bit(dbi, udev->data_bitmap);
515 tcmu_cmd_set_dbi(tcmu_cmd, dbi);
516
517 return true;
518err_insert:
519 __free_page(page);
520err_alloc:
521 atomic_dec(&global_db_count);
522 return false;
523}
524
525static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
526 struct tcmu_cmd *tcmu_cmd)
527{
528 int i;
529
530 for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
531 if (!tcmu_get_empty_block(udev, tcmu_cmd))
532 return false;
533 }
534 return true;
535}
536
537static inline struct page *
538tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
539{
540 return radix_tree_lookup(&udev->data_blocks, dbi);
541}
542
543static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
544{
545 kfree(tcmu_cmd->dbi);
546 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
547}
548
549static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
550{
551 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
552 size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
553
554 if (se_cmd->se_cmd_flags & SCF_BIDI) {
555 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
556 data_length += round_up(se_cmd->t_bidi_data_sg->length,
557 DATA_BLOCK_SIZE);
558 }
559
560 return data_length;
561}
562
563static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
564{
565 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
566
567 return data_length / DATA_BLOCK_SIZE;
568}
569
570static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
571{
572 struct se_device *se_dev = se_cmd->se_dev;
573 struct tcmu_dev *udev = TCMU_DEV(se_dev);
574 struct tcmu_cmd *tcmu_cmd;
575
576 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
577 if (!tcmu_cmd)
578 return NULL;
579
580 INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
581 tcmu_cmd->se_cmd = se_cmd;
582 tcmu_cmd->tcmu_dev = udev;
583
584 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
585 tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
586 tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
587 GFP_KERNEL);
588 if (!tcmu_cmd->dbi) {
589 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
590 return NULL;
591 }
592
593 return tcmu_cmd;
594}
595
596static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
597{
598 unsigned long offset = offset_in_page(vaddr);
599 void *start = vaddr - offset;
600
601 size = round_up(size+offset, PAGE_SIZE);
602
603 while (size) {
604 flush_dcache_page(virt_to_page(start));
605 start += PAGE_SIZE;
606 size -= PAGE_SIZE;
607 }
608}
609
610/*
611 * Some ring helper functions. We don't assume size is a power of 2 so
612 * we can't use circ_buf.h.
613 */
614static inline size_t spc_used(size_t head, size_t tail, size_t size)
615{
616 int diff = head - tail;
617
618 if (diff >= 0)
619 return diff;
620 else
621 return size + diff;
622}
623
624static inline size_t spc_free(size_t head, size_t tail, size_t size)
625{
626 /* Keep 1 byte unused or we can't tell full from empty */
627 return (size - spc_used(head, tail, size) - 1);
628}
629
630static inline size_t head_to_end(size_t head, size_t size)
631{
632 return size - head;
633}
634
635static inline void new_iov(struct iovec **iov, int *iov_cnt)
636{
637 struct iovec *iovec;
638
639 if (*iov_cnt != 0)
640 (*iov)++;
641 (*iov_cnt)++;
642
643 iovec = *iov;
644 memset(iovec, 0, sizeof(struct iovec));
645}
646
647#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
648
649/* offset is relative to mb_addr */
650static inline size_t get_block_offset_user(struct tcmu_dev *dev,
651 int dbi, int remaining)
652{
653 return dev->data_off + dbi * DATA_BLOCK_SIZE +
654 DATA_BLOCK_SIZE - remaining;
655}
656
657static inline size_t iov_tail(struct iovec *iov)
658{
659 return (size_t)iov->iov_base + iov->iov_len;
660}
661
662static void scatter_data_area(struct tcmu_dev *udev,
663 struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
664 unsigned int data_nents, struct iovec **iov,
665 int *iov_cnt, bool copy_data)
666{
667 int i, dbi;
668 int block_remaining = 0;
669 void *from, *to = NULL;
670 size_t copy_bytes, to_offset, offset;
671 struct scatterlist *sg;
672 struct page *page;
673
674 for_each_sg(data_sg, sg, data_nents, i) {
675 int sg_remaining = sg->length;
676 from = kmap_atomic(sg_page(sg)) + sg->offset;
677 while (sg_remaining > 0) {
678 if (block_remaining == 0) {
679 if (to)
680 kunmap_atomic(to);
681
682 block_remaining = DATA_BLOCK_SIZE;
683 dbi = tcmu_cmd_get_dbi(tcmu_cmd);
684 page = tcmu_get_block_page(udev, dbi);
685 to = kmap_atomic(page);
686 }
687
688 /*
689 * Covert to virtual offset of the ring data area.
690 */
691 to_offset = get_block_offset_user(udev, dbi,
692 block_remaining);
693
694 /*
695 * The following code will gather and map the blocks
696 * to the same iovec when the blocks are all next to
697 * each other.
698 */
699 copy_bytes = min_t(size_t, sg_remaining,
700 block_remaining);
701 if (*iov_cnt != 0 &&
702 to_offset == iov_tail(*iov)) {
703 /*
704 * Will append to the current iovec, because
705 * the current block page is next to the
706 * previous one.
707 */
708 (*iov)->iov_len += copy_bytes;
709 } else {
710 /*
711 * Will allocate a new iovec because we are
712 * first time here or the current block page
713 * is not next to the previous one.
714 */
715 new_iov(iov, iov_cnt);
716 (*iov)->iov_base = (void __user *)to_offset;
717 (*iov)->iov_len = copy_bytes;
718 }
719
720 if (copy_data) {
721 offset = DATA_BLOCK_SIZE - block_remaining;
722 memcpy(to + offset,
723 from + sg->length - sg_remaining,
724 copy_bytes);
725 tcmu_flush_dcache_range(to, copy_bytes);
726 }
727
728 sg_remaining -= copy_bytes;
729 block_remaining -= copy_bytes;
730 }
731 kunmap_atomic(from - sg->offset);
732 }
733
734 if (to)
735 kunmap_atomic(to);
736}
737
738static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
739 bool bidi, uint32_t read_len)
740{
741 struct se_cmd *se_cmd = cmd->se_cmd;
742 int i, dbi;
743 int block_remaining = 0;
744 void *from = NULL, *to;
745 size_t copy_bytes, offset;
746 struct scatterlist *sg, *data_sg;
747 struct page *page;
748 unsigned int data_nents;
749 uint32_t count = 0;
750
751 if (!bidi) {
752 data_sg = se_cmd->t_data_sg;
753 data_nents = se_cmd->t_data_nents;
754 } else {
755
756 /*
757 * For bidi case, the first count blocks are for Data-Out
758 * buffer blocks, and before gathering the Data-In buffer
759 * the Data-Out buffer blocks should be discarded.
760 */
761 count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
762
763 data_sg = se_cmd->t_bidi_data_sg;
764 data_nents = se_cmd->t_bidi_data_nents;
765 }
766
767 tcmu_cmd_set_dbi_cur(cmd, count);
768
769 for_each_sg(data_sg, sg, data_nents, i) {
770 int sg_remaining = sg->length;
771 to = kmap_atomic(sg_page(sg)) + sg->offset;
772 while (sg_remaining > 0 && read_len > 0) {
773 if (block_remaining == 0) {
774 if (from)
775 kunmap_atomic(from);
776
777 block_remaining = DATA_BLOCK_SIZE;
778 dbi = tcmu_cmd_get_dbi(cmd);
779 page = tcmu_get_block_page(udev, dbi);
780 from = kmap_atomic(page);
781 }
782 copy_bytes = min_t(size_t, sg_remaining,
783 block_remaining);
784 if (read_len < copy_bytes)
785 copy_bytes = read_len;
786 offset = DATA_BLOCK_SIZE - block_remaining;
787 tcmu_flush_dcache_range(from, copy_bytes);
788 memcpy(to + sg->length - sg_remaining, from + offset,
789 copy_bytes);
790
791 sg_remaining -= copy_bytes;
792 block_remaining -= copy_bytes;
793 read_len -= copy_bytes;
794 }
795 kunmap_atomic(to - sg->offset);
796 if (read_len == 0)
797 break;
798 }
799 if (from)
800 kunmap_atomic(from);
801}
802
803static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
804{
805 return thresh - bitmap_weight(bitmap, thresh);
806}
807
808/*
809 * We can't queue a command until we have space available on the cmd ring *and*
810 * space available on the data area.
811 *
812 * Called with ring lock held.
813 */
814static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
815 size_t cmd_size, size_t data_needed)
816{
817 struct tcmu_mailbox *mb = udev->mb_addr;
818 uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
819 / DATA_BLOCK_SIZE;
820 size_t space, cmd_needed;
821 u32 cmd_head;
822
823 tcmu_flush_dcache_range(mb, sizeof(*mb));
824
825 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
826
827 /*
828 * If cmd end-of-ring space is too small then we need space for a NOP plus
829 * original cmd - cmds are internally contiguous.
830 */
831 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
832 cmd_needed = cmd_size;
833 else
834 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
835
836 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
837 if (space < cmd_needed) {
838 pr_debug("no cmd space: %u %u %u\n", cmd_head,
839 udev->cmdr_last_cleaned, udev->cmdr_size);
840 return false;
841 }
842
843 /* try to check and get the data blocks as needed */
844 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
845 if ((space * DATA_BLOCK_SIZE) < data_needed) {
846 unsigned long blocks_left =
847 (udev->max_blocks - udev->dbi_thresh) + space;
848
849 if (blocks_left < blocks_needed) {
850 pr_debug("no data space: only %lu available, but ask for %zu\n",
851 blocks_left * DATA_BLOCK_SIZE,
852 data_needed);
853 return false;
854 }
855
856 udev->dbi_thresh += blocks_needed;
857 if (udev->dbi_thresh > udev->max_blocks)
858 udev->dbi_thresh = udev->max_blocks;
859 }
860
861 return tcmu_get_empty_blocks(udev, cmd);
862}
863
864static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
865{
866 return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
867 sizeof(struct tcmu_cmd_entry));
868}
869
870static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
871 size_t base_command_size)
872{
873 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
874 size_t command_size;
875
876 command_size = base_command_size +
877 round_up(scsi_command_size(se_cmd->t_task_cdb),
878 TCMU_OP_ALIGN_SIZE);
879
880 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
881
882 return command_size;
883}
884
885static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
886 struct timer_list *timer)
887{
888 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
889 int cmd_id;
890
891 if (tcmu_cmd->cmd_id)
892 goto setup_timer;
893
894 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
895 if (cmd_id < 0) {
896 pr_err("tcmu: Could not allocate cmd id.\n");
897 return cmd_id;
898 }
899 tcmu_cmd->cmd_id = cmd_id;
900
901 pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
902 udev->name, tmo / MSEC_PER_SEC);
903
904setup_timer:
905 if (!tmo)
906 return 0;
907
908 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
909 if (!timer_pending(timer))
910 mod_timer(timer, tcmu_cmd->deadline);
911
912 return 0;
913}
914
915static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
916{
917 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
918 unsigned int tmo;
919 int ret;
920
921 /*
922 * For backwards compat if qfull_time_out is not set use
923 * cmd_time_out and if that's not set use the default time out.
924 */
925 if (!udev->qfull_time_out)
926 return -ETIMEDOUT;
927 else if (udev->qfull_time_out > 0)
928 tmo = udev->qfull_time_out;
929 else if (udev->cmd_time_out)
930 tmo = udev->cmd_time_out;
931 else
932 tmo = TCMU_TIME_OUT;
933
934 ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
935 if (ret)
936 return ret;
937
938 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
939 pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
940 tcmu_cmd->cmd_id, udev->name);
941 return 0;
942}
943
944/**
945 * queue_cmd_ring - queue cmd to ring or internally
946 * @tcmu_cmd: cmd to queue
947 * @scsi_err: TCM error code if failure (-1) returned.
948 *
949 * Returns:
950 * -1 we cannot queue internally or to the ring.
951 * 0 success
952 * 1 internally queued to wait for ring memory to free.
953 */
954static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
955{
956 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
957 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
958 size_t base_command_size, command_size;
959 struct tcmu_mailbox *mb;
960 struct tcmu_cmd_entry *entry;
961 struct iovec *iov;
962 int iov_cnt, ret;
963 uint32_t cmd_head;
964 uint64_t cdb_off;
965 bool copy_to_data_area;
966 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
967
968 *scsi_err = TCM_NO_SENSE;
969
970 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
971 *scsi_err = TCM_LUN_BUSY;
972 return -1;
973 }
974
975 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
976 *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
977 return -1;
978 }
979
980	/*
981	 * Must be a certain minimum size for response sense info, but
982	 * may also be larger if the iov array is large.
983	 *
984	 * We prepare as many iovs as possible here, because it is
985	 * expensive to tell how many regions are free in the bitmap
986	 * and global data pool, and the size calculated here is only
987	 * used for the space checks.
988	 *
989	 * The size is recalculated later, once the actual need is
990	 * known, to save cmd area space.
991	 */
992 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
993 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
994
995 if (!list_empty(&udev->qfull_queue))
996 goto queue;
997
998 mb = udev->mb_addr;
999 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
1000 if ((command_size > (udev->cmdr_size / 2)) ||
1001 data_length > udev->data_size) {
1002 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
1003 "cmd ring/data area\n", command_size, data_length,
1004 udev->cmdr_size, udev->data_size);
1005 *scsi_err = TCM_INVALID_CDB_FIELD;
1006 return -1;
1007 }
1008
1009 if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
1010 /*
1011 * Don't leave commands partially setup because the unmap
1012 * thread might need the blocks to make forward progress.
1013 */
1014 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
1015 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1016 goto queue;
1017 }
1018
1019 /* Insert a PAD if end-of-ring space is too small */
1020 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
1021 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
1022
1023 entry = (void *) mb + CMDR_OFF + cmd_head;
1024 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
1025 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
1026 entry->hdr.cmd_id = 0; /* not used for PAD */
1027 entry->hdr.kflags = 0;
1028 entry->hdr.uflags = 0;
1029 tcmu_flush_dcache_range(entry, sizeof(*entry));
1030
1031 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
1032 tcmu_flush_dcache_range(mb, sizeof(*mb));
1033
1034 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
1035 WARN_ON(cmd_head != 0);
1036 }
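	/*
	 * Ring-wrap sketch (illustrative sizes): with an 8-slot ring,
	 * the head at slot 6 and a 3-slot command, slots 6-7 are consumed
	 * by a 2-slot PAD entry and the command is written starting at
	 * slot 0, which the WARN_ON above asserts.
	 */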
1037
1038 entry = (void *) mb + CMDR_OFF + cmd_head;
1039 memset(entry, 0, command_size);
1040 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
1041
1042 /* Handle allocating space from the data area */
1043 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1044 iov = &entry->req.iov[0];
1045 iov_cnt = 0;
1046 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
1047 || se_cmd->se_cmd_flags & SCF_BIDI);
1048 scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
1049 se_cmd->t_data_nents, &iov, &iov_cnt,
1050 copy_to_data_area);
1051 entry->req.iov_cnt = iov_cnt;
1052
1053 /* Handle BIDI commands */
1054 iov_cnt = 0;
1055 if (se_cmd->se_cmd_flags & SCF_BIDI) {
1056 iov++;
1057 scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
1058 se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
1059 false);
1060 }
1061 entry->req.iov_bidi_cnt = iov_cnt;
1062
1063 ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
1064 &udev->cmd_timer);
1065 if (ret) {
1066 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
1067
1068 *scsi_err = TCM_OUT_OF_RESOURCES;
1069 return -1;
1070 }
1071 entry->hdr.cmd_id = tcmu_cmd->cmd_id;
1072
1073 /*
1074	 * Recalculate the command's base size and total size according
1075	 * to the number of iovs actually used.
1076 */
1077 base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
1078 entry->req.iov_bidi_cnt);
1079 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
1080
1081 tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
1082
1083 /* All offsets relative to mb_addr, not start of entry! */
1084 cdb_off = CMDR_OFF + cmd_head + base_command_size;
1085 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
1086 entry->req.cdb_off = cdb_off;
1087 tcmu_flush_dcache_range(entry, sizeof(*entry));
1088
1089 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
1090 tcmu_flush_dcache_range(mb, sizeof(*mb));
1091
1092 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
1093 set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
1094
1095 /* TODO: only if FLUSH and FUA? */
1096 uio_event_notify(&udev->uio_info);
1097
1098 return 0;
1099
1100queue:
1101 if (add_to_qfull_queue(tcmu_cmd)) {
1102 *scsi_err = TCM_OUT_OF_RESOURCES;
1103 return -1;
1104 }
1105
1106 return 1;
1107}
1108
1109static sense_reason_t
1110tcmu_queue_cmd(struct se_cmd *se_cmd)
1111{
1112 struct se_device *se_dev = se_cmd->se_dev;
1113 struct tcmu_dev *udev = TCMU_DEV(se_dev);
1114 struct tcmu_cmd *tcmu_cmd;
1115 sense_reason_t scsi_ret;
1116 int ret;
1117
1118 tcmu_cmd = tcmu_alloc_cmd(se_cmd);
1119 if (!tcmu_cmd)
1120 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1121
1122 mutex_lock(&udev->cmdr_lock);
1123 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1124 mutex_unlock(&udev->cmdr_lock);
1125 if (ret < 0)
1126 tcmu_free_cmd(tcmu_cmd);
1127 return scsi_ret;
1128}
1129
1130static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
1131{
1132 struct se_cmd *se_cmd = cmd->se_cmd;
1133 struct tcmu_dev *udev = cmd->tcmu_dev;
1134 bool read_len_valid = false;
1135 uint32_t read_len;
1136
1137 /*
1138	 * The cmd was already completed by the timeout handler; just
1139	 * reclaim its data area space and free the cmd.
1140 */
1141 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1142 WARN_ON_ONCE(se_cmd);
1143 goto out;
1144 }
1145
1146 list_del_init(&cmd->queue_entry);
1147
1148 tcmu_cmd_reset_dbi_cur(cmd);
1149
1150 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
1151 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
1152 cmd->se_cmd);
1153 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
1154 goto done;
1155 }
1156
1157 read_len = se_cmd->data_length;
1158 if (se_cmd->data_direction == DMA_FROM_DEVICE &&
1159 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
1160 read_len_valid = true;
1161 if (entry->rsp.read_len < read_len)
1162 read_len = entry->rsp.read_len;
1163 }
1164
1165 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
1166 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
1167		if (!read_len_valid)
1168 goto done;
1169 else
1170 se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
1171 }
1172 if (se_cmd->se_cmd_flags & SCF_BIDI) {
1173 /* Get Data-In buffer before clean up */
1174 gather_data_area(udev, cmd, true, read_len);
1175 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
1176 gather_data_area(udev, cmd, false, read_len);
1177 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
1178 /* TODO: */
1179 } else if (se_cmd->data_direction != DMA_NONE) {
1180 pr_warn("TCMU: data direction was %d!\n",
1181 se_cmd->data_direction);
1182 }
1183
1184done:
1185 if (read_len_valid) {
1186 pr_debug("read_len = %d\n", read_len);
1187 target_complete_cmd_with_length(cmd->se_cmd,
1188 entry->rsp.scsi_status, read_len);
1189 } else
1190 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
1191
1192out:
1193 cmd->se_cmd = NULL;
1194 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1195 tcmu_free_cmd(cmd);
1196}
1197
1198static void tcmu_set_next_deadline(struct list_head *queue,
1199 struct timer_list *timer)
1200{
1201 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1202 unsigned long deadline = 0;
1203
1204 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
1205 if (!time_after(jiffies, tcmu_cmd->deadline)) {
1206 deadline = tcmu_cmd->deadline;
1207 break;
1208 }
1209 }
1210
1211 if (deadline)
1212 mod_timer(timer, deadline);
1213 else
1214 del_timer(timer);
1215}
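
/*
 * Both queues are kept in submission order, so the first entry whose
 * deadline has not yet expired carries the earliest outstanding
 * deadline; e.g. for deadlines {expired, t + 2s, t + 5s} the timer is
 * re-armed for t + 2s, while a fully expired or empty queue disarms it.
 */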
1216
1217static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1218{
1219 struct tcmu_mailbox *mb;
1220 struct tcmu_cmd *cmd;
1221 int handled = 0;
1222
1223 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
1224 pr_err("ring broken, not handling completions\n");
1225 return 0;
1226 }
1227
1228 mb = udev->mb_addr;
1229 tcmu_flush_dcache_range(mb, sizeof(*mb));
1230
1231 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
1232
1233 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
1234
1235 tcmu_flush_dcache_range(entry, sizeof(*entry));
1236
1237 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
1238 UPDATE_HEAD(udev->cmdr_last_cleaned,
1239 tcmu_hdr_get_len(entry->hdr.len_op),
1240 udev->cmdr_size);
1241 continue;
1242 }
1243 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
1244
1245 cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
1246 if (!cmd) {
1247 pr_err("cmd_id %u not found, ring is broken\n",
1248 entry->hdr.cmd_id);
1249 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
1250 break;
1251 }
1252
1253 tcmu_handle_completion(cmd, entry);
1254
1255 UPDATE_HEAD(udev->cmdr_last_cleaned,
1256 tcmu_hdr_get_len(entry->hdr.len_op),
1257 udev->cmdr_size);
1258
1259 handled++;
1260 }
1261
1262 if (mb->cmd_tail == mb->cmd_head) {
1263 /* no more pending commands */
1264 del_timer(&udev->cmd_timer);
1265
1266 if (list_empty(&udev->qfull_queue)) {
1267 /*
1268 * no more pending or waiting commands so try to
1269 * reclaim blocks if needed.
1270 */
1271 if (atomic_read(&global_db_count) >
1272 tcmu_global_max_blocks)
1273 schedule_delayed_work(&tcmu_unmap_work, 0);
1274 }
1275 } else if (udev->cmd_time_out) {
1276 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
1277 }
1278
1279 return handled;
1280}
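
/*
 * Illustrative consumer walk (hypothetical ring contents): for entries
 * [CMD id=3][PAD][CMD id=7] between cmdr_last_cleaned and cmd_tail,
 * the loop above completes id 3, steps over the PAD, completes id 7,
 * and then either disarms the command timer (ring fully drained) or
 * re-arms it for the oldest inflight command. Only the entry lengths
 * read from the shared area are used to advance the cleaner.
 */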
1281
1282static int tcmu_check_expired_cmd(int id, void *p, void *data)
1283{
1284 struct tcmu_cmd *cmd = p;
1285 struct tcmu_dev *udev = cmd->tcmu_dev;
1286 u8 scsi_status;
1287 struct se_cmd *se_cmd;
1288 bool is_running;
1289
1290 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
1291 return 0;
1292
1293 if (!time_after(jiffies, cmd->deadline))
1294 return 0;
1295
1296 is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
1297 se_cmd = cmd->se_cmd;
1298
1299 if (is_running) {
1300 /*
1301 * If cmd_time_out is disabled but qfull is set deadline
1302 * will only reflect the qfull timeout. Ignore it.
1303 */
1304 if (!udev->cmd_time_out)
1305 return 0;
1306
1307 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1308 /*
1309 * target_complete_cmd will translate this to LUN COMM FAILURE
1310 */
1311 scsi_status = SAM_STAT_CHECK_CONDITION;
1312 list_del_init(&cmd->queue_entry);
1313 cmd->se_cmd = NULL;
1314 } else {
1315 list_del_init(&cmd->queue_entry);
1316 idr_remove(&udev->commands, id);
1317 tcmu_free_cmd(cmd);
1318 scsi_status = SAM_STAT_TASK_SET_FULL;
1319 }
1320
1321 pr_debug("Timing out cmd %u on dev %s that is %s.\n",
1322 id, udev->name, is_running ? "inflight" : "queued");
1323
1324 target_complete_cmd(se_cmd, scsi_status);
1325 return 0;
1326}
1327
1328static void tcmu_device_timedout(struct tcmu_dev *udev)
1329{
1330 spin_lock(&timed_out_udevs_lock);
1331 if (list_empty(&udev->timedout_entry))
1332 list_add_tail(&udev->timedout_entry, &timed_out_udevs);
1333 spin_unlock(&timed_out_udevs_lock);
1334
1335 schedule_delayed_work(&tcmu_unmap_work, 0);
1336}
1337
1338static void tcmu_cmd_timedout(struct timer_list *t)
1339{
1340 struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
1341
1342 pr_debug("%s cmd timeout has expired\n", udev->name);
1343 tcmu_device_timedout(udev);
1344}
1345
1346static void tcmu_qfull_timedout(struct timer_list *t)
1347{
1348 struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
1349
1350 pr_debug("%s qfull timeout has expired\n", udev->name);
1351 tcmu_device_timedout(udev);
1352}
1353
1354static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
1355{
1356 struct tcmu_hba *tcmu_hba;
1357
1358 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
1359 if (!tcmu_hba)
1360 return -ENOMEM;
1361
1362 tcmu_hba->host_id = host_id;
1363 hba->hba_ptr = tcmu_hba;
1364
1365 return 0;
1366}
1367
1368static void tcmu_detach_hba(struct se_hba *hba)
1369{
1370 kfree(hba->hba_ptr);
1371 hba->hba_ptr = NULL;
1372}
1373
1374static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1375{
1376 struct tcmu_dev *udev;
1377
1378 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
1379 if (!udev)
1380 return NULL;
1381 kref_init(&udev->kref);
1382
1383 udev->name = kstrdup(name, GFP_KERNEL);
1384 if (!udev->name) {
1385 kfree(udev);
1386 return NULL;
1387 }
1388
1389 udev->hba = hba;
1390 udev->cmd_time_out = TCMU_TIME_OUT;
1391 udev->qfull_time_out = -1;
1392
1393 udev->max_blocks = DATA_BLOCK_BITS_DEF;
1394 mutex_init(&udev->cmdr_lock);
1395
1396 INIT_LIST_HEAD(&udev->node);
1397 INIT_LIST_HEAD(&udev->timedout_entry);
1398 INIT_LIST_HEAD(&udev->qfull_queue);
1399 INIT_LIST_HEAD(&udev->inflight_queue);
1400 idr_init(&udev->commands);
1401
1402 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
1403 timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
1404
1405 INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
1406
1407 return &udev->se_dev;
1408}
1409
1410static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
1411{
1412 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1413 LIST_HEAD(cmds);
1414 bool drained = true;
1415 sense_reason_t scsi_ret;
1416 int ret;
1417
1418 if (list_empty(&udev->qfull_queue))
1419 return true;
1420
1421 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
1422
1423 list_splice_init(&udev->qfull_queue, &cmds);
1424
1425 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
1426 list_del_init(&tcmu_cmd->queue_entry);
1427
1428 pr_debug("removing cmd %u on dev %s from queue\n",
1429 tcmu_cmd->cmd_id, udev->name);
1430
1431 if (fail) {
1432 idr_remove(&udev->commands, tcmu_cmd->cmd_id);
1433 /*
1434 * We were not able to even start the command, so
1435			 * fail with busy to allow a retry in case the userspace
1436			 * handler was only temporarily down. If the device is being
1437 * removed then LIO core will do the right thing and
1438 * fail the retry.
1439 */
1440 target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
1441 tcmu_free_cmd(tcmu_cmd);
1442 continue;
1443 }
1444
1445 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1446 if (ret < 0) {
1447 pr_debug("cmd %u on dev %s failed with %u\n",
1448 tcmu_cmd->cmd_id, udev->name, scsi_ret);
1449
1450 idr_remove(&udev->commands, tcmu_cmd->cmd_id);
1451 /*
1452 * Ignore scsi_ret for now. target_complete_cmd
1453 * drops it.
1454 */
1455 target_complete_cmd(tcmu_cmd->se_cmd,
1456 SAM_STAT_CHECK_CONDITION);
1457 tcmu_free_cmd(tcmu_cmd);
1458 } else if (ret > 0) {
1459 pr_debug("ran out of space during cmdr queue run\n");
1460 /*
1461 * cmd was requeued, so just put all cmds back in
1462 * the queue
1463 */
1464 list_splice_tail(&cmds, &udev->qfull_queue);
1465 drained = false;
1466 break;
1467 }
1468 }
1469
1470 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1471 return drained;
1472}
1473
1474static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1475{
1476 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1477
1478 mutex_lock(&udev->cmdr_lock);
1479 tcmu_handle_completions(udev);
1480 run_qfull_queue(udev, false);
1481 mutex_unlock(&udev->cmdr_lock);
1482
1483 return 0;
1484}
1485
1486/*
1487 * mmap code from uio.c. Copied here because we want to hook mmap()
1488 * and this stuff must come along.
1489 */
1490static int tcmu_find_mem_index(struct vm_area_struct *vma)
1491{
1492 struct tcmu_dev *udev = vma->vm_private_data;
1493 struct uio_info *info = &udev->uio_info;
1494
1495 if (vma->vm_pgoff < MAX_UIO_MAPS) {
1496 if (info->mem[vma->vm_pgoff].size == 0)
1497 return -1;
1498 return (int)vma->vm_pgoff;
1499 }
1500 return -1;
1501}
1502
1503static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
1504{
1505 struct page *page;
1506
1507 mutex_lock(&udev->cmdr_lock);
1508 page = tcmu_get_block_page(udev, dbi);
1509 if (likely(page)) {
1510 mutex_unlock(&udev->cmdr_lock);
1511 return page;
1512 }
1513
1514 /*
1515	 * Userspace messed up and passed in an address not in the
1516 * data iov passed to it.
1517 */
1518 pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
1519 dbi, udev->name);
1520 page = NULL;
1521 mutex_unlock(&udev->cmdr_lock);
1522
1523 return page;
1524}
1525
1526static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
1527{
1528 struct tcmu_dev *udev = vmf->vma->vm_private_data;
1529 struct uio_info *info = &udev->uio_info;
1530 struct page *page;
1531 unsigned long offset;
1532 void *addr;
1533
1534 int mi = tcmu_find_mem_index(vmf->vma);
1535 if (mi < 0)
1536 return VM_FAULT_SIGBUS;
1537
1538 /*
1539 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
1540 * to use mem[N].
1541 */
1542 offset = (vmf->pgoff - mi) << PAGE_SHIFT;
1543
1544 if (offset < udev->data_off) {
1545 /* For the vmalloc()ed cmd area pages */
1546 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
1547 page = vmalloc_to_page(addr);
1548 } else {
1549 uint32_t dbi;
1550
1551 /* For the dynamically growing data area pages */
1552 dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
1553 page = tcmu_try_get_block_page(udev, dbi);
1554 if (!page)
1555 return VM_FAULT_SIGBUS;
1556 }
1557
1558 get_page(page);
1559 vmf->page = page;
1560 return 0;
1561}
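
/*
 * Worked fault example (illustrative numbers, assuming 4 KiB pages and
 * data_off == CMDR_SIZE == 8 MiB): a fault at offset 8 MiB + 12 KiB
 * lands in the data area and resolves to dbi = 12 KiB / DATA_BLOCK_SIZE
 * = 3, the fourth data block; a fault below 8 MiB resolves through
 * vmalloc_to_page() into the command ring pages instead.
 */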
1562
1563static const struct vm_operations_struct tcmu_vm_ops = {
1564 .fault = tcmu_vma_fault,
1565};
1566
1567static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1568{
1569 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1570
1571 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1572 vma->vm_ops = &tcmu_vm_ops;
1573
1574 vma->vm_private_data = udev;
1575
1576 /* Ensure the mmap is exactly the right size */
1577 if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
1578 return -EINVAL;
1579
1580 return 0;
1581}
1582
1583static int tcmu_open(struct uio_info *info, struct inode *inode)
1584{
1585 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1586
1587	/* O_EXCL is not supported for char devs, so fake it with a flag */
1588 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1589 return -EBUSY;
1590
1591 udev->inode = inode;
1592 kref_get(&udev->kref);
1593
1594 pr_debug("open\n");
1595
1596 return 0;
1597}
1598
1599static void tcmu_dev_call_rcu(struct rcu_head *p)
1600{
1601 struct se_device *dev = container_of(p, struct se_device, rcu_head);
1602 struct tcmu_dev *udev = TCMU_DEV(dev);
1603
1604 kfree(udev->uio_info.name);
1605 kfree(udev->name);
1606 kfree(udev);
1607}
1608
1609static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1610{
1611 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1612 kmem_cache_free(tcmu_cmd_cache, cmd);
1613 return 0;
1614 }
1615 return -EINVAL;
1616}
1617
1618static void tcmu_blocks_release(struct radix_tree_root *blocks,
1619 int start, int end)
1620{
1621 int i;
1622 struct page *page;
1623
1624 for (i = start; i < end; i++) {
1625 page = radix_tree_delete(blocks, i);
1626 if (page) {
1627 __free_page(page);
1628 atomic_dec(&global_db_count);
1629 }
1630 }
1631}
1632
1633static void tcmu_dev_kref_release(struct kref *kref)
1634{
1635 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
1636 struct se_device *dev = &udev->se_dev;
1637 struct tcmu_cmd *cmd;
1638 bool all_expired = true;
1639 int i;
1640
1641 vfree(udev->mb_addr);
1642 udev->mb_addr = NULL;
1643
1644 spin_lock_bh(&timed_out_udevs_lock);
1645 if (!list_empty(&udev->timedout_entry))
1646 list_del(&udev->timedout_entry);
1647 spin_unlock_bh(&timed_out_udevs_lock);
1648
1649 /* Upper layer should drain all requests before calling this */
1650 mutex_lock(&udev->cmdr_lock);
1651 idr_for_each_entry(&udev->commands, cmd, i) {
1652 if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1653 all_expired = false;
1654 }
1655 idr_destroy(&udev->commands);
1656 WARN_ON(!all_expired);
1657
1658 tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
1659 bitmap_free(udev->data_bitmap);
1660 mutex_unlock(&udev->cmdr_lock);
1661
1662 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1663}
1664
1665static int tcmu_release(struct uio_info *info, struct inode *inode)
1666{
1667 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1668
1669 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1670
1671 pr_debug("close\n");
1672 /* release ref from open */
1673 kref_put(&udev->kref, tcmu_dev_kref_release);
1674 return 0;
1675}
1676
1677static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1678{
1679 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1680
1681 if (!tcmu_kern_cmd_reply_supported)
1682 return 0;
1683
1684 if (udev->nl_reply_supported <= 0)
1685 return 0;
1686
1687 mutex_lock(&tcmu_nl_cmd_mutex);
1688
1689 if (tcmu_netlink_blocked) {
1690 mutex_unlock(&tcmu_nl_cmd_mutex);
1691 pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
1692 udev->name);
1693 return -EAGAIN;
1694 }
1695
1696 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
1697 mutex_unlock(&tcmu_nl_cmd_mutex);
1698 pr_warn("netlink cmd %d already executing on %s\n",
1699 nl_cmd->cmd, udev->name);
1700 return -EBUSY;
1701 }
1702
1703 memset(nl_cmd, 0, sizeof(*nl_cmd));
1704 nl_cmd->cmd = cmd;
1705 nl_cmd->udev = udev;
1706 init_completion(&nl_cmd->complete);
1707 INIT_LIST_HEAD(&nl_cmd->nl_list);
1708
1709 list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
1710
1711 mutex_unlock(&tcmu_nl_cmd_mutex);
1712 return 0;
1713}
1714
1715static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
1716{
1717 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1718
1719 if (!tcmu_kern_cmd_reply_supported)
1720 return;
1721
1722 if (udev->nl_reply_supported <= 0)
1723 return;
1724
1725 mutex_lock(&tcmu_nl_cmd_mutex);
1726
1727 list_del(&nl_cmd->nl_list);
1728 memset(nl_cmd, 0, sizeof(*nl_cmd));
1729
1730 mutex_unlock(&tcmu_nl_cmd_mutex);
1731}
1732
1733static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
1734{
1735 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1736 int ret;
1737
1738 if (!tcmu_kern_cmd_reply_supported)
1739 return 0;
1740
1741 if (udev->nl_reply_supported <= 0)
1742 return 0;
1743
1744 pr_debug("sleeping for nl reply\n");
1745 wait_for_completion(&nl_cmd->complete);
1746
1747 mutex_lock(&tcmu_nl_cmd_mutex);
1748 nl_cmd->cmd = TCMU_CMD_UNSPEC;
1749 ret = nl_cmd->status;
1750 mutex_unlock(&tcmu_nl_cmd_mutex);
1751
1752 return ret;
1753}
1754
1755static int tcmu_netlink_event_init(struct tcmu_dev *udev,
1756 enum tcmu_genl_cmd cmd,
1757 struct sk_buff **buf, void **hdr)
1758{
1759 struct sk_buff *skb;
1760 void *msg_header;
1761 int ret = -ENOMEM;
1762
1763 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1764 if (!skb)
1765 return ret;
1766
1767 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
1768 if (!msg_header)
1769 goto free_skb;
1770
1771 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
1772 if (ret < 0)
1773 goto free_skb;
1774
1775 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
1776 if (ret < 0)
1777 goto free_skb;
1778
1779 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
1780 if (ret < 0)
1781 goto free_skb;
1782
1783 *buf = skb;
1784 *hdr = msg_header;
1785 return ret;
1786
1787free_skb:
1788 nlmsg_free(skb);
1789 return ret;
1790}
1791
1792static int tcmu_netlink_event_send(struct tcmu_dev *udev,
1793 enum tcmu_genl_cmd cmd,
1794 struct sk_buff *skb, void *msg_header)
1795{
1796 int ret;
1797
1798 genlmsg_end(skb, msg_header);
1799
1800 ret = tcmu_init_genl_cmd_reply(udev, cmd);
1801 if (ret) {
1802 nlmsg_free(skb);
1803 return ret;
1804 }
1805
1806 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1807 TCMU_MCGRP_CONFIG, GFP_KERNEL);
1808
1809 /* Wait during an add as the listener may not be up yet */
1810 if (ret == 0 ||
1811 (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
1812 return tcmu_wait_genl_cmd_reply(udev);
1813 else
1814 tcmu_destroy_genl_cmd_reply(udev);
1815
1816 return ret;
1817}
1818
1819static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
1820{
1821 struct sk_buff *skb = NULL;
1822 void *msg_header = NULL;
1823 int ret = 0;
1824
1825 ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
1826 &msg_header);
1827 if (ret < 0)
1828 return ret;
1829 return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
1830 msg_header);
1831}
1832
1833static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
1834{
1835 struct sk_buff *skb = NULL;
1836 void *msg_header = NULL;
1837 int ret = 0;
1838
1839 ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
1840 &skb, &msg_header);
1841 if (ret < 0)
1842 return ret;
1843 return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
1844 skb, msg_header);
1845}
1846
1847static int tcmu_update_uio_info(struct tcmu_dev *udev)
1848{
1849 struct tcmu_hba *hba = udev->hba->hba_ptr;
1850 struct uio_info *info;
1851 char *str;
1852
1853 info = &udev->uio_info;
1854
1855 if (udev->dev_config[0])
1856 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
1857 udev->name, udev->dev_config);
1858 else
1859 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
1860 udev->name);
1861 if (!str)
1862 return -ENOMEM;
1863
1864 /* If the old string exists, free it */
1865 kfree(info->name);
1866 info->name = str;
1867
1868 return 0;
1869}
1870
1871static int tcmu_configure_device(struct se_device *dev)
1872{
1873 struct tcmu_dev *udev = TCMU_DEV(dev);
1874 struct uio_info *info;
1875 struct tcmu_mailbox *mb;
1876 int ret = 0;
1877
1878 ret = tcmu_update_uio_info(udev);
1879 if (ret)
1880 return ret;
1881
1882 info = &udev->uio_info;
1883
1884 mutex_lock(&udev->cmdr_lock);
1885 udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
1886 mutex_unlock(&udev->cmdr_lock);
1887 if (!udev->data_bitmap) {
1888 ret = -ENOMEM;
1889 goto err_bitmap_alloc;
1890 }
1891
1892 udev->mb_addr = vzalloc(CMDR_SIZE);
1893 if (!udev->mb_addr) {
1894 ret = -ENOMEM;
1895 goto err_vzalloc;
1896 }
1897
1898 /* mailbox fits in first part of CMDR space */
1899 udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
1900 udev->data_off = CMDR_SIZE;
1901 udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
1902 udev->dbi_thresh = 0; /* Default in Idle state */
1903
1904 /* Initialise the mailbox of the ring buffer */
1905 mb = udev->mb_addr;
1906 mb->version = TCMU_MAILBOX_VERSION;
1907 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
1908 mb->cmdr_off = CMDR_OFF;
1909 mb->cmdr_size = udev->cmdr_size;
1910
1911 WARN_ON(!PAGE_ALIGNED(udev->data_off));
1912 WARN_ON(udev->data_size % PAGE_SIZE);
1913 WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
1914
1915 info->version = __stringify(TCMU_MAILBOX_VERSION);
1916
1917 info->mem[0].name = "tcm-user command & data buffer";
1918 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
1919 info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
1920 info->mem[0].memtype = UIO_MEM_NONE;
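
	/*
	 * Resulting single-mapping layout (proportions illustrative):
	 *
	 *   [ mailbox | command ring ....... | data area .............. ]
	 *   0         CMDR_OFF               data_off (== CMDR_SIZE)
	 *                                    ...  data_off + data_size
	 */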
1921
1922 info->irqcontrol = tcmu_irqcontrol;
1923 info->irq = UIO_IRQ_CUSTOM;
1924
1925 info->mmap = tcmu_mmap;
1926 info->open = tcmu_open;
1927 info->release = tcmu_release;
1928
1929 ret = uio_register_device(tcmu_root_device, info);
1930 if (ret)
1931 goto err_register;
1932
1933	/* The user can set hw_block_size before enabling the device */
1934 if (dev->dev_attrib.hw_block_size == 0)
1935 dev->dev_attrib.hw_block_size = 512;
1936 /* Other attributes can be configured in userspace */
1937 if (!dev->dev_attrib.hw_max_sectors)
1938 dev->dev_attrib.hw_max_sectors = 128;
1939 if (!dev->dev_attrib.emulate_write_cache)
1940 dev->dev_attrib.emulate_write_cache = 0;
1941 dev->dev_attrib.hw_queue_depth = 128;
1942
1943	/* If the user didn't explicitly disable netlink reply support, use
1944	 * the module-scope setting.
1945 */
1946 if (udev->nl_reply_supported >= 0)
1947 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
1948
1949 /*
1950	 * Get a ref in case userspace closes the uio device before LIO
1951	 * has initiated tcmu_free_device.
1952 */
1953 kref_get(&udev->kref);
1954
1955 ret = tcmu_send_dev_add_event(udev);
1956 if (ret)
1957 goto err_netlink;
1958
1959 mutex_lock(&root_udev_mutex);
1960 list_add(&udev->node, &root_udev);
1961 mutex_unlock(&root_udev_mutex);
1962
1963 return 0;
1964
1965err_netlink:
1966 kref_put(&udev->kref, tcmu_dev_kref_release);
1967 uio_unregister_device(&udev->uio_info);
1968err_register:
1969 vfree(udev->mb_addr);
1970 udev->mb_addr = NULL;
1971err_vzalloc:
1972 bitmap_free(udev->data_bitmap);
1973 udev->data_bitmap = NULL;
1974err_bitmap_alloc:
1975 kfree(info->name);
1976 info->name = NULL;
1977
1978 return ret;
1979}
1980
1981static void tcmu_free_device(struct se_device *dev)
1982{
1983 struct tcmu_dev *udev = TCMU_DEV(dev);
1984
1985 /* release ref from init */
1986 kref_put(&udev->kref, tcmu_dev_kref_release);
1987}
1988
1989static void tcmu_destroy_device(struct se_device *dev)
1990{
1991 struct tcmu_dev *udev = TCMU_DEV(dev);
1992
1993 del_timer_sync(&udev->cmd_timer);
1994 del_timer_sync(&udev->qfull_timer);
1995
1996 mutex_lock(&root_udev_mutex);
1997 list_del(&udev->node);
1998 mutex_unlock(&root_udev_mutex);
1999
2000 tcmu_send_dev_remove_event(udev);
2001
2002 uio_unregister_device(&udev->uio_info);
2003
2004 /* release ref from configure */
2005 kref_put(&udev->kref, tcmu_dev_kref_release);
2006}
2007
2008static void tcmu_unblock_dev(struct tcmu_dev *udev)
2009{
2010 mutex_lock(&udev->cmdr_lock);
2011 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
2012 mutex_unlock(&udev->cmdr_lock);
2013}
2014
2015static void tcmu_block_dev(struct tcmu_dev *udev)
2016{
2017 mutex_lock(&udev->cmdr_lock);
2018
2019 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2020 goto unlock;
2021
2022 /* complete IO that has executed successfully */
2023 tcmu_handle_completions(udev);
2024 /* fail IO waiting to be queued */
2025 run_qfull_queue(udev, true);
2026
2027unlock:
2028 mutex_unlock(&udev->cmdr_lock);
2029}
2030
2031static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2032{
2033 struct tcmu_mailbox *mb;
2034 struct tcmu_cmd *cmd;
2035 int i;
2036
2037 mutex_lock(&udev->cmdr_lock);
2038
2039 idr_for_each_entry(&udev->commands, cmd, i) {
2040 if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
2041 continue;
2042
2043 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
2044 cmd->cmd_id, udev->name,
2045 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
2046
2047 idr_remove(&udev->commands, i);
2048 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
2049 WARN_ON(!cmd->se_cmd);
2050 list_del_init(&cmd->queue_entry);
2051 if (err_level == 1) {
2052 /*
2053 * Userspace was not able to start the
2054 * command or it is retryable.
2055 */
2056 target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
2057 } else {
2058 /* hard failure */
2059 target_complete_cmd(cmd->se_cmd,
2060 SAM_STAT_CHECK_CONDITION);
2061 }
2062 }
2063 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
2064 tcmu_free_cmd(cmd);
2065 }
2066
2067 mb = udev->mb_addr;
2068 tcmu_flush_dcache_range(mb, sizeof(*mb));
2069 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
2070 mb->cmd_tail, mb->cmd_head);
2071
2072 udev->cmdr_last_cleaned = 0;
2073 mb->cmd_tail = 0;
2074 mb->cmd_head = 0;
2075 tcmu_flush_dcache_range(mb, sizeof(*mb));
2076
2077 del_timer(&udev->cmd_timer);
2078
2079 mutex_unlock(&udev->cmdr_lock);
2080}
2081
2082enum {
2083 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
2084 Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
2085};
2086
2087static match_table_t tokens = {
2088 {Opt_dev_config, "dev_config=%s"},
2089 {Opt_dev_size, "dev_size=%s"},
2090 {Opt_hw_block_size, "hw_block_size=%d"},
2091 {Opt_hw_max_sectors, "hw_max_sectors=%d"},
2092 {Opt_nl_reply_supported, "nl_reply_supported=%d"},
2093 {Opt_max_data_area_mb, "max_data_area_mb=%d"},
2094 {Opt_err, NULL}
2095};
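
/*
 * Example control string (hypothetical values) as parsed by
 * tcmu_set_configfs_dev_params() below, with options separated by
 * ',' or '\n':
 *
 *   dev_config=foo/bar,dev_size=1073741824,max_data_area_mb=64
 */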
2096
2097static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
2098{
2099 int val, ret;
2100
2101 ret = match_int(arg, &val);
2102 if (ret < 0) {
2103 pr_err("match_int() failed for dev attrib. Error %d.\n",
2104 ret);
2105 return ret;
2106 }
2107
2108 if (val <= 0) {
2109 pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
2110 val);
2111 return -EINVAL;
2112 }
2113 *dev_attrib = val;
2114 return 0;
2115}
2116
2117static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
2118{
2119 int val, ret;
2120
2121 ret = match_int(arg, &val);
2122 if (ret < 0) {
2123 pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
2124 ret);
2125 return ret;
2126 }
2127
2128 if (val <= 0) {
2129 pr_err("Invalid max_data_area %d.\n", val);
2130 return -EINVAL;
2131 }
2132
2133 mutex_lock(&udev->cmdr_lock);
2134 if (udev->data_bitmap) {
2135		pr_err("Cannot set max_data_area_mb after the device has been enabled.\n");
2136 ret = -EINVAL;
2137 goto unlock;
2138 }
2139
2140 udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
2141 if (udev->max_blocks > tcmu_global_max_blocks) {
2142 pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
2143 val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
2144 udev->max_blocks = tcmu_global_max_blocks;
2145 }
2146
2147unlock:
2148 mutex_unlock(&udev->cmdr_lock);
2149 return ret;
2150}
2151
2152static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
2153 const char *page, ssize_t count)
2154{
2155 struct tcmu_dev *udev = TCMU_DEV(dev);
2156 char *orig, *ptr, *opts;
2157 substring_t args[MAX_OPT_ARGS];
2158 int ret = 0, token;
2159
2160 opts = kstrdup(page, GFP_KERNEL);
2161 if (!opts)
2162 return -ENOMEM;
2163
2164 orig = opts;
2165
2166 while ((ptr = strsep(&opts, ",\n")) != NULL) {
2167 if (!*ptr)
2168 continue;
2169
2170 token = match_token(ptr, tokens, args);
2171 switch (token) {
2172 case Opt_dev_config:
2173 if (match_strlcpy(udev->dev_config, &args[0],
2174 TCMU_CONFIG_LEN) == 0) {
2175 ret = -EINVAL;
2176 break;
2177 }
2178 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
2179 break;
2180 case Opt_dev_size:
2181 ret = match_u64(&args[0], &udev->dev_size);
2182 if (ret < 0)
2183 pr_err("match_u64() failed for dev_size=. Error %d.\n",
2184 ret);
2185 break;
2186 case Opt_hw_block_size:
2187 ret = tcmu_set_dev_attrib(&args[0],
2188 &(dev->dev_attrib.hw_block_size));
2189 break;
2190 case Opt_hw_max_sectors:
2191 ret = tcmu_set_dev_attrib(&args[0],
2192 &(dev->dev_attrib.hw_max_sectors));
2193 break;
2194 case Opt_nl_reply_supported:
2195 ret = match_int(&args[0], &udev->nl_reply_supported);
2196 if (ret < 0)
2197 pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
2198 ret);
2199 break;
2200 case Opt_max_data_area_mb:
2201 ret = tcmu_set_max_blocks_param(udev, &args[0]);
2202 break;
2203 default:
2204 break;
2205 }
2206
2207 if (ret)
2208 break;
2209 }
2210
2211 kfree(orig);
2212 return (!ret) ? count : ret;
2213}
2214
2215static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
2216{
2217 struct tcmu_dev *udev = TCMU_DEV(dev);
2218 ssize_t bl = 0;
2219
2220 bl = sprintf(b + bl, "Config: %s ",
2221 udev->dev_config[0] ? udev->dev_config : "NULL");
2222 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
2223 bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
2224 TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2225
2226 return bl;
2227}
2228
2229static sector_t tcmu_get_blocks(struct se_device *dev)
2230{
2231 struct tcmu_dev *udev = TCMU_DEV(dev);
2232
2233 return div_u64(udev->dev_size - dev->dev_attrib.block_size,
2234 dev->dev_attrib.block_size);
2235}
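
/*
 * Worked example (illustrative values): for dev_size = 1 GiB and a
 * 512-byte block_size this returns (1073741824 - 512) / 512 = 2097151,
 * i.e. the last addressable LBA rather than the total block count.
 */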
2236
2237static sense_reason_t
2238tcmu_parse_cdb(struct se_cmd *cmd)
2239{
2240 return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
2241}
2242
2243static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
2244{
2245 struct se_dev_attrib *da = container_of(to_config_group(item),
2246 struct se_dev_attrib, da_group);
2247 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2248
2249 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
2250}
2251
2252static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
2253 size_t count)
2254{
2255 struct se_dev_attrib *da = container_of(to_config_group(item),
2256 struct se_dev_attrib, da_group);
2257 struct tcmu_dev *udev = container_of(da->da_dev,
2258 struct tcmu_dev, se_dev);
2259 u32 val;
2260 int ret;
2261
2262 if (da->da_dev->export_count) {
2263 pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
2264 return -EINVAL;
2265 }
2266
2267 ret = kstrtou32(page, 0, &val);
2268 if (ret < 0)
2269 return ret;
2270
2271 udev->cmd_time_out = val * MSEC_PER_SEC;
2272 return count;
2273}
2274CONFIGFS_ATTR(tcmu_, cmd_time_out);
2275
2276static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
2277{
2278 struct se_dev_attrib *da = container_of(to_config_group(item),
2279 struct se_dev_attrib, da_group);
2280 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2281
2282 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
2283 udev->qfull_time_out :
2284 udev->qfull_time_out / MSEC_PER_SEC);
2285}
2286
2287static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
2288 const char *page, size_t count)
2289{
2290 struct se_dev_attrib *da = container_of(to_config_group(item),
2291 struct se_dev_attrib, da_group);
2292 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2293 s32 val;
2294 int ret;
2295
2296 ret = kstrtos32(page, 0, &val);
2297 if (ret < 0)
2298 return ret;
2299
2300 if (val >= 0) {
2301 udev->qfull_time_out = val * MSEC_PER_SEC;
2302 } else if (val == -1) {
2303 udev->qfull_time_out = val;
2304 } else {
2305		pr_err("Invalid qfull timeout value %d\n", val);
2306 return -EINVAL;
2307 }
2308 return count;
2309}
2310CONFIGFS_ATTR(tcmu_, qfull_time_out);
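
/*
 * Illustrative settings: writing "0" makes add_to_qfull_queue() refuse
 * to queue at all, "-1" restores the backwards-compatible fallback to
 * cmd_time_out, and any positive value is taken as a timeout in
 * seconds.
 */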
2311
2312static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
2313{
2314 struct se_dev_attrib *da = container_of(to_config_group(item),
2315 struct se_dev_attrib, da_group);
2316 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2317
2318 return snprintf(page, PAGE_SIZE, "%u\n",
2319 TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2320}
2321CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
2322
2323static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
2324{
2325 struct se_dev_attrib *da = container_of(to_config_group(item),
2326 struct se_dev_attrib, da_group);
2327 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2328
2329 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
2330}
2331
2332static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
2333 const char *reconfig_data)
2334{
2335 struct sk_buff *skb = NULL;
2336 void *msg_header = NULL;
2337 int ret = 0;
2338
2339 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2340 &skb, &msg_header);
2341 if (ret < 0)
2342 return ret;
2343 ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
2344 if (ret < 0) {
2345 nlmsg_free(skb);
2346 return ret;
2347 }
2348 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2349 skb, msg_header);
2350}
2351
2352
2353static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
2354 size_t count)
2355{
2356 struct se_dev_attrib *da = container_of(to_config_group(item),
2357 struct se_dev_attrib, da_group);
2358 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2359 int ret, len;
2360
2361 len = strlen(page);
2362 if (!len || len > TCMU_CONFIG_LEN - 1)
2363 return -EINVAL;
2364
2365 /* Check if device has been configured before */
2366 if (target_dev_configured(&udev->se_dev)) {
2367 ret = tcmu_send_dev_config_event(udev, page);
2368 if (ret) {
2369 pr_err("Unable to reconfigure device\n");
2370 return ret;
2371 }
2372 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2373
2374 ret = tcmu_update_uio_info(udev);
2375 if (ret)
2376 return ret;
2377 return count;
2378 }
2379 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2380
2381 return count;
2382}
2383CONFIGFS_ATTR(tcmu_, dev_config);
2384
2385static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
2386{
2387 struct se_dev_attrib *da = container_of(to_config_group(item),
2388 struct se_dev_attrib, da_group);
2389 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2390
2391 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
2392}
2393
2394static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
2395{
2396 struct sk_buff *skb = NULL;
2397 void *msg_header = NULL;
2398 int ret = 0;
2399
2400 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2401 &skb, &msg_header);
2402 if (ret < 0)
2403 return ret;
2404 ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
2405 size, TCMU_ATTR_PAD);
2406 if (ret < 0) {
2407 nlmsg_free(skb);
2408 return ret;
2409 }
2410 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2411 skb, msg_header);
2412}
2413
2414static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
2415 size_t count)
2416{
2417 struct se_dev_attrib *da = container_of(to_config_group(item),
2418 struct se_dev_attrib, da_group);
2419 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2420 u64 val;
2421 int ret;
2422
2423 ret = kstrtou64(page, 0, &val);
2424 if (ret < 0)
2425 return ret;
2426
2427 /* Check if device has been configured before */
2428 if (target_dev_configured(&udev->se_dev)) {
2429 ret = tcmu_send_dev_size_event(udev, val);
2430 if (ret) {
2431 pr_err("Unable to reconfigure device\n");
2432 return ret;
2433 }
2434 }
2435 udev->dev_size = val;
2436 return count;
2437}
2438CONFIGFS_ATTR(tcmu_, dev_size);
2439
2440static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
2441 char *page)
2442{
2443 struct se_dev_attrib *da = container_of(to_config_group(item),
2444 struct se_dev_attrib, da_group);
2445 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2446
2447 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
2448}
2449
2450static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
2451 const char *page, size_t count)
2452{
2453 struct se_dev_attrib *da = container_of(to_config_group(item),
2454 struct se_dev_attrib, da_group);
2455 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2456 s8 val;
2457 int ret;
2458
2459 ret = kstrtos8(page, 0, &val);
2460 if (ret < 0)
2461 return ret;
2462
2463 udev->nl_reply_supported = val;
2464 return count;
2465}
2466CONFIGFS_ATTR(tcmu_, nl_reply_supported);
2467
2468static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
2469 char *page)
2470{
2471 struct se_dev_attrib *da = container_of(to_config_group(item),
2472 struct se_dev_attrib, da_group);
2473
2474 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
2475}
2476
2477static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
2478{
2479 struct sk_buff *skb = NULL;
2480 void *msg_header = NULL;
2481 int ret = 0;
2482
2483 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2484 &skb, &msg_header);
2485 if (ret < 0)
2486 return ret;
2487 ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
2488 if (ret < 0) {
2489 nlmsg_free(skb);
2490 return ret;
2491 }
2492 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2493 skb, msg_header);
2494}
2495
2496static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
2497 const char *page, size_t count)
2498{
2499 struct se_dev_attrib *da = container_of(to_config_group(item),
2500 struct se_dev_attrib, da_group);
2501 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2502 u8 val;
2503 int ret;
2504
2505 ret = kstrtou8(page, 0, &val);
2506 if (ret < 0)
2507 return ret;
2508
2509 /* Check if device has been configured before */
2510 if (target_dev_configured(&udev->se_dev)) {
2511 ret = tcmu_send_emulate_write_cache(udev, val);
2512 if (ret) {
2513 pr_err("Unable to reconfigure device\n");
2514 return ret;
2515 }
2516 }
2517
2518 da->emulate_write_cache = val;
2519 return count;
2520}
2521CONFIGFS_ATTR(tcmu_, emulate_write_cache);
2522
2523static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
2524{
2525 struct se_device *se_dev = container_of(to_config_group(item),
2526 struct se_device,
2527 dev_action_group);
2528 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2529
2530 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2531 return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
2532 else
2533 return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
2534}
2535
2536static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
2537 size_t count)
2538{
2539 struct se_device *se_dev = container_of(to_config_group(item),
2540 struct se_device,
2541 dev_action_group);
2542 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2543 u8 val;
2544 int ret;
2545
2546 if (!target_dev_configured(&udev->se_dev)) {
2547 pr_err("Device is not configured.\n");
2548 return -EINVAL;
2549 }
2550
2551 ret = kstrtou8(page, 0, &val);
2552 if (ret < 0)
2553 return ret;
2554
2555 if (val > 1) {
2556 pr_err("Invalid block value %d\n", val);
2557 return -EINVAL;
2558 }
2559
2560 if (!val)
2561 tcmu_unblock_dev(udev);
2562 else
2563 tcmu_block_dev(udev);
2564 return count;
2565}
2566CONFIGFS_ATTR(tcmu_, block_dev);
2567
2568static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
2569 size_t count)
2570{
2571 struct se_device *se_dev = container_of(to_config_group(item),
2572 struct se_device,
2573 dev_action_group);
2574 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2575 u8 val;
2576 int ret;
2577
2578 if (!target_dev_configured(&udev->se_dev)) {
2579 pr_err("Device is not configured.\n");
2580 return -EINVAL;
2581 }
2582
2583 ret = kstrtou8(page, 0, &val);
2584 if (ret < 0)
2585 return ret;
2586
2587 if (val != 1 && val != 2) {
2588 pr_err("Invalid reset ring value %d\n", val);
2589 return -EINVAL;
2590 }
2591
2592 tcmu_reset_ring(udev, val);
2593 return count;
2594}
2595CONFIGFS_ATTR_WO(tcmu_, reset_ring);
2596
2597static struct configfs_attribute *tcmu_attrib_attrs[] = {
2598 &tcmu_attr_cmd_time_out,
2599 &tcmu_attr_qfull_time_out,
2600 &tcmu_attr_max_data_area_mb,
2601 &tcmu_attr_dev_config,
2602 &tcmu_attr_dev_size,
2603 &tcmu_attr_emulate_write_cache,
2604 &tcmu_attr_nl_reply_supported,
2605 NULL,
2606};
2607
2608static struct configfs_attribute **tcmu_attrs;
2609
2610static struct configfs_attribute *tcmu_action_attrs[] = {
2611 &tcmu_attr_block_dev,
2612 &tcmu_attr_reset_ring,
2613 NULL,
2614};
2615
2616static struct target_backend_ops tcmu_ops = {
2617 .name = "user",
2618 .owner = THIS_MODULE,
2619 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
2620 .attach_hba = tcmu_attach_hba,
2621 .detach_hba = tcmu_detach_hba,
2622 .alloc_device = tcmu_alloc_device,
2623 .configure_device = tcmu_configure_device,
2624 .destroy_device = tcmu_destroy_device,
2625 .free_device = tcmu_free_device,
2626 .parse_cdb = tcmu_parse_cdb,
2627 .set_configfs_dev_params = tcmu_set_configfs_dev_params,
2628 .show_configfs_dev_params = tcmu_show_configfs_dev_params,
2629 .get_device_type = sbc_get_device_type,
2630 .get_blocks = tcmu_get_blocks,
2631 .tb_dev_action_attrs = tcmu_action_attrs,
2632};
2633
2634static void find_free_blocks(void)
2635{
2636 struct tcmu_dev *udev;
2637 loff_t off;
2638 u32 start, end, block, total_freed = 0;
2639
2640 if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
2641 return;
2642
2643 mutex_lock(&root_udev_mutex);
2644 list_for_each_entry(udev, &root_udev, node) {
2645 mutex_lock(&udev->cmdr_lock);
2646
2647 if (!target_dev_configured(&udev->se_dev)) {
2648 mutex_unlock(&udev->cmdr_lock);
2649 continue;
2650 }
2651
2652 /* Try to complete the finished commands first */
2653 tcmu_handle_completions(udev);
2654
2655		/* Skip udevs that are idle */
2656 if (!udev->dbi_thresh) {
2657 mutex_unlock(&udev->cmdr_lock);
2658 continue;
2659 }
2660
2661 end = udev->dbi_max + 1;
2662 block = find_last_bit(udev->data_bitmap, end);
2663 if (block == udev->dbi_max) {
2664 /*
2665			 * The last set bit is dbi_max, so it is not possible
2666			 * to reclaim any blocks.
2667 */
2668 mutex_unlock(&udev->cmdr_lock);
2669 continue;
2670 } else if (block == end) {
2671			/* The current udev goes back to the idle state */
2672 udev->dbi_thresh = start = 0;
2673 udev->dbi_max = 0;
2674 } else {
2675 udev->dbi_thresh = start = block + 1;
2676 udev->dbi_max = block;
2677 }
2678
2679		/* Unmap userspace's mappings of the data area from off to the end */
2680 off = udev->data_off + start * DATA_BLOCK_SIZE;
2681 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
2682
2683 /* Release the block pages */
2684 tcmu_blocks_release(&udev->data_blocks, start, end);
2685 mutex_unlock(&udev->cmdr_lock);
2686
2687 total_freed += end - start;
2688 pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
2689 total_freed, udev->name);
2690 }
2691 mutex_unlock(&root_udev_mutex);
2692
2693 if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
2694 schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
2695}
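
/*
 * Reclaim example (hypothetical numbers): if a device's bitmap has its
 * last set bit at block 100 while dbi_max is 500, blocks 101..500 are
 * unmapped from userspace and their pages freed, shrinking the global
 * pool back toward tcmu_global_max_blocks.
 */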
2696
2697static void check_timedout_devices(void)
2698{
2699 struct tcmu_dev *udev, *tmp_dev;
2700 LIST_HEAD(devs);
2701
2702 spin_lock_bh(&timed_out_udevs_lock);
2703 list_splice_init(&timed_out_udevs, &devs);
2704
2705 list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
2706 list_del_init(&udev->timedout_entry);
2707 spin_unlock_bh(&timed_out_udevs_lock);
2708
2709 mutex_lock(&udev->cmdr_lock);
2710 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
2711
2712 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
2713 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
2714
2715 mutex_unlock(&udev->cmdr_lock);
2716
2717 spin_lock_bh(&timed_out_udevs_lock);
2718 }
2719
2720 spin_unlock_bh(&timed_out_udevs_lock);
2721}
2722
2723static void tcmu_unmap_work_fn(struct work_struct *work)
2724{
2725 check_timedout_devices();
2726 find_free_blocks();
2727}
2728
2729static int __init tcmu_module_init(void)
2730{
2731 int ret, i, k, len = 0;
2732
2733 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
2734
2735 INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
2736
2737 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
2738 sizeof(struct tcmu_cmd),
2739 __alignof__(struct tcmu_cmd),
2740 0, NULL);
2741 if (!tcmu_cmd_cache)
2742 return -ENOMEM;
2743
2744 tcmu_root_device = root_device_register("tcm_user");
2745 if (IS_ERR(tcmu_root_device)) {
2746 ret = PTR_ERR(tcmu_root_device);
2747 goto out_free_cache;
2748 }
2749
2750 ret = genl_register_family(&tcmu_genl_family);
2751	if (ret < 0)
2752		goto out_unreg_device;
2754
2755 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
2756 len += sizeof(struct configfs_attribute *);
2757 }
2758 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
2759 len += sizeof(struct configfs_attribute *);
2760 }
2761 len += sizeof(struct configfs_attribute *);
2762
2763 tcmu_attrs = kzalloc(len, GFP_KERNEL);
2764 if (!tcmu_attrs) {
2765 ret = -ENOMEM;
2766 goto out_unreg_genl;
2767 }
2768
2769 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
2770 tcmu_attrs[i] = passthrough_attrib_attrs[i];
2771 }
2772 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
2773 tcmu_attrs[i] = tcmu_attrib_attrs[k];
2774 i++;
2775 }
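	/*
	 * tcmu_attrs now holds the passthrough attributes followed by the
	 * tcmu-specific ones; the extra pointer counted into len above is
	 * left zeroed by kzalloc() and serves as the NULL terminator.
	 */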
2776 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
2777
2778 ret = transport_backend_register(&tcmu_ops);
2779 if (ret)
2780 goto out_attrs;
2781
2782 return 0;
2783
2784out_attrs:
2785 kfree(tcmu_attrs);
2786out_unreg_genl:
2787 genl_unregister_family(&tcmu_genl_family);
2788out_unreg_device:
2789 root_device_unregister(tcmu_root_device);
2790out_free_cache:
2791 kmem_cache_destroy(tcmu_cmd_cache);
2792
2793 return ret;
2794}
2795
2796static void __exit tcmu_module_exit(void)
2797{
2798 cancel_delayed_work_sync(&tcmu_unmap_work);
2799 target_backend_unregister(&tcmu_ops);
2800 kfree(tcmu_attrs);
2801 genl_unregister_family(&tcmu_genl_family);
2802 root_device_unregister(tcmu_root_device);
2803 kmem_cache_destroy(tcmu_cmd_cache);
2804}
2805
2806MODULE_DESCRIPTION("TCM USER subsystem plugin");
2807MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
2808MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
2809MODULE_LICENSE("GPL");
2810
2811module_init(tcmu_module_init);
2812module_exit(tcmu_module_exit);