/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	/*
	 * Add the struct se_cmd to the struct se_lun's cmd list.  This list
	 * is used for tracking state of struct se_cmds during LUN shutdown
	 * events.
	 */
	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	atomic_set(&se_cmd->transport_lun_active, 1);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
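
/*
 * Illustrative call-site sketch (not part of this file): a fabric module
 * that has initialized a struct se_cmd for an incoming CDB would typically
 * resolve the LUN before submitting the command.  The surrounding names are
 * assumptions for illustration only:
 *
 *	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
 *		transport_send_check_condition_and_sense(se_cmd,
 *				se_cmd->scsi_sense_reason, 0);
 *		return;
 *	}
 *
 * The error path relies on scsi_sense_reason having been filled in above
 * before the negative return.
 */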

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries lun->lun_sep pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	kfree(nacl->device_list);
	nacl->device_list = NULL;

	return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&se_nacl->device_list_lock);
	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irq(&se_nacl->device_list_lock);
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_update_device_list_for_node():
 *
 * Enable or disable the struct se_dev_entry MappedLUN mapping between a
 * struct se_node_acl and a struct se_lun, updating ALUA port and PR state
 * as needed.
 */
int core_update_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg,
	int enable)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
	int trans = 0;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 */
	if (!enable) {
		/*
		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
		 * struct se_lun_acl, but we remove deve->alua_port_list from
		 * port->sep_alua_list.  This also means that active UAs and
		 * NodeACL context specific PR metadata for demo-mode
		 * MappedLUN *deve will be released below..
		 */
		spin_lock_bh(&port->sep_alua_lock);
		list_del(&deve->alua_port_list);
		spin_unlock_bh(&port->sep_alua_lock);
	}

	spin_lock_irq(&nacl->device_list_lock);
	if (enable) {
		/*
		 * Check if the call is handling demo mode -> explicit LUN ACL
		 * transition.  This transition must be for the same struct
		 * se_lun + mapped_lun that was setup in demo mode..
		 */
		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
			if (deve->se_lun_acl != NULL) {
				pr_err("struct se_dev_entry->se_lun_acl"
					" already set for demo mode -> explicit"
					" LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			if (deve->se_lun != lun) {
				pr_err("struct se_dev_entry->se_lun does"
					" not match passed struct se_lun for"
					" demo mode -> explicit LUN ACL"
					" transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			deve->se_lun_acl = lun_acl;
			trans = 1;
		} else {
			deve->se_lun = lun;
			deve->se_lun_acl = lun_acl;
			deve->mapped_lun = mapped_lun;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
		}

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		if (trans) {
			spin_unlock_irq(&nacl->device_list_lock);
			return 0;
		}
		deve->creation_time = get_jiffies_64();
		deve->attach_count++;
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		return 0;
	}
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	spin_unlock_irq(&nacl->device_list_lock);
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();
	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 * Walk every struct se_node_acl in the TPG and release any MappedLUN
 * struct se_dev_entry still referencing the passed struct se_lun.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = &nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_update_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg, 0);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this
	 * struct se_device.  Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code         Description
	 * 0h           Reserved
	 * 1h           Relative port 1, historically known as port A
	 * 2h           Relative port 2, historically known as port B
	 * 3h to FFFFh  Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
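
/*
 * Note on the allocator above: dev_rpti_counter is a 16-bit counter, so
 * after 65535 allocations it wraps back to 0, which the "goto again" skips
 * because RTPI 0h is reserved; the list scan then guarantees the new value
 * does not collide with an RTPI still in use on dev_sep_list.
 */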

static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			su_dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}

int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun;
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned char *buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

	if (list_empty(&se_cmd->t_task_list)) {
		pr_err("Unable to locate struct se_task for struct se_cmd\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	buf = transport_kmap_first_data_page(se_cmd);

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!se_sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &se_sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		se_lun = deve->se_lun;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((cdb_offset + 8) >= se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
		cdb_offset += 8;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	/*
	 * Fill in the LUN LIST LENGTH header before unmapping the buffer.
	 */
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);
	transport_kunmap_first_data_page(se_cmd);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
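
/*
 * For reference, the REPORT LUNS parameter data built above (layout per the
 * SPC revisions cited in the function):
 *
 *	Bytes 0-3 : LUN LIST LENGTH in bytes (lun_count * 8, counting all
 *	            mapped LUNs, even those beyond the allocation length)
 *	Bytes 4-7 : Reserved
 *	Byte  8+  : one 8-byte LUN entry per reported LUN
 */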

/* se_release_device_for_hba():
 *
 * Stop and release a struct se_device, and detach it from its owning
 * struct se_hba.
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev);
}

void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}

/* se_free_virtual_device():
 *
 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	if (!list_empty(&dev->dev_sep_list))
		dump_stack();

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}

static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

int se_dev_check_online(struct se_device *dev)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->dev_status_lock, flags);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irqrestore(&dev->dev_status_lock, flags);

	return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 tmp, aligned_max_sectors;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
	aligned_max_sectors = (tmp / block_size);
	if (max_sectors != aligned_max_sectors) {
		pr_info("Rounding down aligned max_sectors from %u"
			" to %u\n", max_sectors, aligned_max_sectors);
		return aligned_max_sectors;
	}

	return max_sectors;
}
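
/*
 * Worked example for the rounding above (illustrative numbers only): with
 * block_size = 512, max_sectors = 1023 and PAGE_SIZE = 4096, the byte count
 * 1023 * 512 = 523776 rounds down to 520192 (127 pages), so the aligned
 * value becomes 520192 / 512 = 1016 sectors.
 */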

void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
		limits->logical_block_size);
	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
	/*
	 * Set optimal_sectors from max_sectors, which can be lowered via
	 * configfs.
	 */
	dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}
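
/*
 * Sketch of a typical caller (illustrative; see the IBLOCK/FILEIO/RAMDISK
 * backends for the real code): a subsystem plugin fills out struct
 * se_dev_limits from its backing store before the core invokes
 * se_dev_set_default_attribs(), roughly along the lines of:
 *
 *	dev_limits.limits.logical_block_size = bdev_logical_block_size(bd);
 *	dev_limits.limits.max_hw_sectors = queue_max_hw_sectors(q);
 *	dev_limits.limits.max_sectors = queue_max_sectors(q);
 *	dev_limits.hw_queue_depth = q->nr_requests;
 *	dev_limits.queue_depth = q->nr_requests;
 *
 * where bd/q stand for the backing block_device and its request_queue.
 */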

int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
		pr_err("dev[%p]: Passed task_timeout: %u larger than"
			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
		return -EINVAL;
	} else {
		dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
		pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
			dev, task_timeout);
	}

	return 0;
}

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->dpo_emulated == NULL) {
		pr_err("dev->transport->dpo_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->dpo_emulated(dev) == 0) {
		pr_err("dev->transport->dpo_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
	pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
		" bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->fua_write_emulated == NULL) {
		pr_err("dev->transport->fua_write_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->fua_write_emulated(dev) == 0) {
		pr_err("dev->transport->fua_write_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->fua_read_emulated == NULL) {
		pr_err("dev->transport->fua_read_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->fua_read_emulated(dev) == 0) {
		pr_err("dev->transport->fua_read_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->write_cache_emulated == NULL) {
		pr_err("dev->transport->write_cache_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->write_cache_emulated(dev) == 0) {
		pr_err("dev->transport->write_cache_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	u32 orig_queue_depth = dev->queue_depth;

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	if (queue_depth > orig_queue_depth)
		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
	else if (queue_depth < orig_queue_depth)
		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}
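
/*
 * Illustrative arithmetic for the depth_left adjustment above: raising
 * queue_depth from 32 to 48 on an otherwise idle device adds 16 credits to
 * dev->depth_left, while lowering it from 32 to 24 subtracts 8; credits
 * already consumed by in-flight commands are unaffected.
 */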

int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
	int force = 0; /* Force setting for VDEVS */

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" max_sectors\n", dev);
		return -EINVAL;
	}
	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, max_sectors,
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (!force && (max_sectors >
				 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors"
				": %u, use force=1 to override.\n", dev,
				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	max_sectors = se_dev_align_max_sectors(max_sectors,
		dev->se_sub_dev->se_dev_attrib.block_size);

	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
		dev, max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than max_sectors: %u\n", dev,
			optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
		dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
		dev, block_size);
	return 0;
}

struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_hba *hba,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	u32 lun_access = 0;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return NULL;
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if ((IS_ERR(lun_p)) || !lun_p)
		return NULL;

	if (dev->dev_flags & DF_READ_ONLY)
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
		return NULL;

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}

/* core_dev_del_lun():
 *
 * Deactivate and remove an active struct se_lun from a TPG.
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int ret = 0;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
	if (!lun)
		return ret;

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 * Return the active struct se_lun for unpacked_lun, or NULL if it is out
 * of range or not in TRANSPORT_LUN_STATUS_ACTIVE state.
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!nacl) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg, 1) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}
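
/*
 * Note (illustrative, not part of this file): the MappedLUN ACL helpers
 * above and below back the configfs interface built by
 * target_core_fabric_configfs.c.  A hypothetical admin-side equivalent for
 * a fabric module named $FABRIC looks roughly like:
 *
 *	mkdir /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/acls/$INITIATOR/lun_0
 *	echo 1 > .../acls/$INITIATOR/lun_0/write_protect
 *
 * Exact path components depend on the fabric module's naming.
 */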

/* core_dev_del_initiator_node_lun_acl():
 *
 * Remove an existing MappedLUN ACL from a struct se_lun and release the
 * struct se_dev_entry mapping for the initiator node.
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!se_dev) {
		pr_err("Unable to allocate memory for"
				" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->se_dev_node);
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_pr.registration_lock);
	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!se_dev->se_dev_su_ptr) {
		pr_err("Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	lun0_su_dev = se_dev;

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	g_lun0_dev = dev;

	return 0;
out:
	lun0_su_dev = NULL;
	kfree(se_dev);
	if (lun0_hba) {
		core_delete_hba(lun0_hba);
		lun0_hba = NULL;
	}
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;
	struct se_subsystem_dev *su_dev = lun0_su_dev;

	if (!hba)
		return;

	if (g_lun0_dev)
		se_free_virtual_device(g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*******************************************************************************
3 * Filename: target_core_device.c (based on iscsi_target_device.c)
4 *
5 * This file contains the TCM Virtual Device and Disk Transport
6 * agnostic related functions.
7 *
8 * (c) Copyright 2003-2013 Datera, Inc.
9 *
10 * Nicholas A. Bellinger <nab@kernel.org>
11 *
12 ******************************************************************************/
13
14#include <linux/net.h>
15#include <linux/string.h>
16#include <linux/delay.h>
17#include <linux/timer.h>
18#include <linux/slab.h>
19#include <linux/spinlock.h>
20#include <linux/kthread.h>
21#include <linux/in.h>
22#include <linux/export.h>
23#include <linux/t10-pi.h>
24#include <asm/unaligned.h>
25#include <net/sock.h>
26#include <net/tcp.h>
27#include <scsi/scsi_common.h>
28#include <scsi/scsi_proto.h>
29
30#include <target/target_core_base.h>
31#include <target/target_core_backend.h>
32#include <target/target_core_fabric.h>
33
34#include "target_core_internal.h"
35#include "target_core_alua.h"
36#include "target_core_pr.h"
37#include "target_core_ua.h"
38
39static DEFINE_MUTEX(device_mutex);
40static LIST_HEAD(device_list);
41static DEFINE_IDR(devices_idr);
42
43static struct se_hba *lun0_hba;
44/* not static, needed by tpg.c */
45struct se_device *g_lun0_dev;
46
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
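
/*
 * Typical call site (sketch only): a fabric driver resolves the LUN
 * before handing the descriptor to the core, and turns a non-zero
 * sense_reason_t into a CHECK CONDITION, e.g.:
 *
 *	sense_reason_t rc = transport_lookup_cmd_lun(se_cmd);
 *	if (rc)
 *		transport_send_check_condition_and_sense(se_cmd, rc, 0);
 */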

int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entry's device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}
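
/*
 * Callers that get a non-NULL entry back own a pr_kref reference and
 * are expected to drop it when done, e.g. (sketch):
 *
 *	deve = core_get_se_deve_from_rtpi(nacl, rtpi);
 *	if (deve) {
 *		... use deve ...
 *		kref_put(&deve->pr_kref, target_pr_kref_release);
 *	}
 */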

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

/*
 * Establish UA condition on SCSI device - all LUNs
 */
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {

		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
			core_scsi3_ua_allocate(se_deve, asc, ascq);
		spin_unlock(&lun->lun_deve_lock);
	}
	spin_unlock(&dev->se_port_lock);
}
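
/*
 * For example, a device reconfiguration that changes capacity can
 * raise CAPACITY DATA HAS CHANGED (ASC 0x2A, ASCQ 0x09) on every
 * mapped LUN of the device (sketch):
 *
 *	target_dev_ua_allocate(dev, 0x2A, 0x09);
 */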

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = orig->se_lun;

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		new->se_lun = lun;
		new->se_lun_acl = lun_acl;
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	new->se_lun = lun;
	new->se_lun_acl = lun_acl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below.
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/* core_clear_lun_from_tpg():
 *
 * Disable every mapped LUN entry referencing @lun across all
 * NodeACLs in @tpg.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			if (lun != deve->se_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code		Description
	 * 0h		Reserved
	 * 1h		Relative port 1, historically known as port A
	 * 2h		Relative port 2, historically known as port B
	 * 3h to FFFFh	Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap.
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
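
/*
 * Worked example (assuming 4K PAGE_SIZE): for block_size = 512 the
 * alignment is 4096 / 512 = 8 sectors, so max_sectors = 2047 rounds
 * down to 2040. For block_size >= PAGE_SIZE the alignment clamps to 1
 * and max_sectors is returned unchanged.
 */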

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 * Deactivate @lun and remove it from @tpg.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);

	dev->t10_wwn.t10_dev = dev;
	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	dev->t10_wwn.company_id = 0x001405;

	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}
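
/*
 * The allocation above is paired with target_free_device(); e.g.
 * (sketch, following core_dev_setup_virtual_lun0() below):
 *
 *	dev = target_alloc_device(hba, "virt_lun0");
 *	if (!dev)
 *		return -ENOMEM;
 *	if (target_configure_device(dev) < 0)
 *		target_free_device(dev);
 */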

/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_max_discard_sectors(bdev))
		return false;

	attrib->max_unmap_lba_count =
		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code.
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
	attrib->unmap_granularity_alignment =
		bdev_discard_alignment(bdev) / block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
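
/*
 * A block-device backed backend typically wires this up from its
 * configure_unmap() method, e.g. (sketch along the lines of the
 * iblock backend):
 *
 *	static bool iblock_configure_unmap(struct se_device *dev)
 *	{
 *		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 *
 *		return target_configure_unmap_from_queue(&dev->dev_attrib,
 *							 ib_dev->ibd_bd);
 *	}
 */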

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);
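
/*
 * Worked example: with a 4096-byte logical block size, LBA 16 maps to
 * 16 << 3 = 128 Linux 512-byte sectors, i.e. byte offset 65536.
 */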

struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	__must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. A non-zero return
 * value breaks out of the loop and is returned to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}
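
/*
 * Example (sketch, hypothetical callback): counting configured devices:
 *
 *	static int target_count_dev(struct se_device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	target_for_each_device(target_count_dev, &count);
 */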

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * XXX: there is not much point to have two different values here.
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
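
/*
 * Passthrough backends wire this up from their parse_cdb() method,
 * e.g. (sketch, following the pscsi backend):
 *
 *	static sense_reason_t pscsi_parse_cdb(struct se_cmd *cmd)
 *	{
 *		if (cmd->se_cmd_flags & SCF_BIDI)
 *			return TCM_UNSUPPORTED_SCSI_OPCODE;
 *
 *		return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
 *	}
 */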