Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*******************************************************************************
3 * Filename: target_core_tpg.c
4 *
5 * This file contains generic Target Portal Group related functions.
6 *
7 * (c) Copyright 2002-2013 Datera, Inc.
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 ******************************************************************************/
12
13#include <linux/net.h>
14#include <linux/string.h>
15#include <linux/timer.h>
16#include <linux/slab.h>
17#include <linux/spinlock.h>
18#include <linux/in.h>
19#include <linux/export.h>
20#include <net/sock.h>
21#include <net/tcp.h>
22#include <scsi/scsi_proto.h>
23
24#include <target/target_core_base.h>
25#include <target/target_core_backend.h>
26#include <target/target_core_fabric.h>
27
28#include "target_core_internal.h"
29#include "target_core_alua.h"
30#include "target_core_pr.h"
31#include "target_core_ua.h"
32
33extern struct se_device *g_lun0_dev;
34static DEFINE_XARRAY_ALLOC(tpg_xa);
35
36/* __core_tpg_get_initiator_node_acl():
37 *
38 * mutex_lock(&tpg->acl_node_mutex); must be held when calling
39 */
40struct se_node_acl *__core_tpg_get_initiator_node_acl(
41 struct se_portal_group *tpg,
42 const char *initiatorname)
43{
44 struct se_node_acl *acl;
45
46 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
47 if (!strcmp(acl->initiatorname, initiatorname))
48 return acl;
49 }
50
51 return NULL;
52}
53
54/* core_tpg_get_initiator_node_acl():
55 *
56 *
57 */
58struct se_node_acl *core_tpg_get_initiator_node_acl(
59 struct se_portal_group *tpg,
60 unsigned char *initiatorname)
61{
62 struct se_node_acl *acl;
63 /*
64 * Obtain se_node_acl->acl_kref using fabric driver provided
65 * initiatorname[] during node acl endpoint lookup driven by
66 * new se_session login.
67 *
68 * The reference is held until se_session shutdown -> release
69 * occurs via fabric driver invoked transport_deregister_session()
70 * or transport_free_session() code.
71 */
72 mutex_lock(&tpg->acl_node_mutex);
73 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
74 if (acl) {
75 if (!kref_get_unless_zero(&acl->acl_kref))
76 acl = NULL;
77 }
78 mutex_unlock(&tpg->acl_node_mutex);
79
80 return acl;
81}
82EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
83
84void core_allocate_nexus_loss_ua(
85 struct se_node_acl *nacl)
86{
87 struct se_dev_entry *deve;
88
89 if (!nacl)
90 return;
91
92 rcu_read_lock();
93 hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
94 core_scsi3_ua_allocate(deve, 0x29,
95 ASCQ_29H_NEXUS_LOSS_OCCURRED);
96 rcu_read_unlock();
97}
98EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
99
100/* core_tpg_add_node_to_devs():
101 *
102 *
103 */
104void core_tpg_add_node_to_devs(
105 struct se_node_acl *acl,
106 struct se_portal_group *tpg,
107 struct se_lun *lun_orig)
108{
109 bool lun_access_ro = true;
110 struct se_lun *lun;
111 struct se_device *dev;
112
113 mutex_lock(&tpg->tpg_lun_mutex);
114 hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
115 if (lun_orig && lun != lun_orig)
116 continue;
117
118 dev = rcu_dereference_check(lun->lun_se_dev,
119 lockdep_is_held(&tpg->tpg_lun_mutex));
120 /*
121 * By default in LIO-Target $FABRIC_MOD,
122 * demo_mode_write_protect is ON, or READ_ONLY;
123 */
124 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
125 lun_access_ro = false;
126 } else {
127 /*
128 * Allow only optical drives to issue R/W in default RO
129 * demo mode.
130 */
131 if (dev->transport->get_device_type(dev) == TYPE_DISK)
132 lun_access_ro = true;
133 else
134 lun_access_ro = false;
135 }
136
137 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
138 " access for LUN in Demo Mode\n",
139 tpg->se_tpg_tfo->fabric_name,
140 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
141 lun_access_ro ? "READ-ONLY" : "READ-WRITE");
142
143 core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
144 lun_access_ro, acl, tpg);
145 /*
146 * Check to see if there are any existing persistent reservation
147 * APTPL pre-registrations that need to be enabled for this dynamic
148 * LUN ACL now..
149 */
150 core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
151 lun->unpacked_lun);
152 }
153 mutex_unlock(&tpg->tpg_lun_mutex);
154}
155
156static void
157target_set_nacl_queue_depth(struct se_portal_group *tpg,
158 struct se_node_acl *acl, u32 queue_depth)
159{
160 acl->queue_depth = queue_depth;
161
162 if (!acl->queue_depth) {
163 pr_warn("Queue depth for %s Initiator Node: %s is 0,"
164 "defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
165 acl->initiatorname);
166 acl->queue_depth = 1;
167 }
168}
169
170static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
171 const unsigned char *initiatorname)
172{
173 struct se_node_acl *acl;
174 u32 queue_depth;
175
176 acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
177 GFP_KERNEL);
178 if (!acl)
179 return NULL;
180
181 INIT_LIST_HEAD(&acl->acl_list);
182 INIT_LIST_HEAD(&acl->acl_sess_list);
183 INIT_HLIST_HEAD(&acl->lun_entry_hlist);
184 kref_init(&acl->acl_kref);
185 init_completion(&acl->acl_free_comp);
186 spin_lock_init(&acl->nacl_sess_lock);
187 mutex_init(&acl->lun_entry_mutex);
188 atomic_set(&acl->acl_pr_ref_count, 0);
189
190 if (tpg->se_tpg_tfo->tpg_get_default_depth)
191 queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
192 else
193 queue_depth = 1;
194 target_set_nacl_queue_depth(tpg, acl, queue_depth);
195
196 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
197 acl->se_tpg = tpg;
198 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
199
200 tpg->se_tpg_tfo->set_default_node_attributes(acl);
201
202 return acl;
203}
204
205static void target_add_node_acl(struct se_node_acl *acl)
206{
207 struct se_portal_group *tpg = acl->se_tpg;
208
209 mutex_lock(&tpg->acl_node_mutex);
210 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
211 mutex_unlock(&tpg->acl_node_mutex);
212
213 pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
214 " Initiator Node: %s\n",
215 tpg->se_tpg_tfo->fabric_name,
216 tpg->se_tpg_tfo->tpg_get_tag(tpg),
217 acl->dynamic_node_acl ? "DYNAMIC" : "",
218 acl->queue_depth,
219 tpg->se_tpg_tfo->fabric_name,
220 acl->initiatorname);
221}
222
223bool target_tpg_has_node_acl(struct se_portal_group *tpg,
224 const char *initiatorname)
225{
226 struct se_node_acl *acl;
227 bool found = false;
228
229 mutex_lock(&tpg->acl_node_mutex);
230 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
231 if (!strcmp(acl->initiatorname, initiatorname)) {
232 found = true;
233 break;
234 }
235 }
236 mutex_unlock(&tpg->acl_node_mutex);
237
238 return found;
239}
240EXPORT_SYMBOL(target_tpg_has_node_acl);
241
242struct se_node_acl *core_tpg_check_initiator_node_acl(
243 struct se_portal_group *tpg,
244 unsigned char *initiatorname)
245{
246 struct se_node_acl *acl;
247
248 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
249 if (acl)
250 return acl;
251
252 if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
253 return NULL;
254
255 acl = target_alloc_node_acl(tpg, initiatorname);
256 if (!acl)
257 return NULL;
258 /*
259 * When allocating a dynamically generated node_acl, go ahead
260 * and take the extra kref now before returning to the fabric
261 * driver caller.
262 *
263 * Note this reference will be released at session shutdown
264 * time within transport_free_session() code.
265 */
266 kref_get(&acl->acl_kref);
267 acl->dynamic_node_acl = 1;
268
269 /*
270 * Here we only create demo-mode MappedLUNs from the active
271 * TPG LUNs if the fabric is not explicitly asking for
272 * tpg_check_demo_mode_login_only() == 1.
273 */
274 if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
275 (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
276 core_tpg_add_node_to_devs(acl, tpg, NULL);
277
278 target_add_node_acl(acl);
279 return acl;
280}
281EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
282
283void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
284{
285 while (atomic_read(&nacl->acl_pr_ref_count) != 0)
286 cpu_relax();
287}
288
289struct se_node_acl *core_tpg_add_initiator_node_acl(
290 struct se_portal_group *tpg,
291 const char *initiatorname)
292{
293 struct se_node_acl *acl;
294
295 mutex_lock(&tpg->acl_node_mutex);
296 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
297 if (acl) {
298 if (acl->dynamic_node_acl) {
299 acl->dynamic_node_acl = 0;
300 pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
301 " for %s\n", tpg->se_tpg_tfo->fabric_name,
302 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
303 mutex_unlock(&tpg->acl_node_mutex);
304 return acl;
305 }
306
307 pr_err("ACL entry for %s Initiator"
308 " Node %s already exists for TPG %u, ignoring"
309 " request.\n", tpg->se_tpg_tfo->fabric_name,
310 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
311 mutex_unlock(&tpg->acl_node_mutex);
312 return ERR_PTR(-EEXIST);
313 }
314 mutex_unlock(&tpg->acl_node_mutex);
315
316 acl = target_alloc_node_acl(tpg, initiatorname);
317 if (!acl)
318 return ERR_PTR(-ENOMEM);
319
320 target_add_node_acl(acl);
321 return acl;
322}
323
324static void target_shutdown_sessions(struct se_node_acl *acl)
325{
326 struct se_session *sess;
327 unsigned long flags;
328
329restart:
330 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
331 list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
332 if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped))
333 continue;
334
335 list_del_init(&sess->sess_acl_list);
336 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
337
338 if (acl->se_tpg->se_tpg_tfo->close_session)
339 acl->se_tpg->se_tpg_tfo->close_session(sess);
340 goto restart;
341 }
342 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
343}
344
345void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
346{
347 struct se_portal_group *tpg = acl->se_tpg;
348
349 mutex_lock(&tpg->acl_node_mutex);
350 if (acl->dynamic_node_acl)
351 acl->dynamic_node_acl = 0;
352 list_del_init(&acl->acl_list);
353 mutex_unlock(&tpg->acl_node_mutex);
354
355 target_shutdown_sessions(acl);
356
357 target_put_nacl(acl);
358 /*
359 * Wait for last target_put_nacl() to complete in target_complete_nacl()
360 * for active fabric session transport_deregister_session() callbacks.
361 */
362 wait_for_completion(&acl->acl_free_comp);
363
364 core_tpg_wait_for_nacl_pr_ref(acl);
365 core_free_device_list_for_node(acl, tpg);
366
367 pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
368 " Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
369 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
370 tpg->se_tpg_tfo->fabric_name, acl->initiatorname);
371
372 kfree(acl);
373}
374
375/* core_tpg_set_initiator_node_queue_depth():
376 *
377 *
378 */
379int core_tpg_set_initiator_node_queue_depth(
380 struct se_node_acl *acl,
381 u32 queue_depth)
382{
383 struct se_portal_group *tpg = acl->se_tpg;
384
385 /*
386 * Allow the setting of se_node_acl queue_depth to be idempotent,
387 * and not force a session shutdown event if the value is not
388 * changing.
389 */
390 if (acl->queue_depth == queue_depth)
391 return 0;
392 /*
393 * User has requested to change the queue depth for a Initiator Node.
394 * Change the value in the Node's struct se_node_acl, and call
395 * target_set_nacl_queue_depth() to set the new queue depth.
396 */
397 target_set_nacl_queue_depth(tpg, acl, queue_depth);
398
399 /*
400 * Shutdown all pending sessions to force session reinstatement.
401 */
402 target_shutdown_sessions(acl);
403
404 pr_debug("Successfully changed queue depth to: %d for Initiator"
405 " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
406 acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
407 tpg->se_tpg_tfo->tpg_get_tag(tpg));
408
409 return 0;
410}
411EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
412
413/* core_tpg_set_initiator_node_tag():
414 *
415 * Initiator nodeacl tags are not used internally, but may be used by
416 * userspace to emulate aliases or groups.
417 * Returns length of newly-set tag or -EINVAL.
418 */
419int core_tpg_set_initiator_node_tag(
420 struct se_portal_group *tpg,
421 struct se_node_acl *acl,
422 const char *new_tag)
423{
424 if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
425 return -EINVAL;
426
427 if (!strncmp("NULL", new_tag, 4)) {
428 acl->acl_tag[0] = '\0';
429 return 0;
430 }
431
432 return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
433}
434EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
435
436static void core_tpg_lun_ref_release(struct percpu_ref *ref)
437{
438 struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
439
440 complete(&lun->lun_shutdown_comp);
441}
442
443static int target_tpg_register_rtpi(struct se_portal_group *se_tpg)
444{
445 u32 val;
446 int ret;
447
448 if (se_tpg->rtpi_manual) {
449 ret = xa_insert(&tpg_xa, se_tpg->tpg_rtpi, se_tpg, GFP_KERNEL);
450 if (ret) {
451 pr_info("%s_TPG[%hu] - Can not set RTPI %#x, it is already busy",
452 se_tpg->se_tpg_tfo->fabric_name,
453 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
454 se_tpg->tpg_rtpi);
455 return -EINVAL;
456 }
457 } else {
458 ret = xa_alloc(&tpg_xa, &val, se_tpg,
459 XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
460 if (!ret)
461 se_tpg->tpg_rtpi = val;
462 }
463
464 return ret;
465}
466
467static void target_tpg_deregister_rtpi(struct se_portal_group *se_tpg)
468{
469 if (se_tpg->tpg_rtpi && se_tpg->enabled)
470 xa_erase(&tpg_xa, se_tpg->tpg_rtpi);
471}
472
473int target_tpg_enable(struct se_portal_group *se_tpg)
474{
475 int ret;
476
477 ret = target_tpg_register_rtpi(se_tpg);
478 if (ret)
479 return ret;
480
481 ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, true);
482 if (ret) {
483 target_tpg_deregister_rtpi(se_tpg);
484 return ret;
485 }
486
487 se_tpg->enabled = true;
488
489 return 0;
490}
491
492int target_tpg_disable(struct se_portal_group *se_tpg)
493{
494 int ret;
495
496 target_tpg_deregister_rtpi(se_tpg);
497
498 ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, false);
499 if (!ret)
500 se_tpg->enabled = false;
501
502 return ret;
503}
504
505/* Does not change se_wwn->priv. */
506int core_tpg_register(
507 struct se_wwn *se_wwn,
508 struct se_portal_group *se_tpg,
509 int proto_id)
510{
511 int ret;
512
513 if (!se_tpg)
514 return -EINVAL;
515 /*
516 * For the typical case where core_tpg_register() is called by a
517 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
518 * configfs context, use the original tf_ops pointer already saved
519 * by target-core in target_fabric_make_wwn().
520 *
521 * Otherwise, for special cases like iscsi-target discovery TPGs
522 * the caller is responsible for setting ->se_tpg_tfo ahead of
523 * calling core_tpg_register().
524 */
525 if (se_wwn)
526 se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;
527
528 if (!se_tpg->se_tpg_tfo) {
529 pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
530 return -EINVAL;
531 }
532
533 INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
534 se_tpg->proto_id = proto_id;
535 se_tpg->se_tpg_wwn = se_wwn;
536 atomic_set(&se_tpg->tpg_pr_ref_count, 0);
537 INIT_LIST_HEAD(&se_tpg->acl_node_list);
538 INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
539 spin_lock_init(&se_tpg->session_lock);
540 mutex_init(&se_tpg->tpg_lun_mutex);
541 mutex_init(&se_tpg->acl_node_mutex);
542
543 if (se_tpg->proto_id >= 0) {
544 se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
545 if (IS_ERR(se_tpg->tpg_virt_lun0))
546 return PTR_ERR(se_tpg->tpg_virt_lun0);
547
548 ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
549 true, g_lun0_dev);
550 if (ret < 0) {
551 kfree(se_tpg->tpg_virt_lun0);
552 return ret;
553 }
554 }
555
556 pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
557 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
558 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
559 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
560 se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
561
562 return 0;
563}
564EXPORT_SYMBOL(core_tpg_register);
565
566int core_tpg_deregister(struct se_portal_group *se_tpg)
567{
568 const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
569 struct se_node_acl *nacl, *nacl_tmp;
570 LIST_HEAD(node_list);
571
572 pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
573 "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
574 tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
575 se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
576
577 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
578 cpu_relax();
579
580 mutex_lock(&se_tpg->acl_node_mutex);
581 list_splice_init(&se_tpg->acl_node_list, &node_list);
582 mutex_unlock(&se_tpg->acl_node_mutex);
583 /*
584 * Release any remaining demo-mode generated se_node_acl that have
585 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
586 * in transport_deregister_session().
587 */
588 list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
589 list_del_init(&nacl->acl_list);
590
591 core_tpg_wait_for_nacl_pr_ref(nacl);
592 core_free_device_list_for_node(nacl, se_tpg);
593 kfree(nacl);
594 }
595
596 if (se_tpg->proto_id >= 0) {
597 core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
598 kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
599 }
600
601 target_tpg_deregister_rtpi(se_tpg);
602
603 return 0;
604}
605EXPORT_SYMBOL(core_tpg_deregister);
606
607struct se_lun *core_tpg_alloc_lun(
608 struct se_portal_group *tpg,
609 u64 unpacked_lun)
610{
611 struct se_lun *lun;
612
613 lun = kzalloc(sizeof(*lun), GFP_KERNEL);
614 if (!lun) {
615 pr_err("Unable to allocate se_lun memory\n");
616 return ERR_PTR(-ENOMEM);
617 }
618 lun->unpacked_lun = unpacked_lun;
619 atomic_set(&lun->lun_acl_count, 0);
620 init_completion(&lun->lun_shutdown_comp);
621 INIT_LIST_HEAD(&lun->lun_deve_list);
622 INIT_LIST_HEAD(&lun->lun_dev_link);
623 atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
624 spin_lock_init(&lun->lun_deve_lock);
625 mutex_init(&lun->lun_tg_pt_md_mutex);
626 INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
627 spin_lock_init(&lun->lun_tg_pt_gp_lock);
628 lun->lun_tpg = tpg;
629
630 return lun;
631}
632
633int core_tpg_add_lun(
634 struct se_portal_group *tpg,
635 struct se_lun *lun,
636 bool lun_access_ro,
637 struct se_device *dev)
638{
639 int ret;
640
641 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
642 GFP_KERNEL);
643 if (ret < 0)
644 goto out;
645
646 if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
647 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
648 target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
649
650 mutex_lock(&tpg->tpg_lun_mutex);
651
652 spin_lock(&dev->se_port_lock);
653 lun->lun_index = dev->dev_index;
654 rcu_assign_pointer(lun->lun_se_dev, dev);
655 dev->export_count++;
656 list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
657 spin_unlock(&dev->se_port_lock);
658
659 if (dev->dev_flags & DF_READ_ONLY)
660 lun->lun_access_ro = true;
661 else
662 lun->lun_access_ro = lun_access_ro;
663 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
664 hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
665 mutex_unlock(&tpg->tpg_lun_mutex);
666
667 return 0;
668
669out:
670 return ret;
671}
672
673void core_tpg_remove_lun(
674 struct se_portal_group *tpg,
675 struct se_lun *lun)
676{
677 /*
678 * rcu_dereference_raw protected by se_lun->lun_group symlink
679 * reference to se_device->dev_group.
680 */
681 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
682
683 lun->lun_shutdown = true;
684
685 core_clear_lun_from_tpg(lun, tpg);
686 /*
687 * Wait for any active I/O references to percpu se_lun->lun_ref to
688 * be released. Also, se_lun->lun_ref is now used by PR and ALUA
689 * logic when referencing a remote target port during ALL_TGT_PT=1
690 * and generating UNIT_ATTENTIONs for ALUA access state transition.
691 */
692 transport_clear_lun_ref(lun);
693
694 mutex_lock(&tpg->tpg_lun_mutex);
695 if (lun->lun_se_dev) {
696 target_detach_tg_pt_gp(lun);
697
698 spin_lock(&dev->se_port_lock);
699 list_del(&lun->lun_dev_link);
700 dev->export_count--;
701 rcu_assign_pointer(lun->lun_se_dev, NULL);
702 spin_unlock(&dev->se_port_lock);
703 }
704 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
705 hlist_del_rcu(&lun->link);
706
707 lun->lun_shutdown = false;
708 mutex_unlock(&tpg->tpg_lun_mutex);
709
710 percpu_ref_exit(&lun->lun_ref);
711}
1/*******************************************************************************
2 * Filename: target_core_tpg.c
3 *
4 * This file contains generic Target Portal Group related functions.
5 *
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/net.h>
30#include <linux/string.h>
31#include <linux/timer.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/in.h>
35#include <net/sock.h>
36#include <net/tcp.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_cmnd.h>
39
40#include <target/target_core_base.h>
41#include <target/target_core_device.h>
42#include <target/target_core_tpg.h>
43#include <target/target_core_transport.h>
44#include <target/target_core_fabric_ops.h>
45
46#include "target_core_hba.h"
47#include "target_core_stat.h"
48
49extern struct se_device *g_lun0_dev;
50
51static DEFINE_SPINLOCK(tpg_lock);
52static LIST_HEAD(tpg_list);
53
54/* core_clear_initiator_node_from_tpg():
55 *
56 *
57 */
58static void core_clear_initiator_node_from_tpg(
59 struct se_node_acl *nacl,
60 struct se_portal_group *tpg)
61{
62 int i;
63 struct se_dev_entry *deve;
64 struct se_lun *lun;
65 struct se_lun_acl *acl, *acl_tmp;
66
67 spin_lock_irq(&nacl->device_list_lock);
68 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
69 deve = &nacl->device_list[i];
70
71 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
72 continue;
73
74 if (!deve->se_lun) {
75 pr_err("%s device entries device pointer is"
76 " NULL, but Initiator has access.\n",
77 tpg->se_tpg_tfo->get_fabric_name());
78 continue;
79 }
80
81 lun = deve->se_lun;
82 spin_unlock_irq(&nacl->device_list_lock);
83 core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
84 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
85
86 spin_lock(&lun->lun_acl_lock);
87 list_for_each_entry_safe(acl, acl_tmp,
88 &lun->lun_acl_list, lacl_list) {
89 if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
90 (acl->mapped_lun == deve->mapped_lun))
91 break;
92 }
93
94 if (!acl) {
95 pr_err("Unable to locate struct se_lun_acl for %s,"
96 " mapped_lun: %u\n", nacl->initiatorname,
97 deve->mapped_lun);
98 spin_unlock(&lun->lun_acl_lock);
99 spin_lock_irq(&nacl->device_list_lock);
100 continue;
101 }
102
103 list_del(&acl->lacl_list);
104 spin_unlock(&lun->lun_acl_lock);
105
106 spin_lock_irq(&nacl->device_list_lock);
107 kfree(acl);
108 }
109 spin_unlock_irq(&nacl->device_list_lock);
110}
111
112/* __core_tpg_get_initiator_node_acl():
113 *
114 * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
115 */
116struct se_node_acl *__core_tpg_get_initiator_node_acl(
117 struct se_portal_group *tpg,
118 const char *initiatorname)
119{
120 struct se_node_acl *acl;
121
122 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
123 if (!strcmp(acl->initiatorname, initiatorname))
124 return acl;
125 }
126
127 return NULL;
128}
129
130/* core_tpg_get_initiator_node_acl():
131 *
132 *
133 */
134struct se_node_acl *core_tpg_get_initiator_node_acl(
135 struct se_portal_group *tpg,
136 unsigned char *initiatorname)
137{
138 struct se_node_acl *acl;
139
140 spin_lock_irq(&tpg->acl_node_lock);
141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
142 if (!strcmp(acl->initiatorname, initiatorname) &&
143 !acl->dynamic_node_acl) {
144 spin_unlock_irq(&tpg->acl_node_lock);
145 return acl;
146 }
147 }
148 spin_unlock_irq(&tpg->acl_node_lock);
149
150 return NULL;
151}
152
153/* core_tpg_add_node_to_devs():
154 *
155 *
156 */
157void core_tpg_add_node_to_devs(
158 struct se_node_acl *acl,
159 struct se_portal_group *tpg)
160{
161 int i = 0;
162 u32 lun_access = 0;
163 struct se_lun *lun;
164 struct se_device *dev;
165
166 spin_lock(&tpg->tpg_lun_lock);
167 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
168 lun = &tpg->tpg_lun_list[i];
169 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
170 continue;
171
172 spin_unlock(&tpg->tpg_lun_lock);
173
174 dev = lun->lun_se_dev;
175 /*
176 * By default in LIO-Target $FABRIC_MOD,
177 * demo_mode_write_protect is ON, or READ_ONLY;
178 */
179 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
180 if (dev->dev_flags & DF_READ_ONLY)
181 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
182 else
183 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
184 } else {
185 /*
186 * Allow only optical drives to issue R/W in default RO
187 * demo mode.
188 */
189 if (dev->transport->get_device_type(dev) == TYPE_DISK)
190 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
191 else
192 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
193 }
194
195 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
196 " access for LUN in Demo Mode\n",
197 tpg->se_tpg_tfo->get_fabric_name(),
198 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
199 (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
200 "READ-WRITE" : "READ-ONLY");
201
202 core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
203 lun_access, acl, tpg, 1);
204 spin_lock(&tpg->tpg_lun_lock);
205 }
206 spin_unlock(&tpg->tpg_lun_lock);
207}
208
209/* core_set_queue_depth_for_node():
210 *
211 *
212 */
213static int core_set_queue_depth_for_node(
214 struct se_portal_group *tpg,
215 struct se_node_acl *acl)
216{
217 if (!acl->queue_depth) {
218 pr_err("Queue depth for %s Initiator Node: %s is 0,"
219 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
220 acl->initiatorname);
221 acl->queue_depth = 1;
222 }
223
224 return 0;
225}
226
227/* core_create_device_list_for_node():
228 *
229 *
230 */
231static int core_create_device_list_for_node(struct se_node_acl *nacl)
232{
233 struct se_dev_entry *deve;
234 int i;
235
236 nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
237 TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
238 if (!nacl->device_list) {
239 pr_err("Unable to allocate memory for"
240 " struct se_node_acl->device_list\n");
241 return -ENOMEM;
242 }
243 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
244 deve = &nacl->device_list[i];
245
246 atomic_set(&deve->ua_count, 0);
247 atomic_set(&deve->pr_ref_count, 0);
248 spin_lock_init(&deve->ua_lock);
249 INIT_LIST_HEAD(&deve->alua_port_list);
250 INIT_LIST_HEAD(&deve->ua_list);
251 }
252
253 return 0;
254}
255
256/* core_tpg_check_initiator_node_acl()
257 *
258 *
259 */
260struct se_node_acl *core_tpg_check_initiator_node_acl(
261 struct se_portal_group *tpg,
262 unsigned char *initiatorname)
263{
264 struct se_node_acl *acl;
265
266 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
267 if (acl)
268 return acl;
269
270 if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
271 return NULL;
272
273 acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
274 if (!acl)
275 return NULL;
276
277 INIT_LIST_HEAD(&acl->acl_list);
278 INIT_LIST_HEAD(&acl->acl_sess_list);
279 spin_lock_init(&acl->device_list_lock);
280 spin_lock_init(&acl->nacl_sess_lock);
281 atomic_set(&acl->acl_pr_ref_count, 0);
282 acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
283 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
284 acl->se_tpg = tpg;
285 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
286 spin_lock_init(&acl->stats_lock);
287 acl->dynamic_node_acl = 1;
288
289 tpg->se_tpg_tfo->set_default_node_attributes(acl);
290
291 if (core_create_device_list_for_node(acl) < 0) {
292 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
293 return NULL;
294 }
295
296 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
297 core_free_device_list_for_node(acl, tpg);
298 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
299 return NULL;
300 }
301 /*
302 * Here we only create demo-mode MappedLUNs from the active
303 * TPG LUNs if the fabric is not explictly asking for
304 * tpg_check_demo_mode_login_only() == 1.
305 */
306 if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
307 (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
308 do { ; } while (0);
309 else
310 core_tpg_add_node_to_devs(acl, tpg);
311
312 spin_lock_irq(&tpg->acl_node_lock);
313 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
314 tpg->num_node_acls++;
315 spin_unlock_irq(&tpg->acl_node_lock);
316
317 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
318 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
319 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
320 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
321
322 return acl;
323}
324EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
325
326void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
327{
328 while (atomic_read(&nacl->acl_pr_ref_count) != 0)
329 cpu_relax();
330}
331
332void core_tpg_clear_object_luns(struct se_portal_group *tpg)
333{
334 int i, ret;
335 struct se_lun *lun;
336
337 spin_lock(&tpg->tpg_lun_lock);
338 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
339 lun = &tpg->tpg_lun_list[i];
340
341 if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
342 (lun->lun_se_dev == NULL))
343 continue;
344
345 spin_unlock(&tpg->tpg_lun_lock);
346 ret = core_dev_del_lun(tpg, lun->unpacked_lun);
347 spin_lock(&tpg->tpg_lun_lock);
348 }
349 spin_unlock(&tpg->tpg_lun_lock);
350}
351EXPORT_SYMBOL(core_tpg_clear_object_luns);
352
353/* core_tpg_add_initiator_node_acl():
354 *
355 *
356 */
357struct se_node_acl *core_tpg_add_initiator_node_acl(
358 struct se_portal_group *tpg,
359 struct se_node_acl *se_nacl,
360 const char *initiatorname,
361 u32 queue_depth)
362{
363 struct se_node_acl *acl = NULL;
364
365 spin_lock_irq(&tpg->acl_node_lock);
366 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
367 if (acl) {
368 if (acl->dynamic_node_acl) {
369 acl->dynamic_node_acl = 0;
370 pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
371 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
372 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
373 spin_unlock_irq(&tpg->acl_node_lock);
374 /*
375 * Release the locally allocated struct se_node_acl
376 * because * core_tpg_add_initiator_node_acl() returned
377 * a pointer to an existing demo mode node ACL.
378 */
379 if (se_nacl)
380 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
381 se_nacl);
382 goto done;
383 }
384
385 pr_err("ACL entry for %s Initiator"
386 " Node %s already exists for TPG %u, ignoring"
387 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
388 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
389 spin_unlock_irq(&tpg->acl_node_lock);
390 return ERR_PTR(-EEXIST);
391 }
392 spin_unlock_irq(&tpg->acl_node_lock);
393
394 if (!se_nacl) {
395 pr_err("struct se_node_acl pointer is NULL\n");
396 return ERR_PTR(-EINVAL);
397 }
398 /*
399 * For v4.x logic the se_node_acl_s is hanging off a fabric
400 * dependent structure allocated via
401 * struct target_core_fabric_ops->fabric_make_nodeacl()
402 */
403 acl = se_nacl;
404
405 INIT_LIST_HEAD(&acl->acl_list);
406 INIT_LIST_HEAD(&acl->acl_sess_list);
407 spin_lock_init(&acl->device_list_lock);
408 spin_lock_init(&acl->nacl_sess_lock);
409 atomic_set(&acl->acl_pr_ref_count, 0);
410 acl->queue_depth = queue_depth;
411 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
412 acl->se_tpg = tpg;
413 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
414 spin_lock_init(&acl->stats_lock);
415
416 tpg->se_tpg_tfo->set_default_node_attributes(acl);
417
418 if (core_create_device_list_for_node(acl) < 0) {
419 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
420 return ERR_PTR(-ENOMEM);
421 }
422
423 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
424 core_free_device_list_for_node(acl, tpg);
425 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
426 return ERR_PTR(-EINVAL);
427 }
428
429 spin_lock_irq(&tpg->acl_node_lock);
430 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
431 tpg->num_node_acls++;
432 spin_unlock_irq(&tpg->acl_node_lock);
433
434done:
435 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
436 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
437 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
438 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
439
440 return acl;
441}
442EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
443
444/* core_tpg_del_initiator_node_acl():
445 *
446 *
447 */
448int core_tpg_del_initiator_node_acl(
449 struct se_portal_group *tpg,
450 struct se_node_acl *acl,
451 int force)
452{
453 struct se_session *sess, *sess_tmp;
454 int dynamic_acl = 0;
455
456 spin_lock_irq(&tpg->acl_node_lock);
457 if (acl->dynamic_node_acl) {
458 acl->dynamic_node_acl = 0;
459 dynamic_acl = 1;
460 }
461 list_del(&acl->acl_list);
462 tpg->num_node_acls--;
463 spin_unlock_irq(&tpg->acl_node_lock);
464
465 spin_lock_bh(&tpg->session_lock);
466 list_for_each_entry_safe(sess, sess_tmp,
467 &tpg->tpg_sess_list, sess_list) {
468 if (sess->se_node_acl != acl)
469 continue;
470 /*
471 * Determine if the session needs to be closed by our context.
472 */
473 if (!tpg->se_tpg_tfo->shutdown_session(sess))
474 continue;
475
476 spin_unlock_bh(&tpg->session_lock);
477 /*
478 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
479 * forcefully shutdown the $FABRIC_MOD session/nexus.
480 */
481 tpg->se_tpg_tfo->close_session(sess);
482
483 spin_lock_bh(&tpg->session_lock);
484 }
485 spin_unlock_bh(&tpg->session_lock);
486
487 core_tpg_wait_for_nacl_pr_ref(acl);
488 core_clear_initiator_node_from_tpg(acl, tpg);
489 core_free_device_list_for_node(acl, tpg);
490
491 pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
492 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
493 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
494 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
495
496 return 0;
497}
498EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
499
500/* core_tpg_set_initiator_node_queue_depth():
501 *
502 *
503 */
504int core_tpg_set_initiator_node_queue_depth(
505 struct se_portal_group *tpg,
506 unsigned char *initiatorname,
507 u32 queue_depth,
508 int force)
509{
510 struct se_session *sess, *init_sess = NULL;
511 struct se_node_acl *acl;
512 int dynamic_acl = 0;
513
514 spin_lock_irq(&tpg->acl_node_lock);
515 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
516 if (!acl) {
517 pr_err("Access Control List entry for %s Initiator"
518 " Node %s does not exists for TPG %hu, ignoring"
519 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
520 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
521 spin_unlock_irq(&tpg->acl_node_lock);
522 return -ENODEV;
523 }
524 if (acl->dynamic_node_acl) {
525 acl->dynamic_node_acl = 0;
526 dynamic_acl = 1;
527 }
528 spin_unlock_irq(&tpg->acl_node_lock);
529
530 spin_lock_bh(&tpg->session_lock);
531 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
532 if (sess->se_node_acl != acl)
533 continue;
534
535 if (!force) {
536 pr_err("Unable to change queue depth for %s"
537 " Initiator Node: %s while session is"
538 " operational. To forcefully change the queue"
539 " depth and force session reinstatement"
540 " use the \"force=1\" parameter.\n",
541 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
542 spin_unlock_bh(&tpg->session_lock);
543
544 spin_lock_irq(&tpg->acl_node_lock);
545 if (dynamic_acl)
546 acl->dynamic_node_acl = 1;
547 spin_unlock_irq(&tpg->acl_node_lock);
548 return -EEXIST;
549 }
550 /*
551 * Determine if the session needs to be closed by our context.
552 */
553 if (!tpg->se_tpg_tfo->shutdown_session(sess))
554 continue;
555
556 init_sess = sess;
557 break;
558 }
559
560 /*
561 * User has requested to change the queue depth for a Initiator Node.
562 * Change the value in the Node's struct se_node_acl, and call
563 * core_set_queue_depth_for_node() to add the requested queue depth.
564 *
565 * Finally call tpg->se_tpg_tfo->close_session() to force session
566 * reinstatement to occur if there is an active session for the
567 * $FABRIC_MOD Initiator Node in question.
568 */
569 acl->queue_depth = queue_depth;
570
571 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
572 spin_unlock_bh(&tpg->session_lock);
573 /*
574 * Force session reinstatement if
575 * core_set_queue_depth_for_node() failed, because we assume
576 * the $FABRIC_MOD has already the set session reinstatement
577 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
578 */
579 if (init_sess)
580 tpg->se_tpg_tfo->close_session(init_sess);
581
582 spin_lock_irq(&tpg->acl_node_lock);
583 if (dynamic_acl)
584 acl->dynamic_node_acl = 1;
585 spin_unlock_irq(&tpg->acl_node_lock);
586 return -EINVAL;
587 }
588 spin_unlock_bh(&tpg->session_lock);
589 /*
590 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
591 * forcefully shutdown the $FABRIC_MOD session/nexus.
592 */
593 if (init_sess)
594 tpg->se_tpg_tfo->close_session(init_sess);
595
596 pr_debug("Successfuly changed queue depth to: %d for Initiator"
597 " Node: %s on %s Target Portal Group: %u\n", queue_depth,
598 initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
599 tpg->se_tpg_tfo->tpg_get_tag(tpg));
600
601 spin_lock_irq(&tpg->acl_node_lock);
602 if (dynamic_acl)
603 acl->dynamic_node_acl = 1;
604 spin_unlock_irq(&tpg->acl_node_lock);
605
606 return 0;
607}
608EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
609
610static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
611{
612 /* Set in core_dev_setup_virtual_lun0() */
613 struct se_device *dev = g_lun0_dev;
614 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
615 u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
616 int ret;
617
618 lun->unpacked_lun = 0;
619 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
620 atomic_set(&lun->lun_acl_count, 0);
621 init_completion(&lun->lun_shutdown_comp);
622 INIT_LIST_HEAD(&lun->lun_acl_list);
623 INIT_LIST_HEAD(&lun->lun_cmd_list);
624 spin_lock_init(&lun->lun_acl_lock);
625 spin_lock_init(&lun->lun_cmd_lock);
626 spin_lock_init(&lun->lun_sep_lock);
627
628 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
629 if (ret < 0)
630 return ret;
631
632 return 0;
633}
634
635static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
636{
637 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
638
639 core_tpg_post_dellun(se_tpg, lun);
640}
641
642int core_tpg_register(
643 struct target_core_fabric_ops *tfo,
644 struct se_wwn *se_wwn,
645 struct se_portal_group *se_tpg,
646 void *tpg_fabric_ptr,
647 int se_tpg_type)
648{
649 struct se_lun *lun;
650 u32 i;
651
652 se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
653 TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
654 if (!se_tpg->tpg_lun_list) {
655 pr_err("Unable to allocate struct se_portal_group->"
656 "tpg_lun_list\n");
657 return -ENOMEM;
658 }
659
660 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
661 lun = &se_tpg->tpg_lun_list[i];
662 lun->unpacked_lun = i;
663 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
664 atomic_set(&lun->lun_acl_count, 0);
665 init_completion(&lun->lun_shutdown_comp);
666 INIT_LIST_HEAD(&lun->lun_acl_list);
667 INIT_LIST_HEAD(&lun->lun_cmd_list);
668 spin_lock_init(&lun->lun_acl_lock);
669 spin_lock_init(&lun->lun_cmd_lock);
670 spin_lock_init(&lun->lun_sep_lock);
671 }
672
673 se_tpg->se_tpg_type = se_tpg_type;
674 se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
675 se_tpg->se_tpg_tfo = tfo;
676 se_tpg->se_tpg_wwn = se_wwn;
677 atomic_set(&se_tpg->tpg_pr_ref_count, 0);
678 INIT_LIST_HEAD(&se_tpg->acl_node_list);
679 INIT_LIST_HEAD(&se_tpg->se_tpg_node);
680 INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
681 spin_lock_init(&se_tpg->acl_node_lock);
682 spin_lock_init(&se_tpg->session_lock);
683 spin_lock_init(&se_tpg->tpg_lun_lock);
684
685 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
686 if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
687 kfree(se_tpg);
688 return -ENOMEM;
689 }
690 }
691
692 spin_lock_bh(&tpg_lock);
693 list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
694 spin_unlock_bh(&tpg_lock);
695
696 pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
697 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
698 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
699 "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
700 "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
701
702 return 0;
703}
704EXPORT_SYMBOL(core_tpg_register);
705
706int core_tpg_deregister(struct se_portal_group *se_tpg)
707{
708 struct se_node_acl *nacl, *nacl_tmp;
709
710 pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
711 " for endpoint: %s Portal Tag %u\n",
712 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
713 "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
714 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
715 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
716
717 spin_lock_bh(&tpg_lock);
718 list_del(&se_tpg->se_tpg_node);
719 spin_unlock_bh(&tpg_lock);
720
721 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
722 cpu_relax();
723 /*
724 * Release any remaining demo-mode generated se_node_acl that have
725 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
726 * in transport_deregister_session().
727 */
728 spin_lock_irq(&se_tpg->acl_node_lock);
729 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
730 acl_list) {
731 list_del(&nacl->acl_list);
732 se_tpg->num_node_acls--;
733 spin_unlock_irq(&se_tpg->acl_node_lock);
734
735 core_tpg_wait_for_nacl_pr_ref(nacl);
736 core_free_device_list_for_node(nacl, se_tpg);
737 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
738
739 spin_lock_irq(&se_tpg->acl_node_lock);
740 }
741 spin_unlock_irq(&se_tpg->acl_node_lock);
742
743 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
744 core_tpg_release_virtual_lun0(se_tpg);
745
746 se_tpg->se_tpg_fabric_ptr = NULL;
747 kfree(se_tpg->tpg_lun_list);
748 return 0;
749}
750EXPORT_SYMBOL(core_tpg_deregister);
751
752struct se_lun *core_tpg_pre_addlun(
753 struct se_portal_group *tpg,
754 u32 unpacked_lun)
755{
756 struct se_lun *lun;
757
758 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
759 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
760 "-1: %u for Target Portal Group: %u\n",
761 tpg->se_tpg_tfo->get_fabric_name(),
762 unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
763 tpg->se_tpg_tfo->tpg_get_tag(tpg));
764 return ERR_PTR(-EOVERFLOW);
765 }
766
767 spin_lock(&tpg->tpg_lun_lock);
768 lun = &tpg->tpg_lun_list[unpacked_lun];
769 if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
770 pr_err("TPG Logical Unit Number: %u is already active"
771 " on %s Target Portal Group: %u, ignoring request.\n",
772 unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
773 tpg->se_tpg_tfo->tpg_get_tag(tpg));
774 spin_unlock(&tpg->tpg_lun_lock);
775 return ERR_PTR(-EINVAL);
776 }
777 spin_unlock(&tpg->tpg_lun_lock);
778
779 return lun;
780}
781
782int core_tpg_post_addlun(
783 struct se_portal_group *tpg,
784 struct se_lun *lun,
785 u32 lun_access,
786 void *lun_ptr)
787{
788 int ret;
789
790 ret = core_dev_export(lun_ptr, tpg, lun);
791 if (ret < 0)
792 return ret;
793
794 spin_lock(&tpg->tpg_lun_lock);
795 lun->lun_access = lun_access;
796 lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
797 spin_unlock(&tpg->tpg_lun_lock);
798
799 return 0;
800}
801
802static void core_tpg_shutdown_lun(
803 struct se_portal_group *tpg,
804 struct se_lun *lun)
805{
806 core_clear_lun_from_tpg(lun, tpg);
807 transport_clear_lun_from_sessions(lun);
808}
809
810struct se_lun *core_tpg_pre_dellun(
811 struct se_portal_group *tpg,
812 u32 unpacked_lun,
813 int *ret)
814{
815 struct se_lun *lun;
816
817 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
818 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
819 "-1: %u for Target Portal Group: %u\n",
820 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
821 TRANSPORT_MAX_LUNS_PER_TPG-1,
822 tpg->se_tpg_tfo->tpg_get_tag(tpg));
823 return ERR_PTR(-EOVERFLOW);
824 }
825
826 spin_lock(&tpg->tpg_lun_lock);
827 lun = &tpg->tpg_lun_list[unpacked_lun];
828 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
829 pr_err("%s Logical Unit Number: %u is not active on"
830 " Target Portal Group: %u, ignoring request.\n",
831 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
832 tpg->se_tpg_tfo->tpg_get_tag(tpg));
833 spin_unlock(&tpg->tpg_lun_lock);
834 return ERR_PTR(-ENODEV);
835 }
836 spin_unlock(&tpg->tpg_lun_lock);
837
838 return lun;
839}
840
841int core_tpg_post_dellun(
842 struct se_portal_group *tpg,
843 struct se_lun *lun)
844{
845 core_tpg_shutdown_lun(tpg, lun);
846
847 core_dev_unexport(lun->lun_se_dev, tpg, lun);
848
849 spin_lock(&tpg->tpg_lun_lock);
850 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
851 spin_unlock(&tpg->tpg_lun_lock);
852
853 return 0;
854}