/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"
#include "target_core_stat.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/* core_clear_initiator_node_from_tpg():
 *
 * Clear all mapped LUN entries and release any struct se_lun_acl
 * references for this initiator node ACL from the given TPG.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	bool found;
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_lun_acl *acl, *acl_tmp;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		found = false;
		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry_safe(acl, acl_tmp,
					&lun->lun_acl_list, lacl_list) {
			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
			    (acl->mapped_lun == deve->mapped_lun)) {
				found = true;
				break;
			}
		}

		if (!found) {
			pr_err("Unable to locate struct se_lun_acl for %s,"
				" mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
			spin_lock_irq(&nacl->device_list_lock);
			continue;
		}

		list_del(&acl->lacl_list);
		spin_unlock(&lun->lun_acl_lock);

		spin_lock_irq(&nacl->device_list_lock);
		kfree(acl);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* __core_tpg_get_initiator_node_acl():
 *
 * spin_lock_irq(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/* core_tpg_get_initiator_node_acl():
 *
 * Return an explicitly configured (non-dynamic) node ACL matching
 * initiatorname, or NULL if none exists.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	return NULL;
}

/* core_tpg_add_node_to_devs():
 *
 * Create demo-mode MappedLUNs for every active LUN in the TPG for
 * this node ACL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/* core_set_queue_depth_for_node():
 *
 * Sanity check the node ACL queue depth, defaulting to 1 if zero.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

/* core_create_device_list_for_node():
 *
 * Allocate and initialize the node ACL's device list, with one
 * struct se_dev_entry per possible LUN in the TPG.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/* core_tpg_check_initiator_node_acl()
 *
 * Called during fabric session login: returns an existing node ACL,
 * or generates a dynamic ACL when the fabric allows demo mode.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
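
/*
 * Usage sketch (illustrative only, not part of this file): a fabric
 * driver typically resolves the ACL from its login path before binding
 * the new se_session.  The "login" and "sess" names are hypothetical:
 *
 *	struct se_node_acl *se_nacl;
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg,
 *			login->initiator_name);
 *	if (!se_nacl)
 *		return -EACCES;
 *	sess->se_node_acl = se_nacl;
 */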

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i, ret;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/* core_tpg_add_initiator_node_acl():
 *
 * Called from configfs to create an explicit node ACL, converting an
 * existing dynamic ACL in place when one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
						se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
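
/*
 * Usage sketch (illustrative): a fabric module's ->fabric_make_nodeacl()
 * callback allocates its fabric-specific ACL container and registers it
 * here.  "my_nacl" and MY_NEXUS_DEPTH are hypothetical names:
 *
 *	struct my_nacl *nacl = kzalloc(sizeof(*nacl), GFP_KERNEL);
 *	struct se_node_acl *se_nacl;
 *
 *	if (!nacl)
 *		return ERR_PTR(-ENOMEM);
 *	se_nacl = core_tpg_add_initiator_node_acl(se_tpg,
 *			&nacl->se_node_acl, name, MY_NEXUS_DEPTH);
 *	if (IS_ERR(se_nacl)) {
 *		kfree(nacl);
 *		return ERR_CAST(se_nacl);
 *	}
 */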

/* core_tpg_del_initiator_node_acl():
 *
 * Called from configfs to remove a node ACL, shutting down any active
 * sessions for the Initiator Node in the process.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		tpg->se_tpg_tfo->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/* core_tpg_set_initiator_node_queue_depth():
 *
 * Change the queue depth for a node ACL; with force=1 any active
 * session is shut down to force session reinstatement with the new
 * queue depth.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
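
/*
 * Usage sketch (illustrative): a configfs attribute store handler might
 * apply a user-supplied depth with force=1; names are hypothetical:
 *
 *	u32 depth;
 *	int ret = kstrtou32(page, 0, &depth);
 *
 *	if (ret < 0)
 *		return ret;
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			acl->initiatorname, depth, 1);
 *	return ret ? ret : count;
 */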

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			kfree(se_tpg->tpg_lun_list);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
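
/*
 * Usage sketch (illustrative): a fabric module's ->fabric_make_tpg()
 * callback registers its newly allocated TPG with target-core.  "my_tpg"
 * is a hypothetical fabric-private structure embedding
 * struct se_portal_group:
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *			my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0) {
 *		kfree(my_tpg);
 *		return ERR_PTR(ret);
 *	}
 */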

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun,
	int *ret)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/* __core_tpg_get_initiator_node_acl():
 *
 * mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/* core_tpg_get_initiator_node_acl():
 *
 * Look up a node ACL by initiator name and take a reference on its
 * acl_kref for the caller.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/* core_tpg_add_node_to_devs():
 *
 * Create demo-mode MappedLUNs for this node ACL from the TPG's active
 * LUNs, or from lun_orig alone when it is non-NULL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this dynamic
		 * LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}
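
/*
 * Note: the kzalloc() above sizes the ACL as
 * max(sizeof(struct se_node_acl), tfo->node_acl_size), so a fabric
 * driver declaring a larger ->node_acl_size gets its private
 * per-initiator state carved out of the same allocation, typically by
 * embedding struct se_node_acl as the first member of its own ACL
 * structure and converting back with container_of().
 */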

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name,
		acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);
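
/*
 * Usage sketch (illustrative): a fabric driver can use this helper in
 * its login error path to distinguish an unknown initiator from an ACL
 * that exists but failed for another reason; names are hypothetical:
 *
 *	if (!target_tpg_has_node_acl(se_tpg, login->initiator_name))
 *		pr_warn("Unknown initiator %s rejected\n",
 *			login->initiator_name);
 */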

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
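
/*
 * Usage sketch (illustrative): fabric login paths resolve the session's
 * ACL through this helper; the acl_kref taken here is dropped later via
 * transport_free_session().  "login" and "sess" are hypothetical:
 *
 *	sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
 *			login->initiator_name);
 *	if (!sess->se_node_acl)
 *		return -EACCES;
 */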

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->fabric_name,
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->fabric_name,
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (sess->sess_tearing_down)
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del_init(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

	kfree(acl);
}

/* core_tpg_set_initiator_node_queue_depth():
 *
 * Change the queue depth for a node ACL, shutting down any active
 * sessions so they are reinstated with the new depth.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * Allow the setting of se_node_acl queue_depth to be idempotent,
	 * and not force a session shutdown event if the value is not
	 * changing.
	 */
	if (acl->queue_depth == queue_depth)
		return 0;
	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * target_set_nacl_queue_depth() to set the new queue depth.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shutdown all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/* core_tpg_set_initiator_node_tag():
 *
 * Initiator nodeacl tags are not used internally, but may be used by
 * userspace to emulate aliases or groups.
 * Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
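
/*
 * Usage sketch (illustrative): a configfs "acl_tag" store handler can
 * forward the user-supplied buffer directly:
 *
 *	ret = core_tpg_set_initiator_node_tag(acl->se_tpg, acl, page);
 *	if (ret < 0)
 *		return ret;
 *	return count;
 */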

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}

/* Does not change se_wwn->priv. */
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
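
/*
 * Usage sketch (illustrative): a fabric module's ->fabric_make_tpg()
 * callback registers the new TPG with its SCSI protocol identifier.
 * "my_tpg" is a hypothetical fabric-private structure embedding
 * struct se_portal_group:
 *
 *	ret = core_tpg_register(wwn, &my_tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *	if (ret < 0) {
 *		kfree(my_tpg);
 *		return ERR_PTR(ret);
 *	}
 */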

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del_init(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}
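
/*
 * Usage sketch (illustrative): core_tpg_alloc_lun() and core_tpg_add_lun()
 * are used as a pair when exporting a device as a new LUN:
 *
 *	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return lun;
 *	ret = core_tpg_add_lun(tpg, lun, false, dev);
 *	if (ret < 0) {
 *		kfree(lun);
 *		return ERR_PTR(ret);
 *	}
 */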

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags &
	     TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}

void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);

	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}
665}