/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
 28
 29#include <linux/net.h>
 30#include <linux/string.h>
 31#include <linux/timer.h>
 32#include <linux/slab.h>
 33#include <linux/spinlock.h>
 34#include <linux/in.h>
 35#include <linux/export.h>
 36#include <net/sock.h>
 37#include <net/tcp.h>
 38#include <scsi/scsi.h>
 39#include <scsi/scsi_cmnd.h>
 40
 41#include <target/target_core_base.h>
 42#include <target/target_core_backend.h>
 43#include <target/target_core_fabric.h>
 44
 45#include "target_core_internal.h"
 46
 47extern struct se_device *g_lun0_dev;
 48
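/* Global list of all registered portal groups, protected by tpg_lock. */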
static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	core_clear_initiator_node_from_tpg():
 *
 *	Drop all mapped LUN access for a node ACL that is being released.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entry's device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
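		/*
		 * Drop device_list_lock across the update below;
		 * core_update_device_list_for_node() acquires
		 * nacl->device_list_lock itself.
		 */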
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	tpg->acl_node_lock must be held when calling.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *	Locked lookup that only returns explicitly configured (non-dynamic)
 *	node ACLs.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	return NULL;
}

/*	core_tpg_add_node_to_devs():
 *
 *	Export all active TPG LUNs to a new demo-mode node ACL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

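		/*
		 * Drop tpg_lun_lock before updating the ACL's device list;
		 * core_update_device_list_for_node() takes
		 * acl->device_list_lock itself.
		 */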
		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in a LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. READ-ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/*	core_set_queue_depth_for_node():
 *
 *	Sanity-check an ACL's queue depth, falling back to a minimum of 1.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

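/*
 * array_zalloc()/array_free() manage an array of 'n' individually
 * allocated, zeroed objects.  They back the per-ACL device_list and the
 * per-TPG tpg_lun_list below.
 */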
static void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kzalloc(n * sizeof(void *), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}

/*	core_create_device_list_for_node():
 *
 *	Allocate and initialize a node ACL's per-LUN device entry list.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/*	core_tpg_check_initiator_node_acl()
 *
 *	Called from a fabric's login path to locate an existing node ACL,
 *	or to create a dynamic one when the TPG is running in demo mode.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
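
/*
 * Example (sketch, not taken from a specific fabric): a fabric module
 * typically calls core_tpg_check_initiator_node_acl() from its login
 * path once the initiator name is known, rejecting the login when no
 * explicit ACL exists and demo mode is disabled:
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, iname);
 *	if (!se_nacl)
 *		return <fabric-specific login failure>;
 */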

/*
 * Busy-wait until all SPC-3 persistent reservation references to this
 * node ACL have been dropped.
 */
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*	core_tpg_add_initiator_node_acl():
 *
 *	Called from configfs to add an explicit node ACL, taking over from
 *	an existing dynamic (demo-mode) ACL when one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the struct se_node_acl is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);

/*	core_tpg_del_initiator_node_acl():
 *
 *	Called from configfs to remove a node ACL, shutting down any of its
 *	remaining active sessions first.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

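	/*
	 * Shut down the collected sessions outside of nacl_sess_lock,
	 * as ->shutdown_session() may block.
	 */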
	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

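		/*
		 * The first target_put_session() drops the reference taken
		 * while walking acl_sess_list above; if the fabric reported
		 * that the session is being shut down in our context, drop
		 * the session's own reference as well.
		 */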
		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth for a node ACL, optionally forcing session
 *	reinstatement when the initiator is logged in.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
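
/*
 * Example (sketch, assuming a fabric configfs "cmdsn_depth"-style
 * attribute): the store method would pass the new value through with
 * force=1 to trigger session reinstatement, e.g.:
 *
 *	rc = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			acl->initiatorname, new_depth, 1);
 */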
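
/*
 * Each normal TPG exposes a virtual LUN 0, backed by the global device
 * set up in core_dev_setup_virtual_lun0(), so that an initiator with no
 * explicitly mapped LUN 0 can still complete discovery (e.g. INQUIRY
 * and REPORT LUNS).
 */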
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			/*
			 * Free only what was allocated above; se_tpg itself
			 * is owned by the calling fabric module.
			 */
			array_free(se_tpg->tpg_lun_list,
					TRANSPORT_MAX_LUNS_PER_TPG);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
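
/*
 * Example (sketch, with a hypothetical my_fabric_ops): a fabric module
 * calls core_tpg_register() from its configfs ->fabric_make_tpg()
 * callback once its own TPG structure is allocated:
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &tpg->se_tpg,
 *			tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		<free tpg and fail the mkdir>;
 */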

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

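/*
 * core_tpg_pre_addlun()/core_tpg_post_addlun() are the two halves of LUN
 * creation (see core_dev_add_lun() in target_core_device.c): "pre"
 * validates the LUN number and returns the free slot, "post" exports the
 * device and marks the LUN active.
 */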
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}