Linux v6.8: drivers/target/target_core_tpg.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;
static DEFINE_XARRAY_ALLOC(tpg_xa);

/*	__core_tpg_get_initiator_node_acl():
 *
 *	tpg->acl_node_mutex must be held when calling.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *	Look up a node ACL by initiator name and take a reference on its
 *	acl_kref; returns NULL if no ACL exists or its kref already hit zero.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/*	core_tpg_add_node_to_devs():
 *
 *	Create demo-mode MappedLUNs for a node ACL from the active TPG LUNs,
 *	or from a single LUN when lun_orig is non-NULL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. READ-ONLY.
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

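/* Set the ACL queue depth, clamping a zero value to 1 with a warning. */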
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

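/*
 * Allocate and initialize a new se_node_acl, including the fabric
 * driver's private area (tfo->node_acl_size) beyond sizeof(*acl).
 */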
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}

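/* Link a fully initialized ACL into the owning TPG's acl_node_list. */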
static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name,
		acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

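/* Busy-wait until all persistent reservation references to the ACL drop. */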
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

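/*
 * Explicit (configfs-driven) node ACL creation.  An existing dynamic ACL
 * is converted in place; an existing explicit ACL returns -EEXIST.
 */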
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->fabric_name,
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->fabric_name,
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

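/*
 * Ask the fabric driver to close every session on the ACL that is not
 * already stopping.  The list lock is dropped around ->close_session(),
 * so the scan restarts from the list head after each call.
 */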
static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped))
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del_init(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Update the ACL's queue depth and shut down its active sessions to
 *	force session reinstatement with the new value; a no-op when the
 *	depth is unchanged.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * Allow the setting of se_node_acl queue_depth to be idempotent,
	 * and not force a session shutdown event if the value is not
	 * changing.
	 */
	if (acl->queue_depth == queue_depth)
		return 0;
	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * target_set_nacl_queue_depth() to set the new queue depth.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shutdown all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

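/*
 * Runs once the last reference to lun->lun_ref is dropped; completes
 * lun_shutdown_comp so transport_clear_lun_ref() can finish.
 */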
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}

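/*
 * Claim this TPG's Relative Target Port Identifier in the global tpg_xa:
 * insert the user-chosen value when rtpi_manual is set, otherwise let
 * the xarray allocate one from [1, USHRT_MAX].
 */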
static int target_tpg_register_rtpi(struct se_portal_group *se_tpg)
{
	u32 val;
	int ret;

	if (se_tpg->rtpi_manual) {
		ret = xa_insert(&tpg_xa, se_tpg->tpg_rtpi, se_tpg, GFP_KERNEL);
		if (ret) {
			pr_info("%s_TPG[%hu] - Cannot set RTPI %#x, it is already busy\n",
				se_tpg->se_tpg_tfo->fabric_name,
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
				se_tpg->tpg_rtpi);
			return -EINVAL;
		}
	} else {
		ret = xa_alloc(&tpg_xa, &val, se_tpg,
			       XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
		if (!ret)
			se_tpg->tpg_rtpi = val;
	}

	return ret;
}

static void target_tpg_deregister_rtpi(struct se_portal_group *se_tpg)
{
	if (se_tpg->tpg_rtpi && se_tpg->enabled)
		xa_erase(&tpg_xa, se_tpg->tpg_rtpi);
}

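/*
 * Reserve an RTPI and then let the fabric driver enable the TPG,
 * releasing the RTPI again if the fabric enable fails.
 */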
int target_tpg_enable(struct se_portal_group *se_tpg)
{
	int ret;

	ret = target_tpg_register_rtpi(se_tpg);
	if (ret)
		return ret;

	ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, true);
	if (ret) {
		target_tpg_deregister_rtpi(se_tpg);
		return ret;
	}

	se_tpg->enabled = true;

	return 0;
}

int target_tpg_disable(struct se_portal_group *se_tpg)
{
	int ret;

	target_tpg_deregister_rtpi(se_tpg);

	ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, false);
	if (!ret)
		se_tpg->enabled = false;

	return ret;
}

/* Does not change se_wwn->priv. */
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del_init(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	target_tpg_deregister_rtpi(se_tpg);

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

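/* Allocate a zeroed se_lun and initialize its lists, locks and counters. */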
struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

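/*
 * Export a backend device through a LUN: initialize lun_ref, attach the
 * default ALUA target port group (unless the backend handles ALUA itself
 * or the HBA is internal), and publish the LUN on the TPG's lun hlist.
 */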
int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out:
	return ret;
}

void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);

	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}