v6.8
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;
static DEFINE_XARRAY_ALLOC(tpg_xa);

/*	__core_tpg_get_initiator_node_acl():
 *
 *	Must be called with tpg->acl_node_mutex held.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *	Locked lookup that also takes a reference on the found ACL via
 *	acl_kref; the reference must eventually be dropped again.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
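
/*
 * Illustrative sketch (not part of this file): a fabric driver that
 * looks up an ACL directly would pair the lookup with target_put_nacl()
 * once it is done with it.  The error handling shown here is only an
 * assumption for illustration:
 *
 *	struct se_node_acl *acl;
 *
 *	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *	if (!acl)
 *		return -ENOENT;		// no usable ACL for this initiator
 *	...
 *	target_put_nacl(acl);		// drop the acl_kref taken by the lookup
 */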

void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/*	core_tpg_add_node_to_devs():
 *
 *	Map a node ACL to the TPG's exported LUNs in demo mode, honoring
 *	the fabric's demo-mode write-protect setting.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in a LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. LUNs are READ-ONLY.
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}
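
/*
 * Illustrative sketch (not part of this file): the demo-mode decision
 * above is driven by the fabric's tpg_check_demo_mode_write_protect()
 * callback.  A hypothetical fabric (the my_fabric_* names are
 * assumptions) that exposes it as a TPG attribute might implement it as:
 *
 *	static int my_fabric_check_demo_mode_write_protect(
 *		struct se_portal_group *se_tpg)
 *	{
 *		struct my_fabric_tpg *tpg = container_of(se_tpg,
 *				struct my_fabric_tpg, se_tpg);
 *
 *		return tpg->demo_mode_write_protect;	// 1 => READ-ONLY LUNs
 *	}
 */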

static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}
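
/*
 * Illustrative sketch (not part of this file): the max() of the generic
 * struct size and ->node_acl_size in target_alloc_node_acl() lets a
 * fabric driver embed struct se_node_acl at the head of its own, larger
 * per-initiator structure and recover it with container_of().  The
 * my_fabric_nacl layout below is a hypothetical example:
 *
 *	struct my_fabric_nacl {
 *		struct se_node_acl se_node_acl;	// must stay the first member
 *		u64 port_name;			// fabric-private state
 *	};
 *
 *	// with .node_acl_size = sizeof(struct my_fabric_nacl) in the
 *	// fabric's target_core_fabric_ops:
 *	struct my_fabric_nacl *nacl = container_of(se_nacl,
 *			struct my_fabric_nacl, se_node_acl);
 */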

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name,
		acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
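
/*
 * Illustrative sketch (not part of this file): fabric drivers normally
 * reach core_tpg_check_initiator_node_acl() indirectly, through
 * target_setup_session() while handling a login.  A minimal call, with
 * the zero tag parameters, my_priv pointer, and NULL callback as
 * assumptions for illustration, might look like:
 *
 *	sess = target_setup_session(&tpg->se_tpg, 0, 0, TARGET_PROT_NORMAL,
 *				    initiatorname, my_priv, NULL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */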

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->fabric_name,
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n",  tpg->se_tpg_tfo->fabric_name,
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}
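
/*
 * Illustrative note (not part of this file): this is the explicit-ACL
 * counterpart of core_tpg_check_initiator_node_acl() above, reached
 * from configfs when userspace creates an ACL directory, e.g. for the
 * iSCSI fabric (path shown as an example):
 *
 *	mkdir /sys/kernel/config/target/iscsi/<target_iqn>/tpgt_1/acls/<initiator_iqn>
 *
 * An existing dynamic (demo-mode) ACL for the same initiator is
 * promoted in place rather than replaced.
 */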

static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped))
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del_init(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Update an ACL's queue depth and force session reinstatement so the
 *	initiator logs back in with the new value.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * Allow the setting of se_node_acl queue_depth to be idempotent,
	 * and not force a session shutdown event if the value is not
	 * changing.
	 */
	if (acl->queue_depth == queue_depth)
		return 0;
	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * target_set_nacl_queue_depth() to set the new queue depth.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shutdown all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
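
/*
 * Illustrative note (not part of this file): fabrics expose this
 * through a per-ACL configfs attribute; for the iSCSI fabric, for
 * example, writing the attribute ends up here and forces session
 * reinstatement so the initiator re-logins with the new depth:
 *
 *	echo 64 > .../tpgt_1/acls/<initiator_iqn>/cmdsn_depth
 */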

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
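
/*
 * Illustrative note (not part of this file): the tag is a plain string
 * typically surfaced as a "tag" attribute in the ACL's configfs
 * directory, purely for userspace grouping; writing the literal string
 * "NULL" clears it:
 *
 *	echo alias0 > .../acls/<initiator_iqn>/tag
 *	echo NULL > .../acls/<initiator_iqn>/tag	// clear the tag
 */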

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}

static int target_tpg_register_rtpi(struct se_portal_group *se_tpg)
{
	u32 val;
	int ret;

	if (se_tpg->rtpi_manual) {
		ret = xa_insert(&tpg_xa, se_tpg->tpg_rtpi, se_tpg, GFP_KERNEL);
		if (ret) {
			pr_info("%s_TPG[%hu] - Cannot set RTPI %#x, it is already busy\n",
				se_tpg->se_tpg_tfo->fabric_name,
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
				se_tpg->tpg_rtpi);
			return -EINVAL;
		}
	} else {
		ret = xa_alloc(&tpg_xa, &val, se_tpg,
			       XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
		if (!ret)
			se_tpg->tpg_rtpi = val;
	}

	return ret;
}
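
/*
 * Illustrative note (not part of this file): RELATIVE TARGET PORT
 * IDENTIFIERs are non-zero 16-bit values in SAM/SPC, hence the
 * XA_LIMIT(1, USHRT_MAX) window above.  xa_insert() fails (-EBUSY) when
 * a manually configured RTPI is already taken, while xa_alloc() picks
 * the first free id in the window for the automatic case.
 */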

static void target_tpg_deregister_rtpi(struct se_portal_group *se_tpg)
{
	if (se_tpg->tpg_rtpi && se_tpg->enabled)
		xa_erase(&tpg_xa, se_tpg->tpg_rtpi);
}

int target_tpg_enable(struct se_portal_group *se_tpg)
{
	int ret;

	ret = target_tpg_register_rtpi(se_tpg);
	if (ret)
		return ret;

	ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, true);
	if (ret) {
		target_tpg_deregister_rtpi(se_tpg);
		return ret;
	}

	se_tpg->enabled = true;

	return 0;
}

int target_tpg_disable(struct se_portal_group *se_tpg)
{
	int ret;

	target_tpg_deregister_rtpi(se_tpg);

	ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, false);
	if (!ret)
		se_tpg->enabled = false;

	return ret;
}

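/*
 * Illustrative note (not part of this file): target_tpg_enable() and
 * target_tpg_disable() back a TPG "enable" configfs attribute for
 * fabrics that implement ->fabric_enable_tpg(); for the iSCSI fabric,
 * for example:
 *
 *	echo 1 > /sys/kernel/config/target/iscsi/<target_iqn>/tpgt_1/enable
 *
 * The RTPI is registered while the TPG is enabled and released again on
 * disable, matching the se_tpg->enabled check in
 * target_tpg_deregister_rtpi().
 */
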
/* Does not change se_wwn->priv. */
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
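
/*
 * Illustrative sketch (not part of this file): the usual caller is a
 * fabric driver's ->fabric_make_tpg() configfs hook.  The my_fabric_*
 * names are assumptions for illustration:
 *
 *	static struct se_portal_group *my_fabric_make_tpg(
 *		struct se_wwn *wwn, const char *name)
 *	{
 *		struct my_fabric_tpg *tpg;
 *		int ret;
 *
 *		tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 *		if (!tpg)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = core_tpg_register(wwn, &tpg->se_tpg,
 *					SCSI_PROTOCOL_ISCSI);
 *		if (ret < 0) {
 *			kfree(tpg);
 *			return ERR_PTR(ret);
 *		}
 *		return &tpg->se_tpg;
 *	}
 */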

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acls that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del_init(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	target_tpg_deregister_rtpi(se_tpg);

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out:
	return ret;
}
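
/*
 * Illustrative sketch (not part of this file): core_tpg_alloc_lun() and
 * core_tpg_add_lun() are used as a pair, as in the tpg_virt_lun0 setup
 * in core_tpg_register() above:
 *
 *	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *
 *	ret = core_tpg_add_lun(tpg, lun, lun_access_ro, dev);
 *	if (ret < 0)
 *		kfree(lun);	// not yet published, so a plain kfree suffices
 */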

void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);

	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}