v3.1
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"
#include "target_core_stat.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	core_clear_initiator_node_from_tpg():
 *
 *
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_lun_acl *acl, *acl_tmp, *match;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		/*
		 * The list cursor is never NULL after the loop completes,
		 * so track the match explicitly instead of testing the
		 * cursor pointer.
		 */
		match = NULL;
		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry_safe(acl, acl_tmp,
					&lun->lun_acl_list, lacl_list) {
			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
			    (acl->mapped_lun == deve->mapped_lun)) {
				match = acl;
				break;
			}
		}

		if (!match) {
			pr_err("Unable to locate struct se_lun_acl for %s,"
				" mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
			spin_lock_irq(&nacl->device_list_lock);
			continue;
		}

		list_del(&match->lacl_list);
		spin_unlock(&lun->lun_acl_lock);

		spin_lock_irq(&nacl->device_list_lock);
		kfree(match);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	spin_lock_bh(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	return NULL;
}

/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/*	core_set_queue_depth_for_node():
 *
 *
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

/*	core_create_device_list_for_node():
 *
 *
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/*	core_tpg_check_initiator_node_acl()
 *
 *
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
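
/*
 * Editor's usage sketch, not part of the original file: a fabric module
 * would typically resolve (or demo-mode allocate) the node ACL while
 * building a new session, before registering the session with
 * target-core.  The function and variable names below are hypothetical.
 */
#if 0	/* illustration only */
static int example_fabric_login(struct se_portal_group *se_tpg,
				struct se_session *se_sess,
				unsigned char *initiatorname,
				void *fabric_ptr)
{
	struct se_node_acl *nacl;

	nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
	if (!nacl)
		return -EACCES;	/* demo mode off and no explicit ACL */

	transport_register_session(se_tpg, nacl, se_sess, fabric_ptr);
	return 0;
}
#endif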

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i, ret;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*	core_tpg_add_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);

/*	core_tpg_del_initiator_node_acl():
 *
 *
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		tpg->se_tpg_tfo->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
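
/*
 * Editor's usage sketch, not part of the original file: this export is
 * normally driven from a fabric module's configfs attribute store path
 * when the administrator writes a new cmdsn_depth.  The names below are
 * hypothetical.
 */
#if 0	/* illustration only */
static int example_set_depth(struct se_portal_group *se_tpg,
			     unsigned char *initiatorname, u32 depth)
{
	/* force=1 allows session reinstatement if a session is live. */
	return core_tpg_set_initiator_node_queue_depth(se_tpg,
						initiatorname, depth, 1);
}
#endif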

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			/*
			 * se_tpg is owned by the caller; free only the
			 * locally allocated LUN array on failure.
			 */
			kfree(se_tpg->tpg_lun_list);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
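
/*
 * Editor's usage sketch, not part of the original file: a fabric module
 * typically calls core_tpg_register() from its configfs fabric_make_tpg()
 * callback.  example_fabric_ops, example_tpg and the surrounding code are
 * hypothetical.
 */
#if 0	/* illustration only */
	ret = core_tpg_register(&example_fabric_ops, wwn, &example_tpg->se_tpg,
				example_tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0)
		return ERR_PTR(ret);
#endif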

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun,
	int *ret)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
v4.6
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	__core_tpg_get_initiator_node_acl():
 *
 *	mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
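
/*
 * Editor's note, not part of the original file: the acl_kref taken above
 * is dropped at session teardown.  A hedged sketch of the pairing, using
 * target_put_nacl() (the release path used elsewhere in this file):
 */
#if 0	/* illustration only */
	nacl = core_tpg_get_initiator_node_acl(tpg, name);	/* +1 acl_kref */
	/* ... session is live ... */
	target_put_nacl(nacl);					/* -1 acl_kref */
#endif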

void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this dynamic
		 * LUN ACL now..
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(),
		acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
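
/*
 * Editor's usage sketch, not part of the original file: as in v3.1, a
 * fabric driver resolves the ACL while building a new session; in v4.6
 * the returned ACL additionally carries an acl_kref reference that is
 * dropped in transport_free_session().  Names below are hypothetical.
 */
#if 0	/* illustration only */
static int example_fabric_login(struct se_portal_group *se_tpg,
				struct se_session *se_sess,
				unsigned char *initiatorname)
{
	se_sess->se_node_acl =
		core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
	if (!se_sess->se_node_acl)
		return -EACCES;	/* demo mode off and no explicit ACL */
	return 0;
}
#endif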

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
	}
	list_del(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		if (!target_get_session(sess))
			continue;
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	LIST_HEAD(sess_list);
	struct se_portal_group *tpg = acl->se_tpg;
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * target_set_nacl_queue_depth() to set the new queue depth.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				 sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;
		if (!target_get_session(sess))
			continue;
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		/*
		 * Finally call tpg->se_tpg_tfo->close_session() to force session
		 * reinstatement to occur if there is an active session for the
		 * $FABRIC_MOD Initiator Node in question.
		 */
		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc) {
			spin_lock_irqsave(&acl->nacl_sess_lock, flags);
			continue;
		}
		target_put_session(sess);
		spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
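
/*
 * Editor's usage sketch, not part of the original file: a configfs store
 * handler might set or clear an ACL tag like this; writing the literal
 * string "NULL" clears it.  Variable names are hypothetical.
 */
#if 0	/* illustration only */
	rc = core_tpg_set_initiator_node_tag(tpg, acl, "backup-hosts");
	if (rc < 0)
		return rc;
	rc = core_tpg_set_initiator_node_tag(tpg, acl, "NULL"); /* clears tag */
#endif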

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}

int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
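
/*
 * Editor's usage sketch, not part of the original file: with the v4.6
 * signature the fabric ops come from the se_wwn, so fabric_make_tpg()
 * only passes the wwn, the embedded se_tpg, and a SCSI protocol id.
 * example_tpg is hypothetical; SCSI_PROTOCOL_ISCSI is from scsi_proto.h.
 */
#if 0	/* illustration only */
	ret = core_tpg_register(wwn, &example_tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
	if (ret < 0)
		return ERR_PTR(ret);
#endif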

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	lun->lun_link_magic = SE_LUN_LINK_MAGIC;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_ref_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}

void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}
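
/*
 * Editor's usage sketch, not part of the original file: the percpu
 * lun_ref initialized in core_tpg_add_lun() is what the I/O path takes
 * to pin a LUN while a command runs; core_tpg_remove_lun() drains it via
 * transport_clear_lun_ref().  The helper name below is hypothetical.
 */
#if 0	/* illustration only */
static int example_pin_lun(struct se_lun *lun)
{
	if (!percpu_ref_tryget_live(&lun->lun_ref))
		return -ENODEV;	/* LUN is being removed */
	/* ... submit the command ... */
	percpu_ref_put(&lun->lun_ref);
	return 0;
}
#endif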