   1/*******************************************************************************
   2 * Filename:  target_core_alua.c
   3 *
   4 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
   5 *
   6 * Copyright (c) 2009-2010 Rising Tide Systems
   7 * Copyright (c) 2009-2010 Linux-iSCSI.org
   8 *
   9 * Nicholas A. Bellinger <nab@kernel.org>
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2 of the License, or
  14 * (at your option) any later version.
  15 *
  16 * This program is distributed in the hope that it will be useful,
  17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  19 * GNU General Public License for more details.
  20 *
  21 * You should have received a copy of the GNU General Public License
  22 * along with this program; if not, write to the Free Software
  23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  24 *
  25 ******************************************************************************/
  26
  27#include <linux/version.h>
  28#include <linux/slab.h>
  29#include <linux/spinlock.h>
  30#include <linux/configfs.h>
  31#include <scsi/scsi.h>
  32#include <scsi/scsi_cmnd.h>
  33
  34#include <target/target_core_base.h>
  35#include <target/target_core_device.h>
  36#include <target/target_core_transport.h>
  37#include <target/target_core_fabric_ops.h>
  38#include <target/target_core_configfs.h>
  39
  40#include "target_core_alua.h"
  41#include "target_core_hba.h"
  42#include "target_core_ua.h"
  43
  44static int core_alua_check_transition(int state, int *primary);
  45static int core_alua_set_tg_pt_secondary_state(
  46		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
  47		struct se_port *port, int explict, int offline);
  48
  49static u16 alua_lu_gps_counter;
  50static u32 alua_lu_gps_count;
  51
  52static DEFINE_SPINLOCK(lu_gps_lock);
  53static LIST_HEAD(lu_gps_list);
  54
  55struct t10_alua_lu_gp *default_lu_gp;
  56
  57/*
  58 * REPORT_TARGET_PORT_GROUPS
  59 *
  60 * See spc4r17 section 6.27
  61 */
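/*
 * For reference, a sketch (added to this listing, not an original driver
 * comment) of the parameter data built by this handler:
 *
 *	Bytes 0-3: RETURN DATA LENGTH (big-endian, excludes these four bytes)
 *	Then one 8-byte target port group descriptor per group:
 *	  Byte 0   : PREF bit (0x80) | ASYMMETRIC ACCESS STATE (low nibble)
 *	  Byte 1   : supported states (T_SUP, O_SUP, U_SUP, S_SUP, AN_SUP, AO_SUP)
 *	  Bytes 2-3: TARGET PORT GROUP identifier (big-endian)
 *	  Byte 4   : Reserved
 *	  Byte 5   : STATUS CODE
 *	  Byte 6   : Vendor specific
 *	  Byte 7   : TARGET PORT COUNT
 *	followed by one 4-byte target port descriptor per member port
 *	(two obsolete bytes, then the RELATIVE TARGET PORT IDENTIFIER).
 */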
  62int core_emulate_report_target_port_groups(struct se_cmd *cmd)
  63{
  64	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
  65	struct se_port *port;
  66	struct t10_alua_tg_pt_gp *tg_pt_gp;
  67	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
  68	unsigned char *buf;
  69	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
  70				    Target port group descriptor */
  71
  72	buf = transport_kmap_first_data_page(cmd);
  73
  74	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
  75	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
  76			tg_pt_gp_list) {
  77		/*
  78		 * PREF: Preferred target port bit, determine if this
  79		 * bit should be set for port group.
  80		 */
  81		if (tg_pt_gp->tg_pt_gp_pref)
  82			buf[off] = 0x80;
  83		/*
  84		 * Set the ASYMMETRIC ACCESS State
  85		 */
  86		buf[off++] |= (atomic_read(
  87			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
  88		/*
  89		 * Set supported ASYMMETRIC ACCESS State bits
  90		 */
  91		buf[off] = 0x80; /* T_SUP */
  92		buf[off] |= 0x40; /* O_SUP */
  93		buf[off] |= 0x8; /* U_SUP */
  94		buf[off] |= 0x4; /* S_SUP */
  95		buf[off] |= 0x2; /* AN_SUP */
  96		buf[off++] |= 0x1; /* AO_SUP */
  97		/*
  98		 * TARGET PORT GROUP
  99		 */
 100		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
 101		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
 102
 103		off++; /* Skip over Reserved */
 104		/*
 105		 * STATUS CODE
 106		 */
 107		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
 108		/*
 109		 * Vendor Specific field
 110		 */
 111		buf[off++] = 0x00;
 112		/*
 113		 * TARGET PORT COUNT
 114		 */
 115		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
 116		rd_len += 8;
 117
 118		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 119		list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
 120				tg_pt_gp_mem_list) {
 121			port = tg_pt_gp_mem->tg_pt;
 122			/*
 123			 * Start Target Port descriptor format
 124			 *
 125			 * See spc4r17 section 6.2.7 Table 247
 126			 */
 127			off += 2; /* Skip over Obsolete */
 128			/*
 129			 * Set RELATIVE TARGET PORT IDENTIFIER
 130			 */
 131			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
 132			buf[off++] = (port->sep_rtpi & 0xff);
 133			rd_len += 4;
 134		}
 135		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 136	}
 137	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 138	/*
  139	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
 140	 */
 141	buf[0] = ((rd_len >> 24) & 0xff);
 142	buf[1] = ((rd_len >> 16) & 0xff);
 143	buf[2] = ((rd_len >> 8) & 0xff);
 144	buf[3] = (rd_len & 0xff);
 145
 146	transport_kunmap_first_data_page(cmd);
 147
 148	return 0;
 149}
 150
 151/*
  152 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 153 *
 154 * See spc4r17 section 6.35
 155 */
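/*
 * For reference (added note, not an original driver comment): the parameter
 * list parsed below starts with a 4-byte header that is skipped, followed by
 * one 4-byte set target port group descriptor per requested transition:
 *
 *	Byte 0   : ASYMMETRIC ACCESS STATE (low nibble)
 *	Byte 1   : Reserved
 *	Bytes 2-3: TARGET PORT GROUP (primary states) or RELATIVE TARGET PORT
 *	           IDENTIFIER (secondary states), big-endian
 */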
 156int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 157{
 158	struct se_device *dev = cmd->se_dev;
 159	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 160	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
 161	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 162	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 163	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
 164	unsigned char *buf;
 165	unsigned char *ptr;
 166	u32 len = 4; /* Skip over RESERVED area in header */
 167	int alua_access_state, primary = 0, rc;
 168	u16 tg_pt_id, rtpi;
 169
 170	if (!l_port)
 171		return PYX_TRANSPORT_LU_COMM_FAILURE;
 172
 173	buf = transport_kmap_first_data_page(cmd);
 174
 175	/*
  176	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
 177	 * for the local tg_pt_gp.
 178	 */
 179	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
 180	if (!l_tg_pt_gp_mem) {
 181		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
 182		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 183		goto out;
 184	}
 185	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 186	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
 187	if (!l_tg_pt_gp) {
 188		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 189		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
 190		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 191		goto out;
 192	}
 193	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
 194	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 195
 196	if (!rc) {
 197		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
 198				" while TPGS_EXPLICT_ALUA is disabled\n");
 199		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 200		goto out;
 201	}
 202
 203	ptr = &buf[4]; /* Skip over RESERVED area in header */
 204
 205	while (len < cmd->data_length) {
 206		alua_access_state = (ptr[0] & 0x0f);
 207		/*
 208		 * Check the received ALUA access state, and determine if
 209		 * the state is a primary or secondary target port asymmetric
 210		 * access state.
 211		 */
 212		rc = core_alua_check_transition(alua_access_state, &primary);
 213		if (rc != 0) {
 214			/*
 215			 * If the SET TARGET PORT GROUPS attempts to establish
 216			 * an invalid combination of target port asymmetric
 217			 * access states or attempts to establish an
 218			 * unsupported target port asymmetric access state,
 219			 * then the command shall be terminated with CHECK
 220			 * CONDITION status, with the sense key set to ILLEGAL
 221			 * REQUEST, and the additional sense code set to INVALID
 222			 * FIELD IN PARAMETER LIST.
 223			 */
 224			rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 225			goto out;
 226		}
 227		rc = -1;
 228		/*
 229		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
 230		 * specifies a primary target port asymmetric access state,
 231		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
 232		 * a primary target port group for which the primary target
 233		 * port asymmetric access state shall be changed. If the
 234		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
 235		 * port asymmetric access state, then the TARGET PORT GROUP OR
 236		 * TARGET PORT field specifies the relative target port
 237		 * identifier (see 3.1.120) of the target port for which the
 238		 * secondary target port asymmetric access state shall be
 239		 * changed.
 240		 */
 241		if (primary) {
  242			tg_pt_id = ((ptr[2] << 8) & 0xff00);
 243			tg_pt_id |= (ptr[3] & 0xff);
 244			/*
 245			 * Locate the matching target port group ID from
 246			 * the global tg_pt_gp list
 247			 */
 248			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 249			list_for_each_entry(tg_pt_gp,
 250					&su_dev->t10_alua.tg_pt_gps_list,
 251					tg_pt_gp_list) {
 252				if (!tg_pt_gp->tg_pt_gp_valid_id)
 253					continue;
 254
 255				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
 256					continue;
 257
 258				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 259				smp_mb__after_atomic_inc();
 260				spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 261
 262				rc = core_alua_do_port_transition(tg_pt_gp,
 263						dev, l_port, nacl,
 264						alua_access_state, 1);
 265
 266				spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 267				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 268				smp_mb__after_atomic_dec();
 269				break;
 270			}
 271			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 272			/*
  273			 * If no matching target port group ID can be located,
 274			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
 275			 */
 276			if (rc != 0) {
 277				rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 278				goto out;
 279			}
 280		} else {
 281			/*
  282			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
  283			 * the Target Port in question for the incoming
 284			 * SET_TARGET_PORT_GROUPS op.
 285			 */
  286			rtpi = ((ptr[2] << 8) & 0xff00);
 287			rtpi |= (ptr[3] & 0xff);
 288			/*
  289			 * Locate the matching relative target port identifier
 290			 * for the struct se_device storage object.
 291			 */
 292			spin_lock(&dev->se_port_lock);
 293			list_for_each_entry(port, &dev->dev_sep_list,
 294							sep_list) {
 295				if (port->sep_rtpi != rtpi)
 296					continue;
 297
 298				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
 299				spin_unlock(&dev->se_port_lock);
 300
 301				rc = core_alua_set_tg_pt_secondary_state(
 302						tg_pt_gp_mem, port, 1, 1);
 303
 304				spin_lock(&dev->se_port_lock);
 305				break;
 306			}
 307			spin_unlock(&dev->se_port_lock);
 308			/*
  309			 * If no matching relative target port identifier can
 310			 * be located, throw an exception with ASCQ:
 311			 * INVALID_PARAMETER_LIST
 312			 */
 313			if (rc != 0) {
 314				rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 315				goto out;
 316			}
 317		}
 318
 319		ptr += 4;
 320		len += 4;
 321	}
 322
 323out:
 324	transport_kunmap_first_data_page(cmd);
 325
 326	return 0;
 327}
 328
 329static inline int core_alua_state_nonoptimized(
 330	struct se_cmd *cmd,
 331	unsigned char *cdb,
 332	int nonop_delay_msecs,
 333	u8 *alua_ascq)
 334{
 335	/*
 336	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
 337	 * later to determine if processing of this cmd needs to be
 338	 * temporarily delayed for the Active/NonOptimized primary access state.
 339	 */
 340	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
 341	cmd->alua_nonop_delay = nonop_delay_msecs;
 342	return 0;
 343}
 344
 345static inline int core_alua_state_standby(
 346	struct se_cmd *cmd,
 347	unsigned char *cdb,
 348	u8 *alua_ascq)
 349{
 350	/*
 351	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
 352	 * spc4r17 section 5.9.2.4.4
 353	 */
 354	switch (cdb[0]) {
 355	case INQUIRY:
 356	case LOG_SELECT:
 357	case LOG_SENSE:
 358	case MODE_SELECT:
 359	case MODE_SENSE:
 360	case REPORT_LUNS:
 361	case RECEIVE_DIAGNOSTIC:
 362	case SEND_DIAGNOSTIC:
 363	case MAINTENANCE_IN:
 364		switch (cdb[1]) {
 365		case MI_REPORT_TARGET_PGS:
 366			return 0;
 367		default:
 368			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
 369			return 1;
 370		}
 371	case MAINTENANCE_OUT:
 372		switch (cdb[1]) {
 373		case MO_SET_TARGET_PGS:
 374			return 0;
 375		default:
 376			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
 377			return 1;
 378		}
 379	case REQUEST_SENSE:
 380	case PERSISTENT_RESERVE_IN:
 381	case PERSISTENT_RESERVE_OUT:
 382	case READ_BUFFER:
 383	case WRITE_BUFFER:
 384		return 0;
 385	default:
 386		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
 387		return 1;
 388	}
 389
 390	return 0;
 391}
 392
 393static inline int core_alua_state_unavailable(
 394	struct se_cmd *cmd,
 395	unsigned char *cdb,
 396	u8 *alua_ascq)
 397{
 398	/*
 399	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
 400	 * spc4r17 section 5.9.2.4.5
 401	 */
 402	switch (cdb[0]) {
 403	case INQUIRY:
 404	case REPORT_LUNS:
 405	case MAINTENANCE_IN:
 406		switch (cdb[1]) {
 407		case MI_REPORT_TARGET_PGS:
 408			return 0;
 409		default:
 410			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
 411			return 1;
 412		}
 413	case MAINTENANCE_OUT:
 414		switch (cdb[1]) {
 415		case MO_SET_TARGET_PGS:
 416			return 0;
 417		default:
 418			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
 419			return 1;
 420		}
 421	case REQUEST_SENSE:
 422	case READ_BUFFER:
 423	case WRITE_BUFFER:
 424		return 0;
 425	default:
 426		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
 427		return 1;
 428	}
 429
 430	return 0;
 431}
 432
 433static inline int core_alua_state_transition(
 434	struct se_cmd *cmd,
 435	unsigned char *cdb,
 436	u8 *alua_ascq)
 437{
 438	/*
  439	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
 440	 * spc4r17 section 5.9.2.5
 441	 */
 442	switch (cdb[0]) {
 443	case INQUIRY:
 444	case REPORT_LUNS:
 445	case MAINTENANCE_IN:
 446		switch (cdb[1]) {
 447		case MI_REPORT_TARGET_PGS:
 448			return 0;
 449		default:
 450			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
 451			return 1;
 452		}
 453	case REQUEST_SENSE:
 454	case READ_BUFFER:
 455	case WRITE_BUFFER:
 456		return 0;
 457	default:
 458		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
 459		return 1;
 460	}
 461
 462	return 0;
 463}
 464
 465/*
 466 * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
 467 * in transport_cmd_sequencer().  This function is assigned to
 468 * struct t10_alua *->state_check() in core_setup_alua()
 469 */
 470static int core_alua_state_check_nop(
 471	struct se_cmd *cmd,
 472	unsigned char *cdb,
 473	u8 *alua_ascq)
 474{
 475	return 0;
 476}
 477
 478/*
 479 * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
 480 * This function is assigned to struct t10_alua *->state_check() in
 481 * core_setup_alua()
 482 *
 483 * Also, this function can return three different return codes to
 484 * signal transport_generic_cmd_sequencer()
 485 *
  486 * return 1: Is used to signal LUN not accessible, and check condition/not ready
  487 * return 0: Used to signal success
  488 * return -1: Used to signal failure, and invalid cdb field
 489 */
 490static int core_alua_state_check(
 491	struct se_cmd *cmd,
 492	unsigned char *cdb,
 493	u8 *alua_ascq)
 494{
 495	struct se_lun *lun = cmd->se_lun;
 496	struct se_port *port = lun->lun_sep;
 497	struct t10_alua_tg_pt_gp *tg_pt_gp;
 498	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 499	int out_alua_state, nonop_delay_msecs;
 500
 501	if (!port)
 502		return 0;
 503	/*
 504	 * First, check for a struct se_port specific secondary ALUA target port
 505	 * access state: OFFLINE
 506	 */
 507	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
 508		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
 509		pr_debug("ALUA: Got secondary offline status for local"
 510				" target port\n");
 511		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
 512		return 1;
 513	}
 514	 /*
 515	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
 516	 * ALUA target port group, to obtain current ALUA access state.
 517	 * Otherwise look for the underlying struct se_device association with
 518	 * a ALUA logical unit group.
 519	 */
 520	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
 521	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 522	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
 523	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
 524	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
 525	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 526	/*
 527	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
 528	 * statement so the compiler knows explicitly to check this case first.
 529	 * For the Optimized ALUA access state case, we want to process the
 530	 * incoming fabric cmd ASAP..
 531	 */
 532	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
 533		return 0;
 534
 535	switch (out_alua_state) {
 536	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 537		return core_alua_state_nonoptimized(cmd, cdb,
 538					nonop_delay_msecs, alua_ascq);
 539	case ALUA_ACCESS_STATE_STANDBY:
 540		return core_alua_state_standby(cmd, cdb, alua_ascq);
 541	case ALUA_ACCESS_STATE_UNAVAILABLE:
 542		return core_alua_state_unavailable(cmd, cdb, alua_ascq);
 543	case ALUA_ACCESS_STATE_TRANSITION:
 544		return core_alua_state_transition(cmd, cdb, alua_ascq);
 545	/*
 546	 * OFFLINE is a secondary ALUA target port group access state, that is
 547	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
 548	 */
 549	case ALUA_ACCESS_STATE_OFFLINE:
 550	default:
 551		pr_err("Unknown ALUA access state: 0x%02x\n",
 552				out_alua_state);
 553		return -EINVAL;
 554	}
 555
 556	return 0;
 557}
 558
 559/*
  560 * Check implicit and explicit ALUA state change request.
 561 */
 562static int core_alua_check_transition(int state, int *primary)
 563{
 564	switch (state) {
 565	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
 566	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 567	case ALUA_ACCESS_STATE_STANDBY:
 568	case ALUA_ACCESS_STATE_UNAVAILABLE:
 569		/*
 570		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
 571		 * defined as primary target port asymmetric access states.
 572		 */
 573		*primary = 1;
 574		break;
 575	case ALUA_ACCESS_STATE_OFFLINE:
 576		/*
 577		 * OFFLINE state is defined as a secondary target port
 578		 * asymmetric access state.
 579		 */
 580		*primary = 0;
 581		break;
 582	default:
 583		pr_err("Unknown ALUA access state: 0x%02x\n", state);
 584		return -EINVAL;
 585	}
 586
 587	return 0;
 588}
 589
 590static char *core_alua_dump_state(int state)
 591{
 592	switch (state) {
 593	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
 594		return "Active/Optimized";
 595	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 596		return "Active/NonOptimized";
 597	case ALUA_ACCESS_STATE_STANDBY:
 598		return "Standby";
 599	case ALUA_ACCESS_STATE_UNAVAILABLE:
 600		return "Unavailable";
 601	case ALUA_ACCESS_STATE_OFFLINE:
 602		return "Offline";
 603	default:
 604		return "Unknown";
 605	}
 606
 607	return NULL;
 608}
 609
 610char *core_alua_dump_status(int status)
 611{
 612	switch (status) {
 613	case ALUA_STATUS_NONE:
 614		return "None";
 615	case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
 616		return "Altered by Explict STPG";
 617	case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
 618		return "Altered by Implict ALUA";
 619	default:
 620		return "Unknown";
 621	}
 622
 623	return NULL;
 624}
 625
 626/*
 627 * Used by fabric modules to determine when we need to delay processing
 628 * for the Active/NonOptimized paths..
 629 */
 630int core_alua_check_nonop_delay(
 631	struct se_cmd *cmd)
 632{
 633	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
 634		return 0;
 635	if (in_interrupt())
 636		return 0;
 637	/*
 638	 * The ALUA Active/NonOptimized access state delay can be disabled
  639	 * via configfs with a value of zero
 640	 */
 641	if (!cmd->alua_nonop_delay)
 642		return 0;
 643	/*
 644	 * struct se_cmd->alua_nonop_delay gets set by a target port group
 645	 * defined interval in core_alua_state_nonoptimized()
 646	 */
 647	msleep_interruptible(cmd->alua_nonop_delay);
 648	return 0;
 649}
 650EXPORT_SYMBOL(core_alua_check_nonop_delay);
 651
 652/*
 653 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
 654 *
 655 */
 656static int core_alua_write_tpg_metadata(
 657	const char *path,
 658	unsigned char *md_buf,
 659	u32 md_buf_len)
 660{
 661	mm_segment_t old_fs;
 662	struct file *file;
 663	struct iovec iov[1];
 664	int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
 665
 666	memset(iov, 0, sizeof(struct iovec));
 667
 668	file = filp_open(path, flags, 0600);
 669	if (IS_ERR(file) || !file || !file->f_dentry) {
 670		pr_err("filp_open(%s) for ALUA metadata failed\n",
 671			path);
 672		return -ENODEV;
 673	}
 674
 675	iov[0].iov_base = &md_buf[0];
 676	iov[0].iov_len = md_buf_len;
 677
 678	old_fs = get_fs();
 679	set_fs(get_ds());
 680	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
 681	set_fs(old_fs);
 682
 683	if (ret < 0) {
 684		pr_err("Error writing ALUA metadata file: %s\n", path);
 685		filp_close(file, NULL);
 686		return -EIO;
 687	}
 688	filp_close(file, NULL);
 689
 690	return 0;
 691}
 692
 693/*
 694 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
 695 */
 696static int core_alua_update_tpg_primary_metadata(
 697	struct t10_alua_tg_pt_gp *tg_pt_gp,
 698	int primary_state,
 699	unsigned char *md_buf)
 700{
 701	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
 702	struct t10_wwn *wwn = &su_dev->t10_wwn;
 703	char path[ALUA_METADATA_PATH_LEN];
 704	int len;
 705
 706	memset(path, 0, ALUA_METADATA_PATH_LEN);
 707
 708	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
 709			"tg_pt_gp_id=%hu\n"
 710			"alua_access_state=0x%02x\n"
 711			"alua_access_status=0x%02x\n",
 712			tg_pt_gp->tg_pt_gp_id, primary_state,
 713			tg_pt_gp->tg_pt_gp_alua_access_status);
 714
 715	snprintf(path, ALUA_METADATA_PATH_LEN,
 716		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
 717		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
 718
 719	return core_alua_write_tpg_metadata(path, md_buf, len);
 720}
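/*
 * Illustrative only (values are hypothetical): a primary metadata file
 * written by the function above, e.g.
 * /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp name>, would contain:
 *
 *	tg_pt_gp_id=1
 *	alua_access_state=0x01
 *	alua_access_status=0x02
 */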
 721
 722static int core_alua_do_transition_tg_pt(
 723	struct t10_alua_tg_pt_gp *tg_pt_gp,
 724	struct se_port *l_port,
 725	struct se_node_acl *nacl,
 726	unsigned char *md_buf,
 727	int new_state,
 728	int explict)
 729{
 730	struct se_dev_entry *se_deve;
 731	struct se_lun_acl *lacl;
 732	struct se_port *port;
 733	struct t10_alua_tg_pt_gp_member *mem;
 734	int old_state = 0;
 735	/*
 736	 * Save the old primary ALUA access state, and set the current state
 737	 * to ALUA_ACCESS_STATE_TRANSITION.
 738	 */
 739	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
 740	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 741			ALUA_ACCESS_STATE_TRANSITION);
 742	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
 743				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
 744				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
 745	/*
 746	 * Check for the optional ALUA primary state transition delay
 747	 */
 748	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
 749		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
 750
 751	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 752	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
 753				tg_pt_gp_mem_list) {
 754		port = mem->tg_pt;
 755		/*
 756		 * After an implicit target port asymmetric access state
 757		 * change, a device server shall establish a unit attention
 758		 * condition for the initiator port associated with every I_T
 759		 * nexus with the additional sense code set to ASYMMETRIC
  760		 * ACCESS STATE CHANGED.
 761		 *
 762		 * After an explicit target port asymmetric access state
 763		 * change, a device server shall establish a unit attention
 764		 * condition with the additional sense code set to ASYMMETRIC
 765		 * ACCESS STATE CHANGED for the initiator port associated with
 766		 * every I_T nexus other than the I_T nexus on which the SET
  767		 * TARGET PORT GROUPS command was received.
 768		 */
 769		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
 770		smp_mb__after_atomic_inc();
 771		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 772
 773		spin_lock_bh(&port->sep_alua_lock);
 774		list_for_each_entry(se_deve, &port->sep_alua_list,
 775					alua_port_list) {
 776			lacl = se_deve->se_lun_acl;
 777			/*
  778			 * se_deve->se_lun_acl pointer may be NULL for an
  779			 * entry created without explicit Node+MappedLUN ACLs
 780			 */
 781			if (!lacl)
 782				continue;
 783
 784			if (explict &&
 785			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
 786			   (l_port != NULL) && (l_port == port))
 787				continue;
 788
 789			core_scsi3_ua_allocate(lacl->se_lun_nacl,
 790				se_deve->mapped_lun, 0x2A,
 791				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
 792		}
 793		spin_unlock_bh(&port->sep_alua_lock);
 794
 795		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 796		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
 797		smp_mb__after_atomic_dec();
 798	}
 799	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 800	/*
 801	 * Update the ALUA metadata buf that has been allocated in
 802	 * core_alua_do_port_transition(), this metadata will be written
 803	 * to struct file.
 804	 *
 805	 * Note that there is the case where we do not want to update the
 806	 * metadata when the saved metadata is being parsed in userspace
 807	 * when setting the existing port access state and access status.
 808	 *
 809	 * Also note that the failure to write out the ALUA metadata to
 810	 * struct file does NOT affect the actual ALUA transition.
 811	 */
 812	if (tg_pt_gp->tg_pt_gp_write_metadata) {
 813		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
 814		core_alua_update_tpg_primary_metadata(tg_pt_gp,
 815					new_state, md_buf);
 816		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
 817	}
 818	/*
 819	 * Set the current primary ALUA access state to the requested new state
 820	 */
 821	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
 822
 823	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
 824		" from primary access state %s to %s\n", (explict) ? "explict" :
 825		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
 826		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
 827		core_alua_dump_state(new_state));
 828
 829	return 0;
 830}
 831
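/*
 * Apply a primary ALUA access state transition for the target port group ID
 * of *l_tg_pt_gp.  For members of default_lu_gp only the passed group is
 * transitioned; for any other LU group, every associated struct se_device is
 * walked and the matching target port group ID is transitioned as well.
 */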
 832int core_alua_do_port_transition(
 833	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
 834	struct se_device *l_dev,
 835	struct se_port *l_port,
 836	struct se_node_acl *l_nacl,
 837	int new_state,
 838	int explict)
 839{
 840	struct se_device *dev;
 841	struct se_port *port;
 842	struct se_subsystem_dev *su_dev;
 843	struct se_node_acl *nacl;
 844	struct t10_alua_lu_gp *lu_gp;
 845	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
 846	struct t10_alua_tg_pt_gp *tg_pt_gp;
 847	unsigned char *md_buf;
 848	int primary;
 849
 850	if (core_alua_check_transition(new_state, &primary) != 0)
 851		return -EINVAL;
 852
 853	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
 854	if (!md_buf) {
 855		pr_err("Unable to allocate buf for ALUA metadata\n");
 856		return -ENOMEM;
 857	}
 858
 859	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
 860	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
 861	lu_gp = local_lu_gp_mem->lu_gp;
 862	atomic_inc(&lu_gp->lu_gp_ref_cnt);
 863	smp_mb__after_atomic_inc();
 864	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
 865	/*
 866	 * For storage objects that are members of the 'default_lu_gp',
 867	 * we only do transition on the passed *l_tp_pt_gp, and not
 868	 * on all of the matching target port groups IDs in default_lu_gp.
 869	 */
 870	if (!lu_gp->lu_gp_id) {
 871		/*
 872		 * core_alua_do_transition_tg_pt() will always return
 873		 * success.
 874		 */
 875		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
 876					md_buf, new_state, explict);
 877		atomic_dec(&lu_gp->lu_gp_ref_cnt);
 878		smp_mb__after_atomic_dec();
 879		kfree(md_buf);
 880		return 0;
 881	}
 882	/*
 883	 * For all other LU groups aside from 'default_lu_gp', walk all of
 884	 * the associated storage objects looking for a matching target port
 885	 * group ID from the local target port group.
 886	 */
 887	spin_lock(&lu_gp->lu_gp_lock);
 888	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
 889				lu_gp_mem_list) {
 890
 891		dev = lu_gp_mem->lu_gp_mem_dev;
 892		su_dev = dev->se_sub_dev;
 893		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
 894		smp_mb__after_atomic_inc();
 895		spin_unlock(&lu_gp->lu_gp_lock);
 896
 897		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 898		list_for_each_entry(tg_pt_gp,
 899				&su_dev->t10_alua.tg_pt_gps_list,
 900				tg_pt_gp_list) {
 901
 902			if (!tg_pt_gp->tg_pt_gp_valid_id)
 903				continue;
 904			/*
  905			 * If the target port asymmetric access state
  906			 * is changed for any target port group accessible via
  907			 * a logical unit within a LU group, the target port
  908			 * asymmetric access states for the same
 909			 * target port group accessible via other logical units
 910			 * in that LU group will also change.
 911			 */
 912			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
 913				continue;
 914
 915			if (l_tg_pt_gp == tg_pt_gp) {
 916				port = l_port;
 917				nacl = l_nacl;
 918			} else {
 919				port = NULL;
 920				nacl = NULL;
 921			}
 922			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 923			smp_mb__after_atomic_inc();
 924			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 925			/*
 926			 * core_alua_do_transition_tg_pt() will always return
 927			 * success.
 928			 */
 929			core_alua_do_transition_tg_pt(tg_pt_gp, port,
 930					nacl, md_buf, new_state, explict);
 931
 932			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 933			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 934			smp_mb__after_atomic_dec();
 935		}
 936		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 937
 938		spin_lock(&lu_gp->lu_gp_lock);
 939		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
 940		smp_mb__after_atomic_dec();
 941	}
 942	spin_unlock(&lu_gp->lu_gp_lock);
 943
 944	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
 945		" Group IDs: %hu %s transition to primary state: %s\n",
 946		config_item_name(&lu_gp->lu_gp_group.cg_item),
 947		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
 948		core_alua_dump_state(new_state));
 949
 950	atomic_dec(&lu_gp->lu_gp_ref_cnt);
 951	smp_mb__after_atomic_dec();
 952	kfree(md_buf);
 953	return 0;
 954}
 955
 956/*
 957 * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
 958 */
 959static int core_alua_update_tpg_secondary_metadata(
 960	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
 961	struct se_port *port,
 962	unsigned char *md_buf,
 963	u32 md_buf_len)
 964{
 965	struct se_portal_group *se_tpg = port->sep_tpg;
 966	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
 967	int len;
 968
 969	memset(path, 0, ALUA_METADATA_PATH_LEN);
 970	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
 971
 972	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
 973			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
 974
 975	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
 976		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
 977				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
 978
 979	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
 980			"alua_tg_pt_status=0x%02x\n",
 981			atomic_read(&port->sep_tg_pt_secondary_offline),
 982			port->sep_tg_pt_secondary_stat);
 983
 984	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
 985			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
 986			port->sep_lun->unpacked_lun);
 987
 988	return core_alua_write_tpg_metadata(path, md_buf, len);
 989}
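/*
 * Illustrative only (path components and values are hypothetical): the
 * per-port secondary metadata written by the function above lands in
 * /var/target/alua/<fabric>/<wwn>+<tpgt>/lun_<n> and would contain:
 *
 *	alua_tg_pt_offline=1
 *	alua_tg_pt_status=0x02
 */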
 990
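/*
 * Set or clear the secondary OFFLINE ALUA access state for a single
 * struct se_port, honor the optional transition delay, and write the
 * per-port metadata when sep_tg_pt_secondary_write_md is set.
 */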
 991static int core_alua_set_tg_pt_secondary_state(
 992	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
 993	struct se_port *port,
 994	int explict,
 995	int offline)
 996{
 997	struct t10_alua_tg_pt_gp *tg_pt_gp;
 998	unsigned char *md_buf;
 999	u32 md_buf_len;
1000	int trans_delay_msecs;
1001
1002	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1003	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1004	if (!tg_pt_gp) {
1005		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1006		pr_err("Unable to complete secondary state"
1007				" transition\n");
1008		return -EINVAL;
1009	}
1010	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1011	/*
1012	 * Set the secondary ALUA target port access state to OFFLINE
1013	 * or release the previously secondary state for struct se_port
1014	 */
1015	if (offline)
1016		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
1017	else
1018		atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1019
1020	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
1021	port->sep_tg_pt_secondary_stat = (explict) ?
1022			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
1023			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
1024
1025	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1026		" to secondary access state: %s\n", (explict) ? "explict" :
1027		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1028		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1029
1030	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1031	/*
1032	 * Do the optional transition delay after we set the secondary
1033	 * ALUA access state.
1034	 */
1035	if (trans_delay_msecs != 0)
1036		msleep_interruptible(trans_delay_msecs);
1037	/*
1038	 * See if we need to update the ALUA fabric port metadata for
1039	 * secondary state and status
1040	 */
1041	if (port->sep_tg_pt_secondary_write_md) {
1042		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
1043		if (!md_buf) {
1044			pr_err("Unable to allocate md_buf for"
1045				" secondary ALUA access metadata\n");
1046			return -ENOMEM;
1047		}
1048		mutex_lock(&port->sep_tg_pt_md_mutex);
1049		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
1050				md_buf, md_buf_len);
1051		mutex_unlock(&port->sep_tg_pt_md_mutex);
1052
1053		kfree(md_buf);
1054	}
1055
1056	return 0;
1057}
1058
1059struct t10_alua_lu_gp *
1060core_alua_allocate_lu_gp(const char *name, int def_group)
1061{
1062	struct t10_alua_lu_gp *lu_gp;
1063
1064	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1065	if (!lu_gp) {
1066		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1067		return ERR_PTR(-ENOMEM);
1068	}
1069	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1070	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1071	spin_lock_init(&lu_gp->lu_gp_lock);
1072	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1073
1074	if (def_group) {
1075		lu_gp->lu_gp_id = alua_lu_gps_counter++;
1076		lu_gp->lu_gp_valid_id = 1;
1077		alua_lu_gps_count++;
1078	}
1079
1080	return lu_gp;
1081}
1082
1083int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1084{
1085	struct t10_alua_lu_gp *lu_gp_tmp;
1086	u16 lu_gp_id_tmp;
1087	/*
1088	 * The lu_gp->lu_gp_id may only be set once..
1089	 */
1090	if (lu_gp->lu_gp_valid_id) {
1091		pr_warn("ALUA LU Group already has a valid ID,"
1092			" ignoring request\n");
1093		return -EINVAL;
1094	}
1095
1096	spin_lock(&lu_gps_lock);
1097	if (alua_lu_gps_count == 0x0000ffff) {
1098		pr_err("Maximum ALUA alua_lu_gps_count:"
1099				" 0x0000ffff reached\n");
1100		spin_unlock(&lu_gps_lock);
1101		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1102		return -ENOSPC;
1103	}
1104again:
1105	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1106				alua_lu_gps_counter++;
1107
1108	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1109		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1110			if (!lu_gp_id)
1111				goto again;
1112
1113			pr_warn("ALUA Logical Unit Group ID: %hu"
1114				" already exists, ignoring request\n",
1115				lu_gp_id);
1116			spin_unlock(&lu_gps_lock);
1117			return -EINVAL;
1118		}
1119	}
1120
1121	lu_gp->lu_gp_id = lu_gp_id_tmp;
1122	lu_gp->lu_gp_valid_id = 1;
1123	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1124	alua_lu_gps_count++;
1125	spin_unlock(&lu_gps_lock);
1126
1127	return 0;
1128}
1129
1130static struct t10_alua_lu_gp_member *
1131core_alua_allocate_lu_gp_mem(struct se_device *dev)
1132{
1133	struct t10_alua_lu_gp_member *lu_gp_mem;
1134
1135	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1136	if (!lu_gp_mem) {
1137		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1138		return ERR_PTR(-ENOMEM);
1139	}
1140	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1141	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1142	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1143
1144	lu_gp_mem->lu_gp_mem_dev = dev;
1145	dev->dev_alua_lu_gp_mem = lu_gp_mem;
1146
1147	return lu_gp_mem;
1148}
1149
1150void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1151{
1152	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1153	/*
1154	 * Once we have reached this point, config_item_put() has
1155	 * already been called from target_core_alua_drop_lu_gp().
1156	 *
1157	 * Here, we remove the *lu_gp from the global list so that
1158	 * no associations can be made while we are releasing
1159	 * struct t10_alua_lu_gp.
1160	 */
1161	spin_lock(&lu_gps_lock);
1162	atomic_set(&lu_gp->lu_gp_shutdown, 1);
1163	list_del(&lu_gp->lu_gp_node);
1164	alua_lu_gps_count--;
1165	spin_unlock(&lu_gps_lock);
1166	/*
1167	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1168	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1169	 * released with core_alua_put_lu_gp_from_name()
1170	 */
1171	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1172		cpu_relax();
1173	/*
1174	 * Release reference to struct t10_alua_lu_gp * from all associated
1175	 * struct se_device.
1176	 */
1177	spin_lock(&lu_gp->lu_gp_lock);
1178	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1179				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1180		if (lu_gp_mem->lu_gp_assoc) {
1181			list_del(&lu_gp_mem->lu_gp_mem_list);
1182			lu_gp->lu_gp_members--;
1183			lu_gp_mem->lu_gp_assoc = 0;
1184		}
1185		spin_unlock(&lu_gp->lu_gp_lock);
1186		/*
1187		 *
1188		 * lu_gp_mem is associated with a single
1189		 * struct se_device->dev_alua_lu_gp_mem, and is released when
1190		 * struct se_device is released via core_alua_free_lu_gp_mem().
1191		 *
1192		 * If the passed lu_gp does NOT match the default_lu_gp, assume
 1193		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1194		 */
1195		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1196		if (lu_gp != default_lu_gp)
1197			__core_alua_attach_lu_gp_mem(lu_gp_mem,
1198					default_lu_gp);
1199		else
1200			lu_gp_mem->lu_gp = NULL;
1201		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1202
1203		spin_lock(&lu_gp->lu_gp_lock);
1204	}
1205	spin_unlock(&lu_gp->lu_gp_lock);
1206
1207	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1208}
1209
1210void core_alua_free_lu_gp_mem(struct se_device *dev)
1211{
1212	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1213	struct t10_alua *alua = &su_dev->t10_alua;
1214	struct t10_alua_lu_gp *lu_gp;
1215	struct t10_alua_lu_gp_member *lu_gp_mem;
1216
1217	if (alua->alua_type != SPC3_ALUA_EMULATED)
1218		return;
1219
1220	lu_gp_mem = dev->dev_alua_lu_gp_mem;
1221	if (!lu_gp_mem)
1222		return;
1223
1224	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1225		cpu_relax();
1226
1227	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1228	lu_gp = lu_gp_mem->lu_gp;
1229	if (lu_gp) {
1230		spin_lock(&lu_gp->lu_gp_lock);
1231		if (lu_gp_mem->lu_gp_assoc) {
1232			list_del(&lu_gp_mem->lu_gp_mem_list);
1233			lu_gp->lu_gp_members--;
1234			lu_gp_mem->lu_gp_assoc = 0;
1235		}
1236		spin_unlock(&lu_gp->lu_gp_lock);
1237		lu_gp_mem->lu_gp = NULL;
1238	}
1239	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1240
1241	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1242}
1243
1244struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1245{
1246	struct t10_alua_lu_gp *lu_gp;
1247	struct config_item *ci;
1248
1249	spin_lock(&lu_gps_lock);
1250	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1251		if (!lu_gp->lu_gp_valid_id)
1252			continue;
1253		ci = &lu_gp->lu_gp_group.cg_item;
1254		if (!strcmp(config_item_name(ci), name)) {
1255			atomic_inc(&lu_gp->lu_gp_ref_cnt);
1256			spin_unlock(&lu_gps_lock);
1257			return lu_gp;
1258		}
1259	}
1260	spin_unlock(&lu_gps_lock);
1261
1262	return NULL;
1263}
1264
1265void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1266{
1267	spin_lock(&lu_gps_lock);
1268	atomic_dec(&lu_gp->lu_gp_ref_cnt);
1269	spin_unlock(&lu_gps_lock);
1270}
1271
1272/*
1273 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1274 */
1275void __core_alua_attach_lu_gp_mem(
1276	struct t10_alua_lu_gp_member *lu_gp_mem,
1277	struct t10_alua_lu_gp *lu_gp)
1278{
1279	spin_lock(&lu_gp->lu_gp_lock);
1280	lu_gp_mem->lu_gp = lu_gp;
1281	lu_gp_mem->lu_gp_assoc = 1;
1282	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1283	lu_gp->lu_gp_members++;
1284	spin_unlock(&lu_gp->lu_gp_lock);
1285}
1286
1287/*
1288 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1289 */
1290void __core_alua_drop_lu_gp_mem(
1291	struct t10_alua_lu_gp_member *lu_gp_mem,
1292	struct t10_alua_lu_gp *lu_gp)
1293{
1294	spin_lock(&lu_gp->lu_gp_lock);
1295	list_del(&lu_gp_mem->lu_gp_mem_list);
1296	lu_gp_mem->lu_gp = NULL;
1297	lu_gp_mem->lu_gp_assoc = 0;
1298	lu_gp->lu_gp_members--;
1299	spin_unlock(&lu_gp->lu_gp_lock);
1300}
1301
1302struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
1303	struct se_subsystem_dev *su_dev,
1304	const char *name,
1305	int def_group)
1306{
1307	struct t10_alua_tg_pt_gp *tg_pt_gp;
1308
1309	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1310	if (!tg_pt_gp) {
1311		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1312		return NULL;
1313	}
1314	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1315	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
1316	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1317	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1318	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1319	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
1320	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
1321	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1322		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
1323	/*
 1324	 * Enable both explicit and implicit ALUA support by default
1325	 */
1326	tg_pt_gp->tg_pt_gp_alua_access_type =
1327			TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
1328	/*
1329	 * Set the default Active/NonOptimized Delay in milliseconds
1330	 */
1331	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1332	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1333
1334	if (def_group) {
1335		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1336		tg_pt_gp->tg_pt_gp_id =
1337				su_dev->t10_alua.alua_tg_pt_gps_counter++;
1338		tg_pt_gp->tg_pt_gp_valid_id = 1;
1339		su_dev->t10_alua.alua_tg_pt_gps_count++;
1340		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1341			      &su_dev->t10_alua.tg_pt_gps_list);
1342		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1343	}
1344
1345	return tg_pt_gp;
1346}
1347
1348int core_alua_set_tg_pt_gp_id(
1349	struct t10_alua_tg_pt_gp *tg_pt_gp,
1350	u16 tg_pt_gp_id)
1351{
1352	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1353	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1354	u16 tg_pt_gp_id_tmp;
1355	/*
1356	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
1357	 */
1358	if (tg_pt_gp->tg_pt_gp_valid_id) {
1359		pr_warn("ALUA TG PT Group already has a valid ID,"
1360			" ignoring request\n");
1361		return -EINVAL;
1362	}
1363
1364	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1365	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1366		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1367			" 0x0000ffff reached\n");
1368		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1369		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1370		return -ENOSPC;
1371	}
1372again:
1373	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1374			su_dev->t10_alua.alua_tg_pt_gps_counter++;
1375
1376	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
1377			tg_pt_gp_list) {
1378		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1379			if (!tg_pt_gp_id)
1380				goto again;
1381
1382			pr_err("ALUA Target Port Group ID: %hu already"
1383				" exists, ignoring request\n", tg_pt_gp_id);
1384			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1385			return -EINVAL;
1386		}
1387	}
1388
1389	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1390	tg_pt_gp->tg_pt_gp_valid_id = 1;
1391	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1392			&su_dev->t10_alua.tg_pt_gps_list);
1393	su_dev->t10_alua.alua_tg_pt_gps_count++;
1394	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1395
1396	return 0;
1397}
1398
1399struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1400	struct se_port *port)
1401{
1402	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1403
1404	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1405				GFP_KERNEL);
1406	if (!tg_pt_gp_mem) {
1407		pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1408		return ERR_PTR(-ENOMEM);
1409	}
1410	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1411	spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1412	atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1413
1414	tg_pt_gp_mem->tg_pt = port;
1415	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1416	atomic_set(&port->sep_tg_pt_gp_active, 1);
1417
1418	return tg_pt_gp_mem;
1419}
1420
1421void core_alua_free_tg_pt_gp(
1422	struct t10_alua_tg_pt_gp *tg_pt_gp)
1423{
1424	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1425	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1426	/*
1427	 * Once we have reached this point, config_item_put() has already
1428	 * been called from target_core_alua_drop_tg_pt_gp().
1429	 *
1430	 * Here we remove *tg_pt_gp from the global list so that
 1431	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1432	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1433	 */
1434	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1435	list_del(&tg_pt_gp->tg_pt_gp_list);
1436	su_dev->t10_alua.alua_tg_pt_gps_counter--;
1437	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1438	/*
1439	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1440	 * core_alua_get_tg_pt_gp_by_name() in
1441	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1442	 * to be released with core_alua_put_tg_pt_gp_from_name().
1443	 */
1444	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1445		cpu_relax();
1446	/*
1447	 * Release reference to struct t10_alua_tg_pt_gp from all associated
1448	 * struct se_port.
1449	 */
1450	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1451	list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1452			&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1453		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1454			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1455			tg_pt_gp->tg_pt_gp_members--;
1456			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1457		}
1458		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1459		/*
1460		 * tg_pt_gp_mem is associated with a single
1461		 * se_port->sep_alua_tg_pt_gp_mem, and is released via
1462		 * core_alua_free_tg_pt_gp_mem().
1463		 *
1464		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
 1465		 * assume we want to re-associate a given tg_pt_gp_mem with
1466		 * default_tg_pt_gp.
1467		 */
1468		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1469		if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
1470			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1471					su_dev->t10_alua.default_tg_pt_gp);
1472		} else
1473			tg_pt_gp_mem->tg_pt_gp = NULL;
1474		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1475
1476		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1477	}
1478	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1479
1480	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1481}
1482
1483void core_alua_free_tg_pt_gp_mem(struct se_port *port)
1484{
1485	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1486	struct t10_alua *alua = &su_dev->t10_alua;
1487	struct t10_alua_tg_pt_gp *tg_pt_gp;
1488	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1489
1490	if (alua->alua_type != SPC3_ALUA_EMULATED)
1491		return;
1492
1493	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1494	if (!tg_pt_gp_mem)
1495		return;
1496
1497	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
1498		cpu_relax();
1499
1500	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1501	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1502	if (tg_pt_gp) {
1503		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1504		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1505			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1506			tg_pt_gp->tg_pt_gp_members--;
1507			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1508		}
1509		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1510		tg_pt_gp_mem->tg_pt_gp = NULL;
1511	}
1512	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1513
1514	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
1515}
1516
1517static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1518	struct se_subsystem_dev *su_dev,
1519	const char *name)
1520{
1521	struct t10_alua_tg_pt_gp *tg_pt_gp;
1522	struct config_item *ci;
1523
1524	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1525	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
1526			tg_pt_gp_list) {
1527		if (!tg_pt_gp->tg_pt_gp_valid_id)
1528			continue;
1529		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1530		if (!strcmp(config_item_name(ci), name)) {
1531			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1532			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1533			return tg_pt_gp;
1534		}
1535	}
1536	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1537
1538	return NULL;
1539}
1540
1541static void core_alua_put_tg_pt_gp_from_name(
1542	struct t10_alua_tg_pt_gp *tg_pt_gp)
1543{
1544	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1545
1546	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1547	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1548	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1549}
1550
1551/*
1552 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1553 */
1554void __core_alua_attach_tg_pt_gp_mem(
1555	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1556	struct t10_alua_tg_pt_gp *tg_pt_gp)
1557{
1558	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1559	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
1560	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
1561	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
1562			&tg_pt_gp->tg_pt_gp_mem_list);
1563	tg_pt_gp->tg_pt_gp_members++;
1564	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1565}
1566
1567/*
1568 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1569 */
1570static void __core_alua_drop_tg_pt_gp_mem(
1571	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1572	struct t10_alua_tg_pt_gp *tg_pt_gp)
1573{
1574	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1575	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1576	tg_pt_gp_mem->tg_pt_gp = NULL;
1577	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1578	tg_pt_gp->tg_pt_gp_members--;
1579	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1580}
1581
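/*
 * configfs show handler: report the port's current target port group alias,
 * ID, and primary/secondary ALUA access state and status.
 */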
1582ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
1583{
1584	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1585	struct config_item *tg_pt_ci;
1586	struct t10_alua *alua = &su_dev->t10_alua;
1587	struct t10_alua_tg_pt_gp *tg_pt_gp;
1588	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1589	ssize_t len = 0;
1590
1591	if (alua->alua_type != SPC3_ALUA_EMULATED)
1592		return len;
1593
1594	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1595	if (!tg_pt_gp_mem)
1596		return len;
1597
1598	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1599	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1600	if (tg_pt_gp) {
1601		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1602		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1603			" %hu\nTG Port Primary Access State: %s\nTG Port "
1604			"Primary Access Status: %s\nTG Port Secondary Access"
1605			" State: %s\nTG Port Secondary Access Status: %s\n",
1606			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1607			core_alua_dump_state(atomic_read(
1608					&tg_pt_gp->tg_pt_gp_alua_access_state)),
1609			core_alua_dump_status(
1610				tg_pt_gp->tg_pt_gp_alua_access_status),
1611			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
1612			"Offline" : "None",
1613			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
1614	}
1615	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1616
1617	return len;
1618}
1619
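/*
 * configfs store handler: associate the port with the named target port
 * group, or move it back to default_tg_pt_gp when "NULL" is written.
 */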
1620ssize_t core_alua_store_tg_pt_gp_info(
1621	struct se_port *port,
1622	const char *page,
1623	size_t count)
1624{
1625	struct se_portal_group *tpg;
1626	struct se_lun *lun;
1627	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1628	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1629	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1630	unsigned char buf[TG_PT_GROUP_NAME_BUF];
1631	int move = 0;
1632
1633	tpg = port->sep_tpg;
1634	lun = port->sep_lun;
1635
1636	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1637		pr_warn("SPC3_ALUA_EMULATED not enabled for"
1638			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1639			tpg->se_tpg_tfo->tpg_get_tag(tpg),
1640			config_item_name(&lun->lun_group.cg_item));
1641		return -EINVAL;
1642	}
1643
1644	if (count > TG_PT_GROUP_NAME_BUF) {
1645		pr_err("ALUA Target Port Group alias too large!\n");
1646		return -EINVAL;
1647	}
1648	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1649	memcpy(buf, page, count);
1650	/*
1651	 * Any ALUA target port group alias besides "NULL" means we will be
1652	 * making a new group association.
1653	 */
1654	if (strcmp(strstrip(buf), "NULL")) {
1655		/*
1656		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1657		 * struct t10_alua_tg_pt_gp.  This reference is released with
1658		 * core_alua_put_tg_pt_gp_from_name() below.
1659		 */
1660		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
1661					strstrip(buf));
1662		if (!tg_pt_gp_new)
1663			return -ENODEV;
1664	}
1665	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1666	if (!tg_pt_gp_mem) {
1667		if (tg_pt_gp_new)
1668			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1669		pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
1670		return -EINVAL;
1671	}
1672
1673	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1674	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1675	if (tg_pt_gp) {
1676		/*
1677		 * Clearing an existing tg_pt_gp association, and replacing
1678		 * with the default_tg_pt_gp.
1679		 */
1680		if (!tg_pt_gp_new) {
1681			pr_debug("Target_Core_ConfigFS: Moving"
1682				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
1683				" alua/%s, ID: %hu back to"
1684				" default_tg_pt_gp\n",
1685				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1686				tpg->se_tpg_tfo->tpg_get_tag(tpg),
1687				config_item_name(&lun->lun_group.cg_item),
1688				config_item_name(
1689					&tg_pt_gp->tg_pt_gp_group.cg_item),
1690				tg_pt_gp->tg_pt_gp_id);
1691
1692			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1693			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1694					su_dev->t10_alua.default_tg_pt_gp);
1695			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1696
1697			return count;
1698		}
1699		/*
1700		 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
1701		 */
1702		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1703		move = 1;
1704	}
1705	/*
1706	 * Associate tg_pt_gp_mem with tg_pt_gp_new.
1707	 */
1708	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
1709	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1710	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1711		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
1712		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1713		tpg->se_tpg_tfo->tpg_get_tag(tpg),
1714		config_item_name(&lun->lun_group.cg_item),
1715		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1716		tg_pt_gp_new->tg_pt_gp_id);
1717
1718	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1719	return count;
1720}
1721
1722ssize_t core_alua_show_access_type(
1723	struct t10_alua_tg_pt_gp *tg_pt_gp,
1724	char *page)
1725{
1726	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
1727	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
1728		return sprintf(page, "Implict and Explict\n");
1729	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
1730		return sprintf(page, "Implict\n");
1731	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
1732		return sprintf(page, "Explict\n");
1733	else
1734		return sprintf(page, "None\n");
1735}
1736
1737ssize_t core_alua_store_access_type(
1738	struct t10_alua_tg_pt_gp *tg_pt_gp,
1739	const char *page,
1740	size_t count)
1741{
1742	unsigned long tmp;
1743	int ret;
1744
1745	ret = strict_strtoul(page, 0, &tmp);
1746	if (ret < 0) {
1747		pr_err("Unable to extract alua_access_type\n");
1748		return -EINVAL;
1749	}
1750	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
1751		pr_err("Illegal value for alua_access_type:"
1752				" %lu\n", tmp);
1753		return -EINVAL;
1754	}
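	/*
	 * Map the configfs value onto the reported TPGS support bits:
	 * 3 = Implicit and Explicit, 2 = Explicit only, 1 = Implicit only,
	 * 0 = no TPGS support.
	 */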
1755	if (tmp == 3)
1756		tg_pt_gp->tg_pt_gp_alua_access_type =
1757			TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
1758	else if (tmp == 2)
1759		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
1760	else if (tmp == 1)
1761		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
1762	else
1763		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
1764
1765	return count;
1766}
1767
1768ssize_t core_alua_show_nonop_delay_msecs(
1769	struct t10_alua_tg_pt_gp *tg_pt_gp,
1770	char *page)
1771{
1772	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
1773}
1774
1775ssize_t core_alua_store_nonop_delay_msecs(
1776	struct t10_alua_tg_pt_gp *tg_pt_gp,
1777	const char *page,
1778	size_t count)
1779{
1780	unsigned long tmp;
1781	int ret;
1782
1783	ret = strict_strtoul(page, 0, &tmp);
1784	if (ret < 0) {
1785		pr_err("Unable to extract nonop_delay_msecs\n");
1786		return -EINVAL;
1787	}
1788	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
1789		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
1790			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
1791			ALUA_MAX_NONOP_DELAY_MSECS);
1792		return -EINVAL;
1793	}
1794	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
1795
1796	return count;
1797}
1798
1799ssize_t core_alua_show_trans_delay_msecs(
1800	struct t10_alua_tg_pt_gp *tg_pt_gp,
1801	char *page)
1802{
1803	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1804}
1805
1806ssize_t core_alua_store_trans_delay_msecs(
1807	struct t10_alua_tg_pt_gp *tg_pt_gp,
1808	const char *page,
1809	size_t count)
1810{
1811	unsigned long tmp;
1812	int ret;
1813
1814	ret = strict_strtoul(page, 0, &tmp);
1815	if (ret < 0) {
1816		pr_err("Unable to extract trans_delay_msecs\n");
1817		return -EINVAL;
1818	}
1819	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
1820		pr_err("Passed trans_delay_msecs: %lu, exceeds"
1821			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
1822			ALUA_MAX_TRANS_DELAY_MSECS);
1823		return -EINVAL;
1824	}
1825	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
1826
1827	return count;
1828}
1829
1830ssize_t core_alua_show_preferred_bit(
1831	struct t10_alua_tg_pt_gp *tg_pt_gp,
1832	char *page)
1833{
1834	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
1835}
1836
1837ssize_t core_alua_store_preferred_bit(
1838	struct t10_alua_tg_pt_gp *tg_pt_gp,
1839	const char *page,
1840	size_t count)
1841{
1842	unsigned long tmp;
1843	int ret;
1844
1845	ret = strict_strtoul(page, 0, &tmp);
1846	if (ret < 0) {
1847		pr_err("Unable to extract preferred ALUA value\n");
1848		return -EINVAL;
1849	}
1850	if ((tmp != 0) && (tmp != 1)) {
1851		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
1852		return -EINVAL;
1853	}
1854	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
1855
1856	return count;
1857}
1858
1859ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
1860{
1861	if (!lun->lun_sep)
1862		return -ENODEV;
1863
1864	return sprintf(page, "%d\n",
1865		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
1866}
1867
1868ssize_t core_alua_store_offline_bit(
1869	struct se_lun *lun,
1870	const char *page,
1871	size_t count)
1872{
1873	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1874	unsigned long tmp;
1875	int ret;
1876
1877	if (!lun->lun_sep)
1878		return -ENODEV;
1879
1880	ret = strict_strtoul(page, 0, &tmp);
1881	if (ret < 0) {
1882		pr_err("Unable to extract alua_tg_pt_offline value\n");
1883		return -EINVAL;
1884	}
1885	if ((tmp != 0) && (tmp != 1)) {
1886		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
1887				tmp);
1888		return -EINVAL;
1889	}
1890	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
1891	if (!tg_pt_gp_mem) {
1892		pr_err("Unable to locate *tg_pt_gp_mem\n");
1893		return -EINVAL;
1894	}
1895
1896	ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
1897			lun->lun_sep, 0, (int)tmp);
1898	if (ret < 0)
1899		return -EINVAL;
1900
1901	return count;
1902}
1903
1904ssize_t core_alua_show_secondary_status(
1905	struct se_lun *lun,
1906	char *page)
1907{
1908	return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
1909}
1910
1911ssize_t core_alua_store_secondary_status(
1912	struct se_lun *lun,
1913	const char *page,
1914	size_t count)
1915{
1916	unsigned long tmp;
1917	int ret;
1918
1919	ret = strict_strtoul(page, 0, &tmp);
1920	if (ret < 0) {
1921		pr_err("Unable to extract alua_tg_pt_status\n");
1922		return -EINVAL;
1923	}
1924	if ((tmp != ALUA_STATUS_NONE) &&
1925	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
1926	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
1927		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
1928				tmp);
1929		return -EINVAL;
1930	}
1931	lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
1932
1933	return count;
1934}
1935
1936ssize_t core_alua_show_secondary_write_metadata(
1937	struct se_lun *lun,
1938	char *page)
1939{
1940	return sprintf(page, "%d\n",
1941			lun->lun_sep->sep_tg_pt_secondary_write_md);
1942}
1943
1944ssize_t core_alua_store_secondary_write_metadata(
1945	struct se_lun *lun,
1946	const char *page,
1947	size_t count)
1948{
1949	unsigned long tmp;
1950	int ret;
1951
1952	ret = strict_strtoul(page, 0, &tmp);
1953	if (ret < 0) {
1954		pr_err("Unable to extract alua_tg_pt_write_md\n");
1955		return -EINVAL;
1956	}
1957	if ((tmp != 0) && (tmp != 1)) {
1958		pr_err("Illegal value for alua_tg_pt_write_md:"
1959				" %lu\n", tmp);
1960		return -EINVAL;
1961	}
1962	lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
1963
1964	return count;
1965}
1966
1967int core_setup_alua(struct se_device *dev, int force_pt)
1968{
1969	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1970	struct t10_alua *alua = &su_dev->t10_alua;
1971	struct t10_alua_lu_gp_member *lu_gp_mem;
1972	/*
1973	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
1974	 * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
1975	 * cause a problem because libata and some SATA RAID HBAs appear
1976	 * under Linux/SCSI, but emulate SCSI logic themselves.
1977	 */
1978	if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
1979	    !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
1980		alua->alua_type = SPC_ALUA_PASSTHROUGH;
1981		alua->alua_state_check = &core_alua_state_check_nop;
1982		pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
1983			" emulation\n", dev->transport->name);
1984		return 0;
1985	}
1986	/*
1987	 * If SPC-3 or above is reported by real or emulated struct se_device,
1988	 * use emulated ALUA.
1989	 */
1990	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
1991		pr_debug("%s: Enabling ALUA Emulation for SPC-3"
1992			" device\n", dev->transport->name);
1993		/*
1994		 * Associate this struct se_device with the default ALUA
1995		 * LUN Group.
1996		 */
1997		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
1998		if (IS_ERR(lu_gp_mem))
1999			return PTR_ERR(lu_gp_mem);
2000
2001		alua->alua_type = SPC3_ALUA_EMULATED;
2002		alua->alua_state_check = &core_alua_state_check;
2003		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2004		__core_alua_attach_lu_gp_mem(lu_gp_mem,
2005				default_lu_gp);
2006		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2007
2008		pr_debug("%s: Adding to default ALUA LU Group:"
2009			" core/alua/lu_gps/default_lu_gp\n",
2010			dev->transport->name);
2011	} else {
2012		alua->alua_type = SPC2_ALUA_DISABLED;
2013		alua->alua_state_check = &core_alua_state_check_nop;
2014		pr_debug("%s: Disabling ALUA Emulation for SPC-2"
2015			" device\n", dev->transport->name);
2016	}
2017
2018	return 0;
2019}
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*******************************************************************************
   3 * Filename:  target_core_alua.c
   4 *
   5 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
   6 *
   7 * (c) Copyright 2009-2013 Datera, Inc.
 
   8 *
   9 * Nicholas A. Bellinger <nab@kernel.org>
  10 *
  11 ******************************************************************************/
  12
 
  13#include <linux/slab.h>
  14#include <linux/spinlock.h>
  15#include <linux/configfs.h>
  16#include <linux/delay.h>
  17#include <linux/export.h>
  18#include <linux/fcntl.h>
  19#include <linux/file.h>
  20#include <linux/fs.h>
  21#include <scsi/scsi_proto.h>
  22#include <asm/unaligned.h>
  23
  24#include <target/target_core_base.h>
  25#include <target/target_core_backend.h>
  26#include <target/target_core_fabric.h>
  27
  28#include "target_core_internal.h"
  29#include "target_core_alua.h"
 
  30#include "target_core_ua.h"
  31
  32static sense_reason_t core_alua_check_transition(int state, int valid,
  33						 int *primary, int explicit);
  34static int core_alua_set_tg_pt_secondary_state(
  35		struct se_lun *lun, int explicit, int offline);
  36
  37static char *core_alua_dump_state(int state);
  38
  39static void __target_attach_tg_pt_gp(struct se_lun *lun,
  40		struct t10_alua_tg_pt_gp *tg_pt_gp);
  41
  42static u16 alua_lu_gps_counter;
  43static u32 alua_lu_gps_count;
  44
  45static DEFINE_SPINLOCK(lu_gps_lock);
  46static LIST_HEAD(lu_gps_list);
  47
  48struct t10_alua_lu_gp *default_lu_gp;
  49
  50/*
  51 * REPORT REFERRALS
  52 *
  53 * See sbc3r35 section 5.23
  54 */
  55sense_reason_t
  56target_emulate_report_referrals(struct se_cmd *cmd)
  57{
  58	struct se_device *dev = cmd->se_dev;
  59	struct t10_alua_lba_map *map;
  60	struct t10_alua_lba_map_member *map_mem;
  61	unsigned char *buf;
  62	u32 rd_len = 0, off;
  63
  64	if (cmd->data_length < 4) {
  65		pr_warn("REPORT REFERRALS allocation length %u too"
  66			" small\n", cmd->data_length);
  67		return TCM_INVALID_CDB_FIELD;
  68	}
  69
  70	buf = transport_kmap_data_sg(cmd);
  71	if (!buf)
  72		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
  73
  74	off = 4;
  75	spin_lock(&dev->t10_alua.lba_map_lock);
  76	if (list_empty(&dev->t10_alua.lba_map_list)) {
  77		spin_unlock(&dev->t10_alua.lba_map_lock);
  78		transport_kunmap_data_sg(cmd);
  79
  80		return TCM_UNSUPPORTED_SCSI_OPCODE;
  81	}
  82
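	/*
	 * Each LBA map entry becomes one user data segment referral
	 * descriptor: a 4-byte header, the 8-byte FIRST and LAST LBA
	 * fields, and one 4-byte target port group descriptor per map
	 * member.
	 */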
  83	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
  84			    lba_map_list) {
  85		int desc_num = off + 3;
  86		int pg_num;
  87
  88		off += 4;
  89		if (cmd->data_length > off)
  90			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
  91		off += 8;
  92		if (cmd->data_length > off)
  93			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
  94		off += 8;
  95		rd_len += 20;
  96		pg_num = 0;
  97		list_for_each_entry(map_mem, &map->lba_map_mem_list,
  98				    lba_map_mem_list) {
  99			int alua_state = map_mem->lba_map_mem_alua_state;
 100			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
 101
 102			if (cmd->data_length > off)
 103				buf[off] = alua_state & 0x0f;
 104			off += 2;
 105			if (cmd->data_length > off)
 106				buf[off] = (alua_pg_id >> 8) & 0xff;
 107			off++;
 108			if (cmd->data_length > off)
 109				buf[off] = (alua_pg_id & 0xff);
 110			off++;
 111			rd_len += 4;
 112			pg_num++;
 113		}
 114		if (cmd->data_length > desc_num)
 115			buf[desc_num] = pg_num;
 116	}
 117	spin_unlock(&dev->t10_alua.lba_map_lock);
 118
 119	/*
  120	 * Set the RETURN DATA LENGTH in the header of the DataIN payload
 121	 */
 122	put_unaligned_be16(rd_len, &buf[2]);
 123
 124	transport_kunmap_data_sg(cmd);
 125
 126	target_complete_cmd(cmd, GOOD);
 127	return 0;
 128}
 129
 130/*
 131 * REPORT_TARGET_PORT_GROUPS
 132 *
 133 * See spc4r17 section 6.27
 134 */
 135sense_reason_t
 136target_emulate_report_target_port_groups(struct se_cmd *cmd)
 137{
 138	struct se_device *dev = cmd->se_dev;
 
 139	struct t10_alua_tg_pt_gp *tg_pt_gp;
 140	struct se_lun *lun;
 141	unsigned char *buf;
 142	u32 rd_len = 0, off;
 143	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
 144
 145	/*
 146	 * Skip over RESERVED area to first Target port group descriptor
 147	 * depending on the PARAMETER DATA FORMAT type..
 148	 */
 149	if (ext_hdr != 0)
 150		off = 8;
 151	else
 152		off = 4;
 153
 154	if (cmd->data_length < off) {
 155		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
 156			" small for %s header\n", cmd->data_length,
 157			(ext_hdr) ? "extended" : "normal");
 158		return TCM_INVALID_CDB_FIELD;
 159	}
 160	buf = transport_kmap_data_sg(cmd);
 161	if (!buf)
 162		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 163
 164	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 165	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
 166			tg_pt_gp_list) {
 167		/*
 168		 * Check if the Target port group and Target port descriptor list
 169		 * based on tg_pt_gp_members count will fit into the response payload.
 170		 * Otherwise, bump rd_len to let the initiator know we have exceeded
 171		 * the allocation length and the response is truncated.
 172		 */
 173		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
 174		     cmd->data_length) {
 175			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
 176			continue;
 177		}
 178		/*
 179		 * PREF: Preferred target port bit, determine if this
 180		 * bit should be set for port group.
 181		 */
 182		if (tg_pt_gp->tg_pt_gp_pref)
 183			buf[off] = 0x80;
 184		/*
 185		 * Set the ASYMMETRIC ACCESS State
 186		 */
 187		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
 
 188		/*
 189		 * Set supported ASYMMETRIC ACCESS State bits
 190		 */
 191		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
 192		/*
 193		 * TARGET PORT GROUP
 194		 */
 195		put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
 196		off += 2;
 197
 198		off++; /* Skip over Reserved */
 199		/*
 200		 * STATUS CODE
 201		 */
 202		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
 203		/*
 204		 * Vendor Specific field
 205		 */
 206		buf[off++] = 0x00;
 207		/*
 208		 * TARGET PORT COUNT
 209		 */
 210		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
 211		rd_len += 8;
 212
 213		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 214		list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
 215				lun_tg_pt_gp_link) {
 
 216			/*
 217			 * Start Target Port descriptor format
 218			 *
 219			 * See spc4r17 section 6.2.7 Table 247
 220			 */
 221			off += 2; /* Skip over Obsolete */
 222			/*
 223			 * Set RELATIVE TARGET PORT IDENTIFIER
 224			 */
 225			put_unaligned_be16(lun->lun_rtpi, &buf[off]);
 226			off += 2;
 227			rd_len += 4;
 228		}
 229		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 230	}
 231	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 232	/*
  233	 * Set the RETURN DATA LENGTH in the header of the DataIN payload
 234	 */
 235	put_unaligned_be32(rd_len, &buf[0]);
 236
 237	/*
 238	 * Fill in the Extended header parameter data format if requested
 239	 */
 240	if (ext_hdr != 0) {
 241		buf[4] = 0x10;
 242		/*
 243		 * Set the implicit transition time (in seconds) for the application
  244		 * client to use as a base for its transition timeout value.
  245		 *
  246		 * Use the current LUN -> tg_pt_gp association for the LUN this
  247		 * CDB was received upon to determine this value individually
  248		 * for the ALUA target port group.
 249		 */
 250		spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
 251		tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
 252		if (tg_pt_gp)
 253			buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
 254		spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
 255	}
 256	transport_kunmap_data_sg(cmd);
 257
 258	target_complete_cmd_with_length(cmd, GOOD, rd_len + 4);
 259	return 0;
 260}
 261
 262/*
 263 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 264 *
 265 * See spc4r17 section 6.35
 266 */
 267sense_reason_t
 268target_emulate_set_target_port_groups(struct se_cmd *cmd)
 269{
 270	struct se_device *dev = cmd->se_dev;
 271	struct se_lun *l_lun = cmd->se_lun;
 
 272	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 273	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 
 274	unsigned char *buf;
 275	unsigned char *ptr;
 276	sense_reason_t rc = TCM_NO_SENSE;
 277	u32 len = 4; /* Skip over RESERVED area in header */
 278	int alua_access_state, primary = 0, valid_states;
 279	u16 tg_pt_id, rtpi;
 280
 281	if (cmd->data_length < 4) {
 282		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
 283			" small\n", cmd->data_length);
 284		return TCM_INVALID_PARAMETER_LIST;
 285	}
 286
 287	buf = transport_kmap_data_sg(cmd);
 288	if (!buf)
 289		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 290
 291	/*
 292	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
 293	 * for the local tg_pt_gp.
 294	 */
 295	spin_lock(&l_lun->lun_tg_pt_gp_lock);
 296	l_tg_pt_gp = l_lun->lun_tg_pt_gp;
 297	if (!l_tg_pt_gp) {
 298		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
 299		pr_err("Unable to access l_lun->tg_pt_gp\n");
 300		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 301		goto out;
 302	}
 303
 304	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
 305		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
 306		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
 307				" while TPGS_EXPLICIT_ALUA is disabled\n");
 308		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 309		goto out;
 310	}
 311	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
 312	spin_unlock(&l_lun->lun_tg_pt_gp_lock);
 313
 314	ptr = &buf[4]; /* Skip over RESERVED area in header */
 315
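	/*
	 * Each set target port group descriptor is four bytes: the
	 * ASYMMETRIC ACCESS STATE in byte 0, a reserved byte, and the
	 * TARGET PORT GROUP or RELATIVE TARGET PORT IDENTIFIER in
	 * bytes 2-3.
	 */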
 316	while (len < cmd->data_length) {
 317		bool found = false;
 318		alua_access_state = (ptr[0] & 0x0f);
 319		/*
 320		 * Check the received ALUA access state, and determine if
 321		 * the state is a primary or secondary target port asymmetric
 322		 * access state.
 323		 */
 324		rc = core_alua_check_transition(alua_access_state, valid_states,
 325						&primary, 1);
 326		if (rc) {
 327			/*
 328			 * If the SET TARGET PORT GROUPS attempts to establish
 329			 * an invalid combination of target port asymmetric
 330			 * access states or attempts to establish an
 331			 * unsupported target port asymmetric access state,
 332			 * then the command shall be terminated with CHECK
 333			 * CONDITION status, with the sense key set to ILLEGAL
 334			 * REQUEST, and the additional sense code set to INVALID
 335			 * FIELD IN PARAMETER LIST.
 336			 */
 
 337			goto out;
 338		}
 339
 340		/*
 341		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
 342		 * specifies a primary target port asymmetric access state,
 343		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
 344		 * a primary target port group for which the primary target
 345		 * port asymmetric access state shall be changed. If the
 346		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
 347		 * port asymmetric access state, then the TARGET PORT GROUP OR
 348		 * TARGET PORT field specifies the relative target port
 349		 * identifier (see 3.1.120) of the target port for which the
 350		 * secondary target port asymmetric access state shall be
 351		 * changed.
 352		 */
 353		if (primary) {
 354			tg_pt_id = get_unaligned_be16(ptr + 2);
 
 355			/*
 356			 * Locate the matching target port group ID from
 357			 * the global tg_pt_gp list
 358			 */
 359			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 360			list_for_each_entry(tg_pt_gp,
 361					&dev->t10_alua.tg_pt_gps_list,
 362					tg_pt_gp_list) {
 363				if (!tg_pt_gp->tg_pt_gp_valid_id)
 364					continue;
 365
 366				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
 367					continue;
 368
 369				atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 370
 371				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 372
 373				if (!core_alua_do_port_transition(tg_pt_gp,
 374						dev, l_lun, nacl,
 375						alua_access_state, 1))
 376					found = true;
 377
 378				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 379				atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 380				break;
 381			}
 382			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 383		} else {
 384			struct se_lun *lun;
 385
 386			/*
 387			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
  388			 * the Target Port in question for the incoming
 389			 * SET_TARGET_PORT_GROUPS op.
 390			 */
 391			rtpi = get_unaligned_be16(ptr + 2);
 
 392			/*
 393			 * Locate the matching relative target port identifier
 394			 * for the struct se_device storage object.
 395			 */
 396			spin_lock(&dev->se_port_lock);
 397			list_for_each_entry(lun, &dev->dev_sep_list,
 398							lun_dev_link) {
 399				if (lun->lun_rtpi != rtpi)
 400					continue;
 401
 402				// XXX: racy unlock
 403				spin_unlock(&dev->se_port_lock);
 404
 405				if (!core_alua_set_tg_pt_secondary_state(
 406						lun, 1, 1))
 407					found = true;
 408
 409				spin_lock(&dev->se_port_lock);
 410				break;
 411			}
 412			spin_unlock(&dev->se_port_lock);
 413		}
 414
 415		if (!found) {
 416			rc = TCM_INVALID_PARAMETER_LIST;
 417			goto out;
 418		}
 419
 420		ptr += 4;
 421		len += 4;
 422	}
 423
 424out:
 425	transport_kunmap_data_sg(cmd);
 426	if (!rc)
 427		target_complete_cmd(cmd, GOOD);
 428	return rc;
 429}
 430
 431static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
 432{
 433	/*
 434	 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
 435	 * The ALUA additional sense code qualifier (ASCQ) is determined
 436	 * by the ALUA primary or secondary access state..
 437	 */
 438	pr_debug("[%s]: ALUA TG Port not available, "
 439		"SenseKey: NOT_READY, ASC/ASCQ: "
 440		"0x04/0x%02x\n",
 441		cmd->se_tfo->fabric_name, alua_ascq);
 442
 443	cmd->scsi_asc = 0x04;
 444	cmd->scsi_ascq = alua_ascq;
 445}
 446
 447static inline void core_alua_state_nonoptimized(
 448	struct se_cmd *cmd,
 449	unsigned char *cdb,
 450	int nonop_delay_msecs)
 451{
 452	/*
 453	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
 454	 * later to determine if processing of this cmd needs to be
 455	 * temporarily delayed for the Active/NonOptimized primary access state.
 456	 */
 457	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
 458	cmd->alua_nonop_delay = nonop_delay_msecs;
 459}
 460
 461static inline int core_alua_state_lba_dependent(
 462	struct se_cmd *cmd,
 463	struct t10_alua_tg_pt_gp *tg_pt_gp)
 464{
 465	struct se_device *dev = cmd->se_dev;
 466	u64 segment_size, segment_mult, sectors, lba;
 467
 468	/* Only need to check for cdb actually containing LBAs */
 469	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
 470		return 0;
 471
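	/*
	 * Walk every LBA touched by this command and fail it if the LBA map
	 * puts this target port group into Standby or Unavailable for any of
	 * the covered segments, or if an LBA is not covered by any map entry.
	 */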
 472	spin_lock(&dev->t10_alua.lba_map_lock);
 473	segment_size = dev->t10_alua.lba_map_segment_size;
 474	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
 475	sectors = cmd->data_length / dev->dev_attrib.block_size;
 476
 477	lba = cmd->t_task_lba;
 478	while (lba < cmd->t_task_lba + sectors) {
 479		struct t10_alua_lba_map *cur_map = NULL, *map;
 480		struct t10_alua_lba_map_member *map_mem;
 481
 482		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
 483				    lba_map_list) {
 484			u64 start_lba, last_lba;
 485			u64 first_lba = map->lba_map_first_lba;
 486
 487			if (segment_mult) {
 488				u64 tmp = lba;
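				/*
				 * do_div() divides tmp in place and returns the
				 * remainder, so start_lba becomes this LBA's offset
				 * within the repeating segment pattern.
				 */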
 489				start_lba = do_div(tmp, segment_size * segment_mult);
 490
 491				last_lba = first_lba + segment_size - 1;
 492				if (start_lba >= first_lba &&
 493				    start_lba <= last_lba) {
 494					lba += segment_size;
 495					cur_map = map;
 496					break;
 497				}
 498			} else {
 499				last_lba = map->lba_map_last_lba;
 500				if (lba >= first_lba && lba <= last_lba) {
 501					lba = last_lba + 1;
 502					cur_map = map;
 503					break;
 504				}
 505			}
 506		}
 507		if (!cur_map) {
 508			spin_unlock(&dev->t10_alua.lba_map_lock);
 509			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 510			return 1;
 511		}
 512		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
 513				    lba_map_mem_list) {
 514			if (map_mem->lba_map_mem_alua_pg_id !=
 515			    tg_pt_gp->tg_pt_gp_id)
 516				continue;
 517			switch(map_mem->lba_map_mem_alua_state) {
 518			case ALUA_ACCESS_STATE_STANDBY:
 519				spin_unlock(&dev->t10_alua.lba_map_lock);
 520				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 521				return 1;
 522			case ALUA_ACCESS_STATE_UNAVAILABLE:
 523				spin_unlock(&dev->t10_alua.lba_map_lock);
 524				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 525				return 1;
 526			default:
 527				break;
 528			}
 529		}
 530	}
 531	spin_unlock(&dev->t10_alua.lba_map_lock);
 532	return 0;
 533}
 534
 535static inline int core_alua_state_standby(
 536	struct se_cmd *cmd,
 537	unsigned char *cdb)
 538{
 539	/*
 540	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
 541	 * spc4r17 section 5.9.2.4.4
 542	 */
 543	switch (cdb[0]) {
 544	case INQUIRY:
 545	case LOG_SELECT:
 546	case LOG_SENSE:
 547	case MODE_SELECT:
 548	case MODE_SENSE:
 549	case REPORT_LUNS:
 550	case RECEIVE_DIAGNOSTIC:
 551	case SEND_DIAGNOSTIC:
 552	case READ_CAPACITY:
 553		return 0;
 554	case SERVICE_ACTION_IN_16:
 555		switch (cdb[1] & 0x1f) {
 556		case SAI_READ_CAPACITY_16:
 557			return 0;
 558		default:
 559			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 560			return 1;
 561		}
 562	case MAINTENANCE_IN:
 563		switch (cdb[1] & 0x1f) {
 564		case MI_REPORT_TARGET_PGS:
 565			return 0;
 566		default:
 567			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 568			return 1;
 569		}
 570	case MAINTENANCE_OUT:
 571		switch (cdb[1]) {
 572		case MO_SET_TARGET_PGS:
 573			return 0;
 574		default:
 575			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 576			return 1;
 577		}
 578	case REQUEST_SENSE:
 579	case PERSISTENT_RESERVE_IN:
 580	case PERSISTENT_RESERVE_OUT:
 581	case READ_BUFFER:
 582	case WRITE_BUFFER:
 583		return 0;
 584	default:
 585		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 586		return 1;
 587	}
 588
 589	return 0;
 590}
 591
 592static inline int core_alua_state_unavailable(
 593	struct se_cmd *cmd,
 594	unsigned char *cdb)
 595{
 596	/*
 597	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
 598	 * spc4r17 section 5.9.2.4.5
 599	 */
 600	switch (cdb[0]) {
 601	case INQUIRY:
 602	case REPORT_LUNS:
 603		return 0;
 604	case MAINTENANCE_IN:
 605		switch (cdb[1] & 0x1f) {
 606		case MI_REPORT_TARGET_PGS:
 607			return 0;
 608		default:
 609			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 610			return 1;
 611		}
 612	case MAINTENANCE_OUT:
 613		switch (cdb[1]) {
 614		case MO_SET_TARGET_PGS:
 615			return 0;
 616		default:
 617			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 618			return 1;
 619		}
 620	case REQUEST_SENSE:
 621	case READ_BUFFER:
 622	case WRITE_BUFFER:
 623		return 0;
 624	default:
 625		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 626		return 1;
 627	}
 628
 629	return 0;
 630}
 631
 632static inline int core_alua_state_transition(
 633	struct se_cmd *cmd,
 634	unsigned char *cdb)
 635{
 636	/*
 637	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
 638	 * spc4r17 section 5.9.2.5
 639	 */
 640	switch (cdb[0]) {
 641	case INQUIRY:
 642	case REPORT_LUNS:
 643		return 0;
 644	case MAINTENANCE_IN:
 645		switch (cdb[1] & 0x1f) {
 646		case MI_REPORT_TARGET_PGS:
 647			return 0;
 648		default:
 649			set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
 650			return 1;
 651		}
 652	case REQUEST_SENSE:
 653	case READ_BUFFER:
 654	case WRITE_BUFFER:
 655		return 0;
 656	default:
 657		set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
 658		return 1;
 659	}
 660
 661	return 0;
 662}
 663
 664/*
  665 * Returns 0 when the command may proceed in the current ALUA access state,
  666 * TCM_CHECK_CONDITION_NOT_READY when the LUN is not accessible, or
  667 * TCM_INVALID_CDB_FIELD for an unknown ALUA access state.
 668 */
 669sense_reason_t
 670target_alua_state_check(struct se_cmd *cmd)
 671{
 672	struct se_device *dev = cmd->se_dev;
 673	unsigned char *cdb = cmd->t_task_cdb;
 674	struct se_lun *lun = cmd->se_lun;
 
 675	struct t10_alua_tg_pt_gp *tg_pt_gp;
 
 676	int out_alua_state, nonop_delay_msecs;
 677
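	/*
	 * ALUA state checks are skipped for internal-use HBAs and for
	 * backends that pass ALUA handling through to the underlying device.
	 */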
 678	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
 679		return 0;
 680	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
 681		return 0;
 682
 683	/*
 684	 * First, check for a struct se_port specific secondary ALUA target port
 685	 * access state: OFFLINE
 686	 */
 687	if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
 
 688		pr_debug("ALUA: Got secondary offline status for local"
 689				" target port\n");
 690		set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
 691		return TCM_CHECK_CONDITION_NOT_READY;
 692	}
 693
 694	if (!lun->lun_tg_pt_gp)
 695		return 0;
 696
 697	spin_lock(&lun->lun_tg_pt_gp_lock);
 698	tg_pt_gp = lun->lun_tg_pt_gp;
 699	out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
 700	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
 701
  702	// XXX: keeps using tg_pt_gp without a reference after unlock
 703	spin_unlock(&lun->lun_tg_pt_gp_lock);
 704	/*
 705	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
 706	 * statement so the compiler knows explicitly to check this case first.
 707	 * For the Optimized ALUA access state case, we want to process the
 708	 * incoming fabric cmd ASAP..
 709	 */
 710	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
 711		return 0;
 712
 713	switch (out_alua_state) {
 714	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 715		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
 716		break;
 717	case ALUA_ACCESS_STATE_STANDBY:
 718		if (core_alua_state_standby(cmd, cdb))
 719			return TCM_CHECK_CONDITION_NOT_READY;
 720		break;
 721	case ALUA_ACCESS_STATE_UNAVAILABLE:
 722		if (core_alua_state_unavailable(cmd, cdb))
 723			return TCM_CHECK_CONDITION_NOT_READY;
 724		break;
 725	case ALUA_ACCESS_STATE_TRANSITION:
 726		if (core_alua_state_transition(cmd, cdb))
 727			return TCM_CHECK_CONDITION_NOT_READY;
 728		break;
 729	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
 730		if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
 731			return TCM_CHECK_CONDITION_NOT_READY;
 732		break;
 733	/*
 734	 * OFFLINE is a secondary ALUA target port group access state, that is
 735	 * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
 736	 */
 737	case ALUA_ACCESS_STATE_OFFLINE:
 738	default:
 739		pr_err("Unknown ALUA access state: 0x%02x\n",
 740				out_alua_state);
 741		return TCM_INVALID_CDB_FIELD;
 742	}
 743
 744	return 0;
 745}
 746
 747/*
 748 * Check implicit and explicit ALUA state change request.
 749 */
 750static sense_reason_t
 751core_alua_check_transition(int state, int valid, int *primary, int explicit)
 752{
 753	/*
  754	 * OPTIMIZED, NON-OPTIMIZED, STANDBY, UNAVAILABLE and LBA DEPENDENT
  755	 * are defined as primary target port asymmetric access states.
 756	 */
 757	switch (state) {
 758	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
 759		if (!(valid & ALUA_AO_SUP))
 760			goto not_supported;
 761		*primary = 1;
 762		break;
 763	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 764		if (!(valid & ALUA_AN_SUP))
 765			goto not_supported;
 766		*primary = 1;
 767		break;
 768	case ALUA_ACCESS_STATE_STANDBY:
 769		if (!(valid & ALUA_S_SUP))
 770			goto not_supported;
 771		*primary = 1;
 772		break;
 773	case ALUA_ACCESS_STATE_UNAVAILABLE:
 774		if (!(valid & ALUA_U_SUP))
 775			goto not_supported;
 776		*primary = 1;
 777		break;
 778	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
 779		if (!(valid & ALUA_LBD_SUP))
 780			goto not_supported;
 781		*primary = 1;
 782		break;
 783	case ALUA_ACCESS_STATE_OFFLINE:
 784		/*
 785		 * OFFLINE state is defined as a secondary target port
 786		 * asymmetric access state.
 787		 */
 788		if (!(valid & ALUA_O_SUP))
 789			goto not_supported;
 790		*primary = 0;
 791		break;
 792	case ALUA_ACCESS_STATE_TRANSITION:
 793		if (!(valid & ALUA_T_SUP) || explicit)
 794			/*
 795			 * Transitioning is set internally and by tcmu daemon,
 796			 * and cannot be selected through a STPG.
 797			 */
 798			goto not_supported;
 799		*primary = 0;
 800		break;
 801	default:
 802		pr_err("Unknown ALUA access state: 0x%02x\n", state);
 803		return TCM_INVALID_PARAMETER_LIST;
 804	}
 805
 806	return 0;
 807
 808not_supported:
 809	pr_err("ALUA access state %s not supported",
 810	       core_alua_dump_state(state));
 811	return TCM_INVALID_PARAMETER_LIST;
 812}
 813
 814static char *core_alua_dump_state(int state)
 815{
 816	switch (state) {
 817	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
 818		return "Active/Optimized";
 819	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 820		return "Active/NonOptimized";
 821	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
 822		return "LBA Dependent";
 823	case ALUA_ACCESS_STATE_STANDBY:
 824		return "Standby";
 825	case ALUA_ACCESS_STATE_UNAVAILABLE:
 826		return "Unavailable";
 827	case ALUA_ACCESS_STATE_OFFLINE:
 828		return "Offline";
 829	case ALUA_ACCESS_STATE_TRANSITION:
 830		return "Transitioning";
 831	default:
 832		return "Unknown";
 833	}
 834
 835	return NULL;
 836}
 837
 838char *core_alua_dump_status(int status)
 839{
 840	switch (status) {
 841	case ALUA_STATUS_NONE:
 842		return "None";
 843	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
 844		return "Altered by Explicit STPG";
 845	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
 846		return "Altered by Implicit ALUA";
 847	default:
 848		return "Unknown";
 849	}
 850
 851	return NULL;
 852}
 853
 854/*
 855 * Used by fabric modules to determine when we need to delay processing
 856 * for the Active/NonOptimized paths..
 857 */
 858int core_alua_check_nonop_delay(
 859	struct se_cmd *cmd)
 860{
 861	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
 862		return 0;
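	/* msleep_interruptible() may sleep, so never delay in interrupt context */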
 863	if (in_interrupt())
 864		return 0;
 865	/*
  866	 * The ALUA Active/NonOptimized access state delay can be disabled
  867	 * via configfs by setting it to zero
 868	 */
 869	if (!cmd->alua_nonop_delay)
 870		return 0;
 871	/*
 872	 * struct se_cmd->alua_nonop_delay gets set by a target port group
 873	 * defined interval in core_alua_state_nonoptimized()
 874	 */
 875	msleep_interruptible(cmd->alua_nonop_delay);
 876	return 0;
 877}
 878EXPORT_SYMBOL(core_alua_check_nonop_delay);
 879
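/*
 * Write an ALUA metadata buffer out to @path, creating or truncating the
 * file as needed; callers build paths below db_root.
 */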
 880static int core_alua_write_tpg_metadata(
 881	const char *path,
 882	unsigned char *md_buf,
 883	u32 md_buf_len)
 884{
 885	struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
 886	loff_t pos = 0;
 887	int ret;
 888
 889	if (IS_ERR(file)) {
 890		pr_err("filp_open(%s) for ALUA metadata failed\n", path);
 891		return -ENODEV;
 892	}
 893	ret = kernel_write(file, md_buf, md_buf_len, &pos);
 894	if (ret < 0)
 895		pr_err("Error writing ALUA metadata file: %s\n", path);
 896	fput(file);
 897	return (ret < 0) ? -EIO : 0;
 898}
 899
 900static int core_alua_update_tpg_primary_metadata(
 901	struct t10_alua_tg_pt_gp *tg_pt_gp)
 902{
 903	unsigned char *md_buf;
 904	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
 905	char *path;
 906	int len, rc;
 907
 908	lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex);
 909
 910	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
 911	if (!md_buf) {
 912		pr_err("Unable to allocate buf for ALUA metadata\n");
 913		return -ENOMEM;
 914	}
 915
 916	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
 917			"tg_pt_gp_id=%hu\n"
 918			"alua_access_state=0x%02x\n"
 919			"alua_access_status=0x%02x\n",
 920			tg_pt_gp->tg_pt_gp_id,
 921			tg_pt_gp->tg_pt_gp_alua_access_state,
 922			tg_pt_gp->tg_pt_gp_alua_access_status);
 923
 924	rc = -ENOMEM;
 925	path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
 926			&wwn->unit_serial[0],
 927			config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
 928	if (path) {
 929		rc = core_alua_write_tpg_metadata(path, md_buf, len);
 930		kfree(path);
 931	}
 932	kfree(md_buf);
 933	return rc;
 934}
 935
 936static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 937{
 938	struct se_dev_entry *se_deve;
 939	struct se_lun *lun;
 940	struct se_lun_acl *lacl;
 941
 942	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 943	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
 944				lun_tg_pt_gp_link) {
 
 945		/*
 946		 * After an implicit target port asymmetric access state
 947		 * change, a device server shall establish a unit attention
 948		 * condition for the initiator port associated with every I_T
 949		 * nexus with the additional sense code set to ASYMMETRIC
 950		 * ACCESS STATE CHANGED.
 951		 *
 952		 * After an explicit target port asymmetric access state
 953		 * change, a device server shall establish a unit attention
 954		 * condition with the additional sense code set to ASYMMETRIC
 955		 * ACCESS STATE CHANGED for the initiator port associated with
 956		 * every I_T nexus other than the I_T nexus on which the SET
  957		 * TARGET PORT GROUPS command was received.
 958		 */
 959		if (!percpu_ref_tryget_live(&lun->lun_ref))
 960			continue;
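		/*
		 * With lun_ref held the LUN cannot be released, so it is
		 * safe to drop tg_pt_gp_lock while walking its deve list.
		 */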
 961		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 962
 963		spin_lock(&lun->lun_deve_lock);
 964		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
 965			lacl = rcu_dereference_check(se_deve->se_lun_acl,
 966					lockdep_is_held(&lun->lun_deve_lock));
 967
 968			/*
 969			 * spc4r37 p.242:
 970			 * After an explicit target port asymmetric access
 971			 * state change, a device server shall establish a
 972			 * unit attention condition with the additional sense
 973			 * code set to ASYMMETRIC ACCESS STATE CHANGED for
 974			 * the initiator port associated with every I_T nexus
 975			 * other than the I_T nexus on which the SET TARGET
 976			 * PORT GROUPS command was received.
 977			 */
 978			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
 979			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
 980			   (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
 981			    (tg_pt_gp->tg_pt_gp_alua_lun == lun))
 982				continue;
 983
 984			/*
  985			 * The se_deve->se_lun_acl pointer may be NULL for an
  986			 * entry created without explicit Node+MappedLUN ACLs
 987			 */
 988			if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
 989			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
 990				continue;
 991
 992			core_scsi3_ua_allocate(se_deve, 0x2A,
 993				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
 994		}
 995		spin_unlock(&lun->lun_deve_lock);
 996
 997		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 998		percpu_ref_put(&lun->lun_ref);
 
 999	}
1000	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1001}
1002
1003static int core_alua_do_transition_tg_pt(
1004	struct t10_alua_tg_pt_gp *tg_pt_gp,
1005	int new_state,
1006	int explicit)
1007{
1008	int prev_state;
1009
1010	mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1011	/* Nothing to be done here */
1012	if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
1013		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1014		return 0;
1015	}
1016
1017	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
1018		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1019		return -EAGAIN;
1020	}
1021
1022	/*
1023	 * Save the old primary ALUA access state, and set the current state
1024	 * to ALUA_ACCESS_STATE_TRANSITION.
1025	 */
1026	prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
1027	tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
1028	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
1029				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1030				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1031
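	/*
	 * Establish unit attentions so every I_T nexus learns that the
	 * group has entered the Transitioning state.
	 */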
1032	core_alua_queue_state_change_ua(tg_pt_gp);
1033
1034	if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
1035		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1036		return 0;
1037	}
1038
1039	/*
1040	 * Check for the optional ALUA primary state transition delay
1041	 */
1042	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
1043		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1044
1045	/*
1046	 * Set the current primary ALUA access state to the requested new state
1047	 */
1048	tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
1049
1050	/*
1051	 * Update the ALUA metadata buf that has been allocated in
1052	 * core_alua_do_port_transition(), this metadata will be written
1053	 * to struct file.
1054	 *
 1055	 * Note that there are cases where we do not want to update the
 1056	 * metadata, e.g. while the saved metadata is being parsed in userspace
 1057	 * to restore the existing port access state and access status.
1058	 *
1059	 * Also note that the failure to write out the ALUA metadata to
1060	 * struct file does NOT affect the actual ALUA transition.
1061	 */
1062	if (tg_pt_gp->tg_pt_gp_write_metadata) {
1063		core_alua_update_tpg_primary_metadata(tg_pt_gp);
1064	}
1065
1066	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1067		" from primary access state %s to %s\n", (explicit) ? "explicit" :
1068		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1069		tg_pt_gp->tg_pt_gp_id,
1070		core_alua_dump_state(prev_state),
1071		core_alua_dump_state(new_state));
1072
1073	core_alua_queue_state_change_ua(tg_pt_gp);
1074
1075	mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1076	return 0;
1077}
1078
1079int core_alua_do_port_transition(
1080	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
1081	struct se_device *l_dev,
1082	struct se_lun *l_lun,
1083	struct se_node_acl *l_nacl,
1084	int new_state,
1085	int explicit)
1086{
1087	struct se_device *dev;
1088	struct t10_alua_lu_gp *lu_gp;
1089	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
1090	struct t10_alua_tg_pt_gp *tg_pt_gp;
1091	int primary, valid_states, rc = 0;
 
1092
1093	if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
1094		return -ENODEV;
1095
1096	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
1097	if (core_alua_check_transition(new_state, valid_states, &primary,
1098				       explicit) != 0)
1099		return -EINVAL;
 
1100
1101	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
1102	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
1103	lu_gp = local_lu_gp_mem->lu_gp;
1104	atomic_inc(&lu_gp->lu_gp_ref_cnt);
 
1105	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
1106	/*
1107	 * For storage objects that are members of the 'default_lu_gp',
 1108	 * we only do the transition on the passed *l_tg_pt_gp, and not
1109	 * on all of the matching target port groups IDs in default_lu_gp.
1110	 */
1111	if (!lu_gp->lu_gp_id) {
1112		/*
1113		 * core_alua_do_transition_tg_pt() will always return
1114		 * success.
1115		 */
1116		l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
1117		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1118		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
1119						   new_state, explicit);
1120		atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1121		return rc;
1122	}
1123	/*
1124	 * For all other LU groups aside from 'default_lu_gp', walk all of
1125	 * the associated storage objects looking for a matching target port
1126	 * group ID from the local target port group.
1127	 */
1128	spin_lock(&lu_gp->lu_gp_lock);
1129	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
1130				lu_gp_mem_list) {
1131
1132		dev = lu_gp_mem->lu_gp_mem_dev;
1133		atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1134		spin_unlock(&lu_gp->lu_gp_lock);
1135
1136		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1137		list_for_each_entry(tg_pt_gp,
1138				&dev->t10_alua.tg_pt_gps_list,
1139				tg_pt_gp_list) {
1140
1141			if (!tg_pt_gp->tg_pt_gp_valid_id)
1142				continue;
1143			/*
 1144			 * If the target port group asymmetric access state is
 1145			 * changed for any target port group that is accessible via
 1146			 * a logical unit within a LU group, then the asymmetric
 1147			 * access state of the same target port group accessible
 1148			 * via the other logical units in that LU group will also
 1149			 * change.
1150			 */
1151			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
1152				continue;
1153
1154			if (l_tg_pt_gp == tg_pt_gp) {
1155				tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
1156				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1157			} else {
1158				tg_pt_gp->tg_pt_gp_alua_lun = NULL;
1159				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
1160			}
1161			atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1162			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
1163			/*
1164			 * core_alua_do_transition_tg_pt() will always return
1165			 * success.
1166			 */
1167			rc = core_alua_do_transition_tg_pt(tg_pt_gp,
1168					new_state, explicit);
1169
1170			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1171			atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1172			if (rc)
1173				break;
1174		}
1175		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1176
1177		spin_lock(&lu_gp->lu_gp_lock);
1178		atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
 
1179	}
1180	spin_unlock(&lu_gp->lu_gp_lock);
1181
1182	if (!rc) {
1183		pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
1184			 " Group IDs: %hu %s transition to primary state: %s\n",
1185			 config_item_name(&lu_gp->lu_gp_group.cg_item),
1186			 l_tg_pt_gp->tg_pt_gp_id,
1187			 (explicit) ? "explicit" : "implicit",
1188			 core_alua_dump_state(new_state));
1189	}
1190
1191	atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1192	return rc;
1193}
1194
1195static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
1196{
1197	struct se_portal_group *se_tpg = lun->lun_tpg;
1198	unsigned char *md_buf;
1199	char *path;
1200	int len, rc;
1201
1202	mutex_lock(&lun->lun_tg_pt_md_mutex);
 
1203
1204	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
1205	if (!md_buf) {
1206		pr_err("Unable to allocate buf for ALUA metadata\n");
1207		rc = -ENOMEM;
1208		goto out_unlock;
1209	}
1210
1211	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
1212			"alua_tg_pt_status=0x%02x\n",
1213			atomic_read(&lun->lun_tg_pt_secondary_offline),
1214			lun->lun_tg_pt_secondary_stat);
1215
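	/*
	 * Secondary state is stored below db_root as
	 * alua/<fabric_name>/<tpg_wwn>+<tpgt>/lun_<unpacked_lun>, without
	 * the +<tpgt> suffix when the fabric does not implement tpg_get_tag.
	 */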
1216	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
1217		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
1218				db_root, se_tpg->se_tpg_tfo->fabric_name,
1219				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1220				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
1221				lun->unpacked_lun);
1222	} else {
1223		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
1224				db_root, se_tpg->se_tpg_tfo->fabric_name,
1225				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1226				lun->unpacked_lun);
1227	}
1228	if (!path) {
1229		rc = -ENOMEM;
1230		goto out_free;
1231	}
1232
1233	rc = core_alua_write_tpg_metadata(path, md_buf, len);
1234	kfree(path);
1235out_free:
1236	kfree(md_buf);
1237out_unlock:
1238	mutex_unlock(&lun->lun_tg_pt_md_mutex);
1239	return rc;
1240}
1241
1242static int core_alua_set_tg_pt_secondary_state(
1243	struct se_lun *lun,
1244	int explicit,
1245	int offline)
1246{
1247	struct t10_alua_tg_pt_gp *tg_pt_gp;
1248	int trans_delay_msecs;
1249
1250	spin_lock(&lun->lun_tg_pt_gp_lock);
1251	tg_pt_gp = lun->lun_tg_pt_gp;
1252	if (!tg_pt_gp) {
1253		spin_unlock(&lun->lun_tg_pt_gp_lock);
1254		pr_err("Unable to complete secondary state"
1255				" transition\n");
1256		return -EINVAL;
1257	}
1258	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1259	/*
1260	 * Set the secondary ALUA target port access state to OFFLINE
1261	 * or release the previously secondary state for struct se_lun
1262	 */
1263	if (offline)
1264		atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
1265	else
1266		atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
1267
1268	lun->lun_tg_pt_secondary_stat = (explicit) ?
1269			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1270			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
 
1271
1272	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1273		" to secondary access state: %s\n", (explicit) ? "explicit" :
1274		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1275		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1276
1277	spin_unlock(&lun->lun_tg_pt_gp_lock);
1278	/*
1279	 * Do the optional transition delay after we set the secondary
1280	 * ALUA access state.
1281	 */
1282	if (trans_delay_msecs != 0)
1283		msleep_interruptible(trans_delay_msecs);
1284	/*
1285	 * See if we need to update the ALUA fabric port metadata for
1286	 * secondary state and status
1287	 */
1288	if (lun->lun_tg_pt_secondary_write_md)
1289		core_alua_update_tpg_secondary_metadata(lun);
1290
1291	return 0;
1292}
1293
1294struct t10_alua_lba_map *
1295core_alua_allocate_lba_map(struct list_head *list,
1296			   u64 first_lba, u64 last_lba)
1297{
1298	struct t10_alua_lba_map *lba_map;
1299
1300	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1301	if (!lba_map) {
1302		pr_err("Unable to allocate struct t10_alua_lba_map\n");
1303		return ERR_PTR(-ENOMEM);
1304	}
1305	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1306	lba_map->lba_map_first_lba = first_lba;
1307	lba_map->lba_map_last_lba = last_lba;
1308
1309	list_add_tail(&lba_map->lba_map_list, list);
1310	return lba_map;
1311}
1312
1313int
1314core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1315			       int pg_id, int state)
1316{
1317	struct t10_alua_lba_map_member *lba_map_mem;
1318
1319	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1320			    lba_map_mem_list) {
1321		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1322			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1323			return -EINVAL;
1324		}
1325	}
1326
1327	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1328	if (!lba_map_mem) {
1329		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1330		return -ENOMEM;
1331	}
1332	lba_map_mem->lba_map_mem_alua_state = state;
1333	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1334
1335	list_add_tail(&lba_map_mem->lba_map_mem_list,
1336		      &lba_map->lba_map_mem_list);
1337	return 0;
1338}
1339
1340void
1341core_alua_free_lba_map(struct list_head *lba_list)
1342{
1343	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1344	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1345
1346	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1347				 lba_map_list) {
1348		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1349					 &lba_map->lba_map_mem_list,
1350					 lba_map_mem_list) {
1351			list_del(&lba_map_mem->lba_map_mem_list);
1352			kmem_cache_free(t10_alua_lba_map_mem_cache,
1353					lba_map_mem);
1354		}
1355		list_del(&lba_map->lba_map_list);
1356		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1357	}
1358}
1359
1360void
1361core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1362		      int segment_size, int segment_mult)
1363{
1364	struct list_head old_lba_map_list;
1365	struct t10_alua_tg_pt_gp *tg_pt_gp;
1366	int activate = 0, supported;
1367
1368	INIT_LIST_HEAD(&old_lba_map_list);
1369	spin_lock(&dev->t10_alua.lba_map_lock);
1370	dev->t10_alua.lba_map_segment_size = segment_size;
1371	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1372	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1373	if (lba_map_list) {
1374		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1375		activate = 1;
1376	}
1377	spin_unlock(&dev->t10_alua.lba_map_lock);
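	/*
	 * Advertise (or withdraw) the LBA DEPENDENT supported state on every
	 * valid target port group to match whether an LBA map is now active.
	 */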
1378	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1379	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1380			    tg_pt_gp_list) {
1381
1382		if (!tg_pt_gp->tg_pt_gp_valid_id)
1383			continue;
1384		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1385		if (activate)
1386			supported |= ALUA_LBD_SUP;
1387		else
1388			supported &= ~ALUA_LBD_SUP;
1389		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1390	}
1391	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1392	core_alua_free_lba_map(&old_lba_map_list);
1393}
1394
1395struct t10_alua_lu_gp *
1396core_alua_allocate_lu_gp(const char *name, int def_group)
1397{
1398	struct t10_alua_lu_gp *lu_gp;
1399
1400	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1401	if (!lu_gp) {
1402		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1403		return ERR_PTR(-ENOMEM);
1404	}
1405	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1406	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1407	spin_lock_init(&lu_gp->lu_gp_lock);
1408	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1409
1410	if (def_group) {
1411		lu_gp->lu_gp_id = alua_lu_gps_counter++;
1412		lu_gp->lu_gp_valid_id = 1;
1413		alua_lu_gps_count++;
1414	}
1415
1416	return lu_gp;
1417}
1418
1419int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1420{
1421	struct t10_alua_lu_gp *lu_gp_tmp;
1422	u16 lu_gp_id_tmp;
1423	/*
1424	 * The lu_gp->lu_gp_id may only be set once..
1425	 */
1426	if (lu_gp->lu_gp_valid_id) {
1427		pr_warn("ALUA LU Group already has a valid ID,"
1428			" ignoring request\n");
1429		return -EINVAL;
1430	}
1431
1432	spin_lock(&lu_gps_lock);
1433	if (alua_lu_gps_count == 0x0000ffff) {
1434		pr_err("Maximum ALUA alua_lu_gps_count:"
1435				" 0x0000ffff reached\n");
1436		spin_unlock(&lu_gps_lock);
1437		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1438		return -ENOSPC;
1439	}
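	/*
	 * If the caller did not request a specific ID, auto-assign one from
	 * the global counter, retrying until an unused value is found.
	 */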
1440again:
1441	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1442				alua_lu_gps_counter++;
1443
1444	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1445		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1446			if (!lu_gp_id)
1447				goto again;
1448
1449			pr_warn("ALUA Logical Unit Group ID: %hu"
1450				" already exists, ignoring request\n",
1451				lu_gp_id);
1452			spin_unlock(&lu_gps_lock);
1453			return -EINVAL;
1454		}
1455	}
1456
1457	lu_gp->lu_gp_id = lu_gp_id_tmp;
1458	lu_gp->lu_gp_valid_id = 1;
1459	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1460	alua_lu_gps_count++;
1461	spin_unlock(&lu_gps_lock);
1462
1463	return 0;
1464}
1465
1466static struct t10_alua_lu_gp_member *
1467core_alua_allocate_lu_gp_mem(struct se_device *dev)
1468{
1469	struct t10_alua_lu_gp_member *lu_gp_mem;
1470
1471	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1472	if (!lu_gp_mem) {
1473		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1474		return ERR_PTR(-ENOMEM);
1475	}
1476	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1477	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1478	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1479
1480	lu_gp_mem->lu_gp_mem_dev = dev;
1481	dev->dev_alua_lu_gp_mem = lu_gp_mem;
1482
1483	return lu_gp_mem;
1484}
1485
1486void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1487{
1488	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1489	/*
1490	 * Once we have reached this point, config_item_put() has
1491	 * already been called from target_core_alua_drop_lu_gp().
1492	 *
1493	 * Here, we remove the *lu_gp from the global list so that
1494	 * no associations can be made while we are releasing
1495	 * struct t10_alua_lu_gp.
1496	 */
1497	spin_lock(&lu_gps_lock);
 
1498	list_del(&lu_gp->lu_gp_node);
1499	alua_lu_gps_count--;
1500	spin_unlock(&lu_gps_lock);
1501	/*
1502	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1503	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1504	 * released with core_alua_put_lu_gp_from_name()
1505	 */
1506	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1507		cpu_relax();
1508	/*
1509	 * Release reference to struct t10_alua_lu_gp * from all associated
1510	 * struct se_device.
1511	 */
1512	spin_lock(&lu_gp->lu_gp_lock);
1513	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1514				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1515		if (lu_gp_mem->lu_gp_assoc) {
1516			list_del(&lu_gp_mem->lu_gp_mem_list);
1517			lu_gp->lu_gp_members--;
1518			lu_gp_mem->lu_gp_assoc = 0;
1519		}
1520		spin_unlock(&lu_gp->lu_gp_lock);
1521		/*
1522		 *
1523		 * lu_gp_mem is associated with a single
1524		 * struct se_device->dev_alua_lu_gp_mem, and is released when
1525		 * struct se_device is released via core_alua_free_lu_gp_mem().
1526		 *
1527		 * If the passed lu_gp does NOT match the default_lu_gp, assume
1528		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1529		 */
1530		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1531		if (lu_gp != default_lu_gp)
1532			__core_alua_attach_lu_gp_mem(lu_gp_mem,
1533					default_lu_gp);
1534		else
1535			lu_gp_mem->lu_gp = NULL;
1536		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1537
1538		spin_lock(&lu_gp->lu_gp_lock);
1539	}
1540	spin_unlock(&lu_gp->lu_gp_lock);
1541
1542	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1543}
1544
1545void core_alua_free_lu_gp_mem(struct se_device *dev)
1546{
1547	struct t10_alua_lu_gp *lu_gp;
1548	struct t10_alua_lu_gp_member *lu_gp_mem;
1549
1550	lu_gp_mem = dev->dev_alua_lu_gp_mem;
1551	if (!lu_gp_mem)
1552		return;
1553
1554	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1555		cpu_relax();
1556
1557	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1558	lu_gp = lu_gp_mem->lu_gp;
1559	if (lu_gp) {
1560		spin_lock(&lu_gp->lu_gp_lock);
1561		if (lu_gp_mem->lu_gp_assoc) {
1562			list_del(&lu_gp_mem->lu_gp_mem_list);
1563			lu_gp->lu_gp_members--;
1564			lu_gp_mem->lu_gp_assoc = 0;
1565		}
1566		spin_unlock(&lu_gp->lu_gp_lock);
1567		lu_gp_mem->lu_gp = NULL;
1568	}
1569	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1570
1571	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1572}
1573
1574struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1575{
1576	struct t10_alua_lu_gp *lu_gp;
1577	struct config_item *ci;
1578
1579	spin_lock(&lu_gps_lock);
1580	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1581		if (!lu_gp->lu_gp_valid_id)
1582			continue;
1583		ci = &lu_gp->lu_gp_group.cg_item;
1584		if (!strcmp(config_item_name(ci), name)) {
1585			atomic_inc(&lu_gp->lu_gp_ref_cnt);
1586			spin_unlock(&lu_gps_lock);
1587			return lu_gp;
1588		}
1589	}
1590	spin_unlock(&lu_gps_lock);
1591
1592	return NULL;
1593}
1594
1595void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1596{
1597	spin_lock(&lu_gps_lock);
1598	atomic_dec(&lu_gp->lu_gp_ref_cnt);
1599	spin_unlock(&lu_gps_lock);
1600}
1601
1602/*
1603 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1604 */
1605void __core_alua_attach_lu_gp_mem(
1606	struct t10_alua_lu_gp_member *lu_gp_mem,
1607	struct t10_alua_lu_gp *lu_gp)
1608{
1609	spin_lock(&lu_gp->lu_gp_lock);
1610	lu_gp_mem->lu_gp = lu_gp;
1611	lu_gp_mem->lu_gp_assoc = 1;
1612	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1613	lu_gp->lu_gp_members++;
1614	spin_unlock(&lu_gp->lu_gp_lock);
1615}
1616
1617/*
1618 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1619 */
1620void __core_alua_drop_lu_gp_mem(
1621	struct t10_alua_lu_gp_member *lu_gp_mem,
1622	struct t10_alua_lu_gp *lu_gp)
1623{
1624	spin_lock(&lu_gp->lu_gp_lock);
1625	list_del(&lu_gp_mem->lu_gp_mem_list);
1626	lu_gp_mem->lu_gp = NULL;
1627	lu_gp_mem->lu_gp_assoc = 0;
1628	lu_gp->lu_gp_members--;
1629	spin_unlock(&lu_gp->lu_gp_lock);
1630}
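/*
 * Note that both helpers above are called with the member's
 * lu_gp_mem_lock held and nest lu_gp->lu_gp_lock inside it, so the
 * ordering is always lu_gp_mem_lock -> lu_gp_lock.  This is also why
 * core_alua_free_lu_gp() drops lu_gp_lock before taking a member's
 * lu_gp_mem_lock when re-associating members with default_lu_gp.
 */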
1631
1632struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1633		const char *name, int def_group)
1634{
1635	struct t10_alua_tg_pt_gp *tg_pt_gp;
1636
1637	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1638	if (!tg_pt_gp) {
1639		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1640		return NULL;
1641	}
1642	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1643	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1644	mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
1645	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1646	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1647	tg_pt_gp->tg_pt_gp_dev = dev;
1648	tg_pt_gp->tg_pt_gp_alua_access_state =
1649			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1650	/*
1651	 * Enable both explicit and implicit ALUA support by default
1652	 */
1653	tg_pt_gp->tg_pt_gp_alua_access_type =
1654			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1655	/*
1656	 * Set the default Active/NonOptimized Delay in milliseconds
1657	 */
1658	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1659	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1660	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1661
1662	/*
1663	 * Enable all supported states
1664	 */
1665	tg_pt_gp->tg_pt_gp_alua_supported_states =
1666	    ALUA_T_SUP | ALUA_O_SUP |
1667	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1668
1669	if (def_group) {
1670		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1671		tg_pt_gp->tg_pt_gp_id =
1672				dev->t10_alua.alua_tg_pt_gps_counter++;
1673		tg_pt_gp->tg_pt_gp_valid_id = 1;
1674		dev->t10_alua.alua_tg_pt_gps_count++;
1675		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1676			      &dev->t10_alua.tg_pt_gps_list);
1677		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1678	}
1679
1680	return tg_pt_gp;
1681}
1682
1683int core_alua_set_tg_pt_gp_id(
1684	struct t10_alua_tg_pt_gp *tg_pt_gp,
1685	u16 tg_pt_gp_id)
1686{
1687	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1688	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1689	u16 tg_pt_gp_id_tmp;
1690
1691	/*
1692	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
1693	 */
1694	if (tg_pt_gp->tg_pt_gp_valid_id) {
1695		pr_warn("ALUA TG PT Group already has a valid ID,"
1696			" ignoring request\n");
1697		return -EINVAL;
1698	}
1699
1700	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1701	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1702		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1703			" 0x0000ffff reached\n");
1704		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1705		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1706		return -ENOSPC;
1707	}
1708again:
1709	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1710			dev->t10_alua.alua_tg_pt_gps_counter++;
1711
1712	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1713			tg_pt_gp_list) {
1714		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1715			if (!tg_pt_gp_id)
1716				goto again;
1717
1718			pr_err("ALUA Target Port Group ID: %hu already"
1719				" exists, ignoring request\n", tg_pt_gp_id);
1720			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1721			return -EINVAL;
1722		}
1723	}
1724
1725	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1726	tg_pt_gp->tg_pt_gp_valid_id = 1;
1727	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1728			&dev->t10_alua.tg_pt_gps_list);
1729	dev->t10_alua.alua_tg_pt_gps_count++;
1730	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1731
1732	return 0;
1733}
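/*
 * Illustrative usage (backstore names are assumptions; see
 * target_core_configfs.c for the real attribute layout): target port
 * groups live in a backstore device's alua/ directory, e.g.:
 *
 *   mkdir /sys/kernel/config/target/core/iblock_0/disk0/alua/my_tg_pt_gp
 *   echo 16 > /sys/kernel/config/target/core/iblock_0/disk0/alua/my_tg_pt_gp/tg_pt_gp_id
 *
 * As with LU groups, tg_pt_gp_id == 0 makes core_alua_set_tg_pt_gp_id()
 * pick the next free ID from alua_tg_pt_gps_counter.
 */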
1734
1735void core_alua_free_tg_pt_gp(
1736	struct t10_alua_tg_pt_gp *tg_pt_gp)
1737{
1738	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1739	struct se_lun *lun, *next;
1740
1741	/*
1742	 * Once we have reached this point, config_item_put() has already
1743	 * been called from target_core_alua_drop_tg_pt_gp().
1744	 *
1745	 * Here we remove *tg_pt_gp from the global list so that
1746	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1747	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1748	 */
1749	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1750	if (tg_pt_gp->tg_pt_gp_valid_id) {
1751		list_del(&tg_pt_gp->tg_pt_gp_list);
1752		dev->t10_alua.alua_tg_pt_gps_count--;
1753	}
1754	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1755
1756	/*
1757	 * Allow a struct t10_alua_tg_pt_gp * referenced by
1758	 * core_alua_get_tg_pt_gp_by_name() in
1759	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1760	 * to be released with core_alua_put_tg_pt_gp_from_name().
1761	 */
1762	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1763		cpu_relax();
1764
1765	/*
1766	 * Release reference to struct t10_alua_tg_pt_gp from all associated
1767	 * struct se_port.
1768	 */
1769	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1770	list_for_each_entry_safe(lun, next,
1771			&tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
1772		list_del_init(&lun->lun_tg_pt_gp_link);
1773		tg_pt_gp->tg_pt_gp_members--;
1774
1775		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1776		/*
1777		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1778		 * assume we want to re-associate the LUN with
1779		 * default_tg_pt_gp.
1780		 */
1781		spin_lock(&lun->lun_tg_pt_gp_lock);
1782		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1783			__target_attach_tg_pt_gp(lun,
1784					dev->t10_alua.default_tg_pt_gp);
1785		} else
1786			lun->lun_tg_pt_gp = NULL;
1787		spin_unlock(&lun->lun_tg_pt_gp_lock);
1788
1789		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1790	}
1791	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1792
1793	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1794}
1795
1796static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1797		struct se_device *dev, const char *name)
1798{
1799	struct t10_alua_tg_pt_gp *tg_pt_gp;
1800	struct config_item *ci;
1801
1802	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1803	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1804			tg_pt_gp_list) {
1805		if (!tg_pt_gp->tg_pt_gp_valid_id)
1806			continue;
1807		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1808		if (!strcmp(config_item_name(ci), name)) {
1809			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1810			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1811			return tg_pt_gp;
1812		}
1813	}
1814	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1815
1816	return NULL;
1817}
1818
1819static void core_alua_put_tg_pt_gp_from_name(
1820	struct t10_alua_tg_pt_gp *tg_pt_gp)
1821{
1822	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1823
1824	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1825	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1826	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1827}
1828
1829static void __target_attach_tg_pt_gp(struct se_lun *lun,
1830		struct t10_alua_tg_pt_gp *tg_pt_gp)
1831{
1832	struct se_dev_entry *se_deve;
1833
1834	assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1835
1836	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1837	lun->lun_tg_pt_gp = tg_pt_gp;
1838	list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
1839	tg_pt_gp->tg_pt_gp_members++;
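	/*
	 * Attaching the LUN to a different target port group changes the
	 * data reported to initiators, so queue an INQUIRY DATA HAS
	 * CHANGED (ASC 0x3f) unit attention for every se_dev_entry
	 * registered on this LUN.
	 */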
1840	spin_lock(&lun->lun_deve_lock);
1841	list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
1842		core_scsi3_ua_allocate(se_deve, 0x3f,
1843				       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
1844	spin_unlock(&lun->lun_deve_lock);
1845	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1846}
1847
1848void target_attach_tg_pt_gp(struct se_lun *lun,
1849		struct t10_alua_tg_pt_gp *tg_pt_gp)
1850{
1851	spin_lock(&lun->lun_tg_pt_gp_lock);
1852	__target_attach_tg_pt_gp(lun, tg_pt_gp);
1853	spin_unlock(&lun->lun_tg_pt_gp_lock);
1854}
1855
1856static void __target_detach_tg_pt_gp(struct se_lun *lun,
1857		struct t10_alua_tg_pt_gp *tg_pt_gp)
1858{
1859	assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1860
1861	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1862	list_del_init(&lun->lun_tg_pt_gp_link);
1863	tg_pt_gp->tg_pt_gp_members--;
1864	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1865
1866	lun->lun_tg_pt_gp = NULL;
1867}
1868
1869void target_detach_tg_pt_gp(struct se_lun *lun)
1870{
1871	struct t10_alua_tg_pt_gp *tg_pt_gp;
1872
1873	spin_lock(&lun->lun_tg_pt_gp_lock);
1874	tg_pt_gp = lun->lun_tg_pt_gp;
1875	if (tg_pt_gp)
1876		__target_detach_tg_pt_gp(lun, tg_pt_gp);
1877	spin_unlock(&lun->lun_tg_pt_gp_lock);
1878}
1879
1880ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1881{
1882	struct config_item *tg_pt_ci;
1883	struct t10_alua_tg_pt_gp *tg_pt_gp;
1884	ssize_t len = 0;
1885
1886	spin_lock(&lun->lun_tg_pt_gp_lock);
1887	tg_pt_gp = lun->lun_tg_pt_gp;
1888	if (tg_pt_gp) {
1889		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1890		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1891			" %hu\nTG Port Primary Access State: %s\nTG Port "
1892			"Primary Access Status: %s\nTG Port Secondary Access"
1893			" State: %s\nTG Port Secondary Access Status: %s\n",
1894			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1895			core_alua_dump_state(
1896				tg_pt_gp->tg_pt_gp_alua_access_state),
1897			core_alua_dump_status(
1898				tg_pt_gp->tg_pt_gp_alua_access_status),
1899			atomic_read(&lun->lun_tg_pt_secondary_offline) ?
1900			"Offline" : "None",
1901			core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
1902	}
1903	spin_unlock(&lun->lun_tg_pt_gp_lock);
1904
1905	return len;
1906}
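/*
 * Example output (illustrative; the state/status strings come from
 * core_alua_dump_state() and core_alua_dump_status()):
 *
 *   TG Port Alias: my_tg_pt_gp
 *   TG Port Group ID: 16
 *   TG Port Primary Access State: Active/Optimized
 *   TG Port Primary Access Status: None
 *   TG Port Secondary Access State: None
 *   TG Port Secondary Access Status: None
 */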
1907
1908ssize_t core_alua_store_tg_pt_gp_info(
1909	struct se_lun *lun,
1910	const char *page,
1911	size_t count)
1912{
1913	struct se_portal_group *tpg = lun->lun_tpg;
1914	/*
1915	 * rcu_dereference_raw protected by se_lun->lun_group symlink
1916	 * reference to se_device->dev_group.
1917	 */
1918	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
1919	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1920	unsigned char buf[TG_PT_GROUP_NAME_BUF];
1921	int move = 0;
1922
1923	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
1924	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
1925		return -ENODEV;
1926
1927	if (count > TG_PT_GROUP_NAME_BUF) {
1928		pr_err("ALUA Target Port Group alias too large!\n");
1929		return -EINVAL;
1930	}
1931	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1932	memcpy(buf, page, count);
1933	/*
1934	 * Any ALUA target port group alias besides "NULL" means we will be
1935	 * making a new group association.
1936	 */
1937	if (strcmp(strstrip(buf), "NULL")) {
1938		/*
1939		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1940		 * struct t10_alua_tg_pt_gp.  This reference is released with
1941		 * core_alua_put_tg_pt_gp_from_name() below.
1942		 */
1943		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
1944					strstrip(buf));
1945		if (!tg_pt_gp_new)
1946			return -ENODEV;
1947	}
1948
1949	spin_lock(&lun->lun_tg_pt_gp_lock);
1950	tg_pt_gp = lun->lun_tg_pt_gp;
1951	if (tg_pt_gp) {
1952		/*
1953		 * Clearing an existing tg_pt_gp association, and replacing
1954		 * with the default_tg_pt_gp.
1955		 */
1956		if (!tg_pt_gp_new) {
1957			pr_debug("Target_Core_ConfigFS: Moving"
1958				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
1959				" alua/%s, ID: %hu back to"
1960				" default_tg_pt_gp\n",
1961				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1962				tpg->se_tpg_tfo->tpg_get_tag(tpg),
1963				config_item_name(&lun->lun_group.cg_item),
1964				config_item_name(
1965					&tg_pt_gp->tg_pt_gp_group.cg_item),
1966				tg_pt_gp->tg_pt_gp_id);
1967
1968			__target_detach_tg_pt_gp(lun, tg_pt_gp);
1969			__target_attach_tg_pt_gp(lun,
1970					dev->t10_alua.default_tg_pt_gp);
1971			spin_unlock(&lun->lun_tg_pt_gp_lock);
1972
1973			return count;
1974		}
1975		__target_detach_tg_pt_gp(lun, tg_pt_gp);
1976		move = 1;
1977	}
1978
1979	__target_attach_tg_pt_gp(lun, tg_pt_gp_new);
1980	spin_unlock(&lun->lun_tg_pt_gp_lock);
1981	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1982		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
1983		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1984		tpg->se_tpg_tfo->tpg_get_tag(tpg),
1985		config_item_name(&lun->lun_group.cg_item),
1986		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1987		tg_pt_gp_new->tg_pt_gp_id);
1988
1989	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1990	return count;
1991}
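/*
 * Illustrative usage (the attribute name is an assumption; the path
 * components match the pr_debug messages above): a fabric LUN is pointed
 * at a target port group by writing the group's configfs name, e.g.:
 *
 *   echo my_tg_pt_gp > .../tpgt_1/lun/lun_0/alua_tg_pt_gp_info
 *
 * Writing the literal string "NULL" moves the LUN back to
 * default_tg_pt_gp, as handled above.
 */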
1992
1993ssize_t core_alua_show_access_type(
1994	struct t10_alua_tg_pt_gp *tg_pt_gp,
1995	char *page)
1996{
1997	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
1998	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
1999		return sprintf(page, "Implicit and Explicit\n");
2000	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
2001		return sprintf(page, "Implicit\n");
2002	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2003		return sprintf(page, "Explicit\n");
2004	else
2005		return sprintf(page, "None\n");
2006}
2007
2008ssize_t core_alua_store_access_type(
2009	struct t10_alua_tg_pt_gp *tg_pt_gp,
2010	const char *page,
2011	size_t count)
2012{
2013	unsigned long tmp;
2014	int ret;
2015
2016	ret = kstrtoul(page, 0, &tmp);
2017	if (ret < 0) {
2018		pr_err("Unable to extract alua_access_type\n");
2019		return ret;
2020	}
2021	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2022		pr_err("Illegal value for alua_access_type:"
2023				" %lu\n", tmp);
2024		return -EINVAL;
2025	}
2026	if (tmp == 3)
2027		tg_pt_gp->tg_pt_gp_alua_access_type =
2028			TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2029	else if (tmp == 2)
2030		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2031	else if (tmp == 1)
2032		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2033	else
2034		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2035
2036	return count;
2037}
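/*
 * The accepted values map directly onto the TPGS bits (attribute name
 * assumed to be alua_access_type):
 *
 *   echo 0 > alua_access_type   -> ALUA disabled
 *   echo 1 > alua_access_type   -> TPGS_IMPLICIT_ALUA
 *   echo 2 > alua_access_type   -> TPGS_EXPLICIT_ALUA
 *   echo 3 > alua_access_type   -> implicit and explicit ALUA
 */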
2038
2039ssize_t core_alua_show_nonop_delay_msecs(
2040	struct t10_alua_tg_pt_gp *tg_pt_gp,
2041	char *page)
2042{
2043	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2044}
2045
2046ssize_t core_alua_store_nonop_delay_msecs(
2047	struct t10_alua_tg_pt_gp *tg_pt_gp,
2048	const char *page,
2049	size_t count)
2050{
2051	unsigned long tmp;
2052	int ret;
2053
2054	ret = kstrtoul(page, 0, &tmp);
2055	if (ret < 0) {
2056		pr_err("Unable to extract nonop_delay_msecs\n");
2057		return ret;
2058	}
2059	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2060		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2061			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2062			ALUA_MAX_NONOP_DELAY_MSECS);
2063		return -EINVAL;
2064	}
2065	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2066
2067	return count;
2068}
2069
2070ssize_t core_alua_show_trans_delay_msecs(
2071	struct t10_alua_tg_pt_gp *tg_pt_gp,
2072	char *page)
2073{
2074	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2075}
2076
2077ssize_t core_alua_store_trans_delay_msecs(
2078	struct t10_alua_tg_pt_gp *tg_pt_gp,
2079	const char *page,
2080	size_t count)
2081{
2082	unsigned long tmp;
2083	int ret;
2084
2085	ret = kstrtoul(page, 0, &tmp);
2086	if (ret < 0) {
2087		pr_err("Unable to extract trans_delay_msecs\n");
2088		return ret;
2089	}
2090	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2091		pr_err("Passed trans_delay_msecs: %lu, exceeds"
2092			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2093			ALUA_MAX_TRANS_DELAY_MSECS);
2094		return -EINVAL;
2095	}
2096	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2097
2098	return count;
2099}
2100
2101ssize_t core_alua_show_implicit_trans_secs(
2102	struct t10_alua_tg_pt_gp *tg_pt_gp,
2103	char *page)
2104{
2105	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2106}
2107
2108ssize_t core_alua_store_implicit_trans_secs(
2109	struct t10_alua_tg_pt_gp *tg_pt_gp,
2110	const char *page,
2111	size_t count)
2112{
2113	unsigned long tmp;
2114	int ret;
2115
2116	ret = kstrtoul(page, 0, &tmp);
2117	if (ret < 0) {
2118		pr_err("Unable to extract implicit_trans_secs\n");
2119		return ret;
2120	}
2121	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2122		pr_err("Passed implicit_trans_secs: %lu, exceeds"
2123			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2124			ALUA_MAX_IMPLICIT_TRANS_SECS);
2125		return  -EINVAL;
2126	}
2127	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2128
2129	return count;
2130}
2131
2132ssize_t core_alua_show_preferred_bit(
2133	struct t10_alua_tg_pt_gp *tg_pt_gp,
2134	char *page)
2135{
2136	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2137}
2138
2139ssize_t core_alua_store_preferred_bit(
2140	struct t10_alua_tg_pt_gp *tg_pt_gp,
2141	const char *page,
2142	size_t count)
2143{
2144	unsigned long tmp;
2145	int ret;
2146
2147	ret = kstrtoul(page, 0, &tmp);
2148	if (ret < 0) {
2149		pr_err("Unable to extract preferred ALUA value\n");
2150		return ret;
2151	}
2152	if ((tmp != 0) && (tmp != 1)) {
2153		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2154		return -EINVAL;
2155	}
2156	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2157
2158	return count;
2159}
2160
2161ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2162{
2163	return sprintf(page, "%d\n",
2164		atomic_read(&lun->lun_tg_pt_secondary_offline));
2165}
2166
2167ssize_t core_alua_store_offline_bit(
2168	struct se_lun *lun,
2169	const char *page,
2170	size_t count)
2171{
2172	/*
2173	 * rcu_dereference_raw protected by se_lun->lun_group symlink
2174	 * reference to se_device->dev_group.
2175	 */
2176	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
2177	unsigned long tmp;
2178	int ret;
2179
2180	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
2181	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
2182		return -ENODEV;
2183
2184	ret = kstrtoul(page, 0, &tmp);
2185	if (ret < 0) {
2186		pr_err("Unable to extract alua_tg_pt_offline value\n");
2187		return ret;
2188	}
2189	if ((tmp != 0) && (tmp != 1)) {
2190		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2191				tmp);
2192		return -EINVAL;
2193	}
2194
2195	ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
2196	if (ret < 0)
2197		return -EINVAL;
2198
2199	return count;
2200}
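/*
 * Illustrative usage (attribute name assumed from the messages above):
 *
 *   echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline
 *
 * requests a secondary state transition to Offline via
 * core_alua_set_tg_pt_secondary_state(); echoing 0 brings the port
 * back online.
 */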
2201
2202ssize_t core_alua_show_secondary_status(
2203	struct se_lun *lun,
2204	char *page)
2205{
2206	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
2207}
2208
2209ssize_t core_alua_store_secondary_status(
2210	struct se_lun *lun,
2211	const char *page,
2212	size_t count)
2213{
2214	unsigned long tmp;
2215	int ret;
2216
2217	ret = kstrtoul(page, 0, &tmp);
2218	if (ret < 0) {
2219		pr_err("Unable to extract alua_tg_pt_status\n");
2220		return ret;
2221	}
2222	if ((tmp != ALUA_STATUS_NONE) &&
2223	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2224	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2225		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2226				tmp);
2227		return -EINVAL;
2228	}
2229	lun->lun_tg_pt_secondary_stat = (int)tmp;
2230
2231	return count;
2232}
2233
2234ssize_t core_alua_show_secondary_write_metadata(
2235	struct se_lun *lun,
2236	char *page)
2237{
2238	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
2239}
2240
2241ssize_t core_alua_store_secondary_write_metadata(
2242	struct se_lun *lun,
2243	const char *page,
2244	size_t count)
2245{
2246	unsigned long tmp;
2247	int ret;
2248
2249	ret = kstrtoul(page, 0, &tmp);
2250	if (ret < 0) {
2251		pr_err("Unable to extract alua_tg_pt_write_md\n");
2252		return ret;
2253	}
2254	if ((tmp != 0) && (tmp != 1)) {
2255		pr_err("Illegal value for alua_tg_pt_write_md:"
2256				" %lu\n", tmp);
2257		return -EINVAL;
2258	}
2259	lun->lun_tg_pt_secondary_write_md = (int)tmp;
2260
2261	return count;
2262}
2263
2264int core_setup_alua(struct se_device *dev)
2265{
2266	if (!(dev->transport_flags &
2267	     TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
2268	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2269		struct t10_alua_lu_gp_member *lu_gp_mem;
2270
2271		/*
2272		 * Associate this struct se_device with the default ALUA
2273		 * LUN Group.
2274		 */
2275		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2276		if (IS_ERR(lu_gp_mem))
2277			return PTR_ERR(lu_gp_mem);
2278
2279		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2280		__core_alua_attach_lu_gp_mem(lu_gp_mem,
2281				default_lu_gp);
2282		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2283
2284		pr_debug("%s: Adding to default ALUA LU Group:"
2285			" core/alua/lu_gps/default_lu_gp\n",
2286			dev->transport->name);
2287	}
2288
2289	return 0;
2290}