   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*******************************************************************************
   3 * Filename:  target_core_alua.c
   4 *
   5 * This file contains SPC-3 compliant asymmetric logical unit access (ALUA)
   6 *
   7 * (c) Copyright 2009-2013 Datera, Inc.
   8 *
   9 * Nicholas A. Bellinger <nab@kernel.org>
  10 *
  11 ******************************************************************************/
  12
  13#include <linux/slab.h>
  14#include <linux/spinlock.h>
  15#include <linux/configfs.h>
  16#include <linux/delay.h>
  17#include <linux/export.h>
  18#include <linux/fcntl.h>
  19#include <linux/file.h>
  20#include <linux/fs.h>
  21#include <scsi/scsi_proto.h>
  22#include <asm/unaligned.h>
  23
  24#include <target/target_core_base.h>
  25#include <target/target_core_backend.h>
  26#include <target/target_core_fabric.h>
  27
  28#include "target_core_internal.h"
  29#include "target_core_alua.h"
  30#include "target_core_ua.h"
  31
  32static sense_reason_t core_alua_check_transition(int state, int valid,
  33						 int *primary, int explicit);
  34static int core_alua_set_tg_pt_secondary_state(
  35		struct se_lun *lun, int explicit, int offline);
  36
  37static char *core_alua_dump_state(int state);
  38
  39static void __target_attach_tg_pt_gp(struct se_lun *lun,
  40		struct t10_alua_tg_pt_gp *tg_pt_gp);
  41
  42static u16 alua_lu_gps_counter;
  43static u32 alua_lu_gps_count;
  44
  45static DEFINE_SPINLOCK(lu_gps_lock);
  46static LIST_HEAD(lu_gps_list);
  47
  48struct t10_alua_lu_gp *default_lu_gp;
  49
  50/*
  51 * REPORT REFERRALS
  52 *
  53 * See sbc3r35 section 5.23
  54 */
  55sense_reason_t
  56target_emulate_report_referrals(struct se_cmd *cmd)
  57{
  58	struct se_device *dev = cmd->se_dev;
  59	struct t10_alua_lba_map *map;
  60	struct t10_alua_lba_map_member *map_mem;
  61	unsigned char *buf;
  62	u32 rd_len = 0, off;
  63
  64	if (cmd->data_length < 4) {
  65		pr_warn("REPORT REFERRALS allocation length %u too"
  66			" small\n", cmd->data_length);
  67		return TCM_INVALID_CDB_FIELD;
  68	}
  69
  70	buf = transport_kmap_data_sg(cmd);
  71	if (!buf)
  72		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
  73
  74	off = 4;
  75	spin_lock(&dev->t10_alua.lba_map_lock);
  76	if (list_empty(&dev->t10_alua.lba_map_list)) {
  77		spin_unlock(&dev->t10_alua.lba_map_lock);
  78		transport_kunmap_data_sg(cmd);
  79
  80		return TCM_UNSUPPORTED_SCSI_OPCODE;
  81	}
  82
  83	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
  84			    lba_map_list) {
  85		int desc_num = off + 3;
  86		int pg_num;
  87
  88		off += 4;
  89		if (cmd->data_length > off)
  90			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
  91		off += 8;
  92		if (cmd->data_length > off)
  93			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
  94		off += 8;
  95		rd_len += 20;
  96		pg_num = 0;
  97		list_for_each_entry(map_mem, &map->lba_map_mem_list,
  98				    lba_map_mem_list) {
  99			int alua_state = map_mem->lba_map_mem_alua_state;
 100			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
 101
 102			if (cmd->data_length > off)
 103				buf[off] = alua_state & 0x0f;
 104			off += 2;
 105			if (cmd->data_length > off)
 106				buf[off] = (alua_pg_id >> 8) & 0xff;
 107			off++;
 108			if (cmd->data_length > off)
 109				buf[off] = (alua_pg_id & 0xff);
 110			off++;
 111			rd_len += 4;
 112			pg_num++;
 113		}
 114		if (cmd->data_length > desc_num)
 115			buf[desc_num] = pg_num;
 116	}
 117	spin_unlock(&dev->t10_alua.lba_map_lock);
 118
 119	/*
 120	 * Set the RETURN DATA LENGTH in the header of the DataIN payload
 121	 */
 122	put_unaligned_be16(rd_len, &buf[2]);
 123
 124	transport_kunmap_data_sg(cmd);
 125
 126	target_complete_cmd(cmd, GOOD);
 127	return 0;
 128}
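/*
 * Shape of the parameter data built above, reconstructed from the
 * offset arithmetic in target_emulate_report_referrals():
 *
 *	byte 0..1     reserved
 *	byte 2..3     RETURN DATA LENGTH (rd_len, big-endian)
 *	per user data segment descriptor (20 bytes + 4 per port group):
 *	  byte 0..2     reserved
 *	  byte 3        NUMBER OF TARGET PORT GROUPS (pg_num)
 *	  byte 4..11    FIRST LBA (big-endian)
 *	  byte 12..19   LAST LBA (big-endian)
 *	  then per group: { state & 0x0f, reserved, pg_id (be16) }
 */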
 129
 130/*
 131 * REPORT_TARGET_PORT_GROUPS
 132 *
 133 * See spc4r17 section 6.27
 134 */
 135sense_reason_t
 136target_emulate_report_target_port_groups(struct se_cmd *cmd)
 137{
 138	struct se_device *dev = cmd->se_dev;
 139	struct t10_alua_tg_pt_gp *tg_pt_gp;
 140	struct se_lun *lun;
 141	unsigned char *buf;
 142	u32 rd_len = 0, off;
 143	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
 144
 145	/*
 146	 * Skip over RESERVED area to first Target port group descriptor
 147	 * depending on the PARAMETER DATA FORMAT type..
 148	 */
 149	if (ext_hdr != 0)
 150		off = 8;
 151	else
 152		off = 4;
 153
 154	if (cmd->data_length < off) {
 155		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
 156			" small for %s header\n", cmd->data_length,
 157			(ext_hdr) ? "extended" : "normal");
 158		return TCM_INVALID_CDB_FIELD;
 159	}
 160	buf = transport_kmap_data_sg(cmd);
 161	if (!buf)
 162		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 163
 164	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 165	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
 166			tg_pt_gp_list) {
 167		/*
 168		 * Check if the Target port group and Target port descriptor list
 169		 * based on tg_pt_gp_members count will fit into the response payload.
 170		 * Otherwise, bump rd_len to let the initiator know we have exceeded
 171		 * the allocation length and the response is truncated.
 172		 */
 173		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
 174		     cmd->data_length) {
 175			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
 176			continue;
 177		}
 178		/*
 179		 * PREF: Preferred target port bit, determine if this
 180		 * bit should be set for port group.
 181		 */
 182		if (tg_pt_gp->tg_pt_gp_pref)
 183			buf[off] = 0x80;
 184		/*
 185		 * Set the ASYMMETRIC ACCESS State
 186		 */
 187		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
 188		/*
 189		 * Set supported ASYMMETRIC ACCESS State bits
 190		 */
 191		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
 192		/*
 193		 * TARGET PORT GROUP
 194		 */
 195		put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
 196		off += 2;
 197
 198		off++; /* Skip over Reserved */
 199		/*
 200		 * STATUS CODE
 201		 */
 202		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
 203		/*
 204		 * Vendor Specific field
 205		 */
 206		buf[off++] = 0x00;
 207		/*
 208		 * TARGET PORT COUNT
 209		 */
 210		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
 211		rd_len += 8;
 212
 213		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 214		list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
 215				lun_tg_pt_gp_link) {
 216			/*
 217			 * Start Target Port descriptor format
 218			 *
 219			 * See spc4r17 section 6.2.7 Table 247
 220			 */
 221			off += 2; /* Skip over Obsolete */
 222			/*
 223			 * Set RELATIVE TARGET PORT IDENTIFIER
 224			 */
 225			put_unaligned_be16(lun->lun_rtpi, &buf[off]);
 226			off += 2;
 227			rd_len += 4;
 228		}
 229		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 230	}
 231	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 232	/*
 233	 * Set the RETURN DATA LENGTH in the header of the DataIN payload
 234	 */
 235	put_unaligned_be32(rd_len, &buf[0]);
 236
 237	/*
 238	 * Fill in the Extended header parameter data format if requested
 239	 */
 240	if (ext_hdr != 0) {
 241		buf[4] = 0x10;
 242		/*
 243		 * Set the implicit transition time (in seconds) for the application
 244		 * client to use as a base for its transition timeout value.
 245		 *
 246		 * Use the current tg_pt_gp membership from the LUN on which
 247		 * this CDB was received to determine this value individually
 248		 * for the ALUA target port group.
 249		 */
 250		spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
 251		tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
 252		if (tg_pt_gp)
 253			buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
 254		spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
 255	}
 256	transport_kunmap_data_sg(cmd);
 257
 258	target_complete_cmd_with_length(cmd, GOOD, rd_len + 4);
 259	return 0;
 260}
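/*
 * Byte layout of one target port group descriptor as emitted above
 * (annotation only; see the spc4r17 references in the code):
 *
 *	byte 0     PREF (0x80) | ASYMMETRIC ACCESS STATE
 *	byte 1     supported ALUA access state bits
 *	byte 2..3  TARGET PORT GROUP id (big-endian)
 *	byte 4     reserved
 *	byte 5     STATUS CODE
 *	byte 6     vendor specific
 *	byte 7     TARGET PORT COUNT
 *	then per port: 2 obsolete bytes + RELATIVE TARGET PORT ID (be16)
 */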
 261
 262/*
 263 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 264 *
 265 * See spc4r17 section 6.35
 266 */
 267sense_reason_t
 268target_emulate_set_target_port_groups(struct se_cmd *cmd)
 269{
 270	struct se_device *dev = cmd->se_dev;
 271	struct se_lun *l_lun = cmd->se_lun;
 272	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 273	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 274	unsigned char *buf;
 275	unsigned char *ptr;
 276	sense_reason_t rc = TCM_NO_SENSE;
 277	u32 len = 4; /* Skip over RESERVED area in header */
 278	int alua_access_state, primary = 0, valid_states;
 279	u16 tg_pt_id, rtpi;
 280
 281	if (cmd->data_length < 4) {
 282		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
 283			" small\n", cmd->data_length);
 284		return TCM_INVALID_PARAMETER_LIST;
 285	}
 286
 287	buf = transport_kmap_data_sg(cmd);
 288	if (!buf)
 289		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 290
 291	/*
 292	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
 293	 * for the local tg_pt_gp.
 294	 */
 295	spin_lock(&l_lun->lun_tg_pt_gp_lock);
 296	l_tg_pt_gp = l_lun->lun_tg_pt_gp;
 297	if (!l_tg_pt_gp) {
 298		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
 299		pr_err("Unable to access l_lun->tg_pt_gp\n");
 300		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 301		goto out;
 302	}
 303
 304	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
 305		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
 306		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
 307				" while TPGS_EXPLICIT_ALUA is disabled\n");
 308		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 309		goto out;
 310	}
 311	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
 312	spin_unlock(&l_lun->lun_tg_pt_gp_lock);
 313
 314	ptr = &buf[4]; /* Skip over RESERVED area in header */
 315
 316	while (len < cmd->data_length) {
 317		bool found = false;
 318		alua_access_state = (ptr[0] & 0x0f);
 319		/*
 320		 * Check the received ALUA access state, and determine if
 321		 * the state is a primary or secondary target port asymmetric
 322		 * access state.
 323		 */
 324		rc = core_alua_check_transition(alua_access_state, valid_states,
 325						&primary, 1);
 326		if (rc) {
 327			/*
 328			 * If the SET TARGET PORT GROUPS attempts to establish
 329			 * an invalid combination of target port asymmetric
 330			 * access states or attempts to establish an
 331			 * unsupported target port asymmetric access state,
 332			 * then the command shall be terminated with CHECK
 333			 * CONDITION status, with the sense key set to ILLEGAL
 334			 * REQUEST, and the additional sense code set to INVALID
 335			 * FIELD IN PARAMETER LIST.
 336			 */
 337			goto out;
 338		}
 339
 340		/*
 341		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
 342		 * specifies a primary target port asymmetric access state,
 343		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
 344		 * a primary target port group for which the primary target
 345		 * port asymmetric access state shall be changed. If the
 346		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
 347		 * port asymmetric access state, then the TARGET PORT GROUP OR
 348		 * TARGET PORT field specifies the relative target port
 349		 * identifier (see 3.1.120) of the target port for which the
 350		 * secondary target port asymmetric access state shall be
 351		 * changed.
 352		 */
 353		if (primary) {
 354			tg_pt_id = get_unaligned_be16(ptr + 2);
 355			/*
 356			 * Locate the matching target port group ID from
 357			 * the global tg_pt_gp list
 358			 */
 359			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 360			list_for_each_entry(tg_pt_gp,
 361					&dev->t10_alua.tg_pt_gps_list,
 362					tg_pt_gp_list) {
 363				if (!tg_pt_gp->tg_pt_gp_valid_id)
 364					continue;
 365
 366				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
 367					continue;
 368
 369				atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 370
 371				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 372
 373				if (!core_alua_do_port_transition(tg_pt_gp,
 374						dev, l_lun, nacl,
 375						alua_access_state, 1))
 376					found = true;
 377
 378				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 379				atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 380				break;
 381			}
 382			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 383		} else {
 384			struct se_lun *lun;
 385
 386			/*
 387			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
 388			 * the Target Port in question for the incoming
 389			 * SET_TARGET_PORT_GROUPS op.
 390			 */
 391			rtpi = get_unaligned_be16(ptr + 2);
 392			/*
 393			 * Locate the matching relative target port identifier
 394			 * for the struct se_device storage object.
 395			 */
 396			spin_lock(&dev->se_port_lock);
 397			list_for_each_entry(lun, &dev->dev_sep_list,
 398							lun_dev_link) {
 399				if (lun->lun_rtpi != rtpi)
 400					continue;
 401
 402				// XXX: racy unlock
 403				spin_unlock(&dev->se_port_lock);
 404
 405				if (!core_alua_set_tg_pt_secondary_state(
 406						lun, 1, 1))
 407					found = true;
 408
 409				spin_lock(&dev->se_port_lock);
 410				break;
 411			}
 412			spin_unlock(&dev->se_port_lock);
 413		}
 414
 415		if (!found) {
 416			rc = TCM_INVALID_PARAMETER_LIST;
 417			goto out;
 418		}
 419
 420		ptr += 4;
 421		len += 4;
 422	}
 423
 424out:
 425	transport_kunmap_data_sg(cmd);
 426	if (!rc)
 427		target_complete_cmd(cmd, GOOD);
 428	return rc;
 429}
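/*
 * Shape of the SET TARGET PORT GROUPS parameter list consumed above:
 * a 4-byte reserved header followed by 4-byte descriptors. A
 * hypothetical two-descriptor payload:
 *
 *	0x00 0x00 0x00 0x00	reserved header
 *	0x00 0x00 0x00 0x01	Active/Optimized for TPG id 1 (primary)
 *	0x0e 0x00 0x00 0x02	Offline for RTPI 2 (secondary)
 *
 * Byte 0 of each descriptor carries the ALUA access state in its low
 * nibble; bytes 2..3 hold a target port group id for primary states
 * or a relative target port identifier for secondary states.
 */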
 430
 431static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
 432{
 433	/*
 434	 * Set the SCSI additional sense code (ASC) to 'LUN Not Accessible'.
 435	 * The ALUA additional sense code qualifier (ASCQ) is determined
 436	 * by the ALUA primary or secondary access state.
 437	 */
 438	pr_debug("[%s]: ALUA TG Port not available, "
 439		"SenseKey: NOT_READY, ASC/ASCQ: "
 440		"0x04/0x%02x\n",
 441		cmd->se_tfo->fabric_name, alua_ascq);
 442
 443	cmd->scsi_asc = 0x04;
 444	cmd->scsi_ascq = alua_ascq;
 445}
 446
 447static inline void core_alua_state_nonoptimized(
 448	struct se_cmd *cmd,
 449	unsigned char *cdb,
 450	int nonop_delay_msecs)
 451{
 452	/*
 453	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
 454	 * later to determine if processing of this cmd needs to be
 455	 * temporarily delayed for the Active/NonOptimized primary access state.
 456	 */
 457	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
 458	cmd->alua_nonop_delay = nonop_delay_msecs;
 459}
 460
 461static inline int core_alua_state_lba_dependent(
 462	struct se_cmd *cmd,
 463	struct t10_alua_tg_pt_gp *tg_pt_gp)
 464{
 465	struct se_device *dev = cmd->se_dev;
 466	u64 segment_size, segment_mult, sectors, lba;
 467
 468	/* Only need to check for cdb actually containing LBAs */
 469	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
 470		return 0;
 471
 472	spin_lock(&dev->t10_alua.lba_map_lock);
 473	segment_size = dev->t10_alua.lba_map_segment_size;
 474	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
 475	sectors = cmd->data_length / dev->dev_attrib.block_size;
 476
 477	lba = cmd->t_task_lba;
 478	while (lba < cmd->t_task_lba + sectors) {
 479		struct t10_alua_lba_map *cur_map = NULL, *map;
 480		struct t10_alua_lba_map_member *map_mem;
 481
 482		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
 483				    lba_map_list) {
 484			u64 start_lba, last_lba;
 485			u64 first_lba = map->lba_map_first_lba;
 486
 487			if (segment_mult) {
 488				u64 tmp = lba;
 489				start_lba = do_div(tmp, segment_size * segment_mult);
 490
 491				last_lba = first_lba + segment_size - 1;
 492				if (start_lba >= first_lba &&
 493				    start_lba <= last_lba) {
 494					lba += segment_size;
 495					cur_map = map;
 496					break;
 497				}
 498			} else {
 499				last_lba = map->lba_map_last_lba;
 500				if (lba >= first_lba && lba <= last_lba) {
 501					lba = last_lba + 1;
 502					cur_map = map;
 503					break;
 504				}
 505			}
 506		}
 507		if (!cur_map) {
 508			spin_unlock(&dev->t10_alua.lba_map_lock);
 509			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 510			return 1;
 511		}
 512		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
 513				    lba_map_mem_list) {
 514			if (map_mem->lba_map_mem_alua_pg_id !=
 515			    tg_pt_gp->tg_pt_gp_id)
 516				continue;
 517			switch(map_mem->lba_map_mem_alua_state) {
 518			case ALUA_ACCESS_STATE_STANDBY:
 519				spin_unlock(&dev->t10_alua.lba_map_lock);
 520				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 521				return 1;
 522			case ALUA_ACCESS_STATE_UNAVAILABLE:
 523				spin_unlock(&dev->t10_alua.lba_map_lock);
 524				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 525				return 1;
 526			default:
 527				break;
 528			}
 529		}
 530	}
 531	spin_unlock(&dev->t10_alua.lba_map_lock);
 532	return 0;
 533}
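/*
 * Worked example of the segment math above, with hypothetical numbers:
 * for segment_size = 0x1000 and segment_mult = 2, do_div() leaves
 * start_lba = lba % (segment_size * segment_mult). For lba = 0x2800
 * that is 0x800, so a map with lba_map_first_lba = 0 matches (segment
 * span 0..0xfff) and the scan advances lba by segment_size. Without
 * segment_mult the map entries are matched as absolute LBA ranges.
 */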
 534
 535static inline int core_alua_state_standby(
 536	struct se_cmd *cmd,
 537	unsigned char *cdb)
 538{
 539	/*
 540	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
 541	 * spc4r17 section 5.9.2.4.4
 542	 */
 543	switch (cdb[0]) {
 544	case INQUIRY:
 545	case LOG_SELECT:
 546	case LOG_SENSE:
 547	case MODE_SELECT:
 548	case MODE_SENSE:
 549	case REPORT_LUNS:
 550	case RECEIVE_DIAGNOSTIC:
 551	case SEND_DIAGNOSTIC:
 552	case READ_CAPACITY:
 553		return 0;
 554	case SERVICE_ACTION_IN_16:
 555		switch (cdb[1] & 0x1f) {
 556		case SAI_READ_CAPACITY_16:
 557			return 0;
 558		default:
 559			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 560			return 1;
 561		}
 562	case MAINTENANCE_IN:
 563		switch (cdb[1] & 0x1f) {
 564		case MI_REPORT_TARGET_PGS:
 565			return 0;
 566		default:
 567			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 568			return 1;
 569		}
 570	case MAINTENANCE_OUT:
 571		switch (cdb[1]) {
 572		case MO_SET_TARGET_PGS:
 573			return 0;
 574		default:
 575			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 576			return 1;
 577		}
 578	case REQUEST_SENSE:
 579	case PERSISTENT_RESERVE_IN:
 580	case PERSISTENT_RESERVE_OUT:
 581	case READ_BUFFER:
 582	case WRITE_BUFFER:
 583		return 0;
 584	default:
 585		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 586		return 1;
 587	}
 588
 589	return 0;
 590}
 591
 592static inline int core_alua_state_unavailable(
 593	struct se_cmd *cmd,
 594	unsigned char *cdb)
 595{
 596	/*
 597	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
 598	 * spc4r17 section 5.9.2.4.5
 599	 */
 600	switch (cdb[0]) {
 601	case INQUIRY:
 602	case REPORT_LUNS:
 603		return 0;
 604	case MAINTENANCE_IN:
 605		switch (cdb[1] & 0x1f) {
 606		case MI_REPORT_TARGET_PGS:
 607			return 0;
 608		default:
 609			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 610			return 1;
 611		}
 612	case MAINTENANCE_OUT:
 613		switch (cdb[1]) {
 614		case MO_SET_TARGET_PGS:
 615			return 0;
 616		default:
 617			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 618			return 1;
 619		}
 620	case REQUEST_SENSE:
 621	case READ_BUFFER:
 622	case WRITE_BUFFER:
 623		return 0;
 624	default:
 625		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 626		return 1;
 627	}
 628
 629	return 0;
 630}
 631
 632static inline int core_alua_state_transition(
 633	struct se_cmd *cmd,
 634	unsigned char *cdb)
 635{
 636	/*
 637	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
 638	 * spc4r17 section 5.9.2.5
 639	 */
 640	switch (cdb[0]) {
 641	case INQUIRY:
 642	case REPORT_LUNS:
 643		return 0;
 644	case MAINTENANCE_IN:
 645		switch (cdb[1] & 0x1f) {
 646		case MI_REPORT_TARGET_PGS:
 647			return 0;
 648		default:
 649			set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
 650			return 1;
 651		}
 652	case REQUEST_SENSE:
 653	case READ_BUFFER:
 654	case WRITE_BUFFER:
 655		return 0;
 656	default:
 657		set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
 658		return 1;
 659	}
 660
 661	return 0;
 662}
 663
 664/*
 665 * Return 0 when the command may proceed, TCM_CHECK_CONDITION_NOT_READY
 666 * when the LUN is not accessible in the current ALUA access state, or
 667 * TCM_INVALID_CDB_FIELD for an unknown ALUA access state.
 668 */
 669sense_reason_t
 670target_alua_state_check(struct se_cmd *cmd)
 671{
 672	struct se_device *dev = cmd->se_dev;
 673	unsigned char *cdb = cmd->t_task_cdb;
 674	struct se_lun *lun = cmd->se_lun;
 675	struct t10_alua_tg_pt_gp *tg_pt_gp;
 676	int out_alua_state, nonop_delay_msecs;
 677
 678	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
 679		return 0;
 680	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
 681		return 0;
 682
 683	/*
 684	 * First, check for a struct se_port specific secondary ALUA target port
 685	 * access state: OFFLINE
 686	 */
 687	if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
 688		pr_debug("ALUA: Got secondary offline status for local"
 689				" target port\n");
 690		set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
 691		return TCM_CHECK_CONDITION_NOT_READY;
 692	}
 693
 694	if (!lun->lun_tg_pt_gp)
 695		return 0;
 696
 697	spin_lock(&lun->lun_tg_pt_gp_lock);
 698	tg_pt_gp = lun->lun_tg_pt_gp;
 699	out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
 700	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
 701
 702	// XXX: keeps using tg_pt_gp without a reference after unlock
 703	spin_unlock(&lun->lun_tg_pt_gp_lock);
 704	/*
 705	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
 706	 * statement so the compiler knows explicitly to check this case first.
 707	 * For the Optimized ALUA access state case, we want to process the
 708	 * incoming fabric cmd ASAP..
 709	 */
 710	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
 711		return 0;
 712
 713	switch (out_alua_state) {
 714	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 715		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
 716		break;
 717	case ALUA_ACCESS_STATE_STANDBY:
 718		if (core_alua_state_standby(cmd, cdb))
 719			return TCM_CHECK_CONDITION_NOT_READY;
 720		break;
 721	case ALUA_ACCESS_STATE_UNAVAILABLE:
 722		if (core_alua_state_unavailable(cmd, cdb))
 723			return TCM_CHECK_CONDITION_NOT_READY;
 724		break;
 725	case ALUA_ACCESS_STATE_TRANSITION:
 726		if (core_alua_state_transition(cmd, cdb))
 727			return TCM_CHECK_CONDITION_NOT_READY;
 728		break;
 729	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
 730		if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
 731			return TCM_CHECK_CONDITION_NOT_READY;
 732		break;
 733	/*
 734	 * OFFLINE is a secondary ALUA target port group access state that is
 735	 * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
 736	 */
 737	case ALUA_ACCESS_STATE_OFFLINE:
 738	default:
 739		pr_err("Unknown ALUA access state: 0x%02x\n",
 740				out_alua_state);
 741		return TCM_INVALID_CDB_FIELD;
 742	}
 743
 744	return 0;
 745}
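/*
 * Note on how the NOT READY returns above reach the initiator: the
 * check-condition carries sense key NOT READY with ASC 0x04 and the
 * state-specific ASCQ recorded by set_ascq(), e.g. 0x04/0x0b when a
 * command is rejected in the Standby primary access state.
 */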
 746
 747/*
 748 * Check implicit and explicit ALUA state change request.
 749 */
 750static sense_reason_t
 751core_alua_check_transition(int state, int valid, int *primary, int explicit)
 752{
 753	/*
 754	 * OPTIMIZED, NON-OPTIMIZED, STANDBY, UNAVAILABLE and LBA DEPENDENT are
 755	 * defined as primary target port asymmetric access states.
 756	 */
 757	switch (state) {
 758	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
 759		if (!(valid & ALUA_AO_SUP))
 760			goto not_supported;
 761		*primary = 1;
 762		break;
 763	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 764		if (!(valid & ALUA_AN_SUP))
 765			goto not_supported;
 766		*primary = 1;
 767		break;
 768	case ALUA_ACCESS_STATE_STANDBY:
 769		if (!(valid & ALUA_S_SUP))
 770			goto not_supported;
 771		*primary = 1;
 772		break;
 773	case ALUA_ACCESS_STATE_UNAVAILABLE:
 774		if (!(valid & ALUA_U_SUP))
 775			goto not_supported;
 776		*primary = 1;
 777		break;
 778	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
 779		if (!(valid & ALUA_LBD_SUP))
 780			goto not_supported;
 781		*primary = 1;
 782		break;
 783	case ALUA_ACCESS_STATE_OFFLINE:
 784		/*
 785		 * OFFLINE state is defined as a secondary target port
 786		 * asymmetric access state.
 787		 */
 788		if (!(valid & ALUA_O_SUP))
 789			goto not_supported;
 790		*primary = 0;
 791		break;
 792	case ALUA_ACCESS_STATE_TRANSITION:
 793		if (!(valid & ALUA_T_SUP) || explicit)
 794			/*
 795			 * Transitioning is set internally and by tcmu daemon,
 796			 * and cannot be selected through a STPG.
 797			 */
 798			goto not_supported;
 799		*primary = 0;
 800		break;
 801	default:
 802		pr_err("Unknown ALUA access state: 0x%02x\n", state);
 803		return TCM_INVALID_PARAMETER_LIST;
 804	}
 805
 806	return 0;
 807
 808not_supported:
 809	pr_err("ALUA access state %s not supported\n",
 810	       core_alua_dump_state(state));
 811	return TCM_INVALID_PARAMETER_LIST;
 812}
 813
 814static char *core_alua_dump_state(int state)
 815{
 816	switch (state) {
 817	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
 818		return "Active/Optimized";
 819	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 820		return "Active/NonOptimized";
 821	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
 822		return "LBA Dependent";
 823	case ALUA_ACCESS_STATE_STANDBY:
 824		return "Standby";
 825	case ALUA_ACCESS_STATE_UNAVAILABLE:
 826		return "Unavailable";
 827	case ALUA_ACCESS_STATE_OFFLINE:
 828		return "Offline";
 829	case ALUA_ACCESS_STATE_TRANSITION:
 830		return "Transitioning";
 831	default:
 832		return "Unknown";
 833	}
 834
 835	return NULL;
 836}
 837
 838char *core_alua_dump_status(int status)
 839{
 840	switch (status) {
 841	case ALUA_STATUS_NONE:
 842		return "None";
 843	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
 844		return "Altered by Explicit STPG";
 845	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
 846		return "Altered by Implicit ALUA";
 847	default:
 848		return "Unknown";
 849	}
 850
 851	return NULL;
 852}
 853
 854/*
 855 * Used by fabric modules to determine when we need to delay processing
 856 * for the Active/NonOptimized paths..
 857 */
 858int core_alua_check_nonop_delay(
 859	struct se_cmd *cmd)
 860{
 861	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
 862		return 0;
 863	if (in_interrupt())
 864		return 0;
 865	/*
 866	 * The ALUA Active/NonOptimized access state delay can be disabled
 867	 * in via configfs with a value of zero
 868	 */
 869	if (!cmd->alua_nonop_delay)
 870		return 0;
 871	/*
 872	 * struct se_cmd->alua_nonop_delay gets set to the interval defined
 873	 * by the target port group in core_alua_state_nonoptimized()
 874	 */
 875	msleep_interruptible(cmd->alua_nonop_delay);
 876	return 0;
 877}
 878EXPORT_SYMBOL(core_alua_check_nonop_delay);
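/*
 * Hypothetical call-site sketch (assumed, not taken from this file):
 * core submission code can simply do
 *
 *	core_alua_check_nonop_delay(cmd);
 *
 * which returns 0 immediately unless core_alua_state_nonoptimized()
 * set SCF_ALUA_NON_OPTIMIZED, in which case it first sleeps for the
 * configured cmd->alua_nonop_delay milliseconds.
 */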
 879
 880static int core_alua_write_tpg_metadata(
 881	const char *path,
 882	unsigned char *md_buf,
 883	u32 md_buf_len)
 884{
 885	struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
 886	loff_t pos = 0;
 887	int ret;
 888
 889	if (IS_ERR(file)) {
 890		pr_err("filp_open(%s) for ALUA metadata failed\n", path);
 891		return -ENODEV;
 892	}
 893	ret = kernel_write(file, md_buf, md_buf_len, &pos);
 894	if (ret < 0)
 895		pr_err("Error writing ALUA metadata file: %s\n", path);
 896	fput(file);
 897	return (ret < 0) ? -EIO : 0;
 898}
 899
 900static int core_alua_update_tpg_primary_metadata(
 901	struct t10_alua_tg_pt_gp *tg_pt_gp)
 902{
 903	unsigned char *md_buf;
 904	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
 905	char *path;
 906	int len, rc;
 907
 908	lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex);
 909
 910	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
 911	if (!md_buf) {
 912		pr_err("Unable to allocate buf for ALUA metadata\n");
 913		return -ENOMEM;
 914	}
 915
 916	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
 917			"tg_pt_gp_id=%hu\n"
 918			"alua_access_state=0x%02x\n"
 919			"alua_access_status=0x%02x\n",
 920			tg_pt_gp->tg_pt_gp_id,
 921			tg_pt_gp->tg_pt_gp_alua_access_state,
 922			tg_pt_gp->tg_pt_gp_alua_access_status);
 923
 924	rc = -ENOMEM;
 925	path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
 926			&wwn->unit_serial[0],
 927			config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
 928	if (path) {
 929		rc = core_alua_write_tpg_metadata(path, md_buf, len);
 930		kfree(path);
 931	}
 932	kfree(md_buf);
 933	return rc;
 934}
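/*
 * Example of the metadata produced above when tg_pt_gp_write_metadata
 * is enabled (path components and values are illustrative):
 *
 *	$db_root/alua/tpgs_<unit_serial>/<tg_pt_gp name>:
 *		tg_pt_gp_id=1
 *		alua_access_state=0x01
 *		alua_access_status=0x02
 */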
 935
 936static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 937{
 938	struct se_dev_entry *se_deve;
 939	struct se_lun *lun;
 940	struct se_lun_acl *lacl;
 941
 942	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 943	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
 944				lun_tg_pt_gp_link) {
 945		/*
 946		 * After an implicit target port asymmetric access state
 947		 * change, a device server shall establish a unit attention
 948		 * condition for the initiator port associated with every I_T
 949		 * nexus with the additional sense code set to ASYMMETRIC
 950		 * ACCESS STATE CHANGED.
 951		 *
 952		 * After an explicit target port asymmetric access state
 953		 * change, a device server shall establish a unit attention
 954		 * condition with the additional sense code set to ASYMMETRIC
 955		 * ACCESS STATE CHANGED for the initiator port associated with
 956		 * every I_T nexus other than the I_T nexus on which the SET
 957		 * TARGET PORT GROUPS command
 958		 * TARGET PORT GROUPS command was received.
 959		if (!percpu_ref_tryget_live(&lun->lun_ref))
 960			continue;
 961		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 962
 963		spin_lock(&lun->lun_deve_lock);
 964		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
 965			lacl = rcu_dereference_check(se_deve->se_lun_acl,
 966					lockdep_is_held(&lun->lun_deve_lock));
 967
 968			/*
 969			 * spc4r37 p.242:
 970			 * After an explicit target port asymmetric access
 971			 * state change, a device server shall establish a
 972			 * unit attention condition with the additional sense
 973			 * code set to ASYMMETRIC ACCESS STATE CHANGED for
 974			 * the initiator port associated with every I_T nexus
 975			 * other than the I_T nexus on which the SET TARGET
 976			 * PORT GROUPS command was received.
 977			 */
 978			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
 979			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
 980			   (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
 981			    (tg_pt_gp->tg_pt_gp_alua_lun == lun))
 982				continue;
 983
 984			/*
 985			 * The se_deve->se_lun_acl pointer may be NULL for an
 986			 * entry created without explicit Node+MappedLUN ACLs
 987			 */
 988			if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
 989			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
 990				continue;
 991
 992			core_scsi3_ua_allocate(se_deve, 0x2A,
 993				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
 994		}
 995		spin_unlock(&lun->lun_deve_lock);
 996
 997		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 998		percpu_ref_put(&lun->lun_ref);
 999	}
1000	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1001}
1002
1003static int core_alua_do_transition_tg_pt(
1004	struct t10_alua_tg_pt_gp *tg_pt_gp,
1005	int new_state,
1006	int explicit)
1007{
1008	int prev_state;
1009
1010	mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1011	/* Nothing to be done here */
1012	if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
1013		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1014		return 0;
1015	}
1016
1017	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
1018		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1019		return -EAGAIN;
1020	}
1021
1022	/*
1023	 * Save the old primary ALUA access state, and set the current state
1024	 * to ALUA_ACCESS_STATE_TRANSITION.
1025	 */
1026	prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
1027	tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
1028	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
1029				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1030				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1031
1032	core_alua_queue_state_change_ua(tg_pt_gp);
1033
1034	if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
1035		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1036		return 0;
1037	}
1038
1039	/*
1040	 * Check for the optional ALUA primary state transition delay
1041	 */
1042	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
1043		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1044
1045	/*
1046	 * Set the current primary ALUA access state to the requested new state
1047	 */
1048	tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
1049
1050	/*
1051	 * Update the ALUA metadata buf that has been allocated in
1052	 * core_alua_do_port_transition(), this metadata will be written
1053	 * to struct file.
1054	 *
1055	 * Note that there is a case where we do not want to update the
1056	 * metadata: when userspace is re-applying a previously saved port
1057	 * access state and access status from parsed metadata.
1058	 *
1059	 * Also note that the failure to write out the ALUA metadata to
1060	 * struct file does NOT affect the actual ALUA transition.
1061	 */
1062	if (tg_pt_gp->tg_pt_gp_write_metadata) {
1063		core_alua_update_tpg_primary_metadata(tg_pt_gp);
1064	}
1065
1066	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1067		" from primary access state %s to %s\n", (explicit) ? "explicit" :
1068		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1069		tg_pt_gp->tg_pt_gp_id,
1070		core_alua_dump_state(prev_state),
1071		core_alua_dump_state(new_state));
1072
1073	core_alua_queue_state_change_ua(tg_pt_gp);
1074
1075	mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1076	return 0;
1077}
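/*
 * Summary of the sequence implemented above, all under
 * tg_pt_gp_transition_mutex:
 *
 *	prev_state -> TRANSITION (queue unit attentions)
 *		-> optional tg_pt_gp_trans_delay_msecs sleep
 *		-> new_state (queue unit attentions, optional metadata)
 *
 * An explicit STPG request for the TRANSITION state itself is rejected
 * with -EAGAIN before any state is modified.
 */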
1078
1079int core_alua_do_port_transition(
1080	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
1081	struct se_device *l_dev,
1082	struct se_lun *l_lun,
1083	struct se_node_acl *l_nacl,
1084	int new_state,
1085	int explicit)
1086{
1087	struct se_device *dev;
1088	struct t10_alua_lu_gp *lu_gp;
1089	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
1090	struct t10_alua_tg_pt_gp *tg_pt_gp;
1091	int primary, valid_states, rc = 0;
1092
1093	if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
1094		return -ENODEV;
1095
1096	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
1097	if (core_alua_check_transition(new_state, valid_states, &primary,
1098				       explicit) != 0)
1099		return -EINVAL;
1100
1101	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
1102	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
1103	lu_gp = local_lu_gp_mem->lu_gp;
1104	atomic_inc(&lu_gp->lu_gp_ref_cnt);
1105	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
1106	/*
1107	 * For storage objects that are members of the 'default_lu_gp',
1108	 * we only do the transition on the passed *l_tg_pt_gp, and not
1109	 * on all of the matching target port groups IDs in default_lu_gp.
1110	 */
1111	if (!lu_gp->lu_gp_id) {
1112		/*
1113		 * core_alua_do_transition_tg_pt() will always return
1114		 * success.
1115		 */
1116		l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
1117		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1118		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
1119						   new_state, explicit);
1120		atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1121		return rc;
1122	}
1123	/*
1124	 * For all other LU groups aside from 'default_lu_gp', walk all of
1125	 * the associated storage objects looking for a matching target port
1126	 * group ID from the local target port group.
1127	 */
1128	spin_lock(&lu_gp->lu_gp_lock);
1129	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
1130				lu_gp_mem_list) {
1131
1132		dev = lu_gp_mem->lu_gp_mem_dev;
1133		atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1134		spin_unlock(&lu_gp->lu_gp_lock);
1135
1136		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1137		list_for_each_entry(tg_pt_gp,
1138				&dev->t10_alua.tg_pt_gps_list,
1139				tg_pt_gp_list) {
1140
1141			if (!tg_pt_gp->tg_pt_gp_valid_id)
1142				continue;
1143			/*
1144			 * If the target port asymmetric access state is
1145			 * changed for any target port group accessible via
1146			 * a logical unit within a LU group, the target port
1147			 * asymmetric access states for the same
1148			 * target port group accessible via other logical units
1149			 * in that LU group will also change.
1150			 */
1151			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
1152				continue;
1153
1154			if (l_tg_pt_gp == tg_pt_gp) {
1155				tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
1156				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1157			} else {
1158				tg_pt_gp->tg_pt_gp_alua_lun = NULL;
1159				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
1160			}
1161			atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1162			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1163			/*
1164			 * core_alua_do_transition_tg_pt() will always return
1165			 * success.
1166			 */
1167			rc = core_alua_do_transition_tg_pt(tg_pt_gp,
1168					new_state, explicit);
1169
1170			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1171			atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1172			if (rc)
1173				break;
1174		}
1175		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1176
1177		spin_lock(&lu_gp->lu_gp_lock);
1178		atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1179	}
1180	spin_unlock(&lu_gp->lu_gp_lock);
1181
1182	if (!rc) {
1183		pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
1184			 " Group IDs: %hu %s transition to primary state: %s\n",
1185			 config_item_name(&lu_gp->lu_gp_group.cg_item),
1186			 l_tg_pt_gp->tg_pt_gp_id,
1187			 (explicit) ? "explicit" : "implicit",
1188			 core_alua_dump_state(new_state));
1189	}
1190
1191	atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1192	return rc;
1193}
1194
1195static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
1196{
1197	struct se_portal_group *se_tpg = lun->lun_tpg;
1198	unsigned char *md_buf;
1199	char *path;
1200	int len, rc;
1201
1202	mutex_lock(&lun->lun_tg_pt_md_mutex);
1203
1204	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
1205	if (!md_buf) {
1206		pr_err("Unable to allocate buf for ALUA metadata\n");
1207		rc = -ENOMEM;
1208		goto out_unlock;
1209	}
1210
1211	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
1212			"alua_tg_pt_status=0x%02x\n",
1213			atomic_read(&lun->lun_tg_pt_secondary_offline),
1214			lun->lun_tg_pt_secondary_stat);
1215
1216	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
1217		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
1218				db_root, se_tpg->se_tpg_tfo->fabric_name,
1219				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1220				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
1221				lun->unpacked_lun);
1222	} else {
1223		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
1224				db_root, se_tpg->se_tpg_tfo->fabric_name,
1225				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1226				lun->unpacked_lun);
1227	}
1228	if (!path) {
1229		rc = -ENOMEM;
1230		goto out_free;
1231	}
1232
1233	rc = core_alua_write_tpg_metadata(path, md_buf, len);
1234	kfree(path);
1235out_free:
1236	kfree(md_buf);
1237out_unlock:
1238	mutex_unlock(&lun->lun_tg_pt_md_mutex);
1239	return rc;
1240}
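/*
 * Example of the per-LUN secondary state metadata written above, with
 * hypothetical values, stored under
 * $db_root/alua/<fabric>/<wwn>[+<tpgt>]/lun_<n>:
 *
 *	alua_tg_pt_offline=1
 *	alua_tg_pt_status=0x02
 */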
1241
1242static int core_alua_set_tg_pt_secondary_state(
1243	struct se_lun *lun,
1244	int explicit,
1245	int offline)
1246{
1247	struct t10_alua_tg_pt_gp *tg_pt_gp;
1248	int trans_delay_msecs;
1249
1250	spin_lock(&lun->lun_tg_pt_gp_lock);
1251	tg_pt_gp = lun->lun_tg_pt_gp;
1252	if (!tg_pt_gp) {
1253		spin_unlock(&lun->lun_tg_pt_gp_lock);
1254		pr_err("Unable to complete secondary state"
1255				" transition\n");
1256		return -EINVAL;
1257	}
1258	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1259	/*
1260	 * Set the secondary ALUA target port access state to OFFLINE
1261	 * or clear the previously set secondary state for struct se_lun
1262	 */
1263	if (offline)
1264		atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
1265	else
1266		atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
1267
1268	lun->lun_tg_pt_secondary_stat = (explicit) ?
1269			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1270			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1271
1272	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1273		" to secondary access state: %s\n", (explicit) ? "explicit" :
1274		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1275		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1276
1277	spin_unlock(&lun->lun_tg_pt_gp_lock);
1278	/*
1279	 * Do the optional transition delay after we set the secondary
1280	 * ALUA access state.
1281	 */
1282	if (trans_delay_msecs != 0)
1283		msleep_interruptible(trans_delay_msecs);
1284	/*
1285	 * See if we need to update the ALUA fabric port metadata for
1286	 * secondary state and status
1287	 */
1288	if (lun->lun_tg_pt_secondary_write_md)
1289		core_alua_update_tpg_secondary_metadata(lun);
1290
1291	return 0;
1292}
1293
1294struct t10_alua_lba_map *
1295core_alua_allocate_lba_map(struct list_head *list,
1296			   u64 first_lba, u64 last_lba)
1297{
1298	struct t10_alua_lba_map *lba_map;
1299
1300	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1301	if (!lba_map) {
1302		pr_err("Unable to allocate struct t10_alua_lba_map\n");
1303		return ERR_PTR(-ENOMEM);
1304	}
1305	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1306	lba_map->lba_map_first_lba = first_lba;
1307	lba_map->lba_map_last_lba = last_lba;
1308
1309	list_add_tail(&lba_map->lba_map_list, list);
1310	return lba_map;
1311}
1312
1313int
1314core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1315			       int pg_id, int state)
1316{
1317	struct t10_alua_lba_map_member *lba_map_mem;
1318
1319	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1320			    lba_map_mem_list) {
1321		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1322			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1323			return -EINVAL;
1324		}
1325	}
1326
1327	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1328	if (!lba_map_mem) {
1329		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1330		return -ENOMEM;
1331	}
1332	lba_map_mem->lba_map_mem_alua_state = state;
1333	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1334
1335	list_add_tail(&lba_map_mem->lba_map_mem_list,
1336		      &lba_map->lba_map_mem_list);
1337	return 0;
1338}
1339
1340void
1341core_alua_free_lba_map(struct list_head *lba_list)
1342{
1343	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1344	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1345
1346	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1347				 lba_map_list) {
1348		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1349					 &lba_map->lba_map_mem_list,
1350					 lba_map_mem_list) {
1351			list_del(&lba_map_mem->lba_map_mem_list);
1352			kmem_cache_free(t10_alua_lba_map_mem_cache,
1353					lba_map_mem);
1354		}
1355		list_del(&lba_map->lba_map_list);
1356		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1357	}
1358}
1359
1360void
1361core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1362		      int segment_size, int segment_mult)
1363{
1364	struct list_head old_lba_map_list;
1365	struct t10_alua_tg_pt_gp *tg_pt_gp;
1366	int activate = 0, supported;
1367
1368	INIT_LIST_HEAD(&old_lba_map_list);
1369	spin_lock(&dev->t10_alua.lba_map_lock);
1370	dev->t10_alua.lba_map_segment_size = segment_size;
1371	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1372	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1373	if (lba_map_list) {
1374		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1375		activate = 1;
1376	}
1377	spin_unlock(&dev->t10_alua.lba_map_lock);
1378	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1379	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1380			    tg_pt_gp_list) {
1381
1382		if (!tg_pt_gp->tg_pt_gp_valid_id)
1383			continue;
1384		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1385		if (activate)
1386			supported |= ALUA_LBD_SUP;
1387		else
1388			supported &= ~ALUA_LBD_SUP;
1389		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1390	}
1391	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1392	core_alua_free_lba_map(&old_lba_map_list);
1393}
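/*
 * Sketch of how a caller (e.g. configfs store code) might assemble and
 * activate a referrals map with the helpers above; the names and the
 * values are hypothetical:
 *
 *	LIST_HEAD(lba_list);
 *	struct t10_alua_lba_map *map;
 *
 *	map = core_alua_allocate_lba_map(&lba_list, 0, 0xfff);
 *	if (!IS_ERR(map)) {
 *		core_alua_allocate_lba_map_mem(map, 1,
 *				ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
 *		core_alua_set_lba_map(dev, &lba_list, 0x1000, 0);
 *	}
 */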
1394
1395struct t10_alua_lu_gp *
1396core_alua_allocate_lu_gp(const char *name, int def_group)
1397{
1398	struct t10_alua_lu_gp *lu_gp;
1399
1400	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1401	if (!lu_gp) {
1402		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1403		return ERR_PTR(-ENOMEM);
1404	}
1405	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1406	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1407	spin_lock_init(&lu_gp->lu_gp_lock);
1408	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1409
1410	if (def_group) {
1411		lu_gp->lu_gp_id = alua_lu_gps_counter++;
1412		lu_gp->lu_gp_valid_id = 1;
1413		alua_lu_gps_count++;
1414	}
1415
1416	return lu_gp;
1417}
1418
1419int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1420{
1421	struct t10_alua_lu_gp *lu_gp_tmp;
1422	u16 lu_gp_id_tmp;
1423	/*
1424	 * The lu_gp->lu_gp_id may only be set once..
1425	 */
1426	if (lu_gp->lu_gp_valid_id) {
1427		pr_warn("ALUA LU Group already has a valid ID,"
1428			" ignoring request\n");
1429		return -EINVAL;
1430	}
1431
1432	spin_lock(&lu_gps_lock);
1433	if (alua_lu_gps_count == 0x0000ffff) {
1434		pr_err("Maximum ALUA alua_lu_gps_count:"
1435				" 0x0000ffff reached\n");
1436		spin_unlock(&lu_gps_lock);
1437		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1438		return -ENOSPC;
1439	}
1440again:
1441	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1442				alua_lu_gps_counter++;
1443
1444	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1445		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1446			if (!lu_gp_id)
1447				goto again;
1448
1449			pr_warn("ALUA Logical Unit Group ID: %hu"
1450				" already exists, ignoring request\n",
1451				lu_gp_id);
1452			spin_unlock(&lu_gps_lock);
1453			return -EINVAL;
1454		}
1455	}
1456
1457	lu_gp->lu_gp_id = lu_gp_id_tmp;
1458	lu_gp->lu_gp_valid_id = 1;
1459	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1460	alua_lu_gps_count++;
1461	spin_unlock(&lu_gps_lock);
1462
1463	return 0;
1464}
1465
1466static struct t10_alua_lu_gp_member *
1467core_alua_allocate_lu_gp_mem(struct se_device *dev)
1468{
1469	struct t10_alua_lu_gp_member *lu_gp_mem;
1470
1471	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1472	if (!lu_gp_mem) {
1473		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1474		return ERR_PTR(-ENOMEM);
1475	}
1476	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1477	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1478	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1479
1480	lu_gp_mem->lu_gp_mem_dev = dev;
1481	dev->dev_alua_lu_gp_mem = lu_gp_mem;
1482
1483	return lu_gp_mem;
1484}
1485
1486void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1487{
1488	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1489	/*
1490	 * Once we have reached this point, config_item_put() has
1491	 * already been called from target_core_alua_drop_lu_gp().
1492	 *
1493	 * Here, we remove the *lu_gp from the global list so that
1494	 * no associations can be made while we are releasing
1495	 * struct t10_alua_lu_gp.
1496	 */
1497	spin_lock(&lu_gps_lock);
1498	list_del(&lu_gp->lu_gp_node);
1499	alua_lu_gps_count--;
1500	spin_unlock(&lu_gps_lock);
1501	/*
1502	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1503	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1504	 * released with core_alua_put_lu_gp_from_name()
1505	 */
1506	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1507		cpu_relax();
1508	/*
1509	 * Release reference to struct t10_alua_lu_gp * from all associated
1510	 * struct se_device.
1511	 */
1512	spin_lock(&lu_gp->lu_gp_lock);
1513	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1514				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1515		if (lu_gp_mem->lu_gp_assoc) {
1516			list_del(&lu_gp_mem->lu_gp_mem_list);
1517			lu_gp->lu_gp_members--;
1518			lu_gp_mem->lu_gp_assoc = 0;
1519		}
1520		spin_unlock(&lu_gp->lu_gp_lock);
1521		/*
1522		 *
1523		 * lu_gp_mem is associated with a single
1524		 * struct se_device->dev_alua_lu_gp_mem, and is released when
1525		 * struct se_device is released via core_alua_free_lu_gp_mem().
1526		 *
1527		 * If the passed lu_gp does NOT match the default_lu_gp, assume
1528		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1529		 */
1530		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1531		if (lu_gp != default_lu_gp)
1532			__core_alua_attach_lu_gp_mem(lu_gp_mem,
1533					default_lu_gp);
1534		else
1535			lu_gp_mem->lu_gp = NULL;
1536		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1537
1538		spin_lock(&lu_gp->lu_gp_lock);
1539	}
1540	spin_unlock(&lu_gp->lu_gp_lock);
1541
1542	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1543}
1544
1545void core_alua_free_lu_gp_mem(struct se_device *dev)
1546{
1547	struct t10_alua_lu_gp *lu_gp;
1548	struct t10_alua_lu_gp_member *lu_gp_mem;
1549
1550	lu_gp_mem = dev->dev_alua_lu_gp_mem;
1551	if (!lu_gp_mem)
1552		return;
1553
1554	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1555		cpu_relax();
1556
1557	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1558	lu_gp = lu_gp_mem->lu_gp;
1559	if (lu_gp) {
1560		spin_lock(&lu_gp->lu_gp_lock);
1561		if (lu_gp_mem->lu_gp_assoc) {
1562			list_del(&lu_gp_mem->lu_gp_mem_list);
1563			lu_gp->lu_gp_members--;
1564			lu_gp_mem->lu_gp_assoc = 0;
1565		}
1566		spin_unlock(&lu_gp->lu_gp_lock);
1567		lu_gp_mem->lu_gp = NULL;
1568	}
1569	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1570
1571	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1572}
1573
1574struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1575{
1576	struct t10_alua_lu_gp *lu_gp;
1577	struct config_item *ci;
1578
1579	spin_lock(&lu_gps_lock);
1580	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1581		if (!lu_gp->lu_gp_valid_id)
1582			continue;
1583		ci = &lu_gp->lu_gp_group.cg_item;
1584		if (!strcmp(config_item_name(ci), name)) {
1585			atomic_inc(&lu_gp->lu_gp_ref_cnt);
1586			spin_unlock(&lu_gps_lock);
1587			return lu_gp;
1588		}
1589	}
1590	spin_unlock(&lu_gps_lock);
1591
1592	return NULL;
1593}
1594
1595void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1596{
1597	spin_lock(&lu_gps_lock);
1598	atomic_dec(&lu_gp->lu_gp_ref_cnt);
1599	spin_unlock(&lu_gps_lock);
1600}
1601
1602/*
1603 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1604 */
1605void __core_alua_attach_lu_gp_mem(
1606	struct t10_alua_lu_gp_member *lu_gp_mem,
1607	struct t10_alua_lu_gp *lu_gp)
1608{
1609	spin_lock(&lu_gp->lu_gp_lock);
1610	lu_gp_mem->lu_gp = lu_gp;
1611	lu_gp_mem->lu_gp_assoc = 1;
1612	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1613	lu_gp->lu_gp_members++;
1614	spin_unlock(&lu_gp->lu_gp_lock);
1615}
1616
1617/*
1618 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1619 */
1620void __core_alua_drop_lu_gp_mem(
1621	struct t10_alua_lu_gp_member *lu_gp_mem,
1622	struct t10_alua_lu_gp *lu_gp)
1623{
1624	spin_lock(&lu_gp->lu_gp_lock);
1625	list_del(&lu_gp_mem->lu_gp_mem_list);
1626	lu_gp_mem->lu_gp = NULL;
1627	lu_gp_mem->lu_gp_assoc = 0;
1628	lu_gp->lu_gp_members--;
1629	spin_unlock(&lu_gp->lu_gp_lock);
1630}
1631
1632struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1633		const char *name, int def_group)
1634{
1635	struct t10_alua_tg_pt_gp *tg_pt_gp;
1636
1637	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1638	if (!tg_pt_gp) {
1639		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1640		return NULL;
1641	}
1642	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1643	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1644	mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
1645	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1646	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1647	tg_pt_gp->tg_pt_gp_dev = dev;
1648	tg_pt_gp->tg_pt_gp_alua_access_state =
1649			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1650	/*
1651	 * Enable both explicit and implicit ALUA support by default
1652	 */
1653	tg_pt_gp->tg_pt_gp_alua_access_type =
1654			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1655	/*
1656	 * Set the default Active/NonOptimized Delay in milliseconds
1657	 */
1658	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1659	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1660	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1661
1662	/*
1663	 * Enable all supported states
1664	 */
1665	tg_pt_gp->tg_pt_gp_alua_supported_states =
1666	    ALUA_T_SUP | ALUA_O_SUP |
1667	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1668
1669	if (def_group) {
1670		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1671		tg_pt_gp->tg_pt_gp_id =
1672				dev->t10_alua.alua_tg_pt_gps_counter++;
1673		tg_pt_gp->tg_pt_gp_valid_id = 1;
1674		dev->t10_alua.alua_tg_pt_gps_count++;
1675		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1676			      &dev->t10_alua.tg_pt_gps_list);
1677		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1678	}
1679
1680	return tg_pt_gp;
1681}
1682
1683int core_alua_set_tg_pt_gp_id(
1684	struct t10_alua_tg_pt_gp *tg_pt_gp,
1685	u16 tg_pt_gp_id)
1686{
1687	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1688	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1689	u16 tg_pt_gp_id_tmp;
1690
1691	/*
1692	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
1693	 */
1694	if (tg_pt_gp->tg_pt_gp_valid_id) {
1695		pr_warn("ALUA TG PT Group already has a valid ID,"
1696			" ignoring request\n");
1697		return -EINVAL;
1698	}
1699
1700	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1701	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1702		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1703			" 0x0000ffff reached\n");
1704		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1705		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1706		return -ENOSPC;
1707	}
1708again:
1709	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1710			dev->t10_alua.alua_tg_pt_gps_counter++;
1711
1712	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1713			tg_pt_gp_list) {
1714		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1715			if (!tg_pt_gp_id)
1716				goto again;
1717
1718			pr_err("ALUA Target Port Group ID: %hu already"
1719				" exists, ignoring request\n", tg_pt_gp_id);
1720			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1721			return -EINVAL;
1722		}
1723	}
1724
1725	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1726	tg_pt_gp->tg_pt_gp_valid_id = 1;
1727	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1728			&dev->t10_alua.tg_pt_gps_list);
1729	dev->t10_alua.alua_tg_pt_gps_count++;
1730	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1731
1732	return 0;
1733}
1734
1735void core_alua_free_tg_pt_gp(
1736	struct t10_alua_tg_pt_gp *tg_pt_gp)
1737{
1738	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1739	struct se_lun *lun, *next;
1740
1741	/*
1742	 * Once we have reached this point, config_item_put() has already
1743	 * been called from target_core_alua_drop_tg_pt_gp().
1744	 *
1745	 * Here we remove *tg_pt_gp from the global list so that
1746	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1747	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1748	 */
1749	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1750	if (tg_pt_gp->tg_pt_gp_valid_id) {
1751		list_del(&tg_pt_gp->tg_pt_gp_list);
1752		dev->t10_alua.alua_tg_pt_gps_count--;
1753	}
1754	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1755
1756	/*
1757	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1758	 * core_alua_get_tg_pt_gp_by_name() in
1759	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1760	 * to be released with core_alua_put_tg_pt_gp_from_name().
1761	 */
1762	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1763		cpu_relax();
1764
1765	/*
1766	 * Release reference to struct t10_alua_tg_pt_gp from all associated
1767	 * struct se_port.
1768	 */
1769	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1770	list_for_each_entry_safe(lun, next,
1771			&tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
1772		list_del_init(&lun->lun_tg_pt_gp_link);
1773		tg_pt_gp->tg_pt_gp_members--;
1774
1775		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1776		/*
1777		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1778		 * assume we want to re-associate a given tg_pt_gp_mem with
1779		 * default_tg_pt_gp.
1780		 */
1781		spin_lock(&lun->lun_tg_pt_gp_lock);
1782		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1783			__target_attach_tg_pt_gp(lun,
1784					dev->t10_alua.default_tg_pt_gp);
1785		} else
1786			lun->lun_tg_pt_gp = NULL;
1787		spin_unlock(&lun->lun_tg_pt_gp_lock);
1788
1789		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1790	}
1791	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1792
1793	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1794}
1795
1796static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1797		struct se_device *dev, const char *name)
1798{
1799	struct t10_alua_tg_pt_gp *tg_pt_gp;
1800	struct config_item *ci;
1801
1802	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1803	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1804			tg_pt_gp_list) {
1805		if (!tg_pt_gp->tg_pt_gp_valid_id)
1806			continue;
1807		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1808		if (!strcmp(config_item_name(ci), name)) {
1809			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1810			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1811			return tg_pt_gp;
1812		}
1813	}
1814	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1815
1816	return NULL;
1817}
1818
1819static void core_alua_put_tg_pt_gp_from_name(
1820	struct t10_alua_tg_pt_gp *tg_pt_gp)
1821{
1822	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1823
1824	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1825	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1826	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1827}
1828
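/*
 * Link a LUN into a target port group and queue a 0x3f INQUIRY DATA HAS
 * CHANGED unit attention for every registered se_dev_entry, since the
 * REPORT TARGET PORT GROUPS data visible through this LUN changes.
 */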
1829static void __target_attach_tg_pt_gp(struct se_lun *lun,
1830		struct t10_alua_tg_pt_gp *tg_pt_gp)
1831{
1832	struct se_dev_entry *se_deve;
1833
1834	assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1835
1836	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1837	lun->lun_tg_pt_gp = tg_pt_gp;
1838	list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
1839	tg_pt_gp->tg_pt_gp_members++;
1840	spin_lock(&lun->lun_deve_lock);
1841	list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
1842		core_scsi3_ua_allocate(se_deve, 0x3f,
1843				       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
1844	spin_unlock(&lun->lun_deve_lock);
1845	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1846}
1847
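/*
 * The __target_{attach,detach}_tg_pt_gp() helpers above and below expect
 * the caller to hold lun->lun_tg_pt_gp_lock (see assert_spin_locked());
 * the wrappers without the underscore prefix take the lock themselves.
 */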
1848void target_attach_tg_pt_gp(struct se_lun *lun,
1849		struct t10_alua_tg_pt_gp *tg_pt_gp)
1850{
1851	spin_lock(&lun->lun_tg_pt_gp_lock);
1852	__target_attach_tg_pt_gp(lun, tg_pt_gp);
1853	spin_unlock(&lun->lun_tg_pt_gp_lock);
1854}
1855
1856static void __target_detach_tg_pt_gp(struct se_lun *lun,
1857		struct t10_alua_tg_pt_gp *tg_pt_gp)
1858{
1859	assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1860
1861	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1862	list_del_init(&lun->lun_tg_pt_gp_link);
1863	tg_pt_gp->tg_pt_gp_members--;
1864	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1865
1866	lun->lun_tg_pt_gp = NULL;
1867}
1868
1869void target_detach_tg_pt_gp(struct se_lun *lun)
1870{
1871	struct t10_alua_tg_pt_gp *tg_pt_gp;
1872
1873	spin_lock(&lun->lun_tg_pt_gp_lock);
1874	tg_pt_gp = lun->lun_tg_pt_gp;
1875	if (tg_pt_gp)
1876		__target_detach_tg_pt_gp(lun, tg_pt_gp);
1877	spin_unlock(&lun->lun_tg_pt_gp_lock);
1878}
1879
1880ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1881{
1882	struct config_item *tg_pt_ci;
1883	struct t10_alua_tg_pt_gp *tg_pt_gp;
1884	ssize_t len = 0;
1885
1886	spin_lock(&lun->lun_tg_pt_gp_lock);
1887	tg_pt_gp = lun->lun_tg_pt_gp;
1888	if (tg_pt_gp) {
1889		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1890		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1891			" %hu\nTG Port Primary Access State: %s\nTG Port "
1892			"Primary Access Status: %s\nTG Port Secondary Access"
1893			" State: %s\nTG Port Secondary Access Status: %s\n",
1894			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1895			core_alua_dump_state(
1896				tg_pt_gp->tg_pt_gp_alua_access_state),
1897			core_alua_dump_status(
1898				tg_pt_gp->tg_pt_gp_alua_access_status),
1899			atomic_read(&lun->lun_tg_pt_secondary_offline) ?
1900			"Offline" : "None",
1901			core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
1902	}
1903	spin_unlock(&lun->lun_tg_pt_gp_lock);
1904
1905	return len;
1906}
1907
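/*
 * Backs the per-LUN "alua_tg_pt_gp" configfs attribute.  The reserved
 * alias "NULL" restores the device's default_tg_pt_gp association.
 * Example usage (paths illustrative, depending on the fabric layout):
 *
 *   echo some_gp > .../target/$FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_gp
 *   echo NULL    > .../target/$FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_gp
 */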
1908ssize_t core_alua_store_tg_pt_gp_info(
1909	struct se_lun *lun,
1910	const char *page,
1911	size_t count)
1912{
1913	struct se_portal_group *tpg = lun->lun_tpg;
1914	/*
1915	 * rcu_dereference_raw protected by se_lun->lun_group symlink
1916	 * reference to se_device->dev_group.
1917	 */
1918	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
1919	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1920	unsigned char buf[TG_PT_GROUP_NAME_BUF];
1921	int move = 0;
1922
1923	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
1924	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
1925		return -ENODEV;
1926
1927	if (count >= TG_PT_GROUP_NAME_BUF) {	/* reserve room for the NUL */
1928		pr_err("ALUA Target Port Group alias too large!\n");
1929		return -EINVAL;
1930	}
1931	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1932	memcpy(buf, page, count);
1933	/*
1934	 * Any ALUA target port group alias besides "NULL" means we will be
1935	 * making a new group association.
1936	 */
1937	if (strcmp(strstrip(buf), "NULL")) {
1938		/*
1939		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1940		 * struct t10_alua_tg_pt_gp.  This reference is released with
1941		 * core_alua_put_tg_pt_gp_from_name() below.
1942		 */
1943		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
1944					strstrip(buf));
1945		if (!tg_pt_gp_new)
1946			return -ENODEV;
1947	}
1948
1949	spin_lock(&lun->lun_tg_pt_gp_lock);
1950	tg_pt_gp = lun->lun_tg_pt_gp;
1951	if (tg_pt_gp) {
1952		/*
1953		 * Clearing an existing tg_pt_gp association, and replacing
1954		 * with the default_tg_pt_gp.
1955		 */
1956		if (!tg_pt_gp_new) {
1957			pr_debug("Target_Core_ConfigFS: Moving"
1958				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
1959				" alua/%s, ID: %hu back to"
1960				" default_tg_pt_gp\n",
1961				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1962				tpg->se_tpg_tfo->tpg_get_tag(tpg),
1963				config_item_name(&lun->lun_group.cg_item),
1964				config_item_name(
1965					&tg_pt_gp->tg_pt_gp_group.cg_item),
1966				tg_pt_gp->tg_pt_gp_id);
1967
1968			__target_detach_tg_pt_gp(lun, tg_pt_gp);
1969			__target_attach_tg_pt_gp(lun,
1970					dev->t10_alua.default_tg_pt_gp);
1971			spin_unlock(&lun->lun_tg_pt_gp_lock);
1972
1973			return count;
1974		}
1975		__target_detach_tg_pt_gp(lun, tg_pt_gp);
1976		move = 1;
1977	}
1978
1979	__target_attach_tg_pt_gp(lun, tg_pt_gp_new);
1980	spin_unlock(&lun->lun_tg_pt_gp_lock);
1981	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1982		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
1983		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1984		tpg->se_tpg_tfo->tpg_get_tag(tpg),
1985		config_item_name(&lun->lun_group.cg_item),
1986		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1987		tg_pt_gp_new->tg_pt_gp_id);
1988
1989	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1990	return count;
1991}
1992
1993ssize_t core_alua_show_access_type(
1994	struct t10_alua_tg_pt_gp *tg_pt_gp,
1995	char *page)
1996{
1997	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
1998	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
1999		return sprintf(page, "Implicit and Explicit\n");
2000	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
2001		return sprintf(page, "Implicit\n");
2002	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2003		return sprintf(page, "Explicit\n");
2004	else
2005		return sprintf(page, "None\n");
2006}
2007
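/*
 * alua_access_type accepts 0-3, mapped below onto the TPGS bits that the
 * show routine above reports: 0 = none, 1 = implicit only, 2 = explicit
 * only, 3 = implicit and explicit.  Example (group path illustrative):
 *
 *   echo 3 > .../core/$HBA/$DEV/alua/$TG_PT_GP/alua_access_type
 */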
2008ssize_t core_alua_store_access_type(
2009	struct t10_alua_tg_pt_gp *tg_pt_gp,
2010	const char *page,
2011	size_t count)
2012{
2013	unsigned long tmp;
2014	int ret;
2015
2016	ret = kstrtoul(page, 0, &tmp);
2017	if (ret < 0) {
2018		pr_err("Unable to extract alua_access_type\n");
2019		return ret;
2020	}
2021	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2022		pr_err("Illegal value for alua_access_type:"
2023				" %lu\n", tmp);
2024		return -EINVAL;
2025	}
2026	if (tmp == 3)
2027		tg_pt_gp->tg_pt_gp_alua_access_type =
2028			TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2029	else if (tmp == 2)
2030		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2031	else if (tmp == 1)
2032		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2033	else
2034		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2035
2036	return count;
2037}
2038
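/*
 * nonop_delay_msecs throttles commands arriving on an Active/NonOptimized
 * path: core_alua_check_nonop_delay() sleeps for this interval before a
 * command is processed further.  A value of zero disables the delay.
 */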
2039ssize_t core_alua_show_nonop_delay_msecs(
2040	struct t10_alua_tg_pt_gp *tg_pt_gp,
2041	char *page)
2042{
2043	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2044}
2045
2046ssize_t core_alua_store_nonop_delay_msecs(
2047	struct t10_alua_tg_pt_gp *tg_pt_gp,
2048	const char *page,
2049	size_t count)
2050{
2051	unsigned long tmp;
2052	int ret;
2053
2054	ret = kstrtoul(page, 0, &tmp);
2055	if (ret < 0) {
2056		pr_err("Unable to extract nonop_delay_msecs\n");
2057		return ret;
2058	}
2059	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2060		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2061			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2062			ALUA_MAX_NONOP_DELAY_MSECS);
2063		return -EINVAL;
2064	}
2065	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2066
2067	return count;
2068}
2069
2070ssize_t core_alua_show_trans_delay_msecs(
2071	struct t10_alua_tg_pt_gp *tg_pt_gp,
2072	char *page)
2073{
2074	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2075}
2076
2077ssize_t core_alua_store_trans_delay_msecs(
2078	struct t10_alua_tg_pt_gp *tg_pt_gp,
2079	const char *page,
2080	size_t count)
2081{
2082	unsigned long tmp;
2083	int ret;
2084
2085	ret = kstrtoul(page, 0, &tmp);
2086	if (ret < 0) {
2087		pr_err("Unable to extract trans_delay_msecs\n");
2088		return ret;
2089	}
2090	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2091		pr_err("Passed trans_delay_msecs: %lu, exceeds"
2092			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2093			ALUA_MAX_TRANS_DELAY_MSECS);
2094		return -EINVAL;
2095	}
2096	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2097
2098	return count;
2099}
2100
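/*
 * implicit_trans_secs is advertised in the extended header of REPORT
 * TARGET PORT GROUPS as a base for the initiator's transition timeout;
 * it is reported only and does not itself bound a transition here.
 */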
2101ssize_t core_alua_show_implicit_trans_secs(
2102	struct t10_alua_tg_pt_gp *tg_pt_gp,
2103	char *page)
2104{
2105	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2106}
2107
2108ssize_t core_alua_store_implicit_trans_secs(
2109	struct t10_alua_tg_pt_gp *tg_pt_gp,
2110	const char *page,
2111	size_t count)
2112{
2113	unsigned long tmp;
2114	int ret;
2115
2116	ret = kstrtoul(page, 0, &tmp);
2117	if (ret < 0) {
2118		pr_err("Unable to extract implicit_trans_secs\n");
2119		return ret;
2120	}
2121	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2122		pr_err("Passed implicit_trans_secs: %lu, exceeds"
2123			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2124			ALUA_MAX_IMPLICIT_TRANS_SECS);
2125		return -EINVAL;
2126	}
2127	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2128
2129	return count;
2130}
2131
2132ssize_t core_alua_show_preferred_bit(
2133	struct t10_alua_tg_pt_gp *tg_pt_gp,
2134	char *page)
2135{
2136	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2137}
2138
2139ssize_t core_alua_store_preferred_bit(
2140	struct t10_alua_tg_pt_gp *tg_pt_gp,
2141	const char *page,
2142	size_t count)
2143{
2144	unsigned long tmp;
2145	int ret;
2146
2147	ret = kstrtoul(page, 0, &tmp);
2148	if (ret < 0) {
2149		pr_err("Unable to extract preferred ALUA value\n");
2150		return ret;
2151	}
2152	if ((tmp != 0) && (tmp != 1)) {
2153		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2154		return -EINVAL;
2155	}
2156	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2157
2158	return count;
2159}
2160
2161ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2162{
2163	return sprintf(page, "%d\n",
2164		atomic_read(&lun->lun_tg_pt_secondary_offline));
2165}
2166
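/*
 * Writing 1 or 0 requests or clears the secondary OFFLINE target port
 * access state via core_alua_set_tg_pt_secondary_state().  Example
 * (path illustrative):
 *
 *   echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline
 */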
2167ssize_t core_alua_store_offline_bit(
2168	struct se_lun *lun,
2169	const char *page,
2170	size_t count)
2171{
2172	/*
2173	 * rcu_dereference_raw protected by se_lun->lun_group symlink
2174	 * reference to se_device->dev_group.
2175	 */
2176	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
2177	unsigned long tmp;
2178	int ret;
2179
2180	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
2181	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
2182		return -ENODEV;
2183
2184	ret = kstrtoul(page, 0, &tmp);
2185	if (ret < 0) {
2186		pr_err("Unable to extract alua_tg_pt_offline value\n");
2187		return ret;
2188	}
2189	if ((tmp != 0) && (tmp != 1)) {
2190		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2191				tmp);
2192		return -EINVAL;
2193	}
2194
2195	ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
2196	if (ret < 0)
2197		return -EINVAL;
2198
2199	return count;
2200}
2201
2202ssize_t core_alua_show_secondary_status(
2203	struct se_lun *lun,
2204	char *page)
2205{
2206	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
2207}
2208
2209ssize_t core_alua_store_secondary_status(
2210	struct se_lun *lun,
2211	const char *page,
2212	size_t count)
2213{
2214	unsigned long tmp;
2215	int ret;
2216
2217	ret = kstrtoul(page, 0, &tmp);
2218	if (ret < 0) {
2219		pr_err("Unable to extract alua_tg_pt_status\n");
2220		return ret;
2221	}
2222	if ((tmp != ALUA_STATUS_NONE) &&
2223	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2224	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2225		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2226				tmp);
2227		return -EINVAL;
2228	}
2229	lun->lun_tg_pt_secondary_stat = (int)tmp;
2230
2231	return count;
2232}
2233
2234ssize_t core_alua_show_secondary_write_metadata(
2235	struct se_lun *lun,
2236	char *page)
2237{
2238	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
2239}
2240
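/*
 * With alua_tg_pt_write_md set, secondary state changes are persisted
 * under $db_root/alua/... by core_alua_update_tpg_secondary_metadata(),
 * allowing the offline bit and status to be restored after a restart.
 */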
2241ssize_t core_alua_store_secondary_write_metadata(
2242	struct se_lun *lun,
2243	const char *page,
2244	size_t count)
2245{
2246	unsigned long tmp;
2247	int ret;
2248
2249	ret = kstrtoul(page, 0, &tmp);
2250	if (ret < 0) {
2251		pr_err("Unable to extract alua_tg_pt_write_md\n");
2252		return ret;
2253	}
2254	if ((tmp != 0) && (tmp != 1)) {
2255		pr_err("Illegal value for alua_tg_pt_write_md:"
2256				" %lu\n", tmp);
2257		return -EINVAL;
2258	}
2259	lun->lun_tg_pt_secondary_write_md = (int)tmp;
2260
2261	return count;
2262}
2263
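/*
 * Called at device configuration time: unless the backend passes ALUA
 * through to the underlying device (TRANSPORT_FLAG_PASSTHROUGH_ALUA) or
 * the HBA is reserved for internal use, the new se_device is associated
 * with default_lu_gp.
 */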
2264int core_setup_alua(struct se_device *dev)
2265{
2266	if (!(dev->transport_flags &
2267	     TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
2268	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2269		struct t10_alua_lu_gp_member *lu_gp_mem;
2270
2271		/*
2272		 * Associate this struct se_device with the default ALUA
2273		 * LUN Group.
2274		 */
2275		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2276		if (IS_ERR(lu_gp_mem))
2277			return PTR_ERR(lu_gp_mem);
2278
2279		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2280		__core_alua_attach_lu_gp_mem(lu_gp_mem,
2281				default_lu_gp);
2282		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2283
2284		pr_debug("%s: Adding to default ALUA LU Group:"
2285			" core/alua/lu_gps/default_lu_gp\n",
2286			dev->transport->name);
2287	}
2288
2289	return 0;
2290}
v4.17
 
   1/*******************************************************************************
   2 * Filename:  target_core_alua.c
   3 *
   4 * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA)
   5 *
   6 * (c) Copyright 2009-2013 Datera, Inc.
   7 *
   8 * Nicholas A. Bellinger <nab@kernel.org>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License as published by
  12 * the Free Software Foundation; either version 2 of the License, or
  13 * (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public License
  21 * along with this program; if not, write to the Free Software
  22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  23 *
  24 ******************************************************************************/
  25
  26#include <linux/slab.h>
  27#include <linux/spinlock.h>
  28#include <linux/configfs.h>
  29#include <linux/delay.h>
  30#include <linux/export.h>
  31#include <linux/fcntl.h>
  32#include <linux/file.h>
  33#include <linux/fs.h>
  34#include <scsi/scsi_proto.h>
  35#include <asm/unaligned.h>
  36
  37#include <target/target_core_base.h>
  38#include <target/target_core_backend.h>
  39#include <target/target_core_fabric.h>
  40
  41#include "target_core_internal.h"
  42#include "target_core_alua.h"
  43#include "target_core_ua.h"
  44
  45static sense_reason_t core_alua_check_transition(int state, int valid,
  46						 int *primary, int explicit);
  47static int core_alua_set_tg_pt_secondary_state(
  48		struct se_lun *lun, int explicit, int offline);
  49
  50static char *core_alua_dump_state(int state);
  51
  52static void __target_attach_tg_pt_gp(struct se_lun *lun,
  53		struct t10_alua_tg_pt_gp *tg_pt_gp);
  54
  55static u16 alua_lu_gps_counter;
  56static u32 alua_lu_gps_count;
  57
  58static DEFINE_SPINLOCK(lu_gps_lock);
  59static LIST_HEAD(lu_gps_list);
  60
  61struct t10_alua_lu_gp *default_lu_gp;
  62
  63/*
  64 * REPORT REFERRALS
  65 *
  66 * See sbc3r35 section 5.23
  67 */
  68sense_reason_t
  69target_emulate_report_referrals(struct se_cmd *cmd)
  70{
  71	struct se_device *dev = cmd->se_dev;
  72	struct t10_alua_lba_map *map;
  73	struct t10_alua_lba_map_member *map_mem;
  74	unsigned char *buf;
  75	u32 rd_len = 0, off;
  76
  77	if (cmd->data_length < 4) {
  78		pr_warn("REPORT REFERRALS allocation length %u too"
  79			" small\n", cmd->data_length);
  80		return TCM_INVALID_CDB_FIELD;
  81	}
  82
  83	buf = transport_kmap_data_sg(cmd);
  84	if (!buf)
  85		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
  86
  87	off = 4;
  88	spin_lock(&dev->t10_alua.lba_map_lock);
  89	if (list_empty(&dev->t10_alua.lba_map_list)) {
  90		spin_unlock(&dev->t10_alua.lba_map_lock);
  91		transport_kunmap_data_sg(cmd);
  92
  93		return TCM_UNSUPPORTED_SCSI_OPCODE;
  94	}
  95
  96	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
  97			    lba_map_list) {
  98		int desc_num = off + 3;
  99		int pg_num;
 100
 101		off += 4;
 102		if (cmd->data_length > off)
 103			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
 104		off += 8;
 105		if (cmd->data_length > off)
 106			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
 107		off += 8;
 108		rd_len += 20;
 109		pg_num = 0;
 110		list_for_each_entry(map_mem, &map->lba_map_mem_list,
 111				    lba_map_mem_list) {
 112			int alua_state = map_mem->lba_map_mem_alua_state;
 113			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
 114
 115			if (cmd->data_length > off)
 116				buf[off] = alua_state & 0x0f;
 117			off += 2;
 118			if (cmd->data_length > off)
 119				buf[off] = (alua_pg_id >> 8) & 0xff;
 120			off++;
 121			if (cmd->data_length > off)
 122				buf[off] = (alua_pg_id & 0xff);
 123			off++;
 124			rd_len += 4;
 125			pg_num++;
 126		}
 127		if (cmd->data_length > desc_num)
 128			buf[desc_num] = pg_num;
 129	}
 130	spin_unlock(&dev->t10_alua.lba_map_lock);
 131
 132	/*
 133	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
 134	 */
 135	put_unaligned_be16(rd_len, &buf[2]);
 136
 137	transport_kunmap_data_sg(cmd);
 138
 139	target_complete_cmd(cmd, GOOD);
 140	return 0;
 141}
 142
 143/*
 144 * REPORT_TARGET_PORT_GROUPS
 145 *
 146 * See spc4r17 section 6.27
 147 */
 148sense_reason_t
 149target_emulate_report_target_port_groups(struct se_cmd *cmd)
 150{
 151	struct se_device *dev = cmd->se_dev;
 152	struct t10_alua_tg_pt_gp *tg_pt_gp;
 153	struct se_lun *lun;
 154	unsigned char *buf;
 155	u32 rd_len = 0, off;
 156	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
 157
 158	/*
 159	 * Skip over RESERVED area to first Target port group descriptor
 160	 * depending on the PARAMETER DATA FORMAT type..
 161	 */
 162	if (ext_hdr != 0)
 163		off = 8;
 164	else
 165		off = 4;
 166
 167	if (cmd->data_length < off) {
 168		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
 169			" small for %s header\n", cmd->data_length,
 170			(ext_hdr) ? "extended" : "normal");
 171		return TCM_INVALID_CDB_FIELD;
 172	}
 173	buf = transport_kmap_data_sg(cmd);
 174	if (!buf)
 175		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 176
 177	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 178	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
 179			tg_pt_gp_list) {
 180		/*
 181		 * Check if the Target port group and Target port descriptor list
 182		 * based on tg_pt_gp_members count will fit into the response payload.
 183		 * Otherwise, bump rd_len to let the initiator know we have exceeded
 184		 * the allocation length and the response is truncated.
 185		 */
 186		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
 187		     cmd->data_length) {
 188			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
 189			continue;
 190		}
 191		/*
 192		 * PREF: Preferred target port bit, determine if this
 193		 * bit should be set for port group.
 194		 */
 195		if (tg_pt_gp->tg_pt_gp_pref)
 196			buf[off] = 0x80;
 197		/*
 198		 * Set the ASYMMETRIC ACCESS State
 199		 */
 200		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
 201		/*
 202		 * Set supported ASYMMETRIC ACCESS State bits
 203		 */
 204		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
 205		/*
 206		 * TARGET PORT GROUP
 207		 */
 208		put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
 209		off += 2;
 210
 211		off++; /* Skip over Reserved */
 212		/*
 213		 * STATUS CODE
 214		 */
 215		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
 216		/*
 217		 * Vendor Specific field
 218		 */
 219		buf[off++] = 0x00;
 220		/*
 221		 * TARGET PORT COUNT
 222		 */
 223		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
 224		rd_len += 8;
 225
 226		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 227		list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
 228				lun_tg_pt_gp_link) {
 229			/*
 230			 * Start Target Port descriptor format
 231			 *
 232			 * See spc4r17 section 6.2.7 Table 247
 233			 */
 234			off += 2; /* Skip over Obsolete */
 235			/*
 236			 * Set RELATIVE TARGET PORT IDENTIFIER
 237			 */
 238			put_unaligned_be16(lun->lun_rtpi, &buf[off]);
 239			off += 2;
 240			rd_len += 4;
 241		}
 242		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 243	}
 244	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 245	/*
 246	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
 247	 */
 248	put_unaligned_be32(rd_len, &buf[0]);
 249
 250	/*
 251	 * Fill in the Extended header parameter data format if requested
 252	 */
 253	if (ext_hdr != 0) {
 254		buf[4] = 0x10;
 255		/*
 256		 * Set the implicit transition time (in seconds) for the application
 257		 * client to use as a base for it's transition timeout value.
 258		 *
 259		 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
 260		 * this CDB was received upon to determine this value individually
 261		 * for ALUA target port group.
 262		 */
 263		spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
 264		tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
 265		if (tg_pt_gp)
 266			buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
 267		spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
 268	}
 269	transport_kunmap_data_sg(cmd);
 270
 271	target_complete_cmd(cmd, GOOD);
 272	return 0;
 273}
 274
 275/*
 276 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 277 *
 278 * See spc4r17 section 6.35
 279 */
 280sense_reason_t
 281target_emulate_set_target_port_groups(struct se_cmd *cmd)
 282{
 283	struct se_device *dev = cmd->se_dev;
 284	struct se_lun *l_lun = cmd->se_lun;
 285	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 286	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 287	unsigned char *buf;
 288	unsigned char *ptr;
 289	sense_reason_t rc = TCM_NO_SENSE;
 290	u32 len = 4; /* Skip over RESERVED area in header */
 291	int alua_access_state, primary = 0, valid_states;
 292	u16 tg_pt_id, rtpi;
 293
 294	if (cmd->data_length < 4) {
 295		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
 296			" small\n", cmd->data_length);
 297		return TCM_INVALID_PARAMETER_LIST;
 298	}
 299
 300	buf = transport_kmap_data_sg(cmd);
 301	if (!buf)
 302		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 303
 304	/*
 305	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
 306	 * for the local tg_pt_gp.
 307	 */
 308	spin_lock(&l_lun->lun_tg_pt_gp_lock);
 309	l_tg_pt_gp = l_lun->lun_tg_pt_gp;
 310	if (!l_tg_pt_gp) {
 311		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
 312		pr_err("Unable to access l_lun->tg_pt_gp\n");
 313		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 314		goto out;
 315	}
 316
 317	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
 318		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
 319		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
 320				" while TPGS_EXPLICIT_ALUA is disabled\n");
 321		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 322		goto out;
 323	}
 324	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
 325	spin_unlock(&l_lun->lun_tg_pt_gp_lock);
 326
 327	ptr = &buf[4]; /* Skip over RESERVED area in header */
 328
 329	while (len < cmd->data_length) {
 330		bool found = false;
 331		alua_access_state = (ptr[0] & 0x0f);
 332		/*
 333		 * Check the received ALUA access state, and determine if
 334		 * the state is a primary or secondary target port asymmetric
 335		 * access state.
 336		 */
 337		rc = core_alua_check_transition(alua_access_state, valid_states,
 338						&primary, 1);
 339		if (rc) {
 340			/*
 341			 * If the SET TARGET PORT GROUPS attempts to establish
 342			 * an invalid combination of target port asymmetric
 343			 * access states or attempts to establish an
 344			 * unsupported target port asymmetric access state,
 345			 * then the command shall be terminated with CHECK
 346			 * CONDITION status, with the sense key set to ILLEGAL
 347			 * REQUEST, and the additional sense code set to INVALID
 348			 * FIELD IN PARAMETER LIST.
 349			 */
 350			goto out;
 351		}
 352
 353		/*
 354		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
 355		 * specifies a primary target port asymmetric access state,
 356		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
 357		 * a primary target port group for which the primary target
 358		 * port asymmetric access state shall be changed. If the
 359		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
 360		 * port asymmetric access state, then the TARGET PORT GROUP OR
 361		 * TARGET PORT field specifies the relative target port
 362		 * identifier (see 3.1.120) of the target port for which the
 363		 * secondary target port asymmetric access state shall be
 364		 * changed.
 365		 */
 366		if (primary) {
 367			tg_pt_id = get_unaligned_be16(ptr + 2);
 368			/*
 369			 * Locate the matching target port group ID from
 370			 * the global tg_pt_gp list
 371			 */
 372			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 373			list_for_each_entry(tg_pt_gp,
 374					&dev->t10_alua.tg_pt_gps_list,
 375					tg_pt_gp_list) {
 376				if (!tg_pt_gp->tg_pt_gp_valid_id)
 377					continue;
 378
 379				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
 380					continue;
 381
 382				atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 383
 384				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 385
 386				if (!core_alua_do_port_transition(tg_pt_gp,
 387						dev, l_lun, nacl,
 388						alua_access_state, 1))
 389					found = true;
 390
 391				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 392				atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 393				break;
 394			}
 395			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 396		} else {
 397			struct se_lun *lun;
 398
 399			/*
 400			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
 401			 * the Target Port in question for the the incoming
 402			 * SET_TARGET_PORT_GROUPS op.
 403			 */
 404			rtpi = get_unaligned_be16(ptr + 2);
 405			/*
 406			 * Locate the matching relative target port identifier
 407			 * for the struct se_device storage object.
 408			 */
 409			spin_lock(&dev->se_port_lock);
 410			list_for_each_entry(lun, &dev->dev_sep_list,
 411							lun_dev_link) {
 412				if (lun->lun_rtpi != rtpi)
 413					continue;
 414
 415				// XXX: racy unlock
 416				spin_unlock(&dev->se_port_lock);
 417
 418				if (!core_alua_set_tg_pt_secondary_state(
 419						lun, 1, 1))
 420					found = true;
 421
 422				spin_lock(&dev->se_port_lock);
 423				break;
 424			}
 425			spin_unlock(&dev->se_port_lock);
 426		}
 427
 428		if (!found) {
 429			rc = TCM_INVALID_PARAMETER_LIST;
 430			goto out;
 431		}
 432
 433		ptr += 4;
 434		len += 4;
 435	}
 436
 437out:
 438	transport_kunmap_data_sg(cmd);
 439	if (!rc)
 440		target_complete_cmd(cmd, GOOD);
 441	return rc;
 442}
 443
 444static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
 445{
 446	/*
 447	 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
 448	 * The ALUA additional sense code qualifier (ASCQ) is determined
 449	 * by the ALUA primary or secondary access state..
 450	 */
 451	pr_debug("[%s]: ALUA TG Port not available, "
 452		"SenseKey: NOT_READY, ASC/ASCQ: "
 453		"0x04/0x%02x\n",
 454		cmd->se_tfo->get_fabric_name(), alua_ascq);
 455
 456	cmd->scsi_asc = 0x04;
 457	cmd->scsi_ascq = alua_ascq;
 458}
 459
 460static inline void core_alua_state_nonoptimized(
 461	struct se_cmd *cmd,
 462	unsigned char *cdb,
 463	int nonop_delay_msecs)
 464{
 465	/*
 466	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
 467	 * later to determine if processing of this cmd needs to be
 468	 * temporarily delayed for the Active/NonOptimized primary access state.
 469	 */
 470	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
 471	cmd->alua_nonop_delay = nonop_delay_msecs;
 472}
 473
 474static inline int core_alua_state_lba_dependent(
 475	struct se_cmd *cmd,
 476	struct t10_alua_tg_pt_gp *tg_pt_gp)
 477{
 478	struct se_device *dev = cmd->se_dev;
 479	u64 segment_size, segment_mult, sectors, lba;
 480
 481	/* Only need to check for cdb actually containing LBAs */
 482	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
 483		return 0;
 484
 485	spin_lock(&dev->t10_alua.lba_map_lock);
 486	segment_size = dev->t10_alua.lba_map_segment_size;
 487	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
 488	sectors = cmd->data_length / dev->dev_attrib.block_size;
 489
 490	lba = cmd->t_task_lba;
 491	while (lba < cmd->t_task_lba + sectors) {
 492		struct t10_alua_lba_map *cur_map = NULL, *map;
 493		struct t10_alua_lba_map_member *map_mem;
 494
 495		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
 496				    lba_map_list) {
 497			u64 start_lba, last_lba;
 498			u64 first_lba = map->lba_map_first_lba;
 499
 500			if (segment_mult) {
 501				u64 tmp = lba;
 502				start_lba = do_div(tmp, segment_size * segment_mult);
 503
 504				last_lba = first_lba + segment_size - 1;
 505				if (start_lba >= first_lba &&
 506				    start_lba <= last_lba) {
 507					lba += segment_size;
 508					cur_map = map;
 509					break;
 510				}
 511			} else {
 512				last_lba = map->lba_map_last_lba;
 513				if (lba >= first_lba && lba <= last_lba) {
 514					lba = last_lba + 1;
 515					cur_map = map;
 516					break;
 517				}
 518			}
 519		}
 520		if (!cur_map) {
 521			spin_unlock(&dev->t10_alua.lba_map_lock);
 522			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 523			return 1;
 524		}
 525		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
 526				    lba_map_mem_list) {
 527			if (map_mem->lba_map_mem_alua_pg_id !=
 528			    tg_pt_gp->tg_pt_gp_id)
 529				continue;
 530			switch(map_mem->lba_map_mem_alua_state) {
 531			case ALUA_ACCESS_STATE_STANDBY:
 532				spin_unlock(&dev->t10_alua.lba_map_lock);
 533				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 534				return 1;
 535			case ALUA_ACCESS_STATE_UNAVAILABLE:
 536				spin_unlock(&dev->t10_alua.lba_map_lock);
 537				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 538				return 1;
 539			default:
 540				break;
 541			}
 542		}
 543	}
 544	spin_unlock(&dev->t10_alua.lba_map_lock);
 545	return 0;
 546}
 547
 548static inline int core_alua_state_standby(
 549	struct se_cmd *cmd,
 550	unsigned char *cdb)
 551{
 552	/*
 553	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
 554	 * spc4r17 section 5.9.2.4.4
 555	 */
 556	switch (cdb[0]) {
 557	case INQUIRY:
 558	case LOG_SELECT:
 559	case LOG_SENSE:
 560	case MODE_SELECT:
 561	case MODE_SENSE:
 562	case REPORT_LUNS:
 563	case RECEIVE_DIAGNOSTIC:
 564	case SEND_DIAGNOSTIC:
 565	case READ_CAPACITY:
 566		return 0;
 567	case SERVICE_ACTION_IN_16:
 568		switch (cdb[1] & 0x1f) {
 569		case SAI_READ_CAPACITY_16:
 570			return 0;
 571		default:
 572			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 573			return 1;
 574		}
 575	case MAINTENANCE_IN:
 576		switch (cdb[1] & 0x1f) {
 577		case MI_REPORT_TARGET_PGS:
 578			return 0;
 579		default:
 580			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 581			return 1;
 582		}
 583	case MAINTENANCE_OUT:
 584		switch (cdb[1]) {
 585		case MO_SET_TARGET_PGS:
 586			return 0;
 587		default:
 588			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 589			return 1;
 590		}
 591	case REQUEST_SENSE:
 592	case PERSISTENT_RESERVE_IN:
 593	case PERSISTENT_RESERVE_OUT:
 594	case READ_BUFFER:
 595	case WRITE_BUFFER:
 596		return 0;
 597	default:
 598		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
 599		return 1;
 600	}
 601
 602	return 0;
 603}
 604
 605static inline int core_alua_state_unavailable(
 606	struct se_cmd *cmd,
 607	unsigned char *cdb)
 608{
 609	/*
 610	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
 611	 * spc4r17 section 5.9.2.4.5
 612	 */
 613	switch (cdb[0]) {
 614	case INQUIRY:
 615	case REPORT_LUNS:
 616		return 0;
 617	case MAINTENANCE_IN:
 618		switch (cdb[1] & 0x1f) {
 619		case MI_REPORT_TARGET_PGS:
 620			return 0;
 621		default:
 622			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 623			return 1;
 624		}
 625	case MAINTENANCE_OUT:
 626		switch (cdb[1]) {
 627		case MO_SET_TARGET_PGS:
 628			return 0;
 629		default:
 630			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 631			return 1;
 632		}
 633	case REQUEST_SENSE:
 634	case READ_BUFFER:
 635	case WRITE_BUFFER:
 636		return 0;
 637	default:
 638		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
 639		return 1;
 640	}
 641
 642	return 0;
 643}
 644
 645static inline int core_alua_state_transition(
 646	struct se_cmd *cmd,
 647	unsigned char *cdb)
 648{
 649	/*
 650	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
 651	 * spc4r17 section 5.9.2.5
 652	 */
 653	switch (cdb[0]) {
 654	case INQUIRY:
 655	case REPORT_LUNS:
 656		return 0;
 657	case MAINTENANCE_IN:
 658		switch (cdb[1] & 0x1f) {
 659		case MI_REPORT_TARGET_PGS:
 660			return 0;
 661		default:
 662			set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
 663			return 1;
 664		}
 665	case REQUEST_SENSE:
 666	case READ_BUFFER:
 667	case WRITE_BUFFER:
 668		return 0;
 669	default:
 670		set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
 671		return 1;
 672	}
 673
 674	return 0;
 675}
 676
 677/*
 678 * return 1: Is used to signal LUN not accessible, and check condition/not ready
 679 * return 0: Used to signal success
 680 * return -1: Used to signal failure, and invalid cdb field
 681 */
 682sense_reason_t
 683target_alua_state_check(struct se_cmd *cmd)
 684{
 685	struct se_device *dev = cmd->se_dev;
 686	unsigned char *cdb = cmd->t_task_cdb;
 687	struct se_lun *lun = cmd->se_lun;
 688	struct t10_alua_tg_pt_gp *tg_pt_gp;
 689	int out_alua_state, nonop_delay_msecs;
 690
 691	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
 692		return 0;
 693	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
 694		return 0;
 695
 696	/*
 697	 * First, check for a struct se_port specific secondary ALUA target port
 698	 * access state: OFFLINE
 699	 */
 700	if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
 701		pr_debug("ALUA: Got secondary offline status for local"
 702				" target port\n");
 703		set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
 704		return TCM_CHECK_CONDITION_NOT_READY;
 705	}
 706
 707	if (!lun->lun_tg_pt_gp)
 708		return 0;
 709
 710	spin_lock(&lun->lun_tg_pt_gp_lock);
 711	tg_pt_gp = lun->lun_tg_pt_gp;
 712	out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
 713	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
 714
 715	// XXX: keeps using tg_pt_gp witout reference after unlock
 716	spin_unlock(&lun->lun_tg_pt_gp_lock);
 717	/*
 718	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
 719	 * statement so the compiler knows explicitly to check this case first.
 720	 * For the Optimized ALUA access state case, we want to process the
 721	 * incoming fabric cmd ASAP..
 722	 */
 723	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
 724		return 0;
 725
 726	switch (out_alua_state) {
 727	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 728		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
 729		break;
 730	case ALUA_ACCESS_STATE_STANDBY:
 731		if (core_alua_state_standby(cmd, cdb))
 732			return TCM_CHECK_CONDITION_NOT_READY;
 733		break;
 734	case ALUA_ACCESS_STATE_UNAVAILABLE:
 735		if (core_alua_state_unavailable(cmd, cdb))
 736			return TCM_CHECK_CONDITION_NOT_READY;
 737		break;
 738	case ALUA_ACCESS_STATE_TRANSITION:
 739		if (core_alua_state_transition(cmd, cdb))
 740			return TCM_CHECK_CONDITION_NOT_READY;
 741		break;
 742	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
 743		if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
 744			return TCM_CHECK_CONDITION_NOT_READY;
 745		break;
 746	/*
 747	 * OFFLINE is a secondary ALUA target port group access state, that is
 748	 * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
 749	 */
 750	case ALUA_ACCESS_STATE_OFFLINE:
 751	default:
 752		pr_err("Unknown ALUA access state: 0x%02x\n",
 753				out_alua_state);
 754		return TCM_INVALID_CDB_FIELD;
 755	}
 756
 757	return 0;
 758}
 759
 760/*
 761 * Check implicit and explicit ALUA state change request.
 762 */
 763static sense_reason_t
 764core_alua_check_transition(int state, int valid, int *primary, int explicit)
 765{
 766	/*
 767	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
 768	 * defined as primary target port asymmetric access states.
 769	 */
 770	switch (state) {
 771	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
 772		if (!(valid & ALUA_AO_SUP))
 773			goto not_supported;
 774		*primary = 1;
 775		break;
 776	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 777		if (!(valid & ALUA_AN_SUP))
 778			goto not_supported;
 779		*primary = 1;
 780		break;
 781	case ALUA_ACCESS_STATE_STANDBY:
 782		if (!(valid & ALUA_S_SUP))
 783			goto not_supported;
 784		*primary = 1;
 785		break;
 786	case ALUA_ACCESS_STATE_UNAVAILABLE:
 787		if (!(valid & ALUA_U_SUP))
 788			goto not_supported;
 789		*primary = 1;
 790		break;
 791	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
 792		if (!(valid & ALUA_LBD_SUP))
 793			goto not_supported;
 794		*primary = 1;
 795		break;
 796	case ALUA_ACCESS_STATE_OFFLINE:
 797		/*
 798		 * OFFLINE state is defined as a secondary target port
 799		 * asymmetric access state.
 800		 */
 801		if (!(valid & ALUA_O_SUP))
 802			goto not_supported;
 803		*primary = 0;
 804		break;
 805	case ALUA_ACCESS_STATE_TRANSITION:
 806		if (!(valid & ALUA_T_SUP) || explicit)
 807			/*
 808			 * Transitioning is set internally and by tcmu daemon,
 809			 * and cannot be selected through a STPG.
 810			 */
 811			goto not_supported;
 812		*primary = 0;
 813		break;
 814	default:
 815		pr_err("Unknown ALUA access state: 0x%02x\n", state);
 816		return TCM_INVALID_PARAMETER_LIST;
 817	}
 818
 819	return 0;
 820
 821not_supported:
 822	pr_err("ALUA access state %s not supported",
 823	       core_alua_dump_state(state));
 824	return TCM_INVALID_PARAMETER_LIST;
 825}
 826
 827static char *core_alua_dump_state(int state)
 828{
 829	switch (state) {
 830	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
 831		return "Active/Optimized";
 832	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 833		return "Active/NonOptimized";
 834	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
 835		return "LBA Dependent";
 836	case ALUA_ACCESS_STATE_STANDBY:
 837		return "Standby";
 838	case ALUA_ACCESS_STATE_UNAVAILABLE:
 839		return "Unavailable";
 840	case ALUA_ACCESS_STATE_OFFLINE:
 841		return "Offline";
 842	case ALUA_ACCESS_STATE_TRANSITION:
 843		return "Transitioning";
 844	default:
 845		return "Unknown";
 846	}
 847
 848	return NULL;
 849}
 850
 851char *core_alua_dump_status(int status)
 852{
 853	switch (status) {
 854	case ALUA_STATUS_NONE:
 855		return "None";
 856	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
 857		return "Altered by Explicit STPG";
 858	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
 859		return "Altered by Implicit ALUA";
 860	default:
 861		return "Unknown";
 862	}
 863
 864	return NULL;
 865}
 866
 867/*
 868 * Used by fabric modules to determine when we need to delay processing
 869 * for the Active/NonOptimized paths..
 870 */
 871int core_alua_check_nonop_delay(
 872	struct se_cmd *cmd)
 873{
 874	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
 875		return 0;
 876	if (in_interrupt())
 877		return 0;
 878	/*
 879	 * The ALUA Active/NonOptimized access state delay can be disabled
 880	 * in via configfs with a value of zero
 881	 */
 882	if (!cmd->alua_nonop_delay)
 883		return 0;
 884	/*
 885	 * struct se_cmd->alua_nonop_delay gets set by a target port group
 886	 * defined interval in core_alua_state_nonoptimized()
 887	 */
 888	msleep_interruptible(cmd->alua_nonop_delay);
 889	return 0;
 890}
 891EXPORT_SYMBOL(core_alua_check_nonop_delay);
 892
 893static int core_alua_write_tpg_metadata(
 894	const char *path,
 895	unsigned char *md_buf,
 896	u32 md_buf_len)
 897{
 898	struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
 899	loff_t pos = 0;
 900	int ret;
 901
 902	if (IS_ERR(file)) {
 903		pr_err("filp_open(%s) for ALUA metadata failed\n", path);
 904		return -ENODEV;
 905	}
 906	ret = kernel_write(file, md_buf, md_buf_len, &pos);
 907	if (ret < 0)
 908		pr_err("Error writing ALUA metadata file: %s\n", path);
 909	fput(file);
 910	return (ret < 0) ? -EIO : 0;
 911}
 912
 913/*
 914 * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
 915 */
 916static int core_alua_update_tpg_primary_metadata(
 917	struct t10_alua_tg_pt_gp *tg_pt_gp)
 918{
 919	unsigned char *md_buf;
 920	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
 921	char *path;
 922	int len, rc;
 923
 
 
 924	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
 925	if (!md_buf) {
 926		pr_err("Unable to allocate buf for ALUA metadata\n");
 927		return -ENOMEM;
 928	}
 929
 930	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
 931			"tg_pt_gp_id=%hu\n"
 932			"alua_access_state=0x%02x\n"
 933			"alua_access_status=0x%02x\n",
 934			tg_pt_gp->tg_pt_gp_id,
 935			tg_pt_gp->tg_pt_gp_alua_access_state,
 936			tg_pt_gp->tg_pt_gp_alua_access_status);
 937
 938	rc = -ENOMEM;
 939	path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
 940			&wwn->unit_serial[0],
 941			config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
 942	if (path) {
 943		rc = core_alua_write_tpg_metadata(path, md_buf, len);
 944		kfree(path);
 945	}
 946	kfree(md_buf);
 947	return rc;
 948}
 949
 950static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 951{
 952	struct se_dev_entry *se_deve;
 953	struct se_lun *lun;
 954	struct se_lun_acl *lacl;
 955
 956	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 957	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
 958				lun_tg_pt_gp_link) {
 959		/*
 960		 * After an implicit target port asymmetric access state
 961		 * change, a device server shall establish a unit attention
 962		 * condition for the initiator port associated with every I_T
 963		 * nexus with the additional sense code set to ASYMMETRIC
 964		 * ACCESS STATE CHANGED.
 965		 *
 966		 * After an explicit target port asymmetric access state
 967		 * change, a device server shall establish a unit attention
 968		 * condition with the additional sense code set to ASYMMETRIC
 969		 * ACCESS STATE CHANGED for the initiator port associated with
 970		 * every I_T nexus other than the I_T nexus on which the SET
 971		 * TARGET PORT GROUPS command
 972		 */
 973		if (!percpu_ref_tryget_live(&lun->lun_ref))
 974			continue;
 975		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 976
 977		spin_lock(&lun->lun_deve_lock);
 978		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
 979			lacl = rcu_dereference_check(se_deve->se_lun_acl,
 980					lockdep_is_held(&lun->lun_deve_lock));
 981
 982			/*
 983			 * spc4r37 p.242:
 984			 * After an explicit target port asymmetric access
 985			 * state change, a device server shall establish a
 986			 * unit attention condition with the additional sense
 987			 * code set to ASYMMETRIC ACCESS STATE CHANGED for
 988			 * the initiator port associated with every I_T nexus
 989			 * other than the I_T nexus on which the SET TARGET
 990			 * PORT GROUPS command was received.
 991			 */
 992			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
 993			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
 994			   (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
 995			    (tg_pt_gp->tg_pt_gp_alua_lun == lun))
 996				continue;
 997
 998			/*
 999			 * se_deve->se_lun_acl pointer may be NULL for a
1000			 * entry created without explicit Node+MappedLUN ACLs
1001			 */
1002			if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
1003			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
1004				continue;
1005
1006			core_scsi3_ua_allocate(se_deve, 0x2A,
1007				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
1008		}
1009		spin_unlock(&lun->lun_deve_lock);
1010
1011		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1012		percpu_ref_put(&lun->lun_ref);
1013	}
1014	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1015}
1016
1017static int core_alua_do_transition_tg_pt(
1018	struct t10_alua_tg_pt_gp *tg_pt_gp,
1019	int new_state,
1020	int explicit)
1021{
1022	int prev_state;
1023
1024	mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1025	/* Nothing to be done here */
1026	if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
1027		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1028		return 0;
1029	}
1030
1031	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
1032		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1033		return -EAGAIN;
1034	}
1035
1036	/*
1037	 * Save the old primary ALUA access state, and set the current state
1038	 * to ALUA_ACCESS_STATE_TRANSITION.
1039	 */
1040	prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
1041	tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
1042	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
1043				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1044				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1045
1046	core_alua_queue_state_change_ua(tg_pt_gp);
1047
1048	if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
1049		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1050		return 0;
1051	}
1052
1053	/*
1054	 * Check for the optional ALUA primary state transition delay
1055	 */
1056	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
1057		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1058
1059	/*
1060	 * Set the current primary ALUA access state to the requested new state
1061	 */
1062	tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
1063
1064	/*
1065	 * Update the ALUA metadata buf that has been allocated in
1066	 * core_alua_do_port_transition(), this metadata will be written
1067	 * to struct file.
1068	 *
1069	 * Note that there is the case where we do not want to update the
1070	 * metadata when the saved metadata is being parsed in userspace
1071	 * when setting the existing port access state and access status.
1072	 *
1073	 * Also note that the failure to write out the ALUA metadata to
1074	 * struct file does NOT affect the actual ALUA transition.
1075	 */
1076	if (tg_pt_gp->tg_pt_gp_write_metadata) {
1077		core_alua_update_tpg_primary_metadata(tg_pt_gp);
1078	}
1079
1080	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1081		" from primary access state %s to %s\n", (explicit) ? "explicit" :
1082		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1083		tg_pt_gp->tg_pt_gp_id,
1084		core_alua_dump_state(prev_state),
1085		core_alua_dump_state(new_state));
1086
1087	core_alua_queue_state_change_ua(tg_pt_gp);
1088
1089	mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1090	return 0;
1091}
1092
1093int core_alua_do_port_transition(
1094	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
1095	struct se_device *l_dev,
1096	struct se_lun *l_lun,
1097	struct se_node_acl *l_nacl,
1098	int new_state,
1099	int explicit)
1100{
1101	struct se_device *dev;
1102	struct t10_alua_lu_gp *lu_gp;
1103	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
1104	struct t10_alua_tg_pt_gp *tg_pt_gp;
1105	int primary, valid_states, rc = 0;
1106
1107	if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
1108		return -ENODEV;
1109
1110	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
1111	if (core_alua_check_transition(new_state, valid_states, &primary,
1112				       explicit) != 0)
1113		return -EINVAL;
1114
1115	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
1116	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
1117	lu_gp = local_lu_gp_mem->lu_gp;
1118	atomic_inc(&lu_gp->lu_gp_ref_cnt);
1119	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
1120	/*
1121	 * For storage objects that are members of the 'default_lu_gp',
1122	 * we only do transition on the passed *l_tp_pt_gp, and not
1123	 * on all of the matching target port groups IDs in default_lu_gp.
1124	 */
1125	if (!lu_gp->lu_gp_id) {
1126		/*
1127		 * core_alua_do_transition_tg_pt() will always return
1128		 * success.
1129		 */
1130		l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
1131		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1132		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
1133						   new_state, explicit);
1134		atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1135		return rc;
1136	}
1137	/*
1138	 * For all other LU groups aside from 'default_lu_gp', walk all of
1139	 * the associated storage objects looking for a matching target port
1140	 * group ID from the local target port group.
1141	 */
1142	spin_lock(&lu_gp->lu_gp_lock);
1143	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
1144				lu_gp_mem_list) {
1145
1146		dev = lu_gp_mem->lu_gp_mem_dev;
1147		atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1148		spin_unlock(&lu_gp->lu_gp_lock);
1149
1150		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1151		list_for_each_entry(tg_pt_gp,
1152				&dev->t10_alua.tg_pt_gps_list,
1153				tg_pt_gp_list) {
1154
1155			if (!tg_pt_gp->tg_pt_gp_valid_id)
1156				continue;
1157			/*
1158			 * If the target behavior port asymmetric access state
1159			 * is changed for any target port group accessible via
1160			 * a logical unit within a LU group, the target port
1161			 * behavior group asymmetric access states for the same
1162			 * target port group accessible via other logical units
1163			 * in that LU group will also change.
1164			 */
1165			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
1166				continue;
1167
1168			if (l_tg_pt_gp == tg_pt_gp) {
1169				tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
1170				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1171			} else {
1172				tg_pt_gp->tg_pt_gp_alua_lun = NULL;
1173				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
1174			}
1175			atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1176			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1177			/*
1178			 * core_alua_do_transition_tg_pt() will always return
1179			 * success.
1180			 */
1181			rc = core_alua_do_transition_tg_pt(tg_pt_gp,
1182					new_state, explicit);
1183
1184			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1185			atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1186			if (rc)
1187				break;
1188		}
1189		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1190
1191		spin_lock(&lu_gp->lu_gp_lock);
1192		atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1193	}
1194	spin_unlock(&lu_gp->lu_gp_lock);
1195
1196	if (!rc) {
1197		pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
1198			 " Group IDs: %hu %s transition to primary state: %s\n",
1199			 config_item_name(&lu_gp->lu_gp_group.cg_item),
1200			 l_tg_pt_gp->tg_pt_gp_id,
1201			 (explicit) ? "explicit" : "implicit",
1202			 core_alua_dump_state(new_state));
1203	}
1204
1205	atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1206	return rc;
1207}
1208
1209static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
1210{
1211	struct se_portal_group *se_tpg = lun->lun_tpg;
1212	unsigned char *md_buf;
1213	char *path;
1214	int len, rc;
1215
1216	mutex_lock(&lun->lun_tg_pt_md_mutex);
1217
1218	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
1219	if (!md_buf) {
1220		pr_err("Unable to allocate buf for ALUA metadata\n");
1221		rc = -ENOMEM;
1222		goto out_unlock;
1223	}
1224
1225	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
1226			"alua_tg_pt_status=0x%02x\n",
1227			atomic_read(&lun->lun_tg_pt_secondary_offline),
1228			lun->lun_tg_pt_secondary_stat);
1229
1230	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
1231		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
1232				db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
1233				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1234				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
1235				lun->unpacked_lun);
1236	} else {
1237		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
1238				db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
1239				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1240				lun->unpacked_lun);
1241	}
1242	if (!path) {
1243		rc = -ENOMEM;
1244		goto out_free;
1245	}
1246
1247	rc = core_alua_write_tpg_metadata(path, md_buf, len);
1248	kfree(path);
1249out_free:
1250	kfree(md_buf);
1251out_unlock:
1252	mutex_unlock(&lun->lun_tg_pt_md_mutex);
1253	return rc;
1254}
1255
1256static int core_alua_set_tg_pt_secondary_state(
1257	struct se_lun *lun,
1258	int explicit,
1259	int offline)
1260{
1261	struct t10_alua_tg_pt_gp *tg_pt_gp;
1262	int trans_delay_msecs;
1263
1264	spin_lock(&lun->lun_tg_pt_gp_lock);
1265	tg_pt_gp = lun->lun_tg_pt_gp;
1266	if (!tg_pt_gp) {
1267		spin_unlock(&lun->lun_tg_pt_gp_lock);
1268		pr_err("Unable to complete secondary state"
1269				" transition\n");
1270		return -EINVAL;
1271	}
1272	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1273	/*
1274	 * Set the secondary ALUA target port access state to OFFLINE
1275	 * or release the previously secondary state for struct se_lun
1276	 */
1277	if (offline)
1278		atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
1279	else
1280		atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
1281
1282	lun->lun_tg_pt_secondary_stat = (explicit) ?
1283			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1284			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1285
1286	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1287		" to secondary access state: %s\n", (explicit) ? "explicit" :
1288		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1289		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1290
1291	spin_unlock(&lun->lun_tg_pt_gp_lock);
1292	/*
1293	 * Do the optional transition delay after we set the secondary
1294	 * ALUA access state.
1295	 */
1296	if (trans_delay_msecs != 0)
1297		msleep_interruptible(trans_delay_msecs);
1298	/*
1299	 * See if we need to update the ALUA fabric port metadata for
1300	 * secondary state and status
1301	 */
1302	if (lun->lun_tg_pt_secondary_write_md)
1303		core_alua_update_tpg_secondary_metadata(lun);
1304
1305	return 0;
1306}
1307
1308struct t10_alua_lba_map *
1309core_alua_allocate_lba_map(struct list_head *list,
1310			   u64 first_lba, u64 last_lba)
1311{
1312	struct t10_alua_lba_map *lba_map;
1313
1314	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1315	if (!lba_map) {
1316		pr_err("Unable to allocate struct t10_alua_lba_map\n");
1317		return ERR_PTR(-ENOMEM);
1318	}
1319	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1320	lba_map->lba_map_first_lba = first_lba;
1321	lba_map->lba_map_last_lba = last_lba;
1322
1323	list_add_tail(&lba_map->lba_map_list, list);
1324	return lba_map;
1325}
1326
1327int
1328core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1329			       int pg_id, int state)
1330{
1331	struct t10_alua_lba_map_member *lba_map_mem;
1332
1333	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1334			    lba_map_mem_list) {
1335		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1336			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1337			return -EINVAL;
1338		}
1339	}
1340
1341	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1342	if (!lba_map_mem) {
1343		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1344		return -ENOMEM;
1345	}
1346	lba_map_mem->lba_map_mem_alua_state = state;
1347	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1348
1349	list_add_tail(&lba_map_mem->lba_map_mem_list,
1350		      &lba_map->lba_map_mem_list);
1351	return 0;
1352}
1353
1354void
1355core_alua_free_lba_map(struct list_head *lba_list)
1356{
1357	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1358	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1359
1360	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1361				 lba_map_list) {
1362		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1363					 &lba_map->lba_map_mem_list,
1364					 lba_map_mem_list) {
1365			list_del(&lba_map_mem->lba_map_mem_list);
1366			kmem_cache_free(t10_alua_lba_map_mem_cache,
1367					lba_map_mem);
1368		}
1369		list_del(&lba_map->lba_map_list);
1370		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1371	}
1372}
1373
1374void
1375core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1376		      int segment_size, int segment_mult)
1377{
1378	struct list_head old_lba_map_list;
1379	struct t10_alua_tg_pt_gp *tg_pt_gp;
1380	int activate = 0, supported;
1381
1382	INIT_LIST_HEAD(&old_lba_map_list);
1383	spin_lock(&dev->t10_alua.lba_map_lock);
1384	dev->t10_alua.lba_map_segment_size = segment_size;
1385	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1386	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1387	if (lba_map_list) {
1388		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1389		activate = 1;
1390	}
1391	spin_unlock(&dev->t10_alua.lba_map_lock);
1392	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1393	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1394			    tg_pt_gp_list) {
1395
1396		if (!tg_pt_gp->tg_pt_gp_valid_id)
1397			continue;
1398		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1399		if (activate)
1400			supported |= ALUA_LBD_SUP;
1401		else
1402			supported &= ~ALUA_LBD_SUP;
1403		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1404	}
1405	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1406	core_alua_free_lba_map(&old_lba_map_list);
1407}
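
/*
 * Editor's sketch (not part of the driver): one plausible way the LBA map
 * helpers above compose.  It builds a single-segment referral map pointing
 * at target port group 1 and installs it on a device; the pg_id, LBA range,
 * and segment size/multiplier are arbitrary example values.
 */
static __maybe_unused int core_alua_lba_map_example(struct se_device *dev)
{
	struct t10_alua_lba_map *map;
	LIST_HEAD(lba_list);
	int rc;

	map = core_alua_allocate_lba_map(&lba_list, 0, 0xffff);
	if (IS_ERR(map))
		return PTR_ERR(map);

	rc = core_alua_allocate_lba_map_mem(map, 1,
			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
	if (rc) {
		core_alua_free_lba_map(&lba_list);
		return rc;
	}

	/*
	 * Splices the new list into the device, frees whatever map was
	 * installed before, and sets ALUA_LBD_SUP on every valid target
	 * port group of the device.
	 */
	core_alua_set_lba_map(dev, &lba_list, 0x10000, 0);
	return 0;
}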
1408
1409struct t10_alua_lu_gp *
1410core_alua_allocate_lu_gp(const char *name, int def_group)
1411{
1412	struct t10_alua_lu_gp *lu_gp;
1413
1414	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1415	if (!lu_gp) {
1416		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1417		return ERR_PTR(-ENOMEM);
1418	}
1419	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1420	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1421	spin_lock_init(&lu_gp->lu_gp_lock);
1422	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1423
1424	if (def_group) {
1425		lu_gp->lu_gp_id = alua_lu_gps_counter++;
1426		lu_gp->lu_gp_valid_id = 1;
1427		alua_lu_gps_count++;
1428	}
1429
1430	return lu_gp;
1431}
1432
1433int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1434{
1435	struct t10_alua_lu_gp *lu_gp_tmp;
1436	u16 lu_gp_id_tmp;
1437	/*
1438	 * The lu_gp->lu_gp_id may only be set once.
1439	 */
1440	if (lu_gp->lu_gp_valid_id) {
1441		pr_warn("ALUA LU Group already has a valid ID,"
1442			" ignoring request\n");
1443		return -EINVAL;
1444	}
1445
1446	spin_lock(&lu_gps_lock);
1447	if (alua_lu_gps_count == 0x0000ffff) {
1448		pr_err("Maximum ALUA alua_lu_gps_count:"
1449				" 0x0000ffff reached\n");
1450		spin_unlock(&lu_gps_lock);
1451		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1452		return -ENOSPC;
1453	}
1454again:
1455	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1456				alua_lu_gps_counter++;
1457
1458	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1459		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1460			if (!lu_gp_id)
1461				goto again;
1462
1463			pr_warn("ALUA Logical Unit Group ID: %hu"
1464				" already exists, ignoring request\n",
1465				lu_gp_id);
1466			spin_unlock(&lu_gps_lock);
1467			return -EINVAL;
1468		}
1469	}
1470
1471	lu_gp->lu_gp_id = lu_gp_id_tmp;
1472	lu_gp->lu_gp_valid_id = 1;
1473	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1474	alua_lu_gps_count++;
1475	spin_unlock(&lu_gps_lock);
1476
1477	return 0;
1478}
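
/*
 * Editor's sketch: how the two functions above pair up in a configfs-style
 * creation path.  Passing 0 as lu_gp_id requests automatic assignment from
 * alua_lu_gps_counter; the group name is an example value.
 */
static __maybe_unused struct t10_alua_lu_gp *core_alua_lu_gp_example(void)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = core_alua_allocate_lu_gp("example_lu_gp", 0);
	if (IS_ERR(lu_gp))
		return NULL;
	/*
	 * Note the asymmetric error handling above: on the -ENOSPC path
	 * core_alua_set_lu_gp_id() has already freed lu_gp, so the caller
	 * must not free it again.
	 */
	if (core_alua_set_lu_gp_id(lu_gp, 0) < 0)
		return NULL;
	return lu_gp;
}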
1479
1480static struct t10_alua_lu_gp_member *
1481core_alua_allocate_lu_gp_mem(struct se_device *dev)
1482{
1483	struct t10_alua_lu_gp_member *lu_gp_mem;
1484
1485	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1486	if (!lu_gp_mem) {
1487		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1488		return ERR_PTR(-ENOMEM);
1489	}
1490	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1491	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1492	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1493
1494	lu_gp_mem->lu_gp_mem_dev = dev;
1495	dev->dev_alua_lu_gp_mem = lu_gp_mem;
1496
1497	return lu_gp_mem;
1498}
1499
1500void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1501{
1502	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1503	/*
1504	 * Once we have reached this point, config_item_put() has
1505	 * already been called from target_core_alua_drop_lu_gp().
1506	 *
1507	 * Here, we remove the *lu_gp from the global list so that
1508	 * no associations can be made while we are releasing
1509	 * struct t10_alua_lu_gp.
1510	 */
1511	spin_lock(&lu_gps_lock);
1512	list_del(&lu_gp->lu_gp_node);
1513	alua_lu_gps_count--;
1514	spin_unlock(&lu_gps_lock);
1515	/*
1516	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1517	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1518	 * released with core_alua_put_lu_gp_from_name()
1519	 */
1520	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1521		cpu_relax();
1522	/*
1523	 * Release reference to struct t10_alua_lu_gp * from all associated
1524	 * struct se_device.
1525	 */
1526	spin_lock(&lu_gp->lu_gp_lock);
1527	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1528				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1529		if (lu_gp_mem->lu_gp_assoc) {
1530			list_del(&lu_gp_mem->lu_gp_mem_list);
1531			lu_gp->lu_gp_members--;
1532			lu_gp_mem->lu_gp_assoc = 0;
1533		}
1534		spin_unlock(&lu_gp->lu_gp_lock);
1535		/*
1536		 *
1537		 * lu_gp_mem is associated with a single
1538		 * struct se_device->dev_alua_lu_gp_mem, and is released when
1539		 * struct se_device is released via core_alua_free_lu_gp_mem().
1540		 *
1541		 * If the passed lu_gp does NOT match the default_lu_gp, assume
1542		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1543		 */
1544		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1545		if (lu_gp != default_lu_gp)
1546			__core_alua_attach_lu_gp_mem(lu_gp_mem,
1547					default_lu_gp);
1548		else
1549			lu_gp_mem->lu_gp = NULL;
1550		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1551
1552		spin_lock(&lu_gp->lu_gp_lock);
1553	}
1554	spin_unlock(&lu_gp->lu_gp_lock);
1555
1556	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1557}
1558
1559void core_alua_free_lu_gp_mem(struct se_device *dev)
1560{
1561	struct t10_alua_lu_gp *lu_gp;
1562	struct t10_alua_lu_gp_member *lu_gp_mem;
1563
1564	lu_gp_mem = dev->dev_alua_lu_gp_mem;
1565	if (!lu_gp_mem)
1566		return;
1567
1568	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1569		cpu_relax();
1570
1571	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1572	lu_gp = lu_gp_mem->lu_gp;
1573	if (lu_gp) {
1574		spin_lock(&lu_gp->lu_gp_lock);
1575		if (lu_gp_mem->lu_gp_assoc) {
1576			list_del(&lu_gp_mem->lu_gp_mem_list);
1577			lu_gp->lu_gp_members--;
1578			lu_gp_mem->lu_gp_assoc = 0;
1579		}
1580		spin_unlock(&lu_gp->lu_gp_lock);
1581		lu_gp_mem->lu_gp = NULL;
1582	}
1583	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1584
1585	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1586}
1587
1588struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1589{
1590	struct t10_alua_lu_gp *lu_gp;
1591	struct config_item *ci;
1592
1593	spin_lock(&lu_gps_lock);
1594	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1595		if (!lu_gp->lu_gp_valid_id)
1596			continue;
1597		ci = &lu_gp->lu_gp_group.cg_item;
1598		if (!strcmp(config_item_name(ci), name)) {
1599			atomic_inc(&lu_gp->lu_gp_ref_cnt);
1600			spin_unlock(&lu_gps_lock);
1601			return lu_gp;
1602		}
1603	}
1604	spin_unlock(&lu_gps_lock);
1605
1606	return NULL;
1607}
1608
1609void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1610{
1611	spin_lock(&lu_gps_lock);
1612	atomic_dec(&lu_gp->lu_gp_ref_cnt);
1613	spin_unlock(&lu_gps_lock);
1614}
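
/*
 * Editor's sketch: the get/put pairing the two lookup helpers above are
 * designed for.  The reference taken by the lookup is what
 * core_alua_free_lu_gp() busy-waits on before releasing the group.
 */
static __maybe_unused void core_alua_lu_gp_lookup_example(void)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = core_alua_get_lu_gp_by_name("example_lu_gp");
	if (!lu_gp)
		return;
	/* ... lu_gp is pinned against release here ... */
	core_alua_put_lu_gp_from_name(lu_gp);
}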
1615
1616/*
1617 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1618 */
1619void __core_alua_attach_lu_gp_mem(
1620	struct t10_alua_lu_gp_member *lu_gp_mem,
1621	struct t10_alua_lu_gp *lu_gp)
1622{
1623	spin_lock(&lu_gp->lu_gp_lock);
1624	lu_gp_mem->lu_gp = lu_gp;
1625	lu_gp_mem->lu_gp_assoc = 1;
1626	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1627	lu_gp->lu_gp_members++;
1628	spin_unlock(&lu_gp->lu_gp_lock);
1629}
1630
1631/*
1632 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1633 */
1634void __core_alua_drop_lu_gp_mem(
1635	struct t10_alua_lu_gp_member *lu_gp_mem,
1636	struct t10_alua_lu_gp *lu_gp)
1637{
1638	spin_lock(&lu_gp->lu_gp_lock);
1639	list_del(&lu_gp_mem->lu_gp_mem_list);
1640	lu_gp_mem->lu_gp = NULL;
1641	lu_gp_mem->lu_gp_assoc = 0;
1642	lu_gp->lu_gp_members--;
1643	spin_unlock(&lu_gp->lu_gp_lock);
1644}
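
/*
 * Editor's sketch of the locking convention stated above: the caller holds
 * lu_gp_mem->lu_gp_mem_lock across both helpers, while each helper takes
 * the group's lu_gp_lock internally.
 */
static __maybe_unused void core_alua_move_lu_gp_mem_example(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *new_lu_gp)
{
	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	if (lu_gp_mem->lu_gp)
		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp_mem->lu_gp);
	__core_alua_attach_lu_gp_mem(lu_gp_mem, new_lu_gp);
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
}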
1645
1646struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1647		const char *name, int def_group)
1648{
1649	struct t10_alua_tg_pt_gp *tg_pt_gp;
1650
1651	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1652	if (!tg_pt_gp) {
1653		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1654		return NULL;
1655	}
1656	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1657	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1658	mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
1659	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1660	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1661	tg_pt_gp->tg_pt_gp_dev = dev;
1662	tg_pt_gp->tg_pt_gp_alua_access_state =
1663			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1664	/*
1665	 * Enable both explicit and implicit ALUA support by default
1666	 */
1667	tg_pt_gp->tg_pt_gp_alua_access_type =
1668			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1669	/*
1670	 * Set the default Active/NonOptimized Delay in milliseconds
1671	 */
1672	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1673	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1674	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1675
1676	/*
1677	 * Enable all supported states
1678	 */
1679	tg_pt_gp->tg_pt_gp_alua_supported_states =
1680	    ALUA_T_SUP | ALUA_O_SUP |
1681	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1682
1683	if (def_group) {
1684		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1685		tg_pt_gp->tg_pt_gp_id =
1686				dev->t10_alua.alua_tg_pt_gps_counter++;
1687		tg_pt_gp->tg_pt_gp_valid_id = 1;
1688		dev->t10_alua.alua_tg_pt_gps_count++;
1689		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1690			      &dev->t10_alua.tg_pt_gps_list);
1691		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1692	}
1693
1694	return tg_pt_gp;
1695}
1696
1697int core_alua_set_tg_pt_gp_id(
1698	struct t10_alua_tg_pt_gp *tg_pt_gp,
1699	u16 tg_pt_gp_id)
1700{
1701	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1702	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1703	u16 tg_pt_gp_id_tmp;
1704
1705	/*
1706	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
1707	 */
1708	if (tg_pt_gp->tg_pt_gp_valid_id) {
1709		pr_warn("ALUA TG PT Group already has a valid ID,"
1710			" ignoring request\n");
1711		return -EINVAL;
1712	}
1713
1714	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1715	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1716		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1717			" 0x0000ffff reached\n");
1718		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1719		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1720		return -ENOSPC;
1721	}
1722again:
1723	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1724			dev->t10_alua.alua_tg_pt_gps_counter++;
1725
1726	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1727			tg_pt_gp_list) {
1728		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1729			if (!tg_pt_gp_id)
1730				goto again;
1731
1732			pr_err("ALUA Target Port Group ID: %hu already"
1733				" exists, ignoring request\n", tg_pt_gp_id);
1734			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1735			return -EINVAL;
1736		}
1737	}
1738
1739	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1740	tg_pt_gp->tg_pt_gp_valid_id = 1;
1741	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1742			&dev->t10_alua.tg_pt_gps_list);
1743	dev->t10_alua.alua_tg_pt_gps_count++;
1744	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1745
1746	return 0;
1747}
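
/*
 * Editor's sketch: creating a named, non-default target port group and
 * auto-assigning its ID, as a configfs-style creation path would.  The
 * alias is an example value.
 */
static __maybe_unused struct t10_alua_tg_pt_gp *
core_alua_tg_pt_gp_example(struct se_device *dev)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "example_tg_pt_gp", 0);
	if (!tg_pt_gp)
		return NULL;
	/*
	 * As with LU groups, the set-ID call frees the group itself on the
	 * -ENOSPC path, so no kmem_cache_free() is needed here.
	 */
	if (core_alua_set_tg_pt_gp_id(tg_pt_gp, 0) < 0)
		return NULL;
	return tg_pt_gp;
}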
1748
1749void core_alua_free_tg_pt_gp(
1750	struct t10_alua_tg_pt_gp *tg_pt_gp)
1751{
1752	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1753	struct se_lun *lun, *next;
1754
1755	/*
1756	 * Once we have reached this point, config_item_put() has already
1757	 * been called from target_core_alua_drop_tg_pt_gp().
1758	 *
1759	 * Here we remove *tg_pt_gp from the device's tg_pt_gps_list so that
1760	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1761	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1762	 */
1763	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1764	list_del(&tg_pt_gp->tg_pt_gp_list);
1765	dev->t10_alua.alua_tg_pt_gps_count--;
1766	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1767
1768	/*
1769	 * Allow a struct t10_alua_tg_pt_gp * referenced by
1770	 * core_alua_get_tg_pt_gp_by_name() in
1771	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1772	 * to be released with core_alua_put_tg_pt_gp_from_name().
1773	 */
1774	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1775		cpu_relax();
1776
1777	/*
1778	 * Release reference to struct t10_alua_tg_pt_gp from all associated
1779	 * struct se_lun.
1780	 */
1781	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1782	list_for_each_entry_safe(lun, next,
1783			&tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
1784		list_del_init(&lun->lun_tg_pt_gp_link);
1785		tg_pt_gp->tg_pt_gp_members--;
1786
1787		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1788		/*
1789		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1790		 * assume we want to re-associate a given tg_pt_gp_mem with
1791		 * default_tg_pt_gp.
1792		 */
1793		spin_lock(&lun->lun_tg_pt_gp_lock);
1794		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1795			__target_attach_tg_pt_gp(lun,
1796					dev->t10_alua.default_tg_pt_gp);
1797		} else
1798			lun->lun_tg_pt_gp = NULL;
1799		spin_unlock(&lun->lun_tg_pt_gp_lock);
1800
1801		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1802	}
1803	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1804
1805	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1806}
1807
1808static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1809		struct se_device *dev, const char *name)
1810{
1811	struct t10_alua_tg_pt_gp *tg_pt_gp;
1812	struct config_item *ci;
1813
1814	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1815	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1816			tg_pt_gp_list) {
1817		if (!tg_pt_gp->tg_pt_gp_valid_id)
1818			continue;
1819		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1820		if (!strcmp(config_item_name(ci), name)) {
1821			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1822			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1823			return tg_pt_gp;
1824		}
1825	}
1826	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1827
1828	return NULL;
1829}
1830
1831static void core_alua_put_tg_pt_gp_from_name(
1832	struct t10_alua_tg_pt_gp *tg_pt_gp)
1833{
1834	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1835
1836	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1837	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1838	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1839}
1840
1841static void __target_attach_tg_pt_gp(struct se_lun *lun,
1842		struct t10_alua_tg_pt_gp *tg_pt_gp)
1843{
1844	struct se_dev_entry *se_deve;
1845
1846	assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1847
1848	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1849	lun->lun_tg_pt_gp = tg_pt_gp;
1850	list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
1851	tg_pt_gp->tg_pt_gp_members++;
1852	spin_lock(&lun->lun_deve_lock);
1853	list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
1854		core_scsi3_ua_allocate(se_deve, 0x3f,
1855				       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
1856	spin_unlock(&lun->lun_deve_lock);
1857	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1858}
1859
1860void target_attach_tg_pt_gp(struct se_lun *lun,
1861		struct t10_alua_tg_pt_gp *tg_pt_gp)
1862{
1863	spin_lock(&lun->lun_tg_pt_gp_lock);
1864	__target_attach_tg_pt_gp(lun, tg_pt_gp);
1865	spin_unlock(&lun->lun_tg_pt_gp_lock);
1866}
1867
1868static void __target_detach_tg_pt_gp(struct se_lun *lun,
1869		struct t10_alua_tg_pt_gp *tg_pt_gp)
1870{
1871	assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1872
1873	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1874	list_del_init(&lun->lun_tg_pt_gp_link);
1875	tg_pt_gp->tg_pt_gp_members--;
1876	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1877
1878	lun->lun_tg_pt_gp = NULL;
1879}
1880
1881void target_detach_tg_pt_gp(struct se_lun *lun)
1882{
1883	struct t10_alua_tg_pt_gp *tg_pt_gp;
1884
1885	spin_lock(&lun->lun_tg_pt_gp_lock);
1886	tg_pt_gp = lun->lun_tg_pt_gp;
1887	if (tg_pt_gp)
1888		__target_detach_tg_pt_gp(lun, tg_pt_gp);
1889	spin_unlock(&lun->lun_tg_pt_gp_lock);
1890}
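
/*
 * Editor's sketch: an atomic re-association of a LUN from its current
 * group to new_gp, using the __ variants under a single hold of
 * lun->lun_tg_pt_gp_lock, exactly as core_alua_store_tg_pt_gp_info()
 * does below.
 */
static __maybe_unused void target_move_tg_pt_gp_example(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *new_gp)
{
	spin_lock(&lun->lun_tg_pt_gp_lock);
	if (lun->lun_tg_pt_gp)
		__target_detach_tg_pt_gp(lun, lun->lun_tg_pt_gp);
	__target_attach_tg_pt_gp(lun, new_gp);
	spin_unlock(&lun->lun_tg_pt_gp_lock);
}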
1891
1892ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1893{
1894	struct config_item *tg_pt_ci;
1895	struct t10_alua_tg_pt_gp *tg_pt_gp;
1896	ssize_t len = 0;
1897
1898	spin_lock(&lun->lun_tg_pt_gp_lock);
1899	tg_pt_gp = lun->lun_tg_pt_gp;
1900	if (tg_pt_gp) {
1901		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1902		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1903			" %hu\nTG Port Primary Access State: %s\nTG Port "
1904			"Primary Access Status: %s\nTG Port Secondary Access"
1905			" State: %s\nTG Port Secondary Access Status: %s\n",
1906			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1907			core_alua_dump_state(
1908				tg_pt_gp->tg_pt_gp_alua_access_state),
1909			core_alua_dump_status(
1910				tg_pt_gp->tg_pt_gp_alua_access_status),
1911			atomic_read(&lun->lun_tg_pt_secondary_offline) ?
1912			"Offline" : "None",
1913			core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
1914	}
1915	spin_unlock(&lun->lun_tg_pt_gp_lock);
1916
1917	return len;
1918}
1919
1920ssize_t core_alua_store_tg_pt_gp_info(
1921	struct se_lun *lun,
1922	const char *page,
1923	size_t count)
1924{
1925	struct se_portal_group *tpg = lun->lun_tpg;
1926	/*
1927	 * rcu_dereference_raw protected by se_lun->lun_group symlink
1928	 * reference to se_device->dev_group.
1929	 */
1930	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
1931	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1932	unsigned char buf[TG_PT_GROUP_NAME_BUF];
1933	int move = 0;
1934
1935	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
1936	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
1937		return -ENODEV;
1938
1939	if (count > TG_PT_GROUP_NAME_BUF) {
1940		pr_err("ALUA Target Port Group alias too large!\n");
1941		return -EINVAL;
1942	}
1943	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1944	memcpy(buf, page, count);
1945	/*
1946	 * Any ALUA target port group alias besides "NULL" means we will be
1947	 * making a new group association.
1948	 */
1949	if (strcmp(strstrip(buf), "NULL")) {
1950		/*
1951		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1952		 * struct t10_alua_tg_pt_gp.  This reference is released with
1953		 * core_alua_put_tg_pt_gp_from_name() below.
1954		 */
1955		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
1956					strstrip(buf));
1957		if (!tg_pt_gp_new)
1958			return -ENODEV;
1959	}
1960
1961	spin_lock(&lun->lun_tg_pt_gp_lock);
1962	tg_pt_gp = lun->lun_tg_pt_gp;
1963	if (tg_pt_gp) {
1964		/*
1965		 * Clearing an existing tg_pt_gp association, and replacing
1966		 * with the default_tg_pt_gp.
1967		 */
1968		if (!tg_pt_gp_new) {
1969			pr_debug("Target_Core_ConfigFS: Moving"
1970				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
1971				" alua/%s, ID: %hu back to"
1972				" default_tg_pt_gp\n",
1973				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1974				tpg->se_tpg_tfo->tpg_get_tag(tpg),
1975				config_item_name(&lun->lun_group.cg_item),
1976				config_item_name(
1977					&tg_pt_gp->tg_pt_gp_group.cg_item),
1978				tg_pt_gp->tg_pt_gp_id);
1979
1980			__target_detach_tg_pt_gp(lun, tg_pt_gp);
1981			__target_attach_tg_pt_gp(lun,
1982					dev->t10_alua.default_tg_pt_gp);
1983			spin_unlock(&lun->lun_tg_pt_gp_lock);
1984
1985			return count;
1986		}
1987		__target_detach_tg_pt_gp(lun, tg_pt_gp);
1988		move = 1;
1989	}
1990
1991	__target_attach_tg_pt_gp(lun, tg_pt_gp_new);
1992	spin_unlock(&lun->lun_tg_pt_gp_lock);
1993	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1994		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
1995		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1996		tpg->se_tpg_tfo->tpg_get_tag(tpg),
1997		config_item_name(&lun->lun_group.cg_item),
1998		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1999		tg_pt_gp_new->tg_pt_gp_id);
2000
2001	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
2002	return count;
2003}
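
/*
 * Editor's note, usage sketch from userspace (the configfs paths are
 * illustrative and depend on the fabric driver in use):
 *
 *   # associate a fabric LUN with an existing target port group
 *   echo example_tg_pt_gp > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *
 *   # writing "NULL" detaches it and falls back to default_tg_pt_gp
 *   echo NULL > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 */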
2004
2005ssize_t core_alua_show_access_type(
2006	struct t10_alua_tg_pt_gp *tg_pt_gp,
2007	char *page)
2008{
2009	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
2010	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
2011		return sprintf(page, "Implicit and Explicit\n");
2012	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
2013		return sprintf(page, "Implicit\n");
2014	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2015		return sprintf(page, "Explicit\n");
2016	else
2017		return sprintf(page, "None\n");
2018}
2019
2020ssize_t core_alua_store_access_type(
2021	struct t10_alua_tg_pt_gp *tg_pt_gp,
2022	const char *page,
2023	size_t count)
2024{
2025	unsigned long tmp;
2026	int ret;
2027
2028	ret = kstrtoul(page, 0, &tmp);
2029	if (ret < 0) {
2030		pr_err("Unable to extract alua_access_type\n");
2031		return ret;
2032	}
2033	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2034		pr_err("Illegal value for alua_access_type:"
2035				" %lu\n", tmp);
2036		return -EINVAL;
2037	}
2038	if (tmp == 3)
2039		tg_pt_gp->tg_pt_gp_alua_access_type =
2040			TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2041	else if (tmp == 2)
2042		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2043	else if (tmp == 1)
2044		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2045	else
2046		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2047
2048	return count;
2049}
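
/*
 * Editor's note: the value mapping implemented above is
 *   0 = None, 1 = Implicit, 2 = Explicit, 3 = Implicit and Explicit,
 * e.g. (illustrative path):
 *   echo 3 > .../alua/example_tg_pt_gp/alua_access_type
 */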
2050
2051ssize_t core_alua_show_nonop_delay_msecs(
2052	struct t10_alua_tg_pt_gp *tg_pt_gp,
2053	char *page)
2054{
2055	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2056}
2057
2058ssize_t core_alua_store_nonop_delay_msecs(
2059	struct t10_alua_tg_pt_gp *tg_pt_gp,
2060	const char *page,
2061	size_t count)
2062{
2063	unsigned long tmp;
2064	int ret;
2065
2066	ret = kstrtoul(page, 0, &tmp);
2067	if (ret < 0) {
2068		pr_err("Unable to extract nonop_delay_msecs\n");
2069		return ret;
2070	}
2071	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2072		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2073			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2074			ALUA_MAX_NONOP_DELAY_MSECS);
2075		return -EINVAL;
2076	}
2077	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2078
2079	return count;
2080}
2081
2082ssize_t core_alua_show_trans_delay_msecs(
2083	struct t10_alua_tg_pt_gp *tg_pt_gp,
2084	char *page)
2085{
2086	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2087}
2088
2089ssize_t core_alua_store_trans_delay_msecs(
2090	struct t10_alua_tg_pt_gp *tg_pt_gp,
2091	const char *page,
2092	size_t count)
2093{
2094	unsigned long tmp;
2095	int ret;
2096
2097	ret = kstrtoul(page, 0, &tmp);
2098	if (ret < 0) {
2099		pr_err("Unable to extract trans_delay_msecs\n");
2100		return ret;
2101	}
2102	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2103		pr_err("Passed trans_delay_msecs: %lu, exceeds"
2104			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2105			ALUA_MAX_TRANS_DELAY_MSECS);
2106		return -EINVAL;
2107	}
2108	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2109
2110	return count;
2111}
2112
2113ssize_t core_alua_show_implicit_trans_secs(
2114	struct t10_alua_tg_pt_gp *tg_pt_gp,
2115	char *page)
2116{
2117	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2118}
2119
2120ssize_t core_alua_store_implicit_trans_secs(
2121	struct t10_alua_tg_pt_gp *tg_pt_gp,
2122	const char *page,
2123	size_t count)
2124{
2125	unsigned long tmp;
2126	int ret;
2127
2128	ret = kstrtoul(page, 0, &tmp);
2129	if (ret < 0) {
2130		pr_err("Unable to extract implicit_trans_secs\n");
2131		return ret;
2132	}
2133	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2134		pr_err("Passed implicit_trans_secs: %lu, exceeds"
2135			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2136			ALUA_MAX_IMPLICIT_TRANS_SECS);
2137		return -EINVAL;
2138	}
2139	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2140
2141	return count;
2142}
2143
2144ssize_t core_alua_show_preferred_bit(
2145	struct t10_alua_tg_pt_gp *tg_pt_gp,
2146	char *page)
2147{
2148	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2149}
2150
2151ssize_t core_alua_store_preferred_bit(
2152	struct t10_alua_tg_pt_gp *tg_pt_gp,
2153	const char *page,
2154	size_t count)
2155{
2156	unsigned long tmp;
2157	int ret;
2158
2159	ret = kstrtoul(page, 0, &tmp);
2160	if (ret < 0) {
2161		pr_err("Unable to extract preferred ALUA value\n");
2162		return ret;
2163	}
2164	if ((tmp != 0) && (tmp != 1)) {
2165		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2166		return -EINVAL;
2167	}
2168	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2169
2170	return count;
2171}
2172
2173ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2174{
2175	return sprintf(page, "%d\n",
2176		atomic_read(&lun->lun_tg_pt_secondary_offline));
2177}
2178
2179ssize_t core_alua_store_offline_bit(
2180	struct se_lun *lun,
2181	const char *page,
2182	size_t count)
2183{
2184	/*
2185	 * rcu_dereference_raw protected by se_lun->lun_group symlink
2186	 * reference to se_device->dev_group.
2187	 */
2188	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
2189	unsigned long tmp;
2190	int ret;
2191
2192	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
2193	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
2194		return -ENODEV;
2195
2196	ret = kstrtoul(page, 0, &tmp);
2197	if (ret < 0) {
2198		pr_err("Unable to extract alua_tg_pt_offline value\n");
2199		return ret;
2200	}
2201	if ((tmp != 0) && (tmp != 1)) {
2202		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2203				tmp);
2204		return -EINVAL;
2205	}
2206
2207	ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
2208	if (ret < 0)
2209		return -EINVAL;
2210
2211	return count;
2212}
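
/*
 * Editor's note, usage sketch (illustrative path): writing 1 here drives
 * an implicit (explicit == 0) secondary-state transition to OFFLINE via
 * core_alua_set_tg_pt_secondary_state():
 *   echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline
 */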
2213
2214ssize_t core_alua_show_secondary_status(
2215	struct se_lun *lun,
2216	char *page)
2217{
2218	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
2219}
2220
2221ssize_t core_alua_store_secondary_status(
2222	struct se_lun *lun,
2223	const char *page,
2224	size_t count)
2225{
2226	unsigned long tmp;
2227	int ret;
2228
2229	ret = kstrtoul(page, 0, &tmp);
2230	if (ret < 0) {
2231		pr_err("Unable to extract alua_tg_pt_status\n");
2232		return ret;
2233	}
2234	if ((tmp != ALUA_STATUS_NONE) &&
2235	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2236	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2237		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2238				tmp);
2239		return -EINVAL;
2240	}
2241	lun->lun_tg_pt_secondary_stat = (int)tmp;
2242
2243	return count;
2244}
2245
2246ssize_t core_alua_show_secondary_write_metadata(
2247	struct se_lun *lun,
2248	char *page)
2249{
2250	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
2251}
2252
2253ssize_t core_alua_store_secondary_write_metadata(
2254	struct se_lun *lun,
2255	const char *page,
2256	size_t count)
2257{
2258	unsigned long tmp;
2259	int ret;
2260
2261	ret = kstrtoul(page, 0, &tmp);
2262	if (ret < 0) {
2263		pr_err("Unable to extract alua_tg_pt_write_md\n");
2264		return ret;
2265	}
2266	if ((tmp != 0) && (tmp != 1)) {
2267		pr_err("Illegal value for alua_tg_pt_write_md:"
2268				" %lu\n", tmp);
2269		return -EINVAL;
2270	}
2271	lun->lun_tg_pt_secondary_write_md = (int)tmp;
2272
2273	return count;
2274}
2275
2276int core_setup_alua(struct se_device *dev)
2277{
2278	if (!(dev->transport->transport_flags &
2279	     TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
2280	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2281		struct t10_alua_lu_gp_member *lu_gp_mem;
2282
2283		/*
2284		 * Associate this struct se_device with the default ALUA
2285		 * LUN Group.
2286		 */
2287		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2288		if (IS_ERR(lu_gp_mem))
2289			return PTR_ERR(lu_gp_mem);
2290
2291		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2292		__core_alua_attach_lu_gp_mem(lu_gp_mem,
2293				default_lu_gp);
2294		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2295
2296		pr_debug("%s: Adding to default ALUA LU Group:"
2297			" core/alua/lu_gps/default_lu_gp\n",
2298			dev->transport->name);
2299	}
2300
2301	return 0;
2302}
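
/*
 * Editor's note: core_setup_alua() is expected to run once per struct
 * se_device during device configuration; pass-through ALUA transports and
 * internal-use HBAs skip the default LU group association entirely.
 */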