   1/*
   2 * SBP2 target driver (SCSI over IEEE1394 in target mode)
   3 *
   4 * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software Foundation,
  18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  19 */
  20
  21#define KMSG_COMPONENT "sbp_target"
  22#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  23
  24#include <linux/kernel.h>
  25#include <linux/module.h>
  26#include <linux/init.h>
  27#include <linux/types.h>
  28#include <linux/string.h>
  29#include <linux/configfs.h>
  30#include <linux/ctype.h>
  31#include <linux/delay.h>
  32#include <linux/firewire.h>
  33#include <linux/firewire-constants.h>
  34#include <scsi/scsi_proto.h>
  35#include <scsi/scsi_tcq.h>
  36#include <target/target_core_base.h>
  37#include <target/target_core_backend.h>
  38#include <target/target_core_fabric.h>
  39#include <asm/unaligned.h>
  40
  41#include "sbp_target.h"
  42
  43/* FireWire address region for management and command block address handlers */
  44static const struct fw_address_region sbp_register_region = {
  45	.start	= CSR_REGISTER_BASE + 0x10000,
  46	.end	= 0x1000000000000ULL,
  47};
  48
  49static const u32 sbp_unit_directory_template[] = {
  50	0x1200609e, /* unit_specifier_id: NCITS/T10 */
  51	0x13010483, /* unit_sw_version: 1155D Rev 4 */
  52	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
  53	0x390104d8, /* command_set: SPC-2 */
  54	0x3b000000, /* command_set_revision: 0 */
  55	0x3c000001, /* firmware_revision: 1 */
  56};
  57
  58#define SESSION_MAINTENANCE_INTERVAL HZ
  59
  60static atomic_t login_id = ATOMIC_INIT(0);
  61
  62static void session_maintenance_work(struct work_struct *);
  63static int sbp_run_transaction(struct fw_card *, int, int, int, int,
  64		unsigned long long, void *, size_t);
  65
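     /*
      * Read the peer's EUI-64 (GUID) out of its config ROM: quadlets 3
      * and 4 of the bus information block hold the high and low halves
      * of the 64-bit GUID.
      */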
  66static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
  67{
  68	int ret;
  69	__be32 high, low;
  70
  71	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
  72			req->node_addr, req->generation, req->speed,
  73			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
  74			&high, sizeof(high));
  75	if (ret != RCODE_COMPLETE)
  76		return ret;
  77
  78	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
  79			req->node_addr, req->generation, req->speed,
  80			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
  81			&low, sizeof(low));
  82	if (ret != RCODE_COMPLETE)
  83		return ret;
  84
  85	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
  86
  87	return RCODE_COMPLETE;
  88}
  89
  90static struct sbp_session *sbp_session_find_by_guid(
  91	struct sbp_tpg *tpg, u64 guid)
  92{
  93	struct se_session *se_sess;
  94	struct sbp_session *sess, *found = NULL;
  95
  96	spin_lock_bh(&tpg->se_tpg.session_lock);
  97	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
  98		sess = se_sess->fabric_sess_ptr;
  99		if (sess->guid == guid)
 100			found = sess;
 101	}
 102	spin_unlock_bh(&tpg->se_tpg.session_lock);
 103
 104	return found;
 105}
 106
 107static struct sbp_login_descriptor *sbp_login_find_by_lun(
 108		struct sbp_session *session, u32 unpacked_lun)
 109{
 110	struct sbp_login_descriptor *login, *found = NULL;
 111
 112	spin_lock_bh(&session->lock);
 113	list_for_each_entry(login, &session->login_list, link) {
 114		if (login->login_lun == unpacked_lun)
 115			found = login;
 116	}
 117	spin_unlock_bh(&session->lock);
 118
 119	return found;
 120}
 121
 122static int sbp_login_count_all_by_lun(
 123		struct sbp_tpg *tpg,
 124		u32 unpacked_lun,
 125		int exclusive)
 126{
 127	struct se_session *se_sess;
 128	struct sbp_session *sess;
 129	struct sbp_login_descriptor *login;
 130	int count = 0;
 131
 132	spin_lock_bh(&tpg->se_tpg.session_lock);
 133	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 134		sess = se_sess->fabric_sess_ptr;
 135
 136		spin_lock_bh(&sess->lock);
 137		list_for_each_entry(login, &sess->login_list, link) {
 138			if (login->login_lun != unpacked_lun)
 139				continue;
 140
 141			if (!exclusive || login->exclusive)
 142				count++;
 143		}
 144		spin_unlock_bh(&sess->lock);
 145	}
 146	spin_unlock_bh(&tpg->se_tpg.session_lock);
 147
 148	return count;
 149}
 150
 151static struct sbp_login_descriptor *sbp_login_find_by_id(
 152	struct sbp_tpg *tpg, int login_id)
 153{
 154	struct se_session *se_sess;
 155	struct sbp_session *sess;
 156	struct sbp_login_descriptor *login, *found = NULL;
 157
 158	spin_lock_bh(&tpg->se_tpg.session_lock);
 159	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 160		sess = se_sess->fabric_sess_ptr;
 161
 162		spin_lock_bh(&sess->lock);
 163		list_for_each_entry(login, &sess->login_list, link) {
 164			if (login->login_id == login_id)
 165				found = login;
 166		}
 167		spin_unlock_bh(&sess->lock);
 168	}
 169	spin_unlock_bh(&tpg->se_tpg.session_lock);
 170
 171	return found;
 172}
 173
 174static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
 175{
 176	struct se_portal_group *se_tpg = &tpg->se_tpg;
 177	struct se_lun *se_lun;
 178
 179	rcu_read_lock();
 180	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
 181		if (se_lun->unpacked_lun == login_lun) {
 182			rcu_read_unlock();
 183			*err = 0;
 184			return login_lun;
 185		}
 186	}
 187	rcu_read_unlock();
 188
 189	*err = -ENODEV;
 190	return login_lun;
 191}
 192
 193static struct sbp_session *sbp_session_create(
 194		struct sbp_tpg *tpg,
 195		u64 guid)
 196{
 197	struct sbp_session *sess;
 198	int ret;
 199	char guid_str[17];
 200
 201	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
 202
 203	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
 204	if (!sess) {
 205		pr_err("failed to allocate session descriptor\n");
 206		return ERR_PTR(-ENOMEM);
 207	}
 208	spin_lock_init(&sess->lock);
 209	INIT_LIST_HEAD(&sess->login_list);
 210	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
 211	sess->guid = guid;
 212
 213	sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
 214					     sizeof(struct sbp_target_request),
 215					     TARGET_PROT_NORMAL, guid_str,
 216					     sess, NULL);
 217	if (IS_ERR(sess->se_sess)) {
 218		pr_err("failed to init se_session\n");
 219		ret = PTR_ERR(sess->se_sess);
 220		kfree(sess);
 221		return ERR_PTR(ret);
 222	}
 223
 224	return sess;
 225}
 226
 227static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
 228{
 229	spin_lock_bh(&sess->lock);
 230	if (!list_empty(&sess->login_list)) {
 231		spin_unlock_bh(&sess->lock);
 232		return;
 233	}
 234	spin_unlock_bh(&sess->lock);
 235
 236	if (cancel_work)
 237		cancel_delayed_work_sync(&sess->maint_work);
 238
 239	transport_deregister_session_configfs(sess->se_sess);
 240	transport_deregister_session(sess->se_sess);
 241
 242	if (sess->card)
 243		fw_card_put(sess->card);
 244
 245	kfree(sess);
 246}
 247
 248static void sbp_target_agent_unregister(struct sbp_target_agent *);
 249
 250static void sbp_login_release(struct sbp_login_descriptor *login,
 251	bool cancel_work)
 252{
 253	struct sbp_session *sess = login->sess;
 254
 255	/* FIXME: abort/wait on tasks */
 256
 257	sbp_target_agent_unregister(login->tgt_agt);
 258
 259	if (sess) {
 260		spin_lock_bh(&sess->lock);
 261		list_del(&login->link);
 262		spin_unlock_bh(&sess->lock);
 263
 264		sbp_session_release(sess, cancel_work);
 265	}
 266
 267	kfree(login);
 268}
 269
 270static struct sbp_target_agent *sbp_target_agent_register(
 271	struct sbp_login_descriptor *);
 272
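     /*
      * Handle a LOGIN management ORB. In outline: validate the requested
      * LUN, read the initiator's GUID from its config ROM, enforce the
      * exclusive bit and the per-LUN login limit, create (or reuse) a
      * session keyed on that GUID, register a per-login command block
      * agent, and finally write a login response block back to the
      * address given in the ORB.
      */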
 273static void sbp_management_request_login(
 274	struct sbp_management_agent *agent, struct sbp_management_request *req,
 275	int *status_data_size)
 276{
 277	struct sbp_tport *tport = agent->tport;
 278	struct sbp_tpg *tpg = tport->tpg;
 279	struct sbp_session *sess;
 280	struct sbp_login_descriptor *login;
 281	struct sbp_login_response_block *response;
 282	u64 guid;
 283	u32 unpacked_lun;
 284	int login_response_len, ret;
 285
 286	unpacked_lun = sbp_get_lun_from_tpg(tpg,
 287			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
 288	if (ret) {
 289		pr_notice("login to unknown LUN: %d\n",
 290			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
 291
 292		req->status.status = cpu_to_be32(
 293			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 294			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
 295		return;
 296	}
 297
 298	ret = read_peer_guid(&guid, req);
 299	if (ret != RCODE_COMPLETE) {
 300		pr_warn("failed to read peer GUID: %d\n", ret);
 301
 302		req->status.status = cpu_to_be32(
 303			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 304			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 305		return;
 306	}
 307
 308	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
 309		unpacked_lun, guid);
 310
 311	sess = sbp_session_find_by_guid(tpg, guid);
 312	if (sess) {
 313		login = sbp_login_find_by_lun(sess, unpacked_lun);
 314		if (login) {
 315			pr_notice("initiator already logged-in\n");
 316
 317			/*
 318			 * SBP-2 R4 says we should return access denied, but
 319			 * that can confuse initiators. Instead we need to
 320			 * treat this like a reconnect, but send the login
 321			 * response block like a fresh login.
 322			 *
 323			 * This is required particularly in the case of Apple
 324			 * devices booting off the FireWire target, where
 325			 * the firmware has an active login to the target. When
 326			 * the OS takes control of the session it issues its own
 327			 * LOGIN rather than a RECONNECT. To avoid the machine
 328			 * waiting until the reconnect_hold expires, we can skip
 329			 * the ACCESS_DENIED errors to speed things up.
 330			 */
 331
 332			goto already_logged_in;
 333		}
 334	}
 335
 336	/*
 337	 * check exclusive bit in login request
 338	 * reject with access_denied if any logins present
 339	 */
 340	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
 341			sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
 342		pr_warn("refusing exclusive login with other active logins\n");
 343
 344		req->status.status = cpu_to_be32(
 345			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 346			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 347		return;
 348	}
 349
 350	/*
 351	 * check exclusive bit in any existing login descriptor
 352	 * reject with access_denied if any exclusive logins present
 353	 */
 354	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
 355		pr_warn("refusing login while another exclusive login present\n");
 356
 357		req->status.status = cpu_to_be32(
 358			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 359			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 360		return;
 361	}
 362
 363	/*
 364	 * check we haven't exceeded the number of allowed logins
 365	 * reject with resources_unavailable if we have
 366	 */
 367	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
 368			tport->max_logins_per_lun) {
 369		pr_warn("max number of logins reached\n");
 370
 371		req->status.status = cpu_to_be32(
 372			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 373			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 374		return;
 375	}
 376
 377	if (!sess) {
 378		sess = sbp_session_create(tpg, guid);
 379		if (IS_ERR(sess)) {
 380			switch (PTR_ERR(sess)) {
 381			case -EPERM:
 382				ret = SBP_STATUS_ACCESS_DENIED;
 383				break;
 384			default:
 385				ret = SBP_STATUS_RESOURCES_UNAVAIL;
 386				break;
 387			}
 388
 389			req->status.status = cpu_to_be32(
 390				STATUS_BLOCK_RESP(
 391					STATUS_RESP_REQUEST_COMPLETE) |
 392				STATUS_BLOCK_SBP_STATUS(ret));
 393			return;
 394		}
 395
 396		sess->node_id = req->node_addr;
 397		sess->card = fw_card_get(req->card);
 398		sess->generation = req->generation;
 399		sess->speed = req->speed;
 400
 401		schedule_delayed_work(&sess->maint_work,
 402				SESSION_MAINTENANCE_INTERVAL);
 403	}
 404
 405	/* only take the latest reconnect_hold into account */
 406	sess->reconnect_hold = min(
 407		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
 408		tport->max_reconnect_timeout) - 1;
 409
 410	login = kmalloc(sizeof(*login), GFP_KERNEL);
 411	if (!login) {
 412		pr_err("failed to allocate login descriptor\n");
 413
 414		sbp_session_release(sess, true);
 415
 416		req->status.status = cpu_to_be32(
 417			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 418			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 419		return;
 420	}
 421
 422	login->sess = sess;
 423	login->login_lun = unpacked_lun;
 424	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
 425	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
 426	login->login_id = atomic_inc_return(&login_id);
 427
 428	login->tgt_agt = sbp_target_agent_register(login);
 429	if (IS_ERR(login->tgt_agt)) {
 430		ret = PTR_ERR(login->tgt_agt);
 431		pr_err("failed to map command block handler: %d\n", ret);
 432
 433		sbp_session_release(sess, true);
 434		kfree(login);
 435
 436		req->status.status = cpu_to_be32(
 437			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 438			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 439		return;
 440	}
 441
 442	spin_lock_bh(&sess->lock);
 443	list_add_tail(&login->link, &sess->login_list);
 444	spin_unlock_bh(&sess->lock);
 445
 446already_logged_in:
 447	response = kzalloc(sizeof(*response), GFP_KERNEL);
 448	if (!response) {
 449		pr_err("failed to allocate login response block\n");
 450
 451		sbp_login_release(login, true);
 452
 453		req->status.status = cpu_to_be32(
 454			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 455			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 456		return;
 457	}
 458
 459	login_response_len = clamp_val(
 460			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
 461			12, sizeof(*response));
 462	response->misc = cpu_to_be32(
 463		((login_response_len & 0xffff) << 16) |
 464		(login->login_id & 0xffff));
 465	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
 466	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
 467		&response->command_block_agent);
 468
 469	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
 470		sess->node_id, sess->generation, sess->speed,
 471		sbp2_pointer_to_addr(&req->orb.ptr2), response,
 472		login_response_len);
 473	if (ret != RCODE_COMPLETE) {
 474		pr_debug("failed to write login response block: %x\n", ret);
 475
 476		kfree(response);
 477		sbp_login_release(login, true);
 478
 479		req->status.status = cpu_to_be32(
 480			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 481			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 482		return;
 483	}
 484
 485	kfree(response);
 486
 487	req->status.status = cpu_to_be32(
 488		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 489		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 490}
 491
 492static void sbp_management_request_query_logins(
 493	struct sbp_management_agent *agent, struct sbp_management_request *req,
 494	int *status_data_size)
 495{
 496	pr_notice("QUERY LOGINS not implemented\n");
 497	/* FIXME: implement */
 498
 499	req->status.status = cpu_to_be32(
 500		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 501		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
 502}
 503
 504static void sbp_management_request_reconnect(
 505	struct sbp_management_agent *agent, struct sbp_management_request *req,
 506	int *status_data_size)
 507{
 508	struct sbp_tport *tport = agent->tport;
 509	struct sbp_tpg *tpg = tport->tpg;
 510	int ret;
 511	u64 guid;
 512	struct sbp_login_descriptor *login;
 513
 514	ret = read_peer_guid(&guid, req);
 515	if (ret != RCODE_COMPLETE) {
 516		pr_warn("failed to read peer GUID: %d\n", ret);
 517
 518		req->status.status = cpu_to_be32(
 519			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 520			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 521		return;
 522	}
 523
 524	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
 525
 526	login = sbp_login_find_by_id(tpg,
 527		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
 528
 529	if (!login) {
 530		pr_err("mgt_agent RECONNECT unknown login ID\n");
 531
 532		req->status.status = cpu_to_be32(
 533			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 534			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 535		return;
 536	}
 537
 538	if (login->sess->guid != guid) {
 539		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
 540
 541		req->status.status = cpu_to_be32(
 542			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 543			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 544		return;
 545	}
 546
 547	spin_lock_bh(&login->sess->lock);
 548	if (login->sess->card)
 549		fw_card_put(login->sess->card);
 550
 551	/* update the node details */
 552	login->sess->generation = req->generation;
 553	login->sess->node_id = req->node_addr;
 554	login->sess->card = fw_card_get(req->card);
 555	login->sess->speed = req->speed;
 556	spin_unlock_bh(&login->sess->lock);
 557
 558	req->status.status = cpu_to_be32(
 559		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 560		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 561}
 562
 563static void sbp_management_request_logout(
 564	struct sbp_management_agent *agent, struct sbp_management_request *req,
 565	int *status_data_size)
 566{
 567	struct sbp_tport *tport = agent->tport;
 568	struct sbp_tpg *tpg = tport->tpg;
 569	int id;
 570	struct sbp_login_descriptor *login;
 571
 572	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
 573
 574	login = sbp_login_find_by_id(tpg, id);
 575	if (!login) {
 576		pr_warn("cannot find login: %d\n", id);
 577
 578		req->status.status = cpu_to_be32(
 579			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 580			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
 581		return;
 582	}
 583
 584	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
 585		login->login_lun, login->login_id);
 586
 587	if (req->node_addr != login->sess->node_id) {
 588		pr_warn("logout from different node ID\n");
 589
 590		req->status.status = cpu_to_be32(
 591			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 592			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 593		return;
 594	}
 595
 596	sbp_login_release(login, true);
 597
 598	req->status.status = cpu_to_be32(
 599		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 600		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 601}
 602
 603static void session_check_for_reset(struct sbp_session *sess)
 604{
 605	bool card_valid = false;
 606
 607	spin_lock_bh(&sess->lock);
 608
 609	if (sess->card) {
 610		spin_lock_irq(&sess->card->lock);
 611		card_valid = (sess->card->local_node != NULL);
 612		spin_unlock_irq(&sess->card->lock);
 613
 614		if (!card_valid) {
 615			fw_card_put(sess->card);
 616			sess->card = NULL;
 617		}
 618	}
 619
 620	if (!card_valid || (sess->generation != sess->card->generation)) {
 621		pr_info("Waiting for reconnect from node: %016llx\n",
 622				sess->guid);
 623
 624		sess->node_id = -1;
 625		sess->reconnect_expires = get_jiffies_64() +
 626			((sess->reconnect_hold + 1) * HZ);
 627	}
 628
 629	spin_unlock_bh(&sess->lock);
 630}
 631
 632static void session_reconnect_expired(struct sbp_session *sess)
 633{
 634	struct sbp_login_descriptor *login, *temp;
 635	LIST_HEAD(login_list);
 636
 637	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
 638
 639	spin_lock_bh(&sess->lock);
 640	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
 641		login->sess = NULL;
 642		list_move_tail(&login->link, &login_list);
 643	}
 644	spin_unlock_bh(&sess->lock);
 645
 646	list_for_each_entry_safe(login, temp, &login_list, link) {
 647		list_del(&login->link);
 648		sbp_login_release(login, false);
 649	}
 650
 651	sbp_session_release(sess, false);
 652}
 653
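     /*
      * Periodic per-session housekeeping, re-armed every
      * SESSION_MAINTENANCE_INTERVAL (one second). While the session has
      * a valid node ID we only watch for bus resets; after a reset we
      * wait up to reconnect_hold + 1 seconds for a RECONNECT before
      * tearing the session down.
      */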
 654static void session_maintenance_work(struct work_struct *work)
 655{
 656	struct sbp_session *sess = container_of(work, struct sbp_session,
 657			maint_work.work);
 658
 659	/* could be called while tearing down the session */
 660	spin_lock_bh(&sess->lock);
 661	if (list_empty(&sess->login_list)) {
 662		spin_unlock_bh(&sess->lock);
 663		return;
 664	}
 665	spin_unlock_bh(&sess->lock);
 666
 667	if (sess->node_id != -1) {
 668		/* check for bus reset and make node_id invalid */
 669		session_check_for_reset(sess);
 670
 671		schedule_delayed_work(&sess->maint_work,
 672				SESSION_MAINTENANCE_INTERVAL);
 673	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
 674		/* still waiting for reconnect */
 675		schedule_delayed_work(&sess->maint_work,
 676				SESSION_MAINTENANCE_INTERVAL);
 677	} else {
 678		/* reconnect timeout has expired */
 679		session_reconnect_expired(sess);
 680	}
 681}
 682
 683static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
 684		struct sbp_target_agent *agent)
 685{
 686	int state;
 687
 688	switch (tcode) {
 689	case TCODE_READ_QUADLET_REQUEST:
 690		pr_debug("tgt_agent AGENT_STATE READ\n");
 691
 692		spin_lock_bh(&agent->lock);
 693		state = agent->state;
 694		spin_unlock_bh(&agent->lock);
 695
 696		*(__be32 *)data = cpu_to_be32(state);
 697
 698		return RCODE_COMPLETE;
 699
 700	case TCODE_WRITE_QUADLET_REQUEST:
 701		/* ignored */
 702		return RCODE_COMPLETE;
 703
 704	default:
 705		return RCODE_TYPE_ERROR;
 706	}
 707}
 708
 709static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
 710		struct sbp_target_agent *agent)
 711{
 712	switch (tcode) {
 713	case TCODE_WRITE_QUADLET_REQUEST:
 714		pr_debug("tgt_agent AGENT_RESET\n");
 715		spin_lock_bh(&agent->lock);
 716		agent->state = AGENT_STATE_RESET;
 717		spin_unlock_bh(&agent->lock);
 718		return RCODE_COMPLETE;
 719
 720	default:
 721		return RCODE_TYPE_ERROR;
 722	}
 723}
 724
 725static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
 726		struct sbp_target_agent *agent)
 727{
 728	struct sbp2_pointer *ptr = data;
 729
 730	switch (tcode) {
 731	case TCODE_WRITE_BLOCK_REQUEST:
 732		spin_lock_bh(&agent->lock);
 733		if (agent->state != AGENT_STATE_SUSPENDED &&
 734				agent->state != AGENT_STATE_RESET) {
 735			spin_unlock_bh(&agent->lock);
 736			pr_notice("Ignoring ORB_POINTER write while active.\n");
 737			return RCODE_CONFLICT_ERROR;
 738		}
 739		agent->state = AGENT_STATE_ACTIVE;
 740		spin_unlock_bh(&agent->lock);
 741
 742		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
 743		agent->doorbell = false;
 744
 745		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
 746				agent->orb_pointer);
 747
 748		queue_work(system_unbound_wq, &agent->work);
 749
 750		return RCODE_COMPLETE;
 751
 752	case TCODE_READ_BLOCK_REQUEST:
 753		pr_debug("tgt_agent ORB_POINTER READ\n");
 754		spin_lock_bh(&agent->lock);
 755		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
 756		spin_unlock_bh(&agent->lock);
 757		return RCODE_COMPLETE;
 758
 759	default:
 760		return RCODE_TYPE_ERROR;
 761	}
 762}
 763
 764static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
 765		struct sbp_target_agent *agent)
 766{
 767	switch (tcode) {
 768	case TCODE_WRITE_QUADLET_REQUEST:
 769		spin_lock_bh(&agent->lock);
 770		if (agent->state != AGENT_STATE_SUSPENDED) {
 771			spin_unlock_bh(&agent->lock);
 772			pr_debug("Ignoring DOORBELL while active.\n");
 773			return RCODE_CONFLICT_ERROR;
 774		}
 775		agent->state = AGENT_STATE_ACTIVE;
 776		spin_unlock_bh(&agent->lock);
 777
 778		agent->doorbell = true;
 779
 780		pr_debug("tgt_agent DOORBELL\n");
 781
 782		queue_work(system_unbound_wq, &agent->work);
 783
 784		return RCODE_COMPLETE;
 785
 786	case TCODE_READ_QUADLET_REQUEST:
 787		return RCODE_COMPLETE;
 788
 789	default:
 790		return RCODE_TYPE_ERROR;
 791	}
 792}
 793
 794static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
 795		int tcode, void *data, struct sbp_target_agent *agent)
 796{
 797	switch (tcode) {
 798	case TCODE_WRITE_QUADLET_REQUEST:
 799		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
 800		/* ignored as we don't send unsolicited status */
 801		return RCODE_COMPLETE;
 802
 803	case TCODE_READ_QUADLET_REQUEST:
 804		return RCODE_COMPLETE;
 805
 806	default:
 807		return RCODE_TYPE_ERROR;
 808	}
 809}
 810
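     /*
      * Dispatch accesses to the per-login command block agent register
      * file (0x20 bytes; offsets relative to the start of the handler's
      * range): 0x00 AGENT_STATE, 0x04 AGENT_RESET, 0x08 ORB_POINTER
      * (8 bytes), 0x10 DOORBELL, 0x14 UNSOLICITED_STATUS_ENABLE.
      * Requests from the wrong node or bus generation are rejected.
      */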
 811static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
 812		int tcode, int destination, int source, int generation,
 813		unsigned long long offset, void *data, size_t length,
 814		void *callback_data)
 815{
 816	struct sbp_target_agent *agent = callback_data;
 817	struct sbp_session *sess = agent->login->sess;
 818	int sess_gen, sess_node, rcode;
 819
 820	spin_lock_bh(&sess->lock);
 821	sess_gen = sess->generation;
 822	sess_node = sess->node_id;
 823	spin_unlock_bh(&sess->lock);
 824
 825	if (generation != sess_gen) {
 826		pr_notice("ignoring request with wrong generation\n");
 827		rcode = RCODE_TYPE_ERROR;
 828		goto out;
 829	}
 830
 831	if (source != sess_node) {
 832		pr_notice("ignoring request from foreign node (%x != %x)\n",
 833				source, sess_node);
 834		rcode = RCODE_TYPE_ERROR;
 835		goto out;
 836	}
 837
 838	/* turn offset into the offset from the start of the block */
 839	offset -= agent->handler.offset;
 840
 841	if (offset == 0x00 && length == 4) {
 842		/* AGENT_STATE */
 843		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
 844	} else if (offset == 0x04 && length == 4) {
 845		/* AGENT_RESET */
 846		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
 847	} else if (offset == 0x08 && length == 8) {
 848		/* ORB_POINTER */
 849		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
 850	} else if (offset == 0x10 && length == 4) {
 851		/* DOORBELL */
 852		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
 853	} else if (offset == 0x14 && length == 4) {
 854		/* UNSOLICITED_STATUS_ENABLE */
 855		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
 856				data, agent);
 857	} else {
 858		rcode = RCODE_ADDRESS_ERROR;
 859	}
 860
 861out:
 862	fw_send_response(card, request, rcode);
 863}
 864
 865static void sbp_handle_command(struct sbp_target_request *);
 866static int sbp_send_status(struct sbp_target_request *);
 867static void sbp_free_request(struct sbp_target_request *);
 868
 869static void tgt_agent_process_work(struct work_struct *work)
 870{
 871	struct sbp_target_request *req =
 872		container_of(work, struct sbp_target_request, work);
 873
 874	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
 875			req->orb_pointer,
 876			sbp2_pointer_to_addr(&req->orb.next_orb),
 877			sbp2_pointer_to_addr(&req->orb.data_descriptor),
 878			be32_to_cpu(req->orb.misc));
 879
 880	if (req->orb_pointer >> 32)
 881		pr_debug("ORB with high bits set\n");
 882
  883	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
  884	case 0: /* Format specified by this standard */
  885		sbp_handle_command(req);
  886		return;
  887	case 1: /* Reserved for future standardization */
  888	case 2: /* Vendor-dependent */
  889		req->status.status |= cpu_to_be32(
  890				STATUS_BLOCK_RESP(
  891					STATUS_RESP_REQUEST_COMPLETE) |
  892				STATUS_BLOCK_DEAD(0) |
  893				STATUS_BLOCK_LEN(1) |
  894				STATUS_BLOCK_SBP_STATUS(
  895					SBP_STATUS_REQ_TYPE_NOTSUPP));
  896		sbp_send_status(req);
  897		return;
  898	case 3: /* Dummy ORB */
  899		req->status.status |= cpu_to_be32(
  900				STATUS_BLOCK_RESP(
  901					STATUS_RESP_REQUEST_COMPLETE) |
  902				STATUS_BLOCK_DEAD(0) |
  903				STATUS_BLOCK_LEN(1) |
  904				STATUS_BLOCK_SBP_STATUS(
  905					SBP_STATUS_DUMMY_ORB_COMPLETE));
  906		sbp_send_status(req);
  907		return;
  908	default:
  909		BUG();
  910	}
 911}
 912
 913/* used to double-check we haven't been issued an AGENT_RESET */
 914static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
 915{
 916	bool active;
 917
 918	spin_lock_bh(&agent->lock);
 919	active = (agent->state == AGENT_STATE_ACTIVE);
 920	spin_unlock_bh(&agent->lock);
 921
 922	return active;
 923}
 924
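     /*
      * Allocate a request and its tag from the session's preallocated
      * tag pool (sized at 128 entries in sbp_session_create()).
      */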
 925static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
 926	struct fw_card *card, u64 next_orb)
 927{
 928	struct se_session *se_sess = sess->se_sess;
 929	struct sbp_target_request *req;
 930	int tag;
 931
 932	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 933	if (tag < 0)
 934		return ERR_PTR(-ENOMEM);
 935
 936	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
 937	memset(req, 0, sizeof(*req));
 938	req->se_cmd.map_tag = tag;
 939	req->se_cmd.tag = next_orb;
 940
 941	return req;
 942}
 943
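     /*
      * The ORB fetch loop: read each ORB in the linked list from
      * initiator memory, queue it for processing, and follow the
      * next_ORB pointer until the chain ends (high bit of next_orb.high
      * set) or the agent leaves the ACTIVE state. On a doorbell the
      * first ORB was already processed, so only its next_ORB field is
      * examined.
      */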
 944static void tgt_agent_fetch_work(struct work_struct *work)
 945{
 946	struct sbp_target_agent *agent =
 947		container_of(work, struct sbp_target_agent, work);
 948	struct sbp_session *sess = agent->login->sess;
 949	struct sbp_target_request *req;
 950	int ret;
 951	bool doorbell = agent->doorbell;
 952	u64 next_orb = agent->orb_pointer;
 953
 954	while (next_orb && tgt_agent_check_active(agent)) {
 955		req = sbp_mgt_get_req(sess, sess->card, next_orb);
 956		if (IS_ERR(req)) {
 957			spin_lock_bh(&agent->lock);
 958			agent->state = AGENT_STATE_DEAD;
 959			spin_unlock_bh(&agent->lock);
 960			return;
 961		}
 962
 963		req->login = agent->login;
 964		req->orb_pointer = next_orb;
 965
 966		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
 967					req->orb_pointer >> 32));
 968		req->status.orb_low = cpu_to_be32(
 969				req->orb_pointer & 0xfffffffc);
 970
 971		/* read in the ORB */
 972		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
 973				sess->node_id, sess->generation, sess->speed,
 974				req->orb_pointer, &req->orb, sizeof(req->orb));
 975		if (ret != RCODE_COMPLETE) {
 976			pr_debug("tgt_orb fetch failed: %x\n", ret);
 977			req->status.status |= cpu_to_be32(
 978					STATUS_BLOCK_SRC(
 979						STATUS_SRC_ORB_FINISHED) |
 980					STATUS_BLOCK_RESP(
 981						STATUS_RESP_TRANSPORT_FAILURE) |
 982					STATUS_BLOCK_DEAD(1) |
 983					STATUS_BLOCK_LEN(1) |
 984					STATUS_BLOCK_SBP_STATUS(
 985						SBP_STATUS_UNSPECIFIED_ERROR));
 986			spin_lock_bh(&agent->lock);
 987			agent->state = AGENT_STATE_DEAD;
 988			spin_unlock_bh(&agent->lock);
 989
 990			sbp_send_status(req);
 991			return;
 992		}
 993
 994		/* check the next_ORB field */
 995		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
 996			next_orb = 0;
 997			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
 998						STATUS_SRC_ORB_FINISHED));
 999		} else {
1000			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
1001			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
1002						STATUS_SRC_ORB_CONTINUING));
1003		}
1004
1005		if (tgt_agent_check_active(agent) && !doorbell) {
1006			INIT_WORK(&req->work, tgt_agent_process_work);
1007			queue_work(system_unbound_wq, &req->work);
1008		} else {
1009			/* don't process this request, just check next_ORB */
1010			sbp_free_request(req);
1011		}
1012
1013		spin_lock_bh(&agent->lock);
1014		doorbell = agent->doorbell = false;
1015
1016		/* check if we should carry on processing */
1017		if (next_orb)
1018			agent->orb_pointer = next_orb;
1019		else
1020			agent->state = AGENT_STATE_SUSPENDED;
1021
1022		spin_unlock_bh(&agent->lock);
 1023	}
1024}
1025
1026static struct sbp_target_agent *sbp_target_agent_register(
1027		struct sbp_login_descriptor *login)
1028{
1029	struct sbp_target_agent *agent;
1030	int ret;
1031
1032	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1033	if (!agent)
1034		return ERR_PTR(-ENOMEM);
1035
1036	spin_lock_init(&agent->lock);
1037
1038	agent->handler.length = 0x20;
1039	agent->handler.address_callback = tgt_agent_rw;
1040	agent->handler.callback_data = agent;
1041
1042	agent->login = login;
1043	agent->state = AGENT_STATE_RESET;
1044	INIT_WORK(&agent->work, tgt_agent_fetch_work);
1045	agent->orb_pointer = 0;
1046	agent->doorbell = false;
1047
1048	ret = fw_core_add_address_handler(&agent->handler,
1049			&sbp_register_region);
1050	if (ret < 0) {
1051		kfree(agent);
1052		return ERR_PTR(ret);
1053	}
1054
1055	return agent;
1056}
1057
1058static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
1059{
1060	fw_core_remove_address_handler(&agent->handler);
1061	cancel_work_sync(&agent->work);
1062	kfree(agent);
1063}
1064
 1065/*
 1066 * Simple wrapper around fw_run_transaction that retries the transaction
 1067 * several times in case of failure, with quadratic backoff.
 1068 */
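     /*
      * For illustration: with delay = 5 * attempt * attempt, the sleep
      * after attempt n falls in [5*n^2, 10*n^2) microseconds, i.e.
      * roughly 5, 20, 45, 80 then 125 us across the five attempts.
      */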
1069static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
1070		int generation, int speed, unsigned long long offset,
1071		void *payload, size_t length)
1072{
1073	int attempt, ret, delay;
1074
1075	for (attempt = 1; attempt <= 5; attempt++) {
1076		ret = fw_run_transaction(card, tcode, destination_id,
1077				generation, speed, offset, payload, length);
1078
1079		switch (ret) {
1080		case RCODE_COMPLETE:
1081		case RCODE_TYPE_ERROR:
1082		case RCODE_ADDRESS_ERROR:
1083		case RCODE_GENERATION:
1084			return ret;
1085
1086		default:
1087			delay = 5 * attempt * attempt;
1088			usleep_range(delay, delay * 2);
1089		}
1090	}
1091
1092	return ret;
1093}
1094
1095/*
1096 * Wrapper around sbp_run_transaction that gets the card, destination,
1097 * generation and speed out of the request's session.
1098 */
1099static int sbp_run_request_transaction(struct sbp_target_request *req,
1100		int tcode, unsigned long long offset, void *payload,
1101		size_t length)
1102{
1103	struct sbp_login_descriptor *login = req->login;
1104	struct sbp_session *sess = login->sess;
1105	struct fw_card *card;
1106	int node_id, generation, speed, ret;
1107
1108	spin_lock_bh(&sess->lock);
1109	card = fw_card_get(sess->card);
1110	node_id = sess->node_id;
1111	generation = sess->generation;
1112	speed = sess->speed;
1113	spin_unlock_bh(&sess->lock);
1114
1115	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
1116			offset, payload, length);
1117
1118	fw_card_put(card);
1119
1120	return ret;
1121}
1122
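     /*
      * Fetch the SCSI CDB. The leading bytes arrive inline in the ORB's
      * command_block field; the remainder of a longer CDB is read from
      * initiator memory immediately following the ORB itself.
      */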
1123static int sbp_fetch_command(struct sbp_target_request *req)
1124{
1125	int ret, cmd_len, copy_len;
1126
1127	cmd_len = scsi_command_size(req->orb.command_block);
1128
1129	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
1130	if (!req->cmd_buf)
1131		return -ENOMEM;
1132
1133	memcpy(req->cmd_buf, req->orb.command_block,
1134		min_t(int, cmd_len, sizeof(req->orb.command_block)));
1135
1136	if (cmd_len > sizeof(req->orb.command_block)) {
1137		pr_debug("sbp_fetch_command: filling in long command\n");
1138		copy_len = cmd_len - sizeof(req->orb.command_block);
1139
1140		ret = sbp_run_request_transaction(req,
1141				TCODE_READ_BLOCK_REQUEST,
1142				req->orb_pointer + sizeof(req->orb),
1143				req->cmd_buf + sizeof(req->orb.command_block),
1144				copy_len);
1145		if (ret != RCODE_COMPLETE)
1146			return -EIO;
1147	}
1148
1149	return 0;
1150}
1151
1152static int sbp_fetch_page_table(struct sbp_target_request *req)
1153{
1154	int pg_tbl_sz, ret;
1155	struct sbp_page_table_entry *pg_tbl;
1156
1157	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
1158		return 0;
1159
1160	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
1161		sizeof(struct sbp_page_table_entry);
1162
1163	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
1164	if (!pg_tbl)
1165		return -ENOMEM;
1166
1167	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
1168			sbp2_pointer_to_addr(&req->orb.data_descriptor),
1169			pg_tbl, pg_tbl_sz);
1170	if (ret != RCODE_COMPLETE) {
1171		kfree(pg_tbl);
1172		return -EIO;
1173	}
1174
1175	req->pg_tbl = pg_tbl;
1176	return 0;
1177}
1178
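     /*
      * Derive the transfer size and DMA direction from the ORB.
      * data_size is a byte count for a direct transfer, or the number of
      * page table elements when a page table is present (their segment
      * lengths are summed). The direction bit selects DMA_TO_DEVICE
      * (SCSI WRITE) when clear and DMA_FROM_DEVICE (SCSI READ) when set.
      */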
1179static void sbp_calc_data_length_direction(struct sbp_target_request *req,
1180	u32 *data_len, enum dma_data_direction *data_dir)
1181{
1182	int data_size, direction, idx;
1183
1184	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1185	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
1186
1187	if (!data_size) {
1188		*data_len = 0;
1189		*data_dir = DMA_NONE;
1190		return;
1191	}
1192
1193	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1194
1195	if (req->pg_tbl) {
1196		*data_len = 0;
1197		for (idx = 0; idx < data_size; idx++) {
1198			*data_len += be16_to_cpu(
1199					req->pg_tbl[idx].segment_length);
1200		}
1201	} else {
1202		*data_len = data_size;
1203	}
1204}
1205
1206static void sbp_handle_command(struct sbp_target_request *req)
1207{
1208	struct sbp_login_descriptor *login = req->login;
1209	struct sbp_session *sess = login->sess;
1210	int ret, unpacked_lun;
1211	u32 data_length;
1212	enum dma_data_direction data_dir;
1213
1214	ret = sbp_fetch_command(req);
1215	if (ret) {
1216		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
1217		goto err;
1218	}
1219
1220	ret = sbp_fetch_page_table(req);
1221	if (ret) {
1222		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
1223			ret);
1224		goto err;
1225	}
1226
1227	unpacked_lun = req->login->login_lun;
1228	sbp_calc_data_length_direction(req, &data_length, &data_dir);
1229
1230	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
1231			req->orb_pointer, unpacked_lun, data_length, data_dir);
1232
1233	/* only used for printk until we do TMRs */
1234	req->se_cmd.tag = req->orb_pointer;
1235	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
1236			      req->sense_buf, unpacked_lun, data_length,
1237			      TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
1238		goto err;
1239
1240	return;
1241
1242err:
1243	req->status.status |= cpu_to_be32(
1244		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1245		STATUS_BLOCK_DEAD(0) |
1246		STATUS_BLOCK_LEN(1) |
1247		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1248	sbp_send_status(req);
1249}
1250
1251/*
1252 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
1253 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
1254 */
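     /*
      * Data is moved with block read/write transactions, chunked to the
      * smallest of the ORB's max_payload (4 << CMDBLK_ORB_MAX_PAYLOAD),
      * the current page table segment, and the current scatterlist
      * entry.
      */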
1255static int sbp_rw_data(struct sbp_target_request *req)
1256{
1257	struct sbp_session *sess = req->login->sess;
1258	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
1259		generation, num_pte, length, tfr_length,
1260		rcode = RCODE_COMPLETE;
1261	struct sbp_page_table_entry *pte;
1262	unsigned long long offset;
1263	struct fw_card *card;
1264	struct sg_mapping_iter iter;
1265
1266	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
1267		tcode = TCODE_WRITE_BLOCK_REQUEST;
1268		sg_miter_flags = SG_MITER_FROM_SG;
1269	} else {
1270		tcode = TCODE_READ_BLOCK_REQUEST;
1271		sg_miter_flags = SG_MITER_TO_SG;
1272	}
1273
1274	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
1275	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
1276
1277	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
1278	if (pg_size) {
 1279		pr_err("sbp_rw_data: page size ignored\n");
1280		pg_size = 0x100 << pg_size;
1281	}
1282
1283	spin_lock_bh(&sess->lock);
1284	card = fw_card_get(sess->card);
1285	node_id = sess->node_id;
1286	generation = sess->generation;
1287	spin_unlock_bh(&sess->lock);
1288
1289	if (req->pg_tbl) {
1290		pte = req->pg_tbl;
1291		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1292
1293		offset = 0;
1294		length = 0;
1295	} else {
1296		pte = NULL;
1297		num_pte = 0;
1298
1299		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
1300		length = req->se_cmd.data_length;
1301	}
1302
1303	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
1304		sg_miter_flags);
1305
1306	while (length || num_pte) {
1307		if (!length) {
1308			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
1309				be32_to_cpu(pte->segment_base_lo);
1310			length = be16_to_cpu(pte->segment_length);
1311
1312			pte++;
1313			num_pte--;
1314		}
1315
1316		sg_miter_next(&iter);
1317
1318		tfr_length = min3(length, max_payload, (int)iter.length);
1319
1320		/* FIXME: take page_size into account */
1321
1322		rcode = sbp_run_transaction(card, tcode, node_id,
1323				generation, speed,
1324				offset, iter.addr, tfr_length);
1325
1326		if (rcode != RCODE_COMPLETE)
1327			break;
1328
1329		length -= tfr_length;
1330		offset += tfr_length;
1331		iter.consumed = tfr_length;
1332	}
1333
1334	sg_miter_stop(&iter);
1335	fw_card_put(card);
1336
1337	if (rcode == RCODE_COMPLETE) {
1338		WARN_ON(length != 0);
1339		return 0;
1340	} else {
1341		return -EIO;
1342	}
1343}
1344
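     /*
      * Write the status block to the initiator's status FIFO. The LEN
      * field packed into status.status gives the block size as (LEN + 1)
      * quadlets, e.g. LEN = 1 sends just the 8-byte header and LEN = 5
      * also carries the 16 bytes of mangled sense data.
      */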
1345static int sbp_send_status(struct sbp_target_request *req)
1346{
1347	int rc, ret = 0, length;
1348	struct sbp_login_descriptor *login = req->login;
1349
1350	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
1351
1352	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
1353			login->status_fifo_addr, &req->status, length);
1354	if (rc != RCODE_COMPLETE) {
1355		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
1356		ret = -EIO;
1357		goto put_ref;
1358	}
1359
1360	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
1361			req->orb_pointer);
1362	/*
1363	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
1364	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
1365	 * final se_cmd->cmd_kref put.
1366	 */
1367put_ref:
1368	target_put_sess_cmd(&req->se_cmd);
1369	return ret;
1370}
1371
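     /*
      * Repack fixed-format SCSI sense data (response codes 0x70/0x71)
      * into the sense area of the SBP-2 status block: status/sfmt/valid
      * bits in the first two bytes, then ASC/ASCQ, information,
      * CDB-dependent, FRU and sense-key-dependent bytes. Descriptor
      * format sense (0x72/0x73) is not supported and aborts the request
      * instead.
      */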
1372static void sbp_sense_mangle(struct sbp_target_request *req)
1373{
1374	struct se_cmd *se_cmd = &req->se_cmd;
1375	u8 *sense = req->sense_buf;
1376	u8 *status = req->status.data;
1377
1378	WARN_ON(se_cmd->scsi_sense_length < 18);
1379
1380	switch (sense[0] & 0x7f) { 		/* sfmt */
1381	case 0x70: /* current, fixed */
1382		status[0] = 0 << 6;
1383		break;
1384	case 0x71: /* deferred, fixed */
1385		status[0] = 1 << 6;
1386		break;
1387	case 0x72: /* current, descriptor */
1388	case 0x73: /* deferred, descriptor */
1389	default:
1390		/*
1391		 * TODO: SBP-3 specifies what we should do with descriptor
1392		 * format sense data
1393		 */
 1394		pr_err("sbp_sense_mangle: unknown sense format: 0x%x\n",
 1395			sense[0]);
1396		req->status.status |= cpu_to_be32(
1397			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1398			STATUS_BLOCK_DEAD(0) |
1399			STATUS_BLOCK_LEN(1) |
1400			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
1401		return;
1402	}
1403
1404	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
1405	status[1] =
1406		(sense[0] & 0x80) |		/* valid */
1407		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
1408		(sense[2] & 0x0f);		/* sense_key */
1409	status[2] = se_cmd->scsi_asc;		/* sense_code */
1410	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */
1411
1412	/* information */
1413	status[4] = sense[3];
1414	status[5] = sense[4];
1415	status[6] = sense[5];
1416	status[7] = sense[6];
1417
1418	/* CDB-dependent */
1419	status[8] = sense[8];
1420	status[9] = sense[9];
1421	status[10] = sense[10];
1422	status[11] = sense[11];
1423
1424	/* fru */
1425	status[12] = sense[14];
1426
1427	/* sense_key-dependent */
1428	status[13] = sense[15];
1429	status[14] = sense[16];
1430	status[15] = sense[17];
1431
1432	req->status.status |= cpu_to_be32(
1433		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1434		STATUS_BLOCK_DEAD(0) |
1435		STATUS_BLOCK_LEN(5) |
1436		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1437}
1438
1439static int sbp_send_sense(struct sbp_target_request *req)
1440{
1441	struct se_cmd *se_cmd = &req->se_cmd;
1442
1443	if (se_cmd->scsi_sense_length) {
1444		sbp_sense_mangle(req);
1445	} else {
1446		req->status.status |= cpu_to_be32(
1447			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1448			STATUS_BLOCK_DEAD(0) |
1449			STATUS_BLOCK_LEN(1) |
1450			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1451	}
1452
1453	return sbp_send_status(req);
1454}
1455
1456static void sbp_free_request(struct sbp_target_request *req)
1457{
1458	struct se_cmd *se_cmd = &req->se_cmd;
1459	struct se_session *se_sess = se_cmd->se_sess;
1460
1461	kfree(req->pg_tbl);
1462	kfree(req->cmd_buf);
1463
1464	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
1465}
1466
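     /*
      * Management agent work item: fetch the management ORB from
      * initiator memory, dispatch on its function code (LOGIN, QUERY
      * LOGINS, RECONNECT, LOGOUT, ...), then write the resulting status
      * block to the status FIFO named in the ORB.
      */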
1467static void sbp_mgt_agent_process(struct work_struct *work)
1468{
1469	struct sbp_management_agent *agent =
1470		container_of(work, struct sbp_management_agent, work);
1471	struct sbp_management_request *req = agent->request;
1472	int ret;
1473	int status_data_len = 0;
1474
1475	/* fetch the ORB from the initiator */
1476	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
1477		req->node_addr, req->generation, req->speed,
1478		agent->orb_offset, &req->orb, sizeof(req->orb));
1479	if (ret != RCODE_COMPLETE) {
1480		pr_debug("mgt_orb fetch failed: %x\n", ret);
1481		goto out;
1482	}
1483
1484	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
1485		sbp2_pointer_to_addr(&req->orb.ptr1),
1486		sbp2_pointer_to_addr(&req->orb.ptr2),
1487		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
1488		sbp2_pointer_to_addr(&req->orb.status_fifo));
1489
1490	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
1491		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
1492		pr_err("mgt_orb bad request\n");
1493		goto out;
1494	}
1495
1496	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
1497	case MANAGEMENT_ORB_FUNCTION_LOGIN:
1498		sbp_management_request_login(agent, req, &status_data_len);
1499		break;
1500
1501	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
1502		sbp_management_request_query_logins(agent, req,
1503				&status_data_len);
1504		break;
1505
1506	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
1507		sbp_management_request_reconnect(agent, req, &status_data_len);
1508		break;
1509
1510	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
1511		pr_notice("SET PASSWORD not implemented\n");
1512
1513		req->status.status = cpu_to_be32(
1514			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1515			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1516
1517		break;
1518
1519	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
1520		sbp_management_request_logout(agent, req, &status_data_len);
1521		break;
1522
1523	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
1524		pr_notice("ABORT TASK not implemented\n");
1525
1526		req->status.status = cpu_to_be32(
1527			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1528			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1529
1530		break;
1531
1532	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
1533		pr_notice("ABORT TASK SET not implemented\n");
1534
1535		req->status.status = cpu_to_be32(
1536			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1537			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1538
1539		break;
1540
1541	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
1542		pr_notice("LOGICAL UNIT RESET not implemented\n");
1543
1544		req->status.status = cpu_to_be32(
1545			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1546			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1547
1548		break;
1549
1550	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
1551		pr_notice("TARGET RESET not implemented\n");
1552
1553		req->status.status = cpu_to_be32(
1554			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1555			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1556
1557		break;
1558
1559	default:
1560		pr_notice("unknown management function 0x%x\n",
1561			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
1562
1563		req->status.status = cpu_to_be32(
1564			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1565			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1566
1567		break;
1568	}
1569
1570	req->status.status |= cpu_to_be32(
1571		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
1572		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
1573		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
1574	req->status.orb_low = cpu_to_be32(agent->orb_offset);
1575
1576	/* write the status block back to the initiator */
1577	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
1578		req->node_addr, req->generation, req->speed,
1579		sbp2_pointer_to_addr(&req->orb.status_fifo),
1580		&req->status, 8 + status_data_len);
1581	if (ret != RCODE_COMPLETE) {
1582		pr_debug("mgt_orb status write failed: %x\n", ret);
1583		goto out;
1584	}
1585
1586out:
1587	fw_card_put(req->card);
1588	kfree(req);
1589
1590	spin_lock_bh(&agent->lock);
1591	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1592	spin_unlock_bh(&agent->lock);
1593}
1594
1595static void sbp_mgt_agent_rw(struct fw_card *card,
1596	struct fw_request *request, int tcode, int destination, int source,
1597	int generation, unsigned long long offset, void *data, size_t length,
1598	void *callback_data)
1599{
1600	struct sbp_management_agent *agent = callback_data;
1601	struct sbp2_pointer *ptr = data;
1602	int rcode = RCODE_ADDRESS_ERROR;
1603
1604	if (!agent->tport->enable)
1605		goto out;
1606
1607	if ((offset != agent->handler.offset) || (length != 8))
1608		goto out;
1609
1610	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
1611		struct sbp_management_request *req;
1612		int prev_state;
1613
1614		spin_lock_bh(&agent->lock);
1615		prev_state = agent->state;
1616		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
1617		spin_unlock_bh(&agent->lock);
1618
1619		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
1620			pr_notice("ignoring management request while busy\n");
1621			rcode = RCODE_CONFLICT_ERROR;
1622			goto out;
1623		}
1624		req = kzalloc(sizeof(*req), GFP_ATOMIC);
1625		if (!req) {
1626			rcode = RCODE_CONFLICT_ERROR;
1627			goto out;
1628		}
1629
1630		req->card = fw_card_get(card);
1631		req->generation = generation;
1632		req->node_addr = source;
1633		req->speed = fw_get_request_speed(request);
1634
1635		agent->orb_offset = sbp2_pointer_to_addr(ptr);
1636		agent->request = req;
1637
1638		queue_work(system_unbound_wq, &agent->work);
1639		rcode = RCODE_COMPLETE;
1640	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
1641		addr_to_sbp2_pointer(agent->orb_offset, ptr);
1642		rcode = RCODE_COMPLETE;
1643	} else {
1644		rcode = RCODE_TYPE_ERROR;
1645	}
1646
1647out:
1648	fw_send_response(card, request, rcode);
1649}
1650
1651static struct sbp_management_agent *sbp_management_agent_register(
1652		struct sbp_tport *tport)
1653{
1654	int ret;
1655	struct sbp_management_agent *agent;
1656
1657	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1658	if (!agent)
1659		return ERR_PTR(-ENOMEM);
1660
1661	spin_lock_init(&agent->lock);
1662	agent->tport = tport;
1663	agent->handler.length = 0x08;
1664	agent->handler.address_callback = sbp_mgt_agent_rw;
1665	agent->handler.callback_data = agent;
1666	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1667	INIT_WORK(&agent->work, sbp_mgt_agent_process);
1668	agent->orb_offset = 0;
1669	agent->request = NULL;
1670
1671	ret = fw_core_add_address_handler(&agent->handler,
1672			&sbp_register_region);
1673	if (ret < 0) {
1674		kfree(agent);
1675		return ERR_PTR(ret);
1676	}
1677
1678	return agent;
1679}
1680
1681static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
1682{
1683	fw_core_remove_address_handler(&agent->handler);
1684	cancel_work_sync(&agent->work);
1685	kfree(agent);
1686}
1687
1688static int sbp_check_true(struct se_portal_group *se_tpg)
1689{
1690	return 1;
1691}
1692
1693static int sbp_check_false(struct se_portal_group *se_tpg)
1694{
1695	return 0;
1696}
1697
1698static char *sbp_get_fabric_name(void)
1699{
1700	return "sbp";
1701}
1702
1703static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1704{
1705	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1706	struct sbp_tport *tport = tpg->tport;
1707
1708	return &tport->tport_name[0];
1709}
1710
1711static u16 sbp_get_tag(struct se_portal_group *se_tpg)
1712{
1713	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1714	return tpg->tport_tpgt;
1715}
1716
1717static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
1718{
1719	return 1;
1720}
1721
1722static void sbp_release_cmd(struct se_cmd *se_cmd)
1723{
1724	struct sbp_target_request *req = container_of(se_cmd,
1725			struct sbp_target_request, se_cmd);
1726
1727	sbp_free_request(req);
1728}
1729
1730static u32 sbp_sess_get_index(struct se_session *se_sess)
1731{
1732	return 0;
1733}
1734
1735static int sbp_write_pending(struct se_cmd *se_cmd)
1736{
1737	struct sbp_target_request *req = container_of(se_cmd,
1738			struct sbp_target_request, se_cmd);
1739	int ret;
1740
1741	ret = sbp_rw_data(req);
1742	if (ret) {
1743		req->status.status |= cpu_to_be32(
1744			STATUS_BLOCK_RESP(
1745				STATUS_RESP_TRANSPORT_FAILURE) |
1746			STATUS_BLOCK_DEAD(0) |
1747			STATUS_BLOCK_LEN(1) |
1748			STATUS_BLOCK_SBP_STATUS(
1749				SBP_STATUS_UNSPECIFIED_ERROR));
1750		sbp_send_status(req);
1751		return ret;
1752	}
1753
1754	target_execute_cmd(se_cmd);
1755	return 0;
1756}
1757
1758static int sbp_write_pending_status(struct se_cmd *se_cmd)
1759{
1760	return 0;
1761}
1762
1763static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
1764{
1765	return;
1766}
1767
1768static int sbp_get_cmd_state(struct se_cmd *se_cmd)
1769{
1770	return 0;
1771}
1772
1773static int sbp_queue_data_in(struct se_cmd *se_cmd)
1774{
1775	struct sbp_target_request *req = container_of(se_cmd,
1776			struct sbp_target_request, se_cmd);
1777	int ret;
1778
1779	ret = sbp_rw_data(req);
1780	if (ret) {
1781		req->status.status |= cpu_to_be32(
1782			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1783			STATUS_BLOCK_DEAD(0) |
1784			STATUS_BLOCK_LEN(1) |
1785			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1786		sbp_send_status(req);
1787		return ret;
1788	}
1789
1790	return sbp_send_sense(req);
1791}
1792
1793/*
1794 * Called after command (no data transfer) or after the write (to device)
1795 * operation is completed
1796 */
1797static int sbp_queue_status(struct se_cmd *se_cmd)
1798{
1799	struct sbp_target_request *req = container_of(se_cmd,
1800			struct sbp_target_request, se_cmd);
1801
1802	return sbp_send_sense(req);
1803}
1804
1805static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1806{
1807}
1808
1809static void sbp_aborted_task(struct se_cmd *se_cmd)
1810{
1811	return;
1812}
1813
1814static int sbp_check_stop_free(struct se_cmd *se_cmd)
1815{
1816	struct sbp_target_request *req = container_of(se_cmd,
1817			struct sbp_target_request, se_cmd);
1818
1819	return transport_generic_free_cmd(&req->se_cmd, 0);
1820}
1821
1822static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
1823{
1824	struct se_lun *lun;
1825	int count = 0;
1826
1827	rcu_read_lock();
1828	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
1829		count++;
1830	rcu_read_unlock();
1831
1832	return count;
1833}
1834
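     /*
      * Rebuild the SBP-2 unit directory advertised in the config ROM.
      * On top of the static template this adds the management agent
      * offset (key 0x54), unit_characteristics (0x3a),
      * reconnect_timeout (0x3d), a pointer to the unit unique ID leaf
      * (0x8d), one logical_unit_number entry (0x14) per LUN, and
      * finally the unique ID leaf itself holding the target's GUID.
      */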
1835static int sbp_update_unit_directory(struct sbp_tport *tport)
1836{
1837	struct se_lun *lun;
1838	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
1839	u32 *data;
1840
1841	if (tport->unit_directory.data) {
1842		fw_core_remove_descriptor(&tport->unit_directory);
1843		kfree(tport->unit_directory.data);
1844		tport->unit_directory.data = NULL;
1845	}
1846
1847	if (!tport->enable || !tport->tpg)
1848		return 0;
1849
1850	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
1851
1852	/*
1853	 * Number of entries in the final unit directory:
1854	 *  - all of those in the template
1855	 *  - management_agent
1856	 *  - unit_characteristics
1857	 *  - reconnect_timeout
1858	 *  - unit unique ID
1859	 *  - one for each LUN
1860	 *
1861	 *  MUST NOT include leaf or sub-directory entries
1862	 */
1863	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
1864
1865	if (tport->directory_id != -1)
1866		num_entries++;
1867
1868	/* allocate num_entries + 4 for the header and unique ID leaf */
1869	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
1870	if (!data)
1871		return -ENOMEM;
1872
1873	/* directory_length */
1874	data[idx++] = num_entries << 16;
1875
1876	/* directory_id */
1877	if (tport->directory_id != -1)
1878		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
1879
1880	/* unit directory template */
1881	memcpy(&data[idx], sbp_unit_directory_template,
1882			sizeof(sbp_unit_directory_template));
1883	idx += ARRAY_SIZE(sbp_unit_directory_template);
1884
1885	/* management_agent */
1886	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
1887	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
1888
1889	/* unit_characteristics */
1890	data[idx++] = 0x3a000000 |
1891		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
1892		SBP_ORB_FETCH_SIZE;
1893
1894	/* reconnect_timeout */
1895	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
1896
1897	/* unit unique ID (leaf is just after LUNs) */
1898	data[idx++] = 0x8d000000 | (num_luns + 1);
1899
1900	rcu_read_lock();
1901	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
1902		struct se_device *dev;
1903		int type;
1904		/*
1905		 * rcu_dereference_raw protected by se_lun->lun_group symlink
1906		 * reference to se_device->dev_group.
1907		 */
1908		dev = rcu_dereference_raw(lun->lun_se_dev);
1909		type = dev->transport->get_device_type(dev);
1910
1911		/* logical_unit_number */
1912		data[idx++] = 0x14000000 |
1913			((type << 16) & 0x1f0000) |
1914			(lun->unpacked_lun & 0xffff);
1915	}
1916	rcu_read_unlock();
1917
1918	/* unit unique ID leaf */
1919	data[idx++] = 2 << 16;
1920	data[idx++] = tport->guid >> 32;
1921	data[idx++] = tport->guid;
1922
1923	tport->unit_directory.length = idx;
1924	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
1925	tport->unit_directory.data = data;
1926
1927	ret = fw_core_add_descriptor(&tport->unit_directory);
1928	if (ret < 0) {
1929		kfree(tport->unit_directory.data);
1930		tport->unit_directory.data = NULL;
1931	}
1932
1933	return ret;
1934}
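/*
 * Worked example, assuming a single exported LUN and an implicit
 * directory_id: num_entries = 6 (template) + 4 + 1 = 11, so 15 quadlets
 * are allocated: the directory header, the 11 entries, and the
 * three-quadlet unit unique ID leaf (leaf header plus both GUID halves).
 */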
1935
1936static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
1937{
1938	const char *cp;
1939	char c, nibble;
1940	int pos = 0, err;
1941
1942	*wwn = 0;
1943	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
1944		c = *cp;
1945		if (c == '\n' && cp[1] == '\0')
1946			continue;
1947		if (c == '\0') {
1948			err = 2;
1949			if (pos != 16)
1950				goto fail;
1951			return cp - name;
1952		}
1953		err = 3;
1954		if (isdigit(c))
1955			nibble = c - '0';
1956		else if (isxdigit(c))
1957			nibble = tolower(c) - 'a' + 10;
1958		else
1959			goto fail;
1960		*wwn = (*wwn << 4) | nibble;
1961		pos++;
1962	}
1963	err = 4;
1964fail:
1965	pr_info("err %d len %td pos %d\n",
1966			err, cp - name, pos);
1967	return -1;
1968}
1969
1970static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
1971{
1972	return snprintf(buf, len, "%016llx", wwn);
1973}
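/*
 * For instance, sbp_parse_wwn("0001020304050607", &wwn) yields
 * wwn == 0x0001020304050607 (a trailing newline is tolerated), and
 * sbp_format_wwn() renders it back as the same 16-digit string.
 */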
1974
1975static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1976{
1977	u64 guid = 0;
1978
1979	if (sbp_parse_wwn(name, &guid) < 0)
1980		return -EINVAL;
1981	return 0;
1982}
1983
1984static int sbp_post_link_lun(
1985		struct se_portal_group *se_tpg,
1986		struct se_lun *se_lun)
1987{
1988	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1989
1990	return sbp_update_unit_directory(tpg->tport);
1991}
1992
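/*
 * Unlinking the last LUN implicitly disables the target, mirroring the
 * enable_store() rule that an enabled TPG must have at least one LUN.
 */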
1993static void sbp_pre_unlink_lun(
1994		struct se_portal_group *se_tpg,
1995		struct se_lun *se_lun)
1996{
1997	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1998	struct sbp_tport *tport = tpg->tport;
1999	int ret;
2000
2001	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
2002		tport->enable = 0;
2003
2004	ret = sbp_update_unit_directory(tport);
2005	if (ret < 0)
2006		pr_err("unlink LUN: failed to update unit directory\n");
2007}
2008
2009static struct se_portal_group *sbp_make_tpg(
2010		struct se_wwn *wwn,
2011		struct config_group *group,
2012		const char *name)
2013{
2014	struct sbp_tport *tport =
2015		container_of(wwn, struct sbp_tport, tport_wwn);
2016
2017	struct sbp_tpg *tpg;
2018	unsigned long tpgt;
2019	int ret;
2020
2021	if (strstr(name, "tpgt_") != name)
2022		return ERR_PTR(-EINVAL);
2023	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
2024		return ERR_PTR(-EINVAL);
2025
2026	if (tport->tpg) {
2027		pr_err("Only one TPG per Unit is possible.\n");
2028		return ERR_PTR(-EBUSY);
2029	}
2030
2031	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2032	if (!tpg) {
2033		pr_err("Unable to allocate struct sbp_tpg\n");
2034		return ERR_PTR(-ENOMEM);
2035	}
2036
2037	tpg->tport = tport;
2038	tpg->tport_tpgt = tpgt;
2039	tport->tpg = tpg;
2040
2041	/* default attribute values */
2042	tport->enable = 0;
2043	tport->directory_id = -1;
2044	tport->mgt_orb_timeout = 15;
2045	tport->max_reconnect_timeout = 5;
2046	tport->max_logins_per_lun = 1;
2047
2048	tport->mgt_agt = sbp_management_agent_register(tport);
2049	if (IS_ERR(tport->mgt_agt)) {
2050		ret = PTR_ERR(tport->mgt_agt);
2051		goto out_free_tpg;
2052	}
2053
2054	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
2055	if (ret < 0)
2056		goto out_unreg_mgt_agt;
2057
2058	return &tpg->se_tpg;
2059
2060out_unreg_mgt_agt:
2061	sbp_management_agent_unregister(tport->mgt_agt);
2062out_free_tpg:
2063	tport->tpg = NULL;
2064	kfree(tpg);
2065	return ERR_PTR(ret);
2066}
2067
2068static void sbp_drop_tpg(struct se_portal_group *se_tpg)
2069{
2070	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2071	struct sbp_tport *tport = tpg->tport;
2072
2073	core_tpg_deregister(se_tpg);
2074	sbp_management_agent_unregister(tport->mgt_agt);
2075	tport->tpg = NULL;
2076	kfree(tpg);
2077}
2078
2079static struct se_wwn *sbp_make_tport(
2080		struct target_fabric_configfs *tf,
2081		struct config_group *group,
2082		const char *name)
2083{
2084	struct sbp_tport *tport;
2085	u64 guid = 0;
2086
2087	if (sbp_parse_wwn(name, &guid) < 0)
2088		return ERR_PTR(-EINVAL);
2089
2090	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2091	if (!tport) {
2092		pr_err("Unable to allocate struct sbp_tport\n");
2093		return ERR_PTR(-ENOMEM);
2094	}
2095
2096	tport->guid = guid;
2097	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2098
2099	return &tport->tport_wwn;
2100}
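/*
 * Configfs flow, assuming the usual target layout under
 * /sys/kernel/config: "mkdir target/sbp/<16-hex-digit-guid>" invokes
 * sbp_make_tport(), and "mkdir .../tpgt_1" beneath it invokes
 * sbp_make_tpg(). Names that fail sbp_parse_wwn() are rejected.
 */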
2101
2102static void sbp_drop_tport(struct se_wwn *wwn)
2103{
2104	struct sbp_tport *tport =
2105		container_of(wwn, struct sbp_tport, tport_wwn);
2106
2107	kfree(tport);
2108}
2109
2110static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
2111{
2112	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
2113}
2114
2115CONFIGFS_ATTR_RO(sbp_wwn_, version);
2116
2117static struct configfs_attribute *sbp_wwn_attrs[] = {
2118	&sbp_wwn_attr_version,
2119	NULL,
2120};
2121
2122static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
2123{
2124	struct se_portal_group *se_tpg = to_tpg(item);
2125	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2126	struct sbp_tport *tport = tpg->tport;
2127
2128	if (tport->directory_id == -1)
2129		return sprintf(page, "implicit\n");
2130	else
2131		return sprintf(page, "%06x\n", tport->directory_id);
2132}
2133
2134static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
2135		const char *page, size_t count)
2136{
2137	struct se_portal_group *se_tpg = to_tpg(item);
2138	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2139	struct sbp_tport *tport = tpg->tport;
2140	unsigned long val;
2141
2142	if (tport->enable) {
2143		pr_err("Cannot change the directory_id on an active target.\n");
2144		return -EBUSY;
2145	}
2146
2147	if (strstr(page, "implicit") == page) {
2148		tport->directory_id = -1;
2149	} else {
2150		if (kstrtoul(page, 16, &val) < 0)
2151			return -EINVAL;
2152		if (val > 0xffffff)
2153			return -EINVAL;
2154
2155		tport->directory_id = val;
2156	}
2157
2158	return count;
2159}
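/*
 * e.g. "echo cafe01 > directory_id" stores 0xcafe01, while
 * "echo implicit" reverts to an implicitly assigned directory ID.
 */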
2160
2161static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
2162{
2163	struct se_portal_group *se_tpg = to_tpg(item);
2164	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2165	struct sbp_tport *tport = tpg->tport;
2166	return sprintf(page, "%d\n", tport->enable);
2167}
2168
2169static ssize_t sbp_tpg_enable_store(struct config_item *item,
2170		const char *page, size_t count)
2171{
2172	struct se_portal_group *se_tpg = to_tpg(item);
2173	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2174	struct sbp_tport *tport = tpg->tport;
2175	unsigned long val;
2176	int ret;
2177
2178	if (kstrtoul(page, 0, &val) < 0)
2179		return -EINVAL;
2180	if ((val != 0) && (val != 1))
2181		return -EINVAL;
2182
2183	if (tport->enable == val)
2184		return count;
2185
2186	if (val) {
2187		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
2188			pr_err("Cannot enable a target with no LUNs!\n");
2189			return -EINVAL;
2190		}
2191	} else {
2192		/* XXX: force-shutdown sessions instead? */
2193		spin_lock_bh(&se_tpg->session_lock);
2194		if (!list_empty(&se_tpg->tpg_sess_list)) {
2195			spin_unlock_bh(&se_tpg->session_lock);
2196			return -EBUSY;
2197		}
2198		spin_unlock_bh(&se_tpg->session_lock);
2199	}
2200
2201	tport->enable = val;
2202
2203	ret = sbp_update_unit_directory(tport);
2204	if (ret < 0) {
2205		pr_err("Could not update Config ROM\n");
2206		return ret;
2207	}
2208
2209	return count;
2210}
2211
2212CONFIGFS_ATTR(sbp_tpg_, directory_id);
2213CONFIGFS_ATTR(sbp_tpg_, enable);
2214
2215static struct configfs_attribute *sbp_tpg_base_attrs[] = {
2216	&sbp_tpg_attr_directory_id,
2217	&sbp_tpg_attr_enable,
2218	NULL,
2219};
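/*
 * Minimal bring-up sketch from userspace (GUID is hypothetical); a LUN
 * must be linked first, since enable_store() rejects a TPG without LUNs:
 *   echo 1 > .../sbp/0001020304050607/tpgt_1/enable
 */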
2220
2221static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
2222		char *page)
2223{
2224	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2225	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2226	struct sbp_tport *tport = tpg->tport;
2227	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
2228}
2229
2230static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
2231		const char *page, size_t count)
2232{
2233	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2234	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2235	struct sbp_tport *tport = tpg->tport;
2236	unsigned long val;
2237	int ret;
2238
2239	if (kstrtoul(page, 0, &val) < 0)
2240		return -EINVAL;
2241	if ((val < 1) || (val > 127))
2242		return -EINVAL;
2243
2244	if (tport->mgt_orb_timeout == val)
2245		return count;
2246
2247	tport->mgt_orb_timeout = val;
2248
2249	ret = sbp_update_unit_directory(tport);
2250	if (ret < 0)
2251		return ret;
2252
2253	return count;
2254}
2255
2256static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
2257		char *page)
2258{
2259	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2260	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2261	struct sbp_tport *tport = tpg->tport;
2262	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
2263}
2264
2265static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
2266		const char *page, size_t count)
2267{
2268	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2269	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2270	struct sbp_tport *tport = tpg->tport;
2271	unsigned long val;
2272	int ret;
2273
2274	if (kstrtoul(page, 0, &val) < 0)
2275		return -EINVAL;
2276	if ((val < 1) || (val > 32767))
2277		return -EINVAL;
2278
2279	if (tport->max_reconnect_timeout == val)
2280		return count;
2281
2282	tport->max_reconnect_timeout = val;
2283
2284	ret = sbp_update_unit_directory(tport);
2285	if (ret < 0)
2286		return ret;
2287
2288	return count;
2289}
2290
2291static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
2292		char *page)
2293{
2294	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2295	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2296	struct sbp_tport *tport = tpg->tport;
2297	return sprintf(page, "%d\n", tport->max_logins_per_lun);
2298}
2299
2300static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
2301		const char *page, size_t count)
2302{
2303	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2304	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2305	struct sbp_tport *tport = tpg->tport;
2306	unsigned long val;
2307
2308	if (kstrtoul(page, 0, &val) < 0)
2309		return -EINVAL;
2310	if ((val < 1) || (val > 127))
2311		return -EINVAL;
2312
2313	/* XXX: also check against current count? */
2314
2315	tport->max_logins_per_lun = val;
2316
2317	return count;
2318}
2319
2320CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
2321CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
2322CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);
2323
2324static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2325	&sbp_tpg_attrib_attr_mgt_orb_timeout,
2326	&sbp_tpg_attrib_attr_max_reconnect_timeout,
2327	&sbp_tpg_attrib_attr_max_logins_per_lun,
2328	NULL,
2329};
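/*
 * Stores into attrib/ are bounds-checked: 1-127 for mgt_orb_timeout and
 * max_logins_per_lun, 1-32767 for max_reconnect_timeout. Changing either
 * timeout also rebuilds the Config ROM via sbp_update_unit_directory().
 */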
2330
2331static const struct target_core_fabric_ops sbp_ops = {
2332	.module				= THIS_MODULE,
2333	.name				= "sbp",
2334	.get_fabric_name		= sbp_get_fabric_name,
2335	.tpg_get_wwn			= sbp_get_fabric_wwn,
2336	.tpg_get_tag			= sbp_get_tag,
2337	.tpg_check_demo_mode		= sbp_check_true,
2338	.tpg_check_demo_mode_cache	= sbp_check_true,
2339	.tpg_check_demo_mode_write_protect = sbp_check_false,
2340	.tpg_check_prod_mode_write_protect = sbp_check_false,
2341	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
2342	.release_cmd			= sbp_release_cmd,
2343	.sess_get_index			= sbp_sess_get_index,
2344	.write_pending			= sbp_write_pending,
2345	.write_pending_status		= sbp_write_pending_status,
2346	.set_default_node_attributes	= sbp_set_default_node_attrs,
2347	.get_cmd_state			= sbp_get_cmd_state,
2348	.queue_data_in			= sbp_queue_data_in,
2349	.queue_status			= sbp_queue_status,
2350	.queue_tm_rsp			= sbp_queue_tm_rsp,
2351	.aborted_task			= sbp_aborted_task,
2352	.check_stop_free		= sbp_check_stop_free,
2353
2354	.fabric_make_wwn		= sbp_make_tport,
2355	.fabric_drop_wwn		= sbp_drop_tport,
2356	.fabric_make_tpg		= sbp_make_tpg,
2357	.fabric_drop_tpg		= sbp_drop_tpg,
2358	.fabric_post_link		= sbp_post_link_lun,
2359	.fabric_pre_unlink		= sbp_pre_unlink_lun,
2360	.fabric_make_np			= NULL,
2361	.fabric_drop_np			= NULL,
2362	.fabric_init_nodeacl		= sbp_init_nodeacl,
2363
2364	.tfc_wwn_attrs			= sbp_wwn_attrs,
2365	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
2366	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
2367};
2368
2369static int __init sbp_init(void)
2370{
2371	return target_register_template(&sbp_ops);
2372}
2373
2374static void __exit sbp_exit(void)
2375{
2376	target_unregister_template(&sbp_ops);
2377}
2378
2379MODULE_DESCRIPTION("FireWire SBP fabric driver");
2380MODULE_LICENSE("GPL");
2381module_init(sbp_init);
2382module_exit(sbp_exit);
  97		sess = se_sess->fabric_sess_ptr;
  98		if (sess->guid == guid)
  99			found = sess;
 100	}
 101	spin_unlock_bh(&tpg->se_tpg.session_lock);
 102
 103	return found;
 104}
 105
 106static struct sbp_login_descriptor *sbp_login_find_by_lun(
 107		struct sbp_session *session, u32 unpacked_lun)
 108{
 109	struct sbp_login_descriptor *login, *found = NULL;
 110
 111	spin_lock_bh(&session->lock);
 112	list_for_each_entry(login, &session->login_list, link) {
 113		if (login->login_lun == unpacked_lun)
 114			found = login;
 115	}
 116	spin_unlock_bh(&session->lock);
 117
 118	return found;
 119}
 120
 121static int sbp_login_count_all_by_lun(
 122		struct sbp_tpg *tpg,
 123		u32 unpacked_lun,
 124		int exclusive)
 125{
 126	struct se_session *se_sess;
 127	struct sbp_session *sess;
 128	struct sbp_login_descriptor *login;
 129	int count = 0;
 130
 131	spin_lock_bh(&tpg->se_tpg.session_lock);
 132	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 133		sess = se_sess->fabric_sess_ptr;
 134
 135		spin_lock_bh(&sess->lock);
 136		list_for_each_entry(login, &sess->login_list, link) {
 137			if (login->login_lun != unpacked_lun)
 138				continue;
 139
 140			if (!exclusive || login->exclusive)
 141				count++;
 142		}
 143		spin_unlock_bh(&sess->lock);
 144	}
 145	spin_unlock_bh(&tpg->se_tpg.session_lock);
 146
 147	return count;
 148}
 149
 150static struct sbp_login_descriptor *sbp_login_find_by_id(
 151	struct sbp_tpg *tpg, int login_id)
 152{
 153	struct se_session *se_sess;
 154	struct sbp_session *sess;
 155	struct sbp_login_descriptor *login, *found = NULL;
 156
 157	spin_lock_bh(&tpg->se_tpg.session_lock);
 158	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 159		sess = se_sess->fabric_sess_ptr;
 160
 161		spin_lock_bh(&sess->lock);
 162		list_for_each_entry(login, &sess->login_list, link) {
 163			if (login->login_id == login_id)
 164				found = login;
 165		}
 166		spin_unlock_bh(&sess->lock);
 167	}
 168	spin_unlock_bh(&tpg->se_tpg.session_lock);
 169
 170	return found;
 171}
 172
 173static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
 174{
 175	struct se_portal_group *se_tpg = &tpg->se_tpg;
 176	struct se_lun *se_lun;
 177
 178	rcu_read_lock();
 179	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
 180		if (se_lun->unpacked_lun == login_lun) {
 181			rcu_read_unlock();
 182			*err = 0;
 183			return login_lun;
 184		}
 185	}
 186	rcu_read_unlock();
 187
 188	*err = -ENODEV;
 189	return login_lun;
 190}
 191
 192static struct sbp_session *sbp_session_create(
 193		struct sbp_tpg *tpg,
 194		u64 guid)
 195{
 196	struct sbp_session *sess;
 197	int ret;
 198	char guid_str[17];
 199
 200	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
 201
 202	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
 203	if (!sess) {
 204		pr_err("failed to allocate session descriptor\n");
 205		return ERR_PTR(-ENOMEM);
 206	}
 207	spin_lock_init(&sess->lock);
 208	INIT_LIST_HEAD(&sess->login_list);
 209	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
 210	sess->guid = guid;
 211
 212	sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
 213					     sizeof(struct sbp_target_request),
 214					     TARGET_PROT_NORMAL, guid_str,
 215					     sess, NULL);
 216	if (IS_ERR(sess->se_sess)) {
 217		pr_err("failed to init se_session\n");
 218		ret = PTR_ERR(sess->se_sess);
 219		kfree(sess);
 220		return ERR_PTR(ret);
 221	}
 222
 223	return sess;
 224}
 225
 226static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
 227{
 228	spin_lock_bh(&sess->lock);
 229	if (!list_empty(&sess->login_list)) {
 230		spin_unlock_bh(&sess->lock);
 231		return;
 232	}
 233	spin_unlock_bh(&sess->lock);
 234
 235	if (cancel_work)
 236		cancel_delayed_work_sync(&sess->maint_work);
 237
 238	transport_deregister_session_configfs(sess->se_sess);
 239	transport_deregister_session(sess->se_sess);
 240
 241	if (sess->card)
 242		fw_card_put(sess->card);
 243
 244	kfree(sess);
 245}
 246
 247static void sbp_target_agent_unregister(struct sbp_target_agent *);
 248
 249static void sbp_login_release(struct sbp_login_descriptor *login,
 250	bool cancel_work)
 251{
 252	struct sbp_session *sess = login->sess;
 253
 254	/* FIXME: abort/wait on tasks */
 255
 256	sbp_target_agent_unregister(login->tgt_agt);
 257
 258	if (sess) {
 259		spin_lock_bh(&sess->lock);
 260		list_del(&login->link);
 261		spin_unlock_bh(&sess->lock);
 262
 263		sbp_session_release(sess, cancel_work);
 264	}
 265
 266	kfree(login);
 267}
 268
 269static struct sbp_target_agent *sbp_target_agent_register(
 270	struct sbp_login_descriptor *);
 271
 272static void sbp_management_request_login(
 273	struct sbp_management_agent *agent, struct sbp_management_request *req,
 274	int *status_data_size)
 275{
 276	struct sbp_tport *tport = agent->tport;
 277	struct sbp_tpg *tpg = tport->tpg;
 278	struct sbp_session *sess;
 279	struct sbp_login_descriptor *login;
 280	struct sbp_login_response_block *response;
 281	u64 guid;
 282	u32 unpacked_lun;
 283	int login_response_len, ret;
 284
 285	unpacked_lun = sbp_get_lun_from_tpg(tpg,
 286			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
 287	if (ret) {
 288		pr_notice("login to unknown LUN: %d\n",
 289			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
 290
 291		req->status.status = cpu_to_be32(
 292			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 293			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
 294		return;
 295	}
 296
 297	ret = read_peer_guid(&guid, req);
 298	if (ret != RCODE_COMPLETE) {
 299		pr_warn("failed to read peer GUID: %d\n", ret);
 300
 301		req->status.status = cpu_to_be32(
 302			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 303			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 304		return;
 305	}
 306
 307	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
 308		unpacked_lun, guid);
 309
 310	sess = sbp_session_find_by_guid(tpg, guid);
 311	if (sess) {
 312		login = sbp_login_find_by_lun(sess, unpacked_lun);
 313		if (login) {
 314			pr_notice("initiator already logged-in\n");
 315
 316			/*
 317			 * SBP-2 R4 says we should return access denied, but
 318			 * that can confuse initiators. Instead we need to
 319			 * treat this like a reconnect, but send the login
 320			 * response block like a fresh login.
 321			 *
 322			 * This is required particularly in the case of Apple
 323			 * devices booting off the FireWire target, where
 324			 * the firmware has an active login to the target. When
 325			 * the OS takes control of the session it issues its own
 326			 * LOGIN rather than a RECONNECT. To avoid the machine
 327			 * waiting until the reconnect_hold expires, we can skip
 328			 * the ACCESS_DENIED errors to speed things up.
 329			 */
 330
 331			goto already_logged_in;
 332		}
 333	}
 334
 335	/*
 336	 * check exclusive bit in login request
 337	 * reject with access_denied if any logins present
 338	 */
 339	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
 340			sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
 341		pr_warn("refusing exclusive login with other active logins\n");
 342
 343		req->status.status = cpu_to_be32(
 344			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 345			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 346		return;
 347	}
 348
 349	/*
 350	 * check exclusive bit in any existing login descriptor
 351	 * reject with access_denied if any exclusive logins present
 352	 */
 353	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
 354		pr_warn("refusing login while another exclusive login present\n");
 355
 356		req->status.status = cpu_to_be32(
 357			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 358			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 359		return;
 360	}
 361
 362	/*
 363	 * check we haven't exceeded the number of allowed logins
 364	 * reject with resources_unavailable if we have
 365	 */
 366	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
 367			tport->max_logins_per_lun) {
 368		pr_warn("max number of logins reached\n");
 369
 370		req->status.status = cpu_to_be32(
 371			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 372			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 373		return;
 374	}
 375
 376	if (!sess) {
 377		sess = sbp_session_create(tpg, guid);
 378		if (IS_ERR(sess)) {
 379			switch (PTR_ERR(sess)) {
 380			case -EPERM:
 381				ret = SBP_STATUS_ACCESS_DENIED;
 382				break;
 383			default:
 384				ret = SBP_STATUS_RESOURCES_UNAVAIL;
 385				break;
 386			}
 387
 388			req->status.status = cpu_to_be32(
 389				STATUS_BLOCK_RESP(
 390					STATUS_RESP_REQUEST_COMPLETE) |
 391				STATUS_BLOCK_SBP_STATUS(ret));
 392			return;
 393		}
 394
 395		sess->node_id = req->node_addr;
 396		sess->card = fw_card_get(req->card);
 397		sess->generation = req->generation;
 398		sess->speed = req->speed;
 399
 400		schedule_delayed_work(&sess->maint_work,
 401				SESSION_MAINTENANCE_INTERVAL);
 402	}
 403
 404	/* only take the latest reconnect_hold into account */
 405	sess->reconnect_hold = min(
 406		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
 407		tport->max_reconnect_timeout) - 1;
 408
 409	login = kmalloc(sizeof(*login), GFP_KERNEL);
 410	if (!login) {
 411		pr_err("failed to allocate login descriptor\n");
 412
 413		sbp_session_release(sess, true);
 414
 415		req->status.status = cpu_to_be32(
 416			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 417			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 418		return;
 419	}
 420
 421	login->sess = sess;
 422	login->login_lun = unpacked_lun;
 423	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
 424	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
 425	login->login_id = atomic_inc_return(&login_id);
 426
 427	login->tgt_agt = sbp_target_agent_register(login);
 428	if (IS_ERR(login->tgt_agt)) {
 429		ret = PTR_ERR(login->tgt_agt);
 430		pr_err("failed to map command block handler: %d\n", ret);
 431
 432		sbp_session_release(sess, true);
 433		kfree(login);
 434
 435		req->status.status = cpu_to_be32(
 436			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 437			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 438		return;
 439	}
 440
 441	spin_lock_bh(&sess->lock);
 442	list_add_tail(&login->link, &sess->login_list);
 443	spin_unlock_bh(&sess->lock);
 444
 445already_logged_in:
 446	response = kzalloc(sizeof(*response), GFP_KERNEL);
 447	if (!response) {
 448		pr_err("failed to allocate login response block\n");
 449
 450		sbp_login_release(login, true);
 451
 452		req->status.status = cpu_to_be32(
 453			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 454			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 455		return;
 456	}
 457
 458	login_response_len = clamp_val(
 459			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
 460			12, sizeof(*response));
 461	response->misc = cpu_to_be32(
 462		((login_response_len & 0xffff) << 16) |
 463		(login->login_id & 0xffff));
 464	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
 465	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
 466		&response->command_block_agent);
 467
 468	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
 469		sess->node_id, sess->generation, sess->speed,
 470		sbp2_pointer_to_addr(&req->orb.ptr2), response,
 471		login_response_len);
 472	if (ret != RCODE_COMPLETE) {
 473		pr_debug("failed to write login response block: %x\n", ret);
 474
 475		kfree(response);
 476		sbp_login_release(login, true);
 477
 478		req->status.status = cpu_to_be32(
 479			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 480			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 481		return;
 482	}
 483
 484	kfree(response);
 485
 486	req->status.status = cpu_to_be32(
 487		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 488		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 489}
 490
 491static void sbp_management_request_query_logins(
 492	struct sbp_management_agent *agent, struct sbp_management_request *req,
 493	int *status_data_size)
 494{
 495	pr_notice("QUERY LOGINS not implemented\n");
 496	/* FIXME: implement */
 497
 498	req->status.status = cpu_to_be32(
 499		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 500		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
 501}
 502
 503static void sbp_management_request_reconnect(
 504	struct sbp_management_agent *agent, struct sbp_management_request *req,
 505	int *status_data_size)
 506{
 507	struct sbp_tport *tport = agent->tport;
 508	struct sbp_tpg *tpg = tport->tpg;
 509	int ret;
 510	u64 guid;
 511	struct sbp_login_descriptor *login;
 512
 513	ret = read_peer_guid(&guid, req);
 514	if (ret != RCODE_COMPLETE) {
 515		pr_warn("failed to read peer GUID: %d\n", ret);
 516
 517		req->status.status = cpu_to_be32(
 518			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 519			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 520		return;
 521	}
 522
 523	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
 524
 525	login = sbp_login_find_by_id(tpg,
 526		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
 527
 528	if (!login) {
 529		pr_err("mgt_agent RECONNECT unknown login ID\n");
 530
 531		req->status.status = cpu_to_be32(
 532			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 533			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 534		return;
 535	}
 536
 537	if (login->sess->guid != guid) {
 538		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
 539
 540		req->status.status = cpu_to_be32(
 541			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 542			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 543		return;
 544	}
 545
 546	spin_lock_bh(&login->sess->lock);
 547	if (login->sess->card)
 548		fw_card_put(login->sess->card);
 549
 550	/* update the node details */
 551	login->sess->generation = req->generation;
 552	login->sess->node_id = req->node_addr;
 553	login->sess->card = fw_card_get(req->card);
 554	login->sess->speed = req->speed;
 555	spin_unlock_bh(&login->sess->lock);
 556
 557	req->status.status = cpu_to_be32(
 558		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 559		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 560}
 561
 562static void sbp_management_request_logout(
 563	struct sbp_management_agent *agent, struct sbp_management_request *req,
 564	int *status_data_size)
 565{
 566	struct sbp_tport *tport = agent->tport;
 567	struct sbp_tpg *tpg = tport->tpg;
 568	int id;
 569	struct sbp_login_descriptor *login;
 570
 571	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
 572
 573	login = sbp_login_find_by_id(tpg, id);
 574	if (!login) {
 575		pr_warn("cannot find login: %d\n", id);
 576
 577		req->status.status = cpu_to_be32(
 578			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 579			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
 580		return;
 581	}
 582
 583	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
 584		login->login_lun, login->login_id);
 585
 586	if (req->node_addr != login->sess->node_id) {
 587		pr_warn("logout from different node ID\n");
 588
 589		req->status.status = cpu_to_be32(
 590			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 591			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 592		return;
 593	}
 594
 595	sbp_login_release(login, true);
 596
 597	req->status.status = cpu_to_be32(
 598		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 599		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 600}
 601
 602static void session_check_for_reset(struct sbp_session *sess)
 603{
 604	bool card_valid = false;
 605
 606	spin_lock_bh(&sess->lock);
 607
 608	if (sess->card) {
 609		spin_lock_irq(&sess->card->lock);
 610		card_valid = (sess->card->local_node != NULL);
 611		spin_unlock_irq(&sess->card->lock);
 612
 613		if (!card_valid) {
 614			fw_card_put(sess->card);
 615			sess->card = NULL;
 616		}
 617	}
 618
 619	if (!card_valid || (sess->generation != sess->card->generation)) {
 620		pr_info("Waiting for reconnect from node: %016llx\n",
 621				sess->guid);
 622
 623		sess->node_id = -1;
 624		sess->reconnect_expires = get_jiffies_64() +
 625			((sess->reconnect_hold + 1) * HZ);
 626	}
 627
 628	spin_unlock_bh(&sess->lock);
 629}
 630
 631static void session_reconnect_expired(struct sbp_session *sess)
 632{
 633	struct sbp_login_descriptor *login, *temp;
 634	LIST_HEAD(login_list);
 635
 636	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
 637
 638	spin_lock_bh(&sess->lock);
 639	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
 640		login->sess = NULL;
 641		list_move_tail(&login->link, &login_list);
 642	}
 643	spin_unlock_bh(&sess->lock);
 644
 645	list_for_each_entry_safe(login, temp, &login_list, link) {
 646		list_del(&login->link);
 647		sbp_login_release(login, false);
 648	}
 649
 650	sbp_session_release(sess, false);
 651}
 652
 653static void session_maintenance_work(struct work_struct *work)
 654{
 655	struct sbp_session *sess = container_of(work, struct sbp_session,
 656			maint_work.work);
 657
 658	/* could be called while tearing down the session */
 659	spin_lock_bh(&sess->lock);
 660	if (list_empty(&sess->login_list)) {
 661		spin_unlock_bh(&sess->lock);
 662		return;
 663	}
 664	spin_unlock_bh(&sess->lock);
 665
 666	if (sess->node_id != -1) {
 667		/* check for bus reset and make node_id invalid */
 668		session_check_for_reset(sess);
 669
 670		schedule_delayed_work(&sess->maint_work,
 671				SESSION_MAINTENANCE_INTERVAL);
 672	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
 673		/* still waiting for reconnect */
 674		schedule_delayed_work(&sess->maint_work,
 675				SESSION_MAINTENANCE_INTERVAL);
 676	} else {
 677		/* reconnect timeout has expired */
 678		session_reconnect_expired(sess);
 679	}
 680}
 681
 682static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
 683		struct sbp_target_agent *agent)
 684{
 685	int state;
 686
 687	switch (tcode) {
 688	case TCODE_READ_QUADLET_REQUEST:
 689		pr_debug("tgt_agent AGENT_STATE READ\n");
 690
 691		spin_lock_bh(&agent->lock);
 692		state = agent->state;
 693		spin_unlock_bh(&agent->lock);
 694
 695		*(__be32 *)data = cpu_to_be32(state);
 696
 697		return RCODE_COMPLETE;
 698
 699	case TCODE_WRITE_QUADLET_REQUEST:
 700		/* ignored */
 701		return RCODE_COMPLETE;
 702
 703	default:
 704		return RCODE_TYPE_ERROR;
 705	}
 706}
 707
 708static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
 709		struct sbp_target_agent *agent)
 710{
 711	switch (tcode) {
 712	case TCODE_WRITE_QUADLET_REQUEST:
 713		pr_debug("tgt_agent AGENT_RESET\n");
 714		spin_lock_bh(&agent->lock);
 715		agent->state = AGENT_STATE_RESET;
 716		spin_unlock_bh(&agent->lock);
 717		return RCODE_COMPLETE;
 718
 719	default:
 720		return RCODE_TYPE_ERROR;
 721	}
 722}
 723
 724static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
 725		struct sbp_target_agent *agent)
 726{
 727	struct sbp2_pointer *ptr = data;
 728
 729	switch (tcode) {
 730	case TCODE_WRITE_BLOCK_REQUEST:
 731		spin_lock_bh(&agent->lock);
 732		if (agent->state != AGENT_STATE_SUSPENDED &&
 733				agent->state != AGENT_STATE_RESET) {
 734			spin_unlock_bh(&agent->lock);
 735			pr_notice("Ignoring ORB_POINTER write while active.\n");
 736			return RCODE_CONFLICT_ERROR;
 737		}
 738		agent->state = AGENT_STATE_ACTIVE;
 739		spin_unlock_bh(&agent->lock);
 740
 741		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
 742		agent->doorbell = false;
 743
 744		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
 745				agent->orb_pointer);
 746
 747		queue_work(system_unbound_wq, &agent->work);
 748
 749		return RCODE_COMPLETE;
 750
 751	case TCODE_READ_BLOCK_REQUEST:
 752		pr_debug("tgt_agent ORB_POINTER READ\n");
 753		spin_lock_bh(&agent->lock);
 754		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
 755		spin_unlock_bh(&agent->lock);
 756		return RCODE_COMPLETE;
 757
 758	default:
 759		return RCODE_TYPE_ERROR;
 760	}
 761}
 762
 763static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
 764		struct sbp_target_agent *agent)
 765{
 766	switch (tcode) {
 767	case TCODE_WRITE_QUADLET_REQUEST:
 768		spin_lock_bh(&agent->lock);
 769		if (agent->state != AGENT_STATE_SUSPENDED) {
 770			spin_unlock_bh(&agent->lock);
 771			pr_debug("Ignoring DOORBELL while active.\n");
 772			return RCODE_CONFLICT_ERROR;
 773		}
 774		agent->state = AGENT_STATE_ACTIVE;
 775		spin_unlock_bh(&agent->lock);
 776
 777		agent->doorbell = true;
 778
 779		pr_debug("tgt_agent DOORBELL\n");
 780
 781		queue_work(system_unbound_wq, &agent->work);
 782
 783		return RCODE_COMPLETE;
 784
 785	case TCODE_READ_QUADLET_REQUEST:
 786		return RCODE_COMPLETE;
 787
 788	default:
 789		return RCODE_TYPE_ERROR;
 790	}
 791}
 792
 793static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
 794		int tcode, void *data, struct sbp_target_agent *agent)
 795{
 796	switch (tcode) {
 797	case TCODE_WRITE_QUADLET_REQUEST:
 798		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
 799		/* ignored as we don't send unsolicited status */
 800		return RCODE_COMPLETE;
 801
 802	case TCODE_READ_QUADLET_REQUEST:
 803		return RCODE_COMPLETE;
 804
 805	default:
 806		return RCODE_TYPE_ERROR;
 807	}
 808}
 809
 810static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
 811		int tcode, int destination, int source, int generation,
 812		unsigned long long offset, void *data, size_t length,
 813		void *callback_data)
 814{
 815	struct sbp_target_agent *agent = callback_data;
 816	struct sbp_session *sess = agent->login->sess;
 817	int sess_gen, sess_node, rcode;
 818
 819	spin_lock_bh(&sess->lock);
 820	sess_gen = sess->generation;
 821	sess_node = sess->node_id;
 822	spin_unlock_bh(&sess->lock);
 823
 824	if (generation != sess_gen) {
 825		pr_notice("ignoring request with wrong generation\n");
 826		rcode = RCODE_TYPE_ERROR;
 827		goto out;
 828	}
 829
 830	if (source != sess_node) {
 831		pr_notice("ignoring request from foreign node (%x != %x)\n",
 832				source, sess_node);
 833		rcode = RCODE_TYPE_ERROR;
 834		goto out;
 835	}
 836
 837	/* turn offset into the offset from the start of the block */
 838	offset -= agent->handler.offset;
 839
 840	if (offset == 0x00 && length == 4) {
 841		/* AGENT_STATE */
 842		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
 843	} else if (offset == 0x04 && length == 4) {
 844		/* AGENT_RESET */
 845		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
 846	} else if (offset == 0x08 && length == 8) {
 847		/* ORB_POINTER */
 848		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
 849	} else if (offset == 0x10 && length == 4) {
 850		/* DOORBELL */
 851		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
 852	} else if (offset == 0x14 && length == 4) {
 853		/* UNSOLICITED_STATUS_ENABLE */
 854		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
 855				data, agent);
 856	} else {
 857		rcode = RCODE_ADDRESS_ERROR;
 858	}
 859
 860out:
 861	fw_send_response(card, request, rcode);
 862}
 863
 864static void sbp_handle_command(struct sbp_target_request *);
 865static int sbp_send_status(struct sbp_target_request *);
 866static void sbp_free_request(struct sbp_target_request *);
 867
 868static void tgt_agent_process_work(struct work_struct *work)
 869{
 870	struct sbp_target_request *req =
 871		container_of(work, struct sbp_target_request, work);
 872
 873	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
 874			req->orb_pointer,
 875			sbp2_pointer_to_addr(&req->orb.next_orb),
 876			sbp2_pointer_to_addr(&req->orb.data_descriptor),
 877			be32_to_cpu(req->orb.misc));
 878
 879	if (req->orb_pointer >> 32)
 880		pr_debug("ORB with high bits set\n");
 881
 882	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
 883		case 0:/* Format specified by this standard */
 884			sbp_handle_command(req);
 885			return;
 886		case 1: /* Reserved for future standardization */
 887		case 2: /* Vendor-dependent */
 888			req->status.status |= cpu_to_be32(
 889					STATUS_BLOCK_RESP(
 890						STATUS_RESP_REQUEST_COMPLETE) |
 891					STATUS_BLOCK_DEAD(0) |
 892					STATUS_BLOCK_LEN(1) |
 893					STATUS_BLOCK_SBP_STATUS(
 894						SBP_STATUS_REQ_TYPE_NOTSUPP));
 895			sbp_send_status(req);
 896			return;
 897		case 3: /* Dummy ORB */
 898			req->status.status |= cpu_to_be32(
 899					STATUS_BLOCK_RESP(
 900						STATUS_RESP_REQUEST_COMPLETE) |
 901					STATUS_BLOCK_DEAD(0) |
 902					STATUS_BLOCK_LEN(1) |
 903					STATUS_BLOCK_SBP_STATUS(
 904						SBP_STATUS_DUMMY_ORB_COMPLETE));
 905			sbp_send_status(req);
 906			return;
 907		default:
 908			BUG();
 909	}
 910}
 911
 912/* used to double-check we haven't been issued an AGENT_RESET */
 913static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
 914{
 915	bool active;
 916
 917	spin_lock_bh(&agent->lock);
 918	active = (agent->state == AGENT_STATE_ACTIVE);
 919	spin_unlock_bh(&agent->lock);
 920
 921	return active;
 922}
 923
 924static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
 925	struct fw_card *card, u64 next_orb)
 926{
 927	struct se_session *se_sess = sess->se_sess;
 928	struct sbp_target_request *req;
 929	int tag;
 930
 931	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
 932	if (tag < 0)
 933		return ERR_PTR(-ENOMEM);
 934
 935	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
 936	memset(req, 0, sizeof(*req));
 937	req->se_cmd.map_tag = tag;
 938	req->se_cmd.tag = next_orb;
 939
 940	return req;
 941}
 942
 943static void tgt_agent_fetch_work(struct work_struct *work)
 944{
 945	struct sbp_target_agent *agent =
 946		container_of(work, struct sbp_target_agent, work);
 947	struct sbp_session *sess = agent->login->sess;
 948	struct sbp_target_request *req;
 949	int ret;
 950	bool doorbell = agent->doorbell;
 951	u64 next_orb = agent->orb_pointer;
 952
 953	while (next_orb && tgt_agent_check_active(agent)) {
 954		req = sbp_mgt_get_req(sess, sess->card, next_orb);
 955		if (IS_ERR(req)) {
 956			spin_lock_bh(&agent->lock);
 957			agent->state = AGENT_STATE_DEAD;
 958			spin_unlock_bh(&agent->lock);
 959			return;
 960		}
 961
 962		req->login = agent->login;
 963		req->orb_pointer = next_orb;
 964
 965		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
 966					req->orb_pointer >> 32));
 967		req->status.orb_low = cpu_to_be32(
 968				req->orb_pointer & 0xfffffffc);
 969
 970		/* read in the ORB */
 971		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
 972				sess->node_id, sess->generation, sess->speed,
 973				req->orb_pointer, &req->orb, sizeof(req->orb));
 974		if (ret != RCODE_COMPLETE) {
 975			pr_debug("tgt_orb fetch failed: %x\n", ret);
 976			req->status.status |= cpu_to_be32(
 977					STATUS_BLOCK_SRC(
 978						STATUS_SRC_ORB_FINISHED) |
 979					STATUS_BLOCK_RESP(
 980						STATUS_RESP_TRANSPORT_FAILURE) |
 981					STATUS_BLOCK_DEAD(1) |
 982					STATUS_BLOCK_LEN(1) |
 983					STATUS_BLOCK_SBP_STATUS(
 984						SBP_STATUS_UNSPECIFIED_ERROR));
 985			spin_lock_bh(&agent->lock);
 986			agent->state = AGENT_STATE_DEAD;
 987			spin_unlock_bh(&agent->lock);
 988
 989			sbp_send_status(req);
 990			return;
 991		}
 992
 993		/* check the next_ORB field */
 994		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
 995			next_orb = 0;
 996			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
 997						STATUS_SRC_ORB_FINISHED));
 998		} else {
 999			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
1000			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
1001						STATUS_SRC_ORB_CONTINUING));
1002		}
1003
1004		if (tgt_agent_check_active(agent) && !doorbell) {
1005			INIT_WORK(&req->work, tgt_agent_process_work);
1006			queue_work(system_unbound_wq, &req->work);
1007		} else {
1008			/* don't process this request, just check next_ORB */
1009			sbp_free_request(req);
1010		}
1011
1012		spin_lock_bh(&agent->lock);
1013		doorbell = agent->doorbell = false;
1014
1015		/* check if we should carry on processing */
1016		if (next_orb)
1017			agent->orb_pointer = next_orb;
1018		else
1019			agent->state = AGENT_STATE_SUSPENDED;
1020
1021		spin_unlock_bh(&agent->lock);
1022	};
1023}
1024
1025static struct sbp_target_agent *sbp_target_agent_register(
1026		struct sbp_login_descriptor *login)
1027{
1028	struct sbp_target_agent *agent;
1029	int ret;
1030
1031	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1032	if (!agent)
1033		return ERR_PTR(-ENOMEM);
1034
1035	spin_lock_init(&agent->lock);
1036
1037	agent->handler.length = 0x20;
1038	agent->handler.address_callback = tgt_agent_rw;
1039	agent->handler.callback_data = agent;
1040
1041	agent->login = login;
1042	agent->state = AGENT_STATE_RESET;
1043	INIT_WORK(&agent->work, tgt_agent_fetch_work);
1044	agent->orb_pointer = 0;
1045	agent->doorbell = false;
1046
1047	ret = fw_core_add_address_handler(&agent->handler,
1048			&sbp_register_region);
1049	if (ret < 0) {
1050		kfree(agent);
1051		return ERR_PTR(ret);
1052	}
1053
1054	return agent;
1055}
1056
1057static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
1058{
1059	fw_core_remove_address_handler(&agent->handler);
1060	cancel_work_sync(&agent->work);
1061	kfree(agent);
1062}
1063
1064/*
1065 * Simple wrapper around fw_run_transaction that retries the transaction several
1066 * times in case of failure, with an exponential backoff.
1067 */
1068static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
1069		int generation, int speed, unsigned long long offset,
1070		void *payload, size_t length)
1071{
1072	int attempt, ret, delay;
1073
1074	for (attempt = 1; attempt <= 5; attempt++) {
1075		ret = fw_run_transaction(card, tcode, destination_id,
1076				generation, speed, offset, payload, length);
1077
1078		switch (ret) {
1079		case RCODE_COMPLETE:
1080		case RCODE_TYPE_ERROR:
1081		case RCODE_ADDRESS_ERROR:
1082		case RCODE_GENERATION:
1083			return ret;
1084
1085		default:
1086			delay = 5 * attempt * attempt;
1087			usleep_range(delay, delay * 2);
1088		}
1089	}
1090
1091	return ret;
1092}
1093
1094/*
1095 * Wrapper around sbp_run_transaction that gets the card, destination,
1096 * generation and speed out of the request's session.
1097 */
1098static int sbp_run_request_transaction(struct sbp_target_request *req,
1099		int tcode, unsigned long long offset, void *payload,
1100		size_t length)
1101{
1102	struct sbp_login_descriptor *login = req->login;
1103	struct sbp_session *sess = login->sess;
1104	struct fw_card *card;
1105	int node_id, generation, speed, ret;
1106
1107	spin_lock_bh(&sess->lock);
1108	card = fw_card_get(sess->card);
1109	node_id = sess->node_id;
1110	generation = sess->generation;
1111	speed = sess->speed;
1112	spin_unlock_bh(&sess->lock);
1113
1114	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
1115			offset, payload, length);
1116
1117	fw_card_put(card);
1118
1119	return ret;
1120}
1121
1122static int sbp_fetch_command(struct sbp_target_request *req)
1123{
1124	int ret, cmd_len, copy_len;
1125
1126	cmd_len = scsi_command_size(req->orb.command_block);
1127
1128	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
1129	if (!req->cmd_buf)
1130		return -ENOMEM;
1131
1132	memcpy(req->cmd_buf, req->orb.command_block,
1133		min_t(int, cmd_len, sizeof(req->orb.command_block)));
1134
1135	if (cmd_len > sizeof(req->orb.command_block)) {
1136		pr_debug("sbp_fetch_command: filling in long command\n");
1137		copy_len = cmd_len - sizeof(req->orb.command_block);
1138
1139		ret = sbp_run_request_transaction(req,
1140				TCODE_READ_BLOCK_REQUEST,
1141				req->orb_pointer + sizeof(req->orb),
1142				req->cmd_buf + sizeof(req->orb.command_block),
1143				copy_len);
1144		if (ret != RCODE_COMPLETE)
1145			return -EIO;
1146	}
1147
1148	return 0;
1149}
1150
1151static int sbp_fetch_page_table(struct sbp_target_request *req)
1152{
1153	int pg_tbl_sz, ret;
1154	struct sbp_page_table_entry *pg_tbl;
1155
1156	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
1157		return 0;
1158
1159	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
1160		sizeof(struct sbp_page_table_entry);
1161
1162	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
1163	if (!pg_tbl)
1164		return -ENOMEM;
1165
1166	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
1167			sbp2_pointer_to_addr(&req->orb.data_descriptor),
1168			pg_tbl, pg_tbl_sz);
1169	if (ret != RCODE_COMPLETE) {
1170		kfree(pg_tbl);
1171		return -EIO;
1172	}
1173
1174	req->pg_tbl = pg_tbl;
1175	return 0;
1176}
1177
1178static void sbp_calc_data_length_direction(struct sbp_target_request *req,
1179	u32 *data_len, enum dma_data_direction *data_dir)
1180{
1181	int data_size, direction, idx;
1182
1183	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1184	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
1185
1186	if (!data_size) {
1187		*data_len = 0;
1188		*data_dir = DMA_NONE;
1189		return;
1190	}
1191
1192	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1193
1194	if (req->pg_tbl) {
1195		*data_len = 0;
1196		for (idx = 0; idx < data_size; idx++) {
1197			*data_len += be16_to_cpu(
1198					req->pg_tbl[idx].segment_length);
1199		}
1200	} else {
1201		*data_len = data_size;
1202	}
1203}
1204
1205static void sbp_handle_command(struct sbp_target_request *req)
1206{
1207	struct sbp_login_descriptor *login = req->login;
1208	struct sbp_session *sess = login->sess;
1209	int ret, unpacked_lun;
1210	u32 data_length;
1211	enum dma_data_direction data_dir;
1212
1213	ret = sbp_fetch_command(req);
1214	if (ret) {
1215		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
1216		goto err;
1217	}
1218
1219	ret = sbp_fetch_page_table(req);
1220	if (ret) {
1221		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
1222			ret);
1223		goto err;
1224	}
1225
1226	unpacked_lun = req->login->login_lun;
1227	sbp_calc_data_length_direction(req, &data_length, &data_dir);
1228
1229	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
1230			req->orb_pointer, unpacked_lun, data_length, data_dir);
1231
1232	/* only used for printk until we do TMRs */
1233	req->se_cmd.tag = req->orb_pointer;
1234	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
1235			      req->sense_buf, unpacked_lun, data_length,
1236			      TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
1237		goto err;
1238
1239	return;
1240
1241err:
1242	req->status.status |= cpu_to_be32(
1243		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1244		STATUS_BLOCK_DEAD(0) |
1245		STATUS_BLOCK_LEN(1) |
1246		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1247	sbp_send_status(req);
1248}
1249
1250/*
1251 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
1252 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
1253 */
1254static int sbp_rw_data(struct sbp_target_request *req)
1255{
1256	struct sbp_session *sess = req->login->sess;
1257	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
1258		generation, num_pte, length, tfr_length,
1259		rcode = RCODE_COMPLETE;
1260	struct sbp_page_table_entry *pte;
1261	unsigned long long offset;
1262	struct fw_card *card;
1263	struct sg_mapping_iter iter;
1264
1265	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
1266		tcode = TCODE_WRITE_BLOCK_REQUEST;
1267		sg_miter_flags = SG_MITER_FROM_SG;
1268	} else {
1269		tcode = TCODE_READ_BLOCK_REQUEST;
1270		sg_miter_flags = SG_MITER_TO_SG;
1271	}
1272
1273	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
1274	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
1275
1276	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
1277	if (pg_size) {
1278		pr_err("sbp_run_transaction: page size ignored\n");
1279		pg_size = 0x100 << pg_size;
1280	}
1281
1282	spin_lock_bh(&sess->lock);
1283	card = fw_card_get(sess->card);
1284	node_id = sess->node_id;
1285	generation = sess->generation;
1286	spin_unlock_bh(&sess->lock);
1287
1288	if (req->pg_tbl) {
1289		pte = req->pg_tbl;
1290		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1291
1292		offset = 0;
1293		length = 0;
1294	} else {
1295		pte = NULL;
1296		num_pte = 0;
1297
1298		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
1299		length = req->se_cmd.data_length;
1300	}
1301
1302	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
1303		sg_miter_flags);
1304
1305	while (length || num_pte) {
1306		if (!length) {
1307			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
1308				be32_to_cpu(pte->segment_base_lo);
1309			length = be16_to_cpu(pte->segment_length);
1310
1311			pte++;
1312			num_pte--;
1313		}
1314
1315		sg_miter_next(&iter);
1316
1317		tfr_length = min3(length, max_payload, (int)iter.length);
1318
1319		/* FIXME: take page_size into account */
1320
1321		rcode = sbp_run_transaction(card, tcode, node_id,
1322				generation, speed,
1323				offset, iter.addr, tfr_length);
1324
1325		if (rcode != RCODE_COMPLETE)
1326			break;
1327
1328		length -= tfr_length;
1329		offset += tfr_length;
1330		iter.consumed = tfr_length;
1331	}
1332
1333	sg_miter_stop(&iter);
1334	fw_card_put(card);
1335
1336	if (rcode == RCODE_COMPLETE) {
1337		WARN_ON(length != 0);
1338		return 0;
1339	} else {
1340		return -EIO;
1341	}
1342}
1343
1344static int sbp_send_status(struct sbp_target_request *req)
1345{
1346	int rc, ret = 0, length;
1347	struct sbp_login_descriptor *login = req->login;
1348
1349	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
1350
1351	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
1352			login->status_fifo_addr, &req->status, length);
1353	if (rc != RCODE_COMPLETE) {
1354		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
1355		ret = -EIO;
1356		goto put_ref;
1357	}
1358
1359	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
1360			req->orb_pointer);
1361	/*
1362	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
1363	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
1364	 * final se_cmd->cmd_kref put.
1365	 */
1366put_ref:
1367	target_put_sess_cmd(&req->se_cmd);
1368	return ret;
1369}
1370
1371static void sbp_sense_mangle(struct sbp_target_request *req)
1372{
1373	struct se_cmd *se_cmd = &req->se_cmd;
1374	u8 *sense = req->sense_buf;
1375	u8 *status = req->status.data;
1376
1377	WARN_ON(se_cmd->scsi_sense_length < 18);
1378
1379	switch (sense[0] & 0x7f) { 		/* sfmt */
1380	case 0x70: /* current, fixed */
1381		status[0] = 0 << 6;
1382		break;
1383	case 0x71: /* deferred, fixed */
1384		status[0] = 1 << 6;
1385		break;
1386	case 0x72: /* current, descriptor */
1387	case 0x73: /* deferred, descriptor */
1388	default:
1389		/*
1390		 * TODO: SBP-3 specifies what we should do with descriptor
1391		 * format sense data
1392		 */
1393		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
1394			sense[0]);
1395		req->status.status |= cpu_to_be32(
1396			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1397			STATUS_BLOCK_DEAD(0) |
1398			STATUS_BLOCK_LEN(1) |
1399			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
1400		return;
1401	}
1402
1403	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
1404	status[1] =
1405		(sense[0] & 0x80) |		/* valid */
1406		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
1407		(sense[2] & 0x0f);		/* sense_key */
1408	status[2] = se_cmd->scsi_asc;		/* sense_code */
1409	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */
1410
1411	/* information */
1412	status[4] = sense[3];
1413	status[5] = sense[4];
1414	status[6] = sense[5];
1415	status[7] = sense[6];
1416
1417	/* CDB-dependent */
1418	status[8] = sense[8];
1419	status[9] = sense[9];
1420	status[10] = sense[10];
1421	status[11] = sense[11];
1422
1423	/* fru */
1424	status[12] = sense[14];
1425
1426	/* sense_key-dependent */
1427	status[13] = sense[15];
1428	status[14] = sense[16];
1429	status[15] = sense[17];
1430
1431	req->status.status |= cpu_to_be32(
1432		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1433		STATUS_BLOCK_DEAD(0) |
1434		STATUS_BLOCK_LEN(5) |
1435		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1436}
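
/*
 * Worked example (illustrative values): a CHECK CONDITION carrying
 * fixed-format current sense (sense[0] = 0x70) with sense key
 * ILLEGAL REQUEST and ASC/ASCQ 0x24/0x00 (INVALID FIELD IN CDB)
 * mangles to:
 *
 *	status[0] = 0x02	sfmt 0, SCSI status CHECK CONDITION
 *	status[1] = 0x05	valid 0, mark/eom/ili 0, sense key 5
 *	status[2] = 0x24	sense_code
 *	status[3] = 0x00	sense_qualifier
 *
 * and STATUS_BLOCK_LEN(5) makes sbp_send_status() push (5 + 1) * 4 = 24
 * bytes: the 8-byte header plus these 16 bytes of mangled sense.
 */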
1437
1438static int sbp_send_sense(struct sbp_target_request *req)
1439{
1440	struct se_cmd *se_cmd = &req->se_cmd;
1441
1442	if (se_cmd->scsi_sense_length) {
1443		sbp_sense_mangle(req);
1444	} else {
1445		req->status.status |= cpu_to_be32(
1446			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1447			STATUS_BLOCK_DEAD(0) |
1448			STATUS_BLOCK_LEN(1) |
1449			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1450	}
1451
1452	return sbp_send_status(req);
1453}
1454
1455static void sbp_free_request(struct sbp_target_request *req)
1456{
1457	struct se_cmd *se_cmd = &req->se_cmd;
1458	struct se_session *se_sess = se_cmd->se_sess;
1459
1460	kfree(req->pg_tbl);
1461	kfree(req->cmd_buf);
1462
1463	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
1464}
1465
1466static void sbp_mgt_agent_process(struct work_struct *work)
1467{
1468	struct sbp_management_agent *agent =
1469		container_of(work, struct sbp_management_agent, work);
1470	struct sbp_management_request *req = agent->request;
1471	int ret;
1472	int status_data_len = 0;
1473
1474	/* fetch the ORB from the initiator */
1475	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
1476		req->node_addr, req->generation, req->speed,
1477		agent->orb_offset, &req->orb, sizeof(req->orb));
1478	if (ret != RCODE_COMPLETE) {
1479		pr_debug("mgt_orb fetch failed: %x\n", ret);
1480		goto out;
1481	}
1482
1483	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
1484		sbp2_pointer_to_addr(&req->orb.ptr1),
1485		sbp2_pointer_to_addr(&req->orb.ptr2),
1486		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
1487		sbp2_pointer_to_addr(&req->orb.status_fifo));
1488
1489	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
1490		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
1491		pr_err("mgt_orb bad request\n");
1492		goto out;
1493	}
1494
1495	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
1496	case MANAGEMENT_ORB_FUNCTION_LOGIN:
1497		sbp_management_request_login(agent, req, &status_data_len);
1498		break;
1499
1500	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
1501		sbp_management_request_query_logins(agent, req,
1502				&status_data_len);
1503		break;
1504
1505	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
1506		sbp_management_request_reconnect(agent, req, &status_data_len);
1507		break;
1508
1509	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
1510		pr_notice("SET PASSWORD not implemented\n");
1511
1512		req->status.status = cpu_to_be32(
1513			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1514			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1515
1516		break;
1517
1518	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
1519		sbp_management_request_logout(agent, req, &status_data_len);
1520		break;
1521
1522	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
1523		pr_notice("ABORT TASK not implemented\n");
1524
1525		req->status.status = cpu_to_be32(
1526			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1527			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1528
1529		break;
1530
1531	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
1532		pr_notice("ABORT TASK SET not implemented\n");
1533
1534		req->status.status = cpu_to_be32(
1535			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1536			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1537
1538		break;
1539
1540	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
1541		pr_notice("LOGICAL UNIT RESET not implemented\n");
1542
1543		req->status.status = cpu_to_be32(
1544			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1545			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1546
1547		break;
1548
1549	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
1550		pr_notice("TARGET RESET not implemented\n");
1551
1552		req->status.status = cpu_to_be32(
1553			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1554			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1555
1556		break;
1557
1558	default:
1559		pr_notice("unknown management function 0x%x\n",
1560			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
1561
1562		req->status.status = cpu_to_be32(
1563			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1564			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1565
1566		break;
1567	}
1568
1569	req->status.status |= cpu_to_be32(
1570		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
1571		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
1572		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
1573	req->status.orb_low = cpu_to_be32(agent->orb_offset);
1574
1575	/* write the status block back to the initiator */
1576	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
1577		req->node_addr, req->generation, req->speed,
1578		sbp2_pointer_to_addr(&req->orb.status_fifo),
1579		&req->status, 8 + status_data_len);
1580	if (ret != RCODE_COMPLETE) {
1581		pr_debug("mgt_orb status write failed: %x\n", ret);
1582		goto out;
1583	}
1584
1585out:
1586	fw_card_put(req->card);
1587	kfree(req);
1588
1589	spin_lock_bh(&agent->lock);
1590	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1591	spin_unlock_bh(&agent->lock);
1592}
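
/*
 * Shape of the status block written above, as implied by the code (not a
 * spec quotation): quadlet 0 carries src/resp/dead/len/sbp_status plus
 * the high 16 bits of the ORB offset, quadlet 1 the low 32 bits, and any
 * function-specific payload (e.g. a login response) follows -- hence the
 * "8 + status_data_len" transfer length.
 */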
1593
1594static void sbp_mgt_agent_rw(struct fw_card *card,
1595	struct fw_request *request, int tcode, int destination, int source,
1596	int generation, unsigned long long offset, void *data, size_t length,
1597	void *callback_data)
1598{
1599	struct sbp_management_agent *agent = callback_data;
1600	struct sbp2_pointer *ptr = data;
1601	int rcode = RCODE_ADDRESS_ERROR;
1602
1603	if (!agent->tport->enable)
1604		goto out;
1605
1606	if ((offset != agent->handler.offset) || (length != 8))
1607		goto out;
1608
1609	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
1610		struct sbp_management_request *req;
1611		int prev_state;
1612
1613		spin_lock_bh(&agent->lock);
1614		prev_state = agent->state;
1615		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
1616		spin_unlock_bh(&agent->lock);
1617
1618		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
1619			pr_notice("ignoring management request while busy\n");
1620			rcode = RCODE_CONFLICT_ERROR;
1621			goto out;
1622		}
1623		req = kzalloc(sizeof(*req), GFP_ATOMIC);
1624		if (!req) {
1625			/* undo BUSY so a failed allocation can't wedge the agent */
1626			spin_lock_bh(&agent->lock);
1627			agent->state = MANAGEMENT_AGENT_STATE_IDLE;
			spin_unlock_bh(&agent->lock);
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}
1628
1629		req->card = fw_card_get(card);
1630		req->generation = generation;
1631		req->node_addr = source;
1632		req->speed = fw_get_request_speed(request);
1633
1634		agent->orb_offset = sbp2_pointer_to_addr(ptr);
1635		agent->request = req;
1636
1637		queue_work(system_unbound_wq, &agent->work);
1638		rcode = RCODE_COMPLETE;
1639	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
1640		addr_to_sbp2_pointer(agent->orb_offset, ptr);
1641		rcode = RCODE_COMPLETE;
1642	} else {
1643		rcode = RCODE_TYPE_ERROR;
1644	}
1645
1646out:
1647	fw_send_response(card, request, rcode);
1648}
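
/*
 * So the management agent register works like this: a block write of an
 * 8-byte SBP-2 pointer hands us the address of a new management ORB and
 * queues the work item that fetches and executes it; a block read
 * returns the address of the last ORB accepted. Any other transaction is
 * answered with RCODE_TYPE_ERROR, and a bad offset or length with
 * RCODE_ADDRESS_ERROR.
 */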
1649
1650static struct sbp_management_agent *sbp_management_agent_register(
1651		struct sbp_tport *tport)
1652{
1653	int ret;
1654	struct sbp_management_agent *agent;
1655
1656	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1657	if (!agent)
1658		return ERR_PTR(-ENOMEM);
1659
1660	spin_lock_init(&agent->lock);
1661	agent->tport = tport;
1662	agent->handler.length = 0x08;
1663	agent->handler.address_callback = sbp_mgt_agent_rw;
1664	agent->handler.callback_data = agent;
1665	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1666	INIT_WORK(&agent->work, sbp_mgt_agent_process);
1667	agent->orb_offset = 0;
1668	agent->request = NULL;
1669
1670	ret = fw_core_add_address_handler(&agent->handler,
1671			&sbp_register_region);
1672	if (ret < 0) {
1673		kfree(agent);
1674		return ERR_PTR(ret);
1675	}
1676
1677	return agent;
1678}
1679
1680static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
1681{
1682	fw_core_remove_address_handler(&agent->handler);
1683	cancel_work_sync(&agent->work);
1684	kfree(agent);
1685}
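
/*
 * Teardown order matters: removing the address handler first ensures no
 * new management request can queue the work item, so cancel_work_sync()
 * leaves nothing in flight by the time the agent is freed.
 */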
1686
1687static int sbp_check_true(struct se_portal_group *se_tpg)
1688{
1689	return 1;
1690}
1691
1692static int sbp_check_false(struct se_portal_group *se_tpg)
1693{
1694	return 0;
1695}
1696
1697static char *sbp_get_fabric_name(void)
1698{
1699	return "sbp";
1700}
1701
1702static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1703{
1704	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1705	struct sbp_tport *tport = tpg->tport;
1706
1707	return &tport->tport_name[0];
1708}
1709
1710static u16 sbp_get_tag(struct se_portal_group *se_tpg)
1711{
1712	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1713	return tpg->tport_tpgt;
1714}
1715
1716static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
1717{
1718	return 1;
1719}
1720
1721static void sbp_release_cmd(struct se_cmd *se_cmd)
1722{
1723	struct sbp_target_request *req = container_of(se_cmd,
1724			struct sbp_target_request, se_cmd);
1725
1726	sbp_free_request(req);
1727}
1728
1729static int sbp_shutdown_session(struct se_session *se_sess)
1730{
1731	return 0;
1732}
1733
1734static void sbp_close_session(struct se_session *se_sess)
1735{
1737}
1738
1739static u32 sbp_sess_get_index(struct se_session *se_sess)
1740{
1741	return 0;
1742}
1743
1744static int sbp_write_pending(struct se_cmd *se_cmd)
1745{
1746	struct sbp_target_request *req = container_of(se_cmd,
1747			struct sbp_target_request, se_cmd);
1748	int ret;
1749
1750	ret = sbp_rw_data(req);
1751	if (ret) {
1752		req->status.status |= cpu_to_be32(
1753			STATUS_BLOCK_RESP(
1754				STATUS_RESP_TRANSPORT_FAILURE) |
1755			STATUS_BLOCK_DEAD(0) |
1756			STATUS_BLOCK_LEN(1) |
1757			STATUS_BLOCK_SBP_STATUS(
1758				SBP_STATUS_UNSPECIFIED_ERROR));
1759		sbp_send_status(req);
1760		return ret;
1761	}
1762
1763	target_execute_cmd(se_cmd);
1764	return 0;
1765}
1766
1767static int sbp_write_pending_status(struct se_cmd *se_cmd)
1768{
1769	return 0;
1770}
1771
1772static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
1773{
1775}
1776
1777static int sbp_get_cmd_state(struct se_cmd *se_cmd)
1778{
1779	return 0;
1780}
1781
1782static int sbp_queue_data_in(struct se_cmd *se_cmd)
1783{
1784	struct sbp_target_request *req = container_of(se_cmd,
1785			struct sbp_target_request, se_cmd);
1786	int ret;
1787
1788	ret = sbp_rw_data(req);
1789	if (ret) {
1790		req->status.status |= cpu_to_be32(
1791			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1792			STATUS_BLOCK_DEAD(0) |
1793			STATUS_BLOCK_LEN(1) |
1794			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1795		sbp_send_status(req);
1796		return ret;
1797	}
1798
1799	return sbp_send_sense(req);
1800}
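
/*
 * Data-flow summary: for WRITE-direction commands, sbp_write_pending()
 * pulls the data from initiator memory via sbp_rw_data() before the
 * backend executes; for READ-direction commands, sbp_queue_data_in()
 * pushes the data back to the initiator and then sends status/sense in
 * the same pass.
 */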
1801
1802/*
1803 * Called to send status once a command with no data transfer, or the
1804 * write (to-device) data phase of a command, has completed.
1805 */
1806static int sbp_queue_status(struct se_cmd *se_cmd)
1807{
1808	struct sbp_target_request *req = container_of(se_cmd,
1809			struct sbp_target_request, se_cmd);
1810
1811	return sbp_send_sense(req);
1812}
1813
1814static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1815{
1816}
1817
1818static void sbp_aborted_task(struct se_cmd *se_cmd)
1819{
1821}
1822
1823static int sbp_check_stop_free(struct se_cmd *se_cmd)
1824{
1825	struct sbp_target_request *req = container_of(se_cmd,
1826			struct sbp_target_request, se_cmd);
1827
1828	return transport_generic_free_cmd(&req->se_cmd, 0);
1829}
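
/*
 * Command lifetime sketch: target_submit_cmd() takes the extra ACK_KREF
 * reference that sbp_send_status() drops; this check_stop_free hook then
 * routes completion into transport_generic_free_cmd(), whose final
 * cmd_kref put lands in sbp_release_cmd() -> sbp_free_request() above.
 */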
1830
1831static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
1832{
1833	struct se_lun *lun;
1834	int count = 0;
1835
1836	rcu_read_lock();
1837	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
1838		count++;
1839	rcu_read_unlock();
1840
1841	return count;
1842}
1843
1844static int sbp_update_unit_directory(struct sbp_tport *tport)
1845{
1846	struct se_lun *lun;
1847	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
1848	u32 *data;
1849
1850	if (tport->unit_directory.data) {
1851		fw_core_remove_descriptor(&tport->unit_directory);
1852		kfree(tport->unit_directory.data);
1853		tport->unit_directory.data = NULL;
1854	}
1855
1856	if (!tport->enable || !tport->tpg)
1857		return 0;
1858
1859	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
1860
1861	/*
1862	 * Number of entries in the final unit directory:
1863	 *  - all of those in the template
1864	 *  - management_agent
1865	 *  - unit_characteristics
1866	 *  - reconnect_timeout
1867	 *  - unit unique ID
1868	 *  - one for each LUN
1869	 *
1870	 *  MUST NOT include leaf or sub-directory entries
1871	 */
1872	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
1873
1874	if (tport->directory_id != -1)
1875		num_entries++;
1876
1877	/* allocate num_entries + 4 for the header and unique ID leaf */
1878	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
1879	if (!data)
1880		return -ENOMEM;
1881
1882	/* directory_length */
1883	data[idx++] = num_entries << 16;
1884
1885	/* directory_id */
1886	if (tport->directory_id != -1)
1887		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
1888
1889	/* unit directory template */
1890	memcpy(&data[idx], sbp_unit_directory_template,
1891			sizeof(sbp_unit_directory_template));
1892	idx += ARRAY_SIZE(sbp_unit_directory_template);
1893
1894	/* management_agent */
1895	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
1896	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
1897
1898	/* unit_characteristics */
1899	data[idx++] = 0x3a000000 |
1900		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
1901		SBP_ORB_FETCH_SIZE;
1902
1903	/* reconnect_timeout */
1904	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
1905
1906	/* unit unique ID (leaf is just after LUNs) */
1907	data[idx++] = 0x8d000000 | (num_luns + 1);
1908
1909	rcu_read_lock();
1910	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
1911		struct se_device *dev;
1912		int type;
1913		/*
1914		 * rcu_dereference_raw protected by se_lun->lun_group symlink
1915		 * reference to se_device->dev_group.
1916		 */
1917		dev = rcu_dereference_raw(lun->lun_se_dev);
1918		type = dev->transport->get_device_type(dev);
1919
1920		/* logical_unit_number */
1921		data[idx++] = 0x14000000 |
1922			((type << 16) & 0x1f0000) |
1923			(lun->unpacked_lun & 0xffff);
1924	}
1925	rcu_read_unlock();
1926
1927	/* unit unique ID leaf */
1928	data[idx++] = 2 << 16;
1929	data[idx++] = tport->guid >> 32;
1930	data[idx++] = tport->guid;
1931
1932	tport->unit_directory.length = idx;
1933	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
1934	tport->unit_directory.data = data;
1935
1936	ret = fw_core_add_descriptor(&tport->unit_directory);
1937	if (ret < 0) {
1938		kfree(tport->unit_directory.data);
1939		tport->unit_directory.data = NULL;
1940	}
1941
1942	return ret;
1943}
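
/*
 * Illustrative output (made-up values): with the default attributes, an
 * implicit directory_id and a single TYPE_DISK LUN 0, the generated unit
 * directory would look roughly like:
 *
 *	0x000b0000	directory_length: 11 entries
 *	  ...		the six template entries (specifier ids, versions)
 *	0x54xxxxxx	management_agent: handler CSR offset / 4
 *	0x3a001eNN	unit_characteristics: mgt_orb_timeout 15 -> 0x1e,
 *			NN = SBP_ORB_FETCH_SIZE
 *	0x3d000005	reconnect_timeout: 5
 *	0x8d000002	unit unique ID: leaf two entries ahead
 *	0x14000000	logical_unit_number: type 0 (disk), LUN 0
 *	0x00020000	leaf header: two quadlets follow
 *	0x........	GUID high, then GUID low
 */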
1944
1945static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
1946{
1947	const char *cp;
1948	char c, nibble;
1949	int pos = 0, err;
1950
1951	*wwn = 0;
1952	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
1953		c = *cp;
1954		if (c == '\n' && cp[1] == '\0')
1955			continue;
1956		if (c == '\0') {
1957			err = 2;
1958			if (pos != 16)
1959				goto fail;
1960			return cp - name;
1961		}
1962		err = 3;
1963		if (isdigit(c))
1964			nibble = c - '0';
1965		else if (isxdigit(c))
1966			nibble = tolower(c) - 'a' + 10;
1967		else
1968			goto fail;
1969		*wwn = (*wwn << 4) | nibble;
1970		pos++;
1971	}
1972	err = 4;
1973fail:
1974	pr_info("err %d len %td pos %d\n",
1975			err, cp - name, pos);
1976	return -1;
1977}
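
/*
 * Accepted input is exactly 16 hex digits plus an optional trailing
 * newline; e.g. (hypothetical GUID) "00112233445566aa" yields
 * *wwn = 0x00112233445566aa and returns the consumed length. Anything
 * shorter, longer, or containing a non-hex character fails with -1 after
 * logging the error code and parse position.
 */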
1978
1979static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
1980{
1981	return snprintf(buf, len, "%016llx", wwn);
1982}
1983
1984static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1985{
1986	u64 guid = 0;
1987
1988	if (sbp_parse_wwn(name, &guid) < 0)
1989		return -EINVAL;
1990	return 0;
1991}
1992
1993static int sbp_post_link_lun(
1994		struct se_portal_group *se_tpg,
1995		struct se_lun *se_lun)
1996{
1997	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1998
1999	return sbp_update_unit_directory(tpg->tport);
2000}
2001
2002static void sbp_pre_unlink_lun(
2003		struct se_portal_group *se_tpg,
2004		struct se_lun *se_lun)
2005{
2006	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2007	struct sbp_tport *tport = tpg->tport;
2008	int ret;
2009
2010	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
2011		tport->enable = 0;
2012
2013	ret = sbp_update_unit_directory(tport);
2014	if (ret < 0)
2015		pr_err("unlink LUN: failed to update unit directory\n");
2016}
2017
2018static struct se_portal_group *sbp_make_tpg(
2019		struct se_wwn *wwn,
2020		struct config_group *group,
2021		const char *name)
2022{
2023	struct sbp_tport *tport =
2024		container_of(wwn, struct sbp_tport, tport_wwn);
2025
2026	struct sbp_tpg *tpg;
2027	unsigned long tpgt;
2028	int ret;
2029
2030	if (strstr(name, "tpgt_") != name)
2031		return ERR_PTR(-EINVAL);
2032	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
2033		return ERR_PTR(-EINVAL);
2034
2035	if (tport->tpg) {
2036		pr_err("Only one TPG per Unit is possible.\n");
2037		return ERR_PTR(-EBUSY);
2038	}
2039
2040	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2041	if (!tpg) {
2042		pr_err("Unable to allocate struct sbp_tpg\n");
2043		return ERR_PTR(-ENOMEM);
2044	}
2045
2046	tpg->tport = tport;
2047	tpg->tport_tpgt = tpgt;
2048	tport->tpg = tpg;
2049
2050	/* default attribute values */
2051	tport->enable = 0;
2052	tport->directory_id = -1;
2053	tport->mgt_orb_timeout = 15;
2054	tport->max_reconnect_timeout = 5;
2055	tport->max_logins_per_lun = 1;
2056
2057	tport->mgt_agt = sbp_management_agent_register(tport);
2058	if (IS_ERR(tport->mgt_agt)) {
2059		ret = PTR_ERR(tport->mgt_agt);
2060		goto out_free_tpg;
2061	}
2062
2063	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
2064	if (ret < 0)
2065		goto out_unreg_mgt_agt;
2066
2067	return &tpg->se_tpg;
2068
2069out_unreg_mgt_agt:
2070	sbp_management_agent_unregister(tport->mgt_agt);
2071out_free_tpg:
2072	tport->tpg = NULL;
2073	kfree(tpg);
2074	return ERR_PTR(ret);
2075}
2076
2077static void sbp_drop_tpg(struct se_portal_group *se_tpg)
2078{
2079	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2080	struct sbp_tport *tport = tpg->tport;
2081
2082	core_tpg_deregister(se_tpg);
2083	sbp_management_agent_unregister(tport->mgt_agt);
2084	tport->tpg = NULL;
2085	kfree(tpg);
2086}
2087
2088static struct se_wwn *sbp_make_tport(
2089		struct target_fabric_configfs *tf,
2090		struct config_group *group,
2091		const char *name)
2092{
2093	struct sbp_tport *tport;
2094	u64 guid = 0;
2095
2096	if (sbp_parse_wwn(name, &guid) < 0)
2097		return ERR_PTR(-EINVAL);
2098
2099	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2100	if (!tport) {
2101		pr_err("Unable to allocate struct sbp_tport\n");
2102		return ERR_PTR(-ENOMEM);
2103	}
2104
2105	tport->guid = guid;
2106	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2107
2108	return &tport->tport_wwn;
2109}
2110
2111static void sbp_drop_tport(struct se_wwn *wwn)
2112{
2113	struct sbp_tport *tport =
2114		container_of(wwn, struct sbp_tport, tport_wwn);
2115
2116	kfree(tport);
2117}
2118
2119static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
2120{
2121	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
2122}
2123
2124CONFIGFS_ATTR_RO(sbp_wwn_, version);
2125
2126static struct configfs_attribute *sbp_wwn_attrs[] = {
2127	&sbp_wwn_attr_version,
2128	NULL,
2129};
2130
2131static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
2132{
2133	struct se_portal_group *se_tpg = to_tpg(item);
2134	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2135	struct sbp_tport *tport = tpg->tport;
2136
2137	if (tport->directory_id == -1)
2138		return sprintf(page, "implicit\n");
2139	else
2140		return sprintf(page, "%06x\n", tport->directory_id);
2141}
2142
2143static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
2144		const char *page, size_t count)
2145{
2146	struct se_portal_group *se_tpg = to_tpg(item);
2147	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2148	struct sbp_tport *tport = tpg->tport;
2149	unsigned long val;
2150
2151	if (tport->enable) {
2152		pr_err("Cannot change the directory_id on an active target.\n");
2153		return -EBUSY;
2154	}
2155
2156	if (strstr(page, "implicit") == page) {
2157		tport->directory_id = -1;
2158	} else {
2159		if (kstrtoul(page, 16, &val) < 0)
2160			return -EINVAL;
2161		if (val > 0xffffff)
2162			return -EINVAL;
2163
2164		tport->directory_id = val;
2165	}
2166
2167	return count;
2168}
2169
2170static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
2171{
2172	struct se_portal_group *se_tpg = to_tpg(item);
2173	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2174	struct sbp_tport *tport = tpg->tport;
2175	return sprintf(page, "%d\n", tport->enable);
2176}
2177
2178static ssize_t sbp_tpg_enable_store(struct config_item *item,
2179		const char *page, size_t count)
2180{
2181	struct se_portal_group *se_tpg = to_tpg(item);
2182	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2183	struct sbp_tport *tport = tpg->tport;
2184	unsigned long val;
2185	int ret;
2186
2187	if (kstrtoul(page, 0, &val) < 0)
2188		return -EINVAL;
2189	if ((val != 0) && (val != 1))
2190		return -EINVAL;
2191
2192	if (tport->enable == val)
2193		return count;
2194
2195	if (val) {
2196		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
2197			pr_err("Cannot enable a target with no LUNs!\n");
2198			return -EINVAL;
2199		}
2200	} else {
2201		/* XXX: force-shutdown sessions instead? */
2202		spin_lock_bh(&se_tpg->session_lock);
2203		if (!list_empty(&se_tpg->tpg_sess_list)) {
2204			spin_unlock_bh(&se_tpg->session_lock);
2205			return -EBUSY;
2206		}
2207		spin_unlock_bh(&se_tpg->session_lock);
2208	}
2209
2210	tport->enable = val;
2211
2212	ret = sbp_update_unit_directory(tport);
2213	if (ret < 0) {
2214		pr_err("Could not update Config ROM\n");
2215		return ret;
2216	}
2217
2218	return count;
2219}
2220
2221CONFIGFS_ATTR(sbp_tpg_, directory_id);
2222CONFIGFS_ATTR(sbp_tpg_, enable);
2223
2224static struct configfs_attribute *sbp_tpg_base_attrs[] = {
2225	&sbp_tpg_attr_directory_id,
2226	&sbp_tpg_attr_enable,
2227	NULL,
2228};
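
/*
 * Illustrative configfs session (paths follow the usual LIO layout; the
 * GUID, tpgt and backstore names are made up):
 *
 *	cd /sys/kernel/config/target
 *	mkdir -p sbp/00112233445566aa/tpgt_1/lun/lun_0
 *	ln -s /sys/kernel/config/target/core/iblock_0/mydisk \
 *	      sbp/00112233445566aa/tpgt_1/lun/lun_0/mydisk
 *	echo 1 > sbp/00112233445566aa/tpgt_1/enable
 *
 * Per the handlers above, enable refuses to rise with no LUNs linked,
 * refuses to fall while sessions are still logged in, and directory_id
 * may only change while the target is disabled.
 */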
2229
2230static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
2231		char *page)
2232{
2233	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2234	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2235	struct sbp_tport *tport = tpg->tport;
2236	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
2237}
2238
2239static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
2240		const char *page, size_t count)
2241{
2242	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2243	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2244	struct sbp_tport *tport = tpg->tport;
2245	unsigned long val;
2246	int ret;
2247
2248	if (kstrtoul(page, 0, &val) < 0)
2249		return -EINVAL;
2250	if ((val < 1) || (val > 127))
2251		return -EINVAL;
2252
2253	if (tport->mgt_orb_timeout == val)
2254		return count;
2255
2256	tport->mgt_orb_timeout = val;
2257
2258	ret = sbp_update_unit_directory(tport);
2259	if (ret < 0)
2260		return ret;
2261
2262	return count;
2263}
2264
2265static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
2266		char *page)
2267{
2268	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2269	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2270	struct sbp_tport *tport = tpg->tport;
2271	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
2272}
2273
2274static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
2275		const char *page, size_t count)
2276{
2277	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2278	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2279	struct sbp_tport *tport = tpg->tport;
2280	unsigned long val;
2281	int ret;
2282
2283	if (kstrtoul(page, 0, &val) < 0)
2284		return -EINVAL;
2285	if ((val < 1) || (val > 32767))
2286		return -EINVAL;
2287
2288	if (tport->max_reconnect_timeout == val)
2289		return count;
2290
2291	tport->max_reconnect_timeout = val;
2292
2293	ret = sbp_update_unit_directory(tport);
2294	if (ret < 0)
2295		return ret;
2296
2297	return count;
2298}
2299
2300static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
2301		char *page)
2302{
2303	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2304	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2305	struct sbp_tport *tport = tpg->tport;
2306	return sprintf(page, "%d\n", tport->max_logins_per_lun);
2307}
2308
2309static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
2310		const char *page, size_t count)
2311{
2312	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2313	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2314	struct sbp_tport *tport = tpg->tport;
2315	unsigned long val;
2316
2317	if (kstrtoul(page, 0, &val) < 0)
2318		return -EINVAL;
2319	if ((val < 1) || (val > 127))
2320		return -EINVAL;
2321
2322	/* XXX: also check against current count? */
2323
2324	tport->max_logins_per_lun = val;
2325
2326	return count;
2327}
2328
2329CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
2330CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
2331CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);
2332
2333static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2334	&sbp_tpg_attrib_attr_mgt_orb_timeout,
2335	&sbp_tpg_attrib_attr_max_reconnect_timeout,
2336	&sbp_tpg_attrib_attr_max_logins_per_lun,
2337	NULL,
2338};
2339
2340static const struct target_core_fabric_ops sbp_ops = {
2341	.module				= THIS_MODULE,
2342	.name				= "sbp",
2343	.get_fabric_name		= sbp_get_fabric_name,
2344	.tpg_get_wwn			= sbp_get_fabric_wwn,
2345	.tpg_get_tag			= sbp_get_tag,
2346	.tpg_check_demo_mode		= sbp_check_true,
2347	.tpg_check_demo_mode_cache	= sbp_check_true,
2348	.tpg_check_demo_mode_write_protect = sbp_check_false,
2349	.tpg_check_prod_mode_write_protect = sbp_check_false,
2350	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
2351	.release_cmd			= sbp_release_cmd,
2352	.shutdown_session		= sbp_shutdown_session,
2353	.close_session			= sbp_close_session,
2354	.sess_get_index			= sbp_sess_get_index,
2355	.write_pending			= sbp_write_pending,
2356	.write_pending_status		= sbp_write_pending_status,
2357	.set_default_node_attributes	= sbp_set_default_node_attrs,
2358	.get_cmd_state			= sbp_get_cmd_state,
2359	.queue_data_in			= sbp_queue_data_in,
2360	.queue_status			= sbp_queue_status,
2361	.queue_tm_rsp			= sbp_queue_tm_rsp,
2362	.aborted_task			= sbp_aborted_task,
2363	.check_stop_free		= sbp_check_stop_free,
2364
2365	.fabric_make_wwn		= sbp_make_tport,
2366	.fabric_drop_wwn		= sbp_drop_tport,
2367	.fabric_make_tpg		= sbp_make_tpg,
2368	.fabric_drop_tpg		= sbp_drop_tpg,
2369	.fabric_post_link		= sbp_post_link_lun,
2370	.fabric_pre_unlink		= sbp_pre_unlink_lun,
2371	.fabric_make_np			= NULL,
2372	.fabric_drop_np			= NULL,
2373	.fabric_init_nodeacl		= sbp_init_nodeacl,
2374
2375	.tfc_wwn_attrs			= sbp_wwn_attrs,
2376	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
2377	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
2378};
2379
2380static int __init sbp_init(void)
2381{
2382	return target_register_template(&sbp_ops);
2383}
2384
2385static void __exit sbp_exit(void)
2386{
2387	target_unregister_template(&sbp_ops);
2388}
2389
2390MODULE_DESCRIPTION("FireWire SBP fabric driver");
2391MODULE_LICENSE("GPL");
2392module_init(sbp_init);
2393module_exit(sbp_exit);