   1/*
   2 * SBP2 target driver (SCSI over IEEE1394 in target mode)
   3 *
   4 * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software Foundation,
  18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  19 */
  20
  21#define KMSG_COMPONENT "sbp_target"
  22#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  23
  24#include <linux/kernel.h>
  25#include <linux/module.h>
  26#include <linux/init.h>
  27#include <linux/types.h>
  28#include <linux/string.h>
  29#include <linux/configfs.h>
  30#include <linux/ctype.h>
  31#include <linux/delay.h>
  32#include <linux/firewire.h>
  33#include <linux/firewire-constants.h>
  34#include <scsi/scsi_proto.h>
  35#include <scsi/scsi_tcq.h>
  36#include <target/target_core_base.h>
  37#include <target/target_core_backend.h>
  38#include <target/target_core_fabric.h>
  39#include <asm/unaligned.h>
  40
  41#include "sbp_target.h"
  42
  43/* FireWire address region for management and command block address handlers */
  44static const struct fw_address_region sbp_register_region = {
  45	.start	= CSR_REGISTER_BASE + 0x10000,
  46	.end	= 0x1000000000000ULL,
  47};
  48
  49static const u32 sbp_unit_directory_template[] = {
  50	0x1200609e, /* unit_specifier_id: NCITS/T10 */
  51	0x13010483, /* unit_sw_version: 1155D Rev 4 */
  52	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
  53	0x390104d8, /* command_set: SPC-2 */
  54	0x3b000000, /* command_set_revision: 0 */
  55	0x3c000001, /* firmware_revision: 1 */
  56};
  57
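      /* Per-session maintenance (bus-reset and reconnect-timeout checks) is
       * re-armed once per second: HZ jiffies equals one second. */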
  58#define SESSION_MAINTENANCE_INTERVAL HZ
  59
  60static atomic_t login_id = ATOMIC_INIT(0);
  61
  62static void session_maintenance_work(struct work_struct *);
  63static int sbp_run_transaction(struct fw_card *, int, int, int, int,
  64		unsigned long long, void *, size_t);
  65
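      /* Read the peer's EUI-64 (GUID) from its Config ROM: quadlets 3 and 4 of
       * the bus info block carry the high and low halves of the 64-bit ID. */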
  66static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
  67{
  68	int ret;
  69	__be32 high, low;
  70
  71	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
  72			req->node_addr, req->generation, req->speed,
  73			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
  74			&high, sizeof(high));
  75	if (ret != RCODE_COMPLETE)
  76		return ret;
  77
  78	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
  79			req->node_addr, req->generation, req->speed,
  80			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
  81			&low, sizeof(low));
  82	if (ret != RCODE_COMPLETE)
  83		return ret;
  84
  85	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
  86
  87	return RCODE_COMPLETE;
  88}
  89
  90static struct sbp_session *sbp_session_find_by_guid(
  91	struct sbp_tpg *tpg, u64 guid)
  92{
  93	struct se_session *se_sess;
  94	struct sbp_session *sess, *found = NULL;
  95
  96	spin_lock_bh(&tpg->se_tpg.session_lock);
  97	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
  98		sess = se_sess->fabric_sess_ptr;
  99		if (sess->guid == guid)
 100			found = sess;
 101	}
 102	spin_unlock_bh(&tpg->se_tpg.session_lock);
 103
 104	return found;
 105}
 106
 107static struct sbp_login_descriptor *sbp_login_find_by_lun(
 108		struct sbp_session *session, u32 unpacked_lun)
 109{
 110	struct sbp_login_descriptor *login, *found = NULL;
 111
 112	spin_lock_bh(&session->lock);
 113	list_for_each_entry(login, &session->login_list, link) {
 114		if (login->login_lun == unpacked_lun)
 115			found = login;
 116	}
 117	spin_unlock_bh(&session->lock);
 118
 119	return found;
 120}
 121
 122static int sbp_login_count_all_by_lun(
 123		struct sbp_tpg *tpg,
 124		u32 unpacked_lun,
 125		int exclusive)
 126{
 127	struct se_session *se_sess;
 128	struct sbp_session *sess;
 129	struct sbp_login_descriptor *login;
 130	int count = 0;
 131
 132	spin_lock_bh(&tpg->se_tpg.session_lock);
 133	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 134		sess = se_sess->fabric_sess_ptr;
 135
 136		spin_lock_bh(&sess->lock);
 137		list_for_each_entry(login, &sess->login_list, link) {
 138			if (login->login_lun != unpacked_lun)
 139				continue;
 140
 141			if (!exclusive || login->exclusive)
 142				count++;
 143		}
 144		spin_unlock_bh(&sess->lock);
 145	}
 146	spin_unlock_bh(&tpg->se_tpg.session_lock);
 147
 148	return count;
 149}
 150
 151static struct sbp_login_descriptor *sbp_login_find_by_id(
 152	struct sbp_tpg *tpg, int login_id)
 153{
 154	struct se_session *se_sess;
 155	struct sbp_session *sess;
 156	struct sbp_login_descriptor *login, *found = NULL;
 157
 158	spin_lock_bh(&tpg->se_tpg.session_lock);
 159	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 160		sess = se_sess->fabric_sess_ptr;
 161
 162		spin_lock_bh(&sess->lock);
 163		list_for_each_entry(login, &sess->login_list, link) {
 164			if (login->login_id == login_id)
 165				found = login;
 166		}
 167		spin_unlock_bh(&sess->lock);
 168	}
 169	spin_unlock_bh(&tpg->se_tpg.session_lock);
 170
 171	return found;
 172}
 173
 174static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
 175{
 176	struct se_portal_group *se_tpg = &tpg->se_tpg;
 177	struct se_lun *se_lun;
 178
 179	rcu_read_lock();
 180	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
 181		if (se_lun->unpacked_lun == login_lun) {
 182			rcu_read_unlock();
 183			*err = 0;
 184			return login_lun;
 185		}
 186	}
 187	rcu_read_unlock();
 188
 189	*err = -ENODEV;
 190	return login_lun;
 191}
 192
 193static struct sbp_session *sbp_session_create(
 194		struct sbp_tpg *tpg,
 195		u64 guid)
 196{
 197	struct sbp_session *sess;
 198	int ret;
 199	char guid_str[17];
 200
 201	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
 202
 203	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
 204	if (!sess) {
 205		pr_err("failed to allocate session descriptor\n");
 206		return ERR_PTR(-ENOMEM);
 207	}
 208	spin_lock_init(&sess->lock);
 209	INIT_LIST_HEAD(&sess->login_list);
 210	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
 211	sess->guid = guid;
 212
 213	sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
 214					     sizeof(struct sbp_target_request),
 215					     TARGET_PROT_NORMAL, guid_str,
 216					     sess, NULL);
 217	if (IS_ERR(sess->se_sess)) {
 218		pr_err("failed to init se_session\n");
 219		ret = PTR_ERR(sess->se_sess);
 220		kfree(sess);
 221		return ERR_PTR(ret);
 222	}
 223
 224	return sess;
 225}
 226
 227static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
 228{
 229	spin_lock_bh(&sess->lock);
 230	if (!list_empty(&sess->login_list)) {
 231		spin_unlock_bh(&sess->lock);
 232		return;
 233	}
 234	spin_unlock_bh(&sess->lock);
 235
 236	if (cancel_work)
 237		cancel_delayed_work_sync(&sess->maint_work);
 238
 239	transport_deregister_session_configfs(sess->se_sess);
 240	transport_deregister_session(sess->se_sess);
 241
 242	if (sess->card)
 243		fw_card_put(sess->card);
 244
 245	kfree(sess);
 246}
 247
 248static void sbp_target_agent_unregister(struct sbp_target_agent *);
 249
 250static void sbp_login_release(struct sbp_login_descriptor *login,
 251	bool cancel_work)
 252{
 253	struct sbp_session *sess = login->sess;
 254
 255	/* FIXME: abort/wait on tasks */
 256
 257	sbp_target_agent_unregister(login->tgt_agt);
 258
 259	if (sess) {
 260		spin_lock_bh(&sess->lock);
 261		list_del(&login->link);
 262		spin_unlock_bh(&sess->lock);
 263
 264		sbp_session_release(sess, cancel_work);
 265	}
 266
 267	kfree(login);
 268}
 269
 270static struct sbp_target_agent *sbp_target_agent_register(
 271	struct sbp_login_descriptor *);
 272
 273static void sbp_management_request_login(
 274	struct sbp_management_agent *agent, struct sbp_management_request *req,
 275	int *status_data_size)
 276{
 277	struct sbp_tport *tport = agent->tport;
 278	struct sbp_tpg *tpg = tport->tpg;
 279	struct sbp_session *sess;
 280	struct sbp_login_descriptor *login;
 281	struct sbp_login_response_block *response;
 282	u64 guid;
 283	u32 unpacked_lun;
 284	int login_response_len, ret;
 285
 286	unpacked_lun = sbp_get_lun_from_tpg(tpg,
 287			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
 288	if (ret) {
 289		pr_notice("login to unknown LUN: %d\n",
 290			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
 291
 292		req->status.status = cpu_to_be32(
 293			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 294			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
 295		return;
 296	}
 297
 298	ret = read_peer_guid(&guid, req);
 299	if (ret != RCODE_COMPLETE) {
 300		pr_warn("failed to read peer GUID: %d\n", ret);
 301
 302		req->status.status = cpu_to_be32(
 303			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 304			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 305		return;
 306	}
 307
 308	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
 309		unpacked_lun, guid);
 310
 311	sess = sbp_session_find_by_guid(tpg, guid);
 312	if (sess) {
 313		login = sbp_login_find_by_lun(sess, unpacked_lun);
 314		if (login) {
 315			pr_notice("initiator already logged-in\n");
 316
 317			/*
 318			 * SBP-2 R4 says we should return access denied, but
 319			 * that can confuse initiators. Instead we need to
 320			 * treat this like a reconnect, but send the login
 321			 * response block like a fresh login.
 322			 *
 323			 * This is required particularly in the case of Apple
 324			 * devices booting off the FireWire target, where
 325			 * the firmware has an active login to the target. When
 326			 * the OS takes control of the session it issues its own
 327			 * LOGIN rather than a RECONNECT. To avoid the machine
 328			 * waiting until the reconnect_hold expires, we can skip
 329			 * the ACCESS_DENIED errors to speed things up.
 330			 */
 331
 332			goto already_logged_in;
 333		}
 334	}
 335
 336	/*
 337	 * check exclusive bit in login request
 338	 * reject with access_denied if any logins present
 339	 */
 340	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
 341			sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
 342		pr_warn("refusing exclusive login with other active logins\n");
 343
 344		req->status.status = cpu_to_be32(
 345			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 346			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 347		return;
 348	}
 349
 350	/*
 351	 * check exclusive bit in any existing login descriptor
 352	 * reject with access_denied if any exclusive logins present
 353	 */
 354	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
 355		pr_warn("refusing login while another exclusive login present\n");
 356
 357		req->status.status = cpu_to_be32(
 358			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 359			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 360		return;
 361	}
 362
 363	/*
 364	 * check we haven't exceeded the number of allowed logins
 365	 * reject with resources_unavailable if we have
 366	 */
 367	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
 368			tport->max_logins_per_lun) {
 369		pr_warn("max number of logins reached\n");
 370
 371		req->status.status = cpu_to_be32(
 372			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 373			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 374		return;
 375	}
 376
 377	if (!sess) {
 378		sess = sbp_session_create(tpg, guid);
 379		if (IS_ERR(sess)) {
 380			switch (PTR_ERR(sess)) {
 381			case -EPERM:
 382				ret = SBP_STATUS_ACCESS_DENIED;
 383				break;
 384			default:
 385				ret = SBP_STATUS_RESOURCES_UNAVAIL;
 386				break;
 387			}
 388
 389			req->status.status = cpu_to_be32(
 390				STATUS_BLOCK_RESP(
 391					STATUS_RESP_REQUEST_COMPLETE) |
 392				STATUS_BLOCK_SBP_STATUS(ret));
 393			return;
 394		}
 395
 396		sess->node_id = req->node_addr;
 397		sess->card = fw_card_get(req->card);
 398		sess->generation = req->generation;
 399		sess->speed = req->speed;
 400
 401		schedule_delayed_work(&sess->maint_work,
 402				SESSION_MAINTENANCE_INTERVAL);
 403	}
 404
 405	/* only take the latest reconnect_hold into account */
 406	sess->reconnect_hold = min(
 407		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
 408		tport->max_reconnect_timeout) - 1;
 409
 410	login = kmalloc(sizeof(*login), GFP_KERNEL);
 411	if (!login) {
 412		pr_err("failed to allocate login descriptor\n");
 413
 414		sbp_session_release(sess, true);
 415
 416		req->status.status = cpu_to_be32(
 417			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 418			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 419		return;
 420	}
 421
 422	login->sess = sess;
 423	login->login_lun = unpacked_lun;
 424	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
 425	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
 426	login->login_id = atomic_inc_return(&login_id);
 427
 428	login->tgt_agt = sbp_target_agent_register(login);
 429	if (IS_ERR(login->tgt_agt)) {
 430		ret = PTR_ERR(login->tgt_agt);
 431		pr_err("failed to map command block handler: %d\n", ret);
 432
 433		sbp_session_release(sess, true);
 434		kfree(login);
 435
 436		req->status.status = cpu_to_be32(
 437			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 438			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 439		return;
 440	}
 441
 442	spin_lock_bh(&sess->lock);
 443	list_add_tail(&login->link, &sess->login_list);
 444	spin_unlock_bh(&sess->lock);
 445
 446already_logged_in:
 447	response = kzalloc(sizeof(*response), GFP_KERNEL);
 448	if (!response) {
 449		pr_err("failed to allocate login response block\n");
 450
 451		sbp_login_release(login, true);
 452
 453		req->status.status = cpu_to_be32(
 454			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 455			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 456		return;
 457	}
 458
 459	login_response_len = clamp_val(
 460			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
 461			12, sizeof(*response));
 462	response->misc = cpu_to_be32(
 463		((login_response_len & 0xffff) << 16) |
 464		(login->login_id & 0xffff));
 465	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
 466	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
 467		&response->command_block_agent);
 468
 469	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
 470		sess->node_id, sess->generation, sess->speed,
 471		sbp2_pointer_to_addr(&req->orb.ptr2), response,
 472		login_response_len);
 473	if (ret != RCODE_COMPLETE) {
 474		pr_debug("failed to write login response block: %x\n", ret);
 475
 476		kfree(response);
 477		sbp_login_release(login, true);
 478
 479		req->status.status = cpu_to_be32(
 480			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 481			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 482		return;
 483	}
 484
 485	kfree(response);
 486
 487	req->status.status = cpu_to_be32(
 488		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 489		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 490}
 491
 492static void sbp_management_request_query_logins(
 493	struct sbp_management_agent *agent, struct sbp_management_request *req,
 494	int *status_data_size)
 495{
 496	pr_notice("QUERY LOGINS not implemented\n");
 497	/* FIXME: implement */
 498
 499	req->status.status = cpu_to_be32(
 500		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 501		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
 502}
 503
 504static void sbp_management_request_reconnect(
 505	struct sbp_management_agent *agent, struct sbp_management_request *req,
 506	int *status_data_size)
 507{
 508	struct sbp_tport *tport = agent->tport;
 509	struct sbp_tpg *tpg = tport->tpg;
 510	int ret;
 511	u64 guid;
 512	struct sbp_login_descriptor *login;
 513
 514	ret = read_peer_guid(&guid, req);
 515	if (ret != RCODE_COMPLETE) {
 516		pr_warn("failed to read peer GUID: %d\n", ret);
 517
 518		req->status.status = cpu_to_be32(
 519			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 520			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 521		return;
 522	}
 523
 524	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
 525
 526	login = sbp_login_find_by_id(tpg,
 527		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
 528
 529	if (!login) {
 530		pr_err("mgt_agent RECONNECT unknown login ID\n");
 531
 532		req->status.status = cpu_to_be32(
 533			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 534			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 535		return;
 536	}
 537
 538	if (login->sess->guid != guid) {
 539		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
 540
 541		req->status.status = cpu_to_be32(
 542			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 543			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 544		return;
 545	}
 546
 547	spin_lock_bh(&login->sess->lock);
 548	if (login->sess->card)
 549		fw_card_put(login->sess->card);
 550
 551	/* update the node details */
 552	login->sess->generation = req->generation;
 553	login->sess->node_id = req->node_addr;
 554	login->sess->card = fw_card_get(req->card);
 555	login->sess->speed = req->speed;
 556	spin_unlock_bh(&login->sess->lock);
 557
 558	req->status.status = cpu_to_be32(
 559		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 560		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 561}
 562
 563static void sbp_management_request_logout(
 564	struct sbp_management_agent *agent, struct sbp_management_request *req,
 565	int *status_data_size)
 566{
 567	struct sbp_tport *tport = agent->tport;
 568	struct sbp_tpg *tpg = tport->tpg;
 569	int id;
 570	struct sbp_login_descriptor *login;
 571
 572	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
 573
 574	login = sbp_login_find_by_id(tpg, id);
 575	if (!login) {
 576		pr_warn("cannot find login: %d\n", id);
 577
 578		req->status.status = cpu_to_be32(
 579			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 580			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
 581		return;
 582	}
 583
 584	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
 585		login->login_lun, login->login_id);
 586
 587	if (req->node_addr != login->sess->node_id) {
 588		pr_warn("logout from different node ID\n");
 589
 590		req->status.status = cpu_to_be32(
 591			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 592			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 593		return;
 594	}
 595
 596	sbp_login_release(login, true);
 597
 598	req->status.status = cpu_to_be32(
 599		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 600		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 601}
 602
 603static void session_check_for_reset(struct sbp_session *sess)
 604{
 605	bool card_valid = false;
 606
 607	spin_lock_bh(&sess->lock);
 608
 609	if (sess->card) {
 610		spin_lock_irq(&sess->card->lock);
 611		card_valid = (sess->card->local_node != NULL);
 612		spin_unlock_irq(&sess->card->lock);
 613
 614		if (!card_valid) {
 615			fw_card_put(sess->card);
 616			sess->card = NULL;
 617		}
 618	}
 619
 620	if (!card_valid || (sess->generation != sess->card->generation)) {
 621		pr_info("Waiting for reconnect from node: %016llx\n",
 622				sess->guid);
 623
 624		sess->node_id = -1;
 625		sess->reconnect_expires = get_jiffies_64() +
 626			((sess->reconnect_hold + 1) * HZ);
 627	}
 628
 629	spin_unlock_bh(&sess->lock);
 630}
 631
 632static void session_reconnect_expired(struct sbp_session *sess)
 633{
 634	struct sbp_login_descriptor *login, *temp;
 635	LIST_HEAD(login_list);
 636
 637	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
 638
 639	spin_lock_bh(&sess->lock);
 640	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
 641		login->sess = NULL;
 642		list_move_tail(&login->link, &login_list);
 643	}
 644	spin_unlock_bh(&sess->lock);
 645
 646	list_for_each_entry_safe(login, temp, &login_list, link) {
 647		list_del(&login->link);
 648		sbp_login_release(login, false);
 649	}
 650
 651	sbp_session_release(sess, false);
 652}
 653
 654static void session_maintenance_work(struct work_struct *work)
 655{
 656	struct sbp_session *sess = container_of(work, struct sbp_session,
 657			maint_work.work);
 658
 659	/* could be called while tearing down the session */
 660	spin_lock_bh(&sess->lock);
 661	if (list_empty(&sess->login_list)) {
 662		spin_unlock_bh(&sess->lock);
 663		return;
 664	}
 665	spin_unlock_bh(&sess->lock);
 666
 667	if (sess->node_id != -1) {
 668		/* check for bus reset and make node_id invalid */
 669		session_check_for_reset(sess);
 670
 671		schedule_delayed_work(&sess->maint_work,
 672				SESSION_MAINTENANCE_INTERVAL);
 673	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
 674		/* still waiting for reconnect */
 675		schedule_delayed_work(&sess->maint_work,
 676				SESSION_MAINTENANCE_INTERVAL);
 677	} else {
 678		/* reconnect timeout has expired */
 679		session_reconnect_expired(sess);
 680	}
 681}
 682
 683static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
 684		struct sbp_target_agent *agent)
 685{
 686	int state;
 687
 688	switch (tcode) {
 689	case TCODE_READ_QUADLET_REQUEST:
 690		pr_debug("tgt_agent AGENT_STATE READ\n");
 691
 692		spin_lock_bh(&agent->lock);
 693		state = agent->state;
 694		spin_unlock_bh(&agent->lock);
 695
 696		*(__be32 *)data = cpu_to_be32(state);
 697
 698		return RCODE_COMPLETE;
 699
 700	case TCODE_WRITE_QUADLET_REQUEST:
 701		/* ignored */
 702		return RCODE_COMPLETE;
 703
 704	default:
 705		return RCODE_TYPE_ERROR;
 706	}
 707}
 708
 709static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
 710		struct sbp_target_agent *agent)
 711{
 712	switch (tcode) {
 713	case TCODE_WRITE_QUADLET_REQUEST:
 714		pr_debug("tgt_agent AGENT_RESET\n");
 715		spin_lock_bh(&agent->lock);
 716		agent->state = AGENT_STATE_RESET;
 717		spin_unlock_bh(&agent->lock);
 718		return RCODE_COMPLETE;
 719
 720	default:
 721		return RCODE_TYPE_ERROR;
 722	}
 723}
 724
 725static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
 726		struct sbp_target_agent *agent)
 727{
 728	struct sbp2_pointer *ptr = data;
 729
 730	switch (tcode) {
 731	case TCODE_WRITE_BLOCK_REQUEST:
 732		spin_lock_bh(&agent->lock);
 733		if (agent->state != AGENT_STATE_SUSPENDED &&
 734				agent->state != AGENT_STATE_RESET) {
 735			spin_unlock_bh(&agent->lock);
 736			pr_notice("Ignoring ORB_POINTER write while active.\n");
 737			return RCODE_CONFLICT_ERROR;
 738		}
 739		agent->state = AGENT_STATE_ACTIVE;
 740		spin_unlock_bh(&agent->lock);
 741
 742		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
 743		agent->doorbell = false;
 744
 745		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
 746				agent->orb_pointer);
 747
 748		queue_work(system_unbound_wq, &agent->work);
 749
 750		return RCODE_COMPLETE;
 751
 752	case TCODE_READ_BLOCK_REQUEST:
 753		pr_debug("tgt_agent ORB_POINTER READ\n");
 754		spin_lock_bh(&agent->lock);
 755		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
 756		spin_unlock_bh(&agent->lock);
 757		return RCODE_COMPLETE;
 758
 759	default:
 760		return RCODE_TYPE_ERROR;
 761	}
 762}
 763
 764static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
 765		struct sbp_target_agent *agent)
 766{
 767	switch (tcode) {
 768	case TCODE_WRITE_QUADLET_REQUEST:
 769		spin_lock_bh(&agent->lock);
 770		if (agent->state != AGENT_STATE_SUSPENDED) {
 771			spin_unlock_bh(&agent->lock);
 772			pr_debug("Ignoring DOORBELL while active.\n");
 773			return RCODE_CONFLICT_ERROR;
 774		}
 775		agent->state = AGENT_STATE_ACTIVE;
 776		spin_unlock_bh(&agent->lock);
 777
 778		agent->doorbell = true;
 779
 780		pr_debug("tgt_agent DOORBELL\n");
 781
 782		queue_work(system_unbound_wq, &agent->work);
 783
 784		return RCODE_COMPLETE;
 785
 786	case TCODE_READ_QUADLET_REQUEST:
 787		return RCODE_COMPLETE;
 788
 789	default:
 790		return RCODE_TYPE_ERROR;
 791	}
 792}
 793
 794static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
 795		int tcode, void *data, struct sbp_target_agent *agent)
 796{
 797	switch (tcode) {
 798	case TCODE_WRITE_QUADLET_REQUEST:
 799		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
 800		/* ignored as we don't send unsolicited status */
 801		return RCODE_COMPLETE;
 802
 803	case TCODE_READ_QUADLET_REQUEST:
 804		return RCODE_COMPLETE;
 805
 806	default:
 807		return RCODE_TYPE_ERROR;
 808	}
 809}
 810
 811static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
 812		int tcode, int destination, int source, int generation,
 813		unsigned long long offset, void *data, size_t length,
 814		void *callback_data)
 815{
 816	struct sbp_target_agent *agent = callback_data;
 817	struct sbp_session *sess = agent->login->sess;
 818	int sess_gen, sess_node, rcode;
 819
 820	spin_lock_bh(&sess->lock);
 821	sess_gen = sess->generation;
 822	sess_node = sess->node_id;
 823	spin_unlock_bh(&sess->lock);
 824
 825	if (generation != sess_gen) {
 826		pr_notice("ignoring request with wrong generation\n");
 827		rcode = RCODE_TYPE_ERROR;
 828		goto out;
 829	}
 830
 831	if (source != sess_node) {
 832		pr_notice("ignoring request from foreign node (%x != %x)\n",
 833				source, sess_node);
 834		rcode = RCODE_TYPE_ERROR;
 835		goto out;
 836	}
 837
 838	/* turn offset into the offset from the start of the block */
 839	offset -= agent->handler.offset;
 840
 841	if (offset == 0x00 && length == 4) {
 842		/* AGENT_STATE */
 843		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
 844	} else if (offset == 0x04 && length == 4) {
 845		/* AGENT_RESET */
 846		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
 847	} else if (offset == 0x08 && length == 8) {
 848		/* ORB_POINTER */
 849		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
 850	} else if (offset == 0x10 && length == 4) {
 851		/* DOORBELL */
 852		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
 853	} else if (offset == 0x14 && length == 4) {
 854		/* UNSOLICITED_STATUS_ENABLE */
 855		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
 856				data, agent);
 857	} else {
 858		rcode = RCODE_ADDRESS_ERROR;
 859	}
 860
 861out:
 862	fw_send_response(card, request, rcode);
 863}
 864
 865static void sbp_handle_command(struct sbp_target_request *);
 866static int sbp_send_status(struct sbp_target_request *);
 867static void sbp_free_request(struct sbp_target_request *);
 868
 869static void tgt_agent_process_work(struct work_struct *work)
 870{
 871	struct sbp_target_request *req =
 872		container_of(work, struct sbp_target_request, work);
 873
 874	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
 875			req->orb_pointer,
 876			sbp2_pointer_to_addr(&req->orb.next_orb),
 877			sbp2_pointer_to_addr(&req->orb.data_descriptor),
 878			be32_to_cpu(req->orb.misc));
 879
 880	if (req->orb_pointer >> 32)
 881		pr_debug("ORB with high bits set\n");
 882
 883	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
 884		case 0:/* Format specified by this standard */
 885			sbp_handle_command(req);
 886			return;
 887		case 1: /* Reserved for future standardization */
 888		case 2: /* Vendor-dependent */
 889			req->status.status |= cpu_to_be32(
 890					STATUS_BLOCK_RESP(
 891						STATUS_RESP_REQUEST_COMPLETE) |
 892					STATUS_BLOCK_DEAD(0) |
 893					STATUS_BLOCK_LEN(1) |
 894					STATUS_BLOCK_SBP_STATUS(
 895						SBP_STATUS_REQ_TYPE_NOTSUPP));
 896			sbp_send_status(req);
 897			return;
 898		case 3: /* Dummy ORB */
 899			req->status.status |= cpu_to_be32(
 900					STATUS_BLOCK_RESP(
 901						STATUS_RESP_REQUEST_COMPLETE) |
 902					STATUS_BLOCK_DEAD(0) |
 903					STATUS_BLOCK_LEN(1) |
 904					STATUS_BLOCK_SBP_STATUS(
 905						SBP_STATUS_DUMMY_ORB_COMPLETE));
 906			sbp_send_status(req);
 907			return;
 908		default:
 909			BUG();
 910	}
 911}
 912
 913/* used to double-check we haven't been issued an AGENT_RESET */
 914static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
 915{
 916	bool active;
 917
 918	spin_lock_bh(&agent->lock);
 919	active = (agent->state == AGENT_STATE_ACTIVE);
 920	spin_unlock_bh(&agent->lock);
 921
 922	return active;
 923}
 924
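      /* Allocate a request from the session's preallocated command pool; the
       * pool and its tag space were sized at 128 entries in sbp_session_create(). */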
 925static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
 926	struct fw_card *card, u64 next_orb)
 927{
 928	struct se_session *se_sess = sess->se_sess;
 929	struct sbp_target_request *req;
 930	int tag;
 931
 932	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 933	if (tag < 0)
 934		return ERR_PTR(-ENOMEM);
 935
 936	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
 937	memset(req, 0, sizeof(*req));
  938	req->se_cmd.map_tag = tag;
  939	req->se_cmd.tag = next_orb;
 940
 941	return req;
 942}
 943
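      /* Walk the ORB list starting at the fetch agent's ORB_POINTER: read each
       * ORB across the bus and hand it to tgt_agent_process_work() until a null
       * next_ORB (high bit set) is seen or the agent leaves the ACTIVE state.
       * After a DOORBELL the current ORB was already dispatched, so it is only
       * re-read here to pick up its updated next_ORB field. */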
 944static void tgt_agent_fetch_work(struct work_struct *work)
 945{
 946	struct sbp_target_agent *agent =
 947		container_of(work, struct sbp_target_agent, work);
 948	struct sbp_session *sess = agent->login->sess;
 949	struct sbp_target_request *req;
 950	int ret;
 951	bool doorbell = agent->doorbell;
 952	u64 next_orb = agent->orb_pointer;
 953
 954	while (next_orb && tgt_agent_check_active(agent)) {
 955		req = sbp_mgt_get_req(sess, sess->card, next_orb);
 956		if (IS_ERR(req)) {
 957			spin_lock_bh(&agent->lock);
 958			agent->state = AGENT_STATE_DEAD;
 959			spin_unlock_bh(&agent->lock);
 960			return;
 961		}
 962
 963		req->login = agent->login;
 964		req->orb_pointer = next_orb;
 965
 966		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
 967					req->orb_pointer >> 32));
 968		req->status.orb_low = cpu_to_be32(
 969				req->orb_pointer & 0xfffffffc);
 970
 971		/* read in the ORB */
 972		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
 973				sess->node_id, sess->generation, sess->speed,
 974				req->orb_pointer, &req->orb, sizeof(req->orb));
 975		if (ret != RCODE_COMPLETE) {
 976			pr_debug("tgt_orb fetch failed: %x\n", ret);
 977			req->status.status |= cpu_to_be32(
 978					STATUS_BLOCK_SRC(
 979						STATUS_SRC_ORB_FINISHED) |
 980					STATUS_BLOCK_RESP(
 981						STATUS_RESP_TRANSPORT_FAILURE) |
 982					STATUS_BLOCK_DEAD(1) |
 983					STATUS_BLOCK_LEN(1) |
 984					STATUS_BLOCK_SBP_STATUS(
 985						SBP_STATUS_UNSPECIFIED_ERROR));
 986			spin_lock_bh(&agent->lock);
 987			agent->state = AGENT_STATE_DEAD;
 988			spin_unlock_bh(&agent->lock);
 989
 990			sbp_send_status(req);
 991			return;
 992		}
 993
 994		/* check the next_ORB field */
 995		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
 996			next_orb = 0;
 997			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
 998						STATUS_SRC_ORB_FINISHED));
 999		} else {
1000			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
1001			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
1002						STATUS_SRC_ORB_CONTINUING));
1003		}
1004
1005		if (tgt_agent_check_active(agent) && !doorbell) {
1006			INIT_WORK(&req->work, tgt_agent_process_work);
1007			queue_work(system_unbound_wq, &req->work);
1008		} else {
1009			/* don't process this request, just check next_ORB */
1010			sbp_free_request(req);
1011		}
1012
1013		spin_lock_bh(&agent->lock);
1014		doorbell = agent->doorbell = false;
1015
1016		/* check if we should carry on processing */
1017		if (next_orb)
1018			agent->orb_pointer = next_orb;
1019		else
1020			agent->state = AGENT_STATE_SUSPENDED;
1021
1022		spin_unlock_bh(&agent->lock);
 1023	}
1024}
1025
1026static struct sbp_target_agent *sbp_target_agent_register(
1027		struct sbp_login_descriptor *login)
1028{
1029	struct sbp_target_agent *agent;
1030	int ret;
1031
1032	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1033	if (!agent)
1034		return ERR_PTR(-ENOMEM);
1035
1036	spin_lock_init(&agent->lock);
1037
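      	/* 0x20 bytes covers the command block agent register file decoded in
      	 * tgt_agent_rw(): AGENT_STATE (0x00), AGENT_RESET (0x04), ORB_POINTER
      	 * (0x08), DOORBELL (0x10) and UNSOLICITED_STATUS_ENABLE (0x14). */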
1038	agent->handler.length = 0x20;
1039	agent->handler.address_callback = tgt_agent_rw;
1040	agent->handler.callback_data = agent;
1041
1042	agent->login = login;
1043	agent->state = AGENT_STATE_RESET;
1044	INIT_WORK(&agent->work, tgt_agent_fetch_work);
1045	agent->orb_pointer = 0;
1046	agent->doorbell = false;
1047
1048	ret = fw_core_add_address_handler(&agent->handler,
1049			&sbp_register_region);
1050	if (ret < 0) {
1051		kfree(agent);
1052		return ERR_PTR(ret);
1053	}
1054
1055	return agent;
1056}
1057
1058static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
1059{
1060	fw_core_remove_address_handler(&agent->handler);
1061	cancel_work_sync(&agent->work);
1062	kfree(agent);
1063}
1064
 1065/*
 1066 * Simple wrapper around fw_run_transaction that retries the transaction up to
 1067 * five times on failure, backing off quadratically (5 * attempt^2 microseconds).
 1068 */
1069static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
1070		int generation, int speed, unsigned long long offset,
1071		void *payload, size_t length)
1072{
1073	int attempt, ret, delay;
1074
1075	for (attempt = 1; attempt <= 5; attempt++) {
1076		ret = fw_run_transaction(card, tcode, destination_id,
1077				generation, speed, offset, payload, length);
1078
1079		switch (ret) {
1080		case RCODE_COMPLETE:
1081		case RCODE_TYPE_ERROR:
1082		case RCODE_ADDRESS_ERROR:
1083		case RCODE_GENERATION:
1084			return ret;
1085
1086		default:
1087			delay = 5 * attempt * attempt;
1088			usleep_range(delay, delay * 2);
1089		}
1090	}
1091
1092	return ret;
1093}
1094
1095/*
1096 * Wrapper around sbp_run_transaction that gets the card, destination,
1097 * generation and speed out of the request's session.
1098 */
1099static int sbp_run_request_transaction(struct sbp_target_request *req,
1100		int tcode, unsigned long long offset, void *payload,
1101		size_t length)
1102{
1103	struct sbp_login_descriptor *login = req->login;
1104	struct sbp_session *sess = login->sess;
1105	struct fw_card *card;
1106	int node_id, generation, speed, ret;
1107
1108	spin_lock_bh(&sess->lock);
1109	card = fw_card_get(sess->card);
1110	node_id = sess->node_id;
1111	generation = sess->generation;
1112	speed = sess->speed;
1113	spin_unlock_bh(&sess->lock);
1114
1115	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
1116			offset, payload, length);
1117
1118	fw_card_put(card);
1119
1120	return ret;
1121}
1122
1123static int sbp_fetch_command(struct sbp_target_request *req)
1124{
1125	int ret, cmd_len, copy_len;
1126
1127	cmd_len = scsi_command_size(req->orb.command_block);
1128
1129	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
1130	if (!req->cmd_buf)
1131		return -ENOMEM;
1132
1133	memcpy(req->cmd_buf, req->orb.command_block,
1134		min_t(int, cmd_len, sizeof(req->orb.command_block)));
1135
1136	if (cmd_len > sizeof(req->orb.command_block)) {
1137		pr_debug("sbp_fetch_command: filling in long command\n");
1138		copy_len = cmd_len - sizeof(req->orb.command_block);
1139
1140		ret = sbp_run_request_transaction(req,
1141				TCODE_READ_BLOCK_REQUEST,
1142				req->orb_pointer + sizeof(req->orb),
1143				req->cmd_buf + sizeof(req->orb.command_block),
1144				copy_len);
1145		if (ret != RCODE_COMPLETE)
1146			return -EIO;
1147	}
1148
1149	return 0;
1150}
1151
1152static int sbp_fetch_page_table(struct sbp_target_request *req)
1153{
1154	int pg_tbl_sz, ret;
1155	struct sbp_page_table_entry *pg_tbl;
1156
1157	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
1158		return 0;
1159
1160	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
1161		sizeof(struct sbp_page_table_entry);
1162
1163	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
1164	if (!pg_tbl)
1165		return -ENOMEM;
1166
1167	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
1168			sbp2_pointer_to_addr(&req->orb.data_descriptor),
1169			pg_tbl, pg_tbl_sz);
1170	if (ret != RCODE_COMPLETE) {
1171		kfree(pg_tbl);
1172		return -EIO;
1173	}
1174
1175	req->pg_tbl = pg_tbl;
1176	return 0;
1177}
1178
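      /* Derive the transfer length and DMA direction from the command block ORB.
       * Without a page table, data_size is the byte count of the single segment;
       * with one, data_size is the number of page-table entries, so the segment
       * lengths are summed instead. */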
1179static void sbp_calc_data_length_direction(struct sbp_target_request *req,
1180	u32 *data_len, enum dma_data_direction *data_dir)
1181{
1182	int data_size, direction, idx;
1183
1184	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1185	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
1186
1187	if (!data_size) {
1188		*data_len = 0;
1189		*data_dir = DMA_NONE;
1190		return;
1191	}
1192
1193	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1194
1195	if (req->pg_tbl) {
1196		*data_len = 0;
1197		for (idx = 0; idx < data_size; idx++) {
1198			*data_len += be16_to_cpu(
1199					req->pg_tbl[idx].segment_length);
1200		}
1201	} else {
1202		*data_len = data_size;
1203	}
1204}
1205
1206static void sbp_handle_command(struct sbp_target_request *req)
1207{
1208	struct sbp_login_descriptor *login = req->login;
1209	struct sbp_session *sess = login->sess;
1210	int ret, unpacked_lun;
1211	u32 data_length;
1212	enum dma_data_direction data_dir;
1213
1214	ret = sbp_fetch_command(req);
1215	if (ret) {
1216		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
1217		goto err;
1218	}
1219
1220	ret = sbp_fetch_page_table(req);
1221	if (ret) {
1222		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
1223			ret);
1224		goto err;
1225	}
1226
1227	unpacked_lun = req->login->login_lun;
1228	sbp_calc_data_length_direction(req, &data_length, &data_dir);
1229
1230	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
1231			req->orb_pointer, unpacked_lun, data_length, data_dir);
1232
1233	/* only used for printk until we do TMRs */
1234	req->se_cmd.tag = req->orb_pointer;
1235	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
1236			      req->sense_buf, unpacked_lun, data_length,
1237			      TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
1238		goto err;
1239
1240	return;
1241
1242err:
1243	req->status.status |= cpu_to_be32(
1244		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1245		STATUS_BLOCK_DEAD(0) |
1246		STATUS_BLOCK_LEN(1) |
1247		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1248	sbp_send_status(req);
1249}
1250
1251/*
1252 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
1253 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
1254 */
1255static int sbp_rw_data(struct sbp_target_request *req)
1256{
1257	struct sbp_session *sess = req->login->sess;
1258	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
1259		generation, num_pte, length, tfr_length,
1260		rcode = RCODE_COMPLETE;
1261	struct sbp_page_table_entry *pte;
1262	unsigned long long offset;
1263	struct fw_card *card;
1264	struct sg_mapping_iter iter;
1265
1266	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
1267		tcode = TCODE_WRITE_BLOCK_REQUEST;
1268		sg_miter_flags = SG_MITER_FROM_SG;
1269	} else {
1270		tcode = TCODE_READ_BLOCK_REQUEST;
1271		sg_miter_flags = SG_MITER_TO_SG;
1272	}
1273
1274	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
1275	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
1276
1277	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
1278	if (pg_size) {
1279		pr_err("sbp_run_transaction: page size ignored\n");
1280		pg_size = 0x100 << pg_size;
1281	}
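      	/* Note: pg_size is decoded but not applied to the transfers below; see
      	 * the FIXME ahead of sbp_run_transaction() in the loop. */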
1282
1283	spin_lock_bh(&sess->lock);
1284	card = fw_card_get(sess->card);
1285	node_id = sess->node_id;
1286	generation = sess->generation;
1287	spin_unlock_bh(&sess->lock);
1288
1289	if (req->pg_tbl) {
1290		pte = req->pg_tbl;
1291		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1292
1293		offset = 0;
1294		length = 0;
1295	} else {
1296		pte = NULL;
1297		num_pte = 0;
1298
1299		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
1300		length = req->se_cmd.data_length;
1301	}
1302
1303	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
1304		sg_miter_flags);
1305
1306	while (length || num_pte) {
1307		if (!length) {
1308			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
1309				be32_to_cpu(pte->segment_base_lo);
1310			length = be16_to_cpu(pte->segment_length);
1311
1312			pte++;
1313			num_pte--;
1314		}
1315
1316		sg_miter_next(&iter);
1317
1318		tfr_length = min3(length, max_payload, (int)iter.length);
1319
1320		/* FIXME: take page_size into account */
1321
1322		rcode = sbp_run_transaction(card, tcode, node_id,
1323				generation, speed,
1324				offset, iter.addr, tfr_length);
1325
1326		if (rcode != RCODE_COMPLETE)
1327			break;
1328
1329		length -= tfr_length;
1330		offset += tfr_length;
1331		iter.consumed = tfr_length;
1332	}
1333
1334	sg_miter_stop(&iter);
1335	fw_card_put(card);
1336
1337	if (rcode == RCODE_COMPLETE) {
1338		WARN_ON(length != 0);
1339		return 0;
1340	} else {
1341		return -EIO;
1342	}
1343}
1344
1345static int sbp_send_status(struct sbp_target_request *req)
1346{
1347	int rc, ret = 0, length;
1348	struct sbp_login_descriptor *login = req->login;
1349
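      	/* STATUS_BLOCK_LEN as set by the callers encodes the status block length
      	 * in quadlets minus one; convert it back to bytes (LEN(1) -> 8-byte
      	 * header only, LEN(5) -> header plus 16 bytes of mangled sense data). */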
1350	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
1351
1352	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
1353			login->status_fifo_addr, &req->status, length);
1354	if (rc != RCODE_COMPLETE) {
1355		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
1356		ret = -EIO;
1357		goto put_ref;
1358	}
1359
1360	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
1361			req->orb_pointer);
1362	/*
1363	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
1364	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
1365	 * final se_cmd->cmd_kref put.
1366	 */
1367put_ref:
1368	target_put_sess_cmd(&req->se_cmd);
1369	return ret;
1370}
1371
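      /* Repack fixed-format SCSI sense data (plus the SCSI status byte) into the
       * SBP-2 status block's sense fields; descriptor-format sense is not handled
       * and aborts the request instead. */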
1372static void sbp_sense_mangle(struct sbp_target_request *req)
1373{
1374	struct se_cmd *se_cmd = &req->se_cmd;
1375	u8 *sense = req->sense_buf;
1376	u8 *status = req->status.data;
1377
1378	WARN_ON(se_cmd->scsi_sense_length < 18);
1379
1380	switch (sense[0] & 0x7f) { 		/* sfmt */
1381	case 0x70: /* current, fixed */
1382		status[0] = 0 << 6;
1383		break;
1384	case 0x71: /* deferred, fixed */
1385		status[0] = 1 << 6;
1386		break;
1387	case 0x72: /* current, descriptor */
1388	case 0x73: /* deferred, descriptor */
1389	default:
1390		/*
1391		 * TODO: SBP-3 specifies what we should do with descriptor
1392		 * format sense data
1393		 */
1394		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
1395			sense[0]);
1396		req->status.status |= cpu_to_be32(
1397			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1398			STATUS_BLOCK_DEAD(0) |
1399			STATUS_BLOCK_LEN(1) |
1400			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
1401		return;
1402	}
1403
1404	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
1405	status[1] =
1406		(sense[0] & 0x80) |		/* valid */
1407		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
1408		(sense[2] & 0x0f);		/* sense_key */
1409	status[2] = se_cmd->scsi_asc;		/* sense_code */
1410	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */
1411
1412	/* information */
1413	status[4] = sense[3];
1414	status[5] = sense[4];
1415	status[6] = sense[5];
1416	status[7] = sense[6];
1417
1418	/* CDB-dependent */
1419	status[8] = sense[8];
1420	status[9] = sense[9];
1421	status[10] = sense[10];
1422	status[11] = sense[11];
1423
1424	/* fru */
1425	status[12] = sense[14];
1426
1427	/* sense_key-dependent */
1428	status[13] = sense[15];
1429	status[14] = sense[16];
1430	status[15] = sense[17];
1431
1432	req->status.status |= cpu_to_be32(
1433		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1434		STATUS_BLOCK_DEAD(0) |
1435		STATUS_BLOCK_LEN(5) |
1436		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1437}
1438
1439static int sbp_send_sense(struct sbp_target_request *req)
1440{
1441	struct se_cmd *se_cmd = &req->se_cmd;
1442
1443	if (se_cmd->scsi_sense_length) {
1444		sbp_sense_mangle(req);
1445	} else {
1446		req->status.status |= cpu_to_be32(
1447			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1448			STATUS_BLOCK_DEAD(0) |
1449			STATUS_BLOCK_LEN(1) |
1450			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1451	}
1452
1453	return sbp_send_status(req);
1454}
1455
1456static void sbp_free_request(struct sbp_target_request *req)
1457{
1458	struct se_cmd *se_cmd = &req->se_cmd;
1459	struct se_session *se_sess = se_cmd->se_sess;
1460
1461	kfree(req->pg_tbl);
1462	kfree(req->cmd_buf);
1463
1464	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
1465}
1466
1467static void sbp_mgt_agent_process(struct work_struct *work)
1468{
1469	struct sbp_management_agent *agent =
1470		container_of(work, struct sbp_management_agent, work);
1471	struct sbp_management_request *req = agent->request;
1472	int ret;
1473	int status_data_len = 0;
1474
1475	/* fetch the ORB from the initiator */
1476	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
1477		req->node_addr, req->generation, req->speed,
1478		agent->orb_offset, &req->orb, sizeof(req->orb));
1479	if (ret != RCODE_COMPLETE) {
1480		pr_debug("mgt_orb fetch failed: %x\n", ret);
1481		goto out;
1482	}
1483
1484	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
1485		sbp2_pointer_to_addr(&req->orb.ptr1),
1486		sbp2_pointer_to_addr(&req->orb.ptr2),
1487		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
1488		sbp2_pointer_to_addr(&req->orb.status_fifo));
1489
1490	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
1491		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
1492		pr_err("mgt_orb bad request\n");
1493		goto out;
1494	}
1495
1496	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
1497	case MANAGEMENT_ORB_FUNCTION_LOGIN:
1498		sbp_management_request_login(agent, req, &status_data_len);
1499		break;
1500
1501	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
1502		sbp_management_request_query_logins(agent, req,
1503				&status_data_len);
1504		break;
1505
1506	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
1507		sbp_management_request_reconnect(agent, req, &status_data_len);
1508		break;
1509
1510	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
1511		pr_notice("SET PASSWORD not implemented\n");
1512
1513		req->status.status = cpu_to_be32(
1514			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1515			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1516
1517		break;
1518
1519	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
1520		sbp_management_request_logout(agent, req, &status_data_len);
1521		break;
1522
1523	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
1524		pr_notice("ABORT TASK not implemented\n");
1525
1526		req->status.status = cpu_to_be32(
1527			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1528			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1529
1530		break;
1531
1532	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
1533		pr_notice("ABORT TASK SET not implemented\n");
1534
1535		req->status.status = cpu_to_be32(
1536			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1537			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1538
1539		break;
1540
1541	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
1542		pr_notice("LOGICAL UNIT RESET not implemented\n");
1543
1544		req->status.status = cpu_to_be32(
1545			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1546			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1547
1548		break;
1549
1550	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
1551		pr_notice("TARGET RESET not implemented\n");
1552
1553		req->status.status = cpu_to_be32(
1554			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1555			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1556
1557		break;
1558
1559	default:
1560		pr_notice("unknown management function 0x%x\n",
1561			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
1562
1563		req->status.status = cpu_to_be32(
1564			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1565			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1566
1567		break;
1568	}
1569
1570	req->status.status |= cpu_to_be32(
1571		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
1572		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
1573		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
1574	req->status.orb_low = cpu_to_be32(agent->orb_offset);
1575
1576	/* write the status block back to the initiator */
1577	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
1578		req->node_addr, req->generation, req->speed,
1579		sbp2_pointer_to_addr(&req->orb.status_fifo),
1580		&req->status, 8 + status_data_len);
1581	if (ret != RCODE_COMPLETE) {
1582		pr_debug("mgt_orb status write failed: %x\n", ret);
1583		goto out;
1584	}
1585
1586out:
1587	fw_card_put(req->card);
1588	kfree(req);
1589
1590	spin_lock_bh(&agent->lock);
1591	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1592	spin_unlock_bh(&agent->lock);
1593}
1594
1595static void sbp_mgt_agent_rw(struct fw_card *card,
1596	struct fw_request *request, int tcode, int destination, int source,
1597	int generation, unsigned long long offset, void *data, size_t length,
1598	void *callback_data)
1599{
1600	struct sbp_management_agent *agent = callback_data;
1601	struct sbp2_pointer *ptr = data;
1602	int rcode = RCODE_ADDRESS_ERROR;
1603
1604	if (!agent->tport->enable)
1605		goto out;
1606
1607	if ((offset != agent->handler.offset) || (length != 8))
1608		goto out;
1609
1610	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
1611		struct sbp_management_request *req;
1612		int prev_state;
1613
1614		spin_lock_bh(&agent->lock);
1615		prev_state = agent->state;
1616		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
1617		spin_unlock_bh(&agent->lock);
1618
1619		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
1620			pr_notice("ignoring management request while busy\n");
1621			rcode = RCODE_CONFLICT_ERROR;
1622			goto out;
1623		}
1624		req = kzalloc(sizeof(*req), GFP_ATOMIC);
1625		if (!req) {
1626			rcode = RCODE_CONFLICT_ERROR;
1627			goto out;
1628		}
1629
1630		req->card = fw_card_get(card);
1631		req->generation = generation;
1632		req->node_addr = source;
1633		req->speed = fw_get_request_speed(request);
1634
1635		agent->orb_offset = sbp2_pointer_to_addr(ptr);
1636		agent->request = req;
1637
1638		queue_work(system_unbound_wq, &agent->work);
1639		rcode = RCODE_COMPLETE;
1640	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
1641		addr_to_sbp2_pointer(agent->orb_offset, ptr);
1642		rcode = RCODE_COMPLETE;
1643	} else {
1644		rcode = RCODE_TYPE_ERROR;
1645	}
1646
1647out:
1648	fw_send_response(card, request, rcode);
1649}
1650
1651static struct sbp_management_agent *sbp_management_agent_register(
1652		struct sbp_tport *tport)
1653{
1654	int ret;
1655	struct sbp_management_agent *agent;
1656
1657	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1658	if (!agent)
1659		return ERR_PTR(-ENOMEM);
1660
1661	spin_lock_init(&agent->lock);
1662	agent->tport = tport;
1663	agent->handler.length = 0x08;
1664	agent->handler.address_callback = sbp_mgt_agent_rw;
1665	agent->handler.callback_data = agent;
1666	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1667	INIT_WORK(&agent->work, sbp_mgt_agent_process);
1668	agent->orb_offset = 0;
1669	agent->request = NULL;
1670
1671	ret = fw_core_add_address_handler(&agent->handler,
1672			&sbp_register_region);
1673	if (ret < 0) {
1674		kfree(agent);
1675		return ERR_PTR(ret);
1676	}
1677
1678	return agent;
1679}
1680
1681static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
1682{
1683	fw_core_remove_address_handler(&agent->handler);
1684	cancel_work_sync(&agent->work);
1685	kfree(agent);
1686}
1687
1688static int sbp_check_true(struct se_portal_group *se_tpg)
1689{
1690	return 1;
1691}
1692
1693static int sbp_check_false(struct se_portal_group *se_tpg)
1694{
1695	return 0;
1696}
1697
1698static char *sbp_get_fabric_name(void)
1699{
1700	return "sbp";
1701}
1702
1703static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1704{
1705	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1706	struct sbp_tport *tport = tpg->tport;
1707
1708	return &tport->tport_name[0];
1709}
1710
1711static u16 sbp_get_tag(struct se_portal_group *se_tpg)
1712{
1713	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1714	return tpg->tport_tpgt;
1715}
1716
1717static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
1718{
1719	return 1;
1720}
1721
1722static void sbp_release_cmd(struct se_cmd *se_cmd)
1723{
1724	struct sbp_target_request *req = container_of(se_cmd,
1725			struct sbp_target_request, se_cmd);
1726
1727	sbp_free_request(req);
1728}
1729
1730static u32 sbp_sess_get_index(struct se_session *se_sess)
1731{
1732	return 0;
1733}
1734
1735static int sbp_write_pending(struct se_cmd *se_cmd)
1736{
1737	struct sbp_target_request *req = container_of(se_cmd,
1738			struct sbp_target_request, se_cmd);
1739	int ret;
1740
1741	ret = sbp_rw_data(req);
1742	if (ret) {
1743		req->status.status |= cpu_to_be32(
1744			STATUS_BLOCK_RESP(
1745				STATUS_RESP_TRANSPORT_FAILURE) |
1746			STATUS_BLOCK_DEAD(0) |
1747			STATUS_BLOCK_LEN(1) |
1748			STATUS_BLOCK_SBP_STATUS(
1749				SBP_STATUS_UNSPECIFIED_ERROR));
1750		sbp_send_status(req);
1751		return ret;
1752	}
1753
1754	target_execute_cmd(se_cmd);
1755	return 0;
1756}
1757
1758static int sbp_write_pending_status(struct se_cmd *se_cmd)
1759{
1760	return 0;
1761}
1762
1763static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
1764{
1765	return;
1766}
1767
1768static int sbp_get_cmd_state(struct se_cmd *se_cmd)
1769{
1770	return 0;
1771}
1772
1773static int sbp_queue_data_in(struct se_cmd *se_cmd)
1774{
1775	struct sbp_target_request *req = container_of(se_cmd,
1776			struct sbp_target_request, se_cmd);
1777	int ret;
1778
1779	ret = sbp_rw_data(req);
1780	if (ret) {
1781		req->status.status |= cpu_to_be32(
1782			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1783			STATUS_BLOCK_DEAD(0) |
1784			STATUS_BLOCK_LEN(1) |
1785			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1786		sbp_send_status(req);
1787		return ret;
1788	}
1789
1790	return sbp_send_sense(req);
1791}
1792
1793/*
1794 * Called after command (no data transfer) or after the write (to device)
1795 * operation is completed
1796 */
1797static int sbp_queue_status(struct se_cmd *se_cmd)
1798{
1799	struct sbp_target_request *req = container_of(se_cmd,
1800			struct sbp_target_request, se_cmd);
1801
1802	return sbp_send_sense(req);
1803}
1804
1805static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1806{
1807}
1808
1809static void sbp_aborted_task(struct se_cmd *se_cmd)
1810{
1811	return;
1812}
1813
1814static int sbp_check_stop_free(struct se_cmd *se_cmd)
1815{
1816	struct sbp_target_request *req = container_of(se_cmd,
1817			struct sbp_target_request, se_cmd);
1818
1819	return transport_generic_free_cmd(&req->se_cmd, 0);
1820}
1821
1822static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
1823{
1824	struct se_lun *lun;
1825	int count = 0;
1826
1827	rcu_read_lock();
1828	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
1829		count++;
1830	rcu_read_unlock();
1831
1832	return count;
1833}
1834
1835static int sbp_update_unit_directory(struct sbp_tport *tport)
1836{
1837	struct se_lun *lun;
1838	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
1839	u32 *data;
1840
1841	if (tport->unit_directory.data) {
1842		fw_core_remove_descriptor(&tport->unit_directory);
1843		kfree(tport->unit_directory.data);
1844		tport->unit_directory.data = NULL;
1845	}
1846
1847	if (!tport->enable || !tport->tpg)
1848		return 0;
1849
1850	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
1851
1852	/*
1853	 * Number of entries in the final unit directory:
1854	 *  - all of those in the template
1855	 *  - management_agent
1856	 *  - unit_characteristics
1857	 *  - reconnect_timeout
1858	 *  - unit unique ID
1859	 *  - one for each LUN
1860	 *
1861	 *  MUST NOT include leaf or sub-directory entries
1862	 */
1863	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
1864
1865	if (tport->directory_id != -1)
1866		num_entries++;
1867
1868	/* allocate num_entries + 4 for the header and unique ID leaf */
1869	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
1870	if (!data)
1871		return -ENOMEM;
1872
1873	/* directory_length */
1874	data[idx++] = num_entries << 16;
1875
1876	/* directory_id */
1877	if (tport->directory_id != -1)
1878		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
1879
1880	/* unit directory template */
1881	memcpy(&data[idx], sbp_unit_directory_template,
1882			sizeof(sbp_unit_directory_template));
1883	idx += ARRAY_SIZE(sbp_unit_directory_template);
1884
1885	/* management_agent */
1886	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
1887	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
1888
1889	/* unit_characteristics */
1890	data[idx++] = 0x3a000000 |
1891		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
1892		SBP_ORB_FETCH_SIZE;
1893
1894	/* reconnect_timeout */
1895	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
1896
1897	/* unit unique ID (leaf is just after LUNs) */
1898	data[idx++] = 0x8d000000 | (num_luns + 1);
1899
1900	rcu_read_lock();
1901	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
1902		struct se_device *dev;
1903		int type;
1904		/*
1905		 * rcu_dereference_raw protected by se_lun->lun_group symlink
1906		 * reference to se_device->dev_group.
1907		 */
1908		dev = rcu_dereference_raw(lun->lun_se_dev);
1909		type = dev->transport->get_device_type(dev);
1910
1911		/* logical_unit_number */
1912		data[idx++] = 0x14000000 |
1913			((type << 16) & 0x1f0000) |
1914			(lun->unpacked_lun & 0xffff);
1915	}
1916	rcu_read_unlock();
1917
1918	/* unit unique ID leaf */
1919	data[idx++] = 2 << 16;
1920	data[idx++] = tport->guid >> 32;
1921	data[idx++] = tport->guid;
1922
1923	tport->unit_directory.length = idx;
1924	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
1925	tport->unit_directory.data = data;
1926
1927	ret = fw_core_add_descriptor(&tport->unit_directory);
1928	if (ret < 0) {
1929		kfree(tport->unit_directory.data);
1930		tport->unit_directory.data = NULL;
1931	}
1932
1933	return ret;
1934}
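/*
 * Sketch of the unit directory built above (keys taken from the literals in
 * the code; quadlet values vary with configuration):
 *
 *          directory_length = num_entries
 *          directory_id                   (only if explicitly configured)
 *          ...sbp_unit_directory_template entries...
 *   0x54   management_agent               offset of the management agent CSR
 *   0x3a   unit_characteristics           mgt_orb_timeout doubled into
 *                                         500 ms units | ORB fetch size
 *   0x3d   reconnect_timeout
 *   0x8d   unit unique ID                 points at the leaf after the LUNs
 *   0x14   logical_unit_number            one entry per LUN (type | LUN)
 *          leaf: length 2, then GUID high and low quadlets
 */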
1935
1936static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
1937{
1938	const char *cp;
1939	char c, nibble;
1940	int pos = 0, err;
1941
1942	*wwn = 0;
1943	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
1944		c = *cp;
1945		if (c == '\n' && cp[1] == '\0')
1946			continue;
1947		if (c == '\0') {
1948			err = 2;
1949			if (pos != 16)
1950				goto fail;
1951			return cp - name;
1952		}
1953		err = 3;
1954		if (isdigit(c))
1955			nibble = c - '0';
1956		else if (isxdigit(c))
1957			nibble = tolower(c) - 'a' + 10;
1958		else
1959			goto fail;
1960		*wwn = (*wwn << 4) | nibble;
1961		pos++;
1962	}
1963	err = 4;
1964fail:
1965	printk(KERN_INFO "err %u len %zu pos %u\n",
1966			err, cp - name, pos);
1967	return -1;
1968}
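/*
 * Illustrative example: sbp_parse_wwn("0001020304050607", &wwn) accepts
 * exactly 16 hex digits (an optional trailing newline is tolerated) and
 * yields wwn == 0x0001020304050607; any other length or character fails
 * with -1 after logging the offending position.
 */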
1969
1970static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
1971{
1972	return snprintf(buf, len, "%016llx", wwn);
1973}
1974
1975static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1976{
1977	u64 guid = 0;
1978
1979	if (sbp_parse_wwn(name, &guid) < 0)
1980		return -EINVAL;
1981	return 0;
1982}
1983
1984static int sbp_post_link_lun(
1985		struct se_portal_group *se_tpg,
1986		struct se_lun *se_lun)
1987{
1988	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1989
1990	return sbp_update_unit_directory(tpg->tport);
1991}
1992
1993static void sbp_pre_unlink_lun(
1994		struct se_portal_group *se_tpg,
1995		struct se_lun *se_lun)
1996{
1997	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1998	struct sbp_tport *tport = tpg->tport;
1999	int ret;
2000
2001	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
2002		tport->enable = 0;
2003
2004	ret = sbp_update_unit_directory(tport);
2005	if (ret < 0)
2006		pr_err("unlink LUN: failed to update unit directory\n");
2007}
2008
2009static struct se_portal_group *sbp_make_tpg(
2010		struct se_wwn *wwn,
2011		struct config_group *group,
2012		const char *name)
2013{
2014	struct sbp_tport *tport =
2015		container_of(wwn, struct sbp_tport, tport_wwn);
2016
2017	struct sbp_tpg *tpg;
2018	unsigned long tpgt;
2019	int ret;
2020
2021	if (strstr(name, "tpgt_") != name)
2022		return ERR_PTR(-EINVAL);
2023	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
2024		return ERR_PTR(-EINVAL);
2025
2026	if (tport->tpg) {
2027		pr_err("Only one TPG per Unit is possible.\n");
2028		return ERR_PTR(-EBUSY);
2029	}
2030
2031	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2032	if (!tpg) {
2033		pr_err("Unable to allocate struct sbp_tpg\n");
2034		return ERR_PTR(-ENOMEM);
2035	}
2036
2037	tpg->tport = tport;
2038	tpg->tport_tpgt = tpgt;
2039	tport->tpg = tpg;
2040
2041	/* default attribute values */
2042	tport->enable = 0;
2043	tport->directory_id = -1;
2044	tport->mgt_orb_timeout = 15;
2045	tport->max_reconnect_timeout = 5;
2046	tport->max_logins_per_lun = 1;
2047
2048	tport->mgt_agt = sbp_management_agent_register(tport);
2049	if (IS_ERR(tport->mgt_agt)) {
2050		ret = PTR_ERR(tport->mgt_agt);
2051		goto out_free_tpg;
2052	}
2053
2054	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
2055	if (ret < 0)
2056		goto out_unreg_mgt_agt;
2057
2058	return &tpg->se_tpg;
2059
2060out_unreg_mgt_agt:
2061	sbp_management_agent_unregister(tport->mgt_agt);
2062out_free_tpg:
2063	tport->tpg = NULL;
2064	kfree(tpg);
2065	return ERR_PTR(ret);
2066}
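/*
 * From userspace this is reached via configfs, e.g. (illustrative path,
 * assuming configfs is mounted in the usual place):
 *
 *   mkdir /sys/kernel/config/target/sbp/<guid>/tpgt_1
 *
 * The "tpgt_" prefix with a decimal tag is mandatory, and only one TPG per
 * unit is accepted.
 */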
2067
2068static void sbp_drop_tpg(struct se_portal_group *se_tpg)
2069{
2070	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2071	struct sbp_tport *tport = tpg->tport;
2072
2073	core_tpg_deregister(se_tpg);
2074	sbp_management_agent_unregister(tport->mgt_agt);
2075	tport->tpg = NULL;
2076	kfree(tpg);
2077}
2078
2079static struct se_wwn *sbp_make_tport(
2080		struct target_fabric_configfs *tf,
2081		struct config_group *group,
2082		const char *name)
2083{
2084	struct sbp_tport *tport;
2085	u64 guid = 0;
2086
2087	if (sbp_parse_wwn(name, &guid) < 0)
2088		return ERR_PTR(-EINVAL);
2089
2090	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2091	if (!tport) {
2092		pr_err("Unable to allocate struct sbp_tport\n");
2093		return ERR_PTR(-ENOMEM);
2094	}
2095
2096	tport->guid = guid;
2097	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2098
2099	return &tport->tport_wwn;
2100}
2101
2102static void sbp_drop_tport(struct se_wwn *wwn)
2103{
2104	struct sbp_tport *tport =
2105		container_of(wwn, struct sbp_tport, tport_wwn);
2106
2107	kfree(tport);
2108}
2109
2110static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
2111{
2112	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
2113}
2114
2115CONFIGFS_ATTR_RO(sbp_wwn_, version);
2116
2117static struct configfs_attribute *sbp_wwn_attrs[] = {
2118	&sbp_wwn_attr_version,
2119	NULL,
2120};
2121
2122static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
2123{
2124	struct se_portal_group *se_tpg = to_tpg(item);
2125	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2126	struct sbp_tport *tport = tpg->tport;
2127
2128	if (tport->directory_id == -1)
2129		return sprintf(page, "implicit\n");
2130	else
2131		return sprintf(page, "%06x\n", tport->directory_id);
2132}
2133
2134static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
2135		const char *page, size_t count)
2136{
2137	struct se_portal_group *se_tpg = to_tpg(item);
2138	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2139	struct sbp_tport *tport = tpg->tport;
2140	unsigned long val;
2141
2142	if (tport->enable) {
2143		pr_err("Cannot change the directory_id on an active target.\n");
2144		return -EBUSY;
2145	}
2146
2147	if (strstr(page, "implicit") == page) {
2148		tport->directory_id = -1;
2149	} else {
2150		if (kstrtoul(page, 16, &val) < 0)
2151			return -EINVAL;
2152		if (val > 0xffffff)
2153			return -EINVAL;
2154
2155		tport->directory_id = val;
2156	}
2157
2158	return count;
2159}
2160
2161static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
2162{
2163	struct se_portal_group *se_tpg = to_tpg(item);
2164	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2165	struct sbp_tport *tport = tpg->tport;
2166	return sprintf(page, "%d\n", tport->enable);
2167}
2168
2169static ssize_t sbp_tpg_enable_store(struct config_item *item,
2170		const char *page, size_t count)
2171{
2172	struct se_portal_group *se_tpg = to_tpg(item);
2173	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2174	struct sbp_tport *tport = tpg->tport;
2175	unsigned long val;
2176	int ret;
2177
2178	if (kstrtoul(page, 0, &val) < 0)
2179		return -EINVAL;
2180	if ((val != 0) && (val != 1))
2181		return -EINVAL;
2182
2183	if (tport->enable == val)
2184		return count;
2185
2186	if (val) {
2187		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
2188			pr_err("Cannot enable a target with no LUNs!\n");
2189			return -EINVAL;
2190		}
2191	} else {
2192		/* XXX: force-shutdown sessions instead? */
2193		spin_lock_bh(&se_tpg->session_lock);
2194		if (!list_empty(&se_tpg->tpg_sess_list)) {
2195			spin_unlock_bh(&se_tpg->session_lock);
2196			return -EBUSY;
2197		}
2198		spin_unlock_bh(&se_tpg->session_lock);
2199	}
2200
2201	tport->enable = val;
2202
2203	ret = sbp_update_unit_directory(tport);
2204	if (ret < 0) {
2205		pr_err("Could not update Config ROM\n");
2206		return ret;
2207	}
2208
2209	return count;
2210}
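/*
 * Usage sketch (illustrative path): writing 1 publishes the unit directory
 * into the local config ROM and makes the target visible on the bus;
 * writing 0 is refused with -EBUSY while any session is still logged in:
 *
 *   echo 1 > /sys/kernel/config/target/sbp/<guid>/tpgt_1/enable
 */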
2211
2212CONFIGFS_ATTR(sbp_tpg_, directory_id);
2213CONFIGFS_ATTR(sbp_tpg_, enable);
2214
2215static struct configfs_attribute *sbp_tpg_base_attrs[] = {
2216	&sbp_tpg_attr_directory_id,
2217	&sbp_tpg_attr_enable,
2218	NULL,
2219};
2220
2221static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
2222		char *page)
2223{
2224	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2225	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2226	struct sbp_tport *tport = tpg->tport;
2227	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
2228}
2229
2230static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
2231		const char *page, size_t count)
2232{
2233	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2234	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2235	struct sbp_tport *tport = tpg->tport;
2236	unsigned long val;
2237	int ret;
2238
2239	if (kstrtoul(page, 0, &val) < 0)
2240		return -EINVAL;
2241	if ((val < 1) || (val > 127))
2242		return -EINVAL;
2243
2244	if (tport->mgt_orb_timeout == val)
2245		return count;
2246
2247	tport->mgt_orb_timeout = val;
2248
2249	ret = sbp_update_unit_directory(tport);
2250	if (ret < 0)
2251		return ret;
2252
2253	return count;
2254}
2255
2256static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
2257		char *page)
2258{
2259	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2260	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2261	struct sbp_tport *tport = tpg->tport;
2262	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
2263}
2264
2265static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
2266		const char *page, size_t count)
2267{
2268	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2269	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2270	struct sbp_tport *tport = tpg->tport;
2271	unsigned long val;
2272	int ret;
2273
2274	if (kstrtoul(page, 0, &val) < 0)
2275		return -EINVAL;
2276	if ((val < 1) || (val > 32767))
2277		return -EINVAL;
2278
2279	if (tport->max_reconnect_timeout == val)
2280		return count;
2281
2282	tport->max_reconnect_timeout = val;
2283
2284	ret = sbp_update_unit_directory(tport);
2285	if (ret < 0)
2286		return ret;
2287
2288	return count;
2289}
2290
2291static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
2292		char *page)
2293{
2294	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2295	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2296	struct sbp_tport *tport = tpg->tport;
2297	return sprintf(page, "%d\n", tport->max_logins_per_lun);
2298}
2299
2300static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
2301		const char *page, size_t count)
2302{
2303	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2304	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2305	struct sbp_tport *tport = tpg->tport;
2306	unsigned long val;
2307
2308	if (kstrtoul(page, 0, &val) < 0)
2309		return -EINVAL;
2310	if ((val < 1) || (val > 127))
2311		return -EINVAL;
2312
2313	/* XXX: also check against current count? */
2314
2315	tport->max_logins_per_lun = val;
2316
2317	return count;
2318}
2319
2320CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
2321CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
2322CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);
2323
2324static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2325	&sbp_tpg_attrib_attr_mgt_orb_timeout,
2326	&sbp_tpg_attrib_attr_max_reconnect_timeout,
2327	&sbp_tpg_attrib_attr_max_logins_per_lun,
2328	NULL,
2329};
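/*
 * Summary of the attrib knobs registered above: mgt_orb_timeout (1-127 s),
 * max_reconnect_timeout (1-32767 s) and max_logins_per_lun (1-127). The
 * first two rewrite the unit directory when changed; the login limit is
 * only checked for future logins.
 */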
2330
2331static const struct target_core_fabric_ops sbp_ops = {
2332	.module				= THIS_MODULE,
2333	.name				= "sbp",
2334	.get_fabric_name		= sbp_get_fabric_name,
2335	.tpg_get_wwn			= sbp_get_fabric_wwn,
2336	.tpg_get_tag			= sbp_get_tag,
2337	.tpg_check_demo_mode		= sbp_check_true,
2338	.tpg_check_demo_mode_cache	= sbp_check_true,
2339	.tpg_check_demo_mode_write_protect = sbp_check_false,
2340	.tpg_check_prod_mode_write_protect = sbp_check_false,
2341	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
2342	.release_cmd			= sbp_release_cmd,
2343	.sess_get_index			= sbp_sess_get_index,
2344	.write_pending			= sbp_write_pending,
2345	.write_pending_status		= sbp_write_pending_status,
2346	.set_default_node_attributes	= sbp_set_default_node_attrs,
2347	.get_cmd_state			= sbp_get_cmd_state,
2348	.queue_data_in			= sbp_queue_data_in,
2349	.queue_status			= sbp_queue_status,
2350	.queue_tm_rsp			= sbp_queue_tm_rsp,
2351	.aborted_task			= sbp_aborted_task,
2352	.check_stop_free		= sbp_check_stop_free,
2353
2354	.fabric_make_wwn		= sbp_make_tport,
2355	.fabric_drop_wwn		= sbp_drop_tport,
2356	.fabric_make_tpg		= sbp_make_tpg,
2357	.fabric_drop_tpg		= sbp_drop_tpg,
2358	.fabric_post_link		= sbp_post_link_lun,
2359	.fabric_pre_unlink		= sbp_pre_unlink_lun,
2360	.fabric_make_np			= NULL,
2361	.fabric_drop_np			= NULL,
2362	.fabric_init_nodeacl		= sbp_init_nodeacl,
2363
2364	.tfc_wwn_attrs			= sbp_wwn_attrs,
2365	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
2366	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
2367};
2368
2369static int __init sbp_init(void)
2370{
2371	return target_register_template(&sbp_ops);
2372}
2373
2374static void __exit sbp_exit(void)
2375{
2376	target_unregister_template(&sbp_ops);
2377}
2378
2379MODULE_DESCRIPTION("FireWire SBP fabric driver");
2380MODULE_LICENSE("GPL");
2381module_init(sbp_init);
2382module_exit(sbp_exit);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * SBP2 target driver (SCSI over IEEE1394 in target mode)
   4 *
   5 * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
   6 */
   7
   8#define KMSG_COMPONENT "sbp_target"
   9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  10
  11#include <linux/kernel.h>
  12#include <linux/module.h>
  13#include <linux/init.h>
  14#include <linux/types.h>
  15#include <linux/string.h>
  16#include <linux/configfs.h>
  17#include <linux/ctype.h>
  18#include <linux/delay.h>
  19#include <linux/firewire.h>
  20#include <linux/firewire-constants.h>
  21#include <scsi/scsi_proto.h>
  22#include <scsi/scsi_tcq.h>
  23#include <target/target_core_base.h>
  24#include <target/target_core_backend.h>
  25#include <target/target_core_fabric.h>
  26#include <linux/unaligned.h>
  27
  28#include "sbp_target.h"
  29
  30/* FireWire address region for management and command block address handlers */
  31static const struct fw_address_region sbp_register_region = {
  32	.start	= CSR_REGISTER_BASE + 0x10000,
  33	.end	= 0x1000000000000ULL,
  34};
  35
  36static const u32 sbp_unit_directory_template[] = {
  37	0x1200609e, /* unit_specifier_id: NCITS/T10 */
  38	0x13010483, /* unit_sw_version: 1155D Rev 4 */
  39	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
  40	0x390104d8, /* command_set: SPC-2 */
  41	0x3b000000, /* command_set_revision: 0 */
  42	0x3c000001, /* firmware_revision: 1 */
  43};
  44
  45#define SESSION_MAINTENANCE_INTERVAL HZ
  46
  47static atomic_t login_id = ATOMIC_INIT(0);
  48
  49static void session_maintenance_work(struct work_struct *);
  50static int sbp_run_transaction(struct fw_card *, int, int, int, int,
  51		unsigned long long, void *, size_t);
  52
  53static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
  54{
  55	int ret;
  56	__be32 high, low;
  57
  58	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
  59			req->node_addr, req->generation, req->speed,
  60			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
  61			&high, sizeof(high));
  62	if (ret != RCODE_COMPLETE)
  63		return ret;
  64
  65	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
  66			req->node_addr, req->generation, req->speed,
  67			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
  68			&low, sizeof(low));
  69	if (ret != RCODE_COMPLETE)
  70		return ret;
  71
  72	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
  73
  74	return RCODE_COMPLETE;
  75}
  76
  77static struct sbp_session *sbp_session_find_by_guid(
  78	struct sbp_tpg *tpg, u64 guid)
  79{
  80	struct se_session *se_sess;
  81	struct sbp_session *sess, *found = NULL;
  82
  83	spin_lock_bh(&tpg->se_tpg.session_lock);
  84	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
  85		sess = se_sess->fabric_sess_ptr;
  86		if (sess->guid == guid)
  87			found = sess;
  88	}
  89	spin_unlock_bh(&tpg->se_tpg.session_lock);
  90
  91	return found;
  92}
  93
  94static struct sbp_login_descriptor *sbp_login_find_by_lun(
  95		struct sbp_session *session, u32 unpacked_lun)
  96{
  97	struct sbp_login_descriptor *login, *found = NULL;
  98
  99	spin_lock_bh(&session->lock);
 100	list_for_each_entry(login, &session->login_list, link) {
 101		if (login->login_lun == unpacked_lun)
 102			found = login;
 103	}
 104	spin_unlock_bh(&session->lock);
 105
 106	return found;
 107}
 108
 109static int sbp_login_count_all_by_lun(
 110		struct sbp_tpg *tpg,
 111		u32 unpacked_lun,
 112		int exclusive)
 113{
 114	struct se_session *se_sess;
 115	struct sbp_session *sess;
 116	struct sbp_login_descriptor *login;
 117	int count = 0;
 118
 119	spin_lock_bh(&tpg->se_tpg.session_lock);
 120	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 121		sess = se_sess->fabric_sess_ptr;
 122
 123		spin_lock_bh(&sess->lock);
 124		list_for_each_entry(login, &sess->login_list, link) {
 125			if (login->login_lun != unpacked_lun)
 126				continue;
 127
 128			if (!exclusive || login->exclusive)
 129				count++;
 130		}
 131		spin_unlock_bh(&sess->lock);
 132	}
 133	spin_unlock_bh(&tpg->se_tpg.session_lock);
 134
 135	return count;
 136}
 137
 138static struct sbp_login_descriptor *sbp_login_find_by_id(
 139	struct sbp_tpg *tpg, int login_id)
 140{
 141	struct se_session *se_sess;
 142	struct sbp_session *sess;
 143	struct sbp_login_descriptor *login, *found = NULL;
 144
 145	spin_lock_bh(&tpg->se_tpg.session_lock);
 146	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 147		sess = se_sess->fabric_sess_ptr;
 148
 149		spin_lock_bh(&sess->lock);
 150		list_for_each_entry(login, &sess->login_list, link) {
 151			if (login->login_id == login_id)
 152				found = login;
 153		}
 154		spin_unlock_bh(&sess->lock);
 155	}
 156	spin_unlock_bh(&tpg->se_tpg.session_lock);
 157
 158	return found;
 159}
 160
 161static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
 162{
 163	struct se_portal_group *se_tpg = &tpg->se_tpg;
 164	struct se_lun *se_lun;
 165
 166	rcu_read_lock();
 167	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
 168		if (se_lun->unpacked_lun == login_lun) {
 169			rcu_read_unlock();
 170			*err = 0;
 171			return login_lun;
 172		}
 173	}
 174	rcu_read_unlock();
 175
 176	*err = -ENODEV;
 177	return login_lun;
 178}
 179
 180static struct sbp_session *sbp_session_create(
 181		struct sbp_tpg *tpg,
 182		u64 guid)
 183{
 184	struct sbp_session *sess;
 185	int ret;
 186	char guid_str[17];
 187
 188	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
 189
 190	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
 191	if (!sess)
 192		return ERR_PTR(-ENOMEM);
 193
 194	spin_lock_init(&sess->lock);
 195	INIT_LIST_HEAD(&sess->login_list);
 196	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
 197	sess->guid = guid;
 198
 199	sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
 200					     sizeof(struct sbp_target_request),
 201					     TARGET_PROT_NORMAL, guid_str,
 202					     sess, NULL);
 203	if (IS_ERR(sess->se_sess)) {
 204		pr_err("failed to init se_session\n");
 205		ret = PTR_ERR(sess->se_sess);
 206		kfree(sess);
 207		return ERR_PTR(ret);
 208	}
 209
 210	return sess;
 211}
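/*
 * One sbp_session exists per initiator GUID: the session carries a pool of
 * 128 pre-allocated sbp_target_request tags, and the GUID rendered as a
 * 16-digit hex string doubles as the se_session initiator name.
 */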
 212
 213static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
 214{
 215	spin_lock_bh(&sess->lock);
 216	if (!list_empty(&sess->login_list)) {
 217		spin_unlock_bh(&sess->lock);
 218		return;
 219	}
 220	spin_unlock_bh(&sess->lock);
 221
 222	if (cancel_work)
 223		cancel_delayed_work_sync(&sess->maint_work);
 224
 225	target_remove_session(sess->se_sess);
 226
 227	if (sess->card)
 228		fw_card_put(sess->card);
 229
 230	kfree(sess);
 231}
 232
 233static void sbp_target_agent_unregister(struct sbp_target_agent *);
 234
 235static void sbp_login_release(struct sbp_login_descriptor *login,
 236	bool cancel_work)
 237{
 238	struct sbp_session *sess = login->sess;
 239
 240	/* FIXME: abort/wait on tasks */
 241
 242	sbp_target_agent_unregister(login->tgt_agt);
 243
 244	if (sess) {
 245		spin_lock_bh(&sess->lock);
 246		list_del(&login->link);
 247		spin_unlock_bh(&sess->lock);
 248
 249		sbp_session_release(sess, cancel_work);
 250	}
 251
 252	kfree(login);
 253}
 254
 255static struct sbp_target_agent *sbp_target_agent_register(
 256	struct sbp_login_descriptor *);
 257
 258static void sbp_management_request_login(
 259	struct sbp_management_agent *agent, struct sbp_management_request *req,
 260	int *status_data_size)
 261{
 262	struct sbp_tport *tport = agent->tport;
 263	struct sbp_tpg *tpg = tport->tpg;
 264	struct sbp_session *sess;
 265	struct sbp_login_descriptor *login;
 266	struct sbp_login_response_block *response;
 267	u64 guid;
 268	u32 unpacked_lun;
 269	int login_response_len, ret;
 270
 271	unpacked_lun = sbp_get_lun_from_tpg(tpg,
 272			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
 273	if (ret) {
 274		pr_notice("login to unknown LUN: %d\n",
 275			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
 276
 277		req->status.status = cpu_to_be32(
 278			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 279			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
 280		return;
 281	}
 282
 283	ret = read_peer_guid(&guid, req);
 284	if (ret != RCODE_COMPLETE) {
 285		pr_warn("failed to read peer GUID: %d\n", ret);
 286
 287		req->status.status = cpu_to_be32(
 288			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 289			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 290		return;
 291	}
 292
 293	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
 294		unpacked_lun, guid);
 295
 296	sess = sbp_session_find_by_guid(tpg, guid);
 297	if (sess) {
 298		login = sbp_login_find_by_lun(sess, unpacked_lun);
 299		if (login) {
 300			pr_notice("initiator already logged-in\n");
 301
 302			/*
 303			 * SBP-2 R4 says we should return access denied, but
 304			 * that can confuse initiators. Instead we need to
 305			 * treat this like a reconnect, but send the login
 306			 * response block like a fresh login.
 307			 *
 308			 * This is required particularly in the case of Apple
 309			 * devices booting off the FireWire target, where
 310			 * the firmware has an active login to the target. When
 311			 * the OS takes control of the session it issues its own
 312			 * LOGIN rather than a RECONNECT. To avoid the machine
 313			 * waiting until the reconnect_hold expires, we can skip
 314			 * the ACCESS_DENIED errors to speed things up.
 315			 */
 316
 317			goto already_logged_in;
 318		}
 319	}
 320
 321	/*
 322	 * check exclusive bit in login request
 323	 * reject with access_denied if any logins present
 324	 */
 325	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
 326			sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
 327		pr_warn("refusing exclusive login with other active logins\n");
 328
 329		req->status.status = cpu_to_be32(
 330			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 331			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 332		return;
 333	}
 334
 335	/*
 336	 * check exclusive bit in any existing login descriptor
 337	 * reject with access_denied if any exclusive logins present
 338	 */
 339	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
 340		pr_warn("refusing login while another exclusive login present\n");
 341
 342		req->status.status = cpu_to_be32(
 343			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 344			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 345		return;
 346	}
 347
 348	/*
 349	 * check we haven't exceeded the number of allowed logins
 350	 * reject with resources_unavailable if we have
 351	 */
 352	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
 353			tport->max_logins_per_lun) {
 354		pr_warn("max number of logins reached\n");
 355
 356		req->status.status = cpu_to_be32(
 357			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 358			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 359		return;
 360	}
 361
 362	if (!sess) {
 363		sess = sbp_session_create(tpg, guid);
 364		if (IS_ERR(sess)) {
 365			switch (PTR_ERR(sess)) {
 366			case -EPERM:
 367				ret = SBP_STATUS_ACCESS_DENIED;
 368				break;
 369			default:
 370				ret = SBP_STATUS_RESOURCES_UNAVAIL;
 371				break;
 372			}
 373
 374			req->status.status = cpu_to_be32(
 375				STATUS_BLOCK_RESP(
 376					STATUS_RESP_REQUEST_COMPLETE) |
 377				STATUS_BLOCK_SBP_STATUS(ret));
 378			return;
 379		}
 380
 381		sess->node_id = req->node_addr;
 382		sess->card = fw_card_get(req->card);
 383		sess->generation = req->generation;
 384		sess->speed = req->speed;
 385
 386		schedule_delayed_work(&sess->maint_work,
 387				SESSION_MAINTENANCE_INTERVAL);
 388	}
 389
 390	/* only take the latest reconnect_hold into account */
 391	sess->reconnect_hold = min(
 392		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
 393		tport->max_reconnect_timeout) - 1;
 394
 395	login = kmalloc(sizeof(*login), GFP_KERNEL);
 396	if (!login) {
 397		pr_err("failed to allocate login descriptor\n");
 398
 399		sbp_session_release(sess, true);
 400
 401		req->status.status = cpu_to_be32(
 402			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 403			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 404		return;
 405	}
 406
 407	login->sess = sess;
 408	login->login_lun = unpacked_lun;
 409	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
 410	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
 411	login->login_id = atomic_inc_return(&login_id);
 412
 413	login->tgt_agt = sbp_target_agent_register(login);
 414	if (IS_ERR(login->tgt_agt)) {
 415		ret = PTR_ERR(login->tgt_agt);
 416		pr_err("failed to map command block handler: %d\n", ret);
 417
 418		sbp_session_release(sess, true);
 419		kfree(login);
 420
 421		req->status.status = cpu_to_be32(
 422			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 423			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 424		return;
 425	}
 426
 427	spin_lock_bh(&sess->lock);
 428	list_add_tail(&login->link, &sess->login_list);
 429	spin_unlock_bh(&sess->lock);
 430
 431already_logged_in:
 432	response = kzalloc(sizeof(*response), GFP_KERNEL);
 433	if (!response) {
 434		pr_err("failed to allocate login response block\n");
 435
 436		sbp_login_release(login, true);
 437
 438		req->status.status = cpu_to_be32(
 439			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 440			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 441		return;
 442	}
 443
 444	login_response_len = clamp_val(
 445			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
 446			12, sizeof(*response));
 447	response->misc = cpu_to_be32(
 448		((login_response_len & 0xffff) << 16) |
 449		(login->login_id & 0xffff));
 450	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
 451	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
 452		&response->command_block_agent);
 453
 454	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
 455		sess->node_id, sess->generation, sess->speed,
 456		sbp2_pointer_to_addr(&req->orb.ptr2), response,
 457		login_response_len);
 458	if (ret != RCODE_COMPLETE) {
 459		pr_debug("failed to write login response block: %x\n", ret);
 460
 461		kfree(response);
 462		sbp_login_release(login, true);
 463
 464		req->status.status = cpu_to_be32(
 465			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 466			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 467		return;
 468	}
 469
 470	kfree(response);
 471
 472	req->status.status = cpu_to_be32(
 473		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 474		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 475}
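/*
 * Note on the reconnect hold computed above: the login ORB requests
 * 2^n seconds, which is clamped to max_reconnect_timeout and stored (and
 * reported to the initiator) minus one; the maintenance code later waits
 * reconnect_hold + 1 seconds before expiring the session.
 */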
 476
 477static void sbp_management_request_query_logins(
 478	struct sbp_management_agent *agent, struct sbp_management_request *req,
 479	int *status_data_size)
 480{
 481	pr_notice("QUERY LOGINS not implemented\n");
 482	/* FIXME: implement */
 483
 484	req->status.status = cpu_to_be32(
 485		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 486		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
 487}
 488
 489static void sbp_management_request_reconnect(
 490	struct sbp_management_agent *agent, struct sbp_management_request *req,
 491	int *status_data_size)
 492{
 493	struct sbp_tport *tport = agent->tport;
 494	struct sbp_tpg *tpg = tport->tpg;
 495	int ret;
 496	u64 guid;
 497	struct sbp_login_descriptor *login;
 498
 499	ret = read_peer_guid(&guid, req);
 500	if (ret != RCODE_COMPLETE) {
 501		pr_warn("failed to read peer GUID: %d\n", ret);
 502
 503		req->status.status = cpu_to_be32(
 504			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 505			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 506		return;
 507	}
 508
 509	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
 510
 511	login = sbp_login_find_by_id(tpg,
 512		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
 513
 514	if (!login) {
 515		pr_err("mgt_agent RECONNECT unknown login ID\n");
 516
 517		req->status.status = cpu_to_be32(
 518			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 519			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 520		return;
 521	}
 522
 523	if (login->sess->guid != guid) {
 524		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
 525
 526		req->status.status = cpu_to_be32(
 527			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 528			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 529		return;
 530	}
 531
 532	spin_lock_bh(&login->sess->lock);
 533	if (login->sess->card)
 534		fw_card_put(login->sess->card);
 535
 536	/* update the node details */
 537	login->sess->generation = req->generation;
 538	login->sess->node_id = req->node_addr;
 539	login->sess->card = fw_card_get(req->card);
 540	login->sess->speed = req->speed;
 541	spin_unlock_bh(&login->sess->lock);
 542
 543	req->status.status = cpu_to_be32(
 544		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 545		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 546}
 547
 548static void sbp_management_request_logout(
 549	struct sbp_management_agent *agent, struct sbp_management_request *req,
 550	int *status_data_size)
 551{
 552	struct sbp_tport *tport = agent->tport;
 553	struct sbp_tpg *tpg = tport->tpg;
 554	int id;
 555	struct sbp_login_descriptor *login;
 556
 557	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
 558
 559	login = sbp_login_find_by_id(tpg, id);
 560	if (!login) {
 561		pr_warn("cannot find login: %d\n", id);
 562
 563		req->status.status = cpu_to_be32(
 564			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 565			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
 566		return;
 567	}
 568
 569	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
 570		login->login_lun, login->login_id);
 571
 572	if (req->node_addr != login->sess->node_id) {
 573		pr_warn("logout from different node ID\n");
 574
 575		req->status.status = cpu_to_be32(
 576			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 577			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 578		return;
 579	}
 580
 581	sbp_login_release(login, true);
 582
 583	req->status.status = cpu_to_be32(
 584		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 585		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 586}
 587
 588static void session_check_for_reset(struct sbp_session *sess)
 589{
 590	bool card_valid = false;
 591
 592	spin_lock_bh(&sess->lock);
 593
 594	if (sess->card) {
 595		spin_lock_irq(&sess->card->lock);
 596		card_valid = (sess->card->local_node != NULL);
 597		spin_unlock_irq(&sess->card->lock);
 598
 599		if (!card_valid) {
 600			fw_card_put(sess->card);
 601			sess->card = NULL;
 602		}
 603	}
 604
 605	if (!card_valid || (sess->generation != sess->card->generation)) {
 606		pr_info("Waiting for reconnect from node: %016llx\n",
 607				sess->guid);
 608
 609		sess->node_id = -1;
 610		sess->reconnect_expires = get_jiffies_64() +
 611			((sess->reconnect_hold + 1) * HZ);
 612	}
 613
 614	spin_unlock_bh(&sess->lock);
 615}
 616
 617static void session_reconnect_expired(struct sbp_session *sess)
 618{
 619	struct sbp_login_descriptor *login, *temp;
 620	LIST_HEAD(login_list);
 621
 622	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
 623
 624	spin_lock_bh(&sess->lock);
 625	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
 626		login->sess = NULL;
 627		list_move_tail(&login->link, &login_list);
 628	}
 629	spin_unlock_bh(&sess->lock);
 630
 631	list_for_each_entry_safe(login, temp, &login_list, link) {
 632		list_del(&login->link);
 633		sbp_login_release(login, false);
 634	}
 635
 636	sbp_session_release(sess, false);
 637}
 638
 639static void session_maintenance_work(struct work_struct *work)
 640{
 641	struct sbp_session *sess = container_of(work, struct sbp_session,
 642			maint_work.work);
 643
 644	/* could be called while tearing down the session */
 645	spin_lock_bh(&sess->lock);
 646	if (list_empty(&sess->login_list)) {
 647		spin_unlock_bh(&sess->lock);
 648		return;
 649	}
 650	spin_unlock_bh(&sess->lock);
 651
 652	if (sess->node_id != -1) {
 653		/* check for bus reset and make node_id invalid */
 654		session_check_for_reset(sess);
 655
 656		schedule_delayed_work(&sess->maint_work,
 657				SESSION_MAINTENANCE_INTERVAL);
 658	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
 659		/* still waiting for reconnect */
 660		schedule_delayed_work(&sess->maint_work,
 661				SESSION_MAINTENANCE_INTERVAL);
 662	} else {
 663		/* reconnect timeout has expired */
 664		session_reconnect_expired(sess);
 665	}
 666}
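/*
 * Maintenance state machine, re-armed every SESSION_MAINTENANCE_INTERVAL
 * (one second): while node_id is valid, watch for bus resets; once a reset
 * invalidates the node, keep polling until reconnect_expires
 * (reconnect_hold + 1 seconds later), then tear the session down.
 */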
 667
 668static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
 669		struct sbp_target_agent *agent)
 670{
 671	int state;
 672
 673	switch (tcode) {
 674	case TCODE_READ_QUADLET_REQUEST:
 675		pr_debug("tgt_agent AGENT_STATE READ\n");
 676
 677		spin_lock_bh(&agent->lock);
 678		state = agent->state;
 679		spin_unlock_bh(&agent->lock);
 680
 681		*(__be32 *)data = cpu_to_be32(state);
 682
 683		return RCODE_COMPLETE;
 684
 685	case TCODE_WRITE_QUADLET_REQUEST:
 686		/* ignored */
 687		return RCODE_COMPLETE;
 688
 689	default:
 690		return RCODE_TYPE_ERROR;
 691	}
 692}
 693
 694static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
 695		struct sbp_target_agent *agent)
 696{
 697	switch (tcode) {
 698	case TCODE_WRITE_QUADLET_REQUEST:
 699		pr_debug("tgt_agent AGENT_RESET\n");
 700		spin_lock_bh(&agent->lock);
 701		agent->state = AGENT_STATE_RESET;
 702		spin_unlock_bh(&agent->lock);
 703		return RCODE_COMPLETE;
 704
 705	default:
 706		return RCODE_TYPE_ERROR;
 707	}
 708}
 709
 710static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
 711		struct sbp_target_agent *agent)
 712{
 713	struct sbp2_pointer *ptr = data;
 714
 715	switch (tcode) {
 716	case TCODE_WRITE_BLOCK_REQUEST:
 717		spin_lock_bh(&agent->lock);
 718		if (agent->state != AGENT_STATE_SUSPENDED &&
 719				agent->state != AGENT_STATE_RESET) {
 720			spin_unlock_bh(&agent->lock);
 721			pr_notice("Ignoring ORB_POINTER write while active.\n");
 722			return RCODE_CONFLICT_ERROR;
 723		}
 724		agent->state = AGENT_STATE_ACTIVE;
 725		spin_unlock_bh(&agent->lock);
 726
 727		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
 728		agent->doorbell = false;
 729
 730		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
 731				agent->orb_pointer);
 732
 733		queue_work(system_unbound_wq, &agent->work);
 734
 735		return RCODE_COMPLETE;
 736
 737	case TCODE_READ_BLOCK_REQUEST:
 738		pr_debug("tgt_agent ORB_POINTER READ\n");
 739		spin_lock_bh(&agent->lock);
 740		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
 741		spin_unlock_bh(&agent->lock);
 742		return RCODE_COMPLETE;
 743
 744	default:
 745		return RCODE_TYPE_ERROR;
 746	}
 747}
 748
 749static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
 750		struct sbp_target_agent *agent)
 751{
 752	switch (tcode) {
 753	case TCODE_WRITE_QUADLET_REQUEST:
 754		spin_lock_bh(&agent->lock);
 755		if (agent->state != AGENT_STATE_SUSPENDED) {
 756			spin_unlock_bh(&agent->lock);
 757			pr_debug("Ignoring DOORBELL while active.\n");
 758			return RCODE_CONFLICT_ERROR;
 759		}
 760		agent->state = AGENT_STATE_ACTIVE;
 761		spin_unlock_bh(&agent->lock);
 762
 763		agent->doorbell = true;
 764
 765		pr_debug("tgt_agent DOORBELL\n");
 766
 767		queue_work(system_unbound_wq, &agent->work);
 768
 769		return RCODE_COMPLETE;
 770
 771	case TCODE_READ_QUADLET_REQUEST:
 772		return RCODE_COMPLETE;
 773
 774	default:
 775		return RCODE_TYPE_ERROR;
 776	}
 777}
 778
 779static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
 780		int tcode, void *data, struct sbp_target_agent *agent)
 781{
 782	switch (tcode) {
 783	case TCODE_WRITE_QUADLET_REQUEST:
 784		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
 785		/* ignored as we don't send unsolicited status */
 786		return RCODE_COMPLETE;
 787
 788	case TCODE_READ_QUADLET_REQUEST:
 789		return RCODE_COMPLETE;
 790
 791	default:
 792		return RCODE_TYPE_ERROR;
 793	}
 794}
 795
 796static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
 797		int tcode, int destination, int source, int generation,
 798		unsigned long long offset, void *data, size_t length,
 799		void *callback_data)
 800{
 801	struct sbp_target_agent *agent = callback_data;
 802	struct sbp_session *sess = agent->login->sess;
 803	int sess_gen, sess_node, rcode;
 804
 805	spin_lock_bh(&sess->lock);
 806	sess_gen = sess->generation;
 807	sess_node = sess->node_id;
 808	spin_unlock_bh(&sess->lock);
 809
 810	if (generation != sess_gen) {
 811		pr_notice("ignoring request with wrong generation\n");
 812		rcode = RCODE_TYPE_ERROR;
 813		goto out;
 814	}
 815
 816	if (source != sess_node) {
 817		pr_notice("ignoring request from foreign node (%x != %x)\n",
 818				source, sess_node);
 819		rcode = RCODE_TYPE_ERROR;
 820		goto out;
 821	}
 822
 823	/* turn offset into the offset from the start of the block */
 824	offset -= agent->handler.offset;
 825
 826	if (offset == 0x00 && length == 4) {
 827		/* AGENT_STATE */
 828		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
 829	} else if (offset == 0x04 && length == 4) {
 830		/* AGENT_RESET */
 831		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
 832	} else if (offset == 0x08 && length == 8) {
 833		/* ORB_POINTER */
 834		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
 835	} else if (offset == 0x10 && length == 4) {
 836		/* DOORBELL */
 837		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
 838	} else if (offset == 0x14 && length == 4) {
 839		/* UNSOLICITED_STATUS_ENABLE */
 840		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
 841				data, agent);
 842	} else {
 843		rcode = RCODE_ADDRESS_ERROR;
 844	}
 845
 846out:
 847	fw_send_response(card, request, rcode);
 848}
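/*
 * Command block agent register map decoded above (offsets within the
 * 0x20-byte handler region):
 *
 *   0x00  AGENT_STATE                 quadlet read (write ignored)
 *   0x04  AGENT_RESET                 quadlet write
 *   0x08  ORB_POINTER                 8-byte read/write
 *   0x10  DOORBELL                    quadlet write
 *   0x14  UNSOLICITED_STATUS_ENABLE   quadlet write (ignored)
 */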
 849
 850static void sbp_handle_command(struct sbp_target_request *);
 851static int sbp_send_status(struct sbp_target_request *);
 852static void sbp_free_request(struct sbp_target_request *);
 853
 854static void tgt_agent_process_work(struct work_struct *work)
 855{
 856	struct sbp_target_request *req =
 857		container_of(work, struct sbp_target_request, work);
 858
 859	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
 860			req->orb_pointer,
 861			sbp2_pointer_to_addr(&req->orb.next_orb),
 862			sbp2_pointer_to_addr(&req->orb.data_descriptor),
 863			be32_to_cpu(req->orb.misc));
 864
 865	if (req->orb_pointer >> 32)
 866		pr_debug("ORB with high bits set\n");
 867
 868	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
 869		case 0:/* Format specified by this standard */
 870			sbp_handle_command(req);
 871			return;
 872		case 1: /* Reserved for future standardization */
 873		case 2: /* Vendor-dependent */
 874			req->status.status |= cpu_to_be32(
 875					STATUS_BLOCK_RESP(
 876						STATUS_RESP_REQUEST_COMPLETE) |
 877					STATUS_BLOCK_DEAD(0) |
 878					STATUS_BLOCK_LEN(1) |
 879					STATUS_BLOCK_SBP_STATUS(
 880						SBP_STATUS_REQ_TYPE_NOTSUPP));
 881			sbp_send_status(req);
 882			return;
 883		case 3: /* Dummy ORB */
 884			req->status.status |= cpu_to_be32(
 885					STATUS_BLOCK_RESP(
 886						STATUS_RESP_REQUEST_COMPLETE) |
 887					STATUS_BLOCK_DEAD(0) |
 888					STATUS_BLOCK_LEN(1) |
 889					STATUS_BLOCK_SBP_STATUS(
 890						SBP_STATUS_DUMMY_ORB_COMPLETE));
 891			sbp_send_status(req);
 892			return;
 893		default:
 894			BUG();
 895	}
 896}
 897
 898/* used to double-check we haven't been issued an AGENT_RESET */
 899static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
 900{
 901	bool active;
 902
 903	spin_lock_bh(&agent->lock);
 904	active = (agent->state == AGENT_STATE_ACTIVE);
 905	spin_unlock_bh(&agent->lock);
 906
 907	return active;
 908}
 909
 910static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
 911	struct fw_card *card, u64 next_orb)
 912{
 913	struct se_session *se_sess = sess->se_sess;
 914	struct sbp_target_request *req;
 915	int tag, cpu;
 916
 917	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
 918	if (tag < 0)
 919		return ERR_PTR(-ENOMEM);
 920
 921	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
 922	memset(req, 0, sizeof(*req));
 923	req->se_cmd.map_tag = tag;
 924	req->se_cmd.map_cpu = cpu;
 925	req->se_cmd.tag = next_orb;
 926
 927	return req;
 928}
 929
 930static void tgt_agent_fetch_work(struct work_struct *work)
 931{
 932	struct sbp_target_agent *agent =
 933		container_of(work, struct sbp_target_agent, work);
 934	struct sbp_session *sess = agent->login->sess;
 935	struct sbp_target_request *req;
 936	int ret;
 937	bool doorbell = agent->doorbell;
 938	u64 next_orb = agent->orb_pointer;
 939
 940	while (next_orb && tgt_agent_check_active(agent)) {
 941		req = sbp_mgt_get_req(sess, sess->card, next_orb);
 942		if (IS_ERR(req)) {
 943			spin_lock_bh(&agent->lock);
 944			agent->state = AGENT_STATE_DEAD;
 945			spin_unlock_bh(&agent->lock);
 946			return;
 947		}
 948
 949		req->login = agent->login;
 950		req->orb_pointer = next_orb;
 951
 952		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
 953					req->orb_pointer >> 32));
 954		req->status.orb_low = cpu_to_be32(
 955				req->orb_pointer & 0xfffffffc);
 956
 957		/* read in the ORB */
 958		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
 959				sess->node_id, sess->generation, sess->speed,
 960				req->orb_pointer, &req->orb, sizeof(req->orb));
 961		if (ret != RCODE_COMPLETE) {
 962			pr_debug("tgt_orb fetch failed: %x\n", ret);
 963			req->status.status |= cpu_to_be32(
 964					STATUS_BLOCK_SRC(
 965						STATUS_SRC_ORB_FINISHED) |
 966					STATUS_BLOCK_RESP(
 967						STATUS_RESP_TRANSPORT_FAILURE) |
 968					STATUS_BLOCK_DEAD(1) |
 969					STATUS_BLOCK_LEN(1) |
 970					STATUS_BLOCK_SBP_STATUS(
 971						SBP_STATUS_UNSPECIFIED_ERROR));
 972			spin_lock_bh(&agent->lock);
 973			agent->state = AGENT_STATE_DEAD;
 974			spin_unlock_bh(&agent->lock);
 975
 976			sbp_send_status(req);
 977			return;
 978		}
 979
 980		/* check the next_ORB field */
 981		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
 982			next_orb = 0;
 983			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
 984						STATUS_SRC_ORB_FINISHED));
 985		} else {
 986			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
 987			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
 988						STATUS_SRC_ORB_CONTINUING));
 989		}
 990
 991		if (tgt_agent_check_active(agent) && !doorbell) {
 992			INIT_WORK(&req->work, tgt_agent_process_work);
 993			queue_work(system_unbound_wq, &req->work);
 994		} else {
 995			/* don't process this request, just check next_ORB */
 996			sbp_free_request(req);
 997		}
 998
 999		spin_lock_bh(&agent->lock);
1000		doorbell = agent->doorbell = false;
1001
1002		/* check if we should carry on processing */
1003		if (next_orb)
1004			agent->orb_pointer = next_orb;
1005		else
1006			agent->state = AGENT_STATE_SUSPENDED;
1007
1008		spin_unlock_bh(&agent->lock);
1009	}
1010}
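/*
 * Fetch loop behaviour: each ORB is read from initiator memory; a set high
 * bit in next_ORB.high marks the end of the chain, otherwise the agent
 * keeps walking. After a DOORBELL the current ORB is fetched only to pick
 * up a rewritten next_ORB field and is not executed a second time.
 */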
1011
1012static struct sbp_target_agent *sbp_target_agent_register(
1013		struct sbp_login_descriptor *login)
1014{
1015	struct sbp_target_agent *agent;
1016	int ret;
1017
1018	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1019	if (!agent)
1020		return ERR_PTR(-ENOMEM);
1021
1022	spin_lock_init(&agent->lock);
1023
1024	agent->handler.length = 0x20;
1025	agent->handler.address_callback = tgt_agent_rw;
1026	agent->handler.callback_data = agent;
1027
1028	agent->login = login;
1029	agent->state = AGENT_STATE_RESET;
1030	INIT_WORK(&agent->work, tgt_agent_fetch_work);
1031	agent->orb_pointer = 0;
1032	agent->doorbell = false;
1033
1034	ret = fw_core_add_address_handler(&agent->handler,
1035			&sbp_register_region);
1036	if (ret < 0) {
1037		kfree(agent);
1038		return ERR_PTR(ret);
1039	}
1040
1041	return agent;
1042}
1043
1044static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
1045{
1046	fw_core_remove_address_handler(&agent->handler);
1047	cancel_work_sync(&agent->work);
1048	kfree(agent);
1049}
1050
1051/*
1052 * Simple wrapper around fw_run_transaction that retries the transaction up
1053 * to five times in case of failure, backing off quadratically between tries.
1054 */
1055static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
1056		int generation, int speed, unsigned long long offset,
1057		void *payload, size_t length)
1058{
1059	int attempt, ret, delay;
1060
1061	for (attempt = 1; attempt <= 5; attempt++) {
1062		ret = fw_run_transaction(card, tcode, destination_id,
1063				generation, speed, offset, payload, length);
1064
1065		switch (ret) {
1066		case RCODE_COMPLETE:
1067		case RCODE_TYPE_ERROR:
1068		case RCODE_ADDRESS_ERROR:
1069		case RCODE_GENERATION:
1070			return ret;
1071
1072		default:
1073			delay = 5 * attempt * attempt;
1074			usleep_range(delay, delay * 2);
1075		}
1076	}
1077
1078	return ret;
1079}
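/*
 * Backoff arithmetic: delay = 5 * attempt^2 microseconds, i.e. 5, 20, 45,
 * 80 and 125 us (each stretchable to twice that by usleep_range), so five
 * failing attempts sleep well under a millisecond in total.
 */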
1080
1081/*
1082 * Wrapper around sbp_run_transaction that gets the card, destination,
1083 * generation and speed out of the request's session.
1084 */
1085static int sbp_run_request_transaction(struct sbp_target_request *req,
1086		int tcode, unsigned long long offset, void *payload,
1087		size_t length)
1088{
1089	struct sbp_login_descriptor *login = req->login;
1090	struct sbp_session *sess = login->sess;
1091	struct fw_card *card;
1092	int node_id, generation, speed, ret;
1093
1094	spin_lock_bh(&sess->lock);
1095	card = fw_card_get(sess->card);
1096	node_id = sess->node_id;
1097	generation = sess->generation;
1098	speed = sess->speed;
1099	spin_unlock_bh(&sess->lock);
1100
1101	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
1102			offset, payload, length);
1103
1104	fw_card_put(card);
1105
1106	return ret;
1107}
1108
1109static int sbp_fetch_command(struct sbp_target_request *req)
1110{
1111	int ret, cmd_len, copy_len;
1112
1113	cmd_len = scsi_command_size(req->orb.command_block);
1114
1115	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
1116	if (!req->cmd_buf)
1117		return -ENOMEM;
1118
1119	memcpy(req->cmd_buf, req->orb.command_block,
1120		min_t(int, cmd_len, sizeof(req->orb.command_block)));
1121
1122	if (cmd_len > sizeof(req->orb.command_block)) {
1123		pr_debug("sbp_fetch_command: filling in long command\n");
1124		copy_len = cmd_len - sizeof(req->orb.command_block);
1125
1126		ret = sbp_run_request_transaction(req,
1127				TCODE_READ_BLOCK_REQUEST,
1128				req->orb_pointer + sizeof(req->orb),
1129				req->cmd_buf + sizeof(req->orb.command_block),
1130				copy_len);
1131		if (ret != RCODE_COMPLETE)
1132			return -EIO;
1133	}
1134
1135	return 0;
1136}
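/*
 * Long CDB handling: the ORB embeds only sizeof(orb.command_block) bytes;
 * if scsi_command_size() reports a longer CDB, the remainder is fetched
 * with a second block read starting immediately after the ORB in initiator
 * memory.
 */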
1137
1138static int sbp_fetch_page_table(struct sbp_target_request *req)
1139{
1140	int pg_tbl_sz, ret;
1141	struct sbp_page_table_entry *pg_tbl;
1142
1143	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
1144		return 0;
1145
1146	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
1147		sizeof(struct sbp_page_table_entry);
1148
1149	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
1150	if (!pg_tbl)
1151		return -ENOMEM;
1152
1153	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
1154			sbp2_pointer_to_addr(&req->orb.data_descriptor),
1155			pg_tbl, pg_tbl_sz);
1156	if (ret != RCODE_COMPLETE) {
1157		kfree(pg_tbl);
1158		return -EIO;
1159	}
1160
1161	req->pg_tbl = pg_tbl;
1162	return 0;
1163}
1164
1165static void sbp_calc_data_length_direction(struct sbp_target_request *req,
1166	u32 *data_len, enum dma_data_direction *data_dir)
1167{
1168	int data_size, direction, idx;
1169
1170	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1171	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
1172
1173	if (!data_size) {
1174		*data_len = 0;
1175		*data_dir = DMA_NONE;
1176		return;
1177	}
1178
1179	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1180
1181	if (req->pg_tbl) {
1182		*data_len = 0;
1183		for (idx = 0; idx < data_size; idx++) {
1184			*data_len += be16_to_cpu(
1185					req->pg_tbl[idx].segment_length);
1186		}
1187	} else {
1188		*data_len = data_size;
1189	}
1190}
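/*
 * Decoding rules: data_size == 0 means no data phase (DMA_NONE). Otherwise
 * the direction bit selects DMA_FROM_DEVICE (data to the initiator, SCSI
 * READ) or DMA_TO_DEVICE (SCSI WRITE); with a page table the length is the
 * sum of the segment lengths, without one data_size is the byte count
 * itself.
 */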
1191
1192static void sbp_handle_command(struct sbp_target_request *req)
1193{
1194	struct sbp_login_descriptor *login = req->login;
1195	struct sbp_session *sess = login->sess;
1196	int ret, unpacked_lun;
1197	u32 data_length;
1198	enum dma_data_direction data_dir;
1199
1200	ret = sbp_fetch_command(req);
1201	if (ret) {
1202		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
1203		goto err;
1204	}
1205
1206	ret = sbp_fetch_page_table(req);
1207	if (ret) {
1208		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
1209			ret);
1210		goto err;
1211	}
1212
1213	unpacked_lun = req->login->login_lun;
1214	sbp_calc_data_length_direction(req, &data_length, &data_dir);
1215
1216	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
1217			req->orb_pointer, unpacked_lun, data_length, data_dir);
1218
1219	/* only used for printk until we do TMRs */
1220	req->se_cmd.tag = req->orb_pointer;
1221	target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
1222			  req->sense_buf, unpacked_lun, data_length,
1223			  TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF);
1224	return;
1225
1226err:
1227	req->status.status |= cpu_to_be32(
1228		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1229		STATUS_BLOCK_DEAD(0) |
1230		STATUS_BLOCK_LEN(1) |
1231		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1232	sbp_send_status(req);
1233}
1234
1235/*
1236 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
1237 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
1238 */
1239static int sbp_rw_data(struct sbp_target_request *req)
1240{
1241	struct sbp_session *sess = req->login->sess;
1242	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
1243		generation, num_pte, length, tfr_length,
1244		rcode = RCODE_COMPLETE;
1245	struct sbp_page_table_entry *pte;
1246	unsigned long long offset;
1247	struct fw_card *card;
1248	struct sg_mapping_iter iter;
1249
1250	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
1251		tcode = TCODE_WRITE_BLOCK_REQUEST;
1252		sg_miter_flags = SG_MITER_FROM_SG;
1253	} else {
1254		tcode = TCODE_READ_BLOCK_REQUEST;
1255		sg_miter_flags = SG_MITER_TO_SG;
1256	}
1257
1258	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
1259	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
1260
1261	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
1262	if (pg_size) {
1263		pr_err("sbp_run_transaction: page size ignored\n");
1264	}
1265
1266	spin_lock_bh(&sess->lock);
1267	card = fw_card_get(sess->card);
1268	node_id = sess->node_id;
1269	generation = sess->generation;
1270	spin_unlock_bh(&sess->lock);
1271
1272	if (req->pg_tbl) {
1273		pte = req->pg_tbl;
1274		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1275
1276		offset = 0;
1277		length = 0;
1278	} else {
1279		pte = NULL;
1280		num_pte = 0;
1281
1282		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
1283		length = req->se_cmd.data_length;
1284	}
1285
1286	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
1287		sg_miter_flags);
1288
1289	while (length || num_pte) {
1290		if (!length) {
1291			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
1292				be32_to_cpu(pte->segment_base_lo);
1293			length = be16_to_cpu(pte->segment_length);
1294
1295			pte++;
1296			num_pte--;
1297		}
1298
1299		sg_miter_next(&iter);
1300
1301		tfr_length = min3(length, max_payload, (int)iter.length);
1302
1303		/* FIXME: take page_size into account */
1304
1305		rcode = sbp_run_transaction(card, tcode, node_id,
1306				generation, speed,
1307				offset, iter.addr, tfr_length);
1308
1309		if (rcode != RCODE_COMPLETE)
1310			break;
1311
1312		length -= tfr_length;
1313		offset += tfr_length;
1314		iter.consumed = tfr_length;
1315	}
1316
1317	sg_miter_stop(&iter);
1318	fw_card_put(card);
1319
1320	if (rcode == RCODE_COMPLETE) {
1321		WARN_ON(length != 0);
1322		return 0;
1323	} else {
1324		return -EIO;
1325	}
1326}
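/*
 * Transfer sizing above: max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD, so a
 * field value of 8 caps each transaction at 1024 bytes; each loop iteration
 * moves min(remaining segment, max_payload, current sg chunk) bytes.
 */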
1327
1328static int sbp_send_status(struct sbp_target_request *req)
1329{
1330	int rc, ret = 0, length;
1331	struct sbp_login_descriptor *login = req->login;
1332
1333	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
1334
1335	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
1336			login->status_fifo_addr, &req->status, length);
1337	if (rc != RCODE_COMPLETE) {
1338		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
1339		ret = -EIO;
1340		goto put_ref;
1341	}
1342
1343	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
1344			req->orb_pointer);
1345	/*
1346	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
1347	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
1348	 * final se_cmd->cmd_kref put.
1349	 */
1350put_ref:
1351	target_put_sess_cmd(&req->se_cmd);
1352	return ret;
1353}
1354
1355static void sbp_sense_mangle(struct sbp_target_request *req)
1356{
1357	struct se_cmd *se_cmd = &req->se_cmd;
1358	u8 *sense = req->sense_buf;
1359	u8 *status = req->status.data;
1360
1361	WARN_ON(se_cmd->scsi_sense_length < 18);
1362
1363	switch (sense[0] & 0x7f) { 		/* sfmt */
1364	case 0x70: /* current, fixed */
1365		status[0] = 0 << 6;
1366		break;
1367	case 0x71: /* deferred, fixed */
1368		status[0] = 1 << 6;
1369		break;
1370	case 0x72: /* current, descriptor */
1371	case 0x73: /* deferred, descriptor */
1372	default:
1373		/*
1374		 * TODO: SBP-3 specifies what we should do with descriptor
1375		 * format sense data
1376		 */
1377		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
1378			sense[0]);
1379		req->status.status |= cpu_to_be32(
1380			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1381			STATUS_BLOCK_DEAD(0) |
1382			STATUS_BLOCK_LEN(1) |
1383			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
1384		return;
1385	}
1386
1387	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
1388	status[1] =
1389		(sense[0] & 0x80) |		/* valid */
1390		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
1391		(sense[2] & 0x0f);		/* sense_key */
1392	status[2] = 0;				/* XXX sense_code */
1393	status[3] = 0;				/* XXX sense_qualifier */
1394
1395	/* information */
1396	status[4] = sense[3];
1397	status[5] = sense[4];
1398	status[6] = sense[5];
1399	status[7] = sense[6];
1400
1401	/* CDB-dependent */
1402	status[8] = sense[8];
1403	status[9] = sense[9];
1404	status[10] = sense[10];
1405	status[11] = sense[11];
1406
1407	/* fru */
1408	status[12] = sense[14];
1409
1410	/* sense_key-dependent */
1411	status[13] = sense[15];
1412	status[14] = sense[16];
1413	status[15] = sense[17];
1414
1415	req->status.status |= cpu_to_be32(
1416		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1417		STATUS_BLOCK_DEAD(0) |
1418		STATUS_BLOCK_LEN(5) |
1419		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1420}
1421
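/* Fill in sense data (if any) in the status block and send it. */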
1422static int sbp_send_sense(struct sbp_target_request *req)
1423{
1424	struct se_cmd *se_cmd = &req->se_cmd;
1425
1426	if (se_cmd->scsi_sense_length) {
1427		sbp_sense_mangle(req);
1428	} else {
1429		req->status.status |= cpu_to_be32(
1430			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1431			STATUS_BLOCK_DEAD(0) |
1432			STATUS_BLOCK_LEN(1) |
1433			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1434	}
1435
1436	return sbp_send_status(req);
1437}
1438
1439static void sbp_free_request(struct sbp_target_request *req)
1440{
1441	struct se_cmd *se_cmd = &req->se_cmd;
1442	struct se_session *se_sess = se_cmd->se_sess;
1443
1444	kfree(req->pg_tbl);
1445	kfree(req->cmd_buf);
1446
1447	target_free_tag(se_sess, se_cmd);
1448}
1449
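/*
 * Workqueue handler for the management agent: fetch the management ORB from
 * the initiator, dispatch on its function (LOGIN, QUERY LOGINS, RECONNECT and
 * LOGOUT are implemented, everything else returns SBP_STATUS_REQ_TYPE_NOTSUPP),
 * then write the status block to the ORB's status FIFO address.
 */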
1450static void sbp_mgt_agent_process(struct work_struct *work)
1451{
1452	struct sbp_management_agent *agent =
1453		container_of(work, struct sbp_management_agent, work);
1454	struct sbp_management_request *req = agent->request;
1455	int ret;
1456	int status_data_len = 0;
1457
1458	/* fetch the ORB from the initiator */
1459	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
1460		req->node_addr, req->generation, req->speed,
1461		agent->orb_offset, &req->orb, sizeof(req->orb));
1462	if (ret != RCODE_COMPLETE) {
1463		pr_debug("mgt_orb fetch failed: %x\n", ret);
1464		goto out;
1465	}
1466
1467	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
1468		sbp2_pointer_to_addr(&req->orb.ptr1),
1469		sbp2_pointer_to_addr(&req->orb.ptr2),
1470		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
1471		sbp2_pointer_to_addr(&req->orb.status_fifo));
1472
1473	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
1474		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
1475		pr_err("mgt_orb bad request\n");
1476		goto out;
1477	}
1478
1479	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
1480	case MANAGEMENT_ORB_FUNCTION_LOGIN:
1481		sbp_management_request_login(agent, req, &status_data_len);
1482		break;
1483
1484	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
1485		sbp_management_request_query_logins(agent, req,
1486				&status_data_len);
1487		break;
1488
1489	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
1490		sbp_management_request_reconnect(agent, req, &status_data_len);
1491		break;
1492
1493	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
1494		pr_notice("SET PASSWORD not implemented\n");
1495
1496		req->status.status = cpu_to_be32(
1497			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1498			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1499
1500		break;
1501
1502	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
1503		sbp_management_request_logout(agent, req, &status_data_len);
1504		break;
1505
1506	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
1507		pr_notice("ABORT TASK not implemented\n");
1508
1509		req->status.status = cpu_to_be32(
1510			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1511			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1512
1513		break;
1514
1515	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
1516		pr_notice("ABORT TASK SET not implemented\n");
1517
1518		req->status.status = cpu_to_be32(
1519			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1520			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1521
1522		break;
1523
1524	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
1525		pr_notice("LOGICAL UNIT RESET not implemented\n");
1526
1527		req->status.status = cpu_to_be32(
1528			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1529			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1530
1531		break;
1532
1533	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
1534		pr_notice("TARGET RESET not implemented\n");
1535
1536		req->status.status = cpu_to_be32(
1537			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1538			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1539
1540		break;
1541
1542	default:
1543		pr_notice("unknown management function 0x%x\n",
1544			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
1545
1546		req->status.status = cpu_to_be32(
1547			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1548			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1549
1550		break;
1551	}
1552
1553	req->status.status |= cpu_to_be32(
1554		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
1555		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
1556		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
1557	req->status.orb_low = cpu_to_be32(agent->orb_offset);
1558
1559	/* write the status block back to the initiator */
1560	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
1561		req->node_addr, req->generation, req->speed,
1562		sbp2_pointer_to_addr(&req->orb.status_fifo),
1563		&req->status, 8 + status_data_len);
1564	if (ret != RCODE_COMPLETE) {
1565		pr_debug("mgt_orb status write failed: %x\n", ret);
1566		goto out;
1567	}
1568
1569out:
1570	fw_card_put(req->card);
1571	kfree(req);
1572
1573	spin_lock_bh(&agent->lock);
1574	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1575	spin_unlock_bh(&agent->lock);
1576}
1577
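/*
 * Address handler for the 8-byte MANAGEMENT_AGENT register.  A block write of
 * an ORB pointer queues sbp_mgt_agent_process() on the unbound workqueue; a
 * block read returns the last ORB pointer written.  New requests are rejected
 * while a previous management ORB is still being processed.
 */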
1578static void sbp_mgt_agent_rw(struct fw_card *card,
1579	struct fw_request *request, int tcode, int destination, int source,
1580	int generation, unsigned long long offset, void *data, size_t length,
1581	void *callback_data)
1582{
1583	struct sbp_management_agent *agent = callback_data;
1584	struct sbp2_pointer *ptr = data;
1585	int rcode = RCODE_ADDRESS_ERROR;
1586
1587	if (!agent->tport->enable)
1588		goto out;
1589
1590	if ((offset != agent->handler.offset) || (length != 8))
1591		goto out;
1592
1593	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
1594		struct sbp_management_request *req;
1595		int prev_state;
1596
1597		spin_lock_bh(&agent->lock);
1598		prev_state = agent->state;
1599		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
1600		spin_unlock_bh(&agent->lock);
1601
1602		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
1603			pr_notice("ignoring management request while busy\n");
1604			rcode = RCODE_CONFLICT_ERROR;
1605			goto out;
1606		}
1607		req = kzalloc(sizeof(*req), GFP_ATOMIC);
1608		if (!req) {
1609			rcode = RCODE_CONFLICT_ERROR;
1610			goto out;
1611		}
1612
1613		req->card = fw_card_get(card);
1614		req->generation = generation;
1615		req->node_addr = source;
1616		req->speed = fw_get_request_speed(request);
1617
1618		agent->orb_offset = sbp2_pointer_to_addr(ptr);
1619		agent->request = req;
1620
1621		queue_work(system_unbound_wq, &agent->work);
1622		rcode = RCODE_COMPLETE;
1623	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
1624		addr_to_sbp2_pointer(agent->orb_offset, ptr);
1625		rcode = RCODE_COMPLETE;
1626	} else {
1627		rcode = RCODE_TYPE_ERROR;
1628	}
1629
1630out:
1631	fw_send_response(card, request, rcode);
1632}
1633
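/*
 * Allocate a management agent for a tport and register its 8-byte
 * MANAGEMENT_AGENT register within sbp_register_region.
 */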
1634static struct sbp_management_agent *sbp_management_agent_register(
1635		struct sbp_tport *tport)
1636{
1637	int ret;
1638	struct sbp_management_agent *agent;
1639
1640	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1641	if (!agent)
1642		return ERR_PTR(-ENOMEM);
1643
1644	spin_lock_init(&agent->lock);
1645	agent->tport = tport;
1646	agent->handler.length = 0x08;
1647	agent->handler.address_callback = sbp_mgt_agent_rw;
1648	agent->handler.callback_data = agent;
1649	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1650	INIT_WORK(&agent->work, sbp_mgt_agent_process);
1651	agent->orb_offset = 0;
1652	agent->request = NULL;
1653
1654	ret = fw_core_add_address_handler(&agent->handler,
1655			&sbp_register_region);
1656	if (ret < 0) {
1657		kfree(agent);
1658		return ERR_PTR(ret);
1659	}
1660
1661	return agent;
1662}
1663
1664static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
1665{
1666	fw_core_remove_address_handler(&agent->handler);
1667	cancel_work_sync(&agent->work);
1668	kfree(agent);
1669}
1670
1671static int sbp_check_true(struct se_portal_group *se_tpg)
1672{
1673	return 1;
1674}
1675
1676static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1677{
1678	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1679	struct sbp_tport *tport = tpg->tport;
1680
1681	return &tport->tport_name[0];
1682}
1683
1684static u16 sbp_get_tag(struct se_portal_group *se_tpg)
1685{
1686	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1687	return tpg->tport_tpgt;
1688}
1689
1690static void sbp_release_cmd(struct se_cmd *se_cmd)
1691{
1692	struct sbp_target_request *req = container_of(se_cmd,
1693			struct sbp_target_request, se_cmd);
1694
1695	sbp_free_request(req);
1696}
1697
1698static int sbp_write_pending(struct se_cmd *se_cmd)
1699{
1700	struct sbp_target_request *req = container_of(se_cmd,
1701			struct sbp_target_request, se_cmd);
1702	int ret;
1703
1704	ret = sbp_rw_data(req);
1705	if (ret) {
1706		req->status.status |= cpu_to_be32(
1707			STATUS_BLOCK_RESP(
1708				STATUS_RESP_TRANSPORT_FAILURE) |
1709			STATUS_BLOCK_DEAD(0) |
1710			STATUS_BLOCK_LEN(1) |
1711			STATUS_BLOCK_SBP_STATUS(
1712				SBP_STATUS_UNSPECIFIED_ERROR));
1713		sbp_send_status(req);
1714		return ret;
1715	}
1716
1717	target_execute_cmd(se_cmd);
1718	return 0;
1719}
1720
1721static int sbp_queue_data_in(struct se_cmd *se_cmd)
1722{
1723	struct sbp_target_request *req = container_of(se_cmd,
1724			struct sbp_target_request, se_cmd);
1725	int ret;
1726
1727	ret = sbp_rw_data(req);
1728	if (ret) {
1729		req->status.status |= cpu_to_be32(
1730			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1731			STATUS_BLOCK_DEAD(0) |
1732			STATUS_BLOCK_LEN(1) |
1733			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1734		sbp_send_status(req);
1735		return ret;
1736	}
1737
1738	return sbp_send_sense(req);
1739}
1740
1741/*
1742 * Called after command (no data transfer) or after the write (to device)
1743 * operation is completed
1744 */
1745static int sbp_queue_status(struct se_cmd *se_cmd)
1746{
1747	struct sbp_target_request *req = container_of(se_cmd,
1748			struct sbp_target_request, se_cmd);
1749
1750	return sbp_send_sense(req);
1751}
1752
1753static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1754{
1755}
1756
1757static void sbp_aborted_task(struct se_cmd *se_cmd)
1758{
1759	return;
1760}
1761
1762static int sbp_check_stop_free(struct se_cmd *se_cmd)
1763{
1764	struct sbp_target_request *req = container_of(se_cmd,
1765			struct sbp_target_request, se_cmd);
1766
1767	return transport_generic_free_cmd(&req->se_cmd, 0);
1768}
1769
1770static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
1771{
1772	struct se_lun *lun;
1773	int count = 0;
1774
1775	rcu_read_lock();
1776	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
1777		count++;
1778	rcu_read_unlock();
1779
1780	return count;
1781}
1782
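/*
 * (Re)build this tport's unit directory descriptor: the static template plus
 * management_agent, unit_characteristics, reconnect_timeout, one
 * logical_unit_number entry per LUN and a trailing unit unique ID leaf.
 * Called from the LUN link/unlink hooks, the enable hook and the tpg
 * attribute stores; when the tport is disabled the descriptor is simply
 * removed.
 */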
1783static int sbp_update_unit_directory(struct sbp_tport *tport)
1784{
1785	struct se_lun *lun;
1786	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
1787	u32 *data;
1788
1789	if (tport->unit_directory.data) {
1790		fw_core_remove_descriptor(&tport->unit_directory);
1791		kfree(tport->unit_directory.data);
1792		tport->unit_directory.data = NULL;
1793	}
1794
1795	if (!tport->enable || !tport->tpg)
1796		return 0;
1797
1798	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
1799
1800	/*
1801	 * Number of entries in the final unit directory:
1802	 *  - all of those in the template
1803	 *  - management_agent
1804	 *  - unit_characteristics
1805	 *  - reconnect_timeout
1806	 *  - unit unique ID
1807	 *  - one for each LUN
1808	 *
1809	 *  MUST NOT include leaf or sub-directory entries
1810	 */
1811	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
1812
1813	if (tport->directory_id != -1)
1814		num_entries++;
1815
1816	/* allocate num_entries + 4 for the header and unique ID leaf */
1817	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
1818	if (!data)
1819		return -ENOMEM;
1820
1821	/* directory_length */
1822	data[idx++] = num_entries << 16;
1823
1824	/* directory_id */
1825	if (tport->directory_id != -1)
1826		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
1827
1828	/* unit directory template */
1829	memcpy(&data[idx], sbp_unit_directory_template,
1830			sizeof(sbp_unit_directory_template));
1831	idx += ARRAY_SIZE(sbp_unit_directory_template);
1832
1833	/* management_agent */
1834	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
1835	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
1836
1837	/* unit_characteristics */
1838	data[idx++] = 0x3a000000 |
1839		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
1840		SBP_ORB_FETCH_SIZE;
1841
1842	/* reconnect_timeout */
1843	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
1844
1845	/* unit unique ID (leaf is just after LUNs) */
1846	data[idx++] = 0x8d000000 | (num_luns + 1);
1847
1848	rcu_read_lock();
1849	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
1850		struct se_device *dev;
1851		int type;
1852		/*
1853		 * rcu_dereference_raw protected by se_lun->lun_group symlink
1854		 * reference to se_device->dev_group.
1855		 */
1856		dev = rcu_dereference_raw(lun->lun_se_dev);
1857		type = dev->transport->get_device_type(dev);
1858
1859		/* logical_unit_number */
1860		data[idx++] = 0x14000000 |
1861			((type << 16) & 0x1f0000) |
1862			(lun->unpacked_lun & 0xffff);
1863	}
1864	rcu_read_unlock();
1865
1866	/* unit unique ID leaf */
1867	data[idx++] = 2 << 16;
1868	data[idx++] = tport->guid >> 32;
1869	data[idx++] = tport->guid;
1870
1871	tport->unit_directory.length = idx;
1872	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
1873	tport->unit_directory.data = data;
1874
1875	ret = fw_core_add_descriptor(&tport->unit_directory);
1876	if (ret < 0) {
1877		kfree(tport->unit_directory.data);
1878		tport->unit_directory.data = NULL;
1879	}
1880
1881	return ret;
1882}
1883
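/*
 * Parse a 16-hex-digit WWN (an optional trailing newline is allowed) into a
 * u64.  Returns the number of characters consumed, or -1 on a malformed or
 * wrongly sized string.
 */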
1884static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
1885{
1886	const char *cp;
1887	char c, nibble;
1888	int pos = 0, err;
1889
1890	*wwn = 0;
1891	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
1892		c = *cp;
1893		if (c == '\n' && cp[1] == '\0')
1894			continue;
1895		if (c == '\0') {
1896			err = 2;
1897			if (pos != 16)
1898				goto fail;
1899			return cp - name;
1900		}
1901		err = 3;
1902		if (isdigit(c))
1903			nibble = c - '0';
1904		else if (isxdigit(c))
1905			nibble = tolower(c) - 'a' + 10;
1906		else
1907			goto fail;
1908		*wwn = (*wwn << 4) | nibble;
1909		pos++;
1910	}
1911	err = 4;
1912fail:
1913	printk(KERN_INFO "err %u len %zu pos %u\n",
1914			err, cp - name, pos);
1915	return -1;
1916}
1917
1918static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
1919{
1920	return snprintf(buf, len, "%016llx", wwn);
1921}
1922
1923static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1924{
1925	u64 guid = 0;
1926
1927	if (sbp_parse_wwn(name, &guid) < 0)
1928		return -EINVAL;
1929	return 0;
1930}
1931
1932static int sbp_post_link_lun(
1933		struct se_portal_group *se_tpg,
1934		struct se_lun *se_lun)
1935{
1936	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1937
1938	return sbp_update_unit_directory(tpg->tport);
1939}
1940
1941static void sbp_pre_unlink_lun(
1942		struct se_portal_group *se_tpg,
1943		struct se_lun *se_lun)
1944{
1945	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1946	struct sbp_tport *tport = tpg->tport;
1947	int ret;
1948
1949	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
1950		tport->enable = 0;
1951
1952	ret = sbp_update_unit_directory(tport);
1953	if (ret < 0)
1954		pr_err("unlink LUN: failed to update unit directory\n");
1955}
1956
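/*
 * Create the "tpgt_N" target portal group for a WWN: allocate the tpg, set
 * default attribute values, register the management agent and register the
 * TPG with the target core.  Only one TPG per tport is supported.
 */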
1957static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
1958					    const char *name)
1959{
1960	struct sbp_tport *tport =
1961		container_of(wwn, struct sbp_tport, tport_wwn);
1962
1963	struct sbp_tpg *tpg;
1964	unsigned long tpgt;
1965	int ret;
1966
1967	if (strstr(name, "tpgt_") != name)
1968		return ERR_PTR(-EINVAL);
1969	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1970		return ERR_PTR(-EINVAL);
1971
1972	if (tport->tpg) {
1973		pr_err("Only one TPG per Unit is possible.\n");
1974		return ERR_PTR(-EBUSY);
1975	}
1976
1977	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
1978	if (!tpg)
1979		return ERR_PTR(-ENOMEM);
1980
1981	tpg->tport = tport;
1982	tpg->tport_tpgt = tpgt;
1983	tport->tpg = tpg;
1984
1985	/* default attribute values */
1986	tport->enable = 0;
1987	tport->directory_id = -1;
1988	tport->mgt_orb_timeout = 15;
1989	tport->max_reconnect_timeout = 5;
1990	tport->max_logins_per_lun = 1;
1991
1992	tport->mgt_agt = sbp_management_agent_register(tport);
1993	if (IS_ERR(tport->mgt_agt)) {
1994		ret = PTR_ERR(tport->mgt_agt);
1995		goto out_free_tpg;
1996	}
1997
1998	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
1999	if (ret < 0)
2000		goto out_unreg_mgt_agt;
2001
2002	return &tpg->se_tpg;
2003
2004out_unreg_mgt_agt:
2005	sbp_management_agent_unregister(tport->mgt_agt);
2006out_free_tpg:
2007	tport->tpg = NULL;
2008	kfree(tpg);
2009	return ERR_PTR(ret);
2010}
2011
2012static void sbp_drop_tpg(struct se_portal_group *se_tpg)
2013{
2014	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2015	struct sbp_tport *tport = tpg->tport;
2016
2017	core_tpg_deregister(se_tpg);
2018	sbp_management_agent_unregister(tport->mgt_agt);
2019	tport->tpg = NULL;
2020	kfree(tpg);
2021}
2022
2023static struct se_wwn *sbp_make_tport(
2024		struct target_fabric_configfs *tf,
2025		struct config_group *group,
2026		const char *name)
2027{
2028	struct sbp_tport *tport;
2029	u64 guid = 0;
2030
2031	if (sbp_parse_wwn(name, &guid) < 0)
2032		return ERR_PTR(-EINVAL);
2033
2034	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2035	if (!tport)
2036		return ERR_PTR(-ENOMEM);
2037
2038	tport->guid = guid;
2039	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2040
2041	return &tport->tport_wwn;
2042}
2043
2044static void sbp_drop_tport(struct se_wwn *wwn)
2045{
2046	struct sbp_tport *tport =
2047		container_of(wwn, struct sbp_tport, tport_wwn);
2048
2049	kfree(tport);
2050}
2051
2052static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
2053{
2054	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
2055}
2056
2057CONFIGFS_ATTR_RO(sbp_wwn_, version);
2058
2059static struct configfs_attribute *sbp_wwn_attrs[] = {
2060	&sbp_wwn_attr_version,
2061	NULL,
2062};
2063
2064static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
2065{
2066	struct se_portal_group *se_tpg = to_tpg(item);
2067	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2068	struct sbp_tport *tport = tpg->tport;
2069
2070	if (tport->directory_id == -1)
2071		return sprintf(page, "implicit\n");
2072	else
2073		return sprintf(page, "%06x\n", tport->directory_id);
2074}
2075
2076static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
2077		const char *page, size_t count)
2078{
2079	struct se_portal_group *se_tpg = to_tpg(item);
2080	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2081	struct sbp_tport *tport = tpg->tport;
2082	unsigned long val;
2083
2084	if (tport->enable) {
2085		pr_err("Cannot change the directory_id on an active target.\n");
2086		return -EBUSY;
2087	}
2088
2089	if (strstr(page, "implicit") == page) {
2090		tport->directory_id = -1;
2091	} else {
2092		if (kstrtoul(page, 16, &val) < 0)
2093			return -EINVAL;
2094		if (val > 0xffffff)
2095			return -EINVAL;
2096
2097		tport->directory_id = val;
2098	}
2099
2100	return count;
2101}
2102
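/*
 * configfs enable/disable hook: refuse to enable a TPG with no LUNs and
 * refuse to disable one that still has active sessions, then rebuild the
 * unit directory to reflect the new state.
 */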
2103static int sbp_enable_tpg(struct se_portal_group *se_tpg, bool enable)
2104{
2105	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2106	struct sbp_tport *tport = tpg->tport;
2107	int ret;
2108
2109	if (enable) {
2110		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
2111			pr_err("Cannot enable a target with no LUNs!\n");
2112			return -EINVAL;
2113		}
2114	} else {
2115		/* XXX: force-shutdown sessions instead? */
2116		spin_lock_bh(&se_tpg->session_lock);
2117		if (!list_empty(&se_tpg->tpg_sess_list)) {
2118			spin_unlock_bh(&se_tpg->session_lock);
2119			return -EBUSY;
2120		}
2121		spin_unlock_bh(&se_tpg->session_lock);
2122	}
2123
2124	tport->enable = enable;
2125
2126	ret = sbp_update_unit_directory(tport);
2127	if (ret < 0) {
2128		pr_err("Could not update Config ROM\n");
2129		return ret;
2130	}
2131
2132	return 0;
2133}
2134
2135CONFIGFS_ATTR(sbp_tpg_, directory_id);
2136
2137static struct configfs_attribute *sbp_tpg_base_attrs[] = {
2138	&sbp_tpg_attr_directory_id,
2139	NULL,
2140};
2141
2142static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
2143		char *page)
2144{
2145	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2146	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2147	struct sbp_tport *tport = tpg->tport;
2148	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
2149}
2150
2151static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
2152		const char *page, size_t count)
2153{
2154	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2155	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2156	struct sbp_tport *tport = tpg->tport;
2157	unsigned long val;
2158	int ret;
2159
2160	if (kstrtoul(page, 0, &val) < 0)
2161		return -EINVAL;
2162	if ((val < 1) || (val > 127))
2163		return -EINVAL;
2164
2165	if (tport->mgt_orb_timeout == val)
2166		return count;
2167
2168	tport->mgt_orb_timeout = val;
2169
2170	ret = sbp_update_unit_directory(tport);
2171	if (ret < 0)
2172		return ret;
2173
2174	return count;
2175}
2176
2177static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
2178		char *page)
2179{
2180	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2181	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2182	struct sbp_tport *tport = tpg->tport;
2183	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
2184}
2185
2186static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
2187		const char *page, size_t count)
2188{
2189	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2190	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2191	struct sbp_tport *tport = tpg->tport;
2192	unsigned long val;
2193	int ret;
2194
2195	if (kstrtoul(page, 0, &val) < 0)
2196		return -EINVAL;
2197	if ((val < 1) || (val > 32767))
2198		return -EINVAL;
2199
2200	if (tport->max_reconnect_timeout == val)
2201		return count;
2202
2203	tport->max_reconnect_timeout = val;
2204
2205	ret = sbp_update_unit_directory(tport);
2206	if (ret < 0)
2207		return ret;
2208
2209	return count;
2210}
2211
2212static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
2213		char *page)
2214{
2215	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2216	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2217	struct sbp_tport *tport = tpg->tport;
2218	return sprintf(page, "%d\n", tport->max_logins_per_lun);
2219}
2220
2221static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
2222		const char *page, size_t count)
2223{
2224	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2225	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2226	struct sbp_tport *tport = tpg->tport;
2227	unsigned long val;
2228
2229	if (kstrtoul(page, 0, &val) < 0)
2230		return -EINVAL;
2231	if ((val < 1) || (val > 127))
2232		return -EINVAL;
2233
2234	/* XXX: also check against current count? */
2235
2236	tport->max_logins_per_lun = val;
2237
2238	return count;
2239}
2240
2241CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
2242CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
2243CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);
2244
2245static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2246	&sbp_tpg_attrib_attr_mgt_orb_timeout,
2247	&sbp_tpg_attrib_attr_max_reconnect_timeout,
2248	&sbp_tpg_attrib_attr_max_logins_per_lun,
2249	NULL,
2250};
2251
2252static const struct target_core_fabric_ops sbp_ops = {
2253	.module				= THIS_MODULE,
2254	.fabric_name			= "sbp",
2255	.tpg_get_wwn			= sbp_get_fabric_wwn,
2256	.tpg_get_tag			= sbp_get_tag,
2257	.tpg_check_demo_mode		= sbp_check_true,
2258	.tpg_check_demo_mode_cache	= sbp_check_true,
2259	.release_cmd			= sbp_release_cmd,
2260	.write_pending			= sbp_write_pending,
2261	.queue_data_in			= sbp_queue_data_in,
2262	.queue_status			= sbp_queue_status,
2263	.queue_tm_rsp			= sbp_queue_tm_rsp,
2264	.aborted_task			= sbp_aborted_task,
2265	.check_stop_free		= sbp_check_stop_free,
2266
2267	.fabric_make_wwn		= sbp_make_tport,
2268	.fabric_drop_wwn		= sbp_drop_tport,
2269	.fabric_make_tpg		= sbp_make_tpg,
2270	.fabric_enable_tpg		= sbp_enable_tpg,
2271	.fabric_drop_tpg		= sbp_drop_tpg,
2272	.fabric_post_link		= sbp_post_link_lun,
2273	.fabric_pre_unlink		= sbp_pre_unlink_lun,
2274	.fabric_make_np			= NULL,
2275	.fabric_drop_np			= NULL,
2276	.fabric_init_nodeacl		= sbp_init_nodeacl,
2277
2278	.tfc_wwn_attrs			= sbp_wwn_attrs,
2279	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
2280	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
2281
2282	.default_submit_type		= TARGET_DIRECT_SUBMIT,
2283	.direct_submit_supp		= 1,
2284};
2285
2286static int __init sbp_init(void)
2287{
2288	return target_register_template(&sbp_ops);
2289};
2290
2291static void __exit sbp_exit(void)
2292{
2293	target_unregister_template(&sbp_ops);
2294};
2295
2296MODULE_DESCRIPTION("FireWire SBP fabric driver");
2297MODULE_LICENSE("GPL");
2298module_init(sbp_init);
2299module_exit(sbp_exit);