/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>	/* usleep_range() */
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <asm/unaligned.h>

#include "sbp_target.h"

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *sbp_fabric_configfs;

/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};

static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};

#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);

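/*
 * Read the peer's EUI-64 (GUID) out of the bus information block of its
 * config ROM, one quadlet at a time.
 */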
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}

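/*
 * Session and login lookup helpers. A session corresponds to an initiator
 * GUID; each session may hold several logins, one per LUN.
 */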
static struct sbp_session *sbp_session_find_by_guid(
	struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, struct se_lun *lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->lun == lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		struct se_lun *lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->lun != lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

static struct sbp_login_descriptor *sbp_login_find_by_id(
	struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

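/*
 * Look up a LUN on the TPG; returns an ERR_PTR if the LUN number is out
 * of range or the LUN is not active.
 */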
static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return ERR_PTR(-EINVAL);

	spin_lock(&se_tpg->tpg_lun_lock);
	se_lun = se_tpg->tpg_lun_list[lun];

	if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
		se_lun = ERR_PTR(-ENODEV);

	spin_unlock(&se_tpg->tpg_lun_lock);

	return se_lun;
}

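/*
 * Allocate and initialise a session for the given initiator GUID, checking
 * the GUID against the configured node ACLs before registering the session
 * with the target core.
 */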
static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];
	struct se_node_acl *se_nacl;

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	sess->se_sess = transport_init_session();
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");

		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
	if (!se_nacl) {
		pr_warn("Node ACL not found for %s\n", guid_str);

		transport_free_session(sess->se_sess);
		kfree(sess);

		return ERR_PTR(-EPERM);
	}

	sess->se_sess->se_node_acl = se_nacl;

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);

	sess->guid = guid;

	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);

	return sess;
}

static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *);

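/*
 * Handle a LOGIN management ORB: validate the requested LUN and any
 * exclusivity constraints, create (or reuse) the session and login, then
 * write the login response block back to the initiator.
 */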
static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct se_lun *se_lun;
	int ret;
	u64 guid;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	int login_response_len;

	se_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
	if (IS_ERR(se_lun)) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		se_lun->unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, se_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
			sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->lun = se_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->lun->unpacked_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_del(&login->link);
		list_add_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}

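/*
 * Per-register handlers for the command block agent CSRs. Each returns an
 * IEEE 1394 rcode which tgt_agent_rw() sends back to the initiator.
 */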
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	__be32 state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = cpu_to_be32(agent->state);
		spin_unlock_bh(&agent->lock);
		memcpy(data, &state, sizeof(state));

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

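/*
 * Address handler for the command block agent register block: check that
 * the request comes from the logged-in initiator in the current bus
 * generation, then dispatch on the register offset.
 */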
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		return;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	default:
		BUG();
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}

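/*
 * Work item that walks the initiator's linked list of ORBs: each ORB is
 * fetched with a block read and queued for processing, until the next_ORB
 * pointer is null or the agent leaves the ACTIVE state.
 */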
static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			sbp_free_request(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with an increasing delay between
 * attempts.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode,
		int destination_id, int generation, int speed,
		unsigned long long offset, void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}

/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}

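/*
 * Copy the CDB out of the ORB; if the command is longer than the space in
 * the ORB, fetch the remainder from the initiator with another block read.
 */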
static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}

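/*
 * Fetch the scatter/gather page table from the initiator, if the ORB
 * indicates one is present.
 */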
static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}

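/*
 * Derive the transfer length and DMA direction from the ORB: with a page
 * table present, data_size counts page table elements and the segment
 * lengths are summed; otherwise data_size is the byte count itself.
 */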
static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	}

	unpacked_lun = req->login->lun->unpacked_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			req->sense_buf, unpacked_lun, data_length,
			MSG_SIMPLE_TAG, data_dir, 0);
}

/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_rw_data: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}

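/* Write the (variable-length) status block to the login's status FIFO. */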
static int sbp_send_status(struct sbp_target_request *req)
{
	int ret, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (ret != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
		return -EIO;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);

	return 0;
}

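/*
 * Repack fixed-format SCSI sense data into the sense fields of the SBP
 * status block.
 */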
static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_sense_mangle: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;	/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}

static void sbp_free_request(struct sbp_target_request *req)
{
	kfree(req->pg_tbl);
	kfree(req->cmd_buf);
	kfree(req);
}

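/*
 * Work item that fetches a management ORB from the initiator, dispatches
 * on the management function, and writes the resulting status block to
 * the ORB's status FIFO.
 */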
static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}

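/*
 * Address handler for the MANAGEMENT_AGENT register: a block write of an
 * ORB address queues sbp_mgt_agent_process() to fetch and execute the ORB.
 */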
1590static void sbp_mgt_agent_rw(struct fw_card *card,
1591	struct fw_request *request, int tcode, int destination, int source,
1592	int generation, unsigned long long offset, void *data, size_t length,
1593	void *callback_data)
1594{
1595	struct sbp_management_agent *agent = callback_data;
1596	struct sbp2_pointer *ptr = data;
1597	int rcode = RCODE_ADDRESS_ERROR;
1598
1599	if (!agent->tport->enable)
1600		goto out;
1601
1602	if ((offset != agent->handler.offset) || (length != 8))
1603		goto out;
1604
1605	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
1606		struct sbp_management_request *req;
1607		int prev_state;
1608
1609		spin_lock_bh(&agent->lock);
1610		prev_state = agent->state;
1611		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
1612		spin_unlock_bh(&agent->lock);
1613
1614		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
1615			pr_notice("ignoring management request while busy\n");
1616			rcode = RCODE_CONFLICT_ERROR;
1617			goto out;
1618		}
1619
1620		req = kzalloc(sizeof(*req), GFP_ATOMIC);
1621		if (!req) {
1622			rcode = RCODE_CONFLICT_ERROR;
1623			goto out;
1624		}
1625
1626		req->card = fw_card_get(card);
1627		req->generation = generation;
1628		req->node_addr = source;
1629		req->speed = fw_get_request_speed(request);
1630
1631		agent->orb_offset = sbp2_pointer_to_addr(ptr);
1632		agent->request = req;
1633
1634		queue_work(system_unbound_wq, &agent->work);
1635		rcode = RCODE_COMPLETE;
1636	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
1637		addr_to_sbp2_pointer(agent->orb_offset, ptr);
1638		rcode = RCODE_COMPLETE;
1639	} else {
1640		rcode = RCODE_TYPE_ERROR;
1641	}
1642
1643out:
1644	fw_send_response(card, request, rcode);
1645}
1646
1647static struct sbp_management_agent *sbp_management_agent_register(
1648		struct sbp_tport *tport)
1649{
1650	int ret;
1651	struct sbp_management_agent *agent;
1652
1653	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1654	if (!agent)
1655		return ERR_PTR(-ENOMEM);
1656
1657	spin_lock_init(&agent->lock);
1658	agent->tport = tport;
1659	agent->handler.length = 0x08;
1660	agent->handler.address_callback = sbp_mgt_agent_rw;
1661	agent->handler.callback_data = agent;
1662	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1663	INIT_WORK(&agent->work, sbp_mgt_agent_process);
1664	agent->orb_offset = 0;
1665	agent->request = NULL;
1666
1667	ret = fw_core_add_address_handler(&agent->handler,
1668			&sbp_register_region);
1669	if (ret < 0) {
1670		kfree(agent);
1671		return ERR_PTR(ret);
1672	}
1673
1674	return agent;
1675}
1676
1677static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
1678{
1679	fw_core_remove_address_handler(&agent->handler);
1680	cancel_work_sync(&agent->work);
1681	kfree(agent);
1682}
1683
1684static int sbp_check_true(struct se_portal_group *se_tpg)
1685{
1686	return 1;
1687}
1688
1689static int sbp_check_false(struct se_portal_group *se_tpg)
1690{
1691	return 0;
1692}
1693
1694static char *sbp_get_fabric_name(void)
1695{
1696	return "sbp";
1697}
1698
1699static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1700{
1701	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1702	struct sbp_tport *tport = tpg->tport;
1703
1704	return &tport->tport_name[0];
1705}
1706
1707static u16 sbp_get_tag(struct se_portal_group *se_tpg)
1708{
1709	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1710	return tpg->tport_tpgt;
1711}
1712
1713static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
1714{
1715	return 1;
1716}
1717
1718static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
1719{
1720	struct sbp_nacl *nacl;
1721
1722	nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
1723	if (!nacl) {
1724		pr_err("Unable to alocate struct sbp_nacl\n");
1725		return NULL;
1726	}
1727
1728	return &nacl->se_node_acl;
1729}
1730
1731static void sbp_release_fabric_acl(
1732	struct se_portal_group *se_tpg,
1733	struct se_node_acl *se_nacl)
1734{
1735	struct sbp_nacl *nacl =
1736		container_of(se_nacl, struct sbp_nacl, se_node_acl);
1737	kfree(nacl);
1738}
1739
1740static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
1741{
1742	return 1;
1743}
1744
1745static void sbp_release_cmd(struct se_cmd *se_cmd)
1746{
1747	struct sbp_target_request *req = container_of(se_cmd,
1748			struct sbp_target_request, se_cmd);
1749
1750	sbp_free_request(req);
1751}
1752
1753static int sbp_shutdown_session(struct se_session *se_sess)
1754{
1755	return 0;
1756}
1757
1758static void sbp_close_session(struct se_session *se_sess)
1759{
1760	return;
1761}
1762
1763static u32 sbp_sess_get_index(struct se_session *se_sess)
1764{
1765	return 0;
1766}
1767
1768static int sbp_write_pending(struct se_cmd *se_cmd)
1769{
1770	struct sbp_target_request *req = container_of(se_cmd,
1771			struct sbp_target_request, se_cmd);
1772	int ret;
1773
1774	ret = sbp_rw_data(req);
1775	if (ret) {
1776		req->status.status |= cpu_to_be32(
1777			STATUS_BLOCK_RESP(
1778				STATUS_RESP_TRANSPORT_FAILURE) |
1779			STATUS_BLOCK_DEAD(0) |
1780			STATUS_BLOCK_LEN(1) |
1781			STATUS_BLOCK_SBP_STATUS(
1782				SBP_STATUS_UNSPECIFIED_ERROR));
1783		sbp_send_status(req);
1784		return ret;
1785	}
1786
1787	transport_generic_process_write(se_cmd);
1788
1789	return 0;
1790}
1791
1792static int sbp_write_pending_status(struct se_cmd *se_cmd)
1793{
1794	return 0;
1795}
1796
1797static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
1798{
1799	return;
1800}
1801
1802static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
1803{
1804	struct sbp_target_request *req = container_of(se_cmd,
1805			struct sbp_target_request, se_cmd);
1806
1807	/* only used for printk until we do TMRs */
1808	return (u32)req->orb_pointer;
1809}
1810
1811static int sbp_get_cmd_state(struct se_cmd *se_cmd)
1812{
1813	return 0;
1814}
1815
1816static int sbp_queue_data_in(struct se_cmd *se_cmd)
1817{
1818	struct sbp_target_request *req = container_of(se_cmd,
1819			struct sbp_target_request, se_cmd);
1820	int ret;
1821
1822	ret = sbp_rw_data(req);
1823	if (ret) {
1824		req->status.status |= cpu_to_be32(
1825			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1826			STATUS_BLOCK_DEAD(0) |
1827			STATUS_BLOCK_LEN(1) |
1828			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1829		sbp_send_status(req);
1830		return ret;
1831	}
1832
1833	return sbp_send_sense(req);
1834}
1835
1836/*
1837 * Called after command (no data transfer) or after the write (to device)
1838 * operation is completed
1839 */
1840static int sbp_queue_status(struct se_cmd *se_cmd)
1841{
1842	struct sbp_target_request *req = container_of(se_cmd,
1843			struct sbp_target_request, se_cmd);
1844
1845	return sbp_send_sense(req);
1846}
1847
1848static int sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1849{
1850	return 0;
1851}
1852
1853static u16 sbp_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
1854{
1855	return 0;
1856}
1857
1858static u16 sbp_get_fabric_sense_len(void)
1859{
1860	return 0;
1861}
1862
1863static int sbp_check_stop_free(struct se_cmd *se_cmd)
1864{
1865	struct sbp_target_request *req = container_of(se_cmd,
1866			struct sbp_target_request, se_cmd);
1867
1868	transport_generic_free_cmd(&req->se_cmd, 0);
1869	return 1;
1870}
1871
1872/*
1873 * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
1874 */
1875static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
1876{
1877	/*
1878	 * Return a IEEE 1394 SCSI Protocol identifier for loopback operations
1879	 * This is defined in section 7.5.1 Table 362 in spc4r17
1880	 */
1881	return SCSI_PROTOCOL_SBP;
1882}
1883
1884static u32 sbp_get_pr_transport_id(
1885	struct se_portal_group *se_tpg,
1886	struct se_node_acl *se_nacl,
1887	struct t10_pr_registration *pr_reg,
1888	int *format_code,
1889	unsigned char *buf)
1890{
1891	int ret;
1892
1893	/*
1894	 * Set PROTOCOL IDENTIFIER to 3h for SBP
1895	 */
1896	buf[0] = SCSI_PROTOCOL_SBP;
1897	/*
1898	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
1899	 * over IEEE 1394
1900	 */
1901	ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
1902	if (ret < 0)
1903		pr_debug("sbp transport_id: invalid hex string\n");
1904
1905	/*
1906	 * The IEEE 1394 Transport ID is a hardcoded 24-byte length
1907	 */
1908	return 24;
1909}
1910
1911static u32 sbp_get_pr_transport_id_len(
1912	struct se_portal_group *se_tpg,
1913	struct se_node_acl *se_nacl,
1914	struct t10_pr_registration *pr_reg,
1915	int *format_code)
1916{
1917	*format_code = 0;
1918	/*
1919	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
1920	 * over IEEE 1394
1921	 *
1922	 * The SBP TransportID has a fixed length of 24 bytes
1923	 */
1924	return 24;
1925}
1926
1927/*
1928 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
1929 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
1930 */
1931static char *sbp_parse_pr_out_transport_id(
1932	struct se_portal_group *se_tpg,
1933	const char *buf,
1934	u32 *out_tid_len,
1935	char **port_nexus_ptr)
1936{
1937	/*
1938	 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID
1939	 * for initiator ports using SCSI over IEEE 1394
1940	 *
1941	 * The TransportID for an IEEE 1394 initiator port has a fixed size
1942	 * of 24 bytes, and IEEE 1394 does not carry an I_T nexus identifier,
1943	 * so we set *port_nexus_ptr to NULL.
1944	 */
1945	*port_nexus_ptr = NULL;
1946	*out_tid_len = 24;
1947
1948	return (char *)&buf[8];
1949}
1950
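/*
 * Count the LUNs configured on a TPG by walking its fixed-size LUN
 * table and skipping unallocated slots.
 */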
1951static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
1952{
1953	int i, count = 0;
1954
1955	spin_lock(&tpg->tpg_lun_lock);
1956	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
1957		struct se_lun *se_lun = tpg->tpg_lun_list[i];
1958
1959		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
1960			continue;
1961
1962		count++;
1963	}
1964	spin_unlock(&tpg->tpg_lun_lock);
1965
1966	return count;
1967}
1968
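/*
 * Rebuild this target's unit directory in the local config ROM: drop
 * any previously published descriptor, then (if the target is enabled)
 * publish the management agent address, the ORB/reconnect timeouts, one
 * entry per LUN and the EUI-64 unit unique ID leaf.
 */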
1969static int sbp_update_unit_directory(struct sbp_tport *tport)
1970{
1971	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
1972	u32 *data;
1973
1974	if (tport->unit_directory.data) {
1975		fw_core_remove_descriptor(&tport->unit_directory);
1976		kfree(tport->unit_directory.data);
1977		tport->unit_directory.data = NULL;
1978	}
1979
1980	if (!tport->enable || !tport->tpg)
1981		return 0;
1982
1983	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
1984
1985	/*
1986	 * Number of entries in the final unit directory:
1987	 *  - all of those in the template
1988	 *  - management_agent
1989	 *  - unit_characteristics
1990	 *  - reconnect_timeout
1991	 *  - unit unique ID
1992	 *  - one for each LUN
1993	 *
1994	 *  MUST NOT include leaf or sub-directory entries
1995	 */
1996	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
1997
1998	if (tport->directory_id != -1)
1999		num_entries++;
2000
2001	/* allocate num_entries + 4 for the header and unique ID leaf */
2002	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
2003	if (!data)
2004		return -ENOMEM;
2005
2006	/* directory_length */
2007	data[idx++] = num_entries << 16;
2008
2009	/* directory_id */
2010	if (tport->directory_id != -1)
2011		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
2012
2013	/* unit directory template */
2014	memcpy(&data[idx], sbp_unit_directory_template,
2015			sizeof(sbp_unit_directory_template));
2016	idx += ARRAY_SIZE(sbp_unit_directory_template);
2017
2018	/* management_agent */
2019	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
2020	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
2021
2022	/* unit_characteristics */
2023	data[idx++] = 0x3a000000 |
2024		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
2025		SBP_ORB_FETCH_SIZE;
2026
2027	/* reconnect_timeout */
2028	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
2029
2030	/* unit unique ID (leaf is just after LUNs) */
2031	data[idx++] = 0x8d000000 | (num_luns + 1);
2032
2033	spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
2034	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
2035		struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
2036		struct se_device *dev;
2037		int type;
2038
2039		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
2040			continue;
2041
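		/* drop the LUN table lock while calling into the backend */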
2042		spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
2043
2044		dev = se_lun->lun_se_dev;
2045		type = dev->transport->get_device_type(dev);
2046
2047		/* logical_unit_number */
2048		data[idx++] = 0x14000000 |
2049			((type << 16) & 0x1f0000) |
2050			(se_lun->unpacked_lun & 0xffff);
2051
2052		spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
2053	}
2054	spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
2055
2056	/* unit unique ID leaf */
2057	data[idx++] = 2 << 16;
2058	data[idx++] = tport->guid >> 32;
2059	data[idx++] = tport->guid;
2060
2061	tport->unit_directory.length = idx;
2062	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
2063	tport->unit_directory.data = data;
2064
2065	ret = fw_core_add_descriptor(&tport->unit_directory);
2066	if (ret < 0) {
2067		kfree(tport->unit_directory.data);
2068		tport->unit_directory.data = NULL;
2069	}
2070
2071	return ret;
2072}
2073
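/*
 * Parse a WWN given as exactly 16 hex digits (lower-case only when
 * strict), e.g. "0123456789abcdef" -> 0x0123456789abcdef.  A single
 * trailing newline is tolerated.  Returns the number of characters
 * consumed, or -1 on a malformed string.
 */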
2074static ssize_t sbp_parse_wwn(const char *name, u64 *wwn, int strict)
2075{
2076	const char *cp;
2077	char c, nibble;
2078	int pos = 0, err;
2079
2080	*wwn = 0;
2081	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
2082		c = *cp;
2083		if (c == '\n' && cp[1] == '\0')
2084			continue;
2085		if (c == '\0') {
2086			err = 2;
2087			if (pos != 16)
2088				goto fail;
2089			return cp - name;
2090		}
2091		err = 3;
2092		if (isdigit(c))
2093			nibble = c - '0';
2094		else if (isxdigit(c) && (islower(c) || !strict))
2095			nibble = tolower(c) - 'a' + 10;
2096		else
2097			goto fail;
2098		*wwn = (*wwn << 4) | nibble;
2099		pos++;
2100	}
2101	err = 4;
2102fail:
2103	pr_info("err %d len %td pos %d\n",
2104			err, cp - name, pos);
2105	return -1;
2106}
2107
2108static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
2109{
2110	return snprintf(buf, len, "%016llx", wwn);
2111}
2112
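/*
 * Create a node ACL; the configfs directory name must be the
 * initiator's EUI-64 formatted as accepted by sbp_parse_wwn() above.
 */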
2113static struct se_node_acl *sbp_make_nodeacl(
2114		struct se_portal_group *se_tpg,
2115		struct config_group *group,
2116		const char *name)
2117{
2118	struct se_node_acl *se_nacl, *se_nacl_new;
2119	struct sbp_nacl *nacl;
2120	u64 guid = 0;
2121	u32 nexus_depth = 1;
2122
2123	if (sbp_parse_wwn(name, &guid, 1) < 0)
2124		return ERR_PTR(-EINVAL);
2125
2126	se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
2127	if (!se_nacl_new)
2128		return ERR_PTR(-ENOMEM);
2129
2130	/*
2131	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
2132	 * when converting a NodeACL from demo mode -> explicit
2133	 */
2134	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
2135			name, nexus_depth);
2136	if (IS_ERR(se_nacl)) {
2137		sbp_release_fabric_acl(se_tpg, se_nacl_new);
2138		return se_nacl;
2139	}
2140
2141	nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);
2142	nacl->guid = guid;
2143	sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);
2144
2145	return se_nacl;
2146}
2147
2148static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
2149{
2150	struct sbp_nacl *nacl =
2151		container_of(se_acl, struct sbp_nacl, se_node_acl);
2152
2153	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
2154	kfree(nacl);
2155}
2156
2157static int sbp_post_link_lun(
2158		struct se_portal_group *se_tpg,
2159		struct se_lun *se_lun)
2160{
2161	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2162
2163	return sbp_update_unit_directory(tpg->tport);
2164}
2165
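/*
 * When the last LUN is removed, disable the target so that the
 * now-empty unit directory is withdrawn from the config ROM.
 */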
2166static void sbp_pre_unlink_lun(
2167		struct se_portal_group *se_tpg,
2168		struct se_lun *se_lun)
2169{
2170	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2171	struct sbp_tport *tport = tpg->tport;
2172	int ret;
2173
2174	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
2175		tport->enable = 0;
2176
2177	ret = sbp_update_unit_directory(tport);
2178	if (ret < 0)
2179		pr_err("unlink LUN: failed to update unit directory\n");
2180}
2181
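/*
 * Create a TPG from a configfs directory named "tpgt_<n>".  Only a
 * single TPG per target is supported, since each tport carries exactly
 * one management agent and unit directory.
 */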
2182static struct se_portal_group *sbp_make_tpg(
2183		struct se_wwn *wwn,
2184		struct config_group *group,
2185		const char *name)
2186{
2187	struct sbp_tport *tport =
2188		container_of(wwn, struct sbp_tport, tport_wwn);
2189
2190	struct sbp_tpg *tpg;
2191	unsigned long tpgt;
2192	int ret;
2193
2194	if (strstr(name, "tpgt_") != name)
2195		return ERR_PTR(-EINVAL);
2196	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
2197		return ERR_PTR(-EINVAL);
2198
2199	if (tport->tpg) {
2200		pr_err("Only one TPG per Unit is possible.\n");
2201		return ERR_PTR(-EBUSY);
2202	}
2203
2204	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2205	if (!tpg) {
2206		pr_err("Unable to allocate struct sbp_tpg\n");
2207		return ERR_PTR(-ENOMEM);
2208	}
2209
2210	tpg->tport = tport;
2211	tpg->tport_tpgt = tpgt;
2212	tport->tpg = tpg;
2213
2214	/* default attribute values */
2215	tport->enable = 0;
2216	tport->directory_id = -1;
2217	tport->mgt_orb_timeout = 15;
2218	tport->max_reconnect_timeout = 5;
2219	tport->max_logins_per_lun = 1;
2220
2221	tport->mgt_agt = sbp_management_agent_register(tport);
2222	if (IS_ERR(tport->mgt_agt)) {
2223		ret = PTR_ERR(tport->mgt_agt);
2224		kfree(tpg);
2225		return ERR_PTR(ret);
2226	}
2227
2228	ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
2229			&tpg->se_tpg, (void *)tpg,
2230			TRANSPORT_TPG_TYPE_NORMAL);
2231	if (ret < 0) {
2232		sbp_management_agent_unregister(tport->mgt_agt);
2233		kfree(tpg);
2234		return ERR_PTR(ret);
2235	}
2236
2237	return &tpg->se_tpg;
2238}
2239
2240static void sbp_drop_tpg(struct se_portal_group *se_tpg)
2241{
2242	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2243	struct sbp_tport *tport = tpg->tport;
2244
2245	core_tpg_deregister(se_tpg);
2246	sbp_management_agent_unregister(tport->mgt_agt);
2247	tport->tpg = NULL;
2248	kfree(tpg);
2249}
2250
2251static struct se_wwn *sbp_make_tport(
2252		struct target_fabric_configfs *tf,
2253		struct config_group *group,
2254		const char *name)
2255{
2256	struct sbp_tport *tport;
2257	u64 guid = 0;
2258
2259	if (sbp_parse_wwn(name, &guid, 1) < 0)
2260		return ERR_PTR(-EINVAL);
2261
2262	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2263	if (!tport) {
2264		pr_err("Unable to allocate struct sbp_tport\n");
2265		return ERR_PTR(-ENOMEM);
2266	}
2267
2268	tport->guid = guid;
2269	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2270
2271	return &tport->tport_wwn;
2272}
2273
2274static void sbp_drop_tport(struct se_wwn *wwn)
2275{
2276	struct sbp_tport *tport =
2277		container_of(wwn, struct sbp_tport, tport_wwn);
2278
2279	kfree(tport);
2280}
2281
2282static ssize_t sbp_wwn_show_attr_version(
2283		struct target_fabric_configfs *tf,
2284		char *page)
2285{
2286	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
2287}
2288
2289TF_WWN_ATTR_RO(sbp, version);
2290
2291static struct configfs_attribute *sbp_wwn_attrs[] = {
2292	&sbp_wwn_version.attr,
2293	NULL,
2294};
2295
2296static ssize_t sbp_tpg_show_directory_id(
2297		struct se_portal_group *se_tpg,
2298		char *page)
2299{
2300	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2301	struct sbp_tport *tport = tpg->tport;
2302
2303	if (tport->directory_id == -1)
2304		return sprintf(page, "implicit\n");
2305	else
2306		return sprintf(page, "%06x\n", tport->directory_id);
2307}
2308
2309static ssize_t sbp_tpg_store_directory_id(
2310		struct se_portal_group *se_tpg,
2311		const char *page,
2312		size_t count)
2313{
2314	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2315	struct sbp_tport *tport = tpg->tport;
2316	unsigned long val;
2317
2318	if (tport->enable) {
2319		pr_err("Cannot change the directory_id on an active target.\n");
2320		return -EBUSY;
2321	}
2322
2323	if (strstr(page, "implicit") == page) {
2324		tport->directory_id = -1;
2325	} else {
2326		if (kstrtoul(page, 16, &val) < 0)
2327			return -EINVAL;
2328		if (val > 0xffffff)
2329			return -EINVAL;
2330
2331		tport->directory_id = val;
2332	}
2333
2334	return count;
2335}
2336
2337static ssize_t sbp_tpg_show_enable(
2338		struct se_portal_group *se_tpg,
2339		char *page)
2340{
2341	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2342	struct sbp_tport *tport = tpg->tport;
2343	return sprintf(page, "%d\n", tport->enable);
2344}
2345
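/*
 * Enabling requires at least one configured LUN; disabling is refused
 * while sessions are still active.  Either way, the unit directory in
 * the config ROM is updated to match the new state.
 */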
2346static ssize_t sbp_tpg_store_enable(
2347		struct se_portal_group *se_tpg,
2348		const char *page,
2349		size_t count)
2350{
2351	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2352	struct sbp_tport *tport = tpg->tport;
2353	unsigned long val;
2354	int ret;
2355
2356	if (kstrtoul(page, 0, &val) < 0)
2357		return -EINVAL;
2358	if ((val != 0) && (val != 1))
2359		return -EINVAL;
2360
2361	if (tport->enable == val)
2362		return count;
2363
2364	if (val) {
2365		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
2366			pr_err("Cannot enable a target with no LUNs!\n");
2367			return -EINVAL;
2368		}
2369	} else {
2370		/* XXX: force-shutdown sessions instead? */
2371		spin_lock_bh(&se_tpg->session_lock);
2372		if (!list_empty(&se_tpg->tpg_sess_list)) {
2373			spin_unlock_bh(&se_tpg->session_lock);
2374			return -EBUSY;
2375		}
2376		spin_unlock_bh(&se_tpg->session_lock);
2377	}
2378
2379	tport->enable = val;
2380
2381	ret = sbp_update_unit_directory(tport);
2382	if (ret < 0) {
2383		pr_err("Could not update Config ROM\n");
2384		return ret;
2385	}
2386
2387	return count;
2388}
2389
2390TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
2391TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);
2392
2393static struct configfs_attribute *sbp_tpg_base_attrs[] = {
2394	&sbp_tpg_directory_id.attr,
2395	&sbp_tpg_enable.attr,
2396	NULL,
2397};
2398
2399static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
2400		struct se_portal_group *se_tpg,
2401		char *page)
2402{
2403	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2404	struct sbp_tport *tport = tpg->tport;
2405	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
2406}
2407
2408static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
2409		struct se_portal_group *se_tpg,
2410		const char *page,
2411		size_t count)
2412{
2413	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2414	struct sbp_tport *tport = tpg->tport;
2415	unsigned long val;
2416	int ret;
2417
2418	if (kstrtoul(page, 0, &val) < 0)
2419		return -EINVAL;
2420	if ((val < 1) || (val > 127))
2421		return -EINVAL;
2422
2423	if (tport->mgt_orb_timeout == val)
2424		return count;
2425
2426	tport->mgt_orb_timeout = val;
2427
2428	ret = sbp_update_unit_directory(tport);
2429	if (ret < 0)
2430		return ret;
2431
2432	return count;
2433}
2434
2435static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
2436		struct se_portal_group *se_tpg,
2437		char *page)
2438{
2439	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2440	struct sbp_tport *tport = tpg->tport;
2441	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
2442}
2443
2444static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
2445		struct se_portal_group *se_tpg,
2446		const char *page,
2447		size_t count)
2448{
2449	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2450	struct sbp_tport *tport = tpg->tport;
2451	unsigned long val;
2452	int ret;
2453
2454	if (kstrtoul(page, 0, &val) < 0)
2455		return -EINVAL;
2456	if ((val < 1) || (val > 32767))
2457		return -EINVAL;
2458
2459	if (tport->max_reconnect_timeout == val)
2460		return count;
2461
2462	tport->max_reconnect_timeout = val;
2463
2464	ret = sbp_update_unit_directory(tport);
2465	if (ret < 0)
2466		return ret;
2467
2468	return count;
2469}
2470
2471static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
2472		struct se_portal_group *se_tpg,
2473		char *page)
2474{
2475	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2476	struct sbp_tport *tport = tpg->tport;
2477	return sprintf(page, "%d\n", tport->max_logins_per_lun);
2478}
2479
2480static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
2481		struct se_portal_group *se_tpg,
2482		const char *page,
2483		size_t count)
2484{
2485	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2486	struct sbp_tport *tport = tpg->tport;
2487	unsigned long val;
2488
2489	if (kstrtoul(page, 0, &val) < 0)
2490		return -EINVAL;
2491	if ((val < 1) || (val > 127))
2492		return -EINVAL;
2493
2494	/* XXX: also check against current count? */
2495
2496	tport->max_logins_per_lun = val;
2497
2498	return count;
2499}
2500
2501TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
2502TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
2503TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);
2504
2505static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2506	&sbp_tpg_attrib_mgt_orb_timeout.attr,
2507	&sbp_tpg_attrib_max_reconnect_timeout.attr,
2508	&sbp_tpg_attrib_max_logins_per_lun.attr,
2509	NULL,
2510};
2511
2512static struct target_core_fabric_ops sbp_ops = {
2513	.get_fabric_name		= sbp_get_fabric_name,
2514	.get_fabric_proto_ident		= sbp_get_fabric_proto_ident,
2515	.tpg_get_wwn			= sbp_get_fabric_wwn,
2516	.tpg_get_tag			= sbp_get_tag,
2517	.tpg_get_default_depth		= sbp_get_default_depth,
2518	.tpg_get_pr_transport_id	= sbp_get_pr_transport_id,
2519	.tpg_get_pr_transport_id_len	= sbp_get_pr_transport_id_len,
2520	.tpg_parse_pr_out_transport_id	= sbp_parse_pr_out_transport_id,
2521	.tpg_check_demo_mode		= sbp_check_true,
2522	.tpg_check_demo_mode_cache	= sbp_check_true,
2523	.tpg_check_demo_mode_write_protect = sbp_check_false,
2524	.tpg_check_prod_mode_write_protect = sbp_check_false,
2525	.tpg_alloc_fabric_acl		= sbp_alloc_fabric_acl,
2526	.tpg_release_fabric_acl		= sbp_release_fabric_acl,
2527	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
2528	.release_cmd			= sbp_release_cmd,
2529	.shutdown_session		= sbp_shutdown_session,
2530	.close_session			= sbp_close_session,
2531	.sess_get_index			= sbp_sess_get_index,
2532	.write_pending			= sbp_write_pending,
2533	.write_pending_status		= sbp_write_pending_status,
2534	.set_default_node_attributes	= sbp_set_default_node_attrs,
2535	.get_task_tag			= sbp_get_task_tag,
2536	.get_cmd_state			= sbp_get_cmd_state,
2537	.queue_data_in			= sbp_queue_data_in,
2538	.queue_status			= sbp_queue_status,
2539	.queue_tm_rsp			= sbp_queue_tm_rsp,
2540	.get_fabric_sense_len		= sbp_get_fabric_sense_len,
2541	.set_fabric_sense_len		= sbp_set_fabric_sense_len,
2542	.check_stop_free		= sbp_check_stop_free,
2543
2544	.fabric_make_wwn		= sbp_make_tport,
2545	.fabric_drop_wwn		= sbp_drop_tport,
2546	.fabric_make_tpg		= sbp_make_tpg,
2547	.fabric_drop_tpg		= sbp_drop_tpg,
2548	.fabric_post_link		= sbp_post_link_lun,
2549	.fabric_pre_unlink		= sbp_pre_unlink_lun,
2550	.fabric_make_np			= NULL,
2551	.fabric_drop_np			= NULL,
2552	.fabric_make_nodeacl		= sbp_make_nodeacl,
2553	.fabric_drop_nodeacl		= sbp_drop_nodeacl,
2554};
2555
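/*
 * Register the "sbp" fabric with TCM.  The resulting configfs layout
 * (paths illustrative) looks like:
 *
 *   /sys/kernel/config/target/sbp/<guid>/tpgt_<n>/
 *       enable, directory_id,
 *       attrib/{mgt_orb_timeout,max_reconnect_timeout,max_logins_per_lun}
 */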
2556static int sbp_register_configfs(void)
2557{
2558	struct target_fabric_configfs *fabric;
2559	int ret;
2560
2561	fabric = target_fabric_configfs_init(THIS_MODULE, "sbp");
2562	if (!fabric) {
2563		pr_err("target_fabric_configfs_init() failed\n");
2564		return -ENOMEM;
2565	}
2566
2567	fabric->tf_ops = sbp_ops;
2568
2569	/*
2570	 * Setup default attribute lists for various fabric->tf_cit_tmpl
2571	 */
2572	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
2573	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
2574	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
2575	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
2576	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
2577	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2578	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2579	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2580	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2581
2582	ret = target_fabric_configfs_register(fabric);
2583	if (ret < 0) {
2584		pr_err("target_fabric_configfs_register() failed for SBP\n");
2585		return ret;
2586	}
2587
2588	sbp_fabric_configfs = fabric;
2589
2590	return 0;
2591}
2592
2593static void sbp_deregister_configfs(void)
2594{
2595	if (!sbp_fabric_configfs)
2596		return;
2597
2598	target_fabric_configfs_deregister(sbp_fabric_configfs);
2599	sbp_fabric_configfs = NULL;
2600}
2601
2602static int __init sbp_init(void)
2603{
2604	int ret;
2605
2606	ret = sbp_register_configfs();
2607	if (ret < 0)
2608		return ret;
2609
2610	return 0;
2611}
2612
2613static void sbp_exit(void)
2614{
2615	sbp_deregister_configfs();
2616}
2617
2618MODULE_DESCRIPTION("FireWire SBP fabric driver");
2619MODULE_LICENSE("GPL");
2620module_init(sbp_init);
2621module_exit(sbp_exit);