   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * SBP2 target driver (SCSI over IEEE1394 in target mode)
   4 *
   5 * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
   6 */
   7
   8#define KMSG_COMPONENT "sbp_target"
   9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  10
  11#include <linux/kernel.h>
  12#include <linux/module.h>
  13#include <linux/init.h>
  14#include <linux/types.h>
  15#include <linux/string.h>
  16#include <linux/configfs.h>
  17#include <linux/ctype.h>
  18#include <linux/delay.h>
  19#include <linux/firewire.h>
  20#include <linux/firewire-constants.h>
  21#include <scsi/scsi_proto.h>
  22#include <scsi/scsi_tcq.h>
  23#include <target/target_core_base.h>
  24#include <target/target_core_backend.h>
  25#include <target/target_core_fabric.h>
  26#include <asm/unaligned.h>
  27
  28#include "sbp_target.h"
  29
  30/* FireWire address region for management and command block address handlers */
  31static const struct fw_address_region sbp_register_region = {
  32	.start	= CSR_REGISTER_BASE + 0x10000,
  33	.end	= 0x1000000000000ULL,
  34};
  35
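/*
 * Each quadlet in the unit directory template below is an IEEE 1212
 * immediate entry: the key occupies the top byte and the 24-bit value the
 * rest, e.g. 0x1200609e is key 0x12 (unit specifier ID) with value
 * 0x00609e (NCITS/T10).
 */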
  36static const u32 sbp_unit_directory_template[] = {
  37	0x1200609e, /* unit_specifier_id: NCITS/T10 */
  38	0x13010483, /* unit_sw_version: 1155D Rev 4 */
  39	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
  40	0x390104d8, /* command_set: SPC-2 */
  41	0x3b000000, /* command_set_revision: 0 */
  42	0x3c000001, /* firmware_revision: 1 */
  43};
  44
  45#define SESSION_MAINTENANCE_INTERVAL HZ
  46
  47static atomic_t login_id = ATOMIC_INIT(0);
  48
  49static void session_maintenance_work(struct work_struct *);
  50static int sbp_run_transaction(struct fw_card *, int, int, int, int,
  51		unsigned long long, void *, size_t);
  52
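/*
 * The peer's EUI-64 (GUID) lives in quadlets 3 and 4 of its config ROM bus
 * info block; read both halves and combine them.
 */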
  53static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
  54{
  55	int ret;
  56	__be32 high, low;
  57
  58	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
  59			req->node_addr, req->generation, req->speed,
  60			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
  61			&high, sizeof(high));
  62	if (ret != RCODE_COMPLETE)
  63		return ret;
  64
  65	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
  66			req->node_addr, req->generation, req->speed,
  67			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
  68			&low, sizeof(low));
  69	if (ret != RCODE_COMPLETE)
  70		return ret;
  71
  72	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
  73
  74	return RCODE_COMPLETE;
  75}
  76
  77static struct sbp_session *sbp_session_find_by_guid(
  78	struct sbp_tpg *tpg, u64 guid)
  79{
  80	struct se_session *se_sess;
  81	struct sbp_session *sess, *found = NULL;
  82
  83	spin_lock_bh(&tpg->se_tpg.session_lock);
  84	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
  85		sess = se_sess->fabric_sess_ptr;
  86		if (sess->guid == guid)
  87			found = sess;
  88	}
  89	spin_unlock_bh(&tpg->se_tpg.session_lock);
  90
  91	return found;
  92}
  93
  94static struct sbp_login_descriptor *sbp_login_find_by_lun(
  95		struct sbp_session *session, u32 unpacked_lun)
  96{
  97	struct sbp_login_descriptor *login, *found = NULL;
  98
  99	spin_lock_bh(&session->lock);
 100	list_for_each_entry(login, &session->login_list, link) {
 101		if (login->login_lun == unpacked_lun)
 102			found = login;
 103	}
 104	spin_unlock_bh(&session->lock);
 105
 106	return found;
 107}
 108
 109static int sbp_login_count_all_by_lun(
 110		struct sbp_tpg *tpg,
 111		u32 unpacked_lun,
 112		int exclusive)
 113{
 114	struct se_session *se_sess;
 115	struct sbp_session *sess;
 116	struct sbp_login_descriptor *login;
 117	int count = 0;
 118
 119	spin_lock_bh(&tpg->se_tpg.session_lock);
 120	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 121		sess = se_sess->fabric_sess_ptr;
 122
 123		spin_lock_bh(&sess->lock);
 124		list_for_each_entry(login, &sess->login_list, link) {
 125			if (login->login_lun != unpacked_lun)
 126				continue;
 127
 128			if (!exclusive || login->exclusive)
 129				count++;
 130		}
 131		spin_unlock_bh(&sess->lock);
 132	}
 133	spin_unlock_bh(&tpg->se_tpg.session_lock);
 134
 135	return count;
 136}
 137
 138static struct sbp_login_descriptor *sbp_login_find_by_id(
 139	struct sbp_tpg *tpg, int login_id)
 140{
 141	struct se_session *se_sess;
 142	struct sbp_session *sess;
 143	struct sbp_login_descriptor *login, *found = NULL;
 144
 145	spin_lock_bh(&tpg->se_tpg.session_lock);
 146	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 147		sess = se_sess->fabric_sess_ptr;
 148
 149		spin_lock_bh(&sess->lock);
 150		list_for_each_entry(login, &sess->login_list, link) {
 151			if (login->login_id == login_id)
 152				found = login;
 153		}
 154		spin_unlock_bh(&sess->lock);
 155	}
 156	spin_unlock_bh(&tpg->se_tpg.session_lock);
 157
 158	return found;
 159}
 160
 161static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
 162{
 163	struct se_portal_group *se_tpg = &tpg->se_tpg;
 164	struct se_lun *se_lun;
 165
 166	rcu_read_lock();
 167	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
 168		if (se_lun->unpacked_lun == login_lun) {
 169			rcu_read_unlock();
 170			*err = 0;
 171			return login_lun;
 172		}
 173	}
 174	rcu_read_unlock();
 175
 176	*err = -ENODEV;
 177	return login_lun;
 178}
 179
 180static struct sbp_session *sbp_session_create(
 181		struct sbp_tpg *tpg,
 182		u64 guid)
 183{
 184	struct sbp_session *sess;
 185	int ret;
 186	char guid_str[17];
 187
 188	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
 189
 190	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
 191	if (!sess)
 192		return ERR_PTR(-ENOMEM);
 193
 194	spin_lock_init(&sess->lock);
 195	INIT_LIST_HEAD(&sess->login_list);
 196	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
 197	sess->guid = guid;
 198
 199	sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
 200					     sizeof(struct sbp_target_request),
 201					     TARGET_PROT_NORMAL, guid_str,
 202					     sess, NULL);
 203	if (IS_ERR(sess->se_sess)) {
 204		pr_err("failed to init se_session\n");
 205		ret = PTR_ERR(sess->se_sess);
 206		kfree(sess);
 207		return ERR_PTR(ret);
 208	}
 209
 210	return sess;
 211}
 212
 213static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
 214{
 215	spin_lock_bh(&sess->lock);
 216	if (!list_empty(&sess->login_list)) {
 217		spin_unlock_bh(&sess->lock);
 218		return;
 219	}
 220	spin_unlock_bh(&sess->lock);
 221
 222	if (cancel_work)
 223		cancel_delayed_work_sync(&sess->maint_work);
 224
 225	target_remove_session(sess->se_sess);
 226
 227	if (sess->card)
 228		fw_card_put(sess->card);
 229
 230	kfree(sess);
 231}
 232
 233static void sbp_target_agent_unregister(struct sbp_target_agent *);
 234
 235static void sbp_login_release(struct sbp_login_descriptor *login,
 236	bool cancel_work)
 237{
 238	struct sbp_session *sess = login->sess;
 239
 240	/* FIXME: abort/wait on tasks */
 241
 242	sbp_target_agent_unregister(login->tgt_agt);
 243
 244	if (sess) {
 245		spin_lock_bh(&sess->lock);
 246		list_del(&login->link);
 247		spin_unlock_bh(&sess->lock);
 248
 249		sbp_session_release(sess, cancel_work);
 250	}
 251
 252	kfree(login);
 253}
 254
 255static struct sbp_target_agent *sbp_target_agent_register(
 256	struct sbp_login_descriptor *);
 257
 258static void sbp_management_request_login(
 259	struct sbp_management_agent *agent, struct sbp_management_request *req,
 260	int *status_data_size)
 261{
 262	struct sbp_tport *tport = agent->tport;
 263	struct sbp_tpg *tpg = tport->tpg;
 264	struct sbp_session *sess;
 265	struct sbp_login_descriptor *login;
 266	struct sbp_login_response_block *response;
 267	u64 guid;
 268	u32 unpacked_lun;
 269	int login_response_len, ret;
 270
 271	unpacked_lun = sbp_get_lun_from_tpg(tpg,
 272			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
 273	if (ret) {
 274		pr_notice("login to unknown LUN: %d\n",
 275			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
 276
 277		req->status.status = cpu_to_be32(
 278			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 279			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
 280		return;
 281	}
 282
 283	ret = read_peer_guid(&guid, req);
 284	if (ret != RCODE_COMPLETE) {
 285		pr_warn("failed to read peer GUID: %d\n", ret);
 286
 287		req->status.status = cpu_to_be32(
 288			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 289			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 290		return;
 291	}
 292
 293	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
 294		unpacked_lun, guid);
 295
 296	sess = sbp_session_find_by_guid(tpg, guid);
 297	if (sess) {
 298		login = sbp_login_find_by_lun(sess, unpacked_lun);
 299		if (login) {
 300			pr_notice("initiator already logged-in\n");
 301
 302			/*
 303			 * SBP-2 R4 says we should return access denied, but
 304			 * that can confuse initiators. Instead we need to
 305			 * treat this like a reconnect, but send the login
 306			 * response block like a fresh login.
 307			 *
 308			 * This is required particularly in the case of Apple
 309			 * devices booting off the FireWire target, where
 310			 * the firmware has an active login to the target. When
 311			 * the OS takes control of the session it issues its own
 312			 * LOGIN rather than a RECONNECT. To avoid the machine
 313			 * waiting until the reconnect_hold expires, we can skip
 314			 * the ACCESS_DENIED errors to speed things up.
 315			 */
 316
 317			goto already_logged_in;
 318		}
 319	}
 320
 321	/*
 322	 * check exclusive bit in login request
 323	 * reject with access_denied if any logins present
 324	 */
 325	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
 326			sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
 327		pr_warn("refusing exclusive login with other active logins\n");
 328
 329		req->status.status = cpu_to_be32(
 330			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 331			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 332		return;
 333	}
 334
 335	/*
 336	 * check exclusive bit in any existing login descriptor
 337	 * reject with access_denied if any exclusive logins present
 338	 */
 339	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
 340		pr_warn("refusing login while another exclusive login present\n");
 341
 342		req->status.status = cpu_to_be32(
 343			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 344			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 345		return;
 346	}
 347
 348	/*
 349	 * check we haven't exceeded the number of allowed logins
 350	 * reject with resources_unavailable if we have
 351	 */
 352	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
 353			tport->max_logins_per_lun) {
 354		pr_warn("max number of logins reached\n");
 355
 356		req->status.status = cpu_to_be32(
 357			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 358			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 359		return;
 360	}
 361
 362	if (!sess) {
 363		sess = sbp_session_create(tpg, guid);
 364		if (IS_ERR(sess)) {
 365			switch (PTR_ERR(sess)) {
 366			case -EPERM:
 367				ret = SBP_STATUS_ACCESS_DENIED;
 368				break;
 369			default:
 370				ret = SBP_STATUS_RESOURCES_UNAVAIL;
 371				break;
 372			}
 373
 374			req->status.status = cpu_to_be32(
 375				STATUS_BLOCK_RESP(
 376					STATUS_RESP_REQUEST_COMPLETE) |
 377				STATUS_BLOCK_SBP_STATUS(ret));
 378			return;
 379		}
 380
 381		sess->node_id = req->node_addr;
 382		sess->card = fw_card_get(req->card);
 383		sess->generation = req->generation;
 384		sess->speed = req->speed;
 385
 386		schedule_delayed_work(&sess->maint_work,
 387				SESSION_MAINTENANCE_INTERVAL);
 388	}
 389
 390	/* only take the latest reconnect_hold into account */
 391	sess->reconnect_hold = min(
 392		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
 393		tport->max_reconnect_timeout) - 1;
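	/*
	 * For instance, a login ORB reconnect field of 2 requests 1 << 2 = 4
	 * seconds; with the default max_reconnect_timeout of 5 this stores
	 * min(4, 5) - 1 = 3, and session_check_for_reset() later arms a
	 * reconnect window of reconnect_hold + 1 = 4 seconds.
	 */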
 394
 395	login = kmalloc(sizeof(*login), GFP_KERNEL);
 396	if (!login) {
 397		pr_err("failed to allocate login descriptor\n");
 398
 399		sbp_session_release(sess, true);
 400
 401		req->status.status = cpu_to_be32(
 402			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 403			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 404		return;
 405	}
 406
 407	login->sess = sess;
 408	login->login_lun = unpacked_lun;
 409	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
 410	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
 411	login->login_id = atomic_inc_return(&login_id);
 412
 413	login->tgt_agt = sbp_target_agent_register(login);
 414	if (IS_ERR(login->tgt_agt)) {
 415		ret = PTR_ERR(login->tgt_agt);
 416		pr_err("failed to map command block handler: %d\n", ret);
 417
 418		sbp_session_release(sess, true);
 419		kfree(login);
 420
 421		req->status.status = cpu_to_be32(
 422			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 423			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 424		return;
 425	}
 426
 427	spin_lock_bh(&sess->lock);
 428	list_add_tail(&login->link, &sess->login_list);
 429	spin_unlock_bh(&sess->lock);
 430
 431already_logged_in:
 432	response = kzalloc(sizeof(*response), GFP_KERNEL);
 433	if (!response) {
 434		pr_err("failed to allocate login response block\n");
 435
 436		sbp_login_release(login, true);
 437
 438		req->status.status = cpu_to_be32(
 439			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 440			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 441		return;
 442	}
 443
 444	login_response_len = clamp_val(
 445			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
 446			12, sizeof(*response));
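	/* response length goes in the upper 16 bits, login_ID in the lower 16 */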
 447	response->misc = cpu_to_be32(
 448		((login_response_len & 0xffff) << 16) |
 449		(login->login_id & 0xffff));
 450	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
 451	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
 452		&response->command_block_agent);
 453
 454	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
 455		sess->node_id, sess->generation, sess->speed,
 456		sbp2_pointer_to_addr(&req->orb.ptr2), response,
 457		login_response_len);
 458	if (ret != RCODE_COMPLETE) {
 459		pr_debug("failed to write login response block: %x\n", ret);
 460
 461		kfree(response);
 462		sbp_login_release(login, true);
 463
 464		req->status.status = cpu_to_be32(
 465			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 466			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 467		return;
 468	}
 469
 470	kfree(response);
 471
 472	req->status.status = cpu_to_be32(
 473		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 474		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 475}
 476
 477static void sbp_management_request_query_logins(
 478	struct sbp_management_agent *agent, struct sbp_management_request *req,
 479	int *status_data_size)
 480{
 481	pr_notice("QUERY LOGINS not implemented\n");
 482	/* FIXME: implement */
 483
 484	req->status.status = cpu_to_be32(
 485		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 486		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
 487}
 488
 489static void sbp_management_request_reconnect(
 490	struct sbp_management_agent *agent, struct sbp_management_request *req,
 491	int *status_data_size)
 492{
 493	struct sbp_tport *tport = agent->tport;
 494	struct sbp_tpg *tpg = tport->tpg;
 495	int ret;
 496	u64 guid;
 497	struct sbp_login_descriptor *login;
 498
 499	ret = read_peer_guid(&guid, req);
 500	if (ret != RCODE_COMPLETE) {
 501		pr_warn("failed to read peer GUID: %d\n", ret);
 502
 503		req->status.status = cpu_to_be32(
 504			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 505			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 506		return;
 507	}
 508
 509	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
 510
 511	login = sbp_login_find_by_id(tpg,
 512		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
 513
 514	if (!login) {
 515		pr_err("mgt_agent RECONNECT unknown login ID\n");
 516
 517		req->status.status = cpu_to_be32(
 518			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 519			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 520		return;
 521	}
 522
 523	if (login->sess->guid != guid) {
 524		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
 525
 526		req->status.status = cpu_to_be32(
 527			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 528			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 529		return;
 530	}
 531
 532	spin_lock_bh(&login->sess->lock);
 533	if (login->sess->card)
 534		fw_card_put(login->sess->card);
 535
 536	/* update the node details */
 537	login->sess->generation = req->generation;
 538	login->sess->node_id = req->node_addr;
 539	login->sess->card = fw_card_get(req->card);
 540	login->sess->speed = req->speed;
 541	spin_unlock_bh(&login->sess->lock);
 542
 543	req->status.status = cpu_to_be32(
 544		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 545		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 546}
 547
 548static void sbp_management_request_logout(
 549	struct sbp_management_agent *agent, struct sbp_management_request *req,
 550	int *status_data_size)
 551{
 552	struct sbp_tport *tport = agent->tport;
 553	struct sbp_tpg *tpg = tport->tpg;
 554	int id;
 555	struct sbp_login_descriptor *login;
 556
 557	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
 558
 559	login = sbp_login_find_by_id(tpg, id);
 560	if (!login) {
 561		pr_warn("cannot find login: %d\n", id);
 562
 563		req->status.status = cpu_to_be32(
 564			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 565			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
 566		return;
 567	}
 568
 569	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
 570		login->login_lun, login->login_id);
 571
 572	if (req->node_addr != login->sess->node_id) {
 573		pr_warn("logout from different node ID\n");
 574
 575		req->status.status = cpu_to_be32(
 576			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 577			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 578		return;
 579	}
 580
 581	sbp_login_release(login, true);
 582
 583	req->status.status = cpu_to_be32(
 584		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 585		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 586}
 587
 588static void session_check_for_reset(struct sbp_session *sess)
 589{
 590	bool card_valid = false;
 591
 592	spin_lock_bh(&sess->lock);
 593
 594	if (sess->card) {
 595		spin_lock_irq(&sess->card->lock);
 596		card_valid = (sess->card->local_node != NULL);
 597		spin_unlock_irq(&sess->card->lock);
 598
 599		if (!card_valid) {
 600			fw_card_put(sess->card);
 601			sess->card = NULL;
 602		}
 603	}
 604
 605	if (!card_valid || (sess->generation != sess->card->generation)) {
 606		pr_info("Waiting for reconnect from node: %016llx\n",
 607				sess->guid);
 608
 609		sess->node_id = -1;
 610		sess->reconnect_expires = get_jiffies_64() +
 611			((sess->reconnect_hold + 1) * HZ);
 612	}
 613
 614	spin_unlock_bh(&sess->lock);
 615}
 616
 617static void session_reconnect_expired(struct sbp_session *sess)
 618{
 619	struct sbp_login_descriptor *login, *temp;
 620	LIST_HEAD(login_list);
 621
 622	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
 623
 624	spin_lock_bh(&sess->lock);
 625	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
 626		login->sess = NULL;
 627		list_move_tail(&login->link, &login_list);
 628	}
 629	spin_unlock_bh(&sess->lock);
 630
 631	list_for_each_entry_safe(login, temp, &login_list, link) {
 632		list_del(&login->link);
 633		sbp_login_release(login, false);
 634	}
 635
 636	sbp_session_release(sess, false);
 637}
 638
 639static void session_maintenance_work(struct work_struct *work)
 640{
 641	struct sbp_session *sess = container_of(work, struct sbp_session,
 642			maint_work.work);
 643
 644	/* could be called while tearing down the session */
 645	spin_lock_bh(&sess->lock);
 646	if (list_empty(&sess->login_list)) {
 647		spin_unlock_bh(&sess->lock);
 648		return;
 649	}
 650	spin_unlock_bh(&sess->lock);
 651
 652	if (sess->node_id != -1) {
 653		/* check for bus reset and make node_id invalid */
 654		session_check_for_reset(sess);
 655
 656		schedule_delayed_work(&sess->maint_work,
 657				SESSION_MAINTENANCE_INTERVAL);
 658	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
 659		/* still waiting for reconnect */
 660		schedule_delayed_work(&sess->maint_work,
 661				SESSION_MAINTENANCE_INTERVAL);
 662	} else {
 663		/* reconnect timeout has expired */
 664		session_reconnect_expired(sess);
 665	}
 666}
 667
 668static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
 669		struct sbp_target_agent *agent)
 670{
 671	int state;
 672
 673	switch (tcode) {
 674	case TCODE_READ_QUADLET_REQUEST:
 675		pr_debug("tgt_agent AGENT_STATE READ\n");
 676
 677		spin_lock_bh(&agent->lock);
 678		state = agent->state;
 679		spin_unlock_bh(&agent->lock);
 680
 681		*(__be32 *)data = cpu_to_be32(state);
 682
 683		return RCODE_COMPLETE;
 684
 685	case TCODE_WRITE_QUADLET_REQUEST:
 686		/* ignored */
 687		return RCODE_COMPLETE;
 688
 689	default:
 690		return RCODE_TYPE_ERROR;
 691	}
 692}
 693
 694static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
 695		struct sbp_target_agent *agent)
 696{
 697	switch (tcode) {
 698	case TCODE_WRITE_QUADLET_REQUEST:
 699		pr_debug("tgt_agent AGENT_RESET\n");
 700		spin_lock_bh(&agent->lock);
 701		agent->state = AGENT_STATE_RESET;
 702		spin_unlock_bh(&agent->lock);
 703		return RCODE_COMPLETE;
 704
 705	default:
 706		return RCODE_TYPE_ERROR;
 707	}
 708}
 709
 710static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
 711		struct sbp_target_agent *agent)
 712{
 713	struct sbp2_pointer *ptr = data;
 714
 715	switch (tcode) {
 716	case TCODE_WRITE_BLOCK_REQUEST:
 717		spin_lock_bh(&agent->lock);
 718		if (agent->state != AGENT_STATE_SUSPENDED &&
 719				agent->state != AGENT_STATE_RESET) {
 720			spin_unlock_bh(&agent->lock);
 721			pr_notice("Ignoring ORB_POINTER write while active.\n");
 722			return RCODE_CONFLICT_ERROR;
 723		}
 724		agent->state = AGENT_STATE_ACTIVE;
 725		spin_unlock_bh(&agent->lock);
 726
 727		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
 728		agent->doorbell = false;
 729
 730		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
 731				agent->orb_pointer);
 732
 733		queue_work(system_unbound_wq, &agent->work);
 734
 735		return RCODE_COMPLETE;
 736
 737	case TCODE_READ_BLOCK_REQUEST:
 738		pr_debug("tgt_agent ORB_POINTER READ\n");
 739		spin_lock_bh(&agent->lock);
 740		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
 741		spin_unlock_bh(&agent->lock);
 742		return RCODE_COMPLETE;
 743
 744	default:
 745		return RCODE_TYPE_ERROR;
 746	}
 747}
 748
 749static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
 750		struct sbp_target_agent *agent)
 751{
 752	switch (tcode) {
 753	case TCODE_WRITE_QUADLET_REQUEST:
 754		spin_lock_bh(&agent->lock);
 755		if (agent->state != AGENT_STATE_SUSPENDED) {
 756			spin_unlock_bh(&agent->lock);
 757			pr_debug("Ignoring DOORBELL while active.\n");
 758			return RCODE_CONFLICT_ERROR;
 759		}
 760		agent->state = AGENT_STATE_ACTIVE;
 761		spin_unlock_bh(&agent->lock);
 762
 763		agent->doorbell = true;
 764
 765		pr_debug("tgt_agent DOORBELL\n");
 766
 767		queue_work(system_unbound_wq, &agent->work);
 768
 769		return RCODE_COMPLETE;
 770
 771	case TCODE_READ_QUADLET_REQUEST:
 772		return RCODE_COMPLETE;
 773
 774	default:
 775		return RCODE_TYPE_ERROR;
 776	}
 777}
 778
 779static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
 780		int tcode, void *data, struct sbp_target_agent *agent)
 781{
 782	switch (tcode) {
 783	case TCODE_WRITE_QUADLET_REQUEST:
 784		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
 785		/* ignored as we don't send unsolicited status */
 786		return RCODE_COMPLETE;
 787
 788	case TCODE_READ_QUADLET_REQUEST:
 789		return RCODE_COMPLETE;
 790
 791	default:
 792		return RCODE_TYPE_ERROR;
 793	}
 794}
 795
 796static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
 797		int tcode, int destination, int source, int generation,
 798		unsigned long long offset, void *data, size_t length,
 799		void *callback_data)
 800{
 801	struct sbp_target_agent *agent = callback_data;
 802	struct sbp_session *sess = agent->login->sess;
 803	int sess_gen, sess_node, rcode;
 804
 805	spin_lock_bh(&sess->lock);
 806	sess_gen = sess->generation;
 807	sess_node = sess->node_id;
 808	spin_unlock_bh(&sess->lock);
 809
 810	if (generation != sess_gen) {
 811		pr_notice("ignoring request with wrong generation\n");
 812		rcode = RCODE_TYPE_ERROR;
 813		goto out;
 814	}
 815
 816	if (source != sess_node) {
 817		pr_notice("ignoring request from foreign node (%x != %x)\n",
 818				source, sess_node);
 819		rcode = RCODE_TYPE_ERROR;
 820		goto out;
 821	}
 822
 823	/* turn offset into the offset from the start of the block */
 824	offset -= agent->handler.offset;
 825
 826	if (offset == 0x00 && length == 4) {
 827		/* AGENT_STATE */
 828		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
 829	} else if (offset == 0x04 && length == 4) {
 830		/* AGENT_RESET */
 831		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
 832	} else if (offset == 0x08 && length == 8) {
 833		/* ORB_POINTER */
 834		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
 835	} else if (offset == 0x10 && length == 4) {
 836		/* DOORBELL */
 837		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
 838	} else if (offset == 0x14 && length == 4) {
 839		/* UNSOLICITED_STATUS_ENABLE */
 840		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
 841				data, agent);
 842	} else {
 843		rcode = RCODE_ADDRESS_ERROR;
 844	}
 845
 846out:
 847	fw_send_response(card, request, rcode);
 848}
 849
 850static void sbp_handle_command(struct sbp_target_request *);
 851static int sbp_send_status(struct sbp_target_request *);
 852static void sbp_free_request(struct sbp_target_request *);
 853
 854static void tgt_agent_process_work(struct work_struct *work)
 855{
 856	struct sbp_target_request *req =
 857		container_of(work, struct sbp_target_request, work);
 858
 859	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
 860			req->orb_pointer,
 861			sbp2_pointer_to_addr(&req->orb.next_orb),
 862			sbp2_pointer_to_addr(&req->orb.data_descriptor),
 863			be32_to_cpu(req->orb.misc));
 864
 865	if (req->orb_pointer >> 32)
 866		pr_debug("ORB with high bits set\n");
 867
 868	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
  869		case 0: /* Format specified by this standard */
 870			sbp_handle_command(req);
 871			return;
 872		case 1: /* Reserved for future standardization */
 873		case 2: /* Vendor-dependent */
 874			req->status.status |= cpu_to_be32(
 875					STATUS_BLOCK_RESP(
 876						STATUS_RESP_REQUEST_COMPLETE) |
 877					STATUS_BLOCK_DEAD(0) |
 878					STATUS_BLOCK_LEN(1) |
 879					STATUS_BLOCK_SBP_STATUS(
 880						SBP_STATUS_REQ_TYPE_NOTSUPP));
 881			sbp_send_status(req);
 882			return;
 883		case 3: /* Dummy ORB */
 884			req->status.status |= cpu_to_be32(
 885					STATUS_BLOCK_RESP(
 886						STATUS_RESP_REQUEST_COMPLETE) |
 887					STATUS_BLOCK_DEAD(0) |
 888					STATUS_BLOCK_LEN(1) |
 889					STATUS_BLOCK_SBP_STATUS(
 890						SBP_STATUS_DUMMY_ORB_COMPLETE));
 891			sbp_send_status(req);
 892			return;
 893		default:
 894			BUG();
 895	}
 896}
 897
 898/* used to double-check we haven't been issued an AGENT_RESET */
 899static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
 900{
 901	bool active;
 902
 903	spin_lock_bh(&agent->lock);
 904	active = (agent->state == AGENT_STATE_ACTIVE);
 905	spin_unlock_bh(&agent->lock);
 906
 907	return active;
 908}
 909
 910static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
 911	struct fw_card *card, u64 next_orb)
 912{
 913	struct se_session *se_sess = sess->se_sess;
 914	struct sbp_target_request *req;
 915	int tag, cpu;
 916
 917	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
 918	if (tag < 0)
 919		return ERR_PTR(-ENOMEM);
 920
 921	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
 922	memset(req, 0, sizeof(*req));
 923	req->se_cmd.map_tag = tag;
 924	req->se_cmd.map_cpu = cpu;
 925	req->se_cmd.tag = next_orb;
 926
 927	return req;
 928}
 929
 930static void tgt_agent_fetch_work(struct work_struct *work)
 931{
 932	struct sbp_target_agent *agent =
 933		container_of(work, struct sbp_target_agent, work);
 934	struct sbp_session *sess = agent->login->sess;
 935	struct sbp_target_request *req;
 936	int ret;
 937	bool doorbell = agent->doorbell;
 938	u64 next_orb = agent->orb_pointer;
 939
 940	while (next_orb && tgt_agent_check_active(agent)) {
 941		req = sbp_mgt_get_req(sess, sess->card, next_orb);
 942		if (IS_ERR(req)) {
 943			spin_lock_bh(&agent->lock);
 944			agent->state = AGENT_STATE_DEAD;
 945			spin_unlock_bh(&agent->lock);
 946			return;
 947		}
 948
 949		req->login = agent->login;
 950		req->orb_pointer = next_orb;
 951
 952		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
 953					req->orb_pointer >> 32));
 954		req->status.orb_low = cpu_to_be32(
 955				req->orb_pointer & 0xfffffffc);
 956
 957		/* read in the ORB */
 958		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
 959				sess->node_id, sess->generation, sess->speed,
 960				req->orb_pointer, &req->orb, sizeof(req->orb));
 961		if (ret != RCODE_COMPLETE) {
 962			pr_debug("tgt_orb fetch failed: %x\n", ret);
 963			req->status.status |= cpu_to_be32(
 964					STATUS_BLOCK_SRC(
 965						STATUS_SRC_ORB_FINISHED) |
 966					STATUS_BLOCK_RESP(
 967						STATUS_RESP_TRANSPORT_FAILURE) |
 968					STATUS_BLOCK_DEAD(1) |
 969					STATUS_BLOCK_LEN(1) |
 970					STATUS_BLOCK_SBP_STATUS(
 971						SBP_STATUS_UNSPECIFIED_ERROR));
 972			spin_lock_bh(&agent->lock);
 973			agent->state = AGENT_STATE_DEAD;
 974			spin_unlock_bh(&agent->lock);
 975
 976			sbp_send_status(req);
 977			return;
 978		}
 979
 980		/* check the next_ORB field */
 981		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
 982			next_orb = 0;
 983			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
 984						STATUS_SRC_ORB_FINISHED));
 985		} else {
 986			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
 987			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
 988						STATUS_SRC_ORB_CONTINUING));
 989		}
 990
 991		if (tgt_agent_check_active(agent) && !doorbell) {
 992			INIT_WORK(&req->work, tgt_agent_process_work);
 993			queue_work(system_unbound_wq, &req->work);
 994		} else {
 995			/* don't process this request, just check next_ORB */
 996			sbp_free_request(req);
 997		}
 998
 999		spin_lock_bh(&agent->lock);
1000		doorbell = agent->doorbell = false;
1001
1002		/* check if we should carry on processing */
1003		if (next_orb)
1004			agent->orb_pointer = next_orb;
1005		else
1006			agent->state = AGENT_STATE_SUSPENDED;
1007
1008		spin_unlock_bh(&agent->lock);
1009	}
1010}
1011
1012static struct sbp_target_agent *sbp_target_agent_register(
1013		struct sbp_login_descriptor *login)
1014{
1015	struct sbp_target_agent *agent;
1016	int ret;
1017
1018	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1019	if (!agent)
1020		return ERR_PTR(-ENOMEM);
1021
1022	spin_lock_init(&agent->lock);
1023
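	/*
	 * A 0x20-byte region covers the command block agent registers decoded
	 * in tgt_agent_rw(): AGENT_STATE (0x00), AGENT_RESET (0x04),
	 * ORB_POINTER (0x08, 8 bytes), DOORBELL (0x10) and
	 * UNSOLICITED_STATUS_ENABLE (0x14).
	 */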
1024	agent->handler.length = 0x20;
1025	agent->handler.address_callback = tgt_agent_rw;
1026	agent->handler.callback_data = agent;
1027
1028	agent->login = login;
1029	agent->state = AGENT_STATE_RESET;
1030	INIT_WORK(&agent->work, tgt_agent_fetch_work);
1031	agent->orb_pointer = 0;
1032	agent->doorbell = false;
1033
1034	ret = fw_core_add_address_handler(&agent->handler,
1035			&sbp_register_region);
1036	if (ret < 0) {
1037		kfree(agent);
1038		return ERR_PTR(ret);
1039	}
1040
1041	return agent;
1042}
1043
1044static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
1045{
1046	fw_core_remove_address_handler(&agent->handler);
1047	cancel_work_sync(&agent->work);
1048	kfree(agent);
1049}
1050
1051/*
1052 * Simple wrapper around fw_run_transaction that retries the transaction up to
1053 * five times in case of failure, backing off quadratically between attempts.
1054 */
1055static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
1056		int generation, int speed, unsigned long long offset,
1057		void *payload, size_t length)
1058{
1059	int attempt, ret, delay;
1060
1061	for (attempt = 1; attempt <= 5; attempt++) {
1062		ret = fw_run_transaction(card, tcode, destination_id,
1063				generation, speed, offset, payload, length);
1064
1065		switch (ret) {
1066		case RCODE_COMPLETE:
1067		case RCODE_TYPE_ERROR:
1068		case RCODE_ADDRESS_ERROR:
1069		case RCODE_GENERATION:
1070			return ret;
1071
1072		default:
1073			delay = 5 * attempt * attempt;
1074			usleep_range(delay, delay * 2);
1075		}
1076	}
1077
1078	return ret;
1079}
1080
1081/*
1082 * Wrapper around sbp_run_transaction that gets the card, destination,
1083 * generation and speed out of the request's session.
1084 */
1085static int sbp_run_request_transaction(struct sbp_target_request *req,
1086		int tcode, unsigned long long offset, void *payload,
1087		size_t length)
1088{
1089	struct sbp_login_descriptor *login = req->login;
1090	struct sbp_session *sess = login->sess;
1091	struct fw_card *card;
1092	int node_id, generation, speed, ret;
1093
1094	spin_lock_bh(&sess->lock);
1095	card = fw_card_get(sess->card);
1096	node_id = sess->node_id;
1097	generation = sess->generation;
1098	speed = sess->speed;
1099	spin_unlock_bh(&sess->lock);
1100
1101	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
1102			offset, payload, length);
1103
1104	fw_card_put(card);
1105
1106	return ret;
1107}
1108
1109static int sbp_fetch_command(struct sbp_target_request *req)
1110{
1111	int ret, cmd_len, copy_len;
1112
1113	cmd_len = scsi_command_size(req->orb.command_block);
1114
1115	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
1116	if (!req->cmd_buf)
1117		return -ENOMEM;
1118
1119	memcpy(req->cmd_buf, req->orb.command_block,
1120		min_t(int, cmd_len, sizeof(req->orb.command_block)));
1121
1122	if (cmd_len > sizeof(req->orb.command_block)) {
1123		pr_debug("sbp_fetch_command: filling in long command\n");
1124		copy_len = cmd_len - sizeof(req->orb.command_block);
1125
1126		ret = sbp_run_request_transaction(req,
1127				TCODE_READ_BLOCK_REQUEST,
1128				req->orb_pointer + sizeof(req->orb),
1129				req->cmd_buf + sizeof(req->orb.command_block),
1130				copy_len);
1131		if (ret != RCODE_COMPLETE)
1132			return -EIO;
1133	}
1134
1135	return 0;
1136}
1137
1138static int sbp_fetch_page_table(struct sbp_target_request *req)
1139{
1140	int pg_tbl_sz, ret;
1141	struct sbp_page_table_entry *pg_tbl;
1142
1143	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
1144		return 0;
1145
1146	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
1147		sizeof(struct sbp_page_table_entry);
1148
1149	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
1150	if (!pg_tbl)
1151		return -ENOMEM;
1152
1153	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
1154			sbp2_pointer_to_addr(&req->orb.data_descriptor),
1155			pg_tbl, pg_tbl_sz);
1156	if (ret != RCODE_COMPLETE) {
1157		kfree(pg_tbl);
1158		return -EIO;
1159	}
1160
1161	req->pg_tbl = pg_tbl;
1162	return 0;
1163}
1164
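/*
 * The ORB's data_size field counts page table entries when a page table is
 * present, otherwise it is the transfer length in bytes; reduce either form
 * to a byte count and a DMA direction for the target core.
 */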
1165static void sbp_calc_data_length_direction(struct sbp_target_request *req,
1166	u32 *data_len, enum dma_data_direction *data_dir)
1167{
1168	int data_size, direction, idx;
1169
1170	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1171	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
1172
1173	if (!data_size) {
1174		*data_len = 0;
1175		*data_dir = DMA_NONE;
1176		return;
1177	}
1178
1179	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1180
1181	if (req->pg_tbl) {
1182		*data_len = 0;
1183		for (idx = 0; idx < data_size; idx++) {
1184			*data_len += be16_to_cpu(
1185					req->pg_tbl[idx].segment_length);
1186		}
1187	} else {
1188		*data_len = data_size;
1189	}
1190}
1191
1192static void sbp_handle_command(struct sbp_target_request *req)
1193{
1194	struct sbp_login_descriptor *login = req->login;
1195	struct sbp_session *sess = login->sess;
1196	int ret, unpacked_lun;
1197	u32 data_length;
1198	enum dma_data_direction data_dir;
1199
1200	ret = sbp_fetch_command(req);
1201	if (ret) {
1202		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
1203		goto err;
1204	}
1205
1206	ret = sbp_fetch_page_table(req);
1207	if (ret) {
1208		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
1209			ret);
1210		goto err;
1211	}
1212
1213	unpacked_lun = req->login->login_lun;
1214	sbp_calc_data_length_direction(req, &data_length, &data_dir);
1215
1216	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
1217			req->orb_pointer, unpacked_lun, data_length, data_dir);
1218
1219	/* only used for printk until we do TMRs */
1220	req->se_cmd.tag = req->orb_pointer;
1221	target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
1222			  req->sense_buf, unpacked_lun, data_length,
1223			  TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF);
1224	return;
1225
1226err:
1227	req->status.status |= cpu_to_be32(
1228		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1229		STATUS_BLOCK_DEAD(0) |
1230		STATUS_BLOCK_LEN(1) |
1231		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1232	sbp_send_status(req);
1233}
1234
1235/*
1236 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
1237 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
1238 */
1239static int sbp_rw_data(struct sbp_target_request *req)
1240{
1241	struct sbp_session *sess = req->login->sess;
1242	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
1243		generation, num_pte, length, tfr_length,
1244		rcode = RCODE_COMPLETE;
1245	struct sbp_page_table_entry *pte;
1246	unsigned long long offset;
1247	struct fw_card *card;
1248	struct sg_mapping_iter iter;
1249
1250	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
1251		tcode = TCODE_WRITE_BLOCK_REQUEST;
1252		sg_miter_flags = SG_MITER_FROM_SG;
1253	} else {
1254		tcode = TCODE_READ_BLOCK_REQUEST;
1255		sg_miter_flags = SG_MITER_TO_SG;
1256	}
1257
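	/*
	 * max_payload in the ORB is an exponent: the largest packet we may use
	 * is 2^(max_payload + 2) bytes, hence the "4 <<" below.
	 */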
1258	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
1259	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
1260
1261	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
1262	if (pg_size) {
 1263		pr_err("sbp_rw_data: page size ignored\n");
1264	}
1265
1266	spin_lock_bh(&sess->lock);
1267	card = fw_card_get(sess->card);
1268	node_id = sess->node_id;
1269	generation = sess->generation;
1270	spin_unlock_bh(&sess->lock);
1271
1272	if (req->pg_tbl) {
1273		pte = req->pg_tbl;
1274		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1275
1276		offset = 0;
1277		length = 0;
1278	} else {
1279		pte = NULL;
1280		num_pte = 0;
1281
1282		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
1283		length = req->se_cmd.data_length;
1284	}
1285
1286	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
1287		sg_miter_flags);
1288
1289	while (length || num_pte) {
1290		if (!length) {
1291			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
1292				be32_to_cpu(pte->segment_base_lo);
1293			length = be16_to_cpu(pte->segment_length);
1294
1295			pte++;
1296			num_pte--;
1297		}
1298
1299		sg_miter_next(&iter);
1300
1301		tfr_length = min3(length, max_payload, (int)iter.length);
1302
1303		/* FIXME: take page_size into account */
1304
1305		rcode = sbp_run_transaction(card, tcode, node_id,
1306				generation, speed,
1307				offset, iter.addr, tfr_length);
1308
1309		if (rcode != RCODE_COMPLETE)
1310			break;
1311
1312		length -= tfr_length;
1313		offset += tfr_length;
1314		iter.consumed = tfr_length;
1315	}
1316
1317	sg_miter_stop(&iter);
1318	fw_card_put(card);
1319
1320	if (rcode == RCODE_COMPLETE) {
1321		WARN_ON(length != 0);
1322		return 0;
1323	} else {
1324		return -EIO;
1325	}
1326}
1327
1328static int sbp_send_status(struct sbp_target_request *req)
1329{
1330	int rc, ret = 0, length;
1331	struct sbp_login_descriptor *login = req->login;
1332
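	/*
	 * STATUS_BLOCK_LEN() packed the status block size, in quadlets minus
	 * one, into bits 24-26 of the first quadlet; recover the byte count to
	 * write to the initiator's status FIFO.
	 */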
1333	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
1334
1335	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
1336			login->status_fifo_addr, &req->status, length);
1337	if (rc != RCODE_COMPLETE) {
1338		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
1339		ret = -EIO;
1340		goto put_ref;
1341	}
1342
1343	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
1344			req->orb_pointer);
1345	/*
1346	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
1347	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
1348	 * final se_cmd->cmd_kref put.
1349	 */
1350put_ref:
1351	target_put_sess_cmd(&req->se_cmd);
1352	return ret;
1353}
1354
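/*
 * Repack fixed-format SCSI sense data (plus the SCSI status byte) into the
 * sense layout of the SBP-2 status block. Descriptor-format sense is not
 * handled and is reported back as SBP_STATUS_REQUEST_ABORTED.
 */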
1355static void sbp_sense_mangle(struct sbp_target_request *req)
1356{
1357	struct se_cmd *se_cmd = &req->se_cmd;
1358	u8 *sense = req->sense_buf;
1359	u8 *status = req->status.data;
1360
1361	WARN_ON(se_cmd->scsi_sense_length < 18);
1362
1363	switch (sense[0] & 0x7f) { 		/* sfmt */
1364	case 0x70: /* current, fixed */
1365		status[0] = 0 << 6;
1366		break;
1367	case 0x71: /* deferred, fixed */
1368		status[0] = 1 << 6;
1369		break;
1370	case 0x72: /* current, descriptor */
1371	case 0x73: /* deferred, descriptor */
1372	default:
1373		/*
1374		 * TODO: SBP-3 specifies what we should do with descriptor
1375		 * format sense data
1376		 */
1377		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
1378			sense[0]);
1379		req->status.status |= cpu_to_be32(
1380			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1381			STATUS_BLOCK_DEAD(0) |
1382			STATUS_BLOCK_LEN(1) |
1383			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
1384		return;
1385	}
1386
1387	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
1388	status[1] =
1389		(sense[0] & 0x80) |		/* valid */
1390		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
1391		(sense[2] & 0x0f);		/* sense_key */
1392	status[2] = 0;				/* XXX sense_code */
1393	status[3] = 0;				/* XXX sense_qualifier */
1394
1395	/* information */
1396	status[4] = sense[3];
1397	status[5] = sense[4];
1398	status[6] = sense[5];
1399	status[7] = sense[6];
1400
1401	/* CDB-dependent */
1402	status[8] = sense[8];
1403	status[9] = sense[9];
1404	status[10] = sense[10];
1405	status[11] = sense[11];
1406
1407	/* fru */
1408	status[12] = sense[14];
1409
1410	/* sense_key-dependent */
1411	status[13] = sense[15];
1412	status[14] = sense[16];
1413	status[15] = sense[17];
1414
1415	req->status.status |= cpu_to_be32(
1416		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1417		STATUS_BLOCK_DEAD(0) |
1418		STATUS_BLOCK_LEN(5) |
1419		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1420}
1421
1422static int sbp_send_sense(struct sbp_target_request *req)
1423{
1424	struct se_cmd *se_cmd = &req->se_cmd;
1425
1426	if (se_cmd->scsi_sense_length) {
1427		sbp_sense_mangle(req);
1428	} else {
1429		req->status.status |= cpu_to_be32(
1430			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1431			STATUS_BLOCK_DEAD(0) |
1432			STATUS_BLOCK_LEN(1) |
1433			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1434	}
1435
1436	return sbp_send_status(req);
1437}
1438
1439static void sbp_free_request(struct sbp_target_request *req)
1440{
1441	struct se_cmd *se_cmd = &req->se_cmd;
1442	struct se_session *se_sess = se_cmd->se_sess;
1443
1444	kfree(req->pg_tbl);
1445	kfree(req->cmd_buf);
1446
1447	target_free_tag(se_sess, se_cmd);
1448}
1449
1450static void sbp_mgt_agent_process(struct work_struct *work)
1451{
1452	struct sbp_management_agent *agent =
1453		container_of(work, struct sbp_management_agent, work);
1454	struct sbp_management_request *req = agent->request;
1455	int ret;
1456	int status_data_len = 0;
1457
1458	/* fetch the ORB from the initiator */
1459	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
1460		req->node_addr, req->generation, req->speed,
1461		agent->orb_offset, &req->orb, sizeof(req->orb));
1462	if (ret != RCODE_COMPLETE) {
1463		pr_debug("mgt_orb fetch failed: %x\n", ret);
1464		goto out;
1465	}
1466
1467	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
1468		sbp2_pointer_to_addr(&req->orb.ptr1),
1469		sbp2_pointer_to_addr(&req->orb.ptr2),
1470		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
1471		sbp2_pointer_to_addr(&req->orb.status_fifo));
1472
1473	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
1474		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
1475		pr_err("mgt_orb bad request\n");
1476		goto out;
1477	}
1478
1479	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
1480	case MANAGEMENT_ORB_FUNCTION_LOGIN:
1481		sbp_management_request_login(agent, req, &status_data_len);
1482		break;
1483
1484	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
1485		sbp_management_request_query_logins(agent, req,
1486				&status_data_len);
1487		break;
1488
1489	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
1490		sbp_management_request_reconnect(agent, req, &status_data_len);
1491		break;
1492
1493	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
1494		pr_notice("SET PASSWORD not implemented\n");
1495
1496		req->status.status = cpu_to_be32(
1497			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1498			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1499
1500		break;
1501
1502	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
1503		sbp_management_request_logout(agent, req, &status_data_len);
1504		break;
1505
1506	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
1507		pr_notice("ABORT TASK not implemented\n");
1508
1509		req->status.status = cpu_to_be32(
1510			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1511			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1512
1513		break;
1514
1515	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
1516		pr_notice("ABORT TASK SET not implemented\n");
1517
1518		req->status.status = cpu_to_be32(
1519			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1520			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1521
1522		break;
1523
1524	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
1525		pr_notice("LOGICAL UNIT RESET not implemented\n");
1526
1527		req->status.status = cpu_to_be32(
1528			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1529			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1530
1531		break;
1532
1533	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
1534		pr_notice("TARGET RESET not implemented\n");
1535
1536		req->status.status = cpu_to_be32(
1537			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1538			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1539
1540		break;
1541
1542	default:
1543		pr_notice("unknown management function 0x%x\n",
1544			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
1545
1546		req->status.status = cpu_to_be32(
1547			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1548			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1549
1550		break;
1551	}
1552
1553	req->status.status |= cpu_to_be32(
1554		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
1555		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
1556		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
1557	req->status.orb_low = cpu_to_be32(agent->orb_offset);
1558
1559	/* write the status block back to the initiator */
1560	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
1561		req->node_addr, req->generation, req->speed,
1562		sbp2_pointer_to_addr(&req->orb.status_fifo),
1563		&req->status, 8 + status_data_len);
1564	if (ret != RCODE_COMPLETE) {
1565		pr_debug("mgt_orb status write failed: %x\n", ret);
1566		goto out;
1567	}
1568
1569out:
1570	fw_card_put(req->card);
1571	kfree(req);
1572
1573	spin_lock_bh(&agent->lock);
1574	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1575	spin_unlock_bh(&agent->lock);
1576}
1577
1578static void sbp_mgt_agent_rw(struct fw_card *card,
1579	struct fw_request *request, int tcode, int destination, int source,
1580	int generation, unsigned long long offset, void *data, size_t length,
1581	void *callback_data)
1582{
1583	struct sbp_management_agent *agent = callback_data;
1584	struct sbp2_pointer *ptr = data;
1585	int rcode = RCODE_ADDRESS_ERROR;
1586
1587	if (!agent->tport->enable)
1588		goto out;
1589
1590	if ((offset != agent->handler.offset) || (length != 8))
1591		goto out;
1592
1593	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
1594		struct sbp_management_request *req;
1595		int prev_state;
1596
1597		spin_lock_bh(&agent->lock);
1598		prev_state = agent->state;
1599		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
1600		spin_unlock_bh(&agent->lock);
1601
1602		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
1603			pr_notice("ignoring management request while busy\n");
1604			rcode = RCODE_CONFLICT_ERROR;
1605			goto out;
1606		}
1607		req = kzalloc(sizeof(*req), GFP_ATOMIC);
1608		if (!req) {
1609			rcode = RCODE_CONFLICT_ERROR;
1610			goto out;
1611		}
1612
1613		req->card = fw_card_get(card);
1614		req->generation = generation;
1615		req->node_addr = source;
1616		req->speed = fw_get_request_speed(request);
1617
1618		agent->orb_offset = sbp2_pointer_to_addr(ptr);
1619		agent->request = req;
1620
1621		queue_work(system_unbound_wq, &agent->work);
1622		rcode = RCODE_COMPLETE;
1623	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
1624		addr_to_sbp2_pointer(agent->orb_offset, ptr);
1625		rcode = RCODE_COMPLETE;
1626	} else {
1627		rcode = RCODE_TYPE_ERROR;
1628	}
1629
1630out:
1631	fw_send_response(card, request, rcode);
1632}
1633
1634static struct sbp_management_agent *sbp_management_agent_register(
1635		struct sbp_tport *tport)
1636{
1637	int ret;
1638	struct sbp_management_agent *agent;
1639
1640	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1641	if (!agent)
1642		return ERR_PTR(-ENOMEM);
1643
1644	spin_lock_init(&agent->lock);
1645	agent->tport = tport;
1646	agent->handler.length = 0x08;
1647	agent->handler.address_callback = sbp_mgt_agent_rw;
1648	agent->handler.callback_data = agent;
1649	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1650	INIT_WORK(&agent->work, sbp_mgt_agent_process);
1651	agent->orb_offset = 0;
1652	agent->request = NULL;
1653
1654	ret = fw_core_add_address_handler(&agent->handler,
1655			&sbp_register_region);
1656	if (ret < 0) {
1657		kfree(agent);
1658		return ERR_PTR(ret);
1659	}
1660
1661	return agent;
1662}
1663
1664static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
1665{
1666	fw_core_remove_address_handler(&agent->handler);
1667	cancel_work_sync(&agent->work);
1668	kfree(agent);
1669}
1670
1671static int sbp_check_true(struct se_portal_group *se_tpg)
1672{
1673	return 1;
1674}
1675
1676static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1677{
1678	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1679	struct sbp_tport *tport = tpg->tport;
1680
1681	return &tport->tport_name[0];
1682}
1683
1684static u16 sbp_get_tag(struct se_portal_group *se_tpg)
1685{
1686	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1687	return tpg->tport_tpgt;
1688}
1689
1690static void sbp_release_cmd(struct se_cmd *se_cmd)
1691{
1692	struct sbp_target_request *req = container_of(se_cmd,
1693			struct sbp_target_request, se_cmd);
1694
1695	sbp_free_request(req);
1696}
1697
1698static int sbp_write_pending(struct se_cmd *se_cmd)
1699{
1700	struct sbp_target_request *req = container_of(se_cmd,
1701			struct sbp_target_request, se_cmd);
1702	int ret;
1703
1704	ret = sbp_rw_data(req);
1705	if (ret) {
1706		req->status.status |= cpu_to_be32(
1707			STATUS_BLOCK_RESP(
1708				STATUS_RESP_TRANSPORT_FAILURE) |
1709			STATUS_BLOCK_DEAD(0) |
1710			STATUS_BLOCK_LEN(1) |
1711			STATUS_BLOCK_SBP_STATUS(
1712				SBP_STATUS_UNSPECIFIED_ERROR));
1713		sbp_send_status(req);
1714		return ret;
1715	}
1716
1717	target_execute_cmd(se_cmd);
1718	return 0;
1719}
1720
1721static int sbp_queue_data_in(struct se_cmd *se_cmd)
1722{
1723	struct sbp_target_request *req = container_of(se_cmd,
1724			struct sbp_target_request, se_cmd);
1725	int ret;
1726
1727	ret = sbp_rw_data(req);
1728	if (ret) {
1729		req->status.status |= cpu_to_be32(
1730			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1731			STATUS_BLOCK_DEAD(0) |
1732			STATUS_BLOCK_LEN(1) |
1733			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1734		sbp_send_status(req);
1735		return ret;
1736	}
1737
1738	return sbp_send_sense(req);
1739}
1740
1741/*
1742 * Called after command (no data transfer) or after the write (to device)
1743 * operation is completed
1744 */
1745static int sbp_queue_status(struct se_cmd *se_cmd)
1746{
1747	struct sbp_target_request *req = container_of(se_cmd,
1748			struct sbp_target_request, se_cmd);
1749
1750	return sbp_send_sense(req);
1751}
1752
1753static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1754{
1755}
1756
1757static void sbp_aborted_task(struct se_cmd *se_cmd)
1758{
1759	return;
1760}
1761
1762static int sbp_check_stop_free(struct se_cmd *se_cmd)
1763{
1764	struct sbp_target_request *req = container_of(se_cmd,
1765			struct sbp_target_request, se_cmd);
1766
1767	return transport_generic_free_cmd(&req->se_cmd, 0);
1768}
1769
1770static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
1771{
1772	struct se_lun *lun;
1773	int count = 0;
1774
1775	rcu_read_lock();
1776	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
1777		count++;
1778	rcu_read_unlock();
1779
1780	return count;
1781}
1782
1783static int sbp_update_unit_directory(struct sbp_tport *tport)
1784{
1785	struct se_lun *lun;
1786	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
1787	u32 *data;
1788
1789	if (tport->unit_directory.data) {
1790		fw_core_remove_descriptor(&tport->unit_directory);
1791		kfree(tport->unit_directory.data);
1792		tport->unit_directory.data = NULL;
1793	}
1794
1795	if (!tport->enable || !tport->tpg)
1796		return 0;
1797
1798	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
1799
1800	/*
1801	 * Number of entries in the final unit directory:
1802	 *  - all of those in the template
1803	 *  - management_agent
1804	 *  - unit_characteristics
1805	 *  - reconnect_timeout
1806	 *  - unit unique ID
1807	 *  - one for each LUN
1808	 *
1809	 *  MUST NOT include leaf or sub-directory entries
1810	 */
1811	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
1812
1813	if (tport->directory_id != -1)
1814		num_entries++;
1815
1816	/* allocate num_entries + 4 for the header and unique ID leaf */
1817	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
1818	if (!data)
1819		return -ENOMEM;
1820
1821	/* directory_length */
1822	data[idx++] = num_entries << 16;
1823
1824	/* directory_id */
1825	if (tport->directory_id != -1)
1826		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
1827
1828	/* unit directory template */
1829	memcpy(&data[idx], sbp_unit_directory_template,
1830			sizeof(sbp_unit_directory_template));
1831	idx += ARRAY_SIZE(sbp_unit_directory_template);
1832
1833	/* management_agent */
1834	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
1835	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
1836
1837	/* unit_characteristics */
1838	data[idx++] = 0x3a000000 |
1839		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
1840		SBP_ORB_FETCH_SIZE;
1841
1842	/* reconnect_timeout */
1843	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
1844
1845	/* unit unique ID (leaf is just after LUNs) */
1846	data[idx++] = 0x8d000000 | (num_luns + 1);
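	/*
	 * 0x8d is a leaf-offset entry: its value is the distance in quadlets
	 * from this entry to the EUI-64 leaf, i.e. one quadlet per LUN entry
	 * that follows plus one.
	 */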
1847
1848	rcu_read_lock();
1849	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
1850		struct se_device *dev;
1851		int type;
1852		/*
1853		 * rcu_dereference_raw protected by se_lun->lun_group symlink
1854		 * reference to se_device->dev_group.
1855		 */
1856		dev = rcu_dereference_raw(lun->lun_se_dev);
1857		type = dev->transport->get_device_type(dev);
1858
1859		/* logical_unit_number */
1860		data[idx++] = 0x14000000 |
1861			((type << 16) & 0x1f0000) |
1862			(lun->unpacked_lun & 0xffff);
1863	}
1864	rcu_read_unlock();
1865
1866	/* unit unique ID leaf */
1867	data[idx++] = 2 << 16;
1868	data[idx++] = tport->guid >> 32;
1869	data[idx++] = tport->guid;
1870
1871	tport->unit_directory.length = idx;
1872	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
1873	tport->unit_directory.data = data;
1874
1875	ret = fw_core_add_descriptor(&tport->unit_directory);
1876	if (ret < 0) {
1877		kfree(tport->unit_directory.data);
1878		tport->unit_directory.data = NULL;
1879	}
1880
1881	return ret;
1882}
1883
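/*
 * Parse a 16-hex-digit WWN/GUID string: for example "0123456789abcdef"
 * (an optional trailing newline is ignored) yields 0x0123456789abcdef, while
 * anything that is not exactly 16 hex digits fails and returns -1.
 */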
1884static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
1885{
1886	const char *cp;
1887	char c, nibble;
1888	int pos = 0, err;
1889
1890	*wwn = 0;
1891	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
1892		c = *cp;
1893		if (c == '\n' && cp[1] == '\0')
1894			continue;
1895		if (c == '\0') {
1896			err = 2;
1897			if (pos != 16)
1898				goto fail;
1899			return cp - name;
1900		}
1901		err = 3;
1902		if (isdigit(c))
1903			nibble = c - '0';
1904		else if (isxdigit(c))
1905			nibble = tolower(c) - 'a' + 10;
1906		else
1907			goto fail;
1908		*wwn = (*wwn << 4) | nibble;
1909		pos++;
1910	}
1911	err = 4;
1912fail:
1913	printk(KERN_INFO "err %u len %zu pos %u\n",
1914			err, cp - name, pos);
1915	return -1;
1916}
1917
1918static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
1919{
1920	return snprintf(buf, len, "%016llx", wwn);
1921}
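
/*
 * Usage sketch (editorial illustration, not part of the driver): a WWN
 * name is exactly 16 hex digits (the EUI-64 GUID of the port), with an
 * optional trailing newline.  Anything shorter, longer or containing a
 * non-hex character makes sbp_parse_wwn() return -1.
 */
static int __maybe_unused sbp_example_wwn_roundtrip(void)
{
	char buf[SBP_NAMELEN];
	u64 guid;

	if (sbp_parse_wwn("0001020304050607", &guid) < 0)
		return -EINVAL;

	/* guid == 0x0001020304050607ULL; format it back into buf */
	sbp_format_wwn(buf, sizeof(buf), guid);

	return 0;
}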
1922
1923static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1924{
1925	u64 guid = 0;
1926
1927	if (sbp_parse_wwn(name, &guid) < 0)
1928		return -EINVAL;
1929	return 0;
1930}
1931
1932static int sbp_post_link_lun(
1933		struct se_portal_group *se_tpg,
1934		struct se_lun *se_lun)
1935{
1936	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1937
1938	return sbp_update_unit_directory(tpg->tport);
1939}
1940
1941static void sbp_pre_unlink_lun(
1942		struct se_portal_group *se_tpg,
1943		struct se_lun *se_lun)
1944{
1945	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1946	struct sbp_tport *tport = tpg->tport;
1947	int ret;
1948
1949	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
1950		tport->enable = 0;
1951
1952	ret = sbp_update_unit_directory(tport);
1953	if (ret < 0)
1954		pr_err("unlink LUN: failed to update unit directory\n");
1955}
1956
1957static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
1958					    const char *name)
1959{
1960	struct sbp_tport *tport =
1961		container_of(wwn, struct sbp_tport, tport_wwn);
1962
1963	struct sbp_tpg *tpg;
1964	unsigned long tpgt;
1965	int ret;
1966
1967	if (strstr(name, "tpgt_") != name)
1968		return ERR_PTR(-EINVAL);
1969	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1970		return ERR_PTR(-EINVAL);
1971
1972	if (tport->tpg) {
1973		pr_err("Only one TPG per Unit is possible.\n");
1974		return ERR_PTR(-EBUSY);
1975	}
1976
1977	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
1978	if (!tpg)
1979		return ERR_PTR(-ENOMEM);
1980
1981	tpg->tport = tport;
1982	tpg->tport_tpgt = tpgt;
1983	tport->tpg = tpg;
1984
1985	/* default attribute values */
1986	tport->enable = 0;
1987	tport->directory_id = -1;
1988	tport->mgt_orb_timeout = 15;
1989	tport->max_reconnect_timeout = 5;
1990	tport->max_logins_per_lun = 1;
1991
1992	tport->mgt_agt = sbp_management_agent_register(tport);
1993	if (IS_ERR(tport->mgt_agt)) {
1994		ret = PTR_ERR(tport->mgt_agt);
1995		goto out_free_tpg;
1996	}
1997
1998	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
1999	if (ret < 0)
2000		goto out_unreg_mgt_agt;
2001
2002	return &tpg->se_tpg;
2003
2004out_unreg_mgt_agt:
2005	sbp_management_agent_unregister(tport->mgt_agt);
2006out_free_tpg:
2007	tport->tpg = NULL;
2008	kfree(tpg);
2009	return ERR_PTR(ret);
2010}
2011
2012static void sbp_drop_tpg(struct se_portal_group *se_tpg)
2013{
2014	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2015	struct sbp_tport *tport = tpg->tport;
2016
2017	core_tpg_deregister(se_tpg);
2018	sbp_management_agent_unregister(tport->mgt_agt);
2019	tport->tpg = NULL;
2020	kfree(tpg);
2021}
2022
2023static struct se_wwn *sbp_make_tport(
2024		struct target_fabric_configfs *tf,
2025		struct config_group *group,
2026		const char *name)
2027{
2028	struct sbp_tport *tport;
2029	u64 guid = 0;
2030
2031	if (sbp_parse_wwn(name, &guid) < 0)
2032		return ERR_PTR(-EINVAL);
2033
2034	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2035	if (!tport)
2036		return ERR_PTR(-ENOMEM);
2037
2038	tport->guid = guid;
2039	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2040
2041	return &tport->tport_wwn;
2042}
2043
2044static void sbp_drop_tport(struct se_wwn *wwn)
2045{
2046	struct sbp_tport *tport =
2047		container_of(wwn, struct sbp_tport, tport_wwn);
2048
2049	kfree(tport);
2050}
2051
2052static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
2053{
2054	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
2055}
2056
2057CONFIGFS_ATTR_RO(sbp_wwn_, version);
2058
2059static struct configfs_attribute *sbp_wwn_attrs[] = {
2060	&sbp_wwn_attr_version,
2061	NULL,
2062};
2063
2064static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
2065{
2066	struct se_portal_group *se_tpg = to_tpg(item);
2067	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2068	struct sbp_tport *tport = tpg->tport;
2069
2070	if (tport->directory_id == -1)
2071		return sprintf(page, "implicit\n");
2072	else
2073		return sprintf(page, "%06x\n", tport->directory_id);
2074}
2075
2076static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
2077		const char *page, size_t count)
2078{
2079	struct se_portal_group *se_tpg = to_tpg(item);
2080	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2081	struct sbp_tport *tport = tpg->tport;
2082	unsigned long val;
2083
2084	if (tport->enable) {
2085		pr_err("Cannot change the directory_id on an active target.\n");
2086		return -EBUSY;
2087	}
2088
2089	if (strstr(page, "implicit") == page) {
2090		tport->directory_id = -1;
2091	} else {
2092		if (kstrtoul(page, 16, &val) < 0)
2093			return -EINVAL;
2094		if (val > 0xffffff)
2095			return -EINVAL;
2096
2097		tport->directory_id = val;
2098	}
2099
2100	return count;
2101}
2102
2103static int sbp_enable_tpg(struct se_portal_group *se_tpg, bool enable)
2104{
2105	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2106	struct sbp_tport *tport = tpg->tport;
2107	int ret;
2108
2109	if (enable) {
2110		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
2111			pr_err("Cannot enable a target with no LUNs!\n");
2112			return -EINVAL;
2113		}
2114	} else {
2115		/* XXX: force-shutdown sessions instead? */
2116		spin_lock_bh(&se_tpg->session_lock);
2117		if (!list_empty(&se_tpg->tpg_sess_list)) {
2118			spin_unlock_bh(&se_tpg->session_lock);
2119			return -EBUSY;
2120		}
2121		spin_unlock_bh(&se_tpg->session_lock);
2122	}
2123
2124	tport->enable = enable;
2125
2126	ret = sbp_update_unit_directory(tport);
2127	if (ret < 0) {
2128		pr_err("Could not update Config ROM\n");
2129		return ret;
2130	}
2131
2132	return 0;
2133}
2134
2135CONFIGFS_ATTR(sbp_tpg_, directory_id);
2136
2137static struct configfs_attribute *sbp_tpg_base_attrs[] = {
2138	&sbp_tpg_attr_directory_id,
2139	NULL,
2140};
2141
2142static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
2143		char *page)
2144{
2145	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2146	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2147	struct sbp_tport *tport = tpg->tport;
2148	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
2149}
2150
2151static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
2152		const char *page, size_t count)
2153{
2154	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2155	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2156	struct sbp_tport *tport = tpg->tport;
2157	unsigned long val;
2158	int ret;
2159
2160	if (kstrtoul(page, 0, &val) < 0)
2161		return -EINVAL;
2162	if ((val < 1) || (val > 127))
2163		return -EINVAL;
2164
2165	if (tport->mgt_orb_timeout == val)
2166		return count;
2167
2168	tport->mgt_orb_timeout = val;
2169
2170	ret = sbp_update_unit_directory(tport);
2171	if (ret < 0)
2172		return ret;
2173
2174	return count;
2175}
2176
2177static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
2178		char *page)
2179{
2180	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2181	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2182	struct sbp_tport *tport = tpg->tport;
2183	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
2184}
2185
2186static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
2187		const char *page, size_t count)
2188{
2189	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2190	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2191	struct sbp_tport *tport = tpg->tport;
2192	unsigned long val;
2193	int ret;
2194
2195	if (kstrtoul(page, 0, &val) < 0)
2196		return -EINVAL;
2197	if ((val < 1) || (val > 32767))
2198		return -EINVAL;
2199
2200	if (tport->max_reconnect_timeout == val)
2201		return count;
2202
2203	tport->max_reconnect_timeout = val;
2204
2205	ret = sbp_update_unit_directory(tport);
2206	if (ret < 0)
2207		return ret;
2208
2209	return count;
2210}
2211
2212static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
2213		char *page)
2214{
2215	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2216	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2217	struct sbp_tport *tport = tpg->tport;
2218	return sprintf(page, "%d\n", tport->max_logins_per_lun);
2219}
2220
2221static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
2222		const char *page, size_t count)
2223{
2224	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2225	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2226	struct sbp_tport *tport = tpg->tport;
2227	unsigned long val;
2228
2229	if (kstrtoul(page, 0, &val) < 0)
2230		return -EINVAL;
2231	if ((val < 1) || (val > 127))
2232		return -EINVAL;
2233
2234	/* XXX: also check against current count? */
2235
2236	tport->max_logins_per_lun = val;
2237
2238	return count;
2239}
2240
2241CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
2242CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
2243CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);
2244
2245static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2246	&sbp_tpg_attrib_attr_mgt_orb_timeout,
2247	&sbp_tpg_attrib_attr_max_reconnect_timeout,
2248	&sbp_tpg_attrib_attr_max_logins_per_lun,
2249	NULL,
2250};
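
/*
 * Editorial sketch (not part of the driver): the three attribute store
 * handlers above share one pattern: parse with kstrtoul(), check the
 * allowed range, then apply the new value.  A hypothetical helper
 * capturing that pattern could look like this:
 */
static int __maybe_unused sbp_example_parse_bounded(const char *page,
		unsigned long min_val, unsigned long max_val,
		unsigned long *out)
{
	unsigned long val;

	/* base 0: accept decimal, hex (0x...) or octal, as kstrtoul() does */
	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if (val < min_val || val > max_val)
		return -EINVAL;

	*out = val;
	return 0;
}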
2251
2252static const struct target_core_fabric_ops sbp_ops = {
2253	.module				= THIS_MODULE,
2254	.fabric_name			= "sbp",
2255	.tpg_get_wwn			= sbp_get_fabric_wwn,
2256	.tpg_get_tag			= sbp_get_tag,
2257	.tpg_check_demo_mode		= sbp_check_true,
2258	.tpg_check_demo_mode_cache	= sbp_check_true,
2259	.release_cmd			= sbp_release_cmd,
2260	.write_pending			= sbp_write_pending,
2261	.queue_data_in			= sbp_queue_data_in,
2262	.queue_status			= sbp_queue_status,
2263	.queue_tm_rsp			= sbp_queue_tm_rsp,
2264	.aborted_task			= sbp_aborted_task,
2265	.check_stop_free		= sbp_check_stop_free,
2266
2267	.fabric_make_wwn		= sbp_make_tport,
2268	.fabric_drop_wwn		= sbp_drop_tport,
2269	.fabric_make_tpg		= sbp_make_tpg,
2270	.fabric_enable_tpg		= sbp_enable_tpg,
2271	.fabric_drop_tpg		= sbp_drop_tpg,
2272	.fabric_post_link		= sbp_post_link_lun,
2273	.fabric_pre_unlink		= sbp_pre_unlink_lun,
2274	.fabric_make_np			= NULL,
2275	.fabric_drop_np			= NULL,
2276	.fabric_init_nodeacl		= sbp_init_nodeacl,
2277
2278	.tfc_wwn_attrs			= sbp_wwn_attrs,
2279	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
2280	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
2281
2282	.default_submit_type		= TARGET_DIRECT_SUBMIT,
2283	.direct_submit_supp		= 1,
2284};
2285
2286static int __init sbp_init(void)
2287{
2288	return target_register_template(&sbp_ops);
2289}
2290
2291static void __exit sbp_exit(void)
2292{
2293	target_unregister_template(&sbp_ops);
2294}
2295
2296MODULE_DESCRIPTION("FireWire SBP fabric driver");
2297MODULE_LICENSE("GPL");
2298module_init(sbp_init);
2299module_exit(sbp_exit);
 112		int exclusive)
 113{
 114	struct se_session *se_sess;
 115	struct sbp_session *sess;
 116	struct sbp_login_descriptor *login;
 117	int count = 0;
 118
 119	spin_lock_bh(&tpg->se_tpg.session_lock);
 120	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 121		sess = se_sess->fabric_sess_ptr;
 122
 123		spin_lock_bh(&sess->lock);
 124		list_for_each_entry(login, &sess->login_list, link) {
 125			if (login->login_lun != unpacked_lun)
 126				continue;
 127
 128			if (!exclusive || login->exclusive)
 129				count++;
 130		}
 131		spin_unlock_bh(&sess->lock);
 132	}
 133	spin_unlock_bh(&tpg->se_tpg.session_lock);
 134
 135	return count;
 136}
 137
 138static struct sbp_login_descriptor *sbp_login_find_by_id(
 139	struct sbp_tpg *tpg, int login_id)
 140{
 141	struct se_session *se_sess;
 142	struct sbp_session *sess;
 143	struct sbp_login_descriptor *login, *found = NULL;
 144
 145	spin_lock_bh(&tpg->se_tpg.session_lock);
 146	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
 147		sess = se_sess->fabric_sess_ptr;
 148
 149		spin_lock_bh(&sess->lock);
 150		list_for_each_entry(login, &sess->login_list, link) {
 151			if (login->login_id == login_id)
 152				found = login;
 153		}
 154		spin_unlock_bh(&sess->lock);
 155	}
 156	spin_unlock_bh(&tpg->se_tpg.session_lock);
 157
 158	return found;
 159}
 160
 161static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
 162{
 163	struct se_portal_group *se_tpg = &tpg->se_tpg;
 164	struct se_lun *se_lun;
 165
 166	rcu_read_lock();
 167	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
 168		if (se_lun->unpacked_lun == login_lun) {
 169			rcu_read_unlock();
 170			*err = 0;
 171			return login_lun;
 172		}
 173	}
 174	rcu_read_unlock();
 175
 176	*err = -ENODEV;
 177	return login_lun;
 178}
 179
 180static struct sbp_session *sbp_session_create(
 181		struct sbp_tpg *tpg,
 182		u64 guid)
 183{
 184	struct sbp_session *sess;
 185	int ret;
 186	char guid_str[17];
 187
 188	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
 189
 190	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
 191	if (!sess)
 192		return ERR_PTR(-ENOMEM);
 193
 194	spin_lock_init(&sess->lock);
 195	INIT_LIST_HEAD(&sess->login_list);
 196	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
 197	sess->guid = guid;
 198
 199	sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
 200					     sizeof(struct sbp_target_request),
 201					     TARGET_PROT_NORMAL, guid_str,
 202					     sess, NULL);
 203	if (IS_ERR(sess->se_sess)) {
 204		pr_err("failed to init se_session\n");
 205		ret = PTR_ERR(sess->se_sess);
 206		kfree(sess);
 207		return ERR_PTR(ret);
 208	}
 209
 210	return sess;
 211}
 212
 213static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
 214{
 215	spin_lock_bh(&sess->lock);
 216	if (!list_empty(&sess->login_list)) {
 217		spin_unlock_bh(&sess->lock);
 218		return;
 219	}
 220	spin_unlock_bh(&sess->lock);
 221
 222	if (cancel_work)
 223		cancel_delayed_work_sync(&sess->maint_work);
 224
 225	target_remove_session(sess->se_sess);
 226
 227	if (sess->card)
 228		fw_card_put(sess->card);
 229
 230	kfree(sess);
 231}
 232
 233static void sbp_target_agent_unregister(struct sbp_target_agent *);
 234
 235static void sbp_login_release(struct sbp_login_descriptor *login,
 236	bool cancel_work)
 237{
 238	struct sbp_session *sess = login->sess;
 239
 240	/* FIXME: abort/wait on tasks */
 241
 242	sbp_target_agent_unregister(login->tgt_agt);
 243
 244	if (sess) {
 245		spin_lock_bh(&sess->lock);
 246		list_del(&login->link);
 247		spin_unlock_bh(&sess->lock);
 248
 249		sbp_session_release(sess, cancel_work);
 250	}
 251
 252	kfree(login);
 253}
 254
 255static struct sbp_target_agent *sbp_target_agent_register(
 256	struct sbp_login_descriptor *);
 257
 258static void sbp_management_request_login(
 259	struct sbp_management_agent *agent, struct sbp_management_request *req,
 260	int *status_data_size)
 261{
 262	struct sbp_tport *tport = agent->tport;
 263	struct sbp_tpg *tpg = tport->tpg;
 264	struct sbp_session *sess;
 265	struct sbp_login_descriptor *login;
 266	struct sbp_login_response_block *response;
 267	u64 guid;
 268	u32 unpacked_lun;
 269	int login_response_len, ret;
 270
 271	unpacked_lun = sbp_get_lun_from_tpg(tpg,
 272			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
 273	if (ret) {
 274		pr_notice("login to unknown LUN: %d\n",
 275			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
 276
 277		req->status.status = cpu_to_be32(
 278			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 279			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
 280		return;
 281	}
 282
 283	ret = read_peer_guid(&guid, req);
 284	if (ret != RCODE_COMPLETE) {
 285		pr_warn("failed to read peer GUID: %d\n", ret);
 286
 287		req->status.status = cpu_to_be32(
 288			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 289			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 290		return;
 291	}
 292
 293	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
 294		unpacked_lun, guid);
 295
 296	sess = sbp_session_find_by_guid(tpg, guid);
 297	if (sess) {
 298		login = sbp_login_find_by_lun(sess, unpacked_lun);
 299		if (login) {
 300			pr_notice("initiator already logged-in\n");
 301
 302			/*
 303			 * SBP-2 R4 says we should return access denied, but
 304			 * that can confuse initiators. Instead we need to
 305			 * treat this like a reconnect, but send the login
 306			 * response block like a fresh login.
 307			 *
 308			 * This is required particularly in the case of Apple
 309			 * devices booting off the FireWire target, where
 310			 * the firmware has an active login to the target. When
 311			 * the OS takes control of the session it issues its own
 312			 * LOGIN rather than a RECONNECT. To avoid the machine
 313			 * waiting until the reconnect_hold expires, we can skip
 314			 * the ACCESS_DENIED errors to speed things up.
 315			 */
 316
 317			goto already_logged_in;
 318		}
 319	}
 320
 321	/*
 322	 * check exclusive bit in login request
 323	 * reject with access_denied if any logins present
 324	 */
 325	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
 326			sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
 327		pr_warn("refusing exclusive login with other active logins\n");
 328
 329		req->status.status = cpu_to_be32(
 330			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 331			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 332		return;
 333	}
 334
 335	/*
 336	 * check exclusive bit in any existing login descriptor
 337	 * reject with access_denied if any exclusive logins present
 338	 */
 339	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
 340		pr_warn("refusing login while another exclusive login present\n");
 341
 342		req->status.status = cpu_to_be32(
 343			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 344			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 345		return;
 346	}
 347
 348	/*
 349	 * check we haven't exceeded the number of allowed logins
 350	 * reject with resources_unavailable if we have
 351	 */
 352	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
 353			tport->max_logins_per_lun) {
 354		pr_warn("max number of logins reached\n");
 355
 356		req->status.status = cpu_to_be32(
 357			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 358			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 359		return;
 360	}
 361
 362	if (!sess) {
 363		sess = sbp_session_create(tpg, guid);
 364		if (IS_ERR(sess)) {
 365			switch (PTR_ERR(sess)) {
 366			case -EPERM:
 367				ret = SBP_STATUS_ACCESS_DENIED;
 368				break;
 369			default:
 370				ret = SBP_STATUS_RESOURCES_UNAVAIL;
 371				break;
 372			}
 373
 374			req->status.status = cpu_to_be32(
 375				STATUS_BLOCK_RESP(
 376					STATUS_RESP_REQUEST_COMPLETE) |
 377				STATUS_BLOCK_SBP_STATUS(ret));
 378			return;
 379		}
 380
 381		sess->node_id = req->node_addr;
 382		sess->card = fw_card_get(req->card);
 383		sess->generation = req->generation;
 384		sess->speed = req->speed;
 385
 386		schedule_delayed_work(&sess->maint_work,
 387				SESSION_MAINTENANCE_INTERVAL);
 388	}
 389
 390	/* only take the latest reconnect_hold into account */
 391	sess->reconnect_hold = min(
 392		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
 393		tport->max_reconnect_timeout) - 1;
 394
 395	login = kmalloc(sizeof(*login), GFP_KERNEL);
 396	if (!login) {
 397		pr_err("failed to allocate login descriptor\n");
 398
 399		sbp_session_release(sess, true);
 400
 401		req->status.status = cpu_to_be32(
 402			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 403			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 404		return;
 405	}
 406
 407	login->sess = sess;
 408	login->login_lun = unpacked_lun;
 409	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
 410	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
 411	login->login_id = atomic_inc_return(&login_id);
 412
 413	login->tgt_agt = sbp_target_agent_register(login);
 414	if (IS_ERR(login->tgt_agt)) {
 415		ret = PTR_ERR(login->tgt_agt);
 416		pr_err("failed to map command block handler: %d\n", ret);
 417
 418		sbp_session_release(sess, true);
 419		kfree(login);
 420
 421		req->status.status = cpu_to_be32(
 422			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 423			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 424		return;
 425	}
 426
 427	spin_lock_bh(&sess->lock);
 428	list_add_tail(&login->link, &sess->login_list);
 429	spin_unlock_bh(&sess->lock);
 430
 431already_logged_in:
 432	response = kzalloc(sizeof(*response), GFP_KERNEL);
 433	if (!response) {
 434		pr_err("failed to allocate login response block\n");
 435
 436		sbp_login_release(login, true);
 437
 438		req->status.status = cpu_to_be32(
 439			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 440			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
 441		return;
 442	}
 443
 444	login_response_len = clamp_val(
 445			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
 446			12, sizeof(*response));
 447	response->misc = cpu_to_be32(
 448		((login_response_len & 0xffff) << 16) |
 449		(login->login_id & 0xffff));
 450	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
 451	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
 452		&response->command_block_agent);
 453
 454	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
 455		sess->node_id, sess->generation, sess->speed,
 456		sbp2_pointer_to_addr(&req->orb.ptr2), response,
 457		login_response_len);
 458	if (ret != RCODE_COMPLETE) {
 459		pr_debug("failed to write login response block: %x\n", ret);
 460
 461		kfree(response);
 462		sbp_login_release(login, true);
 463
 464		req->status.status = cpu_to_be32(
 465			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 466			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 467		return;
 468	}
 469
 470	kfree(response);
 471
 472	req->status.status = cpu_to_be32(
 473		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 474		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 475}
 476
 477static void sbp_management_request_query_logins(
 478	struct sbp_management_agent *agent, struct sbp_management_request *req,
 479	int *status_data_size)
 480{
 481	pr_notice("QUERY LOGINS not implemented\n");
 482	/* FIXME: implement */
 483
 484	req->status.status = cpu_to_be32(
 485		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 486		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
 487}
 488
 489static void sbp_management_request_reconnect(
 490	struct sbp_management_agent *agent, struct sbp_management_request *req,
 491	int *status_data_size)
 492{
 493	struct sbp_tport *tport = agent->tport;
 494	struct sbp_tpg *tpg = tport->tpg;
 495	int ret;
 496	u64 guid;
 497	struct sbp_login_descriptor *login;
 498
 499	ret = read_peer_guid(&guid, req);
 500	if (ret != RCODE_COMPLETE) {
 501		pr_warn("failed to read peer GUID: %d\n", ret);
 502
 503		req->status.status = cpu_to_be32(
 504			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
 505			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 506		return;
 507	}
 508
 509	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
 510
 511	login = sbp_login_find_by_id(tpg,
 512		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
 513
 514	if (!login) {
 515		pr_err("mgt_agent RECONNECT unknown login ID\n");
 516
 517		req->status.status = cpu_to_be32(
 518			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 519			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 520		return;
 521	}
 522
 523	if (login->sess->guid != guid) {
 524		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
 525
 526		req->status.status = cpu_to_be32(
 527			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 528			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 529		return;
 530	}
 531
 532	spin_lock_bh(&login->sess->lock);
 533	if (login->sess->card)
 534		fw_card_put(login->sess->card);
 535
 536	/* update the node details */
 537	login->sess->generation = req->generation;
 538	login->sess->node_id = req->node_addr;
 539	login->sess->card = fw_card_get(req->card);
 540	login->sess->speed = req->speed;
 541	spin_unlock_bh(&login->sess->lock);
 542
 543	req->status.status = cpu_to_be32(
 544		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 545		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 546}
 547
 548static void sbp_management_request_logout(
 549	struct sbp_management_agent *agent, struct sbp_management_request *req,
 550	int *status_data_size)
 551{
 552	struct sbp_tport *tport = agent->tport;
 553	struct sbp_tpg *tpg = tport->tpg;
 554	int id;
 555	struct sbp_login_descriptor *login;
 556
 557	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
 558
 559	login = sbp_login_find_by_id(tpg, id);
 560	if (!login) {
 561		pr_warn("cannot find login: %d\n", id);
 562
 563		req->status.status = cpu_to_be32(
 564			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 565			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
 566		return;
 567	}
 568
 569	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
 570		login->login_lun, login->login_id);
 571
 572	if (req->node_addr != login->sess->node_id) {
 573		pr_warn("logout from different node ID\n");
 574
 575		req->status.status = cpu_to_be32(
 576			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 577			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
 578		return;
 579	}
 580
 581	sbp_login_release(login, true);
 582
 583	req->status.status = cpu_to_be32(
 584		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 585		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 586}
 587
 588static void session_check_for_reset(struct sbp_session *sess)
 589{
 590	bool card_valid = false;
 591
 592	spin_lock_bh(&sess->lock);
 593
 594	if (sess->card) {
 595		spin_lock_irq(&sess->card->lock);
 596		card_valid = (sess->card->local_node != NULL);
 597		spin_unlock_irq(&sess->card->lock);
 598
 599		if (!card_valid) {
 600			fw_card_put(sess->card);
 601			sess->card = NULL;
 602		}
 603	}
 604
 605	if (!card_valid || (sess->generation != sess->card->generation)) {
 606		pr_info("Waiting for reconnect from node: %016llx\n",
 607				sess->guid);
 608
 609		sess->node_id = -1;
 610		sess->reconnect_expires = get_jiffies_64() +
 611			((sess->reconnect_hold + 1) * HZ);
 612	}
 613
 614	spin_unlock_bh(&sess->lock);
 615}
 616
 617static void session_reconnect_expired(struct sbp_session *sess)
 618{
 619	struct sbp_login_descriptor *login, *temp;
 620	LIST_HEAD(login_list);
 621
 622	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
 623
 624	spin_lock_bh(&sess->lock);
 625	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
 626		login->sess = NULL;
 627		list_move_tail(&login->link, &login_list);
 628	}
 629	spin_unlock_bh(&sess->lock);
 630
 631	list_for_each_entry_safe(login, temp, &login_list, link) {
 632		list_del(&login->link);
 633		sbp_login_release(login, false);
 634	}
 635
 636	sbp_session_release(sess, false);
 637}
 638
 639static void session_maintenance_work(struct work_struct *work)
 640{
 641	struct sbp_session *sess = container_of(work, struct sbp_session,
 642			maint_work.work);
 643
 644	/* could be called while tearing down the session */
 645	spin_lock_bh(&sess->lock);
 646	if (list_empty(&sess->login_list)) {
 647		spin_unlock_bh(&sess->lock);
 648		return;
 649	}
 650	spin_unlock_bh(&sess->lock);
 651
 652	if (sess->node_id != -1) {
 653		/* check for bus reset and make node_id invalid */
 654		session_check_for_reset(sess);
 655
 656		schedule_delayed_work(&sess->maint_work,
 657				SESSION_MAINTENANCE_INTERVAL);
 658	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
 659		/* still waiting for reconnect */
 660		schedule_delayed_work(&sess->maint_work,
 661				SESSION_MAINTENANCE_INTERVAL);
 662	} else {
 663		/* reconnect timeout has expired */
 664		session_reconnect_expired(sess);
 665	}
 666}
 667
 668static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
 669		struct sbp_target_agent *agent)
 670{
 671	int state;
 672
 673	switch (tcode) {
 674	case TCODE_READ_QUADLET_REQUEST:
 675		pr_debug("tgt_agent AGENT_STATE READ\n");
 676
 677		spin_lock_bh(&agent->lock);
 678		state = agent->state;
 679		spin_unlock_bh(&agent->lock);
 680
 681		*(__be32 *)data = cpu_to_be32(state);
 682
 683		return RCODE_COMPLETE;
 684
 685	case TCODE_WRITE_QUADLET_REQUEST:
 686		/* ignored */
 687		return RCODE_COMPLETE;
 688
 689	default:
 690		return RCODE_TYPE_ERROR;
 691	}
 692}
 693
 694static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
 695		struct sbp_target_agent *agent)
 696{
 697	switch (tcode) {
 698	case TCODE_WRITE_QUADLET_REQUEST:
 699		pr_debug("tgt_agent AGENT_RESET\n");
 700		spin_lock_bh(&agent->lock);
 701		agent->state = AGENT_STATE_RESET;
 702		spin_unlock_bh(&agent->lock);
 703		return RCODE_COMPLETE;
 704
 705	default:
 706		return RCODE_TYPE_ERROR;
 707	}
 708}
 709
 710static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
 711		struct sbp_target_agent *agent)
 712{
 713	struct sbp2_pointer *ptr = data;
 714
 715	switch (tcode) {
 716	case TCODE_WRITE_BLOCK_REQUEST:
 717		spin_lock_bh(&agent->lock);
 718		if (agent->state != AGENT_STATE_SUSPENDED &&
 719				agent->state != AGENT_STATE_RESET) {
 720			spin_unlock_bh(&agent->lock);
 721			pr_notice("Ignoring ORB_POINTER write while active.\n");
 722			return RCODE_CONFLICT_ERROR;
 723		}
 724		agent->state = AGENT_STATE_ACTIVE;
 725		spin_unlock_bh(&agent->lock);
 726
 727		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
 728		agent->doorbell = false;
 729
 730		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
 731				agent->orb_pointer);
 732
 733		queue_work(system_unbound_wq, &agent->work);
 734
 735		return RCODE_COMPLETE;
 736
 737	case TCODE_READ_BLOCK_REQUEST:
 738		pr_debug("tgt_agent ORB_POINTER READ\n");
 739		spin_lock_bh(&agent->lock);
 740		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
 741		spin_unlock_bh(&agent->lock);
 742		return RCODE_COMPLETE;
 743
 744	default:
 745		return RCODE_TYPE_ERROR;
 746	}
 747}
 748
 749static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
 750		struct sbp_target_agent *agent)
 751{
 752	switch (tcode) {
 753	case TCODE_WRITE_QUADLET_REQUEST:
 754		spin_lock_bh(&agent->lock);
 755		if (agent->state != AGENT_STATE_SUSPENDED) {
 756			spin_unlock_bh(&agent->lock);
 757			pr_debug("Ignoring DOORBELL while active.\n");
 758			return RCODE_CONFLICT_ERROR;
 759		}
 760		agent->state = AGENT_STATE_ACTIVE;
 761		spin_unlock_bh(&agent->lock);
 762
 763		agent->doorbell = true;
 764
 765		pr_debug("tgt_agent DOORBELL\n");
 766
 767		queue_work(system_unbound_wq, &agent->work);
 768
 769		return RCODE_COMPLETE;
 770
 771	case TCODE_READ_QUADLET_REQUEST:
 772		return RCODE_COMPLETE;
 773
 774	default:
 775		return RCODE_TYPE_ERROR;
 776	}
 777}
 778
 779static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
 780		int tcode, void *data, struct sbp_target_agent *agent)
 781{
 782	switch (tcode) {
 783	case TCODE_WRITE_QUADLET_REQUEST:
 784		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
 785		/* ignored as we don't send unsolicited status */
 786		return RCODE_COMPLETE;
 787
 788	case TCODE_READ_QUADLET_REQUEST:
 789		return RCODE_COMPLETE;
 790
 791	default:
 792		return RCODE_TYPE_ERROR;
 793	}
 794}
 795
 796static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
 797		int tcode, int destination, int source, int generation,
 798		unsigned long long offset, void *data, size_t length,
 799		void *callback_data)
 800{
 801	struct sbp_target_agent *agent = callback_data;
 802	struct sbp_session *sess = agent->login->sess;
 803	int sess_gen, sess_node, rcode;
 804
 805	spin_lock_bh(&sess->lock);
 806	sess_gen = sess->generation;
 807	sess_node = sess->node_id;
 808	spin_unlock_bh(&sess->lock);
 809
 810	if (generation != sess_gen) {
 811		pr_notice("ignoring request with wrong generation\n");
 812		rcode = RCODE_TYPE_ERROR;
 813		goto out;
 814	}
 815
 816	if (source != sess_node) {
 817		pr_notice("ignoring request from foreign node (%x != %x)\n",
 818				source, sess_node);
 819		rcode = RCODE_TYPE_ERROR;
 820		goto out;
 821	}
 822
 823	/* turn offset into the offset from the start of the block */
 824	offset -= agent->handler.offset;
 825
 826	if (offset == 0x00 && length == 4) {
 827		/* AGENT_STATE */
 828		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
 829	} else if (offset == 0x04 && length == 4) {
 830		/* AGENT_RESET */
 831		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
 832	} else if (offset == 0x08 && length == 8) {
 833		/* ORB_POINTER */
 834		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
 835	} else if (offset == 0x10 && length == 4) {
 836		/* DOORBELL */
 837		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
 838	} else if (offset == 0x14 && length == 4) {
 839		/* UNSOLICITED_STATUS_ENABLE */
 840		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
 841				data, agent);
 842	} else {
 843		rcode = RCODE_ADDRESS_ERROR;
 844	}
 845
 846out:
 847	fw_send_response(card, request, rcode);
 848}
 849
 850static void sbp_handle_command(struct sbp_target_request *);
 851static int sbp_send_status(struct sbp_target_request *);
 852static void sbp_free_request(struct sbp_target_request *);
 853
 854static void tgt_agent_process_work(struct work_struct *work)
 855{
 856	struct sbp_target_request *req =
 857		container_of(work, struct sbp_target_request, work);
 858
 859	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
 860			req->orb_pointer,
 861			sbp2_pointer_to_addr(&req->orb.next_orb),
 862			sbp2_pointer_to_addr(&req->orb.data_descriptor),
 863			be32_to_cpu(req->orb.misc));
 864
 865	if (req->orb_pointer >> 32)
 866		pr_debug("ORB with high bits set\n");
 867
 868	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
 869		case 0:/* Format specified by this standard */
 870			sbp_handle_command(req);
 871			return;
 872		case 1: /* Reserved for future standardization */
 873		case 2: /* Vendor-dependent */
 874			req->status.status |= cpu_to_be32(
 875					STATUS_BLOCK_RESP(
 876						STATUS_RESP_REQUEST_COMPLETE) |
 877					STATUS_BLOCK_DEAD(0) |
 878					STATUS_BLOCK_LEN(1) |
 879					STATUS_BLOCK_SBP_STATUS(
 880						SBP_STATUS_REQ_TYPE_NOTSUPP));
 881			sbp_send_status(req);
 882			return;
 883		case 3: /* Dummy ORB */
 884			req->status.status |= cpu_to_be32(
 885					STATUS_BLOCK_RESP(
 886						STATUS_RESP_REQUEST_COMPLETE) |
 887					STATUS_BLOCK_DEAD(0) |
 888					STATUS_BLOCK_LEN(1) |
 889					STATUS_BLOCK_SBP_STATUS(
 890						SBP_STATUS_DUMMY_ORB_COMPLETE));
 891			sbp_send_status(req);
 892			return;
 893		default:
 894			BUG();
 895	}
 896}
 897
 898/* used to double-check we haven't been issued an AGENT_RESET */
 899static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
 900{
 901	bool active;
 902
 903	spin_lock_bh(&agent->lock);
 904	active = (agent->state == AGENT_STATE_ACTIVE);
 905	spin_unlock_bh(&agent->lock);
 906
 907	return active;
 908}
 909
 910static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
 911	struct fw_card *card, u64 next_orb)
 912{
 913	struct se_session *se_sess = sess->se_sess;
 914	struct sbp_target_request *req;
 915	int tag, cpu;
 916
 917	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
 918	if (tag < 0)
 919		return ERR_PTR(-ENOMEM);
 920
 921	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
 922	memset(req, 0, sizeof(*req));
 923	req->se_cmd.map_tag = tag;
 924	req->se_cmd.map_cpu = cpu;
 925	req->se_cmd.tag = next_orb;
 926
 927	return req;
 928}
 929
 930static void tgt_agent_fetch_work(struct work_struct *work)
 931{
 932	struct sbp_target_agent *agent =
 933		container_of(work, struct sbp_target_agent, work);
 934	struct sbp_session *sess = agent->login->sess;
 935	struct sbp_target_request *req;
 936	int ret;
 937	bool doorbell = agent->doorbell;
 938	u64 next_orb = agent->orb_pointer;
 939
 940	while (next_orb && tgt_agent_check_active(agent)) {
 941		req = sbp_mgt_get_req(sess, sess->card, next_orb);
 942		if (IS_ERR(req)) {
 943			spin_lock_bh(&agent->lock);
 944			agent->state = AGENT_STATE_DEAD;
 945			spin_unlock_bh(&agent->lock);
 946			return;
 947		}
 948
 949		req->login = agent->login;
 950		req->orb_pointer = next_orb;
 951
 952		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
 953					req->orb_pointer >> 32));
 954		req->status.orb_low = cpu_to_be32(
 955				req->orb_pointer & 0xfffffffc);
 956
 957		/* read in the ORB */
 958		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
 959				sess->node_id, sess->generation, sess->speed,
 960				req->orb_pointer, &req->orb, sizeof(req->orb));
 961		if (ret != RCODE_COMPLETE) {
 962			pr_debug("tgt_orb fetch failed: %x\n", ret);
 963			req->status.status |= cpu_to_be32(
 964					STATUS_BLOCK_SRC(
 965						STATUS_SRC_ORB_FINISHED) |
 966					STATUS_BLOCK_RESP(
 967						STATUS_RESP_TRANSPORT_FAILURE) |
 968					STATUS_BLOCK_DEAD(1) |
 969					STATUS_BLOCK_LEN(1) |
 970					STATUS_BLOCK_SBP_STATUS(
 971						SBP_STATUS_UNSPECIFIED_ERROR));
 972			spin_lock_bh(&agent->lock);
 973			agent->state = AGENT_STATE_DEAD;
 974			spin_unlock_bh(&agent->lock);
 975
 976			sbp_send_status(req);
 977			return;
 978		}
 979
 980		/* check the next_ORB field */
 981		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
 982			next_orb = 0;
 983			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
 984						STATUS_SRC_ORB_FINISHED));
 985		} else {
 986			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
 987			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
 988						STATUS_SRC_ORB_CONTINUING));
 989		}
 990
 991		if (tgt_agent_check_active(agent) && !doorbell) {
 992			INIT_WORK(&req->work, tgt_agent_process_work);
 993			queue_work(system_unbound_wq, &req->work);
 994		} else {
 995			/* don't process this request, just check next_ORB */
 996			sbp_free_request(req);
 997		}
 998
 999		spin_lock_bh(&agent->lock);
1000		doorbell = agent->doorbell = false;
1001
1002		/* check if we should carry on processing */
1003		if (next_orb)
1004			agent->orb_pointer = next_orb;
1005		else
1006			agent->state = AGENT_STATE_SUSPENDED;
1007
1008		spin_unlock_bh(&agent->lock);
1009	}
1010}
1011
1012static struct sbp_target_agent *sbp_target_agent_register(
1013		struct sbp_login_descriptor *login)
1014{
1015	struct sbp_target_agent *agent;
1016	int ret;
1017
1018	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1019	if (!agent)
1020		return ERR_PTR(-ENOMEM);
1021
1022	spin_lock_init(&agent->lock);
1023
1024	agent->handler.length = 0x20;
1025	agent->handler.address_callback = tgt_agent_rw;
1026	agent->handler.callback_data = agent;
1027
1028	agent->login = login;
1029	agent->state = AGENT_STATE_RESET;
1030	INIT_WORK(&agent->work, tgt_agent_fetch_work);
1031	agent->orb_pointer = 0;
1032	agent->doorbell = false;
1033
1034	ret = fw_core_add_address_handler(&agent->handler,
1035			&sbp_register_region);
1036	if (ret < 0) {
1037		kfree(agent);
1038		return ERR_PTR(ret);
1039	}
1040
1041	return agent;
1042}
1043
1044static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
1045{
1046	fw_core_remove_address_handler(&agent->handler);
1047	cancel_work_sync(&agent->work);
1048	kfree(agent);
1049}
1050
1051/*
1052 * Simple wrapper around fw_run_transaction that retries the transaction several
1053 * times in case of failure, with an exponential backoff.
1054 */
1055static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
1056		int generation, int speed, unsigned long long offset,
1057		void *payload, size_t length)
1058{
1059	int attempt, ret, delay;
1060
1061	for (attempt = 1; attempt <= 5; attempt++) {
1062		ret = fw_run_transaction(card, tcode, destination_id,
1063				generation, speed, offset, payload, length);
1064
1065		switch (ret) {
1066		case RCODE_COMPLETE:
1067		case RCODE_TYPE_ERROR:
1068		case RCODE_ADDRESS_ERROR:
1069		case RCODE_GENERATION:
1070			return ret;
1071
1072		default:
1073			delay = 5 * attempt * attempt;
1074			usleep_range(delay, delay * 2);
1075		}
1076	}
1077
1078	return ret;
1079}
1080
1081/*
1082 * Wrapper around sbp_run_transaction that gets the card, destination,
1083 * generation and speed out of the request's session.
1084 */
1085static int sbp_run_request_transaction(struct sbp_target_request *req,
1086		int tcode, unsigned long long offset, void *payload,
1087		size_t length)
1088{
1089	struct sbp_login_descriptor *login = req->login;
1090	struct sbp_session *sess = login->sess;
1091	struct fw_card *card;
1092	int node_id, generation, speed, ret;
1093
1094	spin_lock_bh(&sess->lock);
1095	card = fw_card_get(sess->card);
1096	node_id = sess->node_id;
1097	generation = sess->generation;
1098	speed = sess->speed;
1099	spin_unlock_bh(&sess->lock);
1100
1101	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
1102			offset, payload, length);
1103
1104	fw_card_put(card);
1105
1106	return ret;
1107}
1108
1109static int sbp_fetch_command(struct sbp_target_request *req)
1110{
1111	int ret, cmd_len, copy_len;
1112
1113	cmd_len = scsi_command_size(req->orb.command_block);
1114
1115	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
1116	if (!req->cmd_buf)
1117		return -ENOMEM;
1118
1119	memcpy(req->cmd_buf, req->orb.command_block,
1120		min_t(int, cmd_len, sizeof(req->orb.command_block)));
1121
1122	if (cmd_len > sizeof(req->orb.command_block)) {
1123		pr_debug("sbp_fetch_command: filling in long command\n");
1124		copy_len = cmd_len - sizeof(req->orb.command_block);
1125
1126		ret = sbp_run_request_transaction(req,
1127				TCODE_READ_BLOCK_REQUEST,
1128				req->orb_pointer + sizeof(req->orb),
1129				req->cmd_buf + sizeof(req->orb.command_block),
1130				copy_len);
1131		if (ret != RCODE_COMPLETE)
1132			return -EIO;
1133	}
1134
1135	return 0;
1136}
1137
1138static int sbp_fetch_page_table(struct sbp_target_request *req)
1139{
1140	int pg_tbl_sz, ret;
1141	struct sbp_page_table_entry *pg_tbl;
1142
1143	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
1144		return 0;
1145
1146	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
1147		sizeof(struct sbp_page_table_entry);
1148
1149	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
1150	if (!pg_tbl)
1151		return -ENOMEM;
1152
1153	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
1154			sbp2_pointer_to_addr(&req->orb.data_descriptor),
1155			pg_tbl, pg_tbl_sz);
1156	if (ret != RCODE_COMPLETE) {
1157		kfree(pg_tbl);
1158		return -EIO;
1159	}
1160
1161	req->pg_tbl = pg_tbl;
1162	return 0;
1163}
1164
1165static void sbp_calc_data_length_direction(struct sbp_target_request *req,
1166	u32 *data_len, enum dma_data_direction *data_dir)
1167{
1168	int data_size, direction, idx;
1169
1170	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1171	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
1172
1173	if (!data_size) {
1174		*data_len = 0;
1175		*data_dir = DMA_NONE;
1176		return;
1177	}
1178
1179	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1180
1181	if (req->pg_tbl) {
1182		*data_len = 0;
1183		for (idx = 0; idx < data_size; idx++) {
1184			*data_len += be16_to_cpu(
1185					req->pg_tbl[idx].segment_length);
1186		}
1187	} else {
1188		*data_len = data_size;
1189	}
1190}
1191
1192static void sbp_handle_command(struct sbp_target_request *req)
1193{
1194	struct sbp_login_descriptor *login = req->login;
1195	struct sbp_session *sess = login->sess;
1196	int ret, unpacked_lun;
1197	u32 data_length;
1198	enum dma_data_direction data_dir;
1199
1200	ret = sbp_fetch_command(req);
1201	if (ret) {
1202		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
1203		goto err;
1204	}
1205
1206	ret = sbp_fetch_page_table(req);
1207	if (ret) {
1208		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
1209			ret);
1210		goto err;
1211	}
1212
1213	unpacked_lun = req->login->login_lun;
1214	sbp_calc_data_length_direction(req, &data_length, &data_dir);
1215
1216	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
1217			req->orb_pointer, unpacked_lun, data_length, data_dir);
1218
1219	/* only used for printk until we do TMRs */
1220	req->se_cmd.tag = req->orb_pointer;
1221	target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
1222			  req->sense_buf, unpacked_lun, data_length,
1223			  TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF);
1224	return;
1225
1226err:
1227	req->status.status |= cpu_to_be32(
1228		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1229		STATUS_BLOCK_DEAD(0) |
1230		STATUS_BLOCK_LEN(1) |
1231		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1232	sbp_send_status(req);
1233}
1234
1235/*
1236 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
1237 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
1238 */
1239static int sbp_rw_data(struct sbp_target_request *req)
1240{
1241	struct sbp_session *sess = req->login->sess;
1242	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
1243		generation, num_pte, length, tfr_length,
1244		rcode = RCODE_COMPLETE;
1245	struct sbp_page_table_entry *pte;
1246	unsigned long long offset;
1247	struct fw_card *card;
1248	struct sg_mapping_iter iter;
1249
1250	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
1251		tcode = TCODE_WRITE_BLOCK_REQUEST;
1252		sg_miter_flags = SG_MITER_FROM_SG;
1253	} else {
1254		tcode = TCODE_READ_BLOCK_REQUEST;
1255		sg_miter_flags = SG_MITER_TO_SG;
1256	}
1257
1258	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
1259	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
1260
1261	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
1262	if (pg_size) {
1263		pr_err("sbp_run_transaction: page size ignored\n");
1264	}
1265
1266	spin_lock_bh(&sess->lock);
1267	card = fw_card_get(sess->card);
1268	node_id = sess->node_id;
1269	generation = sess->generation;
1270	spin_unlock_bh(&sess->lock);
1271
1272	if (req->pg_tbl) {
1273		pte = req->pg_tbl;
1274		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1275
1276		offset = 0;
1277		length = 0;
1278	} else {
1279		pte = NULL;
1280		num_pte = 0;
1281
1282		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
1283		length = req->se_cmd.data_length;
1284	}
1285
1286	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
1287		sg_miter_flags);
1288
1289	while (length || num_pte) {
1290		if (!length) {
1291			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
1292				be32_to_cpu(pte->segment_base_lo);
1293			length = be16_to_cpu(pte->segment_length);
1294
1295			pte++;
1296			num_pte--;
1297		}
1298
1299		sg_miter_next(&iter);
1300
1301		tfr_length = min3(length, max_payload, (int)iter.length);
1302
1303		/* FIXME: take page_size into account */
1304
1305		rcode = sbp_run_transaction(card, tcode, node_id,
1306				generation, speed,
1307				offset, iter.addr, tfr_length);
1308
1309		if (rcode != RCODE_COMPLETE)
1310			break;
1311
1312		length -= tfr_length;
1313		offset += tfr_length;
1314		iter.consumed = tfr_length;
1315	}
1316
1317	sg_miter_stop(&iter);
1318	fw_card_put(card);
1319
1320	if (rcode == RCODE_COMPLETE) {
1321		WARN_ON(length != 0);
1322		return 0;
1323	} else {
1324		return -EIO;
1325	}
1326}
1327
1328static int sbp_send_status(struct sbp_target_request *req)
1329{
1330	int rc, ret = 0, length;
1331	struct sbp_login_descriptor *login = req->login;
1332
1333	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
1334
1335	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
1336			login->status_fifo_addr, &req->status, length);
1337	if (rc != RCODE_COMPLETE) {
1338		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
1339		ret = -EIO;
1340		goto put_ref;
1341	}
1342
1343	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
1344			req->orb_pointer);
1345	/*
1346	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
1347	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
1348	 * final se_cmd->cmd_kref put.
1349	 */
1350put_ref:
1351	target_put_sess_cmd(&req->se_cmd);
1352	return ret;
1353}
1354
1355static void sbp_sense_mangle(struct sbp_target_request *req)
1356{
1357	struct se_cmd *se_cmd = &req->se_cmd;
1358	u8 *sense = req->sense_buf;
1359	u8 *status = req->status.data;
1360
1361	WARN_ON(se_cmd->scsi_sense_length < 18);
1362
1363	switch (sense[0] & 0x7f) { 		/* sfmt */
1364	case 0x70: /* current, fixed */
1365		status[0] = 0 << 6;
1366		break;
1367	case 0x71: /* deferred, fixed */
1368		status[0] = 1 << 6;
1369		break;
1370	case 0x72: /* current, descriptor */
1371	case 0x73: /* deferred, descriptor */
1372	default:
1373		/*
1374		 * TODO: SBP-3 specifies what we should do with descriptor
1375		 * format sense data
1376		 */
1377		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
1378			sense[0]);
1379		req->status.status |= cpu_to_be32(
1380			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1381			STATUS_BLOCK_DEAD(0) |
1382			STATUS_BLOCK_LEN(1) |
1383			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
1384		return;
1385	}
1386
1387	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
1388	status[1] =
1389		(sense[0] & 0x80) |		/* valid */
1390		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
1391		(sense[2] & 0x0f);		/* sense_key */
1392	status[2] = se_cmd->scsi_asc;		/* sense_code */
1393	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */
1394
1395	/* information */
1396	status[4] = sense[3];
1397	status[5] = sense[4];
1398	status[6] = sense[5];
1399	status[7] = sense[6];
1400
1401	/* CDB-dependent */
1402	status[8] = sense[8];
1403	status[9] = sense[9];
1404	status[10] = sense[10];
1405	status[11] = sense[11];
1406
1407	/* fru */
1408	status[12] = sense[14];
1409
1410	/* sense_key-dependent */
1411	status[13] = sense[15];
1412	status[14] = sense[16];
1413	status[15] = sense[17];
1414
1415	req->status.status |= cpu_to_be32(
1416		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1417		STATUS_BLOCK_DEAD(0) |
1418		STATUS_BLOCK_LEN(5) |
1419		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1420}
1421
1422static int sbp_send_sense(struct sbp_target_request *req)
1423{
1424	struct se_cmd *se_cmd = &req->se_cmd;
1425
1426	if (se_cmd->scsi_sense_length) {
1427		sbp_sense_mangle(req);
1428	} else {
1429		req->status.status |= cpu_to_be32(
1430			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1431			STATUS_BLOCK_DEAD(0) |
1432			STATUS_BLOCK_LEN(1) |
1433			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1434	}
1435
1436	return sbp_send_status(req);
1437}
1438
1439static void sbp_free_request(struct sbp_target_request *req)
1440{
1441	struct se_cmd *se_cmd = &req->se_cmd;
1442	struct se_session *se_sess = se_cmd->se_sess;
1443
1444	kfree(req->pg_tbl);
1445	kfree(req->cmd_buf);
1446
1447	target_free_tag(se_sess, se_cmd);
1448}
1449
1450static void sbp_mgt_agent_process(struct work_struct *work)
1451{
1452	struct sbp_management_agent *agent =
1453		container_of(work, struct sbp_management_agent, work);
1454	struct sbp_management_request *req = agent->request;
1455	int ret;
1456	int status_data_len = 0;
1457
1458	/* fetch the ORB from the initiator */
1459	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
1460		req->node_addr, req->generation, req->speed,
1461		agent->orb_offset, &req->orb, sizeof(req->orb));
1462	if (ret != RCODE_COMPLETE) {
1463		pr_debug("mgt_orb fetch failed: %x\n", ret);
1464		goto out;
1465	}
1466
1467	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
1468		sbp2_pointer_to_addr(&req->orb.ptr1),
1469		sbp2_pointer_to_addr(&req->orb.ptr2),
1470		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
1471		sbp2_pointer_to_addr(&req->orb.status_fifo));
1472
1473	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
1474		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
1475		pr_err("mgt_orb bad request\n");
1476		goto out;
1477	}
1478
1479	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
1480	case MANAGEMENT_ORB_FUNCTION_LOGIN:
1481		sbp_management_request_login(agent, req, &status_data_len);
1482		break;
1483
1484	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
1485		sbp_management_request_query_logins(agent, req,
1486				&status_data_len);
1487		break;
1488
1489	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
1490		sbp_management_request_reconnect(agent, req, &status_data_len);
1491		break;
1492
1493	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
1494		pr_notice("SET PASSWORD not implemented\n");
1495
1496		req->status.status = cpu_to_be32(
1497			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1498			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1499
1500		break;
1501
1502	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
1503		sbp_management_request_logout(agent, req, &status_data_len);
1504		break;
1505
1506	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
1507		pr_notice("ABORT TASK not implemented\n");
1508
1509		req->status.status = cpu_to_be32(
1510			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1511			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1512
1513		break;
1514
1515	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
1516		pr_notice("ABORT TASK SET not implemented\n");
1517
1518		req->status.status = cpu_to_be32(
1519			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1520			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1521
1522		break;
1523
1524	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
1525		pr_notice("LOGICAL UNIT RESET not implemented\n");
1526
1527		req->status.status = cpu_to_be32(
1528			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1529			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1530
1531		break;
1532
1533	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
1534		pr_notice("TARGET RESET not implemented\n");
1535
1536		req->status.status = cpu_to_be32(
1537			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1538			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1539
1540		break;
1541
1542	default:
1543		pr_notice("unknown management function 0x%x\n",
1544			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
1545
1546		req->status.status = cpu_to_be32(
1547			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1548			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1549
1550		break;
1551	}
1552
1553	req->status.status |= cpu_to_be32(
1554		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
1555		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
1556		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
1557	req->status.orb_low = cpu_to_be32(agent->orb_offset);
1558
1559	/* write the status block back to the initiator */
1560	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
1561		req->node_addr, req->generation, req->speed,
1562		sbp2_pointer_to_addr(&req->orb.status_fifo),
1563		&req->status, 8 + status_data_len);
1564	if (ret != RCODE_COMPLETE) {
1565		pr_debug("mgt_orb status write failed: %x\n", ret);
1566		goto out;
1567	}
1568
1569out:
1570	fw_card_put(req->card);
1571	kfree(req);
1572
1573	spin_lock_bh(&agent->lock);
1574	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1575	spin_unlock_bh(&agent->lock);
1576}
1577
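/*
 * Address handler for the management agent register.  Roughly: an
 * initiator queues a management ORB by writing an 8-byte SBP-2 pointer
 * to this register; the write is acknowledged immediately and the ORB
 * is then fetched and handled by sbp_mgt_agent_process() on the
 * system_unbound_wq.  A block read returns the pointer of the most
 * recently queued ORB, any other transaction type is rejected with
 * RCODE_TYPE_ERROR, and a request that arrives while the agent is
 * still busy gets RCODE_CONFLICT_ERROR.
 */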
1578static void sbp_mgt_agent_rw(struct fw_card *card,
1579	struct fw_request *request, int tcode, int destination, int source,
1580	int generation, unsigned long long offset, void *data, size_t length,
1581	void *callback_data)
1582{
1583	struct sbp_management_agent *agent = callback_data;
1584	struct sbp2_pointer *ptr = data;
1585	int rcode = RCODE_ADDRESS_ERROR;
1586
1587	if (!agent->tport->enable)
1588		goto out;
1589
1590	if ((offset != agent->handler.offset) || (length != 8))
1591		goto out;
1592
1593	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
1594		struct sbp_management_request *req;
1595		int prev_state;
1596
1597		spin_lock_bh(&agent->lock);
1598		prev_state = agent->state;
1599		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
1600		spin_unlock_bh(&agent->lock);
1601
1602		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
1603			pr_notice("ignoring management request while busy\n");
1604			rcode = RCODE_CONFLICT_ERROR;
1605			goto out;
1606		}
1607		req = kzalloc(sizeof(*req), GFP_ATOMIC);
1608		if (!req) {
1609			rcode = RCODE_CONFLICT_ERROR;
1610			goto out;
1611		}
1612
1613		req->card = fw_card_get(card);
1614		req->generation = generation;
1615		req->node_addr = source;
1616		req->speed = fw_get_request_speed(request);
1617
1618		agent->orb_offset = sbp2_pointer_to_addr(ptr);
1619		agent->request = req;
1620
1621		queue_work(system_unbound_wq, &agent->work);
1622		rcode = RCODE_COMPLETE;
1623	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
1624		addr_to_sbp2_pointer(agent->orb_offset, ptr);
1625		rcode = RCODE_COMPLETE;
1626	} else {
1627		rcode = RCODE_TYPE_ERROR;
1628	}
1629
1630out:
1631	fw_send_response(card, request, rcode);
1632}
1633
1634static struct sbp_management_agent *sbp_management_agent_register(
1635		struct sbp_tport *tport)
1636{
1637	int ret;
1638	struct sbp_management_agent *agent;
1639
1640	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1641	if (!agent)
1642		return ERR_PTR(-ENOMEM);
1643
1644	spin_lock_init(&agent->lock);
1645	agent->tport = tport;
1646	agent->handler.length = 0x08;
1647	agent->handler.address_callback = sbp_mgt_agent_rw;
1648	agent->handler.callback_data = agent;
1649	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1650	INIT_WORK(&agent->work, sbp_mgt_agent_process);
1651	agent->orb_offset = 0;
1652	agent->request = NULL;
1653
1654	ret = fw_core_add_address_handler(&agent->handler,
1655			&sbp_register_region);
1656	if (ret < 0) {
1657		kfree(agent);
1658		return ERR_PTR(ret);
1659	}
1660
1661	return agent;
1662}
1663
1664static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
1665{
1666	fw_core_remove_address_handler(&agent->handler);
1667	cancel_work_sync(&agent->work);
1668	kfree(agent);
1669}
1670
1671static int sbp_check_true(struct se_portal_group *se_tpg)
1672{
1673	return 1;
1674}
1675
1676static int sbp_check_false(struct se_portal_group *se_tpg)
1677{
1678	return 0;
1679}
1680
1681static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1682{
1683	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1684	struct sbp_tport *tport = tpg->tport;
1685
1686	return &tport->tport_name[0];
1687}
1688
1689static u16 sbp_get_tag(struct se_portal_group *se_tpg)
1690{
1691	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1692	return tpg->tport_tpgt;
1693}
1694
1695static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
1696{
1697	return 1;
1698}
1699
1700static void sbp_release_cmd(struct se_cmd *se_cmd)
1701{
1702	struct sbp_target_request *req = container_of(se_cmd,
1703			struct sbp_target_request, se_cmd);
1704
1705	sbp_free_request(req);
1706}
1707
1708static u32 sbp_sess_get_index(struct se_session *se_sess)
1709{
1710	return 0;
1711}
1712
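/*
 * Data-movement callbacks.  For a write command the core invokes
 * .write_pending and sbp_rw_data() fetches the data from the initiator
 * before target_execute_cmd() runs the command; for a read command,
 * .queue_data_in pushes the data back and the SBP-2 status block (with
 * any sense data) follows via sbp_send_sense().  A failed transfer is
 * reported as STATUS_RESP_TRANSPORT_FAILURE with
 * SBP_STATUS_UNSPECIFIED_ERROR.
 */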
1713static int sbp_write_pending(struct se_cmd *se_cmd)
1714{
1715	struct sbp_target_request *req = container_of(se_cmd,
1716			struct sbp_target_request, se_cmd);
1717	int ret;
1718
1719	ret = sbp_rw_data(req);
1720	if (ret) {
1721		req->status.status |= cpu_to_be32(
1722			STATUS_BLOCK_RESP(
1723				STATUS_RESP_TRANSPORT_FAILURE) |
1724			STATUS_BLOCK_DEAD(0) |
1725			STATUS_BLOCK_LEN(1) |
1726			STATUS_BLOCK_SBP_STATUS(
1727				SBP_STATUS_UNSPECIFIED_ERROR));
1728		sbp_send_status(req);
1729		return ret;
1730	}
1731
1732	target_execute_cmd(se_cmd);
1733	return 0;
1734}
1735
1736static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
1737{
1738	return;
1739}
1740
1741static int sbp_get_cmd_state(struct se_cmd *se_cmd)
1742{
1743	return 0;
1744}
1745
1746static int sbp_queue_data_in(struct se_cmd *se_cmd)
1747{
1748	struct sbp_target_request *req = container_of(se_cmd,
1749			struct sbp_target_request, se_cmd);
1750	int ret;
1751
1752	ret = sbp_rw_data(req);
1753	if (ret) {
1754		req->status.status |= cpu_to_be32(
1755			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1756			STATUS_BLOCK_DEAD(0) |
1757			STATUS_BLOCK_LEN(1) |
1758			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1759		sbp_send_status(req);
1760		return ret;
1761	}
1762
1763	return sbp_send_sense(req);
1764}
1765
1766/*
1767 * Called after command (no data transfer) or after the write (to device)
1768 * operation is completed
1769 */
1770static int sbp_queue_status(struct se_cmd *se_cmd)
1771{
1772	struct sbp_target_request *req = container_of(se_cmd,
1773			struct sbp_target_request, se_cmd);
1774
1775	return sbp_send_sense(req);
1776}
1777
1778static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1779{
1780}
1781
1782static void sbp_aborted_task(struct se_cmd *se_cmd)
1783{
1784	return;
1785}
1786
1787static int sbp_check_stop_free(struct se_cmd *se_cmd)
1788{
1789	struct sbp_target_request *req = container_of(se_cmd,
1790			struct sbp_target_request, se_cmd);
1791
1792	return transport_generic_free_cmd(&req->se_cmd, 0);
1793}
1794
1795static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
1796{
1797	struct se_lun *lun;
1798	int count = 0;
1799
1800	rcu_read_lock();
1801	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
1802		count++;
1803	rcu_read_unlock();
1804
1805	return count;
1806}
1807
1808static int sbp_update_unit_directory(struct sbp_tport *tport)
1809{
1810	struct se_lun *lun;
1811	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
1812	u32 *data;
1813
1814	if (tport->unit_directory.data) {
1815		fw_core_remove_descriptor(&tport->unit_directory);
1816		kfree(tport->unit_directory.data);
1817		tport->unit_directory.data = NULL;
1818	}
1819
1820	if (!tport->enable || !tport->tpg)
1821		return 0;
1822
1823	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
1824
1825	/*
1826	 * Number of entries in the final unit directory:
1827	 *  - all of those in the template
1828	 *  - management_agent
1829	 *  - unit_characteristics
1830	 *  - reconnect_timeout
1831	 *  - unit unique ID
1832	 *  - one for each LUN
1833	 *
1834	 *  MUST NOT include leaf or sub-directory entries
1835	 */
1836	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
1837
1838	if (tport->directory_id != -1)
1839		num_entries++;
1840
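	/*
	 * For example, with two exported LUNs and an implicit directory_id,
	 * num_entries is ARRAY_SIZE(sbp_unit_directory_template) + 4 + 2.
	 * The kcalloc() below then reserves num_entries + 4 quadlets: one
	 * extra for the directory_length header emitted first, and three
	 * for the trailing unit unique ID leaf (its own length quadlet plus
	 * the two halves of the GUID).
	 */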
1841	/* allocate num_entries + 4 for the header and unique ID leaf */
1842	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
1843	if (!data)
1844		return -ENOMEM;
1845
1846	/* directory_length */
1847	data[idx++] = num_entries << 16;
1848
1849	/* directory_id */
1850	if (tport->directory_id != -1)
1851		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
1852
1853	/* unit directory template */
1854	memcpy(&data[idx], sbp_unit_directory_template,
1855			sizeof(sbp_unit_directory_template));
1856	idx += ARRAY_SIZE(sbp_unit_directory_template);
1857
1858	/* management_agent */
1859	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
1860	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
1861
1862	/* unit_characteristics */
1863	data[idx++] = 0x3a000000 |
1864		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
1865		SBP_ORB_FETCH_SIZE;
1866
1867	/* reconnect_timeout */
1868	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
1869
1870	/* unit unique ID (leaf is just after LUNs) */
1871	data[idx++] = 0x8d000000 | (num_luns + 1);
1872
1873	rcu_read_lock();
1874	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
1875		struct se_device *dev;
1876		int type;
1877		/*
1878		 * rcu_dereference_raw protected by se_lun->lun_group symlink
1879		 * reference to se_device->dev_group.
1880		 */
1881		dev = rcu_dereference_raw(lun->lun_se_dev);
1882		type = dev->transport->get_device_type(dev);
1883
1884		/* logical_unit_number */
1885		data[idx++] = 0x14000000 |
1886			((type << 16) & 0x1f0000) |
1887			(lun->unpacked_lun & 0xffff);
1888	}
1889	rcu_read_unlock();
1890
1891	/* unit unique ID leaf */
1892	data[idx++] = 2 << 16;
1893	data[idx++] = tport->guid >> 32;
1894	data[idx++] = tport->guid;
1895
1896	tport->unit_directory.length = idx;
1897	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
1898	tport->unit_directory.data = data;
1899
1900	ret = fw_core_add_descriptor(&tport->unit_directory);
1901	if (ret < 0) {
1902		kfree(tport->unit_directory.data);
1903		tport->unit_directory.data = NULL;
1904	}
1905
1906	return ret;
1907}
1908
1909static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
1910{
1911	const char *cp;
1912	char c, nibble;
1913	int pos = 0, err;
1914
1915	*wwn = 0;
1916	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
1917		c = *cp;
1918		if (c == '\n' && cp[1] == '\0')
1919			continue;
1920		if (c == '\0') {
1921			err = 2;
1922			if (pos != 16)
1923				goto fail;
1924			return cp - name;
1925		}
1926		err = 3;
1927		if (isdigit(c))
1928			nibble = c - '0';
1929		else if (isxdigit(c))
1930			nibble = tolower(c) - 'a' + 10;
1931		else
1932			goto fail;
1933		*wwn = (*wwn << 4) | nibble;
1934		pos++;
1935	}
1936	err = 4;
1937fail:
1938	pr_info("err %u len %zu pos %u\n",
1939			err, cp - name, pos);
1940	return -1;
1941}
1942
1943static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
1944{
1945	return snprintf(buf, len, "%016llx", wwn);
1946}
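/*
 * Example: a configfs node name of "0123456789abcdef" (an optional
 * trailing newline is ignored) parses to the EUI-64 0x0123456789abcdef;
 * anything other than exactly 16 hex digits is rejected.
 * sbp_format_wwn() writes the same value back as a 16-digit lower-case
 * hex string.
 */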
1947
1948static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1949{
1950	u64 guid = 0;
1951
1952	if (sbp_parse_wwn(name, &guid) < 0)
1953		return -EINVAL;
1954	return 0;
1955}
1956
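/*
 * LUN link/unlink hooks: each time a LUN symlink is added to or removed
 * from the TPG, the SBP-2 unit directory is regenerated so initiators
 * see the current set of logical_unit_number entries; removing the last
 * LUN also clears tport->enable.
 */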
1957static int sbp_post_link_lun(
1958		struct se_portal_group *se_tpg,
1959		struct se_lun *se_lun)
1960{
1961	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1962
1963	return sbp_update_unit_directory(tpg->tport);
1964}
1965
1966static void sbp_pre_unlink_lun(
1967		struct se_portal_group *se_tpg,
1968		struct se_lun *se_lun)
1969{
1970	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1971	struct sbp_tport *tport = tpg->tport;
1972	int ret;
1973
1974	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
1975		tport->enable = 0;
1976
1977	ret = sbp_update_unit_directory(tport);
1978	if (ret < 0)
1979		pr_err("unlink LUN: failed to update unit directory\n");
1980}
1981
1982static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
1983					    const char *name)
1984{
1985	struct sbp_tport *tport =
1986		container_of(wwn, struct sbp_tport, tport_wwn);
1987
1988	struct sbp_tpg *tpg;
1989	unsigned long tpgt;
1990	int ret;
1991
1992	if (strstr(name, "tpgt_") != name)
1993		return ERR_PTR(-EINVAL);
1994	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1995		return ERR_PTR(-EINVAL);
1996
1997	if (tport->tpg) {
1998		pr_err("Only one TPG per Unit is possible.\n");
1999		return ERR_PTR(-EBUSY);
2000	}
2001
2002	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2003	if (!tpg)
2004		return ERR_PTR(-ENOMEM);
2005
2006	tpg->tport = tport;
2007	tpg->tport_tpgt = tpgt;
2008	tport->tpg = tpg;
2009
2010	/* default attribute values */
2011	tport->enable = 0;
2012	tport->directory_id = -1;
2013	tport->mgt_orb_timeout = 15;
2014	tport->max_reconnect_timeout = 5;
2015	tport->max_logins_per_lun = 1;
2016
2017	tport->mgt_agt = sbp_management_agent_register(tport);
2018	if (IS_ERR(tport->mgt_agt)) {
2019		ret = PTR_ERR(tport->mgt_agt);
2020		goto out_free_tpg;
2021	}
2022
2023	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
2024	if (ret < 0)
2025		goto out_unreg_mgt_agt;
2026
2027	return &tpg->se_tpg;
2028
2029out_unreg_mgt_agt:
2030	sbp_management_agent_unregister(tport->mgt_agt);
2031out_free_tpg:
2032	tport->tpg = NULL;
2033	kfree(tpg);
2034	return ERR_PTR(ret);
2035}
2036
2037static void sbp_drop_tpg(struct se_portal_group *se_tpg)
2038{
2039	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2040	struct sbp_tport *tport = tpg->tport;
2041
2042	core_tpg_deregister(se_tpg);
2043	sbp_management_agent_unregister(tport->mgt_agt);
2044	tport->tpg = NULL;
2045	kfree(tpg);
2046}
2047
2048static struct se_wwn *sbp_make_tport(
2049		struct target_fabric_configfs *tf,
2050		struct config_group *group,
2051		const char *name)
2052{
2053	struct sbp_tport *tport;
2054	u64 guid = 0;
2055
2056	if (sbp_parse_wwn(name, &guid) < 0)
2057		return ERR_PTR(-EINVAL);
2058
2059	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2060	if (!tport)
2061		return ERR_PTR(-ENOMEM);
2062
2063	tport->guid = guid;
2064	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2065
2066	return &tport->tport_wwn;
2067}
2068
2069static void sbp_drop_tport(struct se_wwn *wwn)
2070{
2071	struct sbp_tport *tport =
2072		container_of(wwn, struct sbp_tport, tport_wwn);
2073
2074	kfree(tport);
2075}
2076
2077static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
2078{
2079	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
2080}
2081
2082CONFIGFS_ATTR_RO(sbp_wwn_, version);
2083
2084static struct configfs_attribute *sbp_wwn_attrs[] = {
2085	&sbp_wwn_attr_version,
2086	NULL,
2087};
2088
2089static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
2090{
2091	struct se_portal_group *se_tpg = to_tpg(item);
2092	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2093	struct sbp_tport *tport = tpg->tport;
2094
2095	if (tport->directory_id == -1)
2096		return sprintf(page, "implicit\n");
2097	else
2098		return sprintf(page, "%06x\n", tport->directory_id);
2099}
2100
2101static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
2102		const char *page, size_t count)
2103{
2104	struct se_portal_group *se_tpg = to_tpg(item);
2105	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2106	struct sbp_tport *tport = tpg->tport;
2107	unsigned long val;
2108
2109	if (tport->enable) {
2110		pr_err("Cannot change the directory_id on an active target.\n");
2111		return -EBUSY;
2112	}
2113
2114	if (strstr(page, "implicit") == page) {
2115		tport->directory_id = -1;
2116	} else {
2117		if (kstrtoul(page, 16, &val) < 0)
2118			return -EINVAL;
2119		if (val > 0xffffff)
2120			return -EINVAL;
2121
2122		tport->directory_id = val;
2123	}
2124
2125	return count;
2126}
2127
2128static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
2129{
2130	struct se_portal_group *se_tpg = to_tpg(item);
2131	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2132	struct sbp_tport *tport = tpg->tport;
2133	return sprintf(page, "%d\n", tport->enable);
2134}
2135
2136static ssize_t sbp_tpg_enable_store(struct config_item *item,
2137		const char *page, size_t count)
2138{
2139	struct se_portal_group *se_tpg = to_tpg(item);
2140	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2141	struct sbp_tport *tport = tpg->tport;
2142	unsigned long val;
2143	int ret;
2144
2145	if (kstrtoul(page, 0, &val) < 0)
2146		return -EINVAL;
2147	if ((val != 0) && (val != 1))
2148		return -EINVAL;
2149
2150	if (tport->enable == val)
2151		return count;
2152
2153	if (val) {
2154		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
2155			pr_err("Cannot enable a target with no LUNs!\n");
2156			return -EINVAL;
2157		}
2158	} else {
2159		/* XXX: force-shutdown sessions instead? */
2160		spin_lock_bh(&se_tpg->session_lock);
2161		if (!list_empty(&se_tpg->tpg_sess_list)) {
2162			spin_unlock_bh(&se_tpg->session_lock);
2163			return -EBUSY;
2164		}
2165		spin_unlock_bh(&se_tpg->session_lock);
2166	}
2167
2168	tport->enable = val;
2169
2170	ret = sbp_update_unit_directory(tport);
2171	if (ret < 0) {
2172		pr_err("Could not update Config ROM\n");
2173		return ret;
2174	}
2175
2176	return count;
2177}
2178
2179CONFIGFS_ATTR(sbp_tpg_, directory_id);
2180CONFIGFS_ATTR(sbp_tpg_, enable);
2181
2182static struct configfs_attribute *sbp_tpg_base_attrs[] = {
2183	&sbp_tpg_attr_directory_id,
2184	&sbp_tpg_attr_enable,
2185	NULL,
2186};
2187
2188static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
2189		char *page)
2190{
2191	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2192	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2193	struct sbp_tport *tport = tpg->tport;
2194	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
2195}
2196
2197static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
2198		const char *page, size_t count)
2199{
2200	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2201	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2202	struct sbp_tport *tport = tpg->tport;
2203	unsigned long val;
2204	int ret;
2205
2206	if (kstrtoul(page, 0, &val) < 0)
2207		return -EINVAL;
2208	if ((val < 1) || (val > 127))
2209		return -EINVAL;
2210
2211	if (tport->mgt_orb_timeout == val)
2212		return count;
2213
2214	tport->mgt_orb_timeout = val;
2215
2216	ret = sbp_update_unit_directory(tport);
2217	if (ret < 0)
2218		return ret;
2219
2220	return count;
2221}
2222
2223static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
2224		char *page)
2225{
2226	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2227	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2228	struct sbp_tport *tport = tpg->tport;
2229	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
2230}
2231
2232static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
2233		const char *page, size_t count)
2234{
2235	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2236	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2237	struct sbp_tport *tport = tpg->tport;
2238	unsigned long val;
2239	int ret;
2240
2241	if (kstrtoul(page, 0, &val) < 0)
2242		return -EINVAL;
2243	if ((val < 1) || (val > 32767))
2244		return -EINVAL;
2245
2246	if (tport->max_reconnect_timeout == val)
2247		return count;
2248
2249	tport->max_reconnect_timeout = val;
2250
2251	ret = sbp_update_unit_directory(tport);
2252	if (ret < 0)
2253		return ret;
2254
2255	return count;
2256}
2257
2258static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
2259		char *page)
2260{
2261	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2262	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2263	struct sbp_tport *tport = tpg->tport;
2264	return sprintf(page, "%d\n", tport->max_logins_per_lun);
2265}
2266
2267static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
2268		const char *page, size_t count)
2269{
2270	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2271	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2272	struct sbp_tport *tport = tpg->tport;
2273	unsigned long val;
2274
2275	if (kstrtoul(page, 0, &val) < 0)
2276		return -EINVAL;
2277	if ((val < 1) || (val > 127))
2278		return -EINVAL;
2279
2280	/* XXX: also check against current count? */
2281
2282	tport->max_logins_per_lun = val;
2283
2284	return count;
2285}
2286
2287CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
2288CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
2289CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);
2290
2291static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2292	&sbp_tpg_attrib_attr_mgt_orb_timeout,
2293	&sbp_tpg_attrib_attr_max_reconnect_timeout,
2294	&sbp_tpg_attrib_attr_max_logins_per_lun,
2295	NULL,
2296};
2297
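/*
 * Typical configfs usage, sketched with placeholder names and assuming
 * configfs is mounted at /sys/kernel/config (the backstore path depends
 * on the configured backend):
 *
 *   mkdir -p /sys/kernel/config/target/sbp/0123456789abcdef/tpgt_1/lun/lun_0
 *   ln -s /sys/kernel/config/target/core/iblock_0/disk0 \
 *         /sys/kernel/config/target/sbp/0123456789abcdef/tpgt_1/lun/lun_0/
 *   echo 1 > /sys/kernel/config/target/sbp/0123456789abcdef/tpgt_1/enable
 *
 * The 16-hex-digit directory name is parsed by sbp_parse_wwn(), the
 * "tpgt_" name by sbp_make_tpg(), LUN links trigger sbp_post_link_lun(),
 * and the enable attribute is handled by sbp_tpg_enable_store(), which
 * refuses to enable a target with no LUNs.
 */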
2298static const struct target_core_fabric_ops sbp_ops = {
2299	.module				= THIS_MODULE,
2300	.fabric_name			= "sbp",
2301	.tpg_get_wwn			= sbp_get_fabric_wwn,
2302	.tpg_get_tag			= sbp_get_tag,
2303	.tpg_check_demo_mode		= sbp_check_true,
2304	.tpg_check_demo_mode_cache	= sbp_check_true,
2305	.tpg_check_demo_mode_write_protect = sbp_check_false,
2306	.tpg_check_prod_mode_write_protect = sbp_check_false,
2307	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
2308	.release_cmd			= sbp_release_cmd,
2309	.sess_get_index			= sbp_sess_get_index,
2310	.write_pending			= sbp_write_pending,
2311	.set_default_node_attributes	= sbp_set_default_node_attrs,
2312	.get_cmd_state			= sbp_get_cmd_state,
2313	.queue_data_in			= sbp_queue_data_in,
2314	.queue_status			= sbp_queue_status,
2315	.queue_tm_rsp			= sbp_queue_tm_rsp,
2316	.aborted_task			= sbp_aborted_task,
2317	.check_stop_free		= sbp_check_stop_free,
2318
2319	.fabric_make_wwn		= sbp_make_tport,
2320	.fabric_drop_wwn		= sbp_drop_tport,
2321	.fabric_make_tpg		= sbp_make_tpg,
2322	.fabric_drop_tpg		= sbp_drop_tpg,
2323	.fabric_post_link		= sbp_post_link_lun,
2324	.fabric_pre_unlink		= sbp_pre_unlink_lun,
2325	.fabric_make_np			= NULL,
2326	.fabric_drop_np			= NULL,
2327	.fabric_init_nodeacl		= sbp_init_nodeacl,
2328
2329	.tfc_wwn_attrs			= sbp_wwn_attrs,
2330	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
2331	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
2332};
2333
2334static int __init sbp_init(void)
2335{
2336	return target_register_template(&sbp_ops);
2337	}
2338
2339static void __exit sbp_exit(void)
2340{
2341	target_unregister_template(&sbp_ops);
2342	}
2343
2344MODULE_DESCRIPTION("FireWire SBP fabric driver");
2345MODULE_LICENSE("GPL");
2346module_init(sbp_init);
2347module_exit(sbp_exit);