Linux Audio

Check our new training course

Loading...
   1/*
   2 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#define pr_fmt(fmt) PFX fmt
  34
  35#include <linux/module.h>
  36#include <linux/init.h>
  37#include <linux/slab.h>
  38#include <linux/err.h>
  39#include <linux/string.h>
  40#include <linux/parser.h>
  41#include <linux/random.h>
  42#include <linux/jiffies.h>
  43
  44#include <linux/atomic.h>
  45
  46#include <scsi/scsi.h>
  47#include <scsi/scsi_device.h>
  48#include <scsi/scsi_dbg.h>
  49#include <scsi/srp.h>
  50#include <scsi/scsi_transport_srp.h>
  51
  52#include "ib_srp.h"
  53
  54#define DRV_NAME	"ib_srp"
  55#define PFX		DRV_NAME ": "
  56#define DRV_VERSION	"0.2"
  57#define DRV_RELDATE	"November 1, 2005"
  58
  59MODULE_AUTHOR("Roland Dreier");
  60MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
  61		   "v" DRV_VERSION " (" DRV_RELDATE ")");
  62MODULE_LICENSE("Dual BSD/GPL");
  63
  64static unsigned int srp_sg_tablesize;
  65static unsigned int cmd_sg_entries;
  66static unsigned int indirect_sg_entries;
  67static bool allow_ext_sg;
  68static int topspin_workarounds = 1;
  69
  70module_param(srp_sg_tablesize, uint, 0444);
  71MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
  72
  73module_param(cmd_sg_entries, uint, 0444);
  74MODULE_PARM_DESC(cmd_sg_entries,
  75		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
  76
  77module_param(indirect_sg_entries, uint, 0444);
  78MODULE_PARM_DESC(indirect_sg_entries,
  79		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
  80
  81module_param(allow_ext_sg, bool, 0444);
  82MODULE_PARM_DESC(allow_ext_sg,
  83		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
  84
  85module_param(topspin_workarounds, int, 0444);
  86MODULE_PARM_DESC(topspin_workarounds,
  87		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
  88
  89static void srp_add_one(struct ib_device *device);
  90static void srp_remove_one(struct ib_device *device);
  91static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
  92static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
  93static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
  94
  95static struct scsi_transport_template *ib_srp_transport_template;
  96
  97static struct ib_client srp_client = {
  98	.name   = "srp",
  99	.add    = srp_add_one,
 100	.remove = srp_remove_one
 101};
 102
 103static struct ib_sa_client srp_sa_client;
 104
 105static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
 106{
 107	return (struct srp_target_port *) host->hostdata;
 108}
 109
 110static const char *srp_target_info(struct Scsi_Host *host)
 111{
 112	return host_to_target(host)->target_name;
 113}
 114
 115static int srp_target_is_topspin(struct srp_target_port *target)
 116{
 117	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
 118	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
 119
 120	return topspin_workarounds &&
 121		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
 122		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
 123}
 124
 125static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
 126				   gfp_t gfp_mask,
 127				   enum dma_data_direction direction)
 128{
 129	struct srp_iu *iu;
 130
 131	iu = kmalloc(sizeof *iu, gfp_mask);
 132	if (!iu)
 133		goto out;
 134
 135	iu->buf = kzalloc(size, gfp_mask);
 136	if (!iu->buf)
 137		goto out_free_iu;
 138
 139	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
 140				    direction);
 141	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
 142		goto out_free_buf;
 143
 144	iu->size      = size;
 145	iu->direction = direction;
 146
 147	return iu;
 148
 149out_free_buf:
 150	kfree(iu->buf);
 151out_free_iu:
 152	kfree(iu);
 153out:
 154	return NULL;
 155}
 156
 157static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
 158{
 159	if (!iu)
 160		return;
 161
 162	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
 163			    iu->direction);
 164	kfree(iu->buf);
 165	kfree(iu);
 166}
 167
 168static void srp_qp_event(struct ib_event *event, void *context)
 169{
 170	pr_debug("QP event %d\n", event->event);
 171}
 172
 173static int srp_init_qp(struct srp_target_port *target,
 174		       struct ib_qp *qp)
 175{
 176	struct ib_qp_attr *attr;
 177	int ret;
 178
 179	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 180	if (!attr)
 181		return -ENOMEM;
 182
 183	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
 184			   target->srp_host->port,
 185			   be16_to_cpu(target->path.pkey),
 186			   &attr->pkey_index);
 187	if (ret)
 188		goto out;
 189
 190	attr->qp_state        = IB_QPS_INIT;
 191	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
 192				    IB_ACCESS_REMOTE_WRITE);
 193	attr->port_num        = target->srp_host->port;
 194
 195	ret = ib_modify_qp(qp, attr,
 196			   IB_QP_STATE		|
 197			   IB_QP_PKEY_INDEX	|
 198			   IB_QP_ACCESS_FLAGS	|
 199			   IB_QP_PORT);
 200
 201out:
 202	kfree(attr);
 203	return ret;
 204}
 205
 206static int srp_new_cm_id(struct srp_target_port *target)
 207{
 208	struct ib_cm_id *new_cm_id;
 209
 210	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
 211				    srp_cm_handler, target);
 212	if (IS_ERR(new_cm_id))
 213		return PTR_ERR(new_cm_id);
 214
 215	if (target->cm_id)
 216		ib_destroy_cm_id(target->cm_id);
 217	target->cm_id = new_cm_id;
 218
 219	return 0;
 220}
 221
 222static int srp_create_target_ib(struct srp_target_port *target)
 223{
 224	struct ib_qp_init_attr *init_attr;
 225	int ret;
 226
 227	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
 228	if (!init_attr)
 229		return -ENOMEM;
 230
 231	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
 232				       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
 233	if (IS_ERR(target->recv_cq)) {
 234		ret = PTR_ERR(target->recv_cq);
 235		goto err;
 236	}
 237
 238	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
 239				       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
 240	if (IS_ERR(target->send_cq)) {
 241		ret = PTR_ERR(target->send_cq);
 242		goto err_recv_cq;
 243	}
 244
 245	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
 246
 247	init_attr->event_handler       = srp_qp_event;
 248	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
 249	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
 250	init_attr->cap.max_recv_sge    = 1;
 251	init_attr->cap.max_send_sge    = 1;
 252	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
 253	init_attr->qp_type             = IB_QPT_RC;
 254	init_attr->send_cq             = target->send_cq;
 255	init_attr->recv_cq             = target->recv_cq;
 256
 257	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
 258	if (IS_ERR(target->qp)) {
 259		ret = PTR_ERR(target->qp);
 260		goto err_send_cq;
 261	}
 262
 263	ret = srp_init_qp(target, target->qp);
 264	if (ret)
 265		goto err_qp;
 266
 267	kfree(init_attr);
 268	return 0;
 269
 270err_qp:
 271	ib_destroy_qp(target->qp);
 272
 273err_send_cq:
 274	ib_destroy_cq(target->send_cq);
 275
 276err_recv_cq:
 277	ib_destroy_cq(target->recv_cq);
 278
 279err:
 280	kfree(init_attr);
 281	return ret;
 282}
 283
 284static void srp_free_target_ib(struct srp_target_port *target)
 285{
 286	int i;
 287
 288	ib_destroy_qp(target->qp);
 289	ib_destroy_cq(target->send_cq);
 290	ib_destroy_cq(target->recv_cq);
 291
 292	for (i = 0; i < SRP_RQ_SIZE; ++i)
 293		srp_free_iu(target->srp_host, target->rx_ring[i]);
 294	for (i = 0; i < SRP_SQ_SIZE; ++i)
 295		srp_free_iu(target->srp_host, target->tx_ring[i]);
 296}
 297
 298static void srp_path_rec_completion(int status,
 299				    struct ib_sa_path_rec *pathrec,
 300				    void *target_ptr)
 301{
 302	struct srp_target_port *target = target_ptr;
 303
 304	target->status = status;
 305	if (status)
 306		shost_printk(KERN_ERR, target->scsi_host,
 307			     PFX "Got failed path rec status %d\n", status);
 308	else
 309		target->path = *pathrec;
 310	complete(&target->done);
 311}
 312
 313static int srp_lookup_path(struct srp_target_port *target)
 314{
 315	target->path.numb_path = 1;
 316
 317	init_completion(&target->done);
 318
 319	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
 320						   target->srp_host->srp_dev->dev,
 321						   target->srp_host->port,
 322						   &target->path,
 323						   IB_SA_PATH_REC_SERVICE_ID	|
 324						   IB_SA_PATH_REC_DGID		|
 325						   IB_SA_PATH_REC_SGID		|
 326						   IB_SA_PATH_REC_NUMB_PATH	|
 327						   IB_SA_PATH_REC_PKEY,
 328						   SRP_PATH_REC_TIMEOUT_MS,
 329						   GFP_KERNEL,
 330						   srp_path_rec_completion,
 331						   target, &target->path_query);
 332	if (target->path_query_id < 0)
 333		return target->path_query_id;
 334
 335	wait_for_completion(&target->done);
 336
 337	if (target->status < 0)
 338		shost_printk(KERN_WARNING, target->scsi_host,
 339			     PFX "Path record query failed\n");
 340
 341	return target->status;
 342}
 343
 344static int srp_send_req(struct srp_target_port *target)
 345{
 346	struct {
 347		struct ib_cm_req_param param;
 348		struct srp_login_req   priv;
 349	} *req = NULL;
 350	int status;
 351
 352	req = kzalloc(sizeof *req, GFP_KERNEL);
 353	if (!req)
 354		return -ENOMEM;
 355
 356	req->param.primary_path 	      = &target->path;
 357	req->param.alternate_path 	      = NULL;
 358	req->param.service_id 		      = target->service_id;
 359	req->param.qp_num 		      = target->qp->qp_num;
 360	req->param.qp_type 		      = target->qp->qp_type;
 361	req->param.private_data 	      = &req->priv;
 362	req->param.private_data_len 	      = sizeof req->priv;
 363	req->param.flow_control 	      = 1;
 364
 365	get_random_bytes(&req->param.starting_psn, 4);
 366	req->param.starting_psn 	     &= 0xffffff;
 367
 368	/*
 369	 * Pick some arbitrary defaults here; we could make these
 370	 * module parameters if anyone cared about setting them.
 371	 */
 372	req->param.responder_resources	      = 4;
 373	req->param.remote_cm_response_timeout = 20;
 374	req->param.local_cm_response_timeout  = 20;
 375	req->param.retry_count 		      = 7;
 376	req->param.rnr_retry_count 	      = 7;
 377	req->param.max_cm_retries 	      = 15;
 378
 379	req->priv.opcode     	= SRP_LOGIN_REQ;
 380	req->priv.tag        	= 0;
 381	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
 382	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
 383					      SRP_BUF_FORMAT_INDIRECT);
 384	/*
 385	 * In the published SRP specification (draft rev. 16a), the
 386	 * port identifier format is 8 bytes of ID extension followed
 387	 * by 8 bytes of GUID.  Older drafts put the two halves in the
 388	 * opposite order, so that the GUID comes first.
 389	 *
 390	 * Targets conforming to these obsolete drafts can be
 391	 * recognized by the I/O Class they report.
 392	 */
 393	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
 394		memcpy(req->priv.initiator_port_id,
 395		       &target->path.sgid.global.interface_id, 8);
 396		memcpy(req->priv.initiator_port_id + 8,
 397		       &target->initiator_ext, 8);
 398		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
 399		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
 400	} else {
 401		memcpy(req->priv.initiator_port_id,
 402		       &target->initiator_ext, 8);
 403		memcpy(req->priv.initiator_port_id + 8,
 404		       &target->path.sgid.global.interface_id, 8);
 405		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
 406		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
 407	}
 408
 409	/*
 410	 * Topspin/Cisco SRP targets will reject our login unless we
 411	 * zero out the first 8 bytes of our initiator port ID and set
 412	 * the second 8 bytes to the local node GUID.
 413	 */
 414	if (srp_target_is_topspin(target)) {
 415		shost_printk(KERN_DEBUG, target->scsi_host,
 416			     PFX "Topspin/Cisco initiator port ID workaround "
 417			     "activated for target GUID %016llx\n",
 418			     (unsigned long long) be64_to_cpu(target->ioc_guid));
 419		memset(req->priv.initiator_port_id, 0, 8);
 420		memcpy(req->priv.initiator_port_id + 8,
 421		       &target->srp_host->srp_dev->dev->node_guid, 8);
 422	}
 423
 424	status = ib_send_cm_req(target->cm_id, &req->param);
 425
 426	kfree(req);
 427
 428	return status;
 429}
 430
 431static void srp_disconnect_target(struct srp_target_port *target)
 432{
 433	/* XXX should send SRP_I_LOGOUT request */
 434
 435	init_completion(&target->done);
 436	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
 437		shost_printk(KERN_DEBUG, target->scsi_host,
 438			     PFX "Sending CM DREQ failed\n");
 439		return;
 440	}
 441	wait_for_completion(&target->done);
 442}
 443
 444static bool srp_change_state(struct srp_target_port *target,
 445			    enum srp_target_state old,
 446			    enum srp_target_state new)
 447{
 448	bool changed = false;
 449
 450	spin_lock_irq(&target->lock);
 451	if (target->state == old) {
 452		target->state = new;
 453		changed = true;
 454	}
 455	spin_unlock_irq(&target->lock);
 456	return changed;
 457}
 458
 459static void srp_free_req_data(struct srp_target_port *target)
 460{
 461	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
 462	struct srp_request *req;
 463	int i;
 464
 465	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
 466		kfree(req->fmr_list);
 467		kfree(req->map_page);
 468		if (req->indirect_dma_addr) {
 469			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
 470					    target->indirect_size,
 471					    DMA_TO_DEVICE);
 472		}
 473		kfree(req->indirect_desc);
 474	}
 475}
 476
 477/**
 478 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 479 * @shost: SCSI host whose attributes to remove from sysfs.
 480 *
 481 * Note: Any attributes defined in the host template and that did not exist
 482 * before invocation of this function will be ignored.
 483 */
 484static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
 485{
 486	struct device_attribute **attr;
 487
 488	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
 489		device_remove_file(&shost->shost_dev, *attr);
 490}
 491
 492static void srp_remove_work(struct work_struct *work)
 493{
 494	struct srp_target_port *target =
 495		container_of(work, struct srp_target_port, work);
 496
 497	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
 498		return;
 499
 500	spin_lock(&target->srp_host->target_lock);
 501	list_del(&target->list);
 502	spin_unlock(&target->srp_host->target_lock);
 503
 504	srp_del_scsi_host_attr(target->scsi_host);
 505	srp_remove_host(target->scsi_host);
 506	scsi_remove_host(target->scsi_host);
 507	ib_destroy_cm_id(target->cm_id);
 508	srp_free_target_ib(target);
 509	srp_free_req_data(target);
 510	scsi_host_put(target->scsi_host);
 511}
 512
 513static int srp_connect_target(struct srp_target_port *target)
 514{
 515	int retries = 3;
 516	int ret;
 517
 518	ret = srp_lookup_path(target);
 519	if (ret)
 520		return ret;
 521
 522	while (1) {
 523		init_completion(&target->done);
 524		ret = srp_send_req(target);
 525		if (ret)
 526			return ret;
 527		wait_for_completion(&target->done);
 528
 529		/*
 530		 * The CM event handling code will set status to
 531		 * SRP_PORT_REDIRECT if we get a port redirect REJ
 532		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
 533		 * redirect REJ back.
 534		 */
 535		switch (target->status) {
 536		case 0:
 537			return 0;
 538
 539		case SRP_PORT_REDIRECT:
 540			ret = srp_lookup_path(target);
 541			if (ret)
 542				return ret;
 543			break;
 544
 545		case SRP_DLID_REDIRECT:
 546			break;
 547
 548		case SRP_STALE_CONN:
 549			/* Our current CM id was stale, and is now in timewait.
 550			 * Try to reconnect with a new one.
 551			 */
 552			if (!retries-- || srp_new_cm_id(target)) {
 553				shost_printk(KERN_ERR, target->scsi_host, PFX
 554					     "giving up on stale connection\n");
 555				target->status = -ECONNRESET;
 556				return target->status;
 557			}
 558
 559			shost_printk(KERN_ERR, target->scsi_host, PFX
 560				     "retrying stale connection\n");
 561			break;
 562
 563		default:
 564			return target->status;
 565		}
 566	}
 567}
 568
 569static void srp_unmap_data(struct scsi_cmnd *scmnd,
 570			   struct srp_target_port *target,
 571			   struct srp_request *req)
 572{
 573	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
 574	struct ib_pool_fmr **pfmr;
 575
 576	if (!scsi_sglist(scmnd) ||
 577	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
 578	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
 579		return;
 580
 581	pfmr = req->fmr_list;
 582	while (req->nfmr--)
 583		ib_fmr_pool_unmap(*pfmr++);
 584
 585	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
 586			scmnd->sc_data_direction);
 587}
 588
 589/**
 590 * srp_claim_req - Take ownership of the scmnd associated with a request.
 591 * @target: SRP target port.
 592 * @req: SRP request.
 593 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 594 *         ownership of @req->scmnd if it equals @scmnd.
 595 *
 596 * Return value:
 597 * Either NULL or a pointer to the SCSI command the caller became owner of.
 598 */
 599static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
 600				       struct srp_request *req,
 601				       struct scsi_cmnd *scmnd)
 602{
 603	unsigned long flags;
 604
 605	spin_lock_irqsave(&target->lock, flags);
 606	if (!scmnd) {
 607		scmnd = req->scmnd;
 608		req->scmnd = NULL;
 609	} else if (req->scmnd == scmnd) {
 610		req->scmnd = NULL;
 611	} else {
 612		scmnd = NULL;
 613	}
 614	spin_unlock_irqrestore(&target->lock, flags);
 615
 616	return scmnd;
 617}
 618
 619/**
 620 * srp_free_req() - Unmap data and add request to the free request list.
 621 */
 622static void srp_free_req(struct srp_target_port *target,
 623			 struct srp_request *req, struct scsi_cmnd *scmnd,
 624			 s32 req_lim_delta)
 625{
 626	unsigned long flags;
 627
 628	srp_unmap_data(scmnd, target, req);
 629
 630	spin_lock_irqsave(&target->lock, flags);
 631	target->req_lim += req_lim_delta;
 632	list_add_tail(&req->list, &target->free_reqs);
 633	spin_unlock_irqrestore(&target->lock, flags);
 634}
 635
 636static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 637{
 638	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
 639
 640	if (scmnd) {
 641		srp_free_req(target, req, scmnd, 0);
 642		scmnd->result = DID_RESET << 16;
 643		scmnd->scsi_done(scmnd);
 644	}
 645}
 646
 647static int srp_reconnect_target(struct srp_target_port *target)
 648{
 649	struct ib_qp_attr qp_attr;
 650	struct ib_wc wc;
 651	int i, ret;
 652
 653	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
 654		return -EAGAIN;
 655
 656	srp_disconnect_target(target);
 657	/*
 658	 * Now get a new local CM ID so that we avoid confusing the
 659	 * target in case things are really fouled up.
 660	 */
 661	ret = srp_new_cm_id(target);
 662	if (ret)
 663		goto err;
 664
 665	qp_attr.qp_state = IB_QPS_RESET;
 666	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
 667	if (ret)
 668		goto err;
 669
 670	ret = srp_init_qp(target, target->qp);
 671	if (ret)
 672		goto err;
 673
 674	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
 675		; /* nothing */
 676	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
 677		; /* nothing */
 678
 679	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 680		struct srp_request *req = &target->req_ring[i];
 681		if (req->scmnd)
 682			srp_reset_req(target, req);
 683	}
 684
 685	INIT_LIST_HEAD(&target->free_tx);
 686	for (i = 0; i < SRP_SQ_SIZE; ++i)
 687		list_add(&target->tx_ring[i]->list, &target->free_tx);
 688
 689	target->qp_in_error = 0;
 690	ret = srp_connect_target(target);
 691	if (ret)
 692		goto err;
 693
 694	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
 695		ret = -EAGAIN;
 696
 697	return ret;
 698
 699err:
 700	shost_printk(KERN_ERR, target->scsi_host,
 701		     PFX "reconnect failed (%d), removing target port.\n", ret);
 702
 703	/*
 704	 * We couldn't reconnect, so kill our target port off.
 705	 * However, we have to defer the real removal because we
 706	 * are in the context of the SCSI error handler now, which
 707	 * will deadlock if we call scsi_remove_host().
 708	 *
 709	 * Schedule our work inside the lock to avoid a race with
 710	 * the flush_scheduled_work() in srp_remove_one().
 711	 */
 712	spin_lock_irq(&target->lock);
 713	if (target->state == SRP_TARGET_CONNECTING) {
 714		target->state = SRP_TARGET_DEAD;
 715		INIT_WORK(&target->work, srp_remove_work);
 716		queue_work(ib_wq, &target->work);
 717	}
 718	spin_unlock_irq(&target->lock);
 719
 720	return ret;
 721}
 722
 723static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
 724			 unsigned int dma_len, u32 rkey)
 725{
 726	struct srp_direct_buf *desc = state->desc;
 727
 728	desc->va = cpu_to_be64(dma_addr);
 729	desc->key = cpu_to_be32(rkey);
 730	desc->len = cpu_to_be32(dma_len);
 731
 732	state->total_len += dma_len;
 733	state->desc++;
 734	state->ndesc++;
 735}
 736
 737static int srp_map_finish_fmr(struct srp_map_state *state,
 738			      struct srp_target_port *target)
 739{
 740	struct srp_device *dev = target->srp_host->srp_dev;
 741	struct ib_pool_fmr *fmr;
 742	u64 io_addr = 0;
 743
 744	if (!state->npages)
 745		return 0;
 746
 747	if (state->npages == 1) {
 748		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
 749			     target->rkey);
 750		state->npages = state->fmr_len = 0;
 751		return 0;
 752	}
 753
 754	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
 755				   state->npages, io_addr);
 756	if (IS_ERR(fmr))
 757		return PTR_ERR(fmr);
 758
 759	*state->next_fmr++ = fmr;
 760	state->nfmr++;
 761
 762	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
 763	state->npages = state->fmr_len = 0;
 764	return 0;
 765}
 766
 767static void srp_map_update_start(struct srp_map_state *state,
 768				 struct scatterlist *sg, int sg_index,
 769				 dma_addr_t dma_addr)
 770{
 771	state->unmapped_sg = sg;
 772	state->unmapped_index = sg_index;
 773	state->unmapped_addr = dma_addr;
 774}
 775
 776static int srp_map_sg_entry(struct srp_map_state *state,
 777			    struct srp_target_port *target,
 778			    struct scatterlist *sg, int sg_index,
 779			    int use_fmr)
 780{
 781	struct srp_device *dev = target->srp_host->srp_dev;
 782	struct ib_device *ibdev = dev->dev;
 783	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
 784	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 785	unsigned int len;
 786	int ret;
 787
 788	if (!dma_len)
 789		return 0;
 790
 791	if (use_fmr == SRP_MAP_NO_FMR) {
 792		/* Once we're in direct map mode for a request, we don't
 793		 * go back to FMR mode, so no need to update anything
 794		 * other than the descriptor.
 795		 */
 796		srp_map_desc(state, dma_addr, dma_len, target->rkey);
 797		return 0;
 798	}
 799
 800	/* If we start at an offset into the FMR page, don't merge into
 801	 * the current FMR. Finish it out, and use the kernel's MR for this
 802	 * sg entry. This is to avoid potential bugs on some SRP targets
 803	 * that were never quite defined, but went away when the initiator
 804	 * avoided using FMR on such page fragments.
 805	 */
 806	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
 807		ret = srp_map_finish_fmr(state, target);
 808		if (ret)
 809			return ret;
 810
 811		srp_map_desc(state, dma_addr, dma_len, target->rkey);
 812		srp_map_update_start(state, NULL, 0, 0);
 813		return 0;
 814	}
 815
 816	/* If this is the first sg to go into the FMR, save our position.
 817	 * We need to know the first unmapped entry, its index, and the
 818	 * first unmapped address within that entry to be able to restart
 819	 * mapping after an error.
 820	 */
 821	if (!state->unmapped_sg)
 822		srp_map_update_start(state, sg, sg_index, dma_addr);
 823
 824	while (dma_len) {
 825		if (state->npages == SRP_FMR_SIZE) {
 826			ret = srp_map_finish_fmr(state, target);
 827			if (ret)
 828				return ret;
 829
 830			srp_map_update_start(state, sg, sg_index, dma_addr);
 831		}
 832
 833		len = min_t(unsigned int, dma_len, dev->fmr_page_size);
 834
 835		if (!state->npages)
 836			state->base_dma_addr = dma_addr;
 837		state->pages[state->npages++] = dma_addr;
 838		state->fmr_len += len;
 839		dma_addr += len;
 840		dma_len -= len;
 841	}
 842
 843	/* If the last entry of the FMR wasn't a full page, then we need to
 844	 * close it out and start a new one -- we can only merge at page
 845	 * boundries.
 846	 */
 847	ret = 0;
 848	if (len != dev->fmr_page_size) {
 849		ret = srp_map_finish_fmr(state, target);
 850		if (!ret)
 851			srp_map_update_start(state, NULL, 0, 0);
 852	}
 853	return ret;
 854}
 855
 856static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 857			struct srp_request *req)
 858{
 859	struct scatterlist *scat, *sg;
 860	struct srp_cmd *cmd = req->cmd->buf;
 861	int i, len, nents, count, use_fmr;
 862	struct srp_device *dev;
 863	struct ib_device *ibdev;
 864	struct srp_map_state state;
 865	struct srp_indirect_buf *indirect_hdr;
 866	u32 table_len;
 867	u8 fmt;
 868
 869	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
 870		return sizeof (struct srp_cmd);
 871
 872	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
 873	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
 874		shost_printk(KERN_WARNING, target->scsi_host,
 875			     PFX "Unhandled data direction %d\n",
 876			     scmnd->sc_data_direction);
 877		return -EINVAL;
 878	}
 879
 880	nents = scsi_sg_count(scmnd);
 881	scat  = scsi_sglist(scmnd);
 882
 883	dev = target->srp_host->srp_dev;
 884	ibdev = dev->dev;
 885
 886	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
 887	if (unlikely(count == 0))
 888		return -EIO;
 889
 890	fmt = SRP_DATA_DESC_DIRECT;
 891	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);
 892
 893	if (count == 1) {
 894		/*
 895		 * The midlayer only generated a single gather/scatter
 896		 * entry, or DMA mapping coalesced everything to a
 897		 * single entry.  So a direct descriptor along with
 898		 * the DMA MR suffices.
 899		 */
 900		struct srp_direct_buf *buf = (void *) cmd->add_data;
 901
 902		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
 903		buf->key = cpu_to_be32(target->rkey);
 904		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 905
 906		req->nfmr = 0;
 907		goto map_complete;
 908	}
 909
 910	/* We have more than one scatter/gather entry, so build our indirect
 911	 * descriptor table, trying to merge as many entries with FMR as we
 912	 * can.
 913	 */
 914	indirect_hdr = (void *) cmd->add_data;
 915
 916	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
 917				   target->indirect_size, DMA_TO_DEVICE);
 918
 919	memset(&state, 0, sizeof(state));
 920	state.desc	= req->indirect_desc;
 921	state.pages	= req->map_page;
 922	state.next_fmr	= req->fmr_list;
 923
 924	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
 925
 926	for_each_sg(scat, sg, count, i) {
 927		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
 928			/* FMR mapping failed, so backtrack to the first
 929			 * unmapped entry and continue on without using FMR.
 930			 */
 931			dma_addr_t dma_addr;
 932			unsigned int dma_len;
 933
 934backtrack:
 935			sg = state.unmapped_sg;
 936			i = state.unmapped_index;
 937
 938			dma_addr = ib_sg_dma_address(ibdev, sg);
 939			dma_len = ib_sg_dma_len(ibdev, sg);
 940			dma_len -= (state.unmapped_addr - dma_addr);
 941			dma_addr = state.unmapped_addr;
 942			use_fmr = SRP_MAP_NO_FMR;
 943			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
 944		}
 945	}
 946
 947	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
 948		goto backtrack;
 949
 950	/* We've mapped the request, now pull as much of the indirect
 951	 * descriptor table as we can into the command buffer. If this
 952	 * target is not using an external indirect table, we are
 953	 * guaranteed to fit into the command, as the SCSI layer won't
 954	 * give us more S/G entries than we allow.
 955	 */
 956	req->nfmr = state.nfmr;
 957	if (state.ndesc == 1) {
 958		/* FMR mapping was able to collapse this to one entry,
 959		 * so use a direct descriptor.
 960		 */
 961		struct srp_direct_buf *buf = (void *) cmd->add_data;
 962
 963		*buf = req->indirect_desc[0];
 964		goto map_complete;
 965	}
 966
 967	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
 968						!target->allow_ext_sg)) {
 969		shost_printk(KERN_ERR, target->scsi_host,
 970			     "Could not fit S/G list into SRP_CMD\n");
 971		return -EIO;
 972	}
 973
 974	count = min(state.ndesc, target->cmd_sg_cnt);
 975	table_len = state.ndesc * sizeof (struct srp_direct_buf);
 976
 977	fmt = SRP_DATA_DESC_INDIRECT;
 978	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
 979	len += count * sizeof (struct srp_direct_buf);
 980
 981	memcpy(indirect_hdr->desc_list, req->indirect_desc,
 982	       count * sizeof (struct srp_direct_buf));
 983
 984	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
 985	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
 986	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
 987	indirect_hdr->len = cpu_to_be32(state.total_len);
 988
 989	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
 990		cmd->data_out_desc_cnt = count;
 991	else
 992		cmd->data_in_desc_cnt = count;
 993
 994	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
 995				      DMA_TO_DEVICE);
 996
 997map_complete:
 998	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
 999		cmd->buf_fmt = fmt << 4;
1000	else
1001		cmd->buf_fmt = fmt;
1002
1003	return len;
1004}
1005
1006/*
1007 * Return an IU and possible credit to the free pool
1008 */
1009static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
1010			  enum srp_iu_type iu_type)
1011{
1012	unsigned long flags;
1013
1014	spin_lock_irqsave(&target->lock, flags);
1015	list_add(&iu->list, &target->free_tx);
1016	if (iu_type != SRP_IU_RSP)
1017		++target->req_lim;
1018	spin_unlock_irqrestore(&target->lock, flags);
1019}
1020
1021/*
1022 * Must be called with target->lock held to protect req_lim and free_tx.
1023 * If IU is not sent, it must be returned using srp_put_tx_iu().
1024 *
1025 * Note:
1026 * An upper limit for the number of allocated information units for each
1027 * request type is:
1028 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1029 *   more than Scsi_Host.can_queue requests.
1030 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1031 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1032 *   one unanswered SRP request to an initiator.
1033 */
1034static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
1035				      enum srp_iu_type iu_type)
1036{
1037	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1038	struct srp_iu *iu;
1039
1040	srp_send_completion(target->send_cq, target);
1041
1042	if (list_empty(&target->free_tx))
1043		return NULL;
1044
1045	/* Initiator responses to target requests do not consume credits */
1046	if (iu_type != SRP_IU_RSP) {
1047		if (target->req_lim <= rsv) {
1048			++target->zero_req_lim;
1049			return NULL;
1050		}
1051
1052		--target->req_lim;
1053	}
1054
1055	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
1056	list_del(&iu->list);
1057	return iu;
1058}
1059
1060static int srp_post_send(struct srp_target_port *target,
1061			 struct srp_iu *iu, int len)
1062{
1063	struct ib_sge list;
1064	struct ib_send_wr wr, *bad_wr;
1065
1066	list.addr   = iu->dma;
1067	list.length = len;
1068	list.lkey   = target->lkey;
1069
1070	wr.next       = NULL;
1071	wr.wr_id      = (uintptr_t) iu;
1072	wr.sg_list    = &list;
1073	wr.num_sge    = 1;
1074	wr.opcode     = IB_WR_SEND;
1075	wr.send_flags = IB_SEND_SIGNALED;
1076
1077	return ib_post_send(target->qp, &wr, &bad_wr);
1078}
1079
1080static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
1081{
1082	struct ib_recv_wr wr, *bad_wr;
1083	struct ib_sge list;
1084
1085	list.addr   = iu->dma;
1086	list.length = iu->size;
1087	list.lkey   = target->lkey;
1088
1089	wr.next     = NULL;
1090	wr.wr_id    = (uintptr_t) iu;
1091	wr.sg_list  = &list;
1092	wr.num_sge  = 1;
1093
1094	return ib_post_recv(target->qp, &wr, &bad_wr);
1095}
1096
1097static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
1098{
1099	struct srp_request *req;
1100	struct scsi_cmnd *scmnd;
1101	unsigned long flags;
1102
1103	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1104		spin_lock_irqsave(&target->lock, flags);
1105		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
1106		spin_unlock_irqrestore(&target->lock, flags);
1107
1108		target->tsk_mgmt_status = -1;
1109		if (be32_to_cpu(rsp->resp_data_len) >= 4)
1110			target->tsk_mgmt_status = rsp->data[3];
1111		complete(&target->tsk_mgmt_done);
1112	} else {
1113		req = &target->req_ring[rsp->tag];
1114		scmnd = srp_claim_req(target, req, NULL);
1115		if (!scmnd) {
1116			shost_printk(KERN_ERR, target->scsi_host,
1117				     "Null scmnd for RSP w/tag %016llx\n",
1118				     (unsigned long long) rsp->tag);
1119
1120			spin_lock_irqsave(&target->lock, flags);
1121			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
1122			spin_unlock_irqrestore(&target->lock, flags);
1123
1124			return;
1125		}
1126		scmnd->result = rsp->status;
1127
1128		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1129			memcpy(scmnd->sense_buffer, rsp->data +
1130			       be32_to_cpu(rsp->resp_data_len),
1131			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1132				     SCSI_SENSE_BUFFERSIZE));
1133		}
1134
1135		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
1136			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1137		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
1138			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1139
1140		srp_free_req(target, req, scmnd,
1141			     be32_to_cpu(rsp->req_lim_delta));
1142
1143		scmnd->host_scribble = NULL;
1144		scmnd->scsi_done(scmnd);
1145	}
1146}
1147
1148static int srp_response_common(struct srp_target_port *target, s32 req_delta,
1149			       void *rsp, int len)
1150{
1151	struct ib_device *dev = target->srp_host->srp_dev->dev;
1152	unsigned long flags;
1153	struct srp_iu *iu;
1154	int err;
1155
1156	spin_lock_irqsave(&target->lock, flags);
1157	target->req_lim += req_delta;
1158	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
1159	spin_unlock_irqrestore(&target->lock, flags);
1160
1161	if (!iu) {
1162		shost_printk(KERN_ERR, target->scsi_host, PFX
1163			     "no IU available to send response\n");
1164		return 1;
1165	}
1166
1167	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1168	memcpy(iu->buf, rsp, len);
1169	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1170
1171	err = srp_post_send(target, iu, len);
1172	if (err) {
1173		shost_printk(KERN_ERR, target->scsi_host, PFX
1174			     "unable to post response: %d\n", err);
1175		srp_put_tx_iu(target, iu, SRP_IU_RSP);
1176	}
1177
1178	return err;
1179}
1180
1181static void srp_process_cred_req(struct srp_target_port *target,
1182				 struct srp_cred_req *req)
1183{
1184	struct srp_cred_rsp rsp = {
1185		.opcode = SRP_CRED_RSP,
1186		.tag = req->tag,
1187	};
1188	s32 delta = be32_to_cpu(req->req_lim_delta);
1189
1190	if (srp_response_common(target, delta, &rsp, sizeof rsp))
1191		shost_printk(KERN_ERR, target->scsi_host, PFX
1192			     "problems processing SRP_CRED_REQ\n");
1193}
1194
1195static void srp_process_aer_req(struct srp_target_port *target,
1196				struct srp_aer_req *req)
1197{
1198	struct srp_aer_rsp rsp = {
1199		.opcode = SRP_AER_RSP,
1200		.tag = req->tag,
1201	};
1202	s32 delta = be32_to_cpu(req->req_lim_delta);
1203
1204	shost_printk(KERN_ERR, target->scsi_host, PFX
1205		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1206
1207	if (srp_response_common(target, delta, &rsp, sizeof rsp))
1208		shost_printk(KERN_ERR, target->scsi_host, PFX
1209			     "problems processing SRP_AER_REQ\n");
1210}
1211
1212static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1213{
1214	struct ib_device *dev = target->srp_host->srp_dev->dev;
1215	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1216	int res;
1217	u8 opcode;
1218
1219	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1220				   DMA_FROM_DEVICE);
1221
1222	opcode = *(u8 *) iu->buf;
1223
1224	if (0) {
1225		shost_printk(KERN_ERR, target->scsi_host,
1226			     PFX "recv completion, opcode 0x%02x\n", opcode);
1227		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1228			       iu->buf, wc->byte_len, true);
1229	}
1230
1231	switch (opcode) {
1232	case SRP_RSP:
1233		srp_process_rsp(target, iu->buf);
1234		break;
1235
1236	case SRP_CRED_REQ:
1237		srp_process_cred_req(target, iu->buf);
1238		break;
1239
1240	case SRP_AER_REQ:
1241		srp_process_aer_req(target, iu->buf);
1242		break;
1243
1244	case SRP_T_LOGOUT:
1245		/* XXX Handle target logout */
1246		shost_printk(KERN_WARNING, target->scsi_host,
1247			     PFX "Got target logout request\n");
1248		break;
1249
1250	default:
1251		shost_printk(KERN_WARNING, target->scsi_host,
1252			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1253		break;
1254	}
1255
1256	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1257				      DMA_FROM_DEVICE);
1258
1259	res = srp_post_recv(target, iu);
1260	if (res != 0)
1261		shost_printk(KERN_ERR, target->scsi_host,
1262			     PFX "Recv failed with error code %d\n", res);
1263}
1264
1265static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
1266{
1267	struct srp_target_port *target = target_ptr;
1268	struct ib_wc wc;
1269
1270	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1271	while (ib_poll_cq(cq, 1, &wc) > 0) {
1272		if (wc.status) {
1273			shost_printk(KERN_ERR, target->scsi_host,
1274				     PFX "failed receive status %d\n",
1275				     wc.status);
1276			target->qp_in_error = 1;
1277			break;
1278		}
1279
1280		srp_handle_recv(target, &wc);
1281	}
1282}
1283
1284static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1285{
1286	struct srp_target_port *target = target_ptr;
1287	struct ib_wc wc;
1288	struct srp_iu *iu;
1289
1290	while (ib_poll_cq(cq, 1, &wc) > 0) {
1291		if (wc.status) {
1292			shost_printk(KERN_ERR, target->scsi_host,
1293				     PFX "failed send status %d\n",
1294				     wc.status);
1295			target->qp_in_error = 1;
1296			break;
1297		}
1298
1299		iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1300		list_add(&iu->list, &target->free_tx);
1301	}
1302}
1303
1304static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1305{
1306	struct srp_target_port *target = host_to_target(shost);
1307	struct srp_request *req;
1308	struct srp_iu *iu;
1309	struct srp_cmd *cmd;
1310	struct ib_device *dev;
1311	unsigned long flags;
1312	int len;
1313
1314	if (target->state == SRP_TARGET_CONNECTING)
1315		goto err;
1316
1317	if (target->state == SRP_TARGET_DEAD ||
1318	    target->state == SRP_TARGET_REMOVED) {
1319		scmnd->result = DID_BAD_TARGET << 16;
1320		scmnd->scsi_done(scmnd);
1321		return 0;
1322	}
1323
1324	spin_lock_irqsave(&target->lock, flags);
1325	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
1326	if (!iu)
1327		goto err_unlock;
1328
1329	req = list_first_entry(&target->free_reqs, struct srp_request, list);
1330	list_del(&req->list);
1331	spin_unlock_irqrestore(&target->lock, flags);
1332
1333	dev = target->srp_host->srp_dev->dev;
1334	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
1335				   DMA_TO_DEVICE);
1336
1337	scmnd->result        = 0;
1338	scmnd->host_scribble = (void *) req;
1339
1340	cmd = iu->buf;
1341	memset(cmd, 0, sizeof *cmd);
1342
1343	cmd->opcode = SRP_CMD;
1344	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
1345	cmd->tag    = req->index;
1346	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1347
1348	req->scmnd    = scmnd;
1349	req->cmd      = iu;
1350
1351	len = srp_map_data(scmnd, target, req);
1352	if (len < 0) {
1353		shost_printk(KERN_ERR, target->scsi_host,
1354			     PFX "Failed to map data\n");
1355		goto err_iu;
1356	}
1357
1358	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
1359				      DMA_TO_DEVICE);
1360
1361	if (srp_post_send(target, iu, len)) {
1362		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
1363		goto err_unmap;
1364	}
1365
1366	return 0;
1367
1368err_unmap:
1369	srp_unmap_data(scmnd, target, req);
1370
1371err_iu:
1372	srp_put_tx_iu(target, iu, SRP_IU_CMD);
1373
1374	spin_lock_irqsave(&target->lock, flags);
1375	list_add(&req->list, &target->free_reqs);
1376
1377err_unlock:
1378	spin_unlock_irqrestore(&target->lock, flags);
1379
1380err:
1381	return SCSI_MLQUEUE_HOST_BUSY;
1382}
1383
1384static int srp_alloc_iu_bufs(struct srp_target_port *target)
1385{
1386	int i;
1387
1388	for (i = 0; i < SRP_RQ_SIZE; ++i) {
1389		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1390						  target->max_ti_iu_len,
1391						  GFP_KERNEL, DMA_FROM_DEVICE);
1392		if (!target->rx_ring[i])
1393			goto err;
1394	}
1395
1396	for (i = 0; i < SRP_SQ_SIZE; ++i) {
1397		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
1398						  target->max_iu_len,
1399						  GFP_KERNEL, DMA_TO_DEVICE);
1400		if (!target->tx_ring[i])
1401			goto err;
1402
1403		list_add(&target->tx_ring[i]->list, &target->free_tx);
1404	}
1405
1406	return 0;
1407
1408err:
1409	for (i = 0; i < SRP_RQ_SIZE; ++i) {
1410		srp_free_iu(target->srp_host, target->rx_ring[i]);
1411		target->rx_ring[i] = NULL;
1412	}
1413
1414	for (i = 0; i < SRP_SQ_SIZE; ++i) {
1415		srp_free_iu(target->srp_host, target->tx_ring[i]);
1416		target->tx_ring[i] = NULL;
1417	}
1418
1419	return -ENOMEM;
1420}
1421
1422static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1423			       struct srp_login_rsp *lrsp,
1424			       struct srp_target_port *target)
1425{
1426	struct ib_qp_attr *qp_attr = NULL;
1427	int attr_mask = 0;
1428	int ret;
1429	int i;
1430
1431	if (lrsp->opcode == SRP_LOGIN_RSP) {
1432		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1433		target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
1434
1435		/*
1436		 * Reserve credits for task management so we don't
1437		 * bounce requests back to the SCSI mid-layer.
1438		 */
1439		target->scsi_host->can_queue
1440			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1441			      target->scsi_host->can_queue);
1442	} else {
1443		shost_printk(KERN_WARNING, target->scsi_host,
1444			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1445		ret = -ECONNRESET;
1446		goto error;
1447	}
1448
1449	if (!target->rx_ring[0]) {
1450		ret = srp_alloc_iu_bufs(target);
1451		if (ret)
1452			goto error;
1453	}
1454
1455	ret = -ENOMEM;
1456	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1457	if (!qp_attr)
1458		goto error;
1459
1460	qp_attr->qp_state = IB_QPS_RTR;
1461	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1462	if (ret)
1463		goto error_free;
1464
1465	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1466	if (ret)
1467		goto error_free;
1468
1469	for (i = 0; i < SRP_RQ_SIZE; i++) {
1470		struct srp_iu *iu = target->rx_ring[i];
1471		ret = srp_post_recv(target, iu);
1472		if (ret)
1473			goto error_free;
1474	}
1475
1476	qp_attr->qp_state = IB_QPS_RTS;
1477	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1478	if (ret)
1479		goto error_free;
1480
1481	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1482	if (ret)
1483		goto error_free;
1484
1485	ret = ib_send_cm_rtu(cm_id, NULL, 0);
1486
1487error_free:
1488	kfree(qp_attr);
1489
1490error:
1491	target->status = ret;
1492}
1493
1494static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1495			       struct ib_cm_event *event,
1496			       struct srp_target_port *target)
1497{
1498	struct Scsi_Host *shost = target->scsi_host;
1499	struct ib_class_port_info *cpi;
1500	int opcode;
1501
1502	switch (event->param.rej_rcvd.reason) {
1503	case IB_CM_REJ_PORT_CM_REDIRECT:
1504		cpi = event->param.rej_rcvd.ari;
1505		target->path.dlid = cpi->redirect_lid;
1506		target->path.pkey = cpi->redirect_pkey;
1507		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1508		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1509
1510		target->status = target->path.dlid ?
1511			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1512		break;
1513
1514	case IB_CM_REJ_PORT_REDIRECT:
1515		if (srp_target_is_topspin(target)) {
1516			/*
1517			 * Topspin/Cisco SRP gateways incorrectly send
1518			 * reject reason code 25 when they mean 24
1519			 * (port redirect).
1520			 */
1521			memcpy(target->path.dgid.raw,
1522			       event->param.rej_rcvd.ari, 16);
1523
1524			shost_printk(KERN_DEBUG, shost,
1525				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1526				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1527				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
1528
1529			target->status = SRP_PORT_REDIRECT;
1530		} else {
1531			shost_printk(KERN_WARNING, shost,
1532				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
1533			target->status = -ECONNRESET;
1534		}
1535		break;
1536
1537	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
1538		shost_printk(KERN_WARNING, shost,
1539			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
1540		target->status = -ECONNRESET;
1541		break;
1542
1543	case IB_CM_REJ_CONSUMER_DEFINED:
1544		opcode = *(u8 *) event->private_data;
1545		if (opcode == SRP_LOGIN_REJ) {
1546			struct srp_login_rej *rej = event->private_data;
1547			u32 reason = be32_to_cpu(rej->reason);
1548
1549			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
1550				shost_printk(KERN_WARNING, shost,
1551					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
1552			else
1553				shost_printk(KERN_WARNING, shost,
1554					    PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
1555		} else
1556			shost_printk(KERN_WARNING, shost,
1557				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1558				     " opcode 0x%02x\n", opcode);
1559		target->status = -ECONNRESET;
1560		break;
1561
1562	case IB_CM_REJ_STALE_CONN:
1563		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
1564		target->status = SRP_STALE_CONN;
1565		break;
1566
1567	default:
1568		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
1569			     event->param.rej_rcvd.reason);
1570		target->status = -ECONNRESET;
1571	}
1572}
1573
1574static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1575{
1576	struct srp_target_port *target = cm_id->context;
1577	int comp = 0;
1578
1579	switch (event->event) {
1580	case IB_CM_REQ_ERROR:
1581		shost_printk(KERN_DEBUG, target->scsi_host,
1582			     PFX "Sending CM REQ failed\n");
1583		comp = 1;
1584		target->status = -ECONNRESET;
1585		break;
1586
1587	case IB_CM_REP_RECEIVED:
1588		comp = 1;
1589		srp_cm_rep_handler(cm_id, event->private_data, target);
1590		break;
1591
1592	case IB_CM_REJ_RECEIVED:
1593		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
1594		comp = 1;
1595
1596		srp_cm_rej_handler(cm_id, event, target);
1597		break;
1598
1599	case IB_CM_DREQ_RECEIVED:
1600		shost_printk(KERN_WARNING, target->scsi_host,
1601			     PFX "DREQ received - connection closed\n");
1602		if (ib_send_cm_drep(cm_id, NULL, 0))
1603			shost_printk(KERN_ERR, target->scsi_host,
1604				     PFX "Sending CM DREP failed\n");
1605		break;
1606
1607	case IB_CM_TIMEWAIT_EXIT:
1608		shost_printk(KERN_ERR, target->scsi_host,
1609			     PFX "connection closed\n");
1610
1611		comp = 1;
1612		target->status = 0;
1613		break;
1614
1615	case IB_CM_MRA_RECEIVED:
1616	case IB_CM_DREQ_ERROR:
1617	case IB_CM_DREP_RECEIVED:
1618		break;
1619
1620	default:
1621		shost_printk(KERN_WARNING, target->scsi_host,
1622			     PFX "Unhandled CM event %d\n", event->event);
1623		break;
1624	}
1625
1626	if (comp)
1627		complete(&target->done);
1628
1629	return 0;
1630}
1631
1632static int srp_send_tsk_mgmt(struct srp_target_port *target,
1633			     u64 req_tag, unsigned int lun, u8 func)
1634{
1635	struct ib_device *dev = target->srp_host->srp_dev->dev;
1636	struct srp_iu *iu;
1637	struct srp_tsk_mgmt *tsk_mgmt;
1638
1639	if (target->state == SRP_TARGET_DEAD ||
1640	    target->state == SRP_TARGET_REMOVED)
1641		return -1;
1642
1643	init_completion(&target->tsk_mgmt_done);
1644
1645	spin_lock_irq(&target->lock);
1646	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
1647	spin_unlock_irq(&target->lock);
1648
1649	if (!iu)
1650		return -1;
1651
1652	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1653				   DMA_TO_DEVICE);
1654	tsk_mgmt = iu->buf;
1655	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1656
1657	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
1658	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
1659	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
1660	tsk_mgmt->tsk_mgmt_func = func;
1661	tsk_mgmt->task_tag	= req_tag;
1662
1663	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1664				      DMA_TO_DEVICE);
1665	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
1666		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
1667		return -1;
1668	}
1669
1670	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
1671					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1672		return -1;
1673
1674	return 0;
1675}
1676
1677static int srp_abort(struct scsi_cmnd *scmnd)
1678{
1679	struct srp_target_port *target = host_to_target(scmnd->device->host);
1680	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
1681
1682	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
1683
1684	if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
1685		return FAILED;
1686	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
1687			  SRP_TSK_ABORT_TASK);
1688	srp_free_req(target, req, scmnd, 0);
1689	scmnd->result = DID_ABORT << 16;
1690	scmnd->scsi_done(scmnd);
1691
1692	return SUCCESS;
1693}
1694
1695static int srp_reset_device(struct scsi_cmnd *scmnd)
1696{
1697	struct srp_target_port *target = host_to_target(scmnd->device->host);
1698	int i;
1699
1700	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
1701
1702	if (target->qp_in_error)
1703		return FAILED;
1704	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
1705			      SRP_TSK_LUN_RESET))
1706		return FAILED;
1707	if (target->tsk_mgmt_status)
1708		return FAILED;
1709
1710	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
1711		struct srp_request *req = &target->req_ring[i];
1712		if (req->scmnd && req->scmnd->device == scmnd->device)
1713			srp_reset_req(target, req);
1714	}
1715
1716	return SUCCESS;
1717}
1718
1719static int srp_reset_host(struct scsi_cmnd *scmnd)
1720{
1721	struct srp_target_port *target = host_to_target(scmnd->device->host);
1722	int ret = FAILED;
1723
1724	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
1725
1726	if (!srp_reconnect_target(target))
1727		ret = SUCCESS;
1728
1729	return ret;
1730}
1731
1732static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
1733			   char *buf)
1734{
1735	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1736
1737	return sprintf(buf, "0x%016llx\n",
1738		       (unsigned long long) be64_to_cpu(target->id_ext));
1739}
1740
1741static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
1742			     char *buf)
1743{
1744	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1745
1746	return sprintf(buf, "0x%016llx\n",
1747		       (unsigned long long) be64_to_cpu(target->ioc_guid));
1748}
1749
1750static ssize_t show_service_id(struct device *dev,
1751			       struct device_attribute *attr, char *buf)
1752{
1753	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1754
1755	return sprintf(buf, "0x%016llx\n",
1756		       (unsigned long long) be64_to_cpu(target->service_id));
1757}
1758
1759static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
1760			 char *buf)
1761{
1762	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1763
1764	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
1765}
1766
1767static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
1768			 char *buf)
1769{
1770	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1771
1772	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
1773}
1774
1775static ssize_t show_orig_dgid(struct device *dev,
1776			      struct device_attribute *attr, char *buf)
1777{
1778	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1779
1780	return sprintf(buf, "%pI6\n", target->orig_dgid);
1781}
1782
1783static ssize_t show_req_lim(struct device *dev,
1784			    struct device_attribute *attr, char *buf)
1785{
1786	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1787
1788	return sprintf(buf, "%d\n", target->req_lim);
1789}
1790
1791static ssize_t show_zero_req_lim(struct device *dev,
1792				 struct device_attribute *attr, char *buf)
1793{
1794	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1795
1796	return sprintf(buf, "%d\n", target->zero_req_lim);
1797}
1798
1799static ssize_t show_local_ib_port(struct device *dev,
1800				  struct device_attribute *attr, char *buf)
1801{
1802	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1803
1804	return sprintf(buf, "%d\n", target->srp_host->port);
1805}
1806
1807static ssize_t show_local_ib_device(struct device *dev,
1808				    struct device_attribute *attr, char *buf)
1809{
1810	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1811
1812	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
1813}
1814
1815static ssize_t show_cmd_sg_entries(struct device *dev,
1816				   struct device_attribute *attr, char *buf)
1817{
1818	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1819
1820	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
1821}
1822
1823static ssize_t show_allow_ext_sg(struct device *dev,
1824				 struct device_attribute *attr, char *buf)
1825{
1826	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1827
1828	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
1829}
1830
1831static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
1832static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
1833static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
1834static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
1835static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
1836static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
1837static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
1838static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
1839static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
1840static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
1841static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
1842static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
1843
1844static struct device_attribute *srp_host_attrs[] = {
1845	&dev_attr_id_ext,
1846	&dev_attr_ioc_guid,
1847	&dev_attr_service_id,
1848	&dev_attr_pkey,
1849	&dev_attr_dgid,
1850	&dev_attr_orig_dgid,
1851	&dev_attr_req_lim,
1852	&dev_attr_zero_req_lim,
1853	&dev_attr_local_ib_port,
1854	&dev_attr_local_ib_device,
1855	&dev_attr_cmd_sg_entries,
1856	&dev_attr_allow_ext_sg,
1857	NULL
1858};
1859
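/*
 * SCSI midlayer host template.  queuecommand issues SRP requests and the
 * eh_* handlers implement SCSI error recovery; sg_tablesize and cmd_per_lun
 * are defaults that may be overridden per target via add_target options.
 */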
1860static struct scsi_host_template srp_template = {
1861	.module				= THIS_MODULE,
1862	.name				= "InfiniBand SRP initiator",
1863	.proc_name			= DRV_NAME,
1864	.info				= srp_target_info,
1865	.queuecommand			= srp_queuecommand,
1866	.eh_abort_handler		= srp_abort,
1867	.eh_device_reset_handler	= srp_reset_device,
1868	.eh_host_reset_handler		= srp_reset_host,
1869	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
1870	.can_queue			= SRP_CMD_SQ_SIZE,
1871	.this_id			= -1,
1872	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
1873	.use_clustering			= ENABLE_CLUSTERING,
1874	.shost_attrs			= srp_host_attrs
1875};
1876
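/*
 * Register a freshly connected target port with the SCSI midlayer and the
 * SRP transport class, add it to the owning srp_host's target list, mark it
 * live and kick off a LUN scan.
 */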
1877static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
1878{
1879	struct srp_rport_identifiers ids;
1880	struct srp_rport *rport;
1881
1882	sprintf(target->target_name, "SRP.T10:%016llX",
1883		 (unsigned long long) be64_to_cpu(target->id_ext));
1884
1885	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
1886		return -ENODEV;
1887
1888	memcpy(ids.port_id, &target->id_ext, 8);
1889	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
1890	ids.roles = SRP_RPORT_ROLE_TARGET;
1891	rport = srp_rport_add(target->scsi_host, &ids);
1892	if (IS_ERR(rport)) {
1893		scsi_remove_host(target->scsi_host);
1894		return PTR_ERR(rport);
1895	}
1896
1897	spin_lock(&host->target_lock);
1898	list_add_tail(&target->list, &host->target_list);
1899	spin_unlock(&host->target_lock);
1900
1901	target->state = SRP_TARGET_LIVE;
1902
1903	scsi_scan_target(&target->scsi_host->shost_gendev,
1904			 0, target->scsi_id, SCAN_WILD_CARD, 0);
1905
1906	return 0;
1907}
1908
1909static void srp_release_dev(struct device *dev)
1910{
1911	struct srp_host *host =
1912		container_of(dev, struct srp_host, dev);
1913
1914	complete(&host->released);
1915}
1916
1917static struct class srp_class = {
1918	.name    = "infiniband_srp",
1919	.dev_release = srp_release_dev
1920};
1921
1922/*
1923 * Target ports are added by writing
1924 *
1925 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
1926 *     pkey=<P_Key>,service_id=<service ID>
1927 *
1928 * to the add_target sysfs attribute.
1929 */
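/*
 * For example, assuming the HCA port was registered as srp-<ibdev>-<port>
 * (see srp_add_port() below), a target port can be added from user space
 * with something like:
 *
 *     echo id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,pkey=<P_Key>,service_id=<service ID> \
 *         > /sys/class/infiniband_srp/srp-<ibdev>-<port>/add_target
 */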
1930enum {
1931	SRP_OPT_ERR		= 0,
1932	SRP_OPT_ID_EXT		= 1 << 0,
1933	SRP_OPT_IOC_GUID	= 1 << 1,
1934	SRP_OPT_DGID		= 1 << 2,
1935	SRP_OPT_PKEY		= 1 << 3,
1936	SRP_OPT_SERVICE_ID	= 1 << 4,
1937	SRP_OPT_MAX_SECT	= 1 << 5,
1938	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
1939	SRP_OPT_IO_CLASS	= 1 << 7,
1940	SRP_OPT_INITIATOR_EXT	= 1 << 8,
1941	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
1942	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
1943	SRP_OPT_SG_TABLESIZE	= 1 << 11,
1944	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
1945				   SRP_OPT_IOC_GUID	|
1946				   SRP_OPT_DGID		|
1947				   SRP_OPT_PKEY		|
1948				   SRP_OPT_SERVICE_ID),
1949};
1950
1951static const match_table_t srp_opt_tokens = {
1952	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
1953	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
1954	{ SRP_OPT_DGID,			"dgid=%s" 		},
1955	{ SRP_OPT_PKEY,			"pkey=%x" 		},
1956	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
1957	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
1958	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
1959	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
1960	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
1961	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
1962	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
1963	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
1964	{ SRP_OPT_ERR,			NULL 			}
1965};
1966
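/*
 * Parse the comma-separated option string written to add_target and fill in
 * *target accordingly.  Returns 0 only when all of the mandatory options in
 * SRP_OPT_ALL have been supplied, -EINVAL on malformed input and -ENOMEM if
 * an allocation fails.
 */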
1967static int srp_parse_options(const char *buf, struct srp_target_port *target)
1968{
1969	char *options, *sep_opt;
1970	char *p;
1971	char dgid[3];
1972	substring_t args[MAX_OPT_ARGS];
1973	int opt_mask = 0;
1974	int token;
1975	int ret = -EINVAL;
1976	int i;
1977
1978	options = kstrdup(buf, GFP_KERNEL);
1979	if (!options)
1980		return -ENOMEM;
1981
1982	sep_opt = options;
1983	while ((p = strsep(&sep_opt, ",")) != NULL) {
1984		if (!*p)
1985			continue;
1986
1987		token = match_token(p, srp_opt_tokens, args);
1988		opt_mask |= token;
1989
1990		switch (token) {
1991		case SRP_OPT_ID_EXT:
1992			p = match_strdup(args);
1993			if (!p) {
1994				ret = -ENOMEM;
1995				goto out;
1996			}
1997			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
1998			kfree(p);
1999			break;
2000
2001		case SRP_OPT_IOC_GUID:
2002			p = match_strdup(args);
2003			if (!p) {
2004				ret = -ENOMEM;
2005				goto out;
2006			}
2007			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2008			kfree(p);
2009			break;
2010
2011		case SRP_OPT_DGID:
2012			p = match_strdup(args);
2013			if (!p) {
2014				ret = -ENOMEM;
2015				goto out;
2016			}
2017			if (strlen(p) != 32) {
2018				pr_warn("bad dest GID parameter '%s'\n", p);
2019				kfree(p);
2020				goto out;
2021			}
2022
2023			for (i = 0; i < 16; ++i) {
2024				strlcpy(dgid, p + i * 2, 3);
2025				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2026			}
2027			kfree(p);
2028			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
2029			break;
2030
2031		case SRP_OPT_PKEY:
2032			if (match_hex(args, &token)) {
2033				pr_warn("bad P_Key parameter '%s'\n", p);
2034				goto out;
2035			}
2036			target->path.pkey = cpu_to_be16(token);
2037			break;
2038
2039		case SRP_OPT_SERVICE_ID:
2040			p = match_strdup(args);
2041			if (!p) {
2042				ret = -ENOMEM;
2043				goto out;
2044			}
2045			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2046			target->path.service_id = target->service_id;
2047			kfree(p);
2048			break;
2049
2050		case SRP_OPT_MAX_SECT:
2051			if (match_int(args, &token)) {
2052				pr_warn("bad max sect parameter '%s'\n", p);
2053				goto out;
2054			}
2055			target->scsi_host->max_sectors = token;
2056			break;
2057
2058		case SRP_OPT_MAX_CMD_PER_LUN:
2059			if (match_int(args, &token)) {
2060				pr_warn("bad max cmd_per_lun parameter '%s'\n",
2061					p);
2062				goto out;
2063			}
2064			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
2065			break;
2066
2067		case SRP_OPT_IO_CLASS:
2068			if (match_hex(args, &token)) {
2069				pr_warn("bad IO class parameter '%s'\n", p);
2070				goto out;
2071			}
2072			if (token != SRP_REV10_IB_IO_CLASS &&
2073			    token != SRP_REV16A_IB_IO_CLASS) {
2074				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2075					token, SRP_REV10_IB_IO_CLASS,
2076					SRP_REV16A_IB_IO_CLASS);
2077				goto out;
2078			}
2079			target->io_class = token;
2080			break;
2081
2082		case SRP_OPT_INITIATOR_EXT:
2083			p = match_strdup(args);
2084			if (!p) {
2085				ret = -ENOMEM;
2086				goto out;
2087			}
2088			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2089			kfree(p);
2090			break;
2091
2092		case SRP_OPT_CMD_SG_ENTRIES:
2093			if (match_int(args, &token) || token < 1 || token > 255) {
2094				pr_warn("bad cmd_sg_entries parameter '%s'\n",
2095					p);
2096				goto out;
2097			}
2098			target->cmd_sg_cnt = token;
2099			break;
2100
2101		case SRP_OPT_ALLOW_EXT_SG:
2102			if (match_int(args, &token)) {
2103				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
2104				goto out;
2105			}
2106			target->allow_ext_sg = !!token;
2107			break;
2108
2109		case SRP_OPT_SG_TABLESIZE:
2110			if (match_int(args, &token) || token < 1 ||
2111					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
2112				pr_warn("bad sg_tablesize parameter '%s'\n",
2113					p);
2114				goto out;
2115			}
2116			target->sg_tablesize = token;
2117			break;
2118
2119		default:
2120			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2121				p);
2122			goto out;
2123		}
2124	}
2125
2126	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2127		ret = 0;
2128	else
2129		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2130			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2131			    !(srp_opt_tokens[i].token & opt_mask))
2132				pr_warn("target creation request is missing parameter '%s'\n",
2133					srp_opt_tokens[i].pattern);
2134
2135out:
2136	kfree(options);
2137	return ret;
2138}
2139
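/*
 * Handler for writes to the add_target attribute: allocate a Scsi_Host and
 * srp_target_port, parse the option string, set up the per-request FMR
 * lists, page arrays and DMA-mapped indirect descriptor tables, create the
 * IB resources and CM ID, connect to the target and register it with the
 * SCSI midlayer.  Returns the number of bytes consumed on success or a
 * negative errno on failure.
 */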
2140static ssize_t srp_create_target(struct device *dev,
2141				 struct device_attribute *attr,
2142				 const char *buf, size_t count)
2143{
2144	struct srp_host *host =
2145		container_of(dev, struct srp_host, dev);
2146	struct Scsi_Host *target_host;
2147	struct srp_target_port *target;
2148	struct ib_device *ibdev = host->srp_dev->dev;
2149	dma_addr_t dma_addr;
2150	int i, ret;
2151
2152	target_host = scsi_host_alloc(&srp_template,
2153				      sizeof (struct srp_target_port));
2154	if (!target_host)
2155		return -ENOMEM;
2156
2157	target_host->transportt  = ib_srp_transport_template;
2158	target_host->max_channel = 0;
2159	target_host->max_id      = 1;
2160	target_host->max_lun     = SRP_MAX_LUN;
2161	target_host->max_cmd_len = FIELD_SIZEOF(struct srp_cmd, cdb);
2162
2163	target = host_to_target(target_host);
2164
2165	target->io_class	= SRP_REV16A_IB_IO_CLASS;
2166	target->scsi_host	= target_host;
2167	target->srp_host	= host;
2168	target->lkey		= host->srp_dev->mr->lkey;
2169	target->rkey		= host->srp_dev->mr->rkey;
2170	target->cmd_sg_cnt	= cmd_sg_entries;
2171	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
2172	target->allow_ext_sg	= allow_ext_sg;
2173
2174	ret = srp_parse_options(buf, target);
2175	if (ret)
2176		goto err;
2177
2178	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
2179				target->cmd_sg_cnt < target->sg_tablesize) {
2180		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
2181		target->sg_tablesize = target->cmd_sg_cnt;
2182	}
2183
2184	target_host->sg_tablesize = target->sg_tablesize;
2185	target->indirect_size = target->sg_tablesize *
2186				sizeof (struct srp_direct_buf);
2187	target->max_iu_len = sizeof (struct srp_cmd) +
2188			     sizeof (struct srp_indirect_buf) +
2189			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
2190
2191	spin_lock_init(&target->lock);
2192	INIT_LIST_HEAD(&target->free_tx);
2193	INIT_LIST_HEAD(&target->free_reqs);
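	/*
	 * Set up one srp_request per send queue slot: a table of FMR
	 * pointers, a page address array used when mapping FMRs and a
	 * DMA-mapped buffer for the indirect descriptor table.
	 */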
2194	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
2195		struct srp_request *req = &target->req_ring[i];
2196
2197		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
2198					GFP_KERNEL);
2199		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
2200					GFP_KERNEL);
2201		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
2202		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
2203			goto err_free_mem;
2204
2205		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
2206					     target->indirect_size,
2207					     DMA_TO_DEVICE);
2208		if (ib_dma_mapping_error(ibdev, dma_addr))
2209			goto err_free_mem;
2210
2211		req->indirect_dma_addr = dma_addr;
2212		req->index = i;
2213		list_add_tail(&req->list, &target->free_reqs);
2214	}
2215
2216	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
2217
2218	shost_printk(KERN_DEBUG, target->scsi_host, PFX
2219		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
2220		     "service_id %016llx dgid %pI6\n",
2221	       (unsigned long long) be64_to_cpu(target->id_ext),
2222	       (unsigned long long) be64_to_cpu(target->ioc_guid),
2223	       be16_to_cpu(target->path.pkey),
2224	       (unsigned long long) be64_to_cpu(target->service_id),
2225	       target->path.dgid.raw);
2226
2227	ret = srp_create_target_ib(target);
2228	if (ret)
2229		goto err_free_mem;
2230
2231	ret = srp_new_cm_id(target);
2232	if (ret)
2233		goto err_free_ib;
2234
2235	target->qp_in_error = 0;
2236	ret = srp_connect_target(target);
2237	if (ret) {
2238		shost_printk(KERN_ERR, target->scsi_host,
2239			     PFX "Connection failed\n");
2240		goto err_cm_id;
2241	}
2242
2243	ret = srp_add_target(host, target);
2244	if (ret)
2245		goto err_disconnect;
2246
2247	return count;
2248
2249err_disconnect:
2250	srp_disconnect_target(target);
2251
2252err_cm_id:
2253	ib_destroy_cm_id(target->cm_id);
2254
2255err_free_ib:
2256	srp_free_target_ib(target);
2257
2258err_free_mem:
2259	srp_free_req_data(target);
2260
2261err:
2262	scsi_host_put(target_host);
2263
2264	return ret;
2265}
2266
2267static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
2268
2269static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
2270			  char *buf)
2271{
2272	struct srp_host *host = container_of(dev, struct srp_host, dev);
2273
2274	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
2275}
2276
2277static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
2278
2279static ssize_t show_port(struct device *dev, struct device_attribute *attr,
2280			 char *buf)
2281{
2282	struct srp_host *host = container_of(dev, struct srp_host, dev);
2283
2284	return sprintf(buf, "%d\n", host->port);
2285}
2286
2287static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
2288
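/*
 * Allocate and register a struct srp_host for one HCA port.  The host shows
 * up as /sys/class/infiniband_srp/srp-<ibdev>-<port>/ with the add_target,
 * ibdev and port attributes.
 */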
2289static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
2290{
2291	struct srp_host *host;
2292
2293	host = kzalloc(sizeof *host, GFP_KERNEL);
2294	if (!host)
2295		return NULL;
2296
2297	INIT_LIST_HEAD(&host->target_list);
2298	spin_lock_init(&host->target_lock);
2299	init_completion(&host->released);
2300	host->srp_dev = device;
2301	host->port = port;
2302
2303	host->dev.class = &srp_class;
2304	host->dev.parent = device->dev->dma_device;
2305	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
2306
2307	if (device_register(&host->dev))
2308		goto free_host;
2309	if (device_create_file(&host->dev, &dev_attr_add_target))
2310		goto err_class;
2311	if (device_create_file(&host->dev, &dev_attr_ibdev))
2312		goto err_class;
2313	if (device_create_file(&host->dev, &dev_attr_port))
2314		goto err_class;
2315
2316	return host;
2317
2318err_class:
2319	device_unregister(&host->dev);
2320
2321free_host:
2322	kfree(host);
2323
2324	return NULL;
2325}
2326
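/*
 * IB client callback, called once for each HCA.  Allocates a PD and a DMA
 * MR, tries to create an FMR pool and registers one srp_host per physical
 * port (port 0 only for switches).
 */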
2327static void srp_add_one(struct ib_device *device)
2328{
2329	struct srp_device *srp_dev;
2330	struct ib_device_attr *dev_attr;
2331	struct ib_fmr_pool_param fmr_param;
2332	struct srp_host *host;
2333	int max_pages_per_fmr, fmr_page_shift, s, e, p;
2334
2335	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
2336	if (!dev_attr)
2337		return;
2338
2339	if (ib_query_device(device, dev_attr)) {
2340		pr_warn("Query device failed for %s\n", device->name);
2341		goto free_attr;
2342	}
2343
2344	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
2345	if (!srp_dev)
2346		goto free_attr;
2347
2348	/*
2349	 * Use the smallest page size supported by the HCA, down to a
2350	 * minimum of 4096 bytes. We're unlikely to build large sglists
2351	 * out of smaller entries.
2352	 */
2353	fmr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
2354	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
2355	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
2356	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;
2357
2358	INIT_LIST_HEAD(&srp_dev->dev_list);
2359
2360	srp_dev->dev = device;
2361	srp_dev->pd  = ib_alloc_pd(device);
2362	if (IS_ERR(srp_dev->pd))
2363		goto free_dev;
2364
2365	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
2366				    IB_ACCESS_LOCAL_WRITE |
2367				    IB_ACCESS_REMOTE_READ |
2368				    IB_ACCESS_REMOTE_WRITE);
2369	if (IS_ERR(srp_dev->mr))
2370		goto err_pd;
2371
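	/*
	 * Create an FMR pool, halving the number of pages per FMR (and
	 * therefore fmr_max_size) until the HCA accepts the parameters.  If
	 * no pool can be created, fmr_pool is left NULL and data is mapped
	 * without FMRs.
	 */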
2372	for (max_pages_per_fmr = SRP_FMR_SIZE;
2373			max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
2374			max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
2375		memset(&fmr_param, 0, sizeof fmr_param);
2376		fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
2377		fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
2378		fmr_param.cache		    = 1;
2379		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
2380		fmr_param.page_shift	    = fmr_page_shift;
2381		fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
2382					       IB_ACCESS_REMOTE_WRITE |
2383					       IB_ACCESS_REMOTE_READ);
2384
2385		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
2386		if (!IS_ERR(srp_dev->fmr_pool))
2387			break;
2388	}
2389
2390	if (IS_ERR(srp_dev->fmr_pool))
2391		srp_dev->fmr_pool = NULL;
2392
2393	if (device->node_type == RDMA_NODE_IB_SWITCH) {
2394		s = 0;
2395		e = 0;
2396	} else {
2397		s = 1;
2398		e = device->phys_port_cnt;
2399	}
2400
2401	for (p = s; p <= e; ++p) {
2402		host = srp_add_port(srp_dev, p);
2403		if (host)
2404			list_add_tail(&host->list, &srp_dev->dev_list);
2405	}
2406
2407	ib_set_client_data(device, &srp_client, srp_dev);
2408
2409	goto free_attr;
2410
2411err_pd:
2412	ib_dealloc_pd(srp_dev->pd);
2413
2414free_dev:
2415	kfree(srp_dev);
2416
2417free_attr:
2418	kfree(dev_attr);
2419}
2420
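/*
 * IB client callback for HCA removal: unregister every srp_host on the
 * device, wait for pending work, tear down all of its target ports and
 * finally release the FMR pool, DMA MR and PD.
 */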
2421static void srp_remove_one(struct ib_device *device)
2422{
2423	struct srp_device *srp_dev;
2424	struct srp_host *host, *tmp_host;
2425	LIST_HEAD(target_list);
2426	struct srp_target_port *target, *tmp_target;
2427
2428	srp_dev = ib_get_client_data(device, &srp_client);
2429
2430	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
2431		device_unregister(&host->dev);
2432		/*
2433		 * Wait for the sysfs entry to go away, so that no new
2434		 * target ports can be created.
2435		 */
2436		wait_for_completion(&host->released);
2437
2438		/*
2439		 * Mark all target ports as removed, so we stop queueing
2440		 * commands and don't try to reconnect.
2441		 */
2442		spin_lock(&host->target_lock);
2443		list_for_each_entry(target, &host->target_list, list) {
2444			spin_lock_irq(&target->lock);
2445			target->state = SRP_TARGET_REMOVED;
2446			spin_unlock_irq(&target->lock);
2447		}
2448		spin_unlock(&host->target_lock);
2449
2450		/*
2451		 * Wait for any reconnection tasks that may have
2452		 * started before we marked our target ports as
2453		 * removed, and any target port removal tasks.
2454		 */
2455		flush_workqueue(ib_wq);
2456
2457		list_for_each_entry_safe(target, tmp_target,
2458					 &host->target_list, list) {
2459			srp_del_scsi_host_attr(target->scsi_host);
2460			srp_remove_host(target->scsi_host);
2461			scsi_remove_host(target->scsi_host);
2462			srp_disconnect_target(target);
2463			ib_destroy_cm_id(target->cm_id);
2464			srp_free_target_ib(target);
2465			srp_free_req_data(target);
2466			scsi_host_put(target->scsi_host);
2467		}
2468
2469		kfree(host);
2470	}
2471
2472	if (srp_dev->fmr_pool)
2473		ib_destroy_fmr_pool(srp_dev->fmr_pool);
2474	ib_dereg_mr(srp_dev->mr);
2475	ib_dealloc_pd(srp_dev->pd);
2476
2477	kfree(srp_dev);
2478}
2479
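/*
 * No rport callbacks are implemented yet; attaching an empty template is
 * sufficient for srp_rport_add() to create SRP remote ports.
 */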
2480static struct srp_function_template ib_srp_transport_functions = {
2481};
2482
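/*
 * Module initialization: validate and clamp the cmd_sg_entries and
 * indirect_sg_entries module parameters, then register the SRP transport
 * template, the infiniband_srp class, the SA client and the IB client.
 */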
2483static int __init srp_init_module(void)
2484{
2485	int ret;
2486
2487	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
2488
2489	if (srp_sg_tablesize) {
2490		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
2491		if (!cmd_sg_entries)
2492			cmd_sg_entries = srp_sg_tablesize;
2493	}
2494
2495	if (!cmd_sg_entries)
2496		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
2497
2498	if (cmd_sg_entries > 255) {
2499		pr_warn("Clamping cmd_sg_entries to 255\n");
2500		cmd_sg_entries = 255;
2501	}
2502
2503	if (!indirect_sg_entries)
2504		indirect_sg_entries = cmd_sg_entries;
2505	else if (indirect_sg_entries < cmd_sg_entries) {
2506		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
2507			cmd_sg_entries);
2508		indirect_sg_entries = cmd_sg_entries;
2509	}
2510
2511	ib_srp_transport_template =
2512		srp_attach_transport(&ib_srp_transport_functions);
2513	if (!ib_srp_transport_template)
2514		return -ENOMEM;
2515
2516	ret = class_register(&srp_class);
2517	if (ret) {
2518		pr_err("couldn't register class infiniband_srp\n");
2519		srp_release_transport(ib_srp_transport_template);
2520		return ret;
2521	}
2522
2523	ib_sa_register_client(&srp_sa_client);
2524
2525	ret = ib_register_client(&srp_client);
2526	if (ret) {
2527		pr_err("couldn't register IB client\n");
2528		srp_release_transport(ib_srp_transport_template);
2529		ib_sa_unregister_client(&srp_sa_client);
2530		class_unregister(&srp_class);
2531		return ret;
2532	}
2533
2534	return 0;
2535}
2536
2537static void __exit srp_cleanup_module(void)
2538{
2539	ib_unregister_client(&srp_client);
2540	ib_sa_unregister_client(&srp_sa_client);
2541	class_unregister(&srp_class);
2542	srp_release_transport(ib_srp_transport_template);
2543}
2544
2545module_init(srp_init_module);
2546module_exit(srp_cleanup_module);