v3.5.6
   1/*
   2 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#define pr_fmt(fmt) PFX fmt
  34
  35#include <linux/module.h>
  36#include <linux/init.h>
  37#include <linux/slab.h>
  38#include <linux/err.h>
  39#include <linux/string.h>
  40#include <linux/parser.h>
  41#include <linux/random.h>
  42#include <linux/jiffies.h>
  43
  44#include <linux/atomic.h>
  45
  46#include <scsi/scsi.h>
  47#include <scsi/scsi_device.h>
  48#include <scsi/scsi_dbg.h>
  49#include <scsi/srp.h>
  50#include <scsi/scsi_transport_srp.h>
  51
  52#include "ib_srp.h"
  53
  54#define DRV_NAME	"ib_srp"
  55#define PFX		DRV_NAME ": "
  56#define DRV_VERSION	"0.2"
  57#define DRV_RELDATE	"November 1, 2005"
  58
  59MODULE_AUTHOR("Roland Dreier");
  60MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
  61		   "v" DRV_VERSION " (" DRV_RELDATE ")");
  62MODULE_LICENSE("Dual BSD/GPL");
  63
  64static unsigned int srp_sg_tablesize;
  65static unsigned int cmd_sg_entries;
  66static unsigned int indirect_sg_entries;
  67static bool allow_ext_sg;
  68static int topspin_workarounds = 1;
  69
  70module_param(srp_sg_tablesize, uint, 0444);
  71MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
  72
  73module_param(cmd_sg_entries, uint, 0444);
  74MODULE_PARM_DESC(cmd_sg_entries,
  75		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
  76
  77module_param(indirect_sg_entries, uint, 0444);
  78MODULE_PARM_DESC(indirect_sg_entries,
  79		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
  80
  81module_param(allow_ext_sg, bool, 0444);
  82MODULE_PARM_DESC(allow_ext_sg,
  83		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
  84
  85module_param(topspin_workarounds, int, 0444);
  86MODULE_PARM_DESC(topspin_workarounds,
  87		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
  88
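/*
 * Note: all of the module parameters above use 0444 permissions, so they are
 * read-only at runtime and are normally set at load time (for example,
 * "modprobe ib_srp cmd_sg_entries=255"); their current values are exported
 * under /sys/module/ib_srp/parameters/.
 */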
  89static void srp_add_one(struct ib_device *device);
  90static void srp_remove_one(struct ib_device *device);
  91static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
  92static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
  93static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
  94
  95static struct scsi_transport_template *ib_srp_transport_template;
  96
  97static struct ib_client srp_client = {
  98	.name   = "srp",
  99	.add    = srp_add_one,
 100	.remove = srp_remove_one
 101};
 102
 103static struct ib_sa_client srp_sa_client;
 104
 105static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
 106{
 107	return (struct srp_target_port *) host->hostdata;
 108}
 109
 110static const char *srp_target_info(struct Scsi_Host *host)
 111{
 112	return host_to_target(host)->target_name;
 113}
 114
 115static int srp_target_is_topspin(struct srp_target_port *target)
 116{
 117	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
 118	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
 119
 120	return topspin_workarounds &&
 121		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
 122		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
 123}
 124
 125static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
 126				   gfp_t gfp_mask,
 127				   enum dma_data_direction direction)
 128{
 129	struct srp_iu *iu;
 130
 131	iu = kmalloc(sizeof *iu, gfp_mask);
 132	if (!iu)
 133		goto out;
 134
 135	iu->buf = kzalloc(size, gfp_mask);
 136	if (!iu->buf)
 137		goto out_free_iu;
 138
 139	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
 140				    direction);
 141	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
 142		goto out_free_buf;
 143
 144	iu->size      = size;
 145	iu->direction = direction;
 146
 147	return iu;
 148
 149out_free_buf:
 150	kfree(iu->buf);
 151out_free_iu:
 152	kfree(iu);
 153out:
 154	return NULL;
 155}
 156
 157static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
 158{
 159	if (!iu)
 160		return;
 161
 162	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
 163			    iu->direction);
 164	kfree(iu->buf);
 165	kfree(iu);
 166}
 167
 168static void srp_qp_event(struct ib_event *event, void *context)
 169{
 170	pr_debug("QP event %d\n", event->event);
 171}
 172
 173static int srp_init_qp(struct srp_target_port *target,
 174		       struct ib_qp *qp)
 175{
 176	struct ib_qp_attr *attr;
 177	int ret;
 178
 179	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 180	if (!attr)
 181		return -ENOMEM;
 182
 183	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
 184			   target->srp_host->port,
 185			   be16_to_cpu(target->path.pkey),
 186			   &attr->pkey_index);
 187	if (ret)
 188		goto out;
 189
 190	attr->qp_state        = IB_QPS_INIT;
 191	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
 192				    IB_ACCESS_REMOTE_WRITE);
 193	attr->port_num        = target->srp_host->port;
 194
 195	ret = ib_modify_qp(qp, attr,
 196			   IB_QP_STATE		|
 197			   IB_QP_PKEY_INDEX	|
 198			   IB_QP_ACCESS_FLAGS	|
 199			   IB_QP_PORT);
 200
 201out:
 202	kfree(attr);
 203	return ret;
 204}
 205
 206static int srp_new_cm_id(struct srp_target_port *target)
 207{
 208	struct ib_cm_id *new_cm_id;
 209
 210	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
 211				    srp_cm_handler, target);
 212	if (IS_ERR(new_cm_id))
 213		return PTR_ERR(new_cm_id);
 214
 215	if (target->cm_id)
 216		ib_destroy_cm_id(target->cm_id);
 217	target->cm_id = new_cm_id;
 218
 219	return 0;
 220}
 221
 222static int srp_create_target_ib(struct srp_target_port *target)
 223{
 224	struct ib_qp_init_attr *init_attr;
 225	int ret;
 226
 227	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
 228	if (!init_attr)
 229		return -ENOMEM;
 230
 231	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
 232				       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
 233	if (IS_ERR(target->recv_cq)) {
 234		ret = PTR_ERR(target->recv_cq);
 235		goto err;
 236	}
 237
 238	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
 239				       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
 240	if (IS_ERR(target->send_cq)) {
 241		ret = PTR_ERR(target->send_cq);
 242		goto err_recv_cq;
 243	}
 244
 245	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
 246
 247	init_attr->event_handler       = srp_qp_event;
 248	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
 249	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
 250	init_attr->cap.max_recv_sge    = 1;
 251	init_attr->cap.max_send_sge    = 1;
 252	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
 253	init_attr->qp_type             = IB_QPT_RC;
 254	init_attr->send_cq             = target->send_cq;
 255	init_attr->recv_cq             = target->recv_cq;
 256
 257	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
 258	if (IS_ERR(target->qp)) {
 259		ret = PTR_ERR(target->qp);
 260		goto err_send_cq;
 261	}
 262
 263	ret = srp_init_qp(target, target->qp);
 264	if (ret)
 265		goto err_qp;
 266
 267	kfree(init_attr);
 268	return 0;
 269
 270err_qp:
 271	ib_destroy_qp(target->qp);
 272
 273err_send_cq:
 274	ib_destroy_cq(target->send_cq);
 275
 276err_recv_cq:
 277	ib_destroy_cq(target->recv_cq);
 278
 279err:
 280	kfree(init_attr);
 281	return ret;
 282}
 283
 284static void srp_free_target_ib(struct srp_target_port *target)
 285{
 286	int i;
 287
 288	ib_destroy_qp(target->qp);
 289	ib_destroy_cq(target->send_cq);
 290	ib_destroy_cq(target->recv_cq);
 291
 292	for (i = 0; i < SRP_RQ_SIZE; ++i)
 293		srp_free_iu(target->srp_host, target->rx_ring[i]);
 294	for (i = 0; i < SRP_SQ_SIZE; ++i)
 295		srp_free_iu(target->srp_host, target->tx_ring[i]);
 296}
 297
 298static void srp_path_rec_completion(int status,
 299				    struct ib_sa_path_rec *pathrec,
 300				    void *target_ptr)
 301{
 302	struct srp_target_port *target = target_ptr;
 303
 304	target->status = status;
 305	if (status)
 306		shost_printk(KERN_ERR, target->scsi_host,
 307			     PFX "Got failed path rec status %d\n", status);
 308	else
 309		target->path = *pathrec;
 310	complete(&target->done);
 311}
 312
 313static int srp_lookup_path(struct srp_target_port *target)
 314{
 315	target->path.numb_path = 1;
 316
 317	init_completion(&target->done);
 318
 319	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
 320						   target->srp_host->srp_dev->dev,
 321						   target->srp_host->port,
 322						   &target->path,
 323						   IB_SA_PATH_REC_SERVICE_ID	|
 324						   IB_SA_PATH_REC_DGID		|
 325						   IB_SA_PATH_REC_SGID		|
 326						   IB_SA_PATH_REC_NUMB_PATH	|
 327						   IB_SA_PATH_REC_PKEY,
 328						   SRP_PATH_REC_TIMEOUT_MS,
 329						   GFP_KERNEL,
 330						   srp_path_rec_completion,
 331						   target, &target->path_query);
 332	if (target->path_query_id < 0)
 333		return target->path_query_id;
 334
 335	wait_for_completion(&target->done);
 336
 337	if (target->status < 0)
 338		shost_printk(KERN_WARNING, target->scsi_host,
 339			     PFX "Path record query failed\n");
 340
 341	return target->status;
 342}
 343
 344static int srp_send_req(struct srp_target_port *target)
 345{
 346	struct {
 347		struct ib_cm_req_param param;
 348		struct srp_login_req   priv;
 349	} *req = NULL;
 350	int status;
 351
 352	req = kzalloc(sizeof *req, GFP_KERNEL);
 353	if (!req)
 354		return -ENOMEM;
 355
 356	req->param.primary_path 	      = &target->path;
 357	req->param.alternate_path 	      = NULL;
 358	req->param.service_id 		      = target->service_id;
 359	req->param.qp_num 		      = target->qp->qp_num;
 360	req->param.qp_type 		      = target->qp->qp_type;
 361	req->param.private_data 	      = &req->priv;
 362	req->param.private_data_len 	      = sizeof req->priv;
 363	req->param.flow_control 	      = 1;
 364
 365	get_random_bytes(&req->param.starting_psn, 4);
 366	req->param.starting_psn 	     &= 0xffffff;
 367
 368	/*
 369	 * Pick some arbitrary defaults here; we could make these
 370	 * module parameters if anyone cared about setting them.
 371	 */
 372	req->param.responder_resources	      = 4;
 373	req->param.remote_cm_response_timeout = 20;
 374	req->param.local_cm_response_timeout  = 20;
 375	req->param.retry_count 		      = 7;
 376	req->param.rnr_retry_count 	      = 7;
 377	req->param.max_cm_retries 	      = 15;
 378
 379	req->priv.opcode     	= SRP_LOGIN_REQ;
 380	req->priv.tag        	= 0;
 381	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
 382	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
 383					      SRP_BUF_FORMAT_INDIRECT);
 384	/*
 385	 * In the published SRP specification (draft rev. 16a), the
 386	 * port identifier format is 8 bytes of ID extension followed
 387	 * by 8 bytes of GUID.  Older drafts put the two halves in the
 388	 * opposite order, so that the GUID comes first.
 389	 *
 390	 * Targets conforming to these obsolete drafts can be
 391	 * recognized by the I/O Class they report.
 392	 */
 393	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
 394		memcpy(req->priv.initiator_port_id,
 395		       &target->path.sgid.global.interface_id, 8);
 396		memcpy(req->priv.initiator_port_id + 8,
 397		       &target->initiator_ext, 8);
 398		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
 399		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
 400	} else {
 401		memcpy(req->priv.initiator_port_id,
 402		       &target->initiator_ext, 8);
 403		memcpy(req->priv.initiator_port_id + 8,
 404		       &target->path.sgid.global.interface_id, 8);
 405		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
 406		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
 407	}
 408
 409	/*
 410	 * Topspin/Cisco SRP targets will reject our login unless we
 411	 * zero out the first 8 bytes of our initiator port ID and set
 412	 * the second 8 bytes to the local node GUID.
 413	 */
 414	if (srp_target_is_topspin(target)) {
 415		shost_printk(KERN_DEBUG, target->scsi_host,
 416			     PFX "Topspin/Cisco initiator port ID workaround "
 417			     "activated for target GUID %016llx\n",
 418			     (unsigned long long) be64_to_cpu(target->ioc_guid));
 419		memset(req->priv.initiator_port_id, 0, 8);
 420		memcpy(req->priv.initiator_port_id + 8,
 421		       &target->srp_host->srp_dev->dev->node_guid, 8);
 422	}
 423
 424	status = ib_send_cm_req(target->cm_id, &req->param);
 425
 426	kfree(req);
 427
 428	return status;
 429}
 430
 431static void srp_disconnect_target(struct srp_target_port *target)
 432{
 433	/* XXX should send SRP_I_LOGOUT request */
 434
 435	init_completion(&target->done);
 436	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
 437		shost_printk(KERN_DEBUG, target->scsi_host,
 438			     PFX "Sending CM DREQ failed\n");
 439		return;
 440	}
 441	wait_for_completion(&target->done);
 442}
 443
 444static bool srp_change_state(struct srp_target_port *target,
 445			    enum srp_target_state old,
 446			    enum srp_target_state new)
 447{
 448	bool changed = false;
 449
 450	spin_lock_irq(&target->lock);
 451	if (target->state == old) {
 452		target->state = new;
 453		changed = true;
 454	}
 455	spin_unlock_irq(&target->lock);
 456	return changed;
 457}
 458
 459static void srp_free_req_data(struct srp_target_port *target)
 460{
 461	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
 462	struct srp_request *req;
 463	int i;
 464
 465	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
 466		kfree(req->fmr_list);
 467		kfree(req->map_page);
 468		if (req->indirect_dma_addr) {
 469			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
 470					    target->indirect_size,
 471					    DMA_TO_DEVICE);
 472		}
 473		kfree(req->indirect_desc);
 474	}
 475}
 476
 477/**
 478 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 479 * @shost: SCSI host whose attributes to remove from sysfs.
 480 *
 481 * Note: Any attributes defined in the host template and that did not exist
 482 * before invocation of this function will be ignored.
 483 */
 484static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
 485{
 486	struct device_attribute **attr;
 487
 488	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
 489		device_remove_file(&shost->shost_dev, *attr);
 490}
 491
 492static void srp_remove_work(struct work_struct *work)
 493{
 494	struct srp_target_port *target =
 495		container_of(work, struct srp_target_port, work);
 496
 497	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
 498		return;
 499
 500	spin_lock(&target->srp_host->target_lock);
 501	list_del(&target->list);
 502	spin_unlock(&target->srp_host->target_lock);
 503
 504	srp_del_scsi_host_attr(target->scsi_host);
 505	srp_remove_host(target->scsi_host);
 506	scsi_remove_host(target->scsi_host);
 507	ib_destroy_cm_id(target->cm_id);
 508	srp_free_target_ib(target);
 509	srp_free_req_data(target);
 510	scsi_host_put(target->scsi_host);
 511}
 512
 513static int srp_connect_target(struct srp_target_port *target)
 514{
 515	int retries = 3;
 516	int ret;
 517
 518	ret = srp_lookup_path(target);
 519	if (ret)
 520		return ret;
 521
 522	while (1) {
 523		init_completion(&target->done);
 524		ret = srp_send_req(target);
 525		if (ret)
 526			return ret;
 527		wait_for_completion(&target->done);
 528
 529		/*
 530		 * The CM event handling code will set status to
 531		 * SRP_PORT_REDIRECT if we get a port redirect REJ
 532		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
 533		 * redirect REJ back.
 534		 */
 535		switch (target->status) {
 536		case 0:
 537			return 0;
 538
 539		case SRP_PORT_REDIRECT:
 540			ret = srp_lookup_path(target);
 541			if (ret)
 542				return ret;
 543			break;
 544
 545		case SRP_DLID_REDIRECT:
 546			break;
 547
 548		case SRP_STALE_CONN:
 549			/* Our current CM id was stale, and is now in timewait.
 550			 * Try to reconnect with a new one.
 551			 */
 552			if (!retries-- || srp_new_cm_id(target)) {
 553				shost_printk(KERN_ERR, target->scsi_host, PFX
 554					     "giving up on stale connection\n");
 555				target->status = -ECONNRESET;
 556				return target->status;
 557			}
 558
 559			shost_printk(KERN_ERR, target->scsi_host, PFX
 560				     "retrying stale connection\n");
 561			break;
 562
 563		default:
 564			return target->status;
 565		}
 566	}
 567}
 568
 569static void srp_unmap_data(struct scsi_cmnd *scmnd,
 570			   struct srp_target_port *target,
 571			   struct srp_request *req)
 572{
 573	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
 574	struct ib_pool_fmr **pfmr;
 575
 576	if (!scsi_sglist(scmnd) ||
 577	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
 578	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
 579		return;
 580
 581	pfmr = req->fmr_list;
 582	while (req->nfmr--)
 583		ib_fmr_pool_unmap(*pfmr++);
 584
 585	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
 586			scmnd->sc_data_direction);
 587}
 588
 589/**
 590 * srp_claim_req - Take ownership of the scmnd associated with a request.
 591 * @target: SRP target port.
 592 * @req: SRP request.
 593 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 594 *         ownership of @req->scmnd if it equals @scmnd.
 595 *
 596 * Return value:
 597 * Either NULL or a pointer to the SCSI command the caller became owner of.
 598 */
 599static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
 600				       struct srp_request *req,
 601				       struct scsi_cmnd *scmnd)
 602{
 603	unsigned long flags;
 604
 605	spin_lock_irqsave(&target->lock, flags);
 606	if (!scmnd) {
 607		scmnd = req->scmnd;
 608		req->scmnd = NULL;
 609	} else if (req->scmnd == scmnd) {
 610		req->scmnd = NULL;
 611	} else {
 612		scmnd = NULL;
 613	}
 614	spin_unlock_irqrestore(&target->lock, flags);
 615
 616	return scmnd;
 617}
 618
 619/**
 620 * srp_free_req() - Unmap data and add request to the free request list.
 621 */
 622static void srp_free_req(struct srp_target_port *target,
 623			 struct srp_request *req, struct scsi_cmnd *scmnd,
 624			 s32 req_lim_delta)
 625{
 626	unsigned long flags;
 627
 628	srp_unmap_data(scmnd, target, req);
 629
 630	spin_lock_irqsave(&target->lock, flags);
 631	target->req_lim += req_lim_delta;
 632	list_add_tail(&req->list, &target->free_reqs);
 633	spin_unlock_irqrestore(&target->lock, flags);
 634}
 635
 636static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 637{
 638	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
 639
 640	if (scmnd) {
 641		srp_free_req(target, req, scmnd, 0);
 642		scmnd->result = DID_RESET << 16;
 643		scmnd->scsi_done(scmnd);
 644	}
 645}
 646
 647static int srp_reconnect_target(struct srp_target_port *target)
 648{
 649	struct ib_qp_attr qp_attr;
 650	struct ib_wc wc;
 651	int i, ret;
 652
 653	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
 654		return -EAGAIN;
 655
 656	srp_disconnect_target(target);
 657	/*
 658	 * Now get a new local CM ID so that we avoid confusing the
 659	 * target in case things are really fouled up.
 660	 */
 661	ret = srp_new_cm_id(target);
 662	if (ret)
 663		goto err;
 664
 665	qp_attr.qp_state = IB_QPS_RESET;
 666	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
 667	if (ret)
 668		goto err;
 669
 670	ret = srp_init_qp(target, target->qp);
 671	if (ret)
 672		goto err;
 673
 674	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
 675		; /* nothing */
 676	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
 677		; /* nothing */
 678
 679	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 680		struct srp_request *req = &target->req_ring[i];
 681		if (req->scmnd)
 682			srp_reset_req(target, req);
 683	}
 684
 685	INIT_LIST_HEAD(&target->free_tx);
 686	for (i = 0; i < SRP_SQ_SIZE; ++i)
 687		list_add(&target->tx_ring[i]->list, &target->free_tx);
 688
 689	target->qp_in_error = 0;
 690	ret = srp_connect_target(target);
 691	if (ret)
 692		goto err;
 693
 694	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
 695		ret = -EAGAIN;
 696
 697	return ret;
 698
 699err:
 700	shost_printk(KERN_ERR, target->scsi_host,
 701		     PFX "reconnect failed (%d), removing target port.\n", ret);
 702
 703	/*
 704	 * We couldn't reconnect, so kill our target port off.
 705	 * However, we have to defer the real removal because we
 706	 * are in the context of the SCSI error handler now, which
 707	 * will deadlock if we call scsi_remove_host().
 708	 *
 709	 * Schedule our work inside the lock to avoid a race with
 710	 * the flush_scheduled_work() in srp_remove_one().
 711	 */
 712	spin_lock_irq(&target->lock);
 713	if (target->state == SRP_TARGET_CONNECTING) {
 714		target->state = SRP_TARGET_DEAD;
 715		INIT_WORK(&target->work, srp_remove_work);
 716		queue_work(ib_wq, &target->work);
 717	}
 718	spin_unlock_irq(&target->lock);
 719
 720	return ret;
 721}
 722
 723static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
 724			 unsigned int dma_len, u32 rkey)
 725{
 726	struct srp_direct_buf *desc = state->desc;
 727
 728	desc->va = cpu_to_be64(dma_addr);
 729	desc->key = cpu_to_be32(rkey);
 730	desc->len = cpu_to_be32(dma_len);
 731
 732	state->total_len += dma_len;
 733	state->desc++;
 734	state->ndesc++;
 735}
 736
 737static int srp_map_finish_fmr(struct srp_map_state *state,
 738			      struct srp_target_port *target)
 739{
 740	struct srp_device *dev = target->srp_host->srp_dev;
 741	struct ib_pool_fmr *fmr;
 742	u64 io_addr = 0;
 743
 744	if (!state->npages)
 745		return 0;
 746
 747	if (state->npages == 1) {
 748		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
 749			     target->rkey);
 750		state->npages = state->fmr_len = 0;
 751		return 0;
 752	}
 753
 754	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
 755				   state->npages, io_addr);
 756	if (IS_ERR(fmr))
 757		return PTR_ERR(fmr);
 758
 759	*state->next_fmr++ = fmr;
 760	state->nfmr++;
 761
 762	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
 763	state->npages = state->fmr_len = 0;
 764	return 0;
 765}
 766
 767static void srp_map_update_start(struct srp_map_state *state,
 768				 struct scatterlist *sg, int sg_index,
 769				 dma_addr_t dma_addr)
 770{
 771	state->unmapped_sg = sg;
 772	state->unmapped_index = sg_index;
 773	state->unmapped_addr = dma_addr;
 774}
 775
 776static int srp_map_sg_entry(struct srp_map_state *state,
 777			    struct srp_target_port *target,
 778			    struct scatterlist *sg, int sg_index,
 779			    int use_fmr)
 780{
 781	struct srp_device *dev = target->srp_host->srp_dev;
 782	struct ib_device *ibdev = dev->dev;
 783	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
 784	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 785	unsigned int len;
 786	int ret;
 787
 788	if (!dma_len)
 789		return 0;
 790
 791	if (use_fmr == SRP_MAP_NO_FMR) {
 792		/* Once we're in direct map mode for a request, we don't
 793		 * go back to FMR mode, so no need to update anything
 794		 * other than the descriptor.
 795		 */
 796		srp_map_desc(state, dma_addr, dma_len, target->rkey);
 797		return 0;
 798	}
 799
 800	/* If we start at an offset into the FMR page, don't merge into
 801	 * the current FMR. Finish it out, and use the kernel's MR for this
 802	 * sg entry. This is to avoid potential bugs on some SRP targets
 803	 * that were never quite defined, but went away when the initiator
 804	 * avoided using FMR on such page fragments.
 805	 */
 806	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
 807		ret = srp_map_finish_fmr(state, target);
 808		if (ret)
 809			return ret;
 810
 811		srp_map_desc(state, dma_addr, dma_len, target->rkey);
 812		srp_map_update_start(state, NULL, 0, 0);
 813		return 0;
 814	}
 815
 816	/* If this is the first sg to go into the FMR, save our position.
 817	 * We need to know the first unmapped entry, its index, and the
 818	 * first unmapped address within that entry to be able to restart
 819	 * mapping after an error.
 820	 */
 821	if (!state->unmapped_sg)
 822		srp_map_update_start(state, sg, sg_index, dma_addr);
 823
 824	while (dma_len) {
 825		if (state->npages == SRP_FMR_SIZE) {
 826			ret = srp_map_finish_fmr(state, target);
 827			if (ret)
 828				return ret;
 829
 830			srp_map_update_start(state, sg, sg_index, dma_addr);
 831		}
 832
 833		len = min_t(unsigned int, dma_len, dev->fmr_page_size);
 834
 835		if (!state->npages)
 836			state->base_dma_addr = dma_addr;
 837		state->pages[state->npages++] = dma_addr;
 838		state->fmr_len += len;
 839		dma_addr += len;
 840		dma_len -= len;
 841	}
 842
 843	/* If the last entry of the FMR wasn't a full page, then we need to
 844	 * close it out and start a new one -- we can only merge at page
  845	 * boundaries.
 846	 */
 847	ret = 0;
 848	if (len != dev->fmr_page_size) {
 849		ret = srp_map_finish_fmr(state, target);
 850		if (!ret)
 851			srp_map_update_start(state, NULL, 0, 0);
 852	}
 853	return ret;
 854}
 855
 856static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 857			struct srp_request *req)
 858{
 859	struct scatterlist *scat, *sg;
 860	struct srp_cmd *cmd = req->cmd->buf;
 861	int i, len, nents, count, use_fmr;
 862	struct srp_device *dev;
 863	struct ib_device *ibdev;
 864	struct srp_map_state state;
 865	struct srp_indirect_buf *indirect_hdr;
 866	u32 table_len;
 867	u8 fmt;
 868
 869	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
 870		return sizeof (struct srp_cmd);
 871
 872	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
 873	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
 874		shost_printk(KERN_WARNING, target->scsi_host,
 875			     PFX "Unhandled data direction %d\n",
 876			     scmnd->sc_data_direction);
 877		return -EINVAL;
 878	}
 879
 880	nents = scsi_sg_count(scmnd);
 881	scat  = scsi_sglist(scmnd);
 882
 883	dev = target->srp_host->srp_dev;
 884	ibdev = dev->dev;
 885
 886	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
 887	if (unlikely(count == 0))
 888		return -EIO;
 889
 890	fmt = SRP_DATA_DESC_DIRECT;
 891	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);
 892
 893	if (count == 1) {
 894		/*
 895		 * The midlayer only generated a single gather/scatter
 896		 * entry, or DMA mapping coalesced everything to a
 897		 * single entry.  So a direct descriptor along with
 898		 * the DMA MR suffices.
 899		 */
 900		struct srp_direct_buf *buf = (void *) cmd->add_data;
 901
 902		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
 903		buf->key = cpu_to_be32(target->rkey);
 904		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 905
 906		req->nfmr = 0;
 907		goto map_complete;
 908	}
 909
 910	/* We have more than one scatter/gather entry, so build our indirect
 911	 * descriptor table, trying to merge as many entries with FMR as we
 912	 * can.
 913	 */
 914	indirect_hdr = (void *) cmd->add_data;
 915
 916	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
 917				   target->indirect_size, DMA_TO_DEVICE);
 918
 919	memset(&state, 0, sizeof(state));
 920	state.desc	= req->indirect_desc;
 921	state.pages	= req->map_page;
 922	state.next_fmr	= req->fmr_list;
 923
 924	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
 925
 926	for_each_sg(scat, sg, count, i) {
 927		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
 928			/* FMR mapping failed, so backtrack to the first
 929			 * unmapped entry and continue on without using FMR.
 930			 */
 931			dma_addr_t dma_addr;
 932			unsigned int dma_len;
 933
 934backtrack:
 935			sg = state.unmapped_sg;
 936			i = state.unmapped_index;
 937
 938			dma_addr = ib_sg_dma_address(ibdev, sg);
 939			dma_len = ib_sg_dma_len(ibdev, sg);
 940			dma_len -= (state.unmapped_addr - dma_addr);
 941			dma_addr = state.unmapped_addr;
 942			use_fmr = SRP_MAP_NO_FMR;
 943			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
 944		}
 945	}
 946
 947	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
 948		goto backtrack;
 949
 950	/* We've mapped the request, now pull as much of the indirect
 951	 * descriptor table as we can into the command buffer. If this
 952	 * target is not using an external indirect table, we are
 953	 * guaranteed to fit into the command, as the SCSI layer won't
 954	 * give us more S/G entries than we allow.
 955	 */
 956	req->nfmr = state.nfmr;
 957	if (state.ndesc == 1) {
 958		/* FMR mapping was able to collapse this to one entry,
 959		 * so use a direct descriptor.
 960		 */
 961		struct srp_direct_buf *buf = (void *) cmd->add_data;
 962
 963		*buf = req->indirect_desc[0];
 964		goto map_complete;
 965	}
 966
 967	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
 968						!target->allow_ext_sg)) {
 969		shost_printk(KERN_ERR, target->scsi_host,
 970			     "Could not fit S/G list into SRP_CMD\n");
 971		return -EIO;
 972	}
 973
 974	count = min(state.ndesc, target->cmd_sg_cnt);
 975	table_len = state.ndesc * sizeof (struct srp_direct_buf);
 976
 977	fmt = SRP_DATA_DESC_INDIRECT;
 978	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
 979	len += count * sizeof (struct srp_direct_buf);
 980
 981	memcpy(indirect_hdr->desc_list, req->indirect_desc,
 982	       count * sizeof (struct srp_direct_buf));
 983
 984	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
 985	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
 986	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
 987	indirect_hdr->len = cpu_to_be32(state.total_len);
 988
 989	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
 990		cmd->data_out_desc_cnt = count;
 991	else
 992		cmd->data_in_desc_cnt = count;
 993
 994	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
 995				      DMA_TO_DEVICE);
 996
 997map_complete:
 998	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
 999		cmd->buf_fmt = fmt << 4;
1000	else
1001		cmd->buf_fmt = fmt;
1002
1003	return len;
1004}
1005
1006/*
1007 * Return an IU and possible credit to the free pool
1008 */
1009static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
1010			  enum srp_iu_type iu_type)
1011{
1012	unsigned long flags;
1013
1014	spin_lock_irqsave(&target->lock, flags);
1015	list_add(&iu->list, &target->free_tx);
1016	if (iu_type != SRP_IU_RSP)
1017		++target->req_lim;
1018	spin_unlock_irqrestore(&target->lock, flags);
1019}
1020
1021/*
1022 * Must be called with target->lock held to protect req_lim and free_tx.
1023 * If IU is not sent, it must be returned using srp_put_tx_iu().
1024 *
1025 * Note:
1026 * An upper limit for the number of allocated information units for each
1027 * request type is:
1028 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1029 *   more than Scsi_Host.can_queue requests.
1030 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1031 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1032 *   one unanswered SRP request to an initiator.
1033 */
1034static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
1035				      enum srp_iu_type iu_type)
1036{
1037	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1038	struct srp_iu *iu;
1039
1040	srp_send_completion(target->send_cq, target);
1041
1042	if (list_empty(&target->free_tx))
1043		return NULL;
1044
1045	/* Initiator responses to target requests do not consume credits */
1046	if (iu_type != SRP_IU_RSP) {
1047		if (target->req_lim <= rsv) {
1048			++target->zero_req_lim;
1049			return NULL;
1050		}
1051
1052		--target->req_lim;
1053	}
1054
1055	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
1056	list_del(&iu->list);
1057	return iu;
1058}
1059
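/*
 * Both srp_post_send() and srp_post_recv() below stash the IU pointer in
 * wr_id, so srp_send_completion() and srp_handle_recv() can recover the
 * information unit directly from the polled work completion.
 */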
1060static int srp_post_send(struct srp_target_port *target,
1061			 struct srp_iu *iu, int len)
1062{
1063	struct ib_sge list;
1064	struct ib_send_wr wr, *bad_wr;
1065
1066	list.addr   = iu->dma;
1067	list.length = len;
1068	list.lkey   = target->lkey;
1069
1070	wr.next       = NULL;
1071	wr.wr_id      = (uintptr_t) iu;
1072	wr.sg_list    = &list;
1073	wr.num_sge    = 1;
1074	wr.opcode     = IB_WR_SEND;
1075	wr.send_flags = IB_SEND_SIGNALED;
1076
1077	return ib_post_send(target->qp, &wr, &bad_wr);
1078}
1079
1080static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
1081{
1082	struct ib_recv_wr wr, *bad_wr;
1083	struct ib_sge list;
1084
1085	list.addr   = iu->dma;
1086	list.length = iu->size;
1087	list.lkey   = target->lkey;
1088
1089	wr.next     = NULL;
1090	wr.wr_id    = (uintptr_t) iu;
1091	wr.sg_list  = &list;
1092	wr.num_sge  = 1;
1093
1094	return ib_post_recv(target->qp, &wr, &bad_wr);
1095}
1096
1097static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
1098{
1099	struct srp_request *req;
1100	struct scsi_cmnd *scmnd;
1101	unsigned long flags;
1102
1103	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1104		spin_lock_irqsave(&target->lock, flags);
1105		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
1106		spin_unlock_irqrestore(&target->lock, flags);
1107
1108		target->tsk_mgmt_status = -1;
1109		if (be32_to_cpu(rsp->resp_data_len) >= 4)
1110			target->tsk_mgmt_status = rsp->data[3];
1111		complete(&target->tsk_mgmt_done);
1112	} else {
1113		req = &target->req_ring[rsp->tag];
1114		scmnd = srp_claim_req(target, req, NULL);
1115		if (!scmnd) {
1116			shost_printk(KERN_ERR, target->scsi_host,
1117				     "Null scmnd for RSP w/tag %016llx\n",
1118				     (unsigned long long) rsp->tag);
1119
1120			spin_lock_irqsave(&target->lock, flags);
1121			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
1122			spin_unlock_irqrestore(&target->lock, flags);
1123
1124			return;
1125		}
1126		scmnd->result = rsp->status;
1127
1128		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1129			memcpy(scmnd->sense_buffer, rsp->data +
1130			       be32_to_cpu(rsp->resp_data_len),
1131			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1132				     SCSI_SENSE_BUFFERSIZE));
1133		}
1134
1135		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
1136			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1137		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
1138			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1139
1140		srp_free_req(target, req, scmnd,
1141			     be32_to_cpu(rsp->req_lim_delta));
1142
1143		scmnd->host_scribble = NULL;
1144		scmnd->scsi_done(scmnd);
1145	}
1146}
1147
1148static int srp_response_common(struct srp_target_port *target, s32 req_delta,
1149			       void *rsp, int len)
1150{
1151	struct ib_device *dev = target->srp_host->srp_dev->dev;
1152	unsigned long flags;
1153	struct srp_iu *iu;
1154	int err;
1155
1156	spin_lock_irqsave(&target->lock, flags);
1157	target->req_lim += req_delta;
1158	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
1159	spin_unlock_irqrestore(&target->lock, flags);
1160
1161	if (!iu) {
1162		shost_printk(KERN_ERR, target->scsi_host, PFX
1163			     "no IU available to send response\n");
1164		return 1;
1165	}
1166
1167	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1168	memcpy(iu->buf, rsp, len);
1169	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1170
1171	err = srp_post_send(target, iu, len);
1172	if (err) {
1173		shost_printk(KERN_ERR, target->scsi_host, PFX
1174			     "unable to post response: %d\n", err);
1175		srp_put_tx_iu(target, iu, SRP_IU_RSP);
1176	}
1177
1178	return err;
1179}
1180
1181static void srp_process_cred_req(struct srp_target_port *target,
1182				 struct srp_cred_req *req)
1183{
1184	struct srp_cred_rsp rsp = {
1185		.opcode = SRP_CRED_RSP,
1186		.tag = req->tag,
1187	};
1188	s32 delta = be32_to_cpu(req->req_lim_delta);
1189
1190	if (srp_response_common(target, delta, &rsp, sizeof rsp))
1191		shost_printk(KERN_ERR, target->scsi_host, PFX
1192			     "problems processing SRP_CRED_REQ\n");
1193}
1194
1195static void srp_process_aer_req(struct srp_target_port *target,
1196				struct srp_aer_req *req)
1197{
1198	struct srp_aer_rsp rsp = {
1199		.opcode = SRP_AER_RSP,
1200		.tag = req->tag,
1201	};
1202	s32 delta = be32_to_cpu(req->req_lim_delta);
1203
1204	shost_printk(KERN_ERR, target->scsi_host, PFX
1205		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1206
1207	if (srp_response_common(target, delta, &rsp, sizeof rsp))
1208		shost_printk(KERN_ERR, target->scsi_host, PFX
1209			     "problems processing SRP_AER_REQ\n");
1210}
1211
1212static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1213{
1214	struct ib_device *dev = target->srp_host->srp_dev->dev;
1215	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1216	int res;
1217	u8 opcode;
1218
1219	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1220				   DMA_FROM_DEVICE);
1221
1222	opcode = *(u8 *) iu->buf;
1223
1224	if (0) {
1225		shost_printk(KERN_ERR, target->scsi_host,
1226			     PFX "recv completion, opcode 0x%02x\n", opcode);
1227		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1228			       iu->buf, wc->byte_len, true);
1229	}
1230
1231	switch (opcode) {
1232	case SRP_RSP:
1233		srp_process_rsp(target, iu->buf);
1234		break;
1235
1236	case SRP_CRED_REQ:
1237		srp_process_cred_req(target, iu->buf);
1238		break;
1239
1240	case SRP_AER_REQ:
1241		srp_process_aer_req(target, iu->buf);
1242		break;
1243
1244	case SRP_T_LOGOUT:
1245		/* XXX Handle target logout */
1246		shost_printk(KERN_WARNING, target->scsi_host,
1247			     PFX "Got target logout request\n");
1248		break;
1249
1250	default:
1251		shost_printk(KERN_WARNING, target->scsi_host,
1252			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1253		break;
1254	}
1255
1256	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1257				      DMA_FROM_DEVICE);
1258
1259	res = srp_post_recv(target, iu);
1260	if (res != 0)
1261		shost_printk(KERN_ERR, target->scsi_host,
1262			     PFX "Recv failed with error code %d\n", res);
1263}
1264
1265static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
1266{
1267	struct srp_target_port *target = target_ptr;
1268	struct ib_wc wc;
1269
1270	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1271	while (ib_poll_cq(cq, 1, &wc) > 0) {
1272		if (wc.status) {
1273			shost_printk(KERN_ERR, target->scsi_host,
1274				     PFX "failed receive status %d\n",
1275				     wc.status);
1276			target->qp_in_error = 1;
1277			break;
1278		}
1279
1280		srp_handle_recv(target, &wc);
1281	}
1282}
1283
1284static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1285{
1286	struct srp_target_port *target = target_ptr;
1287	struct ib_wc wc;
1288	struct srp_iu *iu;
1289
1290	while (ib_poll_cq(cq, 1, &wc) > 0) {
1291		if (wc.status) {
1292			shost_printk(KERN_ERR, target->scsi_host,
1293				     PFX "failed send status %d\n",
1294				     wc.status);
1295			target->qp_in_error = 1;
1296			break;
1297		}
1298
1299		iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1300		list_add(&iu->list, &target->free_tx);
1301	}
1302}
1303
1304static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1305{
1306	struct srp_target_port *target = host_to_target(shost);
1307	struct srp_request *req;
1308	struct srp_iu *iu;
1309	struct srp_cmd *cmd;
1310	struct ib_device *dev;
1311	unsigned long flags;
1312	int len;
1313
1314	if (target->state == SRP_TARGET_CONNECTING)
1315		goto err;
1316
1317	if (target->state == SRP_TARGET_DEAD ||
1318	    target->state == SRP_TARGET_REMOVED) {
1319		scmnd->result = DID_BAD_TARGET << 16;
1320		scmnd->scsi_done(scmnd);
1321		return 0;
1322	}
1323
1324	spin_lock_irqsave(&target->lock, flags);
1325	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
1326	if (!iu)
1327		goto err_unlock;
1328
1329	req = list_first_entry(&target->free_reqs, struct srp_request, list);
1330	list_del(&req->list);
1331	spin_unlock_irqrestore(&target->lock, flags);
1332
1333	dev = target->srp_host->srp_dev->dev;
1334	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
1335				   DMA_TO_DEVICE);
1336
1337	scmnd->result        = 0;
1338	scmnd->host_scribble = (void *) req;
1339
1340	cmd = iu->buf;
1341	memset(cmd, 0, sizeof *cmd);
1342
1343	cmd->opcode = SRP_CMD;
1344	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
1345	cmd->tag    = req->index;
1346	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1347
1348	req->scmnd    = scmnd;
1349	req->cmd      = iu;
1350
1351	len = srp_map_data(scmnd, target, req);
1352	if (len < 0) {
1353		shost_printk(KERN_ERR, target->scsi_host,
1354			     PFX "Failed to map data\n");
1355		goto err_iu;
1356	}
1357
1358	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
1359				      DMA_TO_DEVICE);
1360
1361	if (srp_post_send(target, iu, len)) {
1362		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
1363		goto err_unmap;
1364	}
1365
1366	return 0;
1367
1368err_unmap:
1369	srp_unmap_data(scmnd, target, req);
1370
1371err_iu:
1372	srp_put_tx_iu(target, iu, SRP_IU_CMD);
1373
1374	spin_lock_irqsave(&target->lock, flags);
1375	list_add(&req->list, &target->free_reqs);
1376
1377err_unlock:
1378	spin_unlock_irqrestore(&target->lock, flags);
1379
1380err:
1381	return SCSI_MLQUEUE_HOST_BUSY;
1382}
1383
1384static int srp_alloc_iu_bufs(struct srp_target_port *target)
1385{
1386	int i;
1387
1388	for (i = 0; i < SRP_RQ_SIZE; ++i) {
1389		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1390						  target->max_ti_iu_len,
1391						  GFP_KERNEL, DMA_FROM_DEVICE);
1392		if (!target->rx_ring[i])
1393			goto err;
1394	}
1395
1396	for (i = 0; i < SRP_SQ_SIZE; ++i) {
1397		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
1398						  target->max_iu_len,
1399						  GFP_KERNEL, DMA_TO_DEVICE);
1400		if (!target->tx_ring[i])
1401			goto err;
1402
1403		list_add(&target->tx_ring[i]->list, &target->free_tx);
1404	}
1405
1406	return 0;
1407
1408err:
1409	for (i = 0; i < SRP_RQ_SIZE; ++i) {
1410		srp_free_iu(target->srp_host, target->rx_ring[i]);
1411		target->rx_ring[i] = NULL;
1412	}
1413
1414	for (i = 0; i < SRP_SQ_SIZE; ++i) {
1415		srp_free_iu(target->srp_host, target->tx_ring[i]);
1416		target->tx_ring[i] = NULL;
1417	}
1418
1419	return -ENOMEM;
1420}
1421
1422static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1423			       struct srp_login_rsp *lrsp,
1424			       struct srp_target_port *target)
1425{
1426	struct ib_qp_attr *qp_attr = NULL;
1427	int attr_mask = 0;
1428	int ret;
1429	int i;
1430
1431	if (lrsp->opcode == SRP_LOGIN_RSP) {
1432		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1433		target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
1434
1435		/*
1436		 * Reserve credits for task management so we don't
1437		 * bounce requests back to the SCSI mid-layer.
1438		 */
1439		target->scsi_host->can_queue
1440			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1441			      target->scsi_host->can_queue);
1442	} else {
1443		shost_printk(KERN_WARNING, target->scsi_host,
1444			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1445		ret = -ECONNRESET;
1446		goto error;
1447	}
1448
1449	if (!target->rx_ring[0]) {
1450		ret = srp_alloc_iu_bufs(target);
1451		if (ret)
1452			goto error;
1453	}
1454
1455	ret = -ENOMEM;
1456	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1457	if (!qp_attr)
1458		goto error;
1459
1460	qp_attr->qp_state = IB_QPS_RTR;
1461	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1462	if (ret)
1463		goto error_free;
1464
1465	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1466	if (ret)
1467		goto error_free;
1468
1469	for (i = 0; i < SRP_RQ_SIZE; i++) {
1470		struct srp_iu *iu = target->rx_ring[i];
1471		ret = srp_post_recv(target, iu);
1472		if (ret)
1473			goto error_free;
1474	}
1475
1476	qp_attr->qp_state = IB_QPS_RTS;
1477	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1478	if (ret)
1479		goto error_free;
1480
1481	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1482	if (ret)
1483		goto error_free;
1484
1485	ret = ib_send_cm_rtu(cm_id, NULL, 0);
1486
1487error_free:
1488	kfree(qp_attr);
1489
1490error:
1491	target->status = ret;
1492}
1493
1494static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1495			       struct ib_cm_event *event,
1496			       struct srp_target_port *target)
1497{
1498	struct Scsi_Host *shost = target->scsi_host;
1499	struct ib_class_port_info *cpi;
1500	int opcode;
1501
1502	switch (event->param.rej_rcvd.reason) {
1503	case IB_CM_REJ_PORT_CM_REDIRECT:
1504		cpi = event->param.rej_rcvd.ari;
1505		target->path.dlid = cpi->redirect_lid;
1506		target->path.pkey = cpi->redirect_pkey;
1507		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1508		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1509
1510		target->status = target->path.dlid ?
1511			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1512		break;
1513
1514	case IB_CM_REJ_PORT_REDIRECT:
1515		if (srp_target_is_topspin(target)) {
1516			/*
1517			 * Topspin/Cisco SRP gateways incorrectly send
1518			 * reject reason code 25 when they mean 24
1519			 * (port redirect).
1520			 */
1521			memcpy(target->path.dgid.raw,
1522			       event->param.rej_rcvd.ari, 16);
1523
1524			shost_printk(KERN_DEBUG, shost,
1525				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1526				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1527				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
1528
1529			target->status = SRP_PORT_REDIRECT;
1530		} else {
1531			shost_printk(KERN_WARNING, shost,
1532				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
1533			target->status = -ECONNRESET;
1534		}
1535		break;
1536
1537	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
1538		shost_printk(KERN_WARNING, shost,
1539			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
1540		target->status = -ECONNRESET;
1541		break;
1542
1543	case IB_CM_REJ_CONSUMER_DEFINED:
1544		opcode = *(u8 *) event->private_data;
1545		if (opcode == SRP_LOGIN_REJ) {
1546			struct srp_login_rej *rej = event->private_data;
1547			u32 reason = be32_to_cpu(rej->reason);
1548
1549			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
1550				shost_printk(KERN_WARNING, shost,
1551					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
1552			else
1553				shost_printk(KERN_WARNING, shost,
1554					    PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
1555		} else
1556			shost_printk(KERN_WARNING, shost,
1557				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1558				     " opcode 0x%02x\n", opcode);
1559		target->status = -ECONNRESET;
1560		break;
1561
1562	case IB_CM_REJ_STALE_CONN:
1563		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
1564		target->status = SRP_STALE_CONN;
1565		break;
1566
1567	default:
1568		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
1569			     event->param.rej_rcvd.reason);
1570		target->status = -ECONNRESET;
1571	}
1572}
1573
1574static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1575{
1576	struct srp_target_port *target = cm_id->context;
1577	int comp = 0;
1578
1579	switch (event->event) {
1580	case IB_CM_REQ_ERROR:
1581		shost_printk(KERN_DEBUG, target->scsi_host,
1582			     PFX "Sending CM REQ failed\n");
1583		comp = 1;
1584		target->status = -ECONNRESET;
1585		break;
1586
1587	case IB_CM_REP_RECEIVED:
1588		comp = 1;
1589		srp_cm_rep_handler(cm_id, event->private_data, target);
1590		break;
1591
1592	case IB_CM_REJ_RECEIVED:
1593		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
1594		comp = 1;
1595
1596		srp_cm_rej_handler(cm_id, event, target);
1597		break;
1598
1599	case IB_CM_DREQ_RECEIVED:
1600		shost_printk(KERN_WARNING, target->scsi_host,
1601			     PFX "DREQ received - connection closed\n");
1602		if (ib_send_cm_drep(cm_id, NULL, 0))
1603			shost_printk(KERN_ERR, target->scsi_host,
1604				     PFX "Sending CM DREP failed\n");
1605		break;
1606
1607	case IB_CM_TIMEWAIT_EXIT:
1608		shost_printk(KERN_ERR, target->scsi_host,
1609			     PFX "connection closed\n");
1610
1611		comp = 1;
1612		target->status = 0;
1613		break;
1614
1615	case IB_CM_MRA_RECEIVED:
1616	case IB_CM_DREQ_ERROR:
1617	case IB_CM_DREP_RECEIVED:
1618		break;
1619
1620	default:
1621		shost_printk(KERN_WARNING, target->scsi_host,
1622			     PFX "Unhandled CM event %d\n", event->event);
1623		break;
1624	}
1625
1626	if (comp)
1627		complete(&target->done);
1628
1629	return 0;
1630}
1631
1632static int srp_send_tsk_mgmt(struct srp_target_port *target,
1633			     u64 req_tag, unsigned int lun, u8 func)
1634{
1635	struct ib_device *dev = target->srp_host->srp_dev->dev;
1636	struct srp_iu *iu;
1637	struct srp_tsk_mgmt *tsk_mgmt;
1638
1639	if (target->state == SRP_TARGET_DEAD ||
1640	    target->state == SRP_TARGET_REMOVED)
1641		return -1;
1642
1643	init_completion(&target->tsk_mgmt_done);
1644
1645	spin_lock_irq(&target->lock);
1646	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
1647	spin_unlock_irq(&target->lock);
1648
1649	if (!iu)
1650		return -1;
1651
1652	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1653				   DMA_TO_DEVICE);
1654	tsk_mgmt = iu->buf;
1655	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1656
1657	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
1658	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
1659	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
1660	tsk_mgmt->tsk_mgmt_func = func;
1661	tsk_mgmt->task_tag	= req_tag;
1662
1663	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1664				      DMA_TO_DEVICE);
1665	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
1666		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
1667		return -1;
1668	}
1669
1670	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
1671					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1672		return -1;
1673
1674	return 0;
1675}
1676
1677static int srp_abort(struct scsi_cmnd *scmnd)
1678{
1679	struct srp_target_port *target = host_to_target(scmnd->device->host);
1680	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
1681
1682	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
1683
1684	if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
1685		return FAILED;
1686	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
1687			  SRP_TSK_ABORT_TASK);
1688	srp_free_req(target, req, scmnd, 0);
1689	scmnd->result = DID_ABORT << 16;
1690	scmnd->scsi_done(scmnd);
1691
1692	return SUCCESS;
1693}
1694
1695static int srp_reset_device(struct scsi_cmnd *scmnd)
1696{
1697	struct srp_target_port *target = host_to_target(scmnd->device->host);
1698	int i;
1699
1700	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
1701
1702	if (target->qp_in_error)
1703		return FAILED;
1704	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
1705			      SRP_TSK_LUN_RESET))
1706		return FAILED;
1707	if (target->tsk_mgmt_status)
1708		return FAILED;
1709
1710	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
1711		struct srp_request *req = &target->req_ring[i];
1712		if (req->scmnd && req->scmnd->device == scmnd->device)
1713			srp_reset_req(target, req);
1714	}
1715
1716	return SUCCESS;
1717}
1718
1719static int srp_reset_host(struct scsi_cmnd *scmnd)
1720{
1721	struct srp_target_port *target = host_to_target(scmnd->device->host);
1722	int ret = FAILED;
1723
1724	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
1725
1726	if (!srp_reconnect_target(target))
1727		ret = SUCCESS;
1728
1729	return ret;
1730}
1731
1732static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
1733			   char *buf)
1734{
1735	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1736
1737	return sprintf(buf, "0x%016llx\n",
1738		       (unsigned long long) be64_to_cpu(target->id_ext));
1739}
1740
1741static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
1742			     char *buf)
1743{
1744	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1745
1746	return sprintf(buf, "0x%016llx\n",
1747		       (unsigned long long) be64_to_cpu(target->ioc_guid));
1748}
1749
1750static ssize_t show_service_id(struct device *dev,
1751			       struct device_attribute *attr, char *buf)
1752{
1753	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1754
1755	return sprintf(buf, "0x%016llx\n",
1756		       (unsigned long long) be64_to_cpu(target->service_id));
1757}
1758
1759static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
1760			 char *buf)
1761{
1762	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1763
1764	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
1765}
1766
1767static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
1768			 char *buf)
1769{
1770	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1771
1772	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
1773}
1774
1775static ssize_t show_orig_dgid(struct device *dev,
1776			      struct device_attribute *attr, char *buf)
1777{
1778	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1779
1780	return sprintf(buf, "%pI6\n", target->orig_dgid);
1781}
1782
1783static ssize_t show_req_lim(struct device *dev,
1784			    struct device_attribute *attr, char *buf)
1785{
1786	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1787
1788	return sprintf(buf, "%d\n", target->req_lim);
1789}
1790
1791static ssize_t show_zero_req_lim(struct device *dev,
1792				 struct device_attribute *attr, char *buf)
1793{
1794	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1795
1796	return sprintf(buf, "%d\n", target->zero_req_lim);
1797}
1798
1799static ssize_t show_local_ib_port(struct device *dev,
1800				  struct device_attribute *attr, char *buf)
1801{
1802	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1803
1804	return sprintf(buf, "%d\n", target->srp_host->port);
1805}
1806
1807static ssize_t show_local_ib_device(struct device *dev,
1808				    struct device_attribute *attr, char *buf)
1809{
1810	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1811
1812	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
1813}
1814
1815static ssize_t show_cmd_sg_entries(struct device *dev,
1816				   struct device_attribute *attr, char *buf)
1817{
1818	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1819
1820	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
1821}
1822
1823static ssize_t show_allow_ext_sg(struct device *dev,
1824				 struct device_attribute *attr, char *buf)
1825{
1826	struct srp_target_port *target = host_to_target(class_to_shost(dev));
1827
1828	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
1829}
1830
1831static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
1832static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
1833static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
1834static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
1835static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
1836static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
1837static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
1838static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
1839static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
1840static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
1841static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
1842static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
1843
1844static struct device_attribute *srp_host_attrs[] = {
1845	&dev_attr_id_ext,
1846	&dev_attr_ioc_guid,
1847	&dev_attr_service_id,
1848	&dev_attr_pkey,
1849	&dev_attr_dgid,
1850	&dev_attr_orig_dgid,
1851	&dev_attr_req_lim,
1852	&dev_attr_zero_req_lim,
1853	&dev_attr_local_ib_port,
1854	&dev_attr_local_ib_device,
1855	&dev_attr_cmd_sg_entries,
1856	&dev_attr_allow_ext_sg,
1857	NULL
1858};
1859
1860static struct scsi_host_template srp_template = {
1861	.module				= THIS_MODULE,
1862	.name				= "InfiniBand SRP initiator",
1863	.proc_name			= DRV_NAME,
1864	.info				= srp_target_info,
1865	.queuecommand			= srp_queuecommand,
1866	.eh_abort_handler		= srp_abort,
1867	.eh_device_reset_handler	= srp_reset_device,
1868	.eh_host_reset_handler		= srp_reset_host,
1869	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
1870	.can_queue			= SRP_CMD_SQ_SIZE,
1871	.this_id			= -1,
1872	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
1873	.use_clustering			= ENABLE_CLUSTERING,
1874	.shost_attrs			= srp_host_attrs
1875};
1876
1877static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
1878{
1879	struct srp_rport_identifiers ids;
1880	struct srp_rport *rport;
1881
1882	sprintf(target->target_name, "SRP.T10:%016llX",
1883		 (unsigned long long) be64_to_cpu(target->id_ext));
1884
1885	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
1886		return -ENODEV;
1887
1888	memcpy(ids.port_id, &target->id_ext, 8);
1889	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
1890	ids.roles = SRP_RPORT_ROLE_TARGET;
1891	rport = srp_rport_add(target->scsi_host, &ids);
1892	if (IS_ERR(rport)) {
1893		scsi_remove_host(target->scsi_host);
1894		return PTR_ERR(rport);
1895	}
1896
1897	spin_lock(&host->target_lock);
1898	list_add_tail(&target->list, &host->target_list);
1899	spin_unlock(&host->target_lock);
1900
1901	target->state = SRP_TARGET_LIVE;
1902
1903	scsi_scan_target(&target->scsi_host->shost_gendev,
1904			 0, target->scsi_id, SCAN_WILD_CARD, 0);
1905
1906	return 0;
1907}
1908
1909static void srp_release_dev(struct device *dev)
1910{
1911	struct srp_host *host =
1912		container_of(dev, struct srp_host, dev);
1913
1914	complete(&host->released);
1915}
1916
1917static struct class srp_class = {
1918	.name    = "infiniband_srp",
1919	.dev_release = srp_release_dev
1920};
1921
1922/*
1923 * Target ports are added by writing
1924 *
1925 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
1926 *     pkey=<P_Key>,service_id=<service ID>
1927 *
1928 * to the add_target sysfs attribute.
1929 */
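/*
 * For example (all values below are purely illustrative; the sysfs
 * directory name follows the "srp-%s-%d" scheme used by srp_add_port()):
 *
 *   echo id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4 > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */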
1930enum {
1931	SRP_OPT_ERR		= 0,
1932	SRP_OPT_ID_EXT		= 1 << 0,
1933	SRP_OPT_IOC_GUID	= 1 << 1,
1934	SRP_OPT_DGID		= 1 << 2,
1935	SRP_OPT_PKEY		= 1 << 3,
1936	SRP_OPT_SERVICE_ID	= 1 << 4,
1937	SRP_OPT_MAX_SECT	= 1 << 5,
1938	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
1939	SRP_OPT_IO_CLASS	= 1 << 7,
1940	SRP_OPT_INITIATOR_EXT	= 1 << 8,
1941	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
1942	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
1943	SRP_OPT_SG_TABLESIZE	= 1 << 11,
1944	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
1945				   SRP_OPT_IOC_GUID	|
1946				   SRP_OPT_DGID		|
1947				   SRP_OPT_PKEY		|
1948				   SRP_OPT_SERVICE_ID),
1949};
1950
1951static const match_table_t srp_opt_tokens = {
1952	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
1953	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
1954	{ SRP_OPT_DGID,			"dgid=%s" 		},
1955	{ SRP_OPT_PKEY,			"pkey=%x" 		},
1956	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
1957	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
1958	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
1959	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
1960	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
1961	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
1962	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
1963	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
1964	{ SRP_OPT_ERR,			NULL 			}
1965};
1966
1967static int srp_parse_options(const char *buf, struct srp_target_port *target)
1968{
1969	char *options, *sep_opt;
1970	char *p;
1971	char dgid[3];
1972	substring_t args[MAX_OPT_ARGS];
1973	int opt_mask = 0;
1974	int token;
1975	int ret = -EINVAL;
1976	int i;
1977
1978	options = kstrdup(buf, GFP_KERNEL);
1979	if (!options)
1980		return -ENOMEM;
1981
1982	sep_opt = options;
1983	while ((p = strsep(&sep_opt, ",")) != NULL) {
1984		if (!*p)
1985			continue;
1986
1987		token = match_token(p, srp_opt_tokens, args);
1988		opt_mask |= token;
1989
1990		switch (token) {
1991		case SRP_OPT_ID_EXT:
1992			p = match_strdup(args);
1993			if (!p) {
1994				ret = -ENOMEM;
1995				goto out;
1996			}
1997			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
1998			kfree(p);
1999			break;
2000
2001		case SRP_OPT_IOC_GUID:
2002			p = match_strdup(args);
2003			if (!p) {
2004				ret = -ENOMEM;
2005				goto out;
2006			}
2007			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2008			kfree(p);
2009			break;
2010
2011		case SRP_OPT_DGID:
2012			p = match_strdup(args);
2013			if (!p) {
2014				ret = -ENOMEM;
2015				goto out;
2016			}
2017			if (strlen(p) != 32) {
2018				pr_warn("bad dest GID parameter '%s'\n", p);
2019				kfree(p);
2020				goto out;
2021			}
2022
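			/*
			 * The dgid value is 32 hex characters, two per raw
			 * GID byte; parse it a byte at a time below.
			 */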
2023			for (i = 0; i < 16; ++i) {
2024				strlcpy(dgid, p + i * 2, 3);
2025				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2026			}
2027			kfree(p);
2028			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
2029			break;
2030
2031		case SRP_OPT_PKEY:
2032			if (match_hex(args, &token)) {
2033				pr_warn("bad P_Key parameter '%s'\n", p);
2034				goto out;
2035			}
2036			target->path.pkey = cpu_to_be16(token);
2037			break;
2038
2039		case SRP_OPT_SERVICE_ID:
2040			p = match_strdup(args);
2041			if (!p) {
2042				ret = -ENOMEM;
2043				goto out;
2044			}
2045			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2046			target->path.service_id = target->service_id;
2047			kfree(p);
2048			break;
2049
2050		case SRP_OPT_MAX_SECT:
2051			if (match_int(args, &token)) {
2052				pr_warn("bad max sect parameter '%s'\n", p);
2053				goto out;
2054			}
2055			target->scsi_host->max_sectors = token;
2056			break;
2057
2058		case SRP_OPT_MAX_CMD_PER_LUN:
2059			if (match_int(args, &token)) {
2060				pr_warn("bad max cmd_per_lun parameter '%s'\n",
2061					p);
2062				goto out;
2063			}
2064			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
2065			break;
2066
2067		case SRP_OPT_IO_CLASS:
2068			if (match_hex(args, &token)) {
2069				pr_warn("bad IO class parameter '%s'\n", p);
2070				goto out;
2071			}
2072			if (token != SRP_REV10_IB_IO_CLASS &&
2073			    token != SRP_REV16A_IB_IO_CLASS) {
2074				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2075					token, SRP_REV10_IB_IO_CLASS,
2076					SRP_REV16A_IB_IO_CLASS);
2077				goto out;
2078			}
2079			target->io_class = token;
2080			break;
2081
2082		case SRP_OPT_INITIATOR_EXT:
2083			p = match_strdup(args);
2084			if (!p) {
2085				ret = -ENOMEM;
2086				goto out;
2087			}
2088			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2089			kfree(p);
2090			break;
2091
2092		case SRP_OPT_CMD_SG_ENTRIES:
2093			if (match_int(args, &token) || token < 1 || token > 255) {
2094				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2095					p);
2096				goto out;
2097			}
2098			target->cmd_sg_cnt = token;
2099			break;
2100
2101		case SRP_OPT_ALLOW_EXT_SG:
2102			if (match_int(args, &token)) {
2103				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
2104				goto out;
2105			}
2106			target->allow_ext_sg = !!token;
2107			break;
2108
2109		case SRP_OPT_SG_TABLESIZE:
2110			if (match_int(args, &token) || token < 1 ||
2111					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
2112				pr_warn("bad max sg_tablesize parameter '%s'\n",
2113					p);
2114				goto out;
2115			}
2116			target->sg_tablesize = token;
2117			break;
2118
2119		default:
2120			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2121				p);
2122			goto out;
2123		}
2124	}
2125
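	/*
	 * id_ext, ioc_guid, dgid, pkey and service_id (the SRP_OPT_ALL mask)
	 * are mandatory; warn about each one that is missing.
	 */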
2126	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2127		ret = 0;
2128	else
2129		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2130			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2131			    !(srp_opt_tokens[i].token & opt_mask))
2132				pr_warn("target creation request is missing parameter '%s'\n",
2133					srp_opt_tokens[i].pattern);
2134
2135out:
2136	kfree(options);
2137	return ret;
2138}
2139
2140static ssize_t srp_create_target(struct device *dev,
2141				 struct device_attribute *attr,
2142				 const char *buf, size_t count)
2143{
2144	struct srp_host *host =
2145		container_of(dev, struct srp_host, dev);
2146	struct Scsi_Host *target_host;
2147	struct srp_target_port *target;
2148	struct ib_device *ibdev = host->srp_dev->dev;
2149	dma_addr_t dma_addr;
2150	int i, ret;
2151
2152	target_host = scsi_host_alloc(&srp_template,
2153				      sizeof (struct srp_target_port));
2154	if (!target_host)
2155		return -ENOMEM;
2156
2157	target_host->transportt  = ib_srp_transport_template;
2158	target_host->max_channel = 0;
2159	target_host->max_id      = 1;
2160	target_host->max_lun     = SRP_MAX_LUN;
2161	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
2162
2163	target = host_to_target(target_host);
2164
2165	target->io_class	= SRP_REV16A_IB_IO_CLASS;
2166	target->scsi_host	= target_host;
2167	target->srp_host	= host;
2168	target->lkey		= host->srp_dev->mr->lkey;
2169	target->rkey		= host->srp_dev->mr->rkey;
2170	target->cmd_sg_cnt	= cmd_sg_entries;
2171	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
2172	target->allow_ext_sg	= allow_ext_sg;
2173
2174	ret = srp_parse_options(buf, target);
2175	if (ret)
2176		goto err;
2177
2178	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
2179				target->cmd_sg_cnt < target->sg_tablesize) {
2180		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
2181		target->sg_tablesize = target->cmd_sg_cnt;
2182	}
2183
2184	target_host->sg_tablesize = target->sg_tablesize;
2185	target->indirect_size = target->sg_tablesize *
2186				sizeof (struct srp_direct_buf);
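	/*
	 * Size the largest request IU the initiator will send: an SRP_CMD
	 * plus an indirect buffer descriptor followed by up to cmd_sg_cnt
	 * direct buffer descriptors.
	 */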
2187	target->max_iu_len = sizeof (struct srp_cmd) +
2188			     sizeof (struct srp_indirect_buf) +
2189			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
2190
2191	spin_lock_init(&target->lock);
2192	INIT_LIST_HEAD(&target->free_tx);
2193	INIT_LIST_HEAD(&target->free_reqs);
2194	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
2195		struct srp_request *req = &target->req_ring[i];
2196
2197		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
2198					GFP_KERNEL);
2199		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
2200					GFP_KERNEL);
2201		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
2202		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
2203			goto err_free_mem;
2204
2205		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
2206					     target->indirect_size,
2207					     DMA_TO_DEVICE);
2208		if (ib_dma_mapping_error(ibdev, dma_addr))
2209			goto err_free_mem;
2210
2211		req->indirect_dma_addr = dma_addr;
2212		req->index = i;
2213		list_add_tail(&req->list, &target->free_reqs);
2214	}
2215
2216	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
2217
2218	shost_printk(KERN_DEBUG, target->scsi_host, PFX
2219		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
2220		     "service_id %016llx dgid %pI6\n",
2221	       (unsigned long long) be64_to_cpu(target->id_ext),
2222	       (unsigned long long) be64_to_cpu(target->ioc_guid),
2223	       be16_to_cpu(target->path.pkey),
2224	       (unsigned long long) be64_to_cpu(target->service_id),
2225	       target->path.dgid.raw);
2226
2227	ret = srp_create_target_ib(target);
2228	if (ret)
2229		goto err_free_mem;
2230
2231	ret = srp_new_cm_id(target);
2232	if (ret)
2233		goto err_free_ib;
2234
2235	target->qp_in_error = 0;
2236	ret = srp_connect_target(target);
2237	if (ret) {
2238		shost_printk(KERN_ERR, target->scsi_host,
2239			     PFX "Connection failed\n");
2240		goto err_cm_id;
2241	}
2242
2243	ret = srp_add_target(host, target);
2244	if (ret)
2245		goto err_disconnect;
2246
2247	return count;
2248
2249err_disconnect:
2250	srp_disconnect_target(target);
2251
2252err_cm_id:
2253	ib_destroy_cm_id(target->cm_id);
2254
2255err_free_ib:
2256	srp_free_target_ib(target);
2257
2258err_free_mem:
2259	srp_free_req_data(target);
2260
2261err:
2262	scsi_host_put(target_host);
2263
2264	return ret;
2265}
2266
2267static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
2268
2269static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
2270			  char *buf)
2271{
2272	struct srp_host *host = container_of(dev, struct srp_host, dev);
2273
2274	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
2275}
2276
2277static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
2278
2279static ssize_t show_port(struct device *dev, struct device_attribute *attr,
2280			 char *buf)
2281{
2282	struct srp_host *host = container_of(dev, struct srp_host, dev);
2283
2284	return sprintf(buf, "%d\n", host->port);
2285}
2286
2287static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
2288
2289static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
2290{
2291	struct srp_host *host;
2292
2293	host = kzalloc(sizeof *host, GFP_KERNEL);
2294	if (!host)
2295		return NULL;
2296
2297	INIT_LIST_HEAD(&host->target_list);
2298	spin_lock_init(&host->target_lock);
2299	init_completion(&host->released);
2300	host->srp_dev = device;
2301	host->port = port;
2302
2303	host->dev.class = &srp_class;
2304	host->dev.parent = device->dev->dma_device;
2305	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
2306
2307	if (device_register(&host->dev))
2308		goto free_host;
2309	if (device_create_file(&host->dev, &dev_attr_add_target))
2310		goto err_class;
2311	if (device_create_file(&host->dev, &dev_attr_ibdev))
2312		goto err_class;
2313	if (device_create_file(&host->dev, &dev_attr_port))
2314		goto err_class;
2315
2316	return host;
2317
2318err_class:
2319	device_unregister(&host->dev);
2320
2321free_host:
2322	kfree(host);
2323
2324	return NULL;
2325}
2326
2327static void srp_add_one(struct ib_device *device)
2328{
2329	struct srp_device *srp_dev;
2330	struct ib_device_attr *dev_attr;
2331	struct ib_fmr_pool_param fmr_param;
2332	struct srp_host *host;
2333	int max_pages_per_fmr, fmr_page_shift, s, e, p;
2334
2335	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
2336	if (!dev_attr)
2337		return;
2338
2339	if (ib_query_device(device, dev_attr)) {
2340		pr_warn("Query device failed for %s\n", device->name);
2341		goto free_attr;
2342	}
2343
2344	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
2345	if (!srp_dev)
2346		goto free_attr;
2347
2348	/*
2349	 * Use the smallest page size supported by the HCA, down to a
2350	 * minimum of 4096 bytes. We're unlikely to build large sglists
2351	 * out of smaller entries.
2352	 */
2353	fmr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
2354	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
2355	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
2356	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;
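	/*
	 * For example, an HCA whose smallest supported page size is 4 KiB
	 * (lowest set bit of page_size_cap is bit 12) gives fmr_page_shift =
	 * 12, i.e. fmr_page_size = 4096 and fmr_max_size = 4096 * SRP_FMR_SIZE.
	 */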
2357
2358	INIT_LIST_HEAD(&srp_dev->dev_list);
2359
2360	srp_dev->dev = device;
2361	srp_dev->pd  = ib_alloc_pd(device);
2362	if (IS_ERR(srp_dev->pd))
2363		goto free_dev;
2364
2365	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
2366				    IB_ACCESS_LOCAL_WRITE |
2367				    IB_ACCESS_REMOTE_READ |
2368				    IB_ACCESS_REMOTE_WRITE);
2369	if (IS_ERR(srp_dev->mr))
2370		goto err_pd;
2371
2372	for (max_pages_per_fmr = SRP_FMR_SIZE;
2373			max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
2374			max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
2375		memset(&fmr_param, 0, sizeof fmr_param);
2376		fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
2377		fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
2378		fmr_param.cache		    = 1;
2379		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
2380		fmr_param.page_shift	    = fmr_page_shift;
2381		fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
2382					       IB_ACCESS_REMOTE_WRITE |
2383					       IB_ACCESS_REMOTE_READ);
2384
2385		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
2386		if (!IS_ERR(srp_dev->fmr_pool))
2387			break;
2388	}
2389
2390	if (IS_ERR(srp_dev->fmr_pool))
2391		srp_dev->fmr_pool = NULL;
2392
2393	if (device->node_type == RDMA_NODE_IB_SWITCH) {
2394		s = 0;
2395		e = 0;
2396	} else {
2397		s = 1;
2398		e = device->phys_port_cnt;
2399	}
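	/*
	 * An IB switch is managed through port 0, whereas other node types
	 * expose ports 1..phys_port_cnt; the [s, e] range above reflects that.
	 */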
2400
2401	for (p = s; p <= e; ++p) {
2402		host = srp_add_port(srp_dev, p);
2403		if (host)
2404			list_add_tail(&host->list, &srp_dev->dev_list);
2405	}
2406
2407	ib_set_client_data(device, &srp_client, srp_dev);
2408
2409	goto free_attr;
2410
2411err_pd:
2412	ib_dealloc_pd(srp_dev->pd);
2413
2414free_dev:
2415	kfree(srp_dev);
2416
2417free_attr:
2418	kfree(dev_attr);
2419}
2420
2421static void srp_remove_one(struct ib_device *device)
2422{
2423	struct srp_device *srp_dev;
2424	struct srp_host *host, *tmp_host;
2425	LIST_HEAD(target_list);
2426	struct srp_target_port *target, *tmp_target;
2427
2428	srp_dev = ib_get_client_data(device, &srp_client);
2429
2430	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
2431		device_unregister(&host->dev);
2432		/*
2433		 * Wait for the sysfs entry to go away, so that no new
2434		 * target ports can be created.
2435		 */
2436		wait_for_completion(&host->released);
2437
2438		/*
2439		 * Mark all target ports as removed, so we stop queueing
2440		 * commands and don't try to reconnect.
2441		 */
2442		spin_lock(&host->target_lock);
2443		list_for_each_entry(target, &host->target_list, list) {
2444			spin_lock_irq(&target->lock);
2445			target->state = SRP_TARGET_REMOVED;
2446			spin_unlock_irq(&target->lock);
2447		}
2448		spin_unlock(&host->target_lock);
2449
2450		/*
2451		 * Wait for any reconnection tasks that may have
2452		 * started before we marked our target ports as
2453		 * removed, and any target port removal tasks.
2454		 */
2455		flush_workqueue(ib_wq);
2456
2457		list_for_each_entry_safe(target, tmp_target,
2458					 &host->target_list, list) {
2459			srp_del_scsi_host_attr(target->scsi_host);
2460			srp_remove_host(target->scsi_host);
2461			scsi_remove_host(target->scsi_host);
2462			srp_disconnect_target(target);
2463			ib_destroy_cm_id(target->cm_id);
2464			srp_free_target_ib(target);
2465			srp_free_req_data(target);
2466			scsi_host_put(target->scsi_host);
2467		}
2468
2469		kfree(host);
2470	}
2471
2472	if (srp_dev->fmr_pool)
2473		ib_destroy_fmr_pool(srp_dev->fmr_pool);
2474	ib_dereg_mr(srp_dev->mr);
2475	ib_dealloc_pd(srp_dev->pd);
2476
2477	kfree(srp_dev);
2478}
2479
2480static struct srp_function_template ib_srp_transport_functions = {
2481};
2482
2483static int __init srp_init_module(void)
2484{
2485	int ret;
2486
2487	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
2488
2489	if (srp_sg_tablesize) {
2490		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
2491		if (!cmd_sg_entries)
2492			cmd_sg_entries = srp_sg_tablesize;
2493	}
2494
2495	if (!cmd_sg_entries)
2496		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
2497
2498	if (cmd_sg_entries > 255) {
2499		pr_warn("Clamping cmd_sg_entries to 255\n");
2500		cmd_sg_entries = 255;
2501	}
2502
2503	if (!indirect_sg_entries)
2504		indirect_sg_entries = cmd_sg_entries;
2505	else if (indirect_sg_entries < cmd_sg_entries) {
2506		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
2507			cmd_sg_entries);
2508		indirect_sg_entries = cmd_sg_entries;
2509	}
2510
2511	ib_srp_transport_template =
2512		srp_attach_transport(&ib_srp_transport_functions);
2513	if (!ib_srp_transport_template)
2514		return -ENOMEM;
2515
2516	ret = class_register(&srp_class);
2517	if (ret) {
2518		pr_err("couldn't register class infiniband_srp\n");
2519		srp_release_transport(ib_srp_transport_template);
2520		return ret;
2521	}
2522
2523	ib_sa_register_client(&srp_sa_client);
2524
2525	ret = ib_register_client(&srp_client);
2526	if (ret) {
2527		pr_err("couldn't register IB client\n");
2528		srp_release_transport(ib_srp_transport_template);
2529		ib_sa_unregister_client(&srp_sa_client);
2530		class_unregister(&srp_class);
2531		return ret;
2532	}
2533
2534	return 0;
2535}
2536
2537static void __exit srp_cleanup_module(void)
2538{
2539	ib_unregister_client(&srp_client);
2540	ib_sa_unregister_client(&srp_sa_client);
2541	class_unregister(&srp_class);
2542	srp_release_transport(ib_srp_transport_template);
2543}
2544
2545module_init(srp_init_module);
2546module_exit(srp_cleanup_module);
v6.2
   1/*
   2 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  34
  35#include <linux/module.h>
  36#include <linux/init.h>
  37#include <linux/slab.h>
  38#include <linux/err.h>
  39#include <linux/string.h>
  40#include <linux/parser.h>
  41#include <linux/random.h>
  42#include <linux/jiffies.h>
  43#include <linux/lockdep.h>
  44#include <linux/inet.h>
  45#include <rdma/ib_cache.h>
  46
  47#include <linux/atomic.h>
  48
  49#include <scsi/scsi.h>
  50#include <scsi/scsi_device.h>
  51#include <scsi/scsi_dbg.h>
  52#include <scsi/scsi_tcq.h>
  53#include <scsi/srp.h>
  54#include <scsi/scsi_transport_srp.h>
  55
  56#include "ib_srp.h"
  57
  58#define DRV_NAME	"ib_srp"
  59#define PFX		DRV_NAME ": "
  60
  61MODULE_AUTHOR("Roland Dreier");
  62MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
  63MODULE_LICENSE("Dual BSD/GPL");
  64
  65#if !defined(CONFIG_DYNAMIC_DEBUG)
  66#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
  67#define DYNAMIC_DEBUG_BRANCH(descriptor) false
  68#endif
  69
  70static unsigned int srp_sg_tablesize;
  71static unsigned int cmd_sg_entries;
  72static unsigned int indirect_sg_entries;
  73static bool allow_ext_sg;
  74static bool register_always = true;
  75static bool never_register;
  76static int topspin_workarounds = 1;
  77
  78module_param(srp_sg_tablesize, uint, 0444);
  79MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
  80
  81module_param(cmd_sg_entries, uint, 0444);
  82MODULE_PARM_DESC(cmd_sg_entries,
  83		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
  84
  85module_param(indirect_sg_entries, uint, 0444);
  86MODULE_PARM_DESC(indirect_sg_entries,
  87		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
  88
  89module_param(allow_ext_sg, bool, 0444);
  90MODULE_PARM_DESC(allow_ext_sg,
  91		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
  92
  93module_param(topspin_workarounds, int, 0444);
  94MODULE_PARM_DESC(topspin_workarounds,
  95		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
  96
  97module_param(register_always, bool, 0444);
  98MODULE_PARM_DESC(register_always,
  99		 "Use memory registration even for contiguous memory regions");
 100
 101module_param(never_register, bool, 0444);
 102MODULE_PARM_DESC(never_register, "Never register memory");
 103
 104static const struct kernel_param_ops srp_tmo_ops;
 105
 106static int srp_reconnect_delay = 10;
 107module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
 108		S_IRUGO | S_IWUSR);
 109MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
 110
 111static int srp_fast_io_fail_tmo = 15;
 112module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
 113		S_IRUGO | S_IWUSR);
 114MODULE_PARM_DESC(fast_io_fail_tmo,
 115		 "Number of seconds between the observation of a transport"
 116		 " layer error and failing all I/O. \"off\" means that this"
 117		 " functionality is disabled.");
 118
 119static int srp_dev_loss_tmo = 600;
 120module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
 121		S_IRUGO | S_IWUSR);
 122MODULE_PARM_DESC(dev_loss_tmo,
 123		 "Maximum number of seconds that the SRP transport should"
 124		 " insulate transport layer errors. After this time has been"
 125		 " exceeded the SCSI host is removed. Should be"
 126		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
 127		 " if fast_io_fail_tmo has not been set. \"off\" means that"
 128		 " this functionality is disabled.");
 129
 130static bool srp_use_imm_data = true;
 131module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
 132MODULE_PARM_DESC(use_imm_data,
 133		 "Whether or not to request permission to use immediate data during SRP login.");
 134
 135static unsigned int srp_max_imm_data = 8 * 1024;
 136module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
 137MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
 138
 139static unsigned ch_count;
 140module_param(ch_count, uint, 0444);
 141MODULE_PARM_DESC(ch_count,
 142		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
 143
 144static int srp_add_one(struct ib_device *device);
 145static void srp_remove_one(struct ib_device *device, void *client_data);
 146static void srp_rename_dev(struct ib_device *device, void *client_data);
 147static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 148static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
 149		const char *opname);
 150static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
 151			     const struct ib_cm_event *event);
 152static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
 153			       struct rdma_cm_event *event);
 154
 155static struct scsi_transport_template *ib_srp_transport_template;
 156static struct workqueue_struct *srp_remove_wq;
 157
 158static struct ib_client srp_client = {
 159	.name   = "srp",
 160	.add    = srp_add_one,
 161	.remove = srp_remove_one,
 162	.rename = srp_rename_dev
 163};
 164
 165static struct ib_sa_client srp_sa_client;
 166
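/*
 * A negative timeout value is reported by srp_tmo_get() as the keyword
 * "off" and can be set the same way through srp_parse_tmo(); as the
 * fast_io_fail_tmo and dev_loss_tmo parameter descriptions above note,
 * "off" disables the corresponding timeout mechanism.
 */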
 167static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
 168{
 169	int tmo = *(int *)kp->arg;
 170
 171	if (tmo >= 0)
 172		return sysfs_emit(buffer, "%d\n", tmo);
 173	else
 174		return sysfs_emit(buffer, "off\n");
 175}
 176
 177static int srp_tmo_set(const char *val, const struct kernel_param *kp)
 178{
 179	int tmo, res;
 180
 181	res = srp_parse_tmo(&tmo, val);
 182	if (res)
 183		goto out;
 184
 185	if (kp->arg == &srp_reconnect_delay)
 186		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
 187				    srp_dev_loss_tmo);
 188	else if (kp->arg == &srp_fast_io_fail_tmo)
 189		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
 190	else
 191		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
 192				    tmo);
 193	if (res)
 194		goto out;
 195	*(int *)kp->arg = tmo;
 196
 197out:
 198	return res;
 199}
 200
 201static const struct kernel_param_ops srp_tmo_ops = {
 202	.get = srp_tmo_get,
 203	.set = srp_tmo_set,
 204};
 205
 206static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
 207{
 208	return (struct srp_target_port *) host->hostdata;
 209}
 210
 211static const char *srp_target_info(struct Scsi_Host *host)
 212{
 213	return host_to_target(host)->target_name;
 214}
 215
 216static int srp_target_is_topspin(struct srp_target_port *target)
 217{
 218	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
 219	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
 220
 221	return topspin_workarounds &&
 222		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
 223		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
 224}
 225
 226static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
 227				   gfp_t gfp_mask,
 228				   enum dma_data_direction direction)
 229{
 230	struct srp_iu *iu;
 231
 232	iu = kmalloc(sizeof *iu, gfp_mask);
 233	if (!iu)
 234		goto out;
 235
 236	iu->buf = kzalloc(size, gfp_mask);
 237	if (!iu->buf)
 238		goto out_free_iu;
 239
 240	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
 241				    direction);
 242	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
 243		goto out_free_buf;
 244
 245	iu->size      = size;
 246	iu->direction = direction;
 247
 248	return iu;
 249
 250out_free_buf:
 251	kfree(iu->buf);
 252out_free_iu:
 253	kfree(iu);
 254out:
 255	return NULL;
 256}
 257
 258static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
 259{
 260	if (!iu)
 261		return;
 262
 263	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
 264			    iu->direction);
 265	kfree(iu->buf);
 266	kfree(iu);
 267}
 268
 269static void srp_qp_event(struct ib_event *event, void *context)
 270{
 271	pr_debug("QP event %s (%d)\n",
 272		 ib_event_msg(event->event), event->event);
 273}
 274
 275static int srp_init_ib_qp(struct srp_target_port *target,
 276			  struct ib_qp *qp)
 277{
 278	struct ib_qp_attr *attr;
 279	int ret;
 280
 281	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 282	if (!attr)
 283		return -ENOMEM;
 284
 285	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
 286				  target->srp_host->port,
 287				  be16_to_cpu(target->ib_cm.pkey),
 288				  &attr->pkey_index);
 289	if (ret)
 290		goto out;
 291
 292	attr->qp_state        = IB_QPS_INIT;
 293	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
 294				    IB_ACCESS_REMOTE_WRITE);
 295	attr->port_num        = target->srp_host->port;
 296
 297	ret = ib_modify_qp(qp, attr,
 298			   IB_QP_STATE		|
 299			   IB_QP_PKEY_INDEX	|
 300			   IB_QP_ACCESS_FLAGS	|
 301			   IB_QP_PORT);
 302
 303out:
 304	kfree(attr);
 305	return ret;
 306}
 307
 308static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
 309{
 310	struct srp_target_port *target = ch->target;
 311	struct ib_cm_id *new_cm_id;
 312
 313	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
 314				    srp_ib_cm_handler, ch);
 315	if (IS_ERR(new_cm_id))
 316		return PTR_ERR(new_cm_id);
 317
 318	if (ch->ib_cm.cm_id)
 319		ib_destroy_cm_id(ch->ib_cm.cm_id);
 320	ch->ib_cm.cm_id = new_cm_id;
 321	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
 322			    target->srp_host->port))
 323		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
 324	else
 325		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
 326	ch->ib_cm.path.sgid = target->sgid;
 327	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
 328	ch->ib_cm.path.pkey = target->ib_cm.pkey;
 329	ch->ib_cm.path.service_id = target->ib_cm.service_id;
 330
 331	return 0;
 332}
 333
 334static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
 335{
 336	struct srp_target_port *target = ch->target;
 337	struct rdma_cm_id *new_cm_id;
 338	int ret;
 339
 340	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
 341				   RDMA_PS_TCP, IB_QPT_RC);
 342	if (IS_ERR(new_cm_id)) {
 343		ret = PTR_ERR(new_cm_id);
 344		new_cm_id = NULL;
 345		goto out;
 346	}
 347
 348	init_completion(&ch->done);
 349	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
 350				&target->rdma_cm.src.sa : NULL,
 351				&target->rdma_cm.dst.sa,
 352				SRP_PATH_REC_TIMEOUT_MS);
 353	if (ret) {
 354		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
 355		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
 356		goto out;
 357	}
 358	ret = wait_for_completion_interruptible(&ch->done);
 359	if (ret < 0)
 360		goto out;
 361
 362	ret = ch->status;
 363	if (ret) {
 364		pr_err("Resolving address %pISpsc failed (%d)\n",
 365		       &target->rdma_cm.dst, ret);
 366		goto out;
 367	}
 368
 369	swap(ch->rdma_cm.cm_id, new_cm_id);
 370
 371out:
 372	if (new_cm_id)
 373		rdma_destroy_id(new_cm_id);
 374
 375	return ret;
 376}
 377
 378static int srp_new_cm_id(struct srp_rdma_ch *ch)
 379{
 380	struct srp_target_port *target = ch->target;
 381
 382	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
 383		srp_new_ib_cm_id(ch);
 384}
 385
 386/**
 387 * srp_destroy_fr_pool() - free the resources owned by a pool
 388 * @pool: Fast registration pool to be destroyed.
 389 */
 390static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
 391{
 392	int i;
 393	struct srp_fr_desc *d;
 394
 395	if (!pool)
 396		return;
 397
 398	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
 399		if (d->mr)
 400			ib_dereg_mr(d->mr);
 401	}
 402	kfree(pool);
 403}
 404
 405/**
 406 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 407 * @device:            IB device to allocate fast registration descriptors for.
 408 * @pd:                Protection domain associated with the FR descriptors.
 409 * @pool_size:         Number of descriptors to allocate.
 410 * @max_page_list_len: Maximum fast registration work request page list length.
 411 */
 412static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
 413					      struct ib_pd *pd, int pool_size,
 414					      int max_page_list_len)
 415{
 416	struct srp_fr_pool *pool;
 417	struct srp_fr_desc *d;
 418	struct ib_mr *mr;
 419	int i, ret = -EINVAL;
 420	enum ib_mr_type mr_type;
 421
 422	if (pool_size <= 0)
 423		goto err;
 424	ret = -ENOMEM;
 425	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
 426	if (!pool)
 427		goto err;
 428	pool->size = pool_size;
 429	pool->max_page_list_len = max_page_list_len;
 430	spin_lock_init(&pool->lock);
 431	INIT_LIST_HEAD(&pool->free_list);
 432
 433	if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
 434		mr_type = IB_MR_TYPE_SG_GAPS;
 435	else
 436		mr_type = IB_MR_TYPE_MEM_REG;
 437
 438	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
 439		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
 440		if (IS_ERR(mr)) {
 441			ret = PTR_ERR(mr);
 442			if (ret == -ENOMEM)
 443				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
 444					dev_name(&device->dev));
 445			goto destroy_pool;
 446		}
 447		d->mr = mr;
 448		list_add_tail(&d->entry, &pool->free_list);
 449	}
 450
 451out:
 452	return pool;
 453
 454destroy_pool:
 455	srp_destroy_fr_pool(pool);
 456
 457err:
 458	pool = ERR_PTR(ret);
 459	goto out;
 460}
 461
 462/**
 463 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 464 * @pool: Pool to obtain descriptor from.
 465 */
 466static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
 467{
 468	struct srp_fr_desc *d = NULL;
 469	unsigned long flags;
 470
 471	spin_lock_irqsave(&pool->lock, flags);
 472	if (!list_empty(&pool->free_list)) {
 473		d = list_first_entry(&pool->free_list, typeof(*d), entry);
 474		list_del(&d->entry);
 475	}
 476	spin_unlock_irqrestore(&pool->lock, flags);
 477
 478	return d;
 479}
 480
 481/**
 482 * srp_fr_pool_put() - put an FR descriptor back in the free list
 483 * @pool: Pool the descriptor was allocated from.
 484 * @desc: Pointer to an array of fast registration descriptor pointers.
 485 * @n:    Number of descriptors to put back.
 486 *
 487 * Note: The caller must already have queued an invalidation request for
 488 * desc->mr->rkey before calling this function.
 489 */
 490static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
 491			    int n)
 492{
 493	unsigned long flags;
 494	int i;
 495
 496	spin_lock_irqsave(&pool->lock, flags);
 497	for (i = 0; i < n; i++)
 498		list_add(&desc[i]->entry, &pool->free_list);
 499	spin_unlock_irqrestore(&pool->lock, flags);
 500}
 501
 502static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
 503{
 504	struct srp_device *dev = target->srp_host->srp_dev;
 505
 506	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
 507				  dev->max_pages_per_mr);
 508}
 509
 510/**
 511 * srp_destroy_qp() - destroy an RDMA queue pair
 512 * @ch: SRP RDMA channel.
 513 *
  514 * Drain the qp before destroying it.  This prevents the receive
  515 * completion handler from accessing the queue pair while it is
  516 * being destroyed.
 517 */
 518static void srp_destroy_qp(struct srp_rdma_ch *ch)
 519{
 520	spin_lock_irq(&ch->lock);
 521	ib_process_cq_direct(ch->send_cq, -1);
 522	spin_unlock_irq(&ch->lock);
 523
 524	ib_drain_qp(ch->qp);
 525	ib_destroy_qp(ch->qp);
 526}
 527
 528static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 529{
 530	struct srp_target_port *target = ch->target;
 531	struct srp_device *dev = target->srp_host->srp_dev;
 532	const struct ib_device_attr *attr = &dev->dev->attrs;
 533	struct ib_qp_init_attr *init_attr;
 534	struct ib_cq *recv_cq, *send_cq;
 535	struct ib_qp *qp;
 536	struct srp_fr_pool *fr_pool = NULL;
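	/*
	 * Send-queue sizing factor: each request consumes one send WR plus,
	 * when fast registration is used, up to two further WRs (memory
	 * registration and local invalidation) per memory region per command.
	 */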
 537	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
 538	int ret;
 539
 540	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
 541	if (!init_attr)
 542		return -ENOMEM;
 543
 544	/* queue_size + 1 for ib_drain_rq() */
 545	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
 546				ch->comp_vector, IB_POLL_SOFTIRQ);
 547	if (IS_ERR(recv_cq)) {
 548		ret = PTR_ERR(recv_cq);
 549		goto err;
 550	}
 551
 552	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
 553				ch->comp_vector, IB_POLL_DIRECT);
 554	if (IS_ERR(send_cq)) {
 555		ret = PTR_ERR(send_cq);
 556		goto err_recv_cq;
 557	}
 558
 559	init_attr->event_handler       = srp_qp_event;
 560	init_attr->cap.max_send_wr     = m * target->queue_size;
 561	init_attr->cap.max_recv_wr     = target->queue_size + 1;
 562	init_attr->cap.max_recv_sge    = 1;
 563	init_attr->cap.max_send_sge    = min(SRP_MAX_SGE, attr->max_send_sge);
 564	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
 565	init_attr->qp_type             = IB_QPT_RC;
 566	init_attr->send_cq             = send_cq;
 567	init_attr->recv_cq             = recv_cq;
 568
 569	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
 570
 571	if (target->using_rdma_cm) {
 572		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
 573		qp = ch->rdma_cm.cm_id->qp;
 574	} else {
 575		qp = ib_create_qp(dev->pd, init_attr);
 576		if (!IS_ERR(qp)) {
 577			ret = srp_init_ib_qp(target, qp);
 578			if (ret)
 579				ib_destroy_qp(qp);
 580		} else {
 581			ret = PTR_ERR(qp);
 582		}
 583	}
 584	if (ret) {
 585		pr_err("QP creation failed for dev %s: %d\n",
 586		       dev_name(&dev->dev->dev), ret);
 587		goto err_send_cq;
 588	}
 589
 590	if (dev->use_fast_reg) {
 591		fr_pool = srp_alloc_fr_pool(target);
 592		if (IS_ERR(fr_pool)) {
 593			ret = PTR_ERR(fr_pool);
 594			shost_printk(KERN_WARNING, target->scsi_host, PFX
 595				     "FR pool allocation failed (%d)\n", ret);
 596			goto err_qp;
 597		}
 598	}
 599
 600	if (ch->qp)
 601		srp_destroy_qp(ch);
 602	if (ch->recv_cq)
 603		ib_free_cq(ch->recv_cq);
 604	if (ch->send_cq)
 605		ib_free_cq(ch->send_cq);
 606
 607	ch->qp = qp;
 608	ch->recv_cq = recv_cq;
 609	ch->send_cq = send_cq;
 610
 611	if (dev->use_fast_reg) {
 612		if (ch->fr_pool)
 613			srp_destroy_fr_pool(ch->fr_pool);
 614		ch->fr_pool = fr_pool;
 615	}
 616
 617	kfree(init_attr);
 618	return 0;
 619
 620err_qp:
 621	if (target->using_rdma_cm)
 622		rdma_destroy_qp(ch->rdma_cm.cm_id);
 623	else
 624		ib_destroy_qp(qp);
 625
 626err_send_cq:
 627	ib_free_cq(send_cq);
 628
 629err_recv_cq:
 630	ib_free_cq(recv_cq);
 631
 632err:
 633	kfree(init_attr);
 634	return ret;
 635}
 636
 637/*
 638 * Note: this function may be called without srp_alloc_iu_bufs() having been
 639 * invoked. Hence the ch->[rt]x_ring checks.
 640 */
 641static void srp_free_ch_ib(struct srp_target_port *target,
 642			   struct srp_rdma_ch *ch)
 643{
 644	struct srp_device *dev = target->srp_host->srp_dev;
 645	int i;
 646
 647	if (!ch->target)
 648		return;
 649
 650	if (target->using_rdma_cm) {
 651		if (ch->rdma_cm.cm_id) {
 652			rdma_destroy_id(ch->rdma_cm.cm_id);
 653			ch->rdma_cm.cm_id = NULL;
 654		}
 655	} else {
 656		if (ch->ib_cm.cm_id) {
 657			ib_destroy_cm_id(ch->ib_cm.cm_id);
 658			ch->ib_cm.cm_id = NULL;
 659		}
 660	}
 661
  662	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
 663	if (!ch->qp)
 664		return;
 665
 666	if (dev->use_fast_reg) {
 667		if (ch->fr_pool)
 668			srp_destroy_fr_pool(ch->fr_pool);
 669	}
 670
 671	srp_destroy_qp(ch);
 672	ib_free_cq(ch->send_cq);
 673	ib_free_cq(ch->recv_cq);
 674
 675	/*
  676	 * Prevent the SCSI error handler from using this channel after it
  677	 * has been freed: the SCSI error handler may continue trying to
  678	 * perform recovery actions even after scsi_remove_host() has
  679	 * returned.
 680	 */
 681	ch->target = NULL;
 682
 683	ch->qp = NULL;
 684	ch->send_cq = ch->recv_cq = NULL;
 685
 686	if (ch->rx_ring) {
 687		for (i = 0; i < target->queue_size; ++i)
 688			srp_free_iu(target->srp_host, ch->rx_ring[i]);
 689		kfree(ch->rx_ring);
 690		ch->rx_ring = NULL;
 691	}
 692	if (ch->tx_ring) {
 693		for (i = 0; i < target->queue_size; ++i)
 694			srp_free_iu(target->srp_host, ch->tx_ring[i]);
 695		kfree(ch->tx_ring);
 696		ch->tx_ring = NULL;
 697	}
 698}
 699
 700static void srp_path_rec_completion(int status,
 701				    struct sa_path_rec *pathrec,
 702				    int num_paths, void *ch_ptr)
 703{
 704	struct srp_rdma_ch *ch = ch_ptr;
 705	struct srp_target_port *target = ch->target;
 706
 707	ch->status = status;
 708	if (status)
 709		shost_printk(KERN_ERR, target->scsi_host,
 710			     PFX "Got failed path rec status %d\n", status);
 711	else
 712		ch->ib_cm.path = *pathrec;
 713	complete(&ch->done);
 714}
 715
 716static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
 717{
 718	struct srp_target_port *target = ch->target;
 719	int ret;
 720
 721	ch->ib_cm.path.numb_path = 1;
 722
 723	init_completion(&ch->done);
 724
 725	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
 726					       target->srp_host->srp_dev->dev,
 727					       target->srp_host->port,
 728					       &ch->ib_cm.path,
 729					       IB_SA_PATH_REC_SERVICE_ID |
 730					       IB_SA_PATH_REC_DGID	 |
 731					       IB_SA_PATH_REC_SGID	 |
 732					       IB_SA_PATH_REC_NUMB_PATH	 |
 733					       IB_SA_PATH_REC_PKEY,
 734					       SRP_PATH_REC_TIMEOUT_MS,
 735					       GFP_KERNEL,
 736					       srp_path_rec_completion,
 737					       ch, &ch->ib_cm.path_query);
 738	if (ch->ib_cm.path_query_id < 0)
 739		return ch->ib_cm.path_query_id;
 740
 741	ret = wait_for_completion_interruptible(&ch->done);
 742	if (ret < 0)
 743		return ret;
 744
 745	if (ch->status < 0)
 746		shost_printk(KERN_WARNING, target->scsi_host,
 747			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
 748			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
 749			     be16_to_cpu(target->ib_cm.pkey),
 750			     be64_to_cpu(target->ib_cm.service_id));
 751
 752	return ch->status;
 753}
 754
 755static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
 756{
 757	struct srp_target_port *target = ch->target;
 758	int ret;
 759
 760	init_completion(&ch->done);
 761
 762	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
 763	if (ret)
 764		return ret;
 765
 766	wait_for_completion_interruptible(&ch->done);
 767
 768	if (ch->status != 0)
 769		shost_printk(KERN_WARNING, target->scsi_host,
 770			     PFX "Path resolution failed\n");
 771
 772	return ch->status;
 773}
 774
 775static int srp_lookup_path(struct srp_rdma_ch *ch)
 776{
 777	struct srp_target_port *target = ch->target;
 778
 779	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
 780		srp_ib_lookup_path(ch);
 781}
 782
 783static u8 srp_get_subnet_timeout(struct srp_host *host)
 784{
 785	struct ib_port_attr attr;
 786	int ret;
 787	u8 subnet_timeout = 18;
 788
 789	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
 790	if (ret == 0)
 791		subnet_timeout = attr.subnet_timeout;
 792
 793	if (unlikely(subnet_timeout < 15))
 794		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
 795			dev_name(&host->srp_dev->dev->dev), subnet_timeout);
 796
 797	return subnet_timeout;
 798}
 799
 800static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
 801			bool multich)
 802{
 803	struct srp_target_port *target = ch->target;
 804	struct {
 805		struct rdma_conn_param	  rdma_param;
 806		struct srp_login_req_rdma rdma_req;
 807		struct ib_cm_req_param	  ib_param;
 808		struct srp_login_req	  ib_req;
 809	} *req = NULL;
 810	char *ipi, *tpi;
 811	int status;
 812
 813	req = kzalloc(sizeof *req, GFP_KERNEL);
 814	if (!req)
 815		return -ENOMEM;
 816
 817	req->ib_param.flow_control = 1;
 818	req->ib_param.retry_count = target->tl_retry_count;
 819
 820	/*
 821	 * Pick some arbitrary defaults here; we could make these
 822	 * module parameters if anyone cared about setting them.
 823	 */
 824	req->ib_param.responder_resources = 4;
 825	req->ib_param.rnr_retry_count = 7;
 826	req->ib_param.max_cm_retries = 15;
 827
 828	req->ib_req.opcode = SRP_LOGIN_REQ;
 829	req->ib_req.tag = 0;
 830	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
 831	req->ib_req.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
 832					      SRP_BUF_FORMAT_INDIRECT);
 833	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
 834				 SRP_MULTICHAN_SINGLE);
 835	if (srp_use_imm_data) {
 836		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
 837		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
 838	}
 839
 840	if (target->using_rdma_cm) {
 841		req->rdma_param.flow_control = req->ib_param.flow_control;
 842		req->rdma_param.responder_resources =
 843			req->ib_param.responder_resources;
 844		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
 845		req->rdma_param.retry_count = req->ib_param.retry_count;
 846		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
 847		req->rdma_param.private_data = &req->rdma_req;
 848		req->rdma_param.private_data_len = sizeof(req->rdma_req);
 849
 850		req->rdma_req.opcode = req->ib_req.opcode;
 851		req->rdma_req.tag = req->ib_req.tag;
 852		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
 853		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
 854		req->rdma_req.req_flags	= req->ib_req.req_flags;
 855		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
 856
 857		ipi = req->rdma_req.initiator_port_id;
 858		tpi = req->rdma_req.target_port_id;
 859	} else {
 860		u8 subnet_timeout;
 861
 862		subnet_timeout = srp_get_subnet_timeout(target->srp_host);
 863
 864		req->ib_param.primary_path = &ch->ib_cm.path;
 865		req->ib_param.alternate_path = NULL;
 866		req->ib_param.service_id = target->ib_cm.service_id;
 867		get_random_bytes(&req->ib_param.starting_psn, 4);
 868		req->ib_param.starting_psn &= 0xffffff;
 869		req->ib_param.qp_num = ch->qp->qp_num;
 870		req->ib_param.qp_type = ch->qp->qp_type;
 871		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
 872		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
 873		req->ib_param.private_data = &req->ib_req;
 874		req->ib_param.private_data_len = sizeof(req->ib_req);
 875
 876		ipi = req->ib_req.initiator_port_id;
 877		tpi = req->ib_req.target_port_id;
 878	}
 879
 880	/*
 881	 * In the published SRP specification (draft rev. 16a), the
 882	 * port identifier format is 8 bytes of ID extension followed
 883	 * by 8 bytes of GUID.  Older drafts put the two halves in the
 884	 * opposite order, so that the GUID comes first.
 885	 *
 886	 * Targets conforming to these obsolete drafts can be
 887	 * recognized by the I/O Class they report.
 888	 */
 889	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
 890		memcpy(ipi,     &target->sgid.global.interface_id, 8);
 891		memcpy(ipi + 8, &target->initiator_ext, 8);
 892		memcpy(tpi,     &target->ioc_guid, 8);
 893		memcpy(tpi + 8, &target->id_ext, 8);
 894	} else {
 895		memcpy(ipi,     &target->initiator_ext, 8);
 896		memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
 897		memcpy(tpi,     &target->id_ext, 8);
 898		memcpy(tpi + 8, &target->ioc_guid, 8);
 899	}
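	/*
	 * The resulting 16-byte port identifier layouts, for reference:
	 *
	 *   rev. 16a targets: [ 8-byte ID extension ][ 8-byte GUID ]
	 *   rev. 10 targets:  [ 8-byte GUID ][ 8-byte ID extension ]
	 */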
 900
 901	/*
 902	 * Topspin/Cisco SRP targets will reject our login unless we
 903	 * zero out the first 8 bytes of our initiator port ID and set
 904	 * the second 8 bytes to the local node GUID.
 905	 */
 906	if (srp_target_is_topspin(target)) {
 907		shost_printk(KERN_DEBUG, target->scsi_host,
 908			     PFX "Topspin/Cisco initiator port ID workaround "
 909			     "activated for target GUID %016llx\n",
 910			     be64_to_cpu(target->ioc_guid));
 911		memset(ipi, 0, 8);
 912		memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
 913	}
 914
 915	if (target->using_rdma_cm)
 916		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
 917	else
 918		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
 919
 920	kfree(req);
 921
 922	return status;
 923}
 924
 925static bool srp_queue_remove_work(struct srp_target_port *target)
 926{
 927	bool changed = false;
 928
 929	spin_lock_irq(&target->lock);
 930	if (target->state != SRP_TARGET_REMOVED) {
 931		target->state = SRP_TARGET_REMOVED;
 932		changed = true;
 933	}
 934	spin_unlock_irq(&target->lock);
 935
 936	if (changed)
 937		queue_work(srp_remove_wq, &target->remove_work);
 938
 939	return changed;
 940}
 941
 942static void srp_disconnect_target(struct srp_target_port *target)
 943{
 944	struct srp_rdma_ch *ch;
 945	int i, ret;
 946
 947	/* XXX should send SRP_I_LOGOUT request */
 948
 949	for (i = 0; i < target->ch_count; i++) {
 950		ch = &target->ch[i];
 951		ch->connected = false;
 952		ret = 0;
 953		if (target->using_rdma_cm) {
 954			if (ch->rdma_cm.cm_id)
 955				rdma_disconnect(ch->rdma_cm.cm_id);
 956		} else {
 957			if (ch->ib_cm.cm_id)
 958				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
 959						      NULL, 0);
 960		}
 961		if (ret < 0) {
 962			shost_printk(KERN_DEBUG, target->scsi_host,
 963				     PFX "Sending CM DREQ failed\n");
 964		}
 965	}
 966}
 967
 968static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 969{
 970	struct srp_target_port *target = host_to_target(shost);
 971	struct srp_device *dev = target->srp_host->srp_dev;
 972	struct ib_device *ibdev = dev->dev;
 973	struct srp_request *req = scsi_cmd_priv(cmd);
 974
 975	kfree(req->fr_list);
 976	if (req->indirect_dma_addr) {
 977		ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
 978				    target->indirect_size,
 979				    DMA_TO_DEVICE);
 980	}
 981	kfree(req->indirect_desc);
 982
 983	return 0;
 984}
 985
 986static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 987{
 988	struct srp_target_port *target = host_to_target(shost);
 989	struct srp_device *srp_dev = target->srp_host->srp_dev;
 990	struct ib_device *ibdev = srp_dev->dev;
 991	struct srp_request *req = scsi_cmd_priv(cmd);
 992	dma_addr_t dma_addr;
 993	int ret = -ENOMEM;
 994
 995	if (srp_dev->use_fast_reg) {
 996		req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
 997					GFP_KERNEL);
 998		if (!req->fr_list)
 999			goto out;
1000	}
1001	req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
1002	if (!req->indirect_desc)
1003		goto out;
1004
1005	dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
1006				     target->indirect_size,
1007				     DMA_TO_DEVICE);
1008	if (ib_dma_mapping_error(ibdev, dma_addr)) {
1009		srp_exit_cmd_priv(shost, cmd);
1010		goto out;
1011	}
1012
1013	req->indirect_dma_addr = dma_addr;
1014	ret = 0;
1015
1016out:
1017	return ret;
1018}
1019
1020/**
1021 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1022 * @shost: SCSI host whose attributes to remove from sysfs.
1023 *
1024 * Note: Any attributes defined in the host template that did not exist
1025 * before this function was invoked are ignored.
1026 */
1027static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
1028{
1029	const struct attribute_group **g;
1030	struct attribute **attr;
1031
1032	for (g = shost->hostt->shost_groups; *g; ++g) {
1033		for (attr = (*g)->attrs; *attr; ++attr) {
1034			struct device_attribute *dev_attr =
1035				container_of(*attr, typeof(*dev_attr), attr);
1036
1037			device_remove_file(&shost->shost_dev, dev_attr);
1038		}
1039	}
1040}
1041
1042static void srp_remove_target(struct srp_target_port *target)
1043{
1044	struct srp_rdma_ch *ch;
1045	int i;
1046
1047	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1048
1049	srp_del_scsi_host_attr(target->scsi_host);
1050	srp_rport_get(target->rport);
1051	srp_remove_host(target->scsi_host);
1052	scsi_remove_host(target->scsi_host);
1053	srp_stop_rport_timers(target->rport);
1054	srp_disconnect_target(target);
1055	kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
1056	for (i = 0; i < target->ch_count; i++) {
1057		ch = &target->ch[i];
1058		srp_free_ch_ib(target, ch);
1059	}
1060	cancel_work_sync(&target->tl_err_work);
1061	srp_rport_put(target->rport);
1062	kfree(target->ch);
1063	target->ch = NULL;
1064
1065	spin_lock(&target->srp_host->target_lock);
1066	list_del(&target->list);
1067	spin_unlock(&target->srp_host->target_lock);
1068
1069	scsi_host_put(target->scsi_host);
1070}
1071
1072static void srp_remove_work(struct work_struct *work)
1073{
1074	struct srp_target_port *target =
1075		container_of(work, struct srp_target_port, remove_work);
1076
1077	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1078
1079	srp_remove_target(target);
1080}
1081
1082static void srp_rport_delete(struct srp_rport *rport)
1083{
1084	struct srp_target_port *target = rport->lld_data;
1085
1086	srp_queue_remove_work(target);
1087}
1088
1089/**
1090 * srp_connected_ch() - number of connected channels
1091 * @target: SRP target port.
1092 */
1093static int srp_connected_ch(struct srp_target_port *target)
1094{
1095	int i, c = 0;
1096
1097	for (i = 0; i < target->ch_count; i++)
1098		c += target->ch[i].connected;
1099
1100	return c;
1101}
1102
1103static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1104			  bool multich)
1105{
1106	struct srp_target_port *target = ch->target;
1107	int ret;
1108
1109	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1110
1111	ret = srp_lookup_path(ch);
1112	if (ret)
1113		goto out;
1114
1115	while (1) {
1116		init_completion(&ch->done);
1117		ret = srp_send_req(ch, max_iu_len, multich);
1118		if (ret)
1119			goto out;
1120		ret = wait_for_completion_interruptible(&ch->done);
1121		if (ret < 0)
1122			goto out;
1123
1124		/*
1125		 * The CM event handling code will set status to
1126		 * SRP_PORT_REDIRECT if we get a port redirect REJ
1127		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1128		 * redirect REJ back.
1129		 */
1130		ret = ch->status;
1131		switch (ret) {
1132		case 0:
1133			ch->connected = true;
1134			goto out;
1135
1136		case SRP_PORT_REDIRECT:
1137			ret = srp_lookup_path(ch);
1138			if (ret)
1139				goto out;
1140			break;
1141
1142		case SRP_DLID_REDIRECT:
1143			break;
1144
1145		case SRP_STALE_CONN:
1146			shost_printk(KERN_ERR, target->scsi_host, PFX
1147				     "giving up on stale connection\n");
1148			ret = -ECONNRESET;
1149			goto out;
1150
1151		default:
1152			goto out;
1153		}
1154	}
1155
1156out:
1157	return ret <= 0 ? ret : -ENODEV;
1158}
1159
1160static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1161{
1162	srp_handle_qp_err(cq, wc, "INV RKEY");
1163}
1164
1165static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1166		u32 rkey)
1167{
1168	struct ib_send_wr wr = {
1169		.opcode		    = IB_WR_LOCAL_INV,
1170		.next		    = NULL,
1171		.num_sge	    = 0,
1172		.send_flags	    = 0,
1173		.ex.invalidate_rkey = rkey,
1174	};
1175
1176	wr.wr_cqe = &req->reg_cqe;
1177	req->reg_cqe.done = srp_inv_rkey_err_done;
1178	return ib_post_send(ch->qp, &wr, NULL);
1179}
1180
1181static void srp_unmap_data(struct scsi_cmnd *scmnd,
1182			   struct srp_rdma_ch *ch,
1183			   struct srp_request *req)
1184{
1185	struct srp_target_port *target = ch->target;
1186	struct srp_device *dev = target->srp_host->srp_dev;
1187	struct ib_device *ibdev = dev->dev;
1188	int i, res;
1189
1190	if (!scsi_sglist(scmnd) ||
1191	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1192	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1193		return;
1194
1195	if (dev->use_fast_reg) {
1196		struct srp_fr_desc **pfr;
1197
1198		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1199			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1200			if (res < 0) {
1201				shost_printk(KERN_ERR, target->scsi_host, PFX
1202				  "Queueing INV WR for rkey %#x failed (%d)\n",
1203				  (*pfr)->mr->rkey, res);
1204				queue_work(system_long_wq,
1205					   &target->tl_err_work);
1206			}
1207		}
1208		if (req->nmdesc)
1209			srp_fr_pool_put(ch->fr_pool, req->fr_list,
1210					req->nmdesc);
1211	}
1212
1213	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1214			scmnd->sc_data_direction);
1215}
1216
1217/**
1218 * srp_claim_req - Take ownership of the scmnd associated with a request.
1219 * @ch: SRP RDMA channel.
1220 * @req: SRP request.
1221 * @sdev: If not NULL, only take ownership for this SCSI device.
1222 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1223 *         ownership of @req->scmnd if it equals @scmnd.
1224 *
1225 * Return value:
1226 * Either NULL or a pointer to the SCSI command the caller became owner of.
1227 */
1228static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1229				       struct srp_request *req,
1230				       struct scsi_device *sdev,
1231				       struct scsi_cmnd *scmnd)
1232{
1233	unsigned long flags;
1234
1235	spin_lock_irqsave(&ch->lock, flags);
1236	if (req->scmnd &&
1237	    (!sdev || req->scmnd->device == sdev) &&
1238	    (!scmnd || req->scmnd == scmnd)) {
1239		scmnd = req->scmnd;
1240		req->scmnd = NULL;
1241	} else {
1242		scmnd = NULL;
1243	}
1244	spin_unlock_irqrestore(&ch->lock, flags);
1245
1246	return scmnd;
1247}
1248
1249/**
1250 * srp_free_req() - Unmap data and adjust ch->req_lim.
1251 * @ch:     SRP RDMA channel.
1252 * @req:    Request to be freed.
1253 * @scmnd:  SCSI command associated with @req.
1254 * @req_lim_delta: Amount to be added to @target->req_lim.
1255 */
1256static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1257			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1258{
1259	unsigned long flags;
1260
1261	srp_unmap_data(scmnd, ch, req);
1262
1263	spin_lock_irqsave(&ch->lock, flags);
1264	ch->req_lim += req_lim_delta;
1265	spin_unlock_irqrestore(&ch->lock, flags);
1266}
1267
1268static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1269			   struct scsi_device *sdev, int result)
1270{
1271	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1272
1273	if (scmnd) {
1274		srp_free_req(ch, req, scmnd, 0);
1275		scmnd->result = result;
1276		scsi_done(scmnd);
1277	}
1278}
1279
1280struct srp_terminate_context {
1281	struct srp_target_port *srp_target;
1282	int scsi_result;
1283};
1284
1285static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
1286{
1287	struct srp_terminate_context *context = context_ptr;
1288	struct srp_target_port *target = context->srp_target;
1289	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
1290	struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
1291	struct srp_request *req = scsi_cmd_priv(scmnd);
1292
1293	srp_finish_req(ch, req, NULL, context->scsi_result);
1294
1295	return true;
1296}
1297
1298static void srp_terminate_io(struct srp_rport *rport)
1299{
1300	struct srp_target_port *target = rport->lld_data;
1301	struct srp_terminate_context context = { .srp_target = target,
1302		.scsi_result = DID_TRANSPORT_FAILFAST << 16 };
1303
1304	scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
1305}
1306
1307/* Calculate maximum initiator to target information unit length. */
1308static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
1309				  uint32_t max_it_iu_size)
1310{
1311	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
1312		sizeof(struct srp_indirect_buf) +
1313		cmd_sg_cnt * sizeof(struct srp_direct_buf);
1314
1315	if (use_imm_data)
1316		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
1317				 srp_max_imm_data);
1318
1319	if (max_it_iu_size)
1320		max_iu_len = min(max_iu_len, max_it_iu_size);
1321
1322	pr_debug("max_iu_len = %d\n", max_iu_len);
1323
1324	return max_iu_len;
1325}
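/*
 * Example (a rough sketch of the arithmetic above, assuming
 * sizeof(struct srp_cmd) == 48, SRP_MAX_ADD_CDB_LEN == 16,
 * sizeof(struct srp_indirect_buf) == 20 and
 * sizeof(struct srp_direct_buf) == 16): a call with cmd_sg_cnt == 12,
 * use_imm_data == false and max_it_iu_size == 0 yields
 * 48 + 16 + 20 + 12 * 16 = 276 bytes for the SRP_CMD IU.
 */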
1326
1327/*
1328 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1329 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1330 * srp_reset_device() or srp_reset_host() calls will occur while this function
1331 * is in progress. One way to ensure that is not to call this function
1332 * directly but to call srp_reconnect_rport() instead, since that function
1333 * serializes calls to this function via rport->mutex and also blocks
1334 * srp_queuecommand() calls before invoking this function.
1335 */
1336static int srp_rport_reconnect(struct srp_rport *rport)
1337{
1338	struct srp_target_port *target = rport->lld_data;
1339	struct srp_rdma_ch *ch;
1340	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1341						srp_use_imm_data,
1342						target->max_it_iu_size);
1343	int i, j, ret = 0;
1344	bool multich = false;
1345
1346	srp_disconnect_target(target);
1347
1348	if (target->state == SRP_TARGET_SCANNING)
1349		return -ENODEV;
1350
1351	/*
1352	 * Now get a new local CM ID so that we avoid confusing the target in
1353	 * case things are really fouled up. Doing so also ensures that all CM
1354	 * callbacks will have finished before a new QP is allocated.
1355	 */
1356	for (i = 0; i < target->ch_count; i++) {
1357		ch = &target->ch[i];
1358		ret += srp_new_cm_id(ch);
1359	}
1360	{
1361		struct srp_terminate_context context = {
1362			.srp_target = target, .scsi_result = DID_RESET << 16};
1363
1364		scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
1365				    &context);
1366	}
1367	for (i = 0; i < target->ch_count; i++) {
1368		ch = &target->ch[i];
1369		/*
1370		 * Whether or not creating a new CM ID succeeded, create a new
1371		 * QP. This guarantees that all completion callback function
1372		 * invocations have finished before request resetting starts.
1373		 */
1374		ret += srp_create_ch_ib(ch);
1375
1376		INIT_LIST_HEAD(&ch->free_tx);
1377		for (j = 0; j < target->queue_size; ++j)
1378			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1379	}
1380
1381	target->qp_in_error = false;
1382
1383	for (i = 0; i < target->ch_count; i++) {
1384		ch = &target->ch[i];
1385		if (ret)
1386			break;
1387		ret = srp_connect_ch(ch, max_iu_len, multich);
1388		multich = true;
1389	}
1390
1391	if (ret == 0)
1392		shost_printk(KERN_INFO, target->scsi_host,
1393			     PFX "reconnect succeeded\n");
1394
1395	return ret;
1396}
1397
1398static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1399			 unsigned int dma_len, u32 rkey)
1400{
1401	struct srp_direct_buf *desc = state->desc;
1402
1403	WARN_ON_ONCE(!dma_len);
1404
1405	desc->va = cpu_to_be64(dma_addr);
1406	desc->key = cpu_to_be32(rkey);
1407	desc->len = cpu_to_be32(dma_len);
1408
1409	state->total_len += dma_len;
1410	state->desc++;
1411	state->ndesc++;
1412}
1413
1414static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1415{
1416	srp_handle_qp_err(cq, wc, "FAST REG");
1417}
1418
1419/*
1420 * Map up to sg_nents elements of state->sg, where *sg_offset_p is the
1421 * offset at which to start in the first element. If sg_offset_p != NULL then
1422 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1423 * byte that has not yet been mapped.
1424 */
1425static int srp_map_finish_fr(struct srp_map_state *state,
1426			     struct srp_request *req,
1427			     struct srp_rdma_ch *ch, int sg_nents,
1428			     unsigned int *sg_offset_p)
1429{
1430	struct srp_target_port *target = ch->target;
1431	struct srp_device *dev = target->srp_host->srp_dev;
1432	struct ib_reg_wr wr;
1433	struct srp_fr_desc *desc;
1434	u32 rkey;
1435	int n, err;
1436
1437	if (state->fr.next >= state->fr.end) {
1438		shost_printk(KERN_ERR, ch->target->scsi_host,
1439			     PFX "Out of MRs (mr_per_cmd = %d)\n",
1440			     ch->target->mr_per_cmd);
1441		return -ENOMEM;
1442	}
1443
1444	WARN_ON_ONCE(!dev->use_fast_reg);
1445
1446	if (sg_nents == 1 && target->global_rkey) {
1447		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1448
1449		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1450			     sg_dma_len(state->sg) - sg_offset,
1451			     target->global_rkey);
1452		if (sg_offset_p)
1453			*sg_offset_p = 0;
1454		return 1;
1455	}
1456
1457	desc = srp_fr_pool_get(ch->fr_pool);
1458	if (!desc)
1459		return -ENOMEM;
1460
1461	rkey = ib_inc_rkey(desc->mr->rkey);
1462	ib_update_fast_reg_key(desc->mr, rkey);
1463
1464	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1465			 dev->mr_page_size);
1466	if (unlikely(n < 0)) {
1467		srp_fr_pool_put(ch->fr_pool, &desc, 1);
1468		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1469			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1470			 sg_offset_p ? *sg_offset_p : -1, n);
1471		return n;
1472	}
1473
1474	WARN_ON_ONCE(desc->mr->length == 0);
1475
1476	req->reg_cqe.done = srp_reg_mr_err_done;
1477
1478	wr.wr.next = NULL;
1479	wr.wr.opcode = IB_WR_REG_MR;
1480	wr.wr.wr_cqe = &req->reg_cqe;
1481	wr.wr.num_sge = 0;
1482	wr.wr.send_flags = 0;
1483	wr.mr = desc->mr;
1484	wr.key = desc->mr->rkey;
1485	wr.access = (IB_ACCESS_LOCAL_WRITE |
1486		     IB_ACCESS_REMOTE_READ |
1487		     IB_ACCESS_REMOTE_WRITE);
1488
1489	*state->fr.next++ = desc;
1490	state->nmdesc++;
1491
1492	srp_map_desc(state, desc->mr->iova,
1493		     desc->mr->length, desc->mr->rkey);
1494
1495	err = ib_post_send(ch->qp, &wr.wr, NULL);
1496	if (unlikely(err)) {
1497		WARN_ON_ONCE(err == -ENOMEM);
1498		return err;
1499	}
1500
1501	return n;
1502}
1503
1504static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1505			 struct srp_request *req, struct scatterlist *scat,
1506			 int count)
1507{
1508	unsigned int sg_offset = 0;
1509
1510	state->fr.next = req->fr_list;
1511	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1512	state->sg = scat;
1513
1514	if (count == 0)
1515		return 0;
1516
1517	while (count) {
1518		int i, n;
1519
1520		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1521		if (unlikely(n < 0))
1522			return n;
1523
1524		count -= n;
1525		for (i = 0; i < n; i++)
1526			state->sg = sg_next(state->sg);
1527	}
1528
1529	return 0;
1530}
1531
1532static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1533			  struct srp_request *req, struct scatterlist *scat,
1534			  int count)
1535{
1536	struct srp_target_port *target = ch->target;
1537	struct scatterlist *sg;
1538	int i;
1539
1540	for_each_sg(scat, sg, count, i) {
1541		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
1542			     target->global_rkey);
1543	}
1544
1545	return 0;
1546}
1547
1548/*
1549 * Register the indirect data buffer descriptor with the HCA.
1550 *
1551 * Note: since the indirect data buffer descriptor has been allocated with
1552 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1553 * memory buffer.
1554 */
1555static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1556		       void **next_mr, void **end_mr, u32 idb_len,
1557		       __be32 *idb_rkey)
1558{
1559	struct srp_target_port *target = ch->target;
1560	struct srp_device *dev = target->srp_host->srp_dev;
1561	struct srp_map_state state;
1562	struct srp_direct_buf idb_desc;
1563	struct scatterlist idb_sg[1];
1564	int ret;
1565
1566	memset(&state, 0, sizeof(state));
1567	memset(&idb_desc, 0, sizeof(idb_desc));
1568	state.gen.next = next_mr;
1569	state.gen.end = end_mr;
1570	state.desc = &idb_desc;
1571	state.base_dma_addr = req->indirect_dma_addr;
1572	state.dma_len = idb_len;
1573
1574	if (dev->use_fast_reg) {
1575		state.sg = idb_sg;
1576		sg_init_one(idb_sg, req->indirect_desc, idb_len);
1577		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1578#ifdef CONFIG_NEED_SG_DMA_LENGTH
1579		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
1580#endif
1581		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1582		if (ret < 0)
1583			return ret;
1584		WARN_ON_ONCE(ret < 1);
1585	} else {
1586		return -EINVAL;
1587	}
1588
1589	*idb_rkey = idb_desc.key;
1590
1591	return 0;
1592}
1593
1594static void srp_check_mapping(struct srp_map_state *state,
1595			      struct srp_rdma_ch *ch, struct srp_request *req,
1596			      struct scatterlist *scat, int count)
1597{
1598	struct srp_device *dev = ch->target->srp_host->srp_dev;
1599	struct srp_fr_desc **pfr;
1600	u64 desc_len = 0, mr_len = 0;
1601	int i;
1602
1603	for (i = 0; i < state->ndesc; i++)
1604		desc_len += be32_to_cpu(req->indirect_desc[i].len);
1605	if (dev->use_fast_reg)
1606		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1607			mr_len += (*pfr)->mr->length;
1608	if (desc_len != scsi_bufflen(req->scmnd) ||
1609	    mr_len > scsi_bufflen(req->scmnd))
1610		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1611		       scsi_bufflen(req->scmnd), desc_len, mr_len,
1612		       state->ndesc, state->nmdesc);
1613}
1614
1615/**
1616 * srp_map_data() - map SCSI data buffer onto an SRP request
1617 * @scmnd: SCSI command to map
1618 * @ch: SRP RDMA channel
1619 * @req: SRP request
1620 *
1621 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1622 * mapping failed. The size of any immediate data is not included in the
1623 * return value.
1624 */
1625static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1626			struct srp_request *req)
1627{
1628	struct srp_target_port *target = ch->target;
1629	struct scatterlist *scat, *sg;
1630	struct srp_cmd *cmd = req->cmd->buf;
1631	int i, len, nents, count, ret;
1632	struct srp_device *dev;
1633	struct ib_device *ibdev;
1634	struct srp_map_state state;
1635	struct srp_indirect_buf *indirect_hdr;
1636	u64 data_len;
1637	u32 idb_len, table_len;
1638	__be32 idb_rkey;
1639	u8 fmt;
1640
1641	req->cmd->num_sge = 1;
1642
1643	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1644		return sizeof(struct srp_cmd) + cmd->add_cdb_len;
1645
1646	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1647	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
1648		shost_printk(KERN_WARNING, target->scsi_host,
1649			     PFX "Unhandled data direction %d\n",
1650			     scmnd->sc_data_direction);
1651		return -EINVAL;
1652	}
1653
1654	nents = scsi_sg_count(scmnd);
1655	scat  = scsi_sglist(scmnd);
1656	data_len = scsi_bufflen(scmnd);
1657
1658	dev = target->srp_host->srp_dev;
1659	ibdev = dev->dev;
1660
1661	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1662	if (unlikely(count == 0))
1663		return -EIO;
1664
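	/*
	 * Use the immediate-data path only for writes whose scatterlist fits
	 * in the available immediate SGEs and whose payload, together with
	 * the SRP_CMD header, fits in the initiator-to-target IU.
	 */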
1665	if (ch->use_imm_data &&
1666	    count <= ch->max_imm_sge &&
1667	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1668	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
1669		struct srp_imm_buf *buf;
1670		struct ib_sge *sge = &req->cmd->sge[1];
1671
1672		fmt = SRP_DATA_DESC_IMM;
1673		len = SRP_IMM_DATA_OFFSET;
1674		req->nmdesc = 0;
1675		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1676		buf->len = cpu_to_be32(data_len);
1677		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1678		for_each_sg(scat, sg, count, i) {
1679			sge[i].addr   = sg_dma_address(sg);
1680			sge[i].length = sg_dma_len(sg);
1681			sge[i].lkey   = target->lkey;
1682		}
1683		req->cmd->num_sge += count;
1684		goto map_complete;
1685	}
1686
1687	fmt = SRP_DATA_DESC_DIRECT;
1688	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1689		sizeof(struct srp_direct_buf);
1690
1691	if (count == 1 && target->global_rkey) {
1692		/*
1693		 * The midlayer only generated a single gather/scatter
1694		 * entry, or DMA mapping coalesced everything to a
1695		 * single entry.  So a direct descriptor along with
1696		 * the DMA MR suffices.
1697		 */
1698		struct srp_direct_buf *buf;
1699
1700		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1701		buf->va  = cpu_to_be64(sg_dma_address(scat));
1702		buf->key = cpu_to_be32(target->global_rkey);
1703		buf->len = cpu_to_be32(sg_dma_len(scat));
1704
1705		req->nmdesc = 0;
1706		goto map_complete;
1707	}
1708
1709	/*
1710	 * We have more than one scatter/gather entry, so build our indirect
1711	 * descriptor table, trying to merge as many entries as we can.
1712	 */
1713	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
1714
1715	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1716				   target->indirect_size, DMA_TO_DEVICE);
1717
1718	memset(&state, 0, sizeof(state));
1719	state.desc = req->indirect_desc;
1720	if (dev->use_fast_reg)
1721		ret = srp_map_sg_fr(&state, ch, req, scat, count);
1722	else
1723		ret = srp_map_sg_dma(&state, ch, req, scat, count);
1724	req->nmdesc = state.nmdesc;
1725	if (ret < 0)
1726		goto unmap;
1727
1728	{
1729		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1730			"Memory mapping consistency check");
1731		if (DYNAMIC_DEBUG_BRANCH(ddm))
1732			srp_check_mapping(&state, ch, req, scat, count);
1733	}
1734
1735	/* We've mapped the request, now pull as much of the indirect
1736	 * descriptor table as we can into the command buffer. If this
1737	 * target is not using an external indirect table, we are
1738	 * guaranteed to fit into the command, as the SCSI layer won't
1739	 * give us more S/G entries than we allow.
1740	 */
1741	if (state.ndesc == 1) {
1742		/*
1743		 * Memory registration collapsed the sg-list into one entry,
1744		 * so use a direct descriptor.
1745		 */
1746		struct srp_direct_buf *buf;
1747
1748		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1749		*buf = req->indirect_desc[0];
1750		goto map_complete;
1751	}
1752
1753	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1754						!target->allow_ext_sg)) {
1755		shost_printk(KERN_ERR, target->scsi_host,
1756			     "Could not fit S/G list into SRP_CMD\n");
1757		ret = -EIO;
1758		goto unmap;
1759	}
1760
1761	count = min(state.ndesc, target->cmd_sg_cnt);
1762	table_len = state.ndesc * sizeof(struct srp_direct_buf);
1763	idb_len = sizeof(struct srp_indirect_buf) + table_len;
1764
1765	fmt = SRP_DATA_DESC_INDIRECT;
1766	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1767		sizeof(struct srp_indirect_buf);
1768	len += count * sizeof(struct srp_direct_buf);
1769
1770	memcpy(indirect_hdr->desc_list, req->indirect_desc,
1771	       count * sizeof(struct srp_direct_buf));
1772
1773	if (!target->global_rkey) {
1774		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1775				  idb_len, &idb_rkey);
1776		if (ret < 0)
1777			goto unmap;
1778		req->nmdesc++;
1779	} else {
1780		idb_rkey = cpu_to_be32(target->global_rkey);
1781	}
1782
1783	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1784	indirect_hdr->table_desc.key = idb_rkey;
1785	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1786	indirect_hdr->len = cpu_to_be32(state.total_len);
1787
1788	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1789		cmd->data_out_desc_cnt = count;
1790	else
1791		cmd->data_in_desc_cnt = count;
1792
1793	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1794				      DMA_TO_DEVICE);
1795
1796map_complete:
1797	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1798		cmd->buf_fmt = fmt << 4;
1799	else
1800		cmd->buf_fmt = fmt;
1801
1802	return len;
1803
1804unmap:
1805	srp_unmap_data(scmnd, ch, req);
1806	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1807		ret = -E2BIG;
1808	return ret;
1809}
1810
1811/*
1812 * Return an IU and possible credit to the free pool
1813 */
1814static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1815			  enum srp_iu_type iu_type)
1816{
1817	unsigned long flags;
1818
1819	spin_lock_irqsave(&ch->lock, flags);
1820	list_add(&iu->list, &ch->free_tx);
1821	if (iu_type != SRP_IU_RSP)
1822		++ch->req_lim;
1823	spin_unlock_irqrestore(&ch->lock, flags);
1824}
1825
1826/*
1827 * Must be called with ch->lock held to protect req_lim and free_tx.
1828 * If IU is not sent, it must be returned using srp_put_tx_iu().
1829 *
1830 * Note:
1831 * An upper limit for the number of allocated information units for each
1832 * request type is:
1833 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1834 *   more than Scsi_Host.can_queue requests.
1835 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1836 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1837 *   one unanswered SRP request to an initiator.
1838 */
1839static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1840				      enum srp_iu_type iu_type)
1841{
1842	struct srp_target_port *target = ch->target;
1843	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
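	/*
	 * rsv keeps SRP_TSK_MGMT_SQ_SIZE credits in reserve for everything
	 * except task management requests, so that SCSI commands can never
	 * consume the credits needed to send an abort or LUN reset.
	 */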
1844	struct srp_iu *iu;
1845
1846	lockdep_assert_held(&ch->lock);
1847
1848	ib_process_cq_direct(ch->send_cq, -1);
1849
1850	if (list_empty(&ch->free_tx))
1851		return NULL;
1852
1853	/* Initiator responses to target requests do not consume credits */
1854	if (iu_type != SRP_IU_RSP) {
1855		if (ch->req_lim <= rsv) {
1856			++target->zero_req_lim;
1857			return NULL;
1858		}
1859
1860		--ch->req_lim;
1861	}
1862
1863	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1864	list_del(&iu->list);
1865	return iu;
1866}
1867
1868/*
1869 * Note: if this function is called from inside ib_drain_sq() then it will
1870 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1871 * with status IB_WC_SUCCESS then that's a bug.
1872 */
1873static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1874{
1875	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1876	struct srp_rdma_ch *ch = cq->cq_context;
1877
1878	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1879		srp_handle_qp_err(cq, wc, "SEND");
1880		return;
1881	}
1882
1883	lockdep_assert_held(&ch->lock);
1884
1885	list_add(&iu->list, &ch->free_tx);
1886}
1887
1888/**
1889 * srp_post_send() - send an SRP information unit
1890 * @ch: RDMA channel over which to send the information unit.
1891 * @iu: Information unit to send.
1892 * @len: Length of the information unit excluding immediate data.
1893 */
1894static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1895{
1896	struct srp_target_port *target = ch->target;
1897	struct ib_send_wr wr;
1898
1899	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1900		return -EINVAL;
1901
1902	iu->sge[0].addr   = iu->dma;
1903	iu->sge[0].length = len;
1904	iu->sge[0].lkey   = target->lkey;
1905
1906	iu->cqe.done = srp_send_done;
1907
1908	wr.next       = NULL;
1909	wr.wr_cqe     = &iu->cqe;
1910	wr.sg_list    = &iu->sge[0];
1911	wr.num_sge    = iu->num_sge;
1912	wr.opcode     = IB_WR_SEND;
1913	wr.send_flags = IB_SEND_SIGNALED;
1914
1915	return ib_post_send(ch->qp, &wr, NULL);
1916}
1917
1918static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1919{
1920	struct srp_target_port *target = ch->target;
1921	struct ib_recv_wr wr;
1922	struct ib_sge list;
1923
1924	list.addr   = iu->dma;
1925	list.length = iu->size;
1926	list.lkey   = target->lkey;
1927
1928	iu->cqe.done = srp_recv_done;
1929
1930	wr.next     = NULL;
1931	wr.wr_cqe   = &iu->cqe;
1932	wr.sg_list  = &list;
1933	wr.num_sge  = 1;
1934
1935	return ib_post_recv(ch->qp, &wr, NULL);
1936}
1937
1938static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1939{
1940	struct srp_target_port *target = ch->target;
1941	struct srp_request *req;
1942	struct scsi_cmnd *scmnd;
1943	unsigned long flags;
1944
1945	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1946		spin_lock_irqsave(&ch->lock, flags);
1947		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1948		if (rsp->tag == ch->tsk_mgmt_tag) {
1949			ch->tsk_mgmt_status = -1;
1950			if (be32_to_cpu(rsp->resp_data_len) >= 4)
1951				ch->tsk_mgmt_status = rsp->data[3];
1952			complete(&ch->tsk_mgmt_done);
1953		} else {
1954			shost_printk(KERN_ERR, target->scsi_host,
1955				     "Received tsk mgmt response too late for tag %#llx\n",
1956				     rsp->tag);
1957		}
1958		spin_unlock_irqrestore(&ch->lock, flags);
1959	} else {
1960		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1961		if (scmnd) {
1962			req = scsi_cmd_priv(scmnd);
1963			scmnd = srp_claim_req(ch, req, NULL, scmnd);
1964		}
1965		if (!scmnd) {
1966			shost_printk(KERN_ERR, target->scsi_host,
1967				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1968				     rsp->tag, ch - target->ch, ch->qp->qp_num);
1969
1970			spin_lock_irqsave(&ch->lock, flags);
1971			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1972			spin_unlock_irqrestore(&ch->lock, flags);
1973
1974			return;
1975		}
1976		scmnd->result = rsp->status;
1977
1978		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1979			memcpy(scmnd->sense_buffer, rsp->data +
1980			       be32_to_cpu(rsp->resp_data_len),
1981			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1982				     SCSI_SENSE_BUFFERSIZE));
1983		}
1984
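		/*
		 * Propagate residual counts reported by the target: the
		 * *UNDER flags mean fewer bytes were transferred than
		 * requested (positive residual), the *OVER flags mean an
		 * overflow and are stored as a negative residual.
		 */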
1985		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1986			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1987		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1988			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1989		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1990			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1991		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1992			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1993
1994		srp_free_req(ch, req, scmnd,
1995			     be32_to_cpu(rsp->req_lim_delta));
1996
1997		scsi_done(scmnd);
1998	}
1999}
2000
2001static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
2002			       void *rsp, int len)
2003{
2004	struct srp_target_port *target = ch->target;
2005	struct ib_device *dev = target->srp_host->srp_dev->dev;
2006	unsigned long flags;
2007	struct srp_iu *iu;
2008	int err;
2009
2010	spin_lock_irqsave(&ch->lock, flags);
2011	ch->req_lim += req_delta;
2012	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2013	spin_unlock_irqrestore(&ch->lock, flags);
2014
2015	if (!iu) {
2016		shost_printk(KERN_ERR, target->scsi_host, PFX
2017			     "no IU available to send response\n");
2018		return 1;
2019	}
2020
2021	iu->num_sge = 1;
2022	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2023	memcpy(iu->buf, rsp, len);
2024	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2025
2026	err = srp_post_send(ch, iu, len);
2027	if (err) {
2028		shost_printk(KERN_ERR, target->scsi_host, PFX
2029			     "unable to post response: %d\n", err);
2030		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2031	}
2032
2033	return err;
2034}
2035
2036static void srp_process_cred_req(struct srp_rdma_ch *ch,
2037				 struct srp_cred_req *req)
2038{
2039	struct srp_cred_rsp rsp = {
2040		.opcode = SRP_CRED_RSP,
2041		.tag = req->tag,
2042	};
2043	s32 delta = be32_to_cpu(req->req_lim_delta);
2044
2045	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2046		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2047			     "problems processing SRP_CRED_REQ\n");
2048}
2049
2050static void srp_process_aer_req(struct srp_rdma_ch *ch,
2051				struct srp_aer_req *req)
2052{
2053	struct srp_target_port *target = ch->target;
2054	struct srp_aer_rsp rsp = {
2055		.opcode = SRP_AER_RSP,
2056		.tag = req->tag,
2057	};
2058	s32 delta = be32_to_cpu(req->req_lim_delta);
2059
2060	shost_printk(KERN_ERR, target->scsi_host, PFX
2061		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
2062
2063	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2064		shost_printk(KERN_ERR, target->scsi_host, PFX
2065			     "problems processing SRP_AER_REQ\n");
2066}
2067
2068static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2069{
2070	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2071	struct srp_rdma_ch *ch = cq->cq_context;
2072	struct srp_target_port *target = ch->target;
2073	struct ib_device *dev = target->srp_host->srp_dev->dev;
2074	int res;
2075	u8 opcode;
2076
2077	if (unlikely(wc->status != IB_WC_SUCCESS)) {
2078		srp_handle_qp_err(cq, wc, "RECV");
2079		return;
2080	}
2081
2082	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2083				   DMA_FROM_DEVICE);
2084
2085	opcode = *(u8 *) iu->buf;
2086
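	/*
	 * Debug aid: changing the 0 below to 1 logs the opcode and a hex
	 * dump of every received IU.
	 */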
2087	if (0) {
2088		shost_printk(KERN_ERR, target->scsi_host,
2089			     PFX "recv completion, opcode 0x%02x\n", opcode);
2090		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2091			       iu->buf, wc->byte_len, true);
2092	}
2093
2094	switch (opcode) {
2095	case SRP_RSP:
2096		srp_process_rsp(ch, iu->buf);
2097		break;
2098
2099	case SRP_CRED_REQ:
2100		srp_process_cred_req(ch, iu->buf);
2101		break;
2102
2103	case SRP_AER_REQ:
2104		srp_process_aer_req(ch, iu->buf);
2105		break;
2106
2107	case SRP_T_LOGOUT:
2108		/* XXX Handle target logout */
2109		shost_printk(KERN_WARNING, target->scsi_host,
2110			     PFX "Got target logout request\n");
2111		break;
2112
2113	default:
2114		shost_printk(KERN_WARNING, target->scsi_host,
2115			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2116		break;
2117	}
2118
2119	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2120				      DMA_FROM_DEVICE);
2121
2122	res = srp_post_recv(ch, iu);
2123	if (res != 0)
2124		shost_printk(KERN_ERR, target->scsi_host,
2125			     PFX "Recv failed with error code %d\n", res);
2126}
2127
2128/**
2129 * srp_tl_err_work() - handle a transport layer error
2130 * @work: Work structure embedded in an SRP target port.
2131 *
2132 * Note: This function may get invoked before the rport has been created,
2133 * hence the target->rport test.
2134 */
2135static void srp_tl_err_work(struct work_struct *work)
2136{
2137	struct srp_target_port *target;
2138
2139	target = container_of(work, struct srp_target_port, tl_err_work);
2140	if (target->rport)
2141		srp_start_tl_fail_timers(target->rport);
2142}
2143
2144static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2145		const char *opname)
2146{
2147	struct srp_rdma_ch *ch = cq->cq_context;
2148	struct srp_target_port *target = ch->target;
2149
2150	if (ch->connected && !target->qp_in_error) {
2151		shost_printk(KERN_ERR, target->scsi_host,
2152			     PFX "failed %s status %s (%d) for CQE %p\n",
2153			     opname, ib_wc_status_msg(wc->status), wc->status,
2154			     wc->wr_cqe);
2155		queue_work(system_long_wq, &target->tl_err_work);
2156	}
2157	target->qp_in_error = true;
2158}
2159
2160static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2161{
2162	struct request *rq = scsi_cmd_to_rq(scmnd);
2163	struct srp_target_port *target = host_to_target(shost);
2164	struct srp_rdma_ch *ch;
2165	struct srp_request *req = scsi_cmd_priv(scmnd);
2166	struct srp_iu *iu;
2167	struct srp_cmd *cmd;
2168	struct ib_device *dev;
2169	unsigned long flags;
2170	u32 tag;
2171	int len, ret;
2172
2173	scmnd->result = srp_chkready(target->rport);
2174	if (unlikely(scmnd->result))
2175		goto err;
2176
2177	WARN_ON_ONCE(rq->tag < 0);
2178	tag = blk_mq_unique_tag(rq);
2179	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2180
2181	spin_lock_irqsave(&ch->lock, flags);
2182	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2183	spin_unlock_irqrestore(&ch->lock, flags);
2184
2185	if (!iu)
2186		goto err;
2187
2188	dev = target->srp_host->srp_dev->dev;
2189	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2190				   DMA_TO_DEVICE);
2191
2192	cmd = iu->buf;
2193	memset(cmd, 0, sizeof *cmd);
2194
2195	cmd->opcode = SRP_CMD;
2196	int_to_scsilun(scmnd->device->lun, &cmd->lun);
2197	cmd->tag    = tag;
2198	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2199	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2200		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2201					    4);
2202		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2203			goto err_iu;
2204	}
2205
2206	req->scmnd    = scmnd;
2207	req->cmd      = iu;
2208
2209	len = srp_map_data(scmnd, ch, req);
2210	if (len < 0) {
2211		shost_printk(KERN_ERR, target->scsi_host,
2212			     PFX "Failed to map data (%d)\n", len);
2213		/*
2214		 * If we ran out of memory descriptors (-ENOMEM) because an
2215		 * application is queuing many requests with more than
2216		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2217		 * to reduce queue depth temporarily.
2218		 */
2219		scmnd->result = len == -ENOMEM ?
2220			DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
2221		goto err_iu;
2222	}
2223
2224	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2225				      DMA_TO_DEVICE);
2226
2227	if (srp_post_send(ch, iu, len)) {
2228		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2229		scmnd->result = DID_ERROR << 16;
2230		goto err_unmap;
2231	}
2232
2233	return 0;
2234
2235err_unmap:
2236	srp_unmap_data(scmnd, ch, req);
2237
2238err_iu:
2239	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2240
2241	/*
2242	 * Prevent the loops that iterate over the request ring from
2243	 * encountering a dangling SCSI command pointer.
2244	 */
2245	req->scmnd = NULL;
2246
2247err:
2248	if (scmnd->result) {
2249		scsi_done(scmnd);
2250		ret = 0;
2251	} else {
2252		ret = SCSI_MLQUEUE_HOST_BUSY;
2253	}
2254
2255	return ret;
2256}
2257
2258/*
2259 * Note: the resources allocated in this function are freed in
2260 * srp_free_ch_ib().
2261 */
2262static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2263{
2264	struct srp_target_port *target = ch->target;
2265	int i;
2266
2267	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2268			      GFP_KERNEL);
2269	if (!ch->rx_ring)
2270		goto err_no_ring;
2271	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2272			      GFP_KERNEL);
2273	if (!ch->tx_ring)
2274		goto err_no_ring;
2275
2276	for (i = 0; i < target->queue_size; ++i) {
2277		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2278					      ch->max_ti_iu_len,
2279					      GFP_KERNEL, DMA_FROM_DEVICE);
2280		if (!ch->rx_ring[i])
2281			goto err;
2282	}
2283
2284	for (i = 0; i < target->queue_size; ++i) {
2285		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2286					      ch->max_it_iu_len,
2287					      GFP_KERNEL, DMA_TO_DEVICE);
2288		if (!ch->tx_ring[i])
2289			goto err;
2290
2291		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2292	}
2293
2294	return 0;
2295
2296err:
2297	for (i = 0; i < target->queue_size; ++i) {
2298		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2299		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2300	}
2301
2302
2303err_no_ring:
2304	kfree(ch->tx_ring);
2305	ch->tx_ring = NULL;
2306	kfree(ch->rx_ring);
2307	ch->rx_ring = NULL;
2308
2309	return -ENOMEM;
2310}
2311
2312static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2313{
2314	uint64_t T_tr_ns, max_compl_time_ms;
2315	uint32_t rq_tmo_jiffies;
2316
2317	/*
2318	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2319	 * table 91), both the QP timeout and the retry count have to be set
2320	 * for RC QP's during the RTR to RTS transition.
2321	 */
2322	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2323		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2324
2325	/*
2326	 * Set target->rq_tmo_jiffies to one second more than the largest time
2327	 * it can take before an error completion is generated. See also
2328	 * C9-140..142 in the IBTA spec for more information about how to
2329	 * convert the QP Local ACK Timeout value to nanoseconds.
2330	 */
2331	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2332	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2333	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2334	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2335
2336	return rq_tmo_jiffies;
2337}
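/*
 * Example (hypothetical values): for a QP with qp_attr->timeout == 18 and
 * qp_attr->retry_cnt == 7, T_tr_ns = 4096 * 2^18 ns ~= 1.07 s, so
 * max_compl_time_ms ~= 7 * 4 * 1.07 s ~= 30 s and the returned value
 * corresponds to roughly 31 seconds.
 */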
2338
2339static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2340			       const struct srp_login_rsp *lrsp,
2341			       struct srp_rdma_ch *ch)
2342{
2343	struct srp_target_port *target = ch->target;
2344	struct ib_qp_attr *qp_attr = NULL;
2345	int attr_mask = 0;
2346	int ret = 0;
2347	int i;
2348
2349	if (lrsp->opcode == SRP_LOGIN_RSP) {
2350		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2351		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2352		ch->use_imm_data  = srp_use_imm_data &&
2353			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
2354		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2355						      ch->use_imm_data,
2356						      target->max_it_iu_size);
2357		WARN_ON_ONCE(ch->max_it_iu_len >
2358			     be32_to_cpu(lrsp->max_it_iu_len));
2359
2360		if (ch->use_imm_data)
2361			shost_printk(KERN_DEBUG, target->scsi_host,
2362				     PFX "using immediate data\n");
2363
2364		/*
2365		 * Reserve credits for task management so we don't
2366		 * bounce requests back to the SCSI mid-layer.
2367		 */
2368		target->scsi_host->can_queue
2369			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2370			      target->scsi_host->can_queue);
2371		target->scsi_host->cmd_per_lun
2372			= min_t(int, target->scsi_host->can_queue,
2373				target->scsi_host->cmd_per_lun);
2374	} else {
2375		shost_printk(KERN_WARNING, target->scsi_host,
2376			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2377		ret = -ECONNRESET;
2378		goto error;
2379	}
2380
2381	if (!ch->rx_ring) {
2382		ret = srp_alloc_iu_bufs(ch);
2383		if (ret)
2384			goto error;
2385	}
2386
2387	for (i = 0; i < target->queue_size; i++) {
2388		struct srp_iu *iu = ch->rx_ring[i];
2389
2390		ret = srp_post_recv(ch, iu);
2391		if (ret)
2392			goto error;
2393	}
2394
2395	if (!target->using_rdma_cm) {
2396		ret = -ENOMEM;
2397		qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2398		if (!qp_attr)
2399			goto error;
2400
2401		qp_attr->qp_state = IB_QPS_RTR;
2402		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2403		if (ret)
2404			goto error_free;
2405
2406		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2407		if (ret)
2408			goto error_free;
2409
2410		qp_attr->qp_state = IB_QPS_RTS;
2411		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2412		if (ret)
2413			goto error_free;
2414
2415		target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2416
2417		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2418		if (ret)
2419			goto error_free;
2420
2421		ret = ib_send_cm_rtu(cm_id, NULL, 0);
2422	}
2423
2424error_free:
2425	kfree(qp_attr);
2426
2427error:
2428	ch->status = ret;
2429}
2430
2431static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
2432				  const struct ib_cm_event *event,
2433				  struct srp_rdma_ch *ch)
2434{
2435	struct srp_target_port *target = ch->target;
2436	struct Scsi_Host *shost = target->scsi_host;
2437	struct ib_class_port_info *cpi;
2438	int opcode;
2439	u16 dlid;
2440
2441	switch (event->param.rej_rcvd.reason) {
2442	case IB_CM_REJ_PORT_CM_REDIRECT:
2443		cpi = event->param.rej_rcvd.ari;
2444		dlid = be16_to_cpu(cpi->redirect_lid);
2445		sa_path_set_dlid(&ch->ib_cm.path, dlid);
2446		ch->ib_cm.path.pkey = cpi->redirect_pkey;
2447		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2448		memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2449
2450		ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2451		break;
2452
2453	case IB_CM_REJ_PORT_REDIRECT:
2454		if (srp_target_is_topspin(target)) {
2455			union ib_gid *dgid = &ch->ib_cm.path.dgid;
2456
2457			/*
2458			 * Topspin/Cisco SRP gateways incorrectly send
2459			 * reject reason code 25 when they mean 24
2460			 * (port redirect).
2461			 */
2462			memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
2463
2464			shost_printk(KERN_DEBUG, shost,
2465				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2466				     be64_to_cpu(dgid->global.subnet_prefix),
2467				     be64_to_cpu(dgid->global.interface_id));
2468
2469			ch->status = SRP_PORT_REDIRECT;
2470		} else {
2471			shost_printk(KERN_WARNING, shost,
2472				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2473			ch->status = -ECONNRESET;
2474		}
2475		break;
2476
2477	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2478		shost_printk(KERN_WARNING, shost,
2479			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2480		ch->status = -ECONNRESET;
2481		break;
2482
2483	case IB_CM_REJ_CONSUMER_DEFINED:
2484		opcode = *(u8 *) event->private_data;
2485		if (opcode == SRP_LOGIN_REJ) {
2486			struct srp_login_rej *rej = event->private_data;
2487			u32 reason = be32_to_cpu(rej->reason);
2488
2489			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2490				shost_printk(KERN_WARNING, shost,
2491					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2492			else
2493				shost_printk(KERN_WARNING, shost, PFX
2494					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2495					     target->sgid.raw,
2496					     target->ib_cm.orig_dgid.raw,
2497					     reason);
2498		} else
2499			shost_printk(KERN_WARNING, shost,
2500				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2501				     " opcode 0x%02x\n", opcode);
2502		ch->status = -ECONNRESET;
2503		break;
2504
2505	case IB_CM_REJ_STALE_CONN:
2506		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2507		ch->status = SRP_STALE_CONN;
2508		break;
2509
2510	default:
2511		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2512			     event->param.rej_rcvd.reason);
2513		ch->status = -ECONNRESET;
2514	}
2515}
2516
2517static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2518			     const struct ib_cm_event *event)
2519{
2520	struct srp_rdma_ch *ch = cm_id->context;
2521	struct srp_target_port *target = ch->target;
2522	int comp = 0;
2523
2524	switch (event->event) {
2525	case IB_CM_REQ_ERROR:
2526		shost_printk(KERN_DEBUG, target->scsi_host,
2527			     PFX "Sending CM REQ failed\n");
2528		comp = 1;
2529		ch->status = -ECONNRESET;
2530		break;
2531
2532	case IB_CM_REP_RECEIVED:
2533		comp = 1;
2534		srp_cm_rep_handler(cm_id, event->private_data, ch);
2535		break;
2536
2537	case IB_CM_REJ_RECEIVED:
2538		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2539		comp = 1;
2540
2541		srp_ib_cm_rej_handler(cm_id, event, ch);
2542		break;
2543
2544	case IB_CM_DREQ_RECEIVED:
2545		shost_printk(KERN_WARNING, target->scsi_host,
2546			     PFX "DREQ received - connection closed\n");
2547		ch->connected = false;
2548		if (ib_send_cm_drep(cm_id, NULL, 0))
2549			shost_printk(KERN_ERR, target->scsi_host,
2550				     PFX "Sending CM DREP failed\n");
2551		queue_work(system_long_wq, &target->tl_err_work);
2552		break;
2553
2554	case IB_CM_TIMEWAIT_EXIT:
2555		shost_printk(KERN_ERR, target->scsi_host,
2556			     PFX "connection closed\n");
2557		comp = 1;
2558
2559		ch->status = 0;
2560		break;
2561
2562	case IB_CM_MRA_RECEIVED:
2563	case IB_CM_DREQ_ERROR:
2564	case IB_CM_DREP_RECEIVED:
2565		break;
2566
2567	default:
2568		shost_printk(KERN_WARNING, target->scsi_host,
2569			     PFX "Unhandled CM event %d\n", event->event);
2570		break;
2571	}
2572
2573	if (comp)
2574		complete(&ch->done);
2575
2576	return 0;
2577}
2578
2579static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2580				    struct rdma_cm_event *event)
2581{
2582	struct srp_target_port *target = ch->target;
2583	struct Scsi_Host *shost = target->scsi_host;
2584	int opcode;
2585
2586	switch (event->status) {
2587	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2588		shost_printk(KERN_WARNING, shost,
2589			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2590		ch->status = -ECONNRESET;
2591		break;
2592
2593	case IB_CM_REJ_CONSUMER_DEFINED:
2594		opcode = *(u8 *) event->param.conn.private_data;
2595		if (opcode == SRP_LOGIN_REJ) {
2596			struct srp_login_rej *rej =
2597				(struct srp_login_rej *)
2598				event->param.conn.private_data;
2599			u32 reason = be32_to_cpu(rej->reason);
2600
2601			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2602				shost_printk(KERN_WARNING, shost,
2603					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2604			else
2605				shost_printk(KERN_WARNING, shost,
2606					    PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2607		} else {
2608			shost_printk(KERN_WARNING, shost,
2609				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2610				     opcode);
2611		}
2612		ch->status = -ECONNRESET;
2613		break;
2614
2615	case IB_CM_REJ_STALE_CONN:
2616		shost_printk(KERN_WARNING, shost,
2617			     "  REJ reason: stale connection\n");
2618		ch->status = SRP_STALE_CONN;
2619		break;
2620
2621	default:
2622		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2623			     event->status);
2624		ch->status = -ECONNRESET;
2625		break;
2626	}
2627}
2628
2629static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2630			       struct rdma_cm_event *event)
2631{
2632	struct srp_rdma_ch *ch = cm_id->context;
2633	struct srp_target_port *target = ch->target;
2634	int comp = 0;
2635
2636	switch (event->event) {
2637	case RDMA_CM_EVENT_ADDR_RESOLVED:
2638		ch->status = 0;
2639		comp = 1;
2640		break;
2641
2642	case RDMA_CM_EVENT_ADDR_ERROR:
2643		ch->status = -ENXIO;
2644		comp = 1;
2645		break;
2646
2647	case RDMA_CM_EVENT_ROUTE_RESOLVED:
2648		ch->status = 0;
2649		comp = 1;
2650		break;
2651
2652	case RDMA_CM_EVENT_ROUTE_ERROR:
2653	case RDMA_CM_EVENT_UNREACHABLE:
2654		ch->status = -EHOSTUNREACH;
2655		comp = 1;
2656		break;
2657
2658	case RDMA_CM_EVENT_CONNECT_ERROR:
2659		shost_printk(KERN_DEBUG, target->scsi_host,
2660			     PFX "Sending CM REQ failed\n");
2661		comp = 1;
2662		ch->status = -ECONNRESET;
2663		break;
2664
2665	case RDMA_CM_EVENT_ESTABLISHED:
2666		comp = 1;
2667		srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2668		break;
2669
2670	case RDMA_CM_EVENT_REJECTED:
2671		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2672		comp = 1;
2673
2674		srp_rdma_cm_rej_handler(ch, event);
2675		break;
2676
2677	case RDMA_CM_EVENT_DISCONNECTED:
2678		if (ch->connected) {
2679			shost_printk(KERN_WARNING, target->scsi_host,
2680				     PFX "received DREQ\n");
2681			rdma_disconnect(ch->rdma_cm.cm_id);
2682			comp = 1;
2683			ch->status = 0;
2684			queue_work(system_long_wq, &target->tl_err_work);
2685		}
2686		break;
2687
2688	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2689		shost_printk(KERN_ERR, target->scsi_host,
2690			     PFX "connection closed\n");
2691
2692		comp = 1;
2693		ch->status = 0;
2694		break;
2695
2696	default:
2697		shost_printk(KERN_WARNING, target->scsi_host,
2698			     PFX "Unhandled CM event %d\n", event->event);
2699		break;
2700	}
2701
2702	if (comp)
2703		complete(&ch->done);
2704
2705	return 0;
2706}
2707
2708/**
2709 * srp_change_queue_depth - set the device queue depth
2710 * @sdev: scsi device struct
2711 * @qdepth: requested queue depth
2712 *
2713 * Returns queue depth.
2714 */
2715static int
2716srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2717{
2718	if (!sdev->tagged_supported)
2719		qdepth = 1;
2720	return scsi_change_queue_depth(sdev, qdepth);
2721}
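/*
 * This callback is reached e.g. when user space writes to the queue_depth
 * sysfs attribute of a SCSI device (/sys/block/<disk>/device/queue_depth).
 */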
2722
2723static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2724			     u8 func, u8 *status)
2725{
2726	struct srp_target_port *target = ch->target;
2727	struct srp_rport *rport = target->rport;
2728	struct ib_device *dev = target->srp_host->srp_dev->dev;
2729	struct srp_iu *iu;
2730	struct srp_tsk_mgmt *tsk_mgmt;
2731	int res;
2732
2733	if (!ch->connected || target->qp_in_error)
2734		return -1;
2735
2736	/*
2737	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2738	 * invoked while a task management function is being sent.
2739	 */
2740	mutex_lock(&rport->mutex);
2741	spin_lock_irq(&ch->lock);
2742	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2743	spin_unlock_irq(&ch->lock);
2744
2745	if (!iu) {
2746		mutex_unlock(&rport->mutex);
2747
2748		return -1;
2749	}
2750
2751	iu->num_sge = 1;
2752
2753	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2754				   DMA_TO_DEVICE);
2755	tsk_mgmt = iu->buf;
2756	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2757
2758	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
2759	int_to_scsilun(lun, &tsk_mgmt->lun);
2760	tsk_mgmt->tsk_mgmt_func = func;
2761	tsk_mgmt->task_tag	= req_tag;
2762
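	/*
	 * Generate a new tag for this task management request. The
	 * SRP_TAG_TSK_MGMT bit lets srp_process_rsp() tell task management
	 * responses apart from SCSI command responses.
	 */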
2763	spin_lock_irq(&ch->lock);
2764	ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2765	tsk_mgmt->tag = ch->tsk_mgmt_tag;
2766	spin_unlock_irq(&ch->lock);
2767
2768	init_completion(&ch->tsk_mgmt_done);
2769
2770	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2771				      DMA_TO_DEVICE);
2772	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2773		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2774		mutex_unlock(&rport->mutex);
2775
2776		return -1;
2777	}
2778	res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2779					msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2780	if (res > 0 && status)
2781		*status = ch->tsk_mgmt_status;
2782	mutex_unlock(&rport->mutex);
2783
2784	WARN_ON_ONCE(res < 0);
2785
2786	return res > 0 ? 0 : -1;
2787}
2788
2789static int srp_abort(struct scsi_cmnd *scmnd)
2790{
2791	struct srp_target_port *target = host_to_target(scmnd->device->host);
2792	struct srp_request *req = scsi_cmd_priv(scmnd);
2793	u32 tag;
2794	u16 ch_idx;
2795	struct srp_rdma_ch *ch;
2796	int ret;
2797
2798	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2799
2800	tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
2801	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2802	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2803		return SUCCESS;
2804	ch = &target->ch[ch_idx];
2805	if (!srp_claim_req(ch, req, NULL, scmnd))
2806		return SUCCESS;
2807	shost_printk(KERN_ERR, target->scsi_host,
2808		     "Sending SRP abort for tag %#x\n", tag);
2809	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2810			      SRP_TSK_ABORT_TASK, NULL) == 0)
2811		ret = SUCCESS;
2812	else if (target->rport->state == SRP_RPORT_LOST)
2813		ret = FAST_IO_FAIL;
2814	else
2815		ret = FAILED;
2816	if (ret == SUCCESS) {
2817		srp_free_req(ch, req, scmnd, 0);
2818		scmnd->result = DID_ABORT << 16;
2819		scsi_done(scmnd);
2820	}
2821
2822	return ret;
2823}
2824
2825static int srp_reset_device(struct scsi_cmnd *scmnd)
2826{
2827	struct srp_target_port *target = host_to_target(scmnd->device->host);
2828	struct srp_rdma_ch *ch;
2829	u8 status;
2830
2831	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2832
2833	ch = &target->ch[0];
2834	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2835			      SRP_TSK_LUN_RESET, &status))
2836		return FAILED;
2837	if (status)
2838		return FAILED;
2839
2840	return SUCCESS;
2841}
2842
2843static int srp_reset_host(struct scsi_cmnd *scmnd)
2844{
2845	struct srp_target_port *target = host_to_target(scmnd->device->host);
2846
2847	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2848
2849	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2850}
2851
2852static int srp_target_alloc(struct scsi_target *starget)
2853{
2854	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2855	struct srp_target_port *target = host_to_target(shost);
2856
2857	if (target->target_can_queue)
2858		starget->can_queue = target->target_can_queue;
2859	return 0;
2860}
2861
2862static int srp_slave_configure(struct scsi_device *sdev)
2863{
2864	struct Scsi_Host *shost = sdev->host;
2865	struct srp_target_port *target = host_to_target(shost);
2866	struct request_queue *q = sdev->request_queue;
2867	unsigned long timeout;
2868
2869	if (sdev->type == TYPE_DISK) {
2870		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2871		blk_queue_rq_timeout(q, timeout);
2872	}
2873
2874	return 0;
2875}
2876
2877static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
2878			   char *buf)
2879{
2880	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2881
2882	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2883}
2884
2885static DEVICE_ATTR_RO(id_ext);
2886
2887static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
2888			     char *buf)
2889{
2890	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2891
2892	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2893}
2894
2895static DEVICE_ATTR_RO(ioc_guid);
2896
2897static ssize_t service_id_show(struct device *dev,
2898			       struct device_attribute *attr, char *buf)
2899{
2900	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2901
2902	if (target->using_rdma_cm)
2903		return -ENOENT;
2904	return sysfs_emit(buf, "0x%016llx\n",
2905			  be64_to_cpu(target->ib_cm.service_id));
2906}
2907
2908static DEVICE_ATTR_RO(service_id);
2909
2910static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
2911			 char *buf)
2912{
2913	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2914
2915	if (target->using_rdma_cm)
2916		return -ENOENT;
2917
2918	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
2919}
2920
2921static DEVICE_ATTR_RO(pkey);
2922
2923static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
2924			 char *buf)
2925{
2926	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2927
2928	return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
2929}
2930
2931static DEVICE_ATTR_RO(sgid);
2932
2933static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
2934			 char *buf)
2935{
2936	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2937	struct srp_rdma_ch *ch = &target->ch[0];
2938
2939	if (target->using_rdma_cm)
2940		return -ENOENT;
2941
2942	return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
2943}
2944
2945static DEVICE_ATTR_RO(dgid);
2946
2947static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
2948			      char *buf)
2949{
2950	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2951
2952	if (target->using_rdma_cm)
2953		return -ENOENT;
2954
2955	return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
2956}
2957
2958static DEVICE_ATTR_RO(orig_dgid);
2959
2960static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
2961			    char *buf)
2962{
2963	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2964	struct srp_rdma_ch *ch;
2965	int i, req_lim = INT_MAX;
2966
2967	for (i = 0; i < target->ch_count; i++) {
2968		ch = &target->ch[i];
2969		req_lim = min(req_lim, ch->req_lim);
2970	}
2971
2972	return sysfs_emit(buf, "%d\n", req_lim);
2973}
2974
2975static DEVICE_ATTR_RO(req_lim);
2976
2977static ssize_t zero_req_lim_show(struct device *dev,
2978				 struct device_attribute *attr, char *buf)
2979{
2980	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2981
2982	return sysfs_emit(buf, "%d\n", target->zero_req_lim);
2983}
2984
2985static DEVICE_ATTR_RO(zero_req_lim);
2986
2987static ssize_t local_ib_port_show(struct device *dev,
2988				  struct device_attribute *attr, char *buf)
2989{
2990	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2991
2992	return sysfs_emit(buf, "%u\n", target->srp_host->port);
2993}
2994
2995static DEVICE_ATTR_RO(local_ib_port);
2996
2997static ssize_t local_ib_device_show(struct device *dev,
2998				    struct device_attribute *attr, char *buf)
2999{
3000	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3001
3002	return sysfs_emit(buf, "%s\n",
3003			  dev_name(&target->srp_host->srp_dev->dev->dev));
3004}
3005
3006static DEVICE_ATTR_RO(local_ib_device);
3007
3008static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
3009			     char *buf)
3010{
3011	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3012
3013	return sysfs_emit(buf, "%d\n", target->ch_count);
3014}
3015
3016static DEVICE_ATTR_RO(ch_count);
3017
3018static ssize_t comp_vector_show(struct device *dev,
3019				struct device_attribute *attr, char *buf)
3020{
3021	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3022
3023	return sysfs_emit(buf, "%d\n", target->comp_vector);
3024}
3025
3026static DEVICE_ATTR_RO(comp_vector);
3027
3028static ssize_t tl_retry_count_show(struct device *dev,
3029				   struct device_attribute *attr, char *buf)
3030{
3031	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3032
3033	return sysfs_emit(buf, "%d\n", target->tl_retry_count);
3034}
3035
3036static DEVICE_ATTR_RO(tl_retry_count);
3037
3038static ssize_t cmd_sg_entries_show(struct device *dev,
3039				   struct device_attribute *attr, char *buf)
3040{
3041	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3042
3043	return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
3044}
3045
3046static DEVICE_ATTR_RO(cmd_sg_entries);
3047
3048static ssize_t allow_ext_sg_show(struct device *dev,
3049				 struct device_attribute *attr, char *buf)
3050{
3051	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3052
3053	return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3054}
3055
3056static DEVICE_ATTR_RO(allow_ext_sg);
3057
3058static struct attribute *srp_host_attrs[] = {
3059	&dev_attr_id_ext.attr,
3060	&dev_attr_ioc_guid.attr,
3061	&dev_attr_service_id.attr,
3062	&dev_attr_pkey.attr,
3063	&dev_attr_sgid.attr,
3064	&dev_attr_dgid.attr,
3065	&dev_attr_orig_dgid.attr,
3066	&dev_attr_req_lim.attr,
3067	&dev_attr_zero_req_lim.attr,
3068	&dev_attr_local_ib_port.attr,
3069	&dev_attr_local_ib_device.attr,
3070	&dev_attr_ch_count.attr,
3071	&dev_attr_comp_vector.attr,
3072	&dev_attr_tl_retry_count.attr,
3073	&dev_attr_cmd_sg_entries.attr,
3074	&dev_attr_allow_ext_sg.attr,
3075	NULL
3076};
3077
3078ATTRIBUTE_GROUPS(srp_host);
3079
3080static struct scsi_host_template srp_template = {
3081	.module				= THIS_MODULE,
3082	.name				= "InfiniBand SRP initiator",
3083	.proc_name			= DRV_NAME,
3084	.target_alloc			= srp_target_alloc,
3085	.slave_configure		= srp_slave_configure,
3086	.info				= srp_target_info,
3087	.init_cmd_priv			= srp_init_cmd_priv,
3088	.exit_cmd_priv			= srp_exit_cmd_priv,
3089	.queuecommand			= srp_queuecommand,
3090	.change_queue_depth             = srp_change_queue_depth,
3091	.eh_timed_out			= srp_timed_out,
3092	.eh_abort_handler		= srp_abort,
3093	.eh_device_reset_handler	= srp_reset_device,
3094	.eh_host_reset_handler		= srp_reset_host,
3095	.skip_settle_delay		= true,
3096	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
3097	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
3098	.this_id			= -1,
3099	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
3100	.shost_groups			= srp_host_groups,
3101	.track_queue_depth		= 1,
3102	.cmd_size			= sizeof(struct srp_request),
3103};
3104
3105static int srp_sdev_count(struct Scsi_Host *host)
3106{
3107	struct scsi_device *sdev;
3108	int c = 0;
3109
3110	shost_for_each_device(sdev, host)
3111		c++;
3112
3113	return c;
3114}
3115
3116/*
3117 * Return values:
3118 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3119 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3120 *    removal has been scheduled.
3121 * 0 and target->state != SRP_TARGET_REMOVED upon success.
3122 */
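/*
 * Illustration of how this contract is consumed (see add_target_store()
 * further below): a negative return sends the caller to its error path,
 * which disconnects the target and frees the channels; a zero return with
 * target->state == SRP_TARGET_REMOVED only suppresses the "new target"
 * message, since the scheduled removal work owns the cleanup; and a zero
 * return with any other state is the fully successful case.
 */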
3123static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3124{
3125	struct srp_rport_identifiers ids;
3126	struct srp_rport *rport;
3127
3128	target->state = SRP_TARGET_SCANNING;
3129	sprintf(target->target_name, "SRP.T10:%016llX",
3130		be64_to_cpu(target->id_ext));
3131
3132	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3133		return -ENODEV;
3134
3135	memcpy(ids.port_id, &target->id_ext, 8);
3136	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
3137	ids.roles = SRP_RPORT_ROLE_TARGET;
3138	rport = srp_rport_add(target->scsi_host, &ids);
3139	if (IS_ERR(rport)) {
3140		scsi_remove_host(target->scsi_host);
3141		return PTR_ERR(rport);
3142	}
3143
3144	rport->lld_data = target;
3145	target->rport = rport;
3146
3147	spin_lock(&host->target_lock);
3148	list_add_tail(&target->list, &host->target_list);
3149	spin_unlock(&host->target_lock);
3150
3151	scsi_scan_target(&target->scsi_host->shost_gendev,
3152			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
3153
3154	if (srp_connected_ch(target) < target->ch_count ||
3155	    target->qp_in_error) {
3156		shost_printk(KERN_INFO, target->scsi_host,
3157			     PFX "SCSI scan failed - removing SCSI host\n");
3158		srp_queue_remove_work(target);
3159		goto out;
3160	}
3161
3162	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
3163		 dev_name(&target->scsi_host->shost_gendev),
3164		 srp_sdev_count(target->scsi_host));
3165
3166	spin_lock_irq(&target->lock);
3167	if (target->state == SRP_TARGET_SCANNING)
3168		target->state = SRP_TARGET_LIVE;
3169	spin_unlock_irq(&target->lock);
3170
3171out:
3172	return 0;
3173}
3174
3175static void srp_release_dev(struct device *dev)
3176{
3177	struct srp_host *host =
3178		container_of(dev, struct srp_host, dev);
3179
3180	kfree(host);
3181}
3182
3183static struct attribute *srp_class_attrs[];
3184
3185ATTRIBUTE_GROUPS(srp_class);
3186
3187static struct class srp_class = {
3188	.name    = "infiniband_srp",
3189	.dev_groups = srp_class_groups,
3190	.dev_release = srp_release_dev
3191};
3192
3193/**
3194 * srp_conn_unique() - check whether the connection to a target is unique
3195 * @host:   SRP host.
3196 * @target: SRP target port.
3197 */
3198static bool srp_conn_unique(struct srp_host *host,
3199			    struct srp_target_port *target)
3200{
3201	struct srp_target_port *t;
3202	bool ret = false;
3203
3204	if (target->state == SRP_TARGET_REMOVED)
3205		goto out;
3206
3207	ret = true;
3208
3209	spin_lock(&host->target_lock);
3210	list_for_each_entry(t, &host->target_list, list) {
3211		if (t != target &&
3212		    target->id_ext == t->id_ext &&
3213		    target->ioc_guid == t->ioc_guid &&
3214		    target->initiator_ext == t->initiator_ext) {
3215			ret = false;
3216			break;
3217		}
3218	}
3219	spin_unlock(&host->target_lock);
3220
3221out:
3222	return ret;
3223}
3224
3225/*
3226 * Target ports are added by writing
3227 *
3228 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3229 *     pkey=<P_Key>,service_id=<service ID>
3230 * or
3231 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3232 *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
3233 *
3234 * to the add_target sysfs attribute.
3235 */
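/*
 * A minimal usage sketch (all values are placeholders; the sysfs directory
 * name follows the "srp-<ibdev>-<port>" pattern set up in srp_add_port()
 * below):
 *
 *   # IB CM based login
 *   echo "id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,\
 *   pkey=<P_Key>,service_id=<service ID>" > \
 *       /sys/class/infiniband_srp/srp-<ibdev>-<port>/add_target
 *
 *   # RDMA CM based login
 *   echo "id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,\
 *   dest=<IPv4 address>:<port number>" > \
 *       /sys/class/infiniband_srp/srp-<ibdev>-<port>/add_target
 */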
3236enum {
3237	SRP_OPT_ERR		= 0,
3238	SRP_OPT_ID_EXT		= 1 << 0,
3239	SRP_OPT_IOC_GUID	= 1 << 1,
3240	SRP_OPT_DGID		= 1 << 2,
3241	SRP_OPT_PKEY		= 1 << 3,
3242	SRP_OPT_SERVICE_ID	= 1 << 4,
3243	SRP_OPT_MAX_SECT	= 1 << 5,
3244	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
3245	SRP_OPT_IO_CLASS	= 1 << 7,
3246	SRP_OPT_INITIATOR_EXT	= 1 << 8,
3247	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
3248	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
3249	SRP_OPT_SG_TABLESIZE	= 1 << 11,
3250	SRP_OPT_COMP_VECTOR	= 1 << 12,
3251	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
3252	SRP_OPT_QUEUE_SIZE	= 1 << 14,
3253	SRP_OPT_IP_SRC		= 1 << 15,
3254	SRP_OPT_IP_DEST		= 1 << 16,
3255	SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
3256	SRP_OPT_MAX_IT_IU_SIZE  = 1 << 18,
3257	SRP_OPT_CH_COUNT	= 1 << 19,
3258};
3259
3260static unsigned int srp_opt_mandatory[] = {
3261	SRP_OPT_ID_EXT		|
3262	SRP_OPT_IOC_GUID	|
3263	SRP_OPT_DGID		|
3264	SRP_OPT_PKEY		|
3265	SRP_OPT_SERVICE_ID,
3266	SRP_OPT_ID_EXT		|
3267	SRP_OPT_IOC_GUID	|
3268	SRP_OPT_IP_DEST,
3269};
3270
3271static const match_table_t srp_opt_tokens = {
3272	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
3273	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
3274	{ SRP_OPT_DGID,			"dgid=%s" 		},
3275	{ SRP_OPT_PKEY,			"pkey=%x" 		},
3276	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
3277	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
3278	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
3279	{ SRP_OPT_TARGET_CAN_QUEUE,	"target_can_queue=%d"	},
3280	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
3281	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
3282	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
3283	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
3284	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
3285	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
3286	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
3287	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
3288	{ SRP_OPT_IP_SRC,		"src=%s"		},
3289	{ SRP_OPT_IP_DEST,		"dest=%s"		},
3290	{ SRP_OPT_MAX_IT_IU_SIZE,	"max_it_iu_size=%d"	},
3291	{ SRP_OPT_CH_COUNT,		"ch_count=%u",		},
3292	{ SRP_OPT_ERR,			NULL 			}
3293};
3294
3295/**
3296 * srp_parse_in - parse an IP address and port number combination
3297 * @net:	   [in]  Network namespace.
3298 * @sa:		   [out] Address family, IP address and port number.
3299 * @addr_port_str: [in]  IP address and port number.
3300 * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
3301 *
3302 * Parse the following address formats:
3303 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3304 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3305 */
3306static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3307			const char *addr_port_str, bool *has_port)
3308{
3309	char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3310	char *port_str;
3311	int ret;
3312
3313	if (!addr)
3314		return -ENOMEM;
3315	port_str = strrchr(addr, ':');
3316	if (port_str && strchr(port_str, ']'))
3317		port_str = NULL;
3318	if (port_str)
3319		*port_str++ = '\0';
3320	if (has_port)
3321		*has_port = port_str != NULL;
3322	ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3323	if (ret && addr[0]) {
3324		addr_end = addr + strlen(addr) - 1;
3325		if (addr[0] == '[' && *addr_end == ']') {
3326			*addr_end = '\0';
3327			ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3328						   port_str, sa);
3329		}
3330	}
3331	kfree(addr);
3332	pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3333	return ret;
3334}
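/*
 * Sketch of what srp_parse_in() yields for the documented formats
 * (addresses are the kernel-doc examples above, not real hosts):
 *
 *   "1.2.3.4:5"    -> AF_INET socket address, port 5, *has_port == true
 *   "[1::2:3%4]:5" -> AF_INET6 socket address with scope id 4, port 5,
 *                     *has_port == true
 *   "[1::2:3]"     -> AF_INET6 socket address; no port follows the closing
 *                     bracket, so *has_port == false
 */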
3335
3336static int srp_parse_options(struct net *net, const char *buf,
3337			     struct srp_target_port *target)
3338{
3339	char *options, *sep_opt;
3340	char *p;
3341	substring_t args[MAX_OPT_ARGS];
3342	unsigned long long ull;
3343	bool has_port;
3344	int opt_mask = 0;
3345	int token;
3346	int ret = -EINVAL;
3347	int i;
3348
3349	options = kstrdup(buf, GFP_KERNEL);
3350	if (!options)
3351		return -ENOMEM;
3352
3353	sep_opt = options;
3354	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3355		if (!*p)
3356			continue;
3357
3358		token = match_token(p, srp_opt_tokens, args);
3359		opt_mask |= token;
3360
3361		switch (token) {
3362		case SRP_OPT_ID_EXT:
3363			p = match_strdup(args);
3364			if (!p) {
3365				ret = -ENOMEM;
3366				goto out;
3367			}
3368			ret = kstrtoull(p, 16, &ull);
3369			if (ret) {
3370				pr_warn("invalid id_ext parameter '%s'\n", p);
3371				kfree(p);
3372				goto out;
3373			}
3374			target->id_ext = cpu_to_be64(ull);
3375			kfree(p);
3376			break;
3377
3378		case SRP_OPT_IOC_GUID:
3379			p = match_strdup(args);
3380			if (!p) {
3381				ret = -ENOMEM;
3382				goto out;
3383			}
3384			ret = kstrtoull(p, 16, &ull);
3385			if (ret) {
3386				pr_warn("invalid ioc_guid parameter '%s'\n", p);
3387				kfree(p);
3388				goto out;
3389			}
3390			target->ioc_guid = cpu_to_be64(ull);
3391			kfree(p);
3392			break;
3393
3394		case SRP_OPT_DGID:
3395			p = match_strdup(args);
3396			if (!p) {
3397				ret = -ENOMEM;
3398				goto out;
3399			}
3400			if (strlen(p) != 32) {
3401				pr_warn("bad dest GID parameter '%s'\n", p);
3402				kfree(p);
3403				goto out;
3404			}
3405
3406			ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3407			kfree(p);
3408			if (ret < 0)
3409				goto out;
3410			break;
3411
3412		case SRP_OPT_PKEY:
3413			ret = match_hex(args, &token);
3414			if (ret) {
3415				pr_warn("bad P_Key parameter '%s'\n", p);
3416				goto out;
3417			}
3418			target->ib_cm.pkey = cpu_to_be16(token);
3419			break;
3420
3421		case SRP_OPT_SERVICE_ID:
3422			p = match_strdup(args);
3423			if (!p) {
3424				ret = -ENOMEM;
3425				goto out;
3426			}
3427			ret = kstrtoull(p, 16, &ull);
3428			if (ret) {
3429				pr_warn("bad service_id parameter '%s'\n", p);
3430				kfree(p);
3431				goto out;
3432			}
3433			target->ib_cm.service_id = cpu_to_be64(ull);
3434			kfree(p);
3435			break;
3436
3437		case SRP_OPT_IP_SRC:
3438			p = match_strdup(args);
3439			if (!p) {
3440				ret = -ENOMEM;
3441				goto out;
3442			}
3443			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3444					   NULL);
3445			if (ret < 0) {
3446				pr_warn("bad source parameter '%s'\n", p);
3447				kfree(p);
3448				goto out;
3449			}
3450			target->rdma_cm.src_specified = true;
3451			kfree(p);
3452			break;
3453
3454		case SRP_OPT_IP_DEST:
3455			p = match_strdup(args);
3456			if (!p) {
3457				ret = -ENOMEM;
3458				goto out;
3459			}
3460			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3461					   &has_port);
3462			if (!has_port)
3463				ret = -EINVAL;
3464			if (ret < 0) {
3465				pr_warn("bad dest parameter '%s'\n", p);
3466				kfree(p);
3467				goto out;
3468			}
3469			target->using_rdma_cm = true;
3470			kfree(p);
3471			break;
3472
3473		case SRP_OPT_MAX_SECT:
3474			ret = match_int(args, &token);
3475			if (ret) {
3476				pr_warn("bad max sect parameter '%s'\n", p);
3477				goto out;
3478			}
3479			target->scsi_host->max_sectors = token;
3480			break;
3481
3482		case SRP_OPT_QUEUE_SIZE:
3483			ret = match_int(args, &token);
3484			if (ret) {
3485				pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n",
3486					p, ret);
3487				goto out;
3488			}
3489			if (token < 1) {
3490				pr_warn("bad queue_size parameter '%s'\n", p);
3491				ret = -EINVAL;
3492				goto out;
3493			}
3494			target->scsi_host->can_queue = token;
3495			target->queue_size = token + SRP_RSP_SQ_SIZE +
3496					     SRP_TSK_MGMT_SQ_SIZE;
3497			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3498				target->scsi_host->cmd_per_lun = token;
3499			break;
3500
3501		case SRP_OPT_MAX_CMD_PER_LUN:
3502			ret = match_int(args, &token);
3503			if (ret) {
3504				pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n",
3505					p, ret);
3506				goto out;
3507			}
3508			if (token < 1) {
3509				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3510					p);
3511				ret = -EINVAL;
3512				goto out;
3513			}
3514			target->scsi_host->cmd_per_lun = token;
3515			break;
3516
3517		case SRP_OPT_TARGET_CAN_QUEUE:
3518			ret = match_int(args, &token);
3519			if (ret) {
3520				pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n",
3521					p, ret);
3522				goto out;
3523			}
3524			if (token < 1) {
3525				pr_warn("bad max target_can_queue parameter '%s'\n",
3526					p);
3527				ret = -EINVAL;
3528				goto out;
3529			}
3530			target->target_can_queue = token;
3531			break;
3532
3533		case SRP_OPT_IO_CLASS:
3534			ret = match_hex(args, &token);
3535			if (ret) {
3536				pr_warn("bad IO class parameter '%s'\n", p);
3537				goto out;
3538			}
3539			if (token != SRP_REV10_IB_IO_CLASS &&
3540			    token != SRP_REV16A_IB_IO_CLASS) {
3541				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3542					token, SRP_REV10_IB_IO_CLASS,
3543					SRP_REV16A_IB_IO_CLASS);
3544				ret = -EINVAL;
3545				goto out;
3546			}
3547			target->io_class = token;
3548			break;
3549
3550		case SRP_OPT_INITIATOR_EXT:
3551			p = match_strdup(args);
3552			if (!p) {
3553				ret = -ENOMEM;
3554				goto out;
3555			}
3556			ret = kstrtoull(p, 16, &ull);
3557			if (ret) {
3558				pr_warn("bad initiator_ext value '%s'\n", p);
3559				kfree(p);
3560				goto out;
3561			}
3562			target->initiator_ext = cpu_to_be64(ull);
3563			kfree(p);
3564			break;
3565
3566		case SRP_OPT_CMD_SG_ENTRIES:
3567			ret = match_int(args, &token);
3568			if (ret) {
3569				pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n",
3570					p, ret);
3571				goto out;
3572			}
3573			if (token < 1 || token > 255) {
3574				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3575					p);
3576				ret = -EINVAL;
3577				goto out;
3578			}
3579			target->cmd_sg_cnt = token;
3580			break;
3581
3582		case SRP_OPT_ALLOW_EXT_SG:
3583			ret = match_int(args, &token);
3584			if (ret) {
3585				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3586				goto out;
3587			}
3588			target->allow_ext_sg = !!token;
3589			break;
3590
3591		case SRP_OPT_SG_TABLESIZE:
3592			ret = match_int(args, &token);
3593			if (ret) {
3594				pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n",
3595					p, ret);
3596				goto out;
3597			}
3598			if (token < 1 || token > SG_MAX_SEGMENTS) {
3599				pr_warn("bad max sg_tablesize parameter '%s'\n",
3600					p);
3601				ret = -EINVAL;
3602				goto out;
3603			}
3604			target->sg_tablesize = token;
3605			break;
3606
3607		case SRP_OPT_COMP_VECTOR:
3608			ret = match_int(args, &token);
3609			if (ret) {
3610				pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n",
3611					p, ret);
3612				goto out;
3613			}
3614			if (token < 0) {
3615				pr_warn("bad comp_vector parameter '%s'\n", p);
3616				ret = -EINVAL;
3617				goto out;
3618			}
3619			target->comp_vector = token;
3620			break;
3621
3622		case SRP_OPT_TL_RETRY_COUNT:
3623			ret = match_int(args, &token);
3624			if (ret) {
3625				pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n",
3626					p, ret);
3627				goto out;
3628			}
3629			if (token < 2 || token > 7) {
3630				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3631					p);
3632				ret = -EINVAL;
3633				goto out;
3634			}
3635			target->tl_retry_count = token;
3636			break;
3637
3638		case SRP_OPT_MAX_IT_IU_SIZE:
3639			ret = match_int(args, &token);
3640			if (ret) {
3641				pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n",
3642					p, ret);
3643				goto out;
3644			}
3645			if (token < 0) {
3646				pr_warn("bad maximum initiator to target IU size '%s'\n", p);
3647				ret = -EINVAL;
3648				goto out;
3649			}
3650			target->max_it_iu_size = token;
3651			break;
3652
3653		case SRP_OPT_CH_COUNT:
3654			ret = match_int(args, &token);
3655			if (ret) {
3656				pr_warn("match_int() failed for channel count parameter '%s', Error %d\n",
3657					p, ret);
3658				goto out;
3659			}
3660			if (token < 1) {
3661				pr_warn("bad channel count %s\n", p);
3662				ret = -EINVAL;
3663				goto out;
3664			}
3665			target->ch_count = token;
3666			break;
3667
3668		default:
3669			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3670				p);
3671			ret = -EINVAL;
3672			goto out;
3673		}
3674	}
3675
3676	for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3677		if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3678			ret = 0;
3679			break;
3680		}
3681	}
3682	if (ret)
3683		pr_warn("target creation request is missing one or more parameters\n");
3684
3685	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3686	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3687		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3688			target->scsi_host->cmd_per_lun,
3689			target->scsi_host->can_queue);
3690
3691out:
3692	kfree(options);
3693	return ret;
3694}
3695
3696static ssize_t add_target_store(struct device *dev,
3697				struct device_attribute *attr, const char *buf,
3698				size_t count)
3699{
3700	struct srp_host *host =
3701		container_of(dev, struct srp_host, dev);
3702	struct Scsi_Host *target_host;
3703	struct srp_target_port *target;
3704	struct srp_rdma_ch *ch;
3705	struct srp_device *srp_dev = host->srp_dev;
3706	struct ib_device *ibdev = srp_dev->dev;
3707	int ret, i, ch_idx;
3708	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3709	bool multich = false;
3710	uint32_t max_iu_len;
3711
3712	target_host = scsi_host_alloc(&srp_template,
3713				      sizeof (struct srp_target_port));
3714	if (!target_host)
3715		return -ENOMEM;
3716
3717	target_host->transportt  = ib_srp_transport_template;
3718	target_host->max_channel = 0;
3719	target_host->max_id      = 1;
3720	target_host->max_lun     = -1LL;
3721	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3722	target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3723
3724	if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
3725		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3726
3727	target = host_to_target(target_host);
3728
3729	target->net		= kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3730	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3731	target->scsi_host	= target_host;
3732	target->srp_host	= host;
3733	target->lkey		= host->srp_dev->pd->local_dma_lkey;
3734	target->global_rkey	= host->srp_dev->global_rkey;
3735	target->cmd_sg_cnt	= cmd_sg_entries;
3736	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3737	target->allow_ext_sg	= allow_ext_sg;
3738	target->tl_retry_count	= 7;
3739	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3740
3741	/*
3742	 * Prevent the SCSI host from being removed by srp_remove_target()
3743	 * before this function returns.
3744	 */
3745	scsi_host_get(target->scsi_host);
3746
3747	ret = mutex_lock_interruptible(&host->add_target_mutex);
3748	if (ret < 0)
3749		goto put;
3750
3751	ret = srp_parse_options(target->net, buf, target);
3752	if (ret)
3753		goto out;
3754
3755	if (!srp_conn_unique(target->srp_host, target)) {
3756		if (target->using_rdma_cm) {
3757			shost_printk(KERN_INFO, target->scsi_host,
3758				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
3759				     be64_to_cpu(target->id_ext),
3760				     be64_to_cpu(target->ioc_guid),
3761				     &target->rdma_cm.dst);
3762		} else {
3763			shost_printk(KERN_INFO, target->scsi_host,
3764				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3765				     be64_to_cpu(target->id_ext),
3766				     be64_to_cpu(target->ioc_guid),
3767				     be64_to_cpu(target->initiator_ext));
3768		}
3769		ret = -EEXIST;
3770		goto out;
3771	}
3772
3773	if (!srp_dev->has_fr && !target->allow_ext_sg &&
3774	    target->cmd_sg_cnt < target->sg_tablesize) {
3775		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3776		target->sg_tablesize = target->cmd_sg_cnt;
3777	}
3778
3779	if (srp_dev->use_fast_reg) {
3780		bool gaps_reg = ibdev->attrs.kernel_cap_flags &
3781				 IBK_SG_GAPS_REG;
3782
3783		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3784				  (ilog2(srp_dev->mr_page_size) - 9);
3785		if (!gaps_reg) {
3786			/*
3787			 * FR can only map one HCA page per entry. If the start
3788			 * address is not aligned on a HCA page boundary two
3789			 * entries will be used for the head and the tail
3790			 * although these two entries combined contain at most
3791			 * one HCA page of data. Hence the "+ 1" in the
3792			 * calculation below.
3793			 *
3794			 * The indirect data buffer descriptor is contiguous
3795			 * so the memory for that buffer will only be
3796			 * registered if register_always is true. Hence add
3797			 * one to mr_per_cmd if register_always has been set.
3798			 */
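			/*
			 * Worked example with assumed values: with 4 KiB MR
			 * pages, max_sectors_per_mr = max_pages_per_mr << 3;
			 * say max_pages_per_mr = 512 (so max_sectors_per_mr =
			 * 4096), max_sectors = 1024 and register_always = 1:
			 *
			 *   mr_per_cmd = 1 + (1024 + 1 + 4096 - 1) / 4096
			 *              = 1 + 1 = 2
			 *
			 * i.e. one MR for the data plus one for the indirect
			 * descriptor buffer.
			 */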
3799			mr_per_cmd = register_always +
3800				(target->scsi_host->max_sectors + 1 +
3801				 max_sectors_per_mr - 1) / max_sectors_per_mr;
3802		} else {
3803			mr_per_cmd = register_always +
3804				(target->sg_tablesize +
3805				 srp_dev->max_pages_per_mr - 1) /
3806				srp_dev->max_pages_per_mr;
3807		}
3808		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3809			 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3810			 max_sectors_per_mr, mr_per_cmd);
3811	}
3812
3813	target_host->sg_tablesize = target->sg_tablesize;
3814	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3815	target->mr_per_cmd = mr_per_cmd;
3816	target->indirect_size = target->sg_tablesize *
3817				sizeof (struct srp_direct_buf);
3818	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
3819				       srp_use_imm_data,
3820				       target->max_it_iu_size);
3821
3822	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3823	INIT_WORK(&target->remove_work, srp_remove_work);
3824	spin_lock_init(&target->lock);
3825	ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
3826	if (ret)
3827		goto out;
3828
3829	ret = -ENOMEM;
3830	if (target->ch_count == 0) {
3831		target->ch_count =
3832			min(ch_count ?:
3833				max(4 * num_online_nodes(),
3834				    ibdev->num_comp_vectors),
3835				num_online_cpus());
3836	}
3837
3838	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3839			     GFP_KERNEL);
3840	if (!target->ch)
3841		goto out;
3842
3843	for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
3844		ch = &target->ch[ch_idx];
3845		ch->target = target;
3846		ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
3847		spin_lock_init(&ch->lock);
3848		INIT_LIST_HEAD(&ch->free_tx);
3849		ret = srp_new_cm_id(ch);
3850		if (ret)
3851			goto err_disconnect;
3852
3853		ret = srp_create_ch_ib(ch);
3854		if (ret)
3855			goto err_disconnect;
3856
3857		ret = srp_connect_ch(ch, max_iu_len, multich);
3858		if (ret) {
3859			char dst[64];
3860
3861			if (target->using_rdma_cm)
3862				snprintf(dst, sizeof(dst), "%pIS",
3863					&target->rdma_cm.dst);
3864			else
3865				snprintf(dst, sizeof(dst), "%pI6",
3866					target->ib_cm.orig_dgid.raw);
3867			shost_printk(KERN_ERR, target->scsi_host,
3868				PFX "Connection %d/%d to %s failed\n",
3869				ch_idx,
3870				target->ch_count, dst);
3871			if (ch_idx == 0) {
3872				goto free_ch;
3873			} else {
3874				srp_free_ch_ib(target, ch);
3875				target->ch_count = ch - target->ch;
3876				goto connected;
3877			}
3878		}
3879		multich = true;
3880	}
3881
3882connected:
3883	target->scsi_host->nr_hw_queues = target->ch_count;
3884
3885	ret = srp_add_target(host, target);
3886	if (ret)
3887		goto err_disconnect;
3888
3889	if (target->state != SRP_TARGET_REMOVED) {
3890		if (target->using_rdma_cm) {
3891			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3892				     "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
3893				     be64_to_cpu(target->id_ext),
3894				     be64_to_cpu(target->ioc_guid),
3895				     target->sgid.raw, &target->rdma_cm.dst);
3896		} else {
3897			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3898				     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3899				     be64_to_cpu(target->id_ext),
3900				     be64_to_cpu(target->ioc_guid),
3901				     be16_to_cpu(target->ib_cm.pkey),
3902				     be64_to_cpu(target->ib_cm.service_id),
3903				     target->sgid.raw,
3904				     target->ib_cm.orig_dgid.raw);
3905		}
3906	}
3907
3908	ret = count;
3909
3910out:
3911	mutex_unlock(&host->add_target_mutex);
3912
3913put:
3914	scsi_host_put(target->scsi_host);
3915	if (ret < 0) {
3916		/*
3917		 * If a call to srp_remove_target() has not been scheduled,
3918		 * drop the network namespace reference that was obtained
3919		 * earlier in this function.
3920		 */
3921		if (target->state != SRP_TARGET_REMOVED)
3922			kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
3923		scsi_host_put(target->scsi_host);
3924	}
3925
3926	return ret;
3927
3928err_disconnect:
3929	srp_disconnect_target(target);
3930
3931free_ch:
3932	for (i = 0; i < target->ch_count; i++) {
3933		ch = &target->ch[i];
3934		srp_free_ch_ib(target, ch);
3935	}
3936
3937	kfree(target->ch);
3938	goto out;
3939}
3940
3941static DEVICE_ATTR_WO(add_target);
3942
3943static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
3944			  char *buf)
3945{
3946	struct srp_host *host = container_of(dev, struct srp_host, dev);
3947
3948	return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
3949}
3950
3951static DEVICE_ATTR_RO(ibdev);
3952
3953static ssize_t port_show(struct device *dev, struct device_attribute *attr,
3954			 char *buf)
3955{
3956	struct srp_host *host = container_of(dev, struct srp_host, dev);
3957
3958	return sysfs_emit(buf, "%u\n", host->port);
3959}
3960
3961static DEVICE_ATTR_RO(port);
3962
3963static struct attribute *srp_class_attrs[] = {
3964	&dev_attr_add_target.attr,
3965	&dev_attr_ibdev.attr,
3966	&dev_attr_port.attr,
3967	NULL
3968};
3969
3970static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
3971{
3972	struct srp_host *host;
3973
3974	host = kzalloc(sizeof *host, GFP_KERNEL);
3975	if (!host)
3976		return NULL;
3977
3978	INIT_LIST_HEAD(&host->target_list);
3979	spin_lock_init(&host->target_lock);
3980	mutex_init(&host->add_target_mutex);
3981	host->srp_dev = device;
3982	host->port = port;
3983
3984	device_initialize(&host->dev);
3985	host->dev.class = &srp_class;
3986	host->dev.parent = device->dev->dev.parent;
3987	if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev),
3988			 port))
3989		goto put_host;
3990	if (device_add(&host->dev))
3991		goto put_host;
3992
3993	return host;
3994
3995put_host:
3996	device_del(&host->dev);
3997	put_device(&host->dev);
3998	return NULL;
3999}
4000
4001static void srp_rename_dev(struct ib_device *device, void *client_data)
4002{
4003	struct srp_device *srp_dev = client_data;
4004	struct srp_host *host, *tmp_host;
4005
4006	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4007		char name[IB_DEVICE_NAME_MAX + 8];
4008
4009		snprintf(name, sizeof(name), "srp-%s-%u",
4010			 dev_name(&device->dev), host->port);
4011		device_rename(&host->dev, name);
4012	}
4013}
4014
4015static int srp_add_one(struct ib_device *device)
4016{
4017	struct srp_device *srp_dev;
4018	struct ib_device_attr *attr = &device->attrs;
4019	struct srp_host *host;
4020	int mr_page_shift;
4021	u32 p;
4022	u64 max_pages_per_mr;
4023	unsigned int flags = 0;
4024
4025	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
4026	if (!srp_dev)
4027		return -ENOMEM;
4028
4029	/*
4030	 * Use the smallest page size supported by the HCA, down to a
4031	 * minimum of 4096 bytes. We're unlikely to build large sglists
4032	 * out of smaller entries.
4033	 */
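	/*
	 * For instance (assumed capability masks): an HCA whose smallest
	 * supported page size is 4 KiB gives ffs(attr->page_size_cap) - 1 ==
	 * 12, hence mr_page_size = 4096 and mr_page_mask = ~0xfffULL; one
	 * that only supports 64 KiB pages gives mr_page_shift = 16, while
	 * anything smaller than 4 KiB is rounded up to 12 by the max(12, ...)
	 * below.
	 */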
4034	mr_page_shift		= max(12, ffs(attr->page_size_cap) - 1);
4035	srp_dev->mr_page_size	= 1 << mr_page_shift;
4036	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
4037	max_pages_per_mr	= attr->max_mr_size;
4038	do_div(max_pages_per_mr, srp_dev->mr_page_size);
4039	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
4040		 attr->max_mr_size, srp_dev->mr_page_size,
4041		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
4042	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
4043					  max_pages_per_mr);
4044
4045	srp_dev->has_fr = (attr->device_cap_flags &
4046			   IB_DEVICE_MEM_MGT_EXTENSIONS);
4047	if (!never_register && !srp_dev->has_fr)
4048		dev_warn(&device->dev, "FR is not supported\n");
4049	else if (!never_register &&
4050		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
4051		srp_dev->use_fast_reg = srp_dev->has_fr;
4052
4053	if (never_register || !register_always || !srp_dev->has_fr)
4054		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
4055
4056	if (srp_dev->use_fast_reg) {
4057		srp_dev->max_pages_per_mr =
4058			min_t(u32, srp_dev->max_pages_per_mr,
4059			      attr->max_fast_reg_page_list_len);
4060	}
4061	srp_dev->mr_max_size	= srp_dev->mr_page_size *
4062				   srp_dev->max_pages_per_mr;
4063	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
4064		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
4065		 attr->max_fast_reg_page_list_len,
4066		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
4067
4068	INIT_LIST_HEAD(&srp_dev->dev_list);
4069
4070	srp_dev->dev = device;
4071	srp_dev->pd  = ib_alloc_pd(device, flags);
4072	if (IS_ERR(srp_dev->pd)) {
4073		int ret = PTR_ERR(srp_dev->pd);
4074
4075		kfree(srp_dev);
4076		return ret;
4077	}
4078
4079	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4080		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4081		WARN_ON_ONCE(srp_dev->global_rkey == 0);
4082	}
4083
4084	rdma_for_each_port (device, p) {
4085		host = srp_add_port(srp_dev, p);
4086		if (host)
4087			list_add_tail(&host->list, &srp_dev->dev_list);
4088	}
4089
4090	ib_set_client_data(device, &srp_client, srp_dev);
4091	return 0;
4092}
4093
4094static void srp_remove_one(struct ib_device *device, void *client_data)
4095{
4096	struct srp_device *srp_dev;
4097	struct srp_host *host, *tmp_host;
4098	struct srp_target_port *target;
4099
4100	srp_dev = client_data;
4101
4102	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4103		/*
4104		 * Remove the add_target sysfs entry so that no new target ports
4105		 * can be created.
4106		 */
4107		device_del(&host->dev);
4108
4109		/*
4110		 * Remove all target ports.
4111		 */
4112		spin_lock(&host->target_lock);
4113		list_for_each_entry(target, &host->target_list, list)
4114			srp_queue_remove_work(target);
4115		spin_unlock(&host->target_lock);
4116
4117		/*
4118		 * srp_queue_remove_work() queues a call to
4119		 * srp_remove_target(). The latter function cancels
4120		 * target->tl_err_work so waiting for the remove works to
4121		 * finish is sufficient.
4122		 */
4123		flush_workqueue(srp_remove_wq);
4124
4125		put_device(&host->dev);
4126	}
4127
4128	ib_dealloc_pd(srp_dev->pd);
4129
4130	kfree(srp_dev);
4131}
4132
4133static struct srp_function_template ib_srp_transport_functions = {
4134	.has_rport_state	 = true,
4135	.reset_timer_if_blocked	 = true,
4136	.reconnect_delay	 = &srp_reconnect_delay,
4137	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
4138	.dev_loss_tmo		 = &srp_dev_loss_tmo,
4139	.reconnect		 = srp_rport_reconnect,
4140	.rport_delete		 = srp_rport_delete,
4141	.terminate_rport_io	 = srp_terminate_io,
4142};
4143
4144static int __init srp_init_module(void)
4145{
4146	int ret;
4147
4148	BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
4149	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4150	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4151	BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
4152	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4153	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4154	BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
4155
4156	if (srp_sg_tablesize) {
4157		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
4158		if (!cmd_sg_entries)
4159			cmd_sg_entries = srp_sg_tablesize;
4160	}
4161
4162	if (!cmd_sg_entries)
4163		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4164
4165	if (cmd_sg_entries > 255) {
4166		pr_warn("Clamping cmd_sg_entries to 255\n");
4167		cmd_sg_entries = 255;
4168	}
4169
4170	if (!indirect_sg_entries)
4171		indirect_sg_entries = cmd_sg_entries;
4172	else if (indirect_sg_entries < cmd_sg_entries) {
4173		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4174			cmd_sg_entries);
4175		indirect_sg_entries = cmd_sg_entries;
4176	}
4177
4178	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4179		pr_warn("Clamping indirect_sg_entries to %u\n",
4180			SG_MAX_SEGMENTS);
4181		indirect_sg_entries = SG_MAX_SEGMENTS;
4182	}
4183
4184	srp_remove_wq = create_workqueue("srp_remove");
4185	if (!srp_remove_wq) {
4186		ret = -ENOMEM;
4187		goto out;
4188	}
4189
4190	ret = -ENOMEM;
4191	ib_srp_transport_template =
4192		srp_attach_transport(&ib_srp_transport_functions);
4193	if (!ib_srp_transport_template)
4194		goto destroy_wq;
4195
4196	ret = class_register(&srp_class);
4197	if (ret) {
4198		pr_err("couldn't register class infiniband_srp\n");
4199		goto release_tr;
4200	}
4201
4202	ib_sa_register_client(&srp_sa_client);
4203
4204	ret = ib_register_client(&srp_client);
4205	if (ret) {
4206		pr_err("couldn't register IB client\n");
4207		goto unreg_sa;
4208	}
4209
4210out:
4211	return ret;
4212
4213unreg_sa:
4214	ib_sa_unregister_client(&srp_sa_client);
4215	class_unregister(&srp_class);
4216
4217release_tr:
4218	srp_release_transport(ib_srp_transport_template);
4219
4220destroy_wq:
4221	destroy_workqueue(srp_remove_wq);
4222	goto out;
4223}
4224
4225static void __exit srp_cleanup_module(void)
4226{
4227	ib_unregister_client(&srp_client);
4228	ib_sa_unregister_client(&srp_sa_client);
4229	class_unregister(&srp_class);
4230	srp_release_transport(ib_srp_transport_template);
4231	destroy_workqueue(srp_remove_wq);
4232}
4233
4234module_init(srp_init_module);
4235module_exit(srp_cleanup_module);