   1/*
   2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
   3 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
   4 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
   5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
   6 * Copyright (c) 2014,2018 Intel Corporation.  All rights reserved.
   7 *
   8 * This software is available to you under a choice of one of two
   9 * licenses.  You may choose to be licensed under the terms of the GNU
  10 * General Public License (GPL) Version 2, available from the file
  11 * COPYING in the main directory of this source tree, or the
  12 * OpenIB.org BSD license below:
  13 *
  14 *     Redistribution and use in source and binary forms, with or
  15 *     without modification, are permitted provided that the following
  16 *     conditions are met:
  17 *
  18 *      - Redistributions of source code must retain the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer.
  21 *
  22 *      - Redistributions in binary form must reproduce the above
  23 *        copyright notice, this list of conditions and the following
  24 *        disclaimer in the documentation and/or other materials
  25 *        provided with the distribution.
  26 *
  27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  34 * SOFTWARE.
  35 *
  36 */
  37
  38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  39
  40#include <linux/dma-mapping.h>
  41#include <linux/slab.h>
  42#include <linux/module.h>
  43#include <linux/security.h>
  44#include <linux/xarray.h>
  45#include <rdma/ib_cache.h>
  46
  47#include "mad_priv.h"
  48#include "core_priv.h"
  49#include "mad_rmpp.h"
  50#include "smi.h"
  51#include "opa_smi.h"
  52#include "agent.h"
  53
  54#define CREATE_TRACE_POINTS
  55#include <trace/events/ib_mad.h>
  56
  57#ifdef CONFIG_TRACEPOINTS
  58static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
  59			  struct ib_mad_qp_info *qp_info,
  60			  struct trace_event_raw_ib_mad_send_template *entry)
  61{
  62	struct ib_ud_wr *wr = &mad_send_wr->send_wr;
  63	struct rdma_ah_attr attr = {};
  64
  65	rdma_query_ah(wr->ah, &attr);
  66
  67	/* These are common */
  68	entry->sl = attr.sl;
  69	entry->rqpn = wr->remote_qpn;
  70	entry->rqkey = wr->remote_qkey;
  71	entry->dlid = rdma_ah_get_dlid(&attr);
  72}
  73#endif
  74
  75static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
  76static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
  77
  78module_param_named(send_queue_size, mad_sendq_size, int, 0444);
  79MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
  80module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
  81MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
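/*
 * Note: both parameters are 0444 (read-only via sysfs) and are only
 * consulted when a port's MAD QPs are created, so they must be set at
 * load time.  A hedged example, assuming mad.c is linked into the usual
 * ib_core module: "options ib_core send_queue_size=256
 * recv_queue_size=1024" in a modprobe.d file.
 */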
  82
  83static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
  84static u32 ib_mad_client_next;
  85static struct list_head ib_mad_port_list;
  86
  87/* Port list lock */
  88static DEFINE_SPINLOCK(ib_mad_port_list_lock);
  89
  90/* Forward declarations */
  91static int method_in_use(struct ib_mad_mgmt_method_table **method,
  92			 struct ib_mad_reg_req *mad_reg_req);
  93static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
  94static struct ib_mad_agent_private *find_mad_agent(
  95					struct ib_mad_port_private *port_priv,
  96					const struct ib_mad_hdr *mad);
  97static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
  98				    struct ib_mad_private *mad);
  99static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
 100static void timeout_sends(struct work_struct *work);
 101static void local_completions(struct work_struct *work);
 102static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 103			      struct ib_mad_agent_private *agent_priv,
 104			      u8 mgmt_class);
 105static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 106			   struct ib_mad_agent_private *agent_priv);
 107static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
 108			      struct ib_wc *wc);
 109static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
 110
 111/*
  112 * Returns an ib_mad_port_private structure or NULL for a device/port
 113 * Assumes ib_mad_port_list_lock is being held
 114 */
 115static inline struct ib_mad_port_private *
 116__ib_get_mad_port(struct ib_device *device, u32 port_num)
 117{
 118	struct ib_mad_port_private *entry;
 119
 120	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
 121		if (entry->device == device && entry->port_num == port_num)
 122			return entry;
 123	}
 124	return NULL;
 125}
 126
 127/*
  128 * Wrapper function to return an ib_mad_port_private structure or NULL
 129 * for a device/port
 130 */
 131static inline struct ib_mad_port_private *
 132ib_get_mad_port(struct ib_device *device, u32 port_num)
 133{
 134	struct ib_mad_port_private *entry;
 135	unsigned long flags;
 136
 137	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 138	entry = __ib_get_mad_port(device, port_num);
 139	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 140
 141	return entry;
 142}
 143
 144static inline u8 convert_mgmt_class(u8 mgmt_class)
 145{
 146	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
 147	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
 148		0 : mgmt_class;
 149}
 150
 151static int get_spl_qp_index(enum ib_qp_type qp_type)
 152{
 153	switch (qp_type) {
 154	case IB_QPT_SMI:
 155		return 0;
 156	case IB_QPT_GSI:
 157		return 1;
 158	default:
 159		return -1;
 160	}
 161}
 162
 163static int vendor_class_index(u8 mgmt_class)
 164{
 165	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
 166}
 167
 168static int is_vendor_class(u8 mgmt_class)
 169{
 170	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
 171	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
 172		return 0;
 173	return 1;
 174}
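/*
 * Example: vendor range 2 covers management classes 0x30 through 0x4f,
 * so is_vendor_class(0x43) returns 1 and vendor_class_index(0x43) is
 * 0x13, while a class outside that window (e.g. IB_MGMT_CLASS_SUBN_ADM)
 * is handled through the regular class tables.
 */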
 175
 176static int is_vendor_oui(char *oui)
 177{
 178	if (oui[0] || oui[1] || oui[2])
 179		return 1;
 180	return 0;
 181}
 182
 183static int is_vendor_method_in_use(
 184		struct ib_mad_mgmt_vendor_class *vendor_class,
 185		struct ib_mad_reg_req *mad_reg_req)
 186{
 187	struct ib_mad_mgmt_method_table *method;
 188	int i;
 189
 190	for (i = 0; i < MAX_MGMT_OUI; i++) {
 191		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
 192			method = vendor_class->method_table[i];
 193			if (method) {
 194				if (method_in_use(&method, mad_reg_req))
 195					return 1;
 196				else
 197					break;
 198			}
 199		}
 200	}
 201	return 0;
 202}
 203
 204int ib_response_mad(const struct ib_mad_hdr *hdr)
 205{
 206	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
 207		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
 208		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
 209		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
 210}
 211EXPORT_SYMBOL(ib_response_mad);
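/*
 * Example: a GetResp MAD (method 0x81) has IB_MGMT_METHOD_RESP (0x80)
 * set and is treated as a response, as is a TrapRepress; a plain Get
 * (0x01) or Set (0x02) is not, unless it is a BM MAD whose attr_mod
 * response bit is set.
 */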
 212
 213/*
 214 * ib_register_mad_agent - Register to send/receive MADs
 215 *
 216 * Context: Process context.
 217 */
 218struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 219					   u32 port_num,
 220					   enum ib_qp_type qp_type,
 221					   struct ib_mad_reg_req *mad_reg_req,
 222					   u8 rmpp_version,
 223					   ib_mad_send_handler send_handler,
 224					   ib_mad_recv_handler recv_handler,
 225					   void *context,
 226					   u32 registration_flags)
 227{
 228	struct ib_mad_port_private *port_priv;
 229	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
 230	struct ib_mad_agent_private *mad_agent_priv;
 231	struct ib_mad_reg_req *reg_req = NULL;
 232	struct ib_mad_mgmt_class_table *class;
 233	struct ib_mad_mgmt_vendor_class_table *vendor;
 234	struct ib_mad_mgmt_vendor_class *vendor_class;
 235	struct ib_mad_mgmt_method_table *method;
 236	int ret2, qpn;
 237	u8 mgmt_class, vclass;
 238
 239	if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
 240	    (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
 241		return ERR_PTR(-EPROTONOSUPPORT);
 242
 243	/* Validate parameters */
 244	qpn = get_spl_qp_index(qp_type);
 245	if (qpn == -1) {
 246		dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
 247				    __func__, qp_type);
 248		goto error1;
 249	}
 250
 251	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
 252		dev_dbg_ratelimited(&device->dev,
 253				    "%s: invalid RMPP Version %u\n",
 254				    __func__, rmpp_version);
 255		goto error1;
 256	}
 257
 258	/* Validate MAD registration request if supplied */
 259	if (mad_reg_req) {
 260		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
 261			dev_dbg_ratelimited(&device->dev,
 262					    "%s: invalid Class Version %u\n",
 263					    __func__,
 264					    mad_reg_req->mgmt_class_version);
 265			goto error1;
 266		}
 267		if (!recv_handler) {
 268			dev_dbg_ratelimited(&device->dev,
 269					    "%s: no recv_handler\n", __func__);
 270			goto error1;
 271		}
 272		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
 273			/*
 274			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
 275			 * one in this range currently allowed
 276			 */
 277			if (mad_reg_req->mgmt_class !=
 278			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
 279				dev_dbg_ratelimited(&device->dev,
 280					"%s: Invalid Mgmt Class 0x%x\n",
 281					__func__, mad_reg_req->mgmt_class);
 282				goto error1;
 283			}
 284		} else if (mad_reg_req->mgmt_class == 0) {
 285			/*
 286			 * Class 0 is reserved in IBA and is used for
 287			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 288			 */
 289			dev_dbg_ratelimited(&device->dev,
 290					    "%s: Invalid Mgmt Class 0\n",
 291					    __func__);
 292			goto error1;
 293		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
 294			/*
 295			 * If class is in "new" vendor range,
 296			 * ensure supplied OUI is not zero
 297			 */
 298			if (!is_vendor_oui(mad_reg_req->oui)) {
 299				dev_dbg_ratelimited(&device->dev,
 300					"%s: No OUI specified for class 0x%x\n",
 301					__func__,
 302					mad_reg_req->mgmt_class);
 303				goto error1;
 304			}
 305		}
 306		/* Make sure class supplied is consistent with RMPP */
 307		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
 308			if (rmpp_version) {
 309				dev_dbg_ratelimited(&device->dev,
 310					"%s: RMPP version for non-RMPP class 0x%x\n",
 311					__func__, mad_reg_req->mgmt_class);
 312				goto error1;
 313			}
 314		}
 315
 316		/* Make sure class supplied is consistent with QP type */
 317		if (qp_type == IB_QPT_SMI) {
 318			if ((mad_reg_req->mgmt_class !=
 319					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
 320			    (mad_reg_req->mgmt_class !=
 321					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
 322				dev_dbg_ratelimited(&device->dev,
 323					"%s: Invalid SM QP type: class 0x%x\n",
 324					__func__, mad_reg_req->mgmt_class);
 325				goto error1;
 326			}
 327		} else {
 328			if ((mad_reg_req->mgmt_class ==
 329					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
 330			    (mad_reg_req->mgmt_class ==
 331					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
 332				dev_dbg_ratelimited(&device->dev,
 333					"%s: Invalid GS QP type: class 0x%x\n",
 334					__func__, mad_reg_req->mgmt_class);
 335				goto error1;
 336			}
 337		}
 338	} else {
 339		/* No registration request supplied */
 340		if (!send_handler)
 341			goto error1;
 342		if (registration_flags & IB_MAD_USER_RMPP)
 343			goto error1;
 344	}
 345
 346	/* Validate device and port */
 347	port_priv = ib_get_mad_port(device, port_num);
 348	if (!port_priv) {
 349		dev_dbg_ratelimited(&device->dev, "%s: Invalid port %u\n",
 350				    __func__, port_num);
 351		ret = ERR_PTR(-ENODEV);
 352		goto error1;
 353	}
 354
 355	/* Verify the QP requested is supported. For example, Ethernet devices
 356	 * will not have QP0.
 357	 */
 358	if (!port_priv->qp_info[qpn].qp) {
 359		dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
 360				    __func__, qpn);
 361		ret = ERR_PTR(-EPROTONOSUPPORT);
 362		goto error1;
 363	}
 364
 365	/* Allocate structures */
 366	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
 367	if (!mad_agent_priv) {
 368		ret = ERR_PTR(-ENOMEM);
 369		goto error1;
 370	}
 371
 372	if (mad_reg_req) {
 373		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
 374		if (!reg_req) {
 375			ret = ERR_PTR(-ENOMEM);
 376			goto error3;
 377		}
 378	}
 379
 380	/* Now, fill in the various structures */
 381	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
 382	mad_agent_priv->reg_req = reg_req;
 383	mad_agent_priv->agent.rmpp_version = rmpp_version;
 384	mad_agent_priv->agent.device = device;
 385	mad_agent_priv->agent.recv_handler = recv_handler;
 386	mad_agent_priv->agent.send_handler = send_handler;
 387	mad_agent_priv->agent.context = context;
 388	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
 389	mad_agent_priv->agent.port_num = port_num;
 390	mad_agent_priv->agent.flags = registration_flags;
 391	spin_lock_init(&mad_agent_priv->lock);
 392	INIT_LIST_HEAD(&mad_agent_priv->send_list);
 393	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
 394	INIT_LIST_HEAD(&mad_agent_priv->done_list);
 395	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
 396	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
 397	INIT_LIST_HEAD(&mad_agent_priv->local_list);
 398	INIT_WORK(&mad_agent_priv->local_work, local_completions);
 399	refcount_set(&mad_agent_priv->refcount, 1);
 400	init_completion(&mad_agent_priv->comp);
 401
 402	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
 403	if (ret2) {
 404		ret = ERR_PTR(ret2);
 405		goto error4;
 406	}
 407
 408	/*
 409	 * The mlx4 driver uses the top byte to distinguish which virtual
 410	 * function generated the MAD, so we must avoid using it.
 411	 */
 412	ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
 413			mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
 414			&ib_mad_client_next, GFP_KERNEL);
 415	if (ret2 < 0) {
 416		ret = ERR_PTR(ret2);
 417		goto error5;
 418	}
 419
 420	/*
 421	 * Make sure MAD registration (if supplied)
  422	 * is non-overlapping with any existing ones
 423	 */
 424	spin_lock_irq(&port_priv->reg_lock);
 425	if (mad_reg_req) {
 426		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
 427		if (!is_vendor_class(mgmt_class)) {
 428			class = port_priv->version[mad_reg_req->
 429						   mgmt_class_version].class;
 430			if (class) {
 431				method = class->method_table[mgmt_class];
 432				if (method) {
 433					if (method_in_use(&method,
 434							   mad_reg_req))
 435						goto error6;
 436				}
 437			}
 438			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
 439						  mgmt_class);
 440		} else {
 441			/* "New" vendor class range */
 442			vendor = port_priv->version[mad_reg_req->
 443						    mgmt_class_version].vendor;
 444			if (vendor) {
 445				vclass = vendor_class_index(mgmt_class);
 446				vendor_class = vendor->vendor_class[vclass];
 447				if (vendor_class) {
 448					if (is_vendor_method_in_use(
 449							vendor_class,
 450							mad_reg_req))
 451						goto error6;
 452				}
 453			}
 454			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
 455		}
 456		if (ret2) {
 457			ret = ERR_PTR(ret2);
 458			goto error6;
 459		}
 460	}
 461	spin_unlock_irq(&port_priv->reg_lock);
 462
 463	trace_ib_mad_create_agent(mad_agent_priv);
 464	return &mad_agent_priv->agent;
 465error6:
 466	spin_unlock_irq(&port_priv->reg_lock);
 467	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 468error5:
 469	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 470error4:
 471	kfree(reg_req);
 472error3:
 473	kfree(mad_agent_priv);
 474error1:
 475	return ret;
 476}
 477EXPORT_SYMBOL(ib_register_mad_agent);
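/*
 * Illustrative usage (a sketch, not lifted from any in-tree caller; the
 * device, port_num, handlers and ctx below are placeholders): a GSI
 * client that wants to receive Performance Management Get requests
 * could do roughly:
 *
 *	struct ib_mad_reg_req req = {};
 *	struct ib_mad_agent *agent;
 *
 *	req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 *	req.mgmt_class_version = 1;
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0, send_handler, recv_handler, ctx, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	ib_unregister_mad_agent(agent);
 */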
 478
 479static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 480{
 481	if (refcount_dec_and_test(&mad_agent_priv->refcount))
 482		complete(&mad_agent_priv->comp);
 483}
 484
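/*
 * Lifetime note: an agent starts with a refcount of 1, and every
 * in-flight send, queued local completion and matched receive takes an
 * extra reference.  deref_mad_agent() signals 'comp' only when the last
 * reference is dropped, which is what lets unregister_mad_agent() below
 * wait for all outstanding work before freeing the structure.
 */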
 485static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 486{
 487	struct ib_mad_port_private *port_priv;
 488
 489	/* Note that we could still be handling received MADs */
 490	trace_ib_mad_unregister_agent(mad_agent_priv);
 491
 492	/*
 493	 * Canceling all sends results in dropping received response
 494	 * MADs, preventing us from queuing additional work
 495	 */
 496	cancel_mads(mad_agent_priv);
 497	port_priv = mad_agent_priv->qp_info->port_priv;
 498	cancel_delayed_work(&mad_agent_priv->timed_work);
 499
 500	spin_lock_irq(&port_priv->reg_lock);
 501	remove_mad_reg_req(mad_agent_priv);
 502	spin_unlock_irq(&port_priv->reg_lock);
 503	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 504
 505	flush_workqueue(port_priv->wq);
 506
 507	deref_mad_agent(mad_agent_priv);
 508	wait_for_completion(&mad_agent_priv->comp);
 509	ib_cancel_rmpp_recvs(mad_agent_priv);
 510
 511	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 512
 513	kfree(mad_agent_priv->reg_req);
 514	kfree_rcu(mad_agent_priv, rcu);
 515}
 516
 517/*
 518 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 519 *
 520 * Context: Process context.
 521 */
 522void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 523{
 524	struct ib_mad_agent_private *mad_agent_priv;
 525
 526	mad_agent_priv = container_of(mad_agent,
 527				      struct ib_mad_agent_private,
 528				      agent);
 529	unregister_mad_agent(mad_agent_priv);
 530}
 531EXPORT_SYMBOL(ib_unregister_mad_agent);
 532
 533static void dequeue_mad(struct ib_mad_list_head *mad_list)
 534{
 535	struct ib_mad_queue *mad_queue;
 536	unsigned long flags;
 537
 538	mad_queue = mad_list->mad_queue;
 539	spin_lock_irqsave(&mad_queue->lock, flags);
 540	list_del(&mad_list->list);
 541	mad_queue->count--;
 542	spin_unlock_irqrestore(&mad_queue->lock, flags);
 543}
 544
 545static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
 546		u16 pkey_index, u32 port_num, struct ib_wc *wc)
 547{
 548	memset(wc, 0, sizeof *wc);
 549	wc->wr_cqe = cqe;
 550	wc->status = IB_WC_SUCCESS;
 551	wc->opcode = IB_WC_RECV;
 552	wc->pkey_index = pkey_index;
 553	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
 554	wc->src_qp = IB_QP0;
 555	wc->qp = qp;
 556	wc->slid = slid;
 557	wc->sl = 0;
 558	wc->dlid_path_bits = 0;
 559	wc->port_num = port_num;
 560}
 561
 562static size_t mad_priv_size(const struct ib_mad_private *mp)
 563{
 564	return sizeof(struct ib_mad_private) + mp->mad_size;
 565}
 566
 567static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
 568{
 569	size_t size = sizeof(struct ib_mad_private) + mad_size;
 570	struct ib_mad_private *ret = kzalloc(size, flags);
 571
 572	if (ret)
 573		ret->mad_size = mad_size;
 574
 575	return ret;
 576}
 577
 578static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
 579{
 580	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
 581}
 582
 583static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
 584{
 585	return sizeof(struct ib_grh) + mp->mad_size;
 586}
 587
 588/*
 589 * Return 0 if SMP is to be sent
 590 * Return 1 if SMP was consumed locally (whether or not solicited)
 591 * Return < 0 if error
 592 */
 593static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 594				  struct ib_mad_send_wr_private *mad_send_wr)
 595{
 596	int ret = 0;
 597	struct ib_smp *smp = mad_send_wr->send_buf.mad;
 598	struct opa_smp *opa_smp = (struct opa_smp *)smp;
 599	unsigned long flags;
 600	struct ib_mad_local_private *local;
 601	struct ib_mad_private *mad_priv;
 602	struct ib_mad_port_private *port_priv;
 603	struct ib_mad_agent_private *recv_mad_agent = NULL;
 604	struct ib_device *device = mad_agent_priv->agent.device;
 605	u32 port_num;
 606	struct ib_wc mad_wc;
 607	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
 608	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
 609	u16 out_mad_pkey_index = 0;
 610	u16 drslid;
 611	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
 612				    mad_agent_priv->qp_info->port_priv->port_num);
 613
 614	if (rdma_cap_ib_switch(device) &&
 615	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
 616		port_num = send_wr->port_num;
 617	else
 618		port_num = mad_agent_priv->agent.port_num;
 619
 620	/*
 621	 * Directed route handling starts if the initial LID routed part of
 622	 * a request or the ending LID routed part of a response is empty.
 623	 * If we are at the start of the LID routed part, don't update the
 624	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
 625	 */
 626	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
 627		u32 opa_drslid;
 628
 629		trace_ib_mad_handle_out_opa_smi(opa_smp);
 630
 631		if ((opa_get_smp_direction(opa_smp)
 632		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
 633		     OPA_LID_PERMISSIVE &&
 634		     opa_smi_handle_dr_smp_send(opa_smp,
 635						rdma_cap_ib_switch(device),
 636						port_num) == IB_SMI_DISCARD) {
 637			ret = -EINVAL;
 638			dev_err(&device->dev, "OPA Invalid directed route\n");
 639			goto out;
 640		}
 641		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
 642		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
 643		    opa_drslid & 0xffff0000) {
 644			ret = -EINVAL;
 645			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
 646			       opa_drslid);
 647			goto out;
 648		}
 649		drslid = (u16)(opa_drslid & 0x0000ffff);
 650
 651		/* Check to post send on QP or process locally */
 652		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
 653		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
 654			goto out;
 655	} else {
 656		trace_ib_mad_handle_out_ib_smi(smp);
 657
 658		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
 659		     IB_LID_PERMISSIVE &&
 660		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
 661		     IB_SMI_DISCARD) {
 662			ret = -EINVAL;
 663			dev_err(&device->dev, "Invalid directed route\n");
 664			goto out;
 665		}
 666		drslid = be16_to_cpu(smp->dr_slid);
 667
 668		/* Check to post send on QP or process locally */
 669		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
 670		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
 671			goto out;
 672	}
 673
 674	local = kmalloc(sizeof *local, GFP_ATOMIC);
 675	if (!local) {
 676		ret = -ENOMEM;
 677		goto out;
 678	}
 679	local->mad_priv = NULL;
 680	local->recv_mad_agent = NULL;
 681	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
 682	if (!mad_priv) {
 683		ret = -ENOMEM;
 684		kfree(local);
 685		goto out;
 686	}
 687
 688	build_smp_wc(mad_agent_priv->agent.qp,
 689		     send_wr->wr.wr_cqe, drslid,
 690		     send_wr->pkey_index,
 691		     send_wr->port_num, &mad_wc);
 692
 693	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
 694		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
 695					+ mad_send_wr->send_buf.data_len
 696					+ sizeof(struct ib_grh);
 697	}
 698
 699	/* No GRH for DR SMP */
 700	ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
 701				      (const struct ib_mad *)smp,
 702				      (struct ib_mad *)mad_priv->mad, &mad_size,
 703				      &out_mad_pkey_index);
 704	switch (ret) {
 705	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
 706		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
 707		    mad_agent_priv->agent.recv_handler) {
 708			local->mad_priv = mad_priv;
 709			local->recv_mad_agent = mad_agent_priv;
 710			/*
 711			 * Reference MAD agent until receive
 712			 * side of local completion handled
 713			 */
 714			refcount_inc(&mad_agent_priv->refcount);
 715		} else
 716			kfree(mad_priv);
 717		break;
 718	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
 719		kfree(mad_priv);
 720		break;
 721	case IB_MAD_RESULT_SUCCESS:
 722		/* Treat like an incoming receive MAD */
 723		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
 724					    mad_agent_priv->agent.port_num);
 725		if (port_priv) {
 726			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
 727			recv_mad_agent = find_mad_agent(port_priv,
 728						        (const struct ib_mad_hdr *)mad_priv->mad);
 729		}
 730		if (!port_priv || !recv_mad_agent) {
 731			/*
 732			 * No receiving agent so drop packet and
 733			 * generate send completion.
 734			 */
 735			kfree(mad_priv);
 736			break;
 737		}
 738		local->mad_priv = mad_priv;
 739		local->recv_mad_agent = recv_mad_agent;
 740		break;
 741	default:
 742		kfree(mad_priv);
 743		kfree(local);
 744		ret = -EINVAL;
 745		goto out;
 746	}
 747
 748	local->mad_send_wr = mad_send_wr;
 749	if (opa) {
 750		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
 751		local->return_wc_byte_len = mad_size;
 752	}
 753	/* Reference MAD agent until send side of local completion handled */
 754	refcount_inc(&mad_agent_priv->refcount);
 755	/* Queue local completion to local list */
 756	spin_lock_irqsave(&mad_agent_priv->lock, flags);
 757	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
 758	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 759	queue_work(mad_agent_priv->qp_info->port_priv->wq,
 760		   &mad_agent_priv->local_work);
 761	ret = 1;
 762out:
 763	return ret;
 764}
 765
 766static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
 767{
 768	int seg_size, pad;
 769
 770	seg_size = mad_size - hdr_len;
 771	if (data_len && seg_size) {
 772		pad = seg_size - data_len % seg_size;
 773		return pad == seg_size ? 0 : pad;
 774	} else
 775		return seg_size;
 776}
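/*
 * Worked example for get_pad_size() above, assuming a plain IB MAD
 * (mad_size 256) with an SA header (hdr_len IB_MGMT_SA_HDR = 56) and
 * data_len 300: seg_size = 200, 300 % 200 = 100, so pad = 100 and the
 * padded payload fills exactly two RMPP segments.
 */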
 777
 778static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
 779{
 780	struct ib_rmpp_segment *s, *t;
 781
 782	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
 783		list_del(&s->list);
 784		kfree(s);
 785	}
 786}
 787
 788static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 789				size_t mad_size, gfp_t gfp_mask)
 790{
 791	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
 792	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
 793	struct ib_rmpp_segment *seg = NULL;
 794	int left, seg_size, pad;
 795
 796	send_buf->seg_size = mad_size - send_buf->hdr_len;
 797	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
 798	seg_size = send_buf->seg_size;
 799	pad = send_wr->pad;
 800
 801	/* Allocate data segments. */
 802	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
 803		seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask);
 804		if (!seg) {
 805			free_send_rmpp_list(send_wr);
 806			return -ENOMEM;
 807		}
 808		seg->num = ++send_buf->seg_count;
 809		list_add_tail(&seg->list, &send_wr->rmpp_list);
 810	}
 811
 812	/* Zero any padding */
 813	if (pad)
 814		memset(seg->data + seg_size - pad, 0, pad);
 815
 816	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
 817					  agent.rmpp_version;
 818	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
 819	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
 820
 821	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
 822					struct ib_rmpp_segment, list);
 823	send_wr->last_ack_seg = send_wr->cur_seg;
 824	return 0;
 825}
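/*
 * Continuing the example above: data_len 300 with pad 100 allocates two
 * seg_size-byte segments, zeroes the final 100 padding bytes of the last
 * one, and marks the RMPP header active so the RMPP layer will send the
 * segments individually.
 */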
 826
 827int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
 828{
 829	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
 830}
 831EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
 832
 833struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
 834					   u32 remote_qpn, u16 pkey_index,
 835					   int rmpp_active, int hdr_len,
 836					   int data_len, gfp_t gfp_mask,
 837					   u8 base_version)
 838{
 839	struct ib_mad_agent_private *mad_agent_priv;
 840	struct ib_mad_send_wr_private *mad_send_wr;
 841	int pad, message_size, ret, size;
 842	void *buf;
 843	size_t mad_size;
 844	bool opa;
 845
 846	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
 847				      agent);
 848
 849	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
 850
 851	if (opa && base_version == OPA_MGMT_BASE_VERSION)
 852		mad_size = sizeof(struct opa_mad);
 853	else
 854		mad_size = sizeof(struct ib_mad);
 855
 856	pad = get_pad_size(hdr_len, data_len, mad_size);
 857	message_size = hdr_len + data_len + pad;
 858
 859	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
 860		if (!rmpp_active && message_size > mad_size)
 861			return ERR_PTR(-EINVAL);
 862	} else
 863		if (rmpp_active || message_size > mad_size)
 864			return ERR_PTR(-EINVAL);
 865
 866	size = rmpp_active ? hdr_len : mad_size;
 867	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
 868	if (!buf)
 869		return ERR_PTR(-ENOMEM);
 870
 871	mad_send_wr = buf + size;
 872	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
 873	mad_send_wr->send_buf.mad = buf;
 874	mad_send_wr->send_buf.hdr_len = hdr_len;
 875	mad_send_wr->send_buf.data_len = data_len;
 876	mad_send_wr->pad = pad;
 877
 878	mad_send_wr->mad_agent_priv = mad_agent_priv;
 879	mad_send_wr->sg_list[0].length = hdr_len;
 880	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
 881
 882	/* OPA MADs don't have to be the full 2048 bytes */
 883	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
 884	    data_len < mad_size - hdr_len)
 885		mad_send_wr->sg_list[1].length = data_len;
 886	else
 887		mad_send_wr->sg_list[1].length = mad_size - hdr_len;
 888
 889	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
 890
 891	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
 892
 893	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
 894	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
 895	mad_send_wr->send_wr.wr.num_sge = 2;
 896	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
 897	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
 898	mad_send_wr->send_wr.remote_qpn = remote_qpn;
 899	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
 900	mad_send_wr->send_wr.pkey_index = pkey_index;
 901
 902	if (rmpp_active) {
 903		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
 904		if (ret) {
 905			kfree(buf);
 906			return ERR_PTR(ret);
 907		}
 908	}
 909
 910	mad_send_wr->send_buf.mad_agent = mad_agent;
 911	refcount_inc(&mad_agent_priv->refcount);
 912	return &mad_send_wr->send_buf;
 913}
 914EXPORT_SYMBOL(ib_create_send_mad);
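/*
 * Illustrative call, modelled loosely on how the SA query code drives
 * this API (agent, ah and pkey_index here are placeholders):
 *
 *	msg = ib_create_send_mad(agent, 1, pkey_index, 0, IB_MGMT_SA_HDR,
 *				 IB_MGMT_SA_DATA, GFP_KERNEL,
 *				 IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret)
 *		ib_free_send_mad(msg);
 */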
 915
 916int ib_get_mad_data_offset(u8 mgmt_class)
 917{
 918	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
 919		return IB_MGMT_SA_HDR;
 920	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
 921		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
 922		 (mgmt_class == IB_MGMT_CLASS_BIS))
 923		return IB_MGMT_DEVICE_HDR;
 924	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
 925		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
 926		return IB_MGMT_VENDOR_HDR;
 927	else
 928		return IB_MGMT_MAD_HDR;
 929}
 930EXPORT_SYMBOL(ib_get_mad_data_offset);
 931
 932int ib_is_mad_class_rmpp(u8 mgmt_class)
 933{
 934	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
 935	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
 936	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
 937	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
 938	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
 939	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
 940		return 1;
 941	return 0;
 942}
 943EXPORT_SYMBOL(ib_is_mad_class_rmpp);
 944
 945void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
 946{
 947	struct ib_mad_send_wr_private *mad_send_wr;
 948	struct list_head *list;
 949
 950	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
 951				   send_buf);
 952	list = &mad_send_wr->cur_seg->list;
 953
 954	if (mad_send_wr->cur_seg->num < seg_num) {
 955		list_for_each_entry(mad_send_wr->cur_seg, list, list)
 956			if (mad_send_wr->cur_seg->num == seg_num)
 957				break;
 958	} else if (mad_send_wr->cur_seg->num > seg_num) {
 959		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
 960			if (mad_send_wr->cur_seg->num == seg_num)
 961				break;
 962	}
 963	return mad_send_wr->cur_seg->data;
 964}
 965EXPORT_SYMBOL(ib_get_rmpp_segment);
 966
 967static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
 968{
 969	if (mad_send_wr->send_buf.seg_count)
 970		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
 971					   mad_send_wr->seg_num);
 972	else
 973		return mad_send_wr->send_buf.mad +
 974		       mad_send_wr->send_buf.hdr_len;
 975}
 976
 977void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
 978{
 979	struct ib_mad_agent_private *mad_agent_priv;
 980	struct ib_mad_send_wr_private *mad_send_wr;
 981
 982	mad_agent_priv = container_of(send_buf->mad_agent,
 983				      struct ib_mad_agent_private, agent);
 984	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
 985				   send_buf);
 986
 987	free_send_rmpp_list(mad_send_wr);
 988	kfree(send_buf->mad);
 989	deref_mad_agent(mad_agent_priv);
 990}
 991EXPORT_SYMBOL(ib_free_send_mad);
 992
 993int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 994{
 995	struct ib_mad_qp_info *qp_info;
 996	struct list_head *list;
 997	struct ib_mad_agent *mad_agent;
 998	struct ib_sge *sge;
 999	unsigned long flags;
1000	int ret;
1001
1002	/* Set WR ID to find mad_send_wr upon completion */
1003	qp_info = mad_send_wr->mad_agent_priv->qp_info;
1004	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1005	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1006	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1007
1008	mad_agent = mad_send_wr->send_buf.mad_agent;
1009	sge = mad_send_wr->sg_list;
1010	sge[0].addr = ib_dma_map_single(mad_agent->device,
1011					mad_send_wr->send_buf.mad,
1012					sge[0].length,
1013					DMA_TO_DEVICE);
1014	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1015		return -ENOMEM;
1016
1017	mad_send_wr->header_mapping = sge[0].addr;
1018
1019	sge[1].addr = ib_dma_map_single(mad_agent->device,
1020					ib_get_payload(mad_send_wr),
1021					sge[1].length,
1022					DMA_TO_DEVICE);
1023	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1024		ib_dma_unmap_single(mad_agent->device,
1025				    mad_send_wr->header_mapping,
1026				    sge[0].length, DMA_TO_DEVICE);
1027		return -ENOMEM;
1028	}
1029	mad_send_wr->payload_mapping = sge[1].addr;
1030
1031	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1032	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1033		trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
1034		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1035				   NULL);
1036		list = &qp_info->send_queue.list;
1037	} else {
1038		ret = 0;
1039		list = &qp_info->overflow_list;
1040	}
1041
1042	if (!ret) {
1043		qp_info->send_queue.count++;
1044		list_add_tail(&mad_send_wr->mad_list.list, list);
1045	}
1046	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1047	if (ret) {
1048		ib_dma_unmap_single(mad_agent->device,
1049				    mad_send_wr->header_mapping,
1050				    sge[0].length, DMA_TO_DEVICE);
1051		ib_dma_unmap_single(mad_agent->device,
1052				    mad_send_wr->payload_mapping,
1053				    sge[1].length, DMA_TO_DEVICE);
1054	}
1055	return ret;
1056}
1057
1058/*
1059 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1060 *  with the registered client
1061 */
1062int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1063		     struct ib_mad_send_buf **bad_send_buf)
1064{
1065	struct ib_mad_agent_private *mad_agent_priv;
1066	struct ib_mad_send_buf *next_send_buf;
1067	struct ib_mad_send_wr_private *mad_send_wr;
1068	unsigned long flags;
1069	int ret = -EINVAL;
1070
1071	/* Walk list of send WRs and post each on send list */
1072	for (; send_buf; send_buf = next_send_buf) {
1073		mad_send_wr = container_of(send_buf,
1074					   struct ib_mad_send_wr_private,
1075					   send_buf);
1076		mad_agent_priv = mad_send_wr->mad_agent_priv;
1077
1078		ret = ib_mad_enforce_security(mad_agent_priv,
1079					      mad_send_wr->send_wr.pkey_index);
1080		if (ret)
1081			goto error;
1082
1083		if (!send_buf->mad_agent->send_handler ||
1084		    (send_buf->timeout_ms &&
1085		     !send_buf->mad_agent->recv_handler)) {
1086			ret = -EINVAL;
1087			goto error;
1088		}
1089
1090		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1091			if (mad_agent_priv->agent.rmpp_version) {
1092				ret = -EINVAL;
1093				goto error;
1094			}
1095		}
1096
1097		/*
1098		 * Save pointer to next work request to post in case the
1099		 * current one completes, and the user modifies the work
1100		 * request associated with the completion
1101		 */
1102		next_send_buf = send_buf->next;
1103		mad_send_wr->send_wr.ah = send_buf->ah;
1104
1105		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1106		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1107			ret = handle_outgoing_dr_smp(mad_agent_priv,
1108						     mad_send_wr);
1109			if (ret < 0)		/* error */
1110				goto error;
1111			else if (ret == 1)	/* locally consumed */
1112				continue;
1113		}
1114
1115		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1116		/* Timeout will be updated after send completes */
1117		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1118		mad_send_wr->max_retries = send_buf->retries;
1119		mad_send_wr->retries_left = send_buf->retries;
1120		send_buf->retries = 0;
1121		/* Reference for work request to QP + response */
1122		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1123		mad_send_wr->status = IB_WC_SUCCESS;
1124
1125		/* Reference MAD agent until send completes */
1126		refcount_inc(&mad_agent_priv->refcount);
1127		spin_lock_irqsave(&mad_agent_priv->lock, flags);
1128		list_add_tail(&mad_send_wr->agent_list,
1129			      &mad_agent_priv->send_list);
1130		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1131
1132		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1133			ret = ib_send_rmpp_mad(mad_send_wr);
1134			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1135				ret = ib_send_mad(mad_send_wr);
1136		} else
1137			ret = ib_send_mad(mad_send_wr);
1138		if (ret < 0) {
1139			/* Fail send request */
1140			spin_lock_irqsave(&mad_agent_priv->lock, flags);
1141			list_del(&mad_send_wr->agent_list);
1142			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1143			deref_mad_agent(mad_agent_priv);
1144			goto error;
1145		}
1146	}
1147	return 0;
1148error:
1149	if (bad_send_buf)
1150		*bad_send_buf = send_buf;
1151	return ret;
1152}
1153EXPORT_SYMBOL(ib_post_send_mad);
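/*
 * Note for callers: multiple buffers may be chained through
 * send_buf->next and are posted in order; on the first failure the walk
 * stops, already-posted buffers are not unwound, and *bad_send_buf (if
 * supplied) points at the buffer that failed.
 */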
1154
1155/*
1156 * ib_free_recv_mad - Returns data buffers used to receive
1157 *  a MAD to the access layer
1158 */
1159void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1160{
1161	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1162	struct ib_mad_private_header *mad_priv_hdr;
1163	struct ib_mad_private *priv;
1164	struct list_head free_list;
1165
1166	INIT_LIST_HEAD(&free_list);
1167	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1168
1169	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1170					&free_list, list) {
1171		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1172					   recv_buf);
1173		mad_priv_hdr = container_of(mad_recv_wc,
1174					    struct ib_mad_private_header,
1175					    recv_wc);
1176		priv = container_of(mad_priv_hdr, struct ib_mad_private,
1177				    header);
1178		kfree(priv);
1179	}
1180}
1181EXPORT_SYMBOL(ib_free_recv_mad);
1182
1183static int method_in_use(struct ib_mad_mgmt_method_table **method,
1184			 struct ib_mad_reg_req *mad_reg_req)
1185{
1186	int i;
1187
1188	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1189		if ((*method)->agent[i]) {
1190			pr_err("Method %d already in use\n", i);
1191			return -EINVAL;
1192		}
1193	}
1194	return 0;
1195}
1196
1197static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1198{
1199	/* Allocate management method table */
1200	*method = kzalloc(sizeof **method, GFP_ATOMIC);
1201	return (*method) ? 0 : (-ENOMEM);
1202}
1203
1204/*
1205 * Check to see if there are any methods still in use
1206 */
1207static int check_method_table(struct ib_mad_mgmt_method_table *method)
1208{
1209	int i;
1210
1211	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1212		if (method->agent[i])
1213			return 1;
1214	return 0;
1215}
1216
1217/*
1218 * Check to see if there are any method tables for this class still in use
1219 */
1220static int check_class_table(struct ib_mad_mgmt_class_table *class)
1221{
1222	int i;
1223
1224	for (i = 0; i < MAX_MGMT_CLASS; i++)
1225		if (class->method_table[i])
1226			return 1;
1227	return 0;
1228}
1229
1230static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1231{
1232	int i;
1233
1234	for (i = 0; i < MAX_MGMT_OUI; i++)
1235		if (vendor_class->method_table[i])
1236			return 1;
1237	return 0;
1238}
1239
1240static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1241			   const char *oui)
1242{
1243	int i;
1244
1245	for (i = 0; i < MAX_MGMT_OUI; i++)
 1246		/* Is there a matching OUI for this vendor class? */
1247		if (!memcmp(vendor_class->oui[i], oui, 3))
1248			return i;
1249
1250	return -1;
1251}
1252
1253static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1254{
1255	int i;
1256
1257	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1258		if (vendor->vendor_class[i])
1259			return 1;
1260
1261	return 0;
1262}
1263
1264static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1265				     struct ib_mad_agent_private *agent)
1266{
1267	int i;
1268
1269	/* Remove any methods for this mad agent */
1270	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1271		if (method->agent[i] == agent)
1272			method->agent[i] = NULL;
1273}
1274
1275static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1276			      struct ib_mad_agent_private *agent_priv,
1277			      u8 mgmt_class)
1278{
1279	struct ib_mad_port_private *port_priv;
1280	struct ib_mad_mgmt_class_table **class;
1281	struct ib_mad_mgmt_method_table **method;
1282	int i, ret;
1283
1284	port_priv = agent_priv->qp_info->port_priv;
1285	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1286	if (!*class) {
1287		/* Allocate management class table for "new" class version */
1288		*class = kzalloc(sizeof **class, GFP_ATOMIC);
1289		if (!*class) {
1290			ret = -ENOMEM;
1291			goto error1;
1292		}
1293
1294		/* Allocate method table for this management class */
1295		method = &(*class)->method_table[mgmt_class];
1296		if ((ret = allocate_method_table(method)))
1297			goto error2;
1298	} else {
1299		method = &(*class)->method_table[mgmt_class];
1300		if (!*method) {
1301			/* Allocate method table for this management class */
1302			if ((ret = allocate_method_table(method)))
1303				goto error1;
1304		}
1305	}
1306
1307	/* Now, make sure methods are not already in use */
1308	if (method_in_use(method, mad_reg_req))
1309		goto error3;
1310
1311	/* Finally, add in methods being registered */
1312	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1313		(*method)->agent[i] = agent_priv;
1314
1315	return 0;
1316
1317error3:
1318	/* Remove any methods for this mad agent */
1319	remove_methods_mad_agent(*method, agent_priv);
1320	/* Now, check to see if there are any methods in use */
1321	if (!check_method_table(*method)) {
1322		/* If not, release management method table */
1323		kfree(*method);
1324		*method = NULL;
1325	}
1326	ret = -EINVAL;
1327	goto error1;
1328error2:
1329	kfree(*class);
1330	*class = NULL;
1331error1:
1332	return ret;
1333}
1334
1335static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1336			   struct ib_mad_agent_private *agent_priv)
1337{
1338	struct ib_mad_port_private *port_priv;
1339	struct ib_mad_mgmt_vendor_class_table **vendor_table;
1340	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1341	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1342	struct ib_mad_mgmt_method_table **method;
1343	int i, ret = -ENOMEM;
1344	u8 vclass;
1345
1346	/* "New" vendor (with OUI) class */
1347	vclass = vendor_class_index(mad_reg_req->mgmt_class);
1348	port_priv = agent_priv->qp_info->port_priv;
1349	vendor_table = &port_priv->version[
1350				mad_reg_req->mgmt_class_version].vendor;
1351	if (!*vendor_table) {
1352		/* Allocate mgmt vendor class table for "new" class version */
1353		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1354		if (!vendor)
1355			goto error1;
1356
1357		*vendor_table = vendor;
1358	}
1359	if (!(*vendor_table)->vendor_class[vclass]) {
1360		/* Allocate table for this management vendor class */
1361		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1362		if (!vendor_class)
1363			goto error2;
1364
1365		(*vendor_table)->vendor_class[vclass] = vendor_class;
1366	}
1367	for (i = 0; i < MAX_MGMT_OUI; i++) {
 1368		/* Is there a matching OUI for this vendor class? */
1369		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1370			    mad_reg_req->oui, 3)) {
1371			method = &(*vendor_table)->vendor_class[
1372						vclass]->method_table[i];
1373			if (!*method)
1374				goto error3;
1375			goto check_in_use;
1376		}
1377	}
1378	for (i = 0; i < MAX_MGMT_OUI; i++) {
1379		/* OUI slot available ? */
1380		if (!is_vendor_oui((*vendor_table)->vendor_class[
1381				vclass]->oui[i])) {
1382			method = &(*vendor_table)->vendor_class[
1383				vclass]->method_table[i];
1384			/* Allocate method table for this OUI */
1385			if (!*method) {
1386				ret = allocate_method_table(method);
1387				if (ret)
1388					goto error3;
1389			}
1390			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1391			       mad_reg_req->oui, 3);
1392			goto check_in_use;
1393		}
1394	}
1395	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1396	goto error3;
1397
1398check_in_use:
1399	/* Now, make sure methods are not already in use */
1400	if (method_in_use(method, mad_reg_req))
1401		goto error4;
1402
1403	/* Finally, add in methods being registered */
1404	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1405		(*method)->agent[i] = agent_priv;
1406
1407	return 0;
1408
1409error4:
1410	/* Remove any methods for this mad agent */
1411	remove_methods_mad_agent(*method, agent_priv);
1412	/* Now, check to see if there are any methods in use */
1413	if (!check_method_table(*method)) {
1414		/* If not, release management method table */
1415		kfree(*method);
1416		*method = NULL;
1417	}
1418	ret = -EINVAL;
1419error3:
1420	if (vendor_class) {
1421		(*vendor_table)->vendor_class[vclass] = NULL;
1422		kfree(vendor_class);
1423	}
1424error2:
1425	if (vendor) {
1426		*vendor_table = NULL;
1427		kfree(vendor);
1428	}
1429error1:
1430	return ret;
1431}
1432
1433static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1434{
1435	struct ib_mad_port_private *port_priv;
1436	struct ib_mad_mgmt_class_table *class;
1437	struct ib_mad_mgmt_method_table *method;
1438	struct ib_mad_mgmt_vendor_class_table *vendor;
1439	struct ib_mad_mgmt_vendor_class *vendor_class;
1440	int index;
1441	u8 mgmt_class;
1442
1443	/*
 1444	 * Was a MAD registration request supplied
 1445	 * with the original registration?
1446	 */
1447	if (!agent_priv->reg_req)
1448		goto out;
1449
1450	port_priv = agent_priv->qp_info->port_priv;
1451	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1452	class = port_priv->version[
1453			agent_priv->reg_req->mgmt_class_version].class;
1454	if (!class)
1455		goto vendor_check;
1456
1457	method = class->method_table[mgmt_class];
1458	if (method) {
1459		/* Remove any methods for this mad agent */
1460		remove_methods_mad_agent(method, agent_priv);
1461		/* Now, check to see if there are any methods still in use */
1462		if (!check_method_table(method)) {
1463			/* If not, release management method table */
1464			kfree(method);
1465			class->method_table[mgmt_class] = NULL;
1466			/* Any management classes left ? */
1467			if (!check_class_table(class)) {
1468				/* If not, release management class table */
1469				kfree(class);
1470				port_priv->version[
1471					agent_priv->reg_req->
1472					mgmt_class_version].class = NULL;
1473			}
1474		}
1475	}
1476
1477vendor_check:
1478	if (!is_vendor_class(mgmt_class))
1479		goto out;
1480
1481	/* normalize mgmt_class to vendor range 2 */
1482	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1483	vendor = port_priv->version[
1484			agent_priv->reg_req->mgmt_class_version].vendor;
1485
1486	if (!vendor)
1487		goto out;
1488
1489	vendor_class = vendor->vendor_class[mgmt_class];
1490	if (vendor_class) {
1491		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1492		if (index < 0)
1493			goto out;
1494		method = vendor_class->method_table[index];
1495		if (method) {
1496			/* Remove any methods for this mad agent */
1497			remove_methods_mad_agent(method, agent_priv);
1498			/*
1499			 * Now, check to see if there are
1500			 * any methods still in use
1501			 */
1502			if (!check_method_table(method)) {
1503				/* If not, release management method table */
1504				kfree(method);
1505				vendor_class->method_table[index] = NULL;
1506				memset(vendor_class->oui[index], 0, 3);
1507				/* Any OUIs left ? */
1508				if (!check_vendor_class(vendor_class)) {
1509					/* If not, release vendor class table */
1510					kfree(vendor_class);
1511					vendor->vendor_class[mgmt_class] = NULL;
1512					/* Any other vendor classes left ? */
1513					if (!check_vendor_table(vendor)) {
1514						kfree(vendor);
1515						port_priv->version[
1516							agent_priv->reg_req->
1517							mgmt_class_version].
1518							vendor = NULL;
1519					}
1520				}
1521			}
1522		}
1523	}
1524
1525out:
1526	return;
1527}
1528
1529static struct ib_mad_agent_private *
1530find_mad_agent(struct ib_mad_port_private *port_priv,
1531	       const struct ib_mad_hdr *mad_hdr)
1532{
1533	struct ib_mad_agent_private *mad_agent = NULL;
1534	unsigned long flags;
1535
1536	if (ib_response_mad(mad_hdr)) {
1537		u32 hi_tid;
1538
1539		/*
1540		 * Routing is based on high 32 bits of transaction ID
1541		 * of MAD.
1542		 */
1543		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1544		rcu_read_lock();
1545		mad_agent = xa_load(&ib_mad_clients, hi_tid);
1546		if (mad_agent && !refcount_inc_not_zero(&mad_agent->refcount))
1547			mad_agent = NULL;
1548		rcu_read_unlock();
1549	} else {
1550		struct ib_mad_mgmt_class_table *class;
1551		struct ib_mad_mgmt_method_table *method;
1552		struct ib_mad_mgmt_vendor_class_table *vendor;
1553		struct ib_mad_mgmt_vendor_class *vendor_class;
1554		const struct ib_vendor_mad *vendor_mad;
1555		int index;
1556
1557		spin_lock_irqsave(&port_priv->reg_lock, flags);
1558		/*
1559		 * Routing is based on version, class, and method
1560		 * For "newer" vendor MADs, also based on OUI
1561		 */
1562		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1563			goto out;
1564		if (!is_vendor_class(mad_hdr->mgmt_class)) {
1565			class = port_priv->version[
1566					mad_hdr->class_version].class;
1567			if (!class)
1568				goto out;
1569			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1570			    ARRAY_SIZE(class->method_table))
1571				goto out;
1572			method = class->method_table[convert_mgmt_class(
1573							mad_hdr->mgmt_class)];
1574			if (method)
1575				mad_agent = method->agent[mad_hdr->method &
1576							  ~IB_MGMT_METHOD_RESP];
1577		} else {
1578			vendor = port_priv->version[
1579					mad_hdr->class_version].vendor;
1580			if (!vendor)
1581				goto out;
1582			vendor_class = vendor->vendor_class[vendor_class_index(
1583						mad_hdr->mgmt_class)];
1584			if (!vendor_class)
1585				goto out;
1586			/* Find matching OUI */
1587			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1588			index = find_vendor_oui(vendor_class, vendor_mad->oui);
1589			if (index == -1)
1590				goto out;
1591			method = vendor_class->method_table[index];
1592			if (method) {
1593				mad_agent = method->agent[mad_hdr->method &
1594							  ~IB_MGMT_METHOD_RESP];
1595			}
1596		}
1597		if (mad_agent)
1598			refcount_inc(&mad_agent->refcount);
1599out:
1600		spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1601	}
1602
1603	if (mad_agent && !mad_agent->agent.recv_handler) {
1604		dev_notice(&port_priv->device->dev,
1605			   "No receive handler for client %p on port %u\n",
1606			   &mad_agent->agent, port_priv->port_num);
1607		deref_mad_agent(mad_agent);
1608		mad_agent = NULL;
1609	}
1610
1611	return mad_agent;
1612}
1613
1614static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1615			const struct ib_mad_qp_info *qp_info,
1616			bool opa)
1617{
1618	int valid = 0;
1619	u32 qp_num = qp_info->qp->qp_num;
1620
1621	/* Make sure MAD base version is understood */
1622	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1623	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1624		pr_err("MAD received with unsupported base version %u %s\n",
1625		       mad_hdr->base_version, opa ? "(opa)" : "");
1626		goto out;
1627	}
1628
1629	/* Filter SMI packets sent to other than QP0 */
1630	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1631	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1632		if (qp_num == 0)
1633			valid = 1;
1634	} else {
1635		/* CM attributes other than ClassPortInfo only use Send method */
1636		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1637		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1638		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
1639			goto out;
1640		/* Filter GSI packets sent to QP0 */
1641		if (qp_num != 0)
1642			valid = 1;
1643	}
1644
1645out:
1646	return valid;
1647}
1648
1649static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1650			    const struct ib_mad_hdr *mad_hdr)
1651{
1652	struct ib_rmpp_mad *rmpp_mad;
1653
1654	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1655	return !mad_agent_priv->agent.rmpp_version ||
1656		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1657		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1658				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
1659		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1660}
1661
1662static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1663				     const struct ib_mad_recv_wc *rwc)
1664{
1665	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1666		rwc->recv_buf.mad->mad_hdr.mgmt_class;
1667}
1668
1669static inline int
1670rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1671		 const struct ib_mad_send_wr_private *wr,
1672		 const struct ib_mad_recv_wc *rwc)
1673{
1674	struct rdma_ah_attr attr;
1675	u8 send_resp, rcv_resp;
1676	union ib_gid sgid;
1677	struct ib_device *device = mad_agent_priv->agent.device;
1678	u32 port_num = mad_agent_priv->agent.port_num;
1679	u8 lmc;
1680	bool has_grh;
1681
1682	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1683	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1684
1685	if (send_resp == rcv_resp)
1686		/* both requests, or both responses. GIDs different */
1687		return 0;
1688
1689	if (rdma_query_ah(wr->send_buf.ah, &attr))
1690		/* Assume not equal, to avoid false positives. */
1691		return 0;
1692
1693	has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
1694	if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
1695		/* one has GID, other does not.  Assume different */
1696		return 0;
1697
1698	if (!send_resp && rcv_resp) {
1699		/* is request/response. */
1700		if (!has_grh) {
1701			if (ib_get_cached_lmc(device, port_num, &lmc))
1702				return 0;
1703			return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
1704					   rwc->wc->dlid_path_bits) &
1705					  ((1 << lmc) - 1)));
1706		} else {
1707			const struct ib_global_route *grh =
1708					rdma_ah_read_grh(&attr);
1709
1710			if (rdma_query_gid(device, port_num,
1711					   grh->sgid_index, &sgid))
1712				return 0;
1713			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1714				       16);
1715		}
1716	}
1717
1718	if (!has_grh)
1719		return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
1720	else
1721		return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
1722			       rwc->recv_buf.grh->sgid.raw,
1723			       16);
1724}
1725
1726static inline int is_direct(u8 class)
1727{
1728	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1729}
1730
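/*
 * Find the send work request that a received response completes.  The wait
 * list is searched first; the send list is searched as well because a
 * response can arrive before the send completion has been processed.  TID,
 * management class and (except for directed-route MADs) the peer's GID/LID
 * must all match.  Returns NULL for canceled or unknown requests.
 */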
1731struct ib_mad_send_wr_private*
1732ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1733		 const struct ib_mad_recv_wc *wc)
1734{
1735	struct ib_mad_send_wr_private *wr;
1736	const struct ib_mad_hdr *mad_hdr;
1737
1738	mad_hdr = &wc->recv_buf.mad->mad_hdr;
1739
1740	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1741		if ((wr->tid == mad_hdr->tid) &&
1742		    rcv_has_same_class(wr, wc) &&
1743		    /*
1744		     * Don't check GID for direct routed MADs.
1745		     * These might have permissive LIDs.
1746		     */
1747		    (is_direct(mad_hdr->mgmt_class) ||
1748		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
1749			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1750	}
1751
1752	/*
1753	 * It's possible to receive the response before we've
1754	 * been notified that the send has completed
1755	 */
1756	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1757		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1758		    wr->tid == mad_hdr->tid &&
1759		    wr->timeout &&
1760		    rcv_has_same_class(wr, wc) &&
1761		    /*
1762		     * Don't check GID for direct routed MADs.
1763		     * These might have permissive LIDs.
1764		     */
1765		    (is_direct(mad_hdr->mgmt_class) ||
1766		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
1767			/* Verify request has not been canceled */
1768			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1769	}
1770	return NULL;
1771}
1772
1773void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1774{
1775	mad_send_wr->timeout = 0;
1776	if (mad_send_wr->refcount == 1)
1777		list_move_tail(&mad_send_wr->agent_list,
1778			      &mad_send_wr->mad_agent_priv->done_list);
1779}
1780
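/*
 * Deliver a received MAD to its agent: enforce the pkey security check,
 * run RMPP reassembly for kernel RMPP agents, and for responses pair the
 * receive with the matching request - the response is handed to
 * recv_handler first and the corresponding send is completed afterwards.
 * Unmatched responses are dropped unless the agent does user-space RMPP.
 */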
1781static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1782				 struct ib_mad_recv_wc *mad_recv_wc)
1783{
1784	struct ib_mad_send_wr_private *mad_send_wr;
1785	struct ib_mad_send_wc mad_send_wc;
1786	unsigned long flags;
1787	int ret;
1788
1789	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1790	ret = ib_mad_enforce_security(mad_agent_priv,
1791				      mad_recv_wc->wc->pkey_index);
1792	if (ret) {
1793		ib_free_recv_mad(mad_recv_wc);
1794		deref_mad_agent(mad_agent_priv);
1795		return;
1796	}
1797
1798	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1799	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1800		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1801						      mad_recv_wc);
1802		if (!mad_recv_wc) {
1803			deref_mad_agent(mad_agent_priv);
1804			return;
1805		}
1806	}
1807
1808	/* Complete corresponding request */
1809	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1810		spin_lock_irqsave(&mad_agent_priv->lock, flags);
1811		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1812		if (!mad_send_wr) {
1813			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1814			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1815			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1816			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1817					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
1818				/* user-space RMPP is in effect
1819				 * and this is an active RMPP MAD
1820				 */
1821				mad_agent_priv->agent.recv_handler(
1822						&mad_agent_priv->agent, NULL,
1823						mad_recv_wc);
1824				deref_mad_agent(mad_agent_priv);
1825			} else {
1826				/* not user-space RMPP, revert to normal behavior and
1827				 * drop the MAD
1828				 */
1829				ib_free_recv_mad(mad_recv_wc);
1830				deref_mad_agent(mad_agent_priv);
1831				return;
1832			}
1833		} else {
1834			ib_mark_mad_done(mad_send_wr);
1835			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1836
1837			/* Defined behavior is to complete response before request */
1838			mad_agent_priv->agent.recv_handler(
1839					&mad_agent_priv->agent,
1840					&mad_send_wr->send_buf,
1841					mad_recv_wc);
1842			deref_mad_agent(mad_agent_priv);
1843
1844			mad_send_wc.status = IB_WC_SUCCESS;
1845			mad_send_wc.vendor_err = 0;
1846			mad_send_wc.send_buf = &mad_send_wr->send_buf;
1847			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1848		}
1849	} else {
1850		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
1851						   mad_recv_wc);
1852		deref_mad_agent(mad_agent_priv);
1853	}
1854}
1855
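/*
 * Process an incoming directed-route SMP: validate the hop pointer/count,
 * then either handle it on this port (IB_SMI_HANDLE), forward it out
 * another port when the device is a switch (after which it is dropped
 * locally), or discard it (IB_SMI_DISCARD).
 */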
1856static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
1857				     const struct ib_mad_qp_info *qp_info,
1858				     const struct ib_wc *wc,
1859				     u32 port_num,
1860				     struct ib_mad_private *recv,
1861				     struct ib_mad_private *response)
1862{
1863	enum smi_forward_action retsmi;
1864	struct ib_smp *smp = (struct ib_smp *)recv->mad;
1865
1866	trace_ib_mad_handle_ib_smi(smp);
1867
1868	if (smi_handle_dr_smp_recv(smp,
1869				   rdma_cap_ib_switch(port_priv->device),
1870				   port_num,
1871				   port_priv->device->phys_port_cnt) ==
1872				   IB_SMI_DISCARD)
1873		return IB_SMI_DISCARD;
1874
1875	retsmi = smi_check_forward_dr_smp(smp);
1876	if (retsmi == IB_SMI_LOCAL)
1877		return IB_SMI_HANDLE;
1878
1879	if (retsmi == IB_SMI_SEND) { /* don't forward */
1880		if (smi_handle_dr_smp_send(smp,
1881					   rdma_cap_ib_switch(port_priv->device),
1882					   port_num) == IB_SMI_DISCARD)
1883			return IB_SMI_DISCARD;
1884
1885		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
1886			return IB_SMI_DISCARD;
1887	} else if (rdma_cap_ib_switch(port_priv->device)) {
1888		/* forward case for switches */
1889		memcpy(response, recv, mad_priv_size(response));
1890		response->header.recv_wc.wc = &response->header.wc;
1891		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
1892		response->header.recv_wc.recv_buf.grh = &response->grh;
1893
1894		agent_send_response((const struct ib_mad_hdr *)response->mad,
1895				    &response->grh, wc,
1896				    port_priv->device,
1897				    smi_get_fwd_port(smp),
1898				    qp_info->qp->qp_num,
1899				    response->mad_size,
1900				    false);
1901
1902		return IB_SMI_DISCARD;
1903	}
1904	return IB_SMI_HANDLE;
1905}
1906
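/*
 * Build the reply for a Get/Set request that no agent has claimed: the
 * request is echoed back as a GetResp with status "unsupported
 * method/attribute" (plus the direction bit for directed-route SMPs).
 * Other methods return false and the MAD is silently dropped.
 */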
1907static bool generate_unmatched_resp(const struct ib_mad_private *recv,
1908				    struct ib_mad_private *response,
1909				    size_t *resp_len, bool opa)
1910{
1911	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
1912	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
1913
1914	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
1915	    recv_hdr->method == IB_MGMT_METHOD_SET) {
1916		memcpy(response, recv, mad_priv_size(response));
1917		response->header.recv_wc.wc = &response->header.wc;
1918		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
1919		response->header.recv_wc.recv_buf.grh = &response->grh;
1920		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
1921		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
1922		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1923			resp_hdr->status |= IB_SMP_DIRECTION;
1924
1925		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
1926			if (recv_hdr->mgmt_class ==
1927			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
1928			    recv_hdr->mgmt_class ==
1929			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1930				*resp_len = opa_get_smp_header_size(
1931							(struct opa_smp *)recv->mad);
1932			else
1933				*resp_len = sizeof(struct ib_mad_hdr);
1934		}
1935
1936		return true;
1937	} else {
1938		return false;
1939	}
1940}
1941
1942static enum smi_action
1943handle_opa_smi(struct ib_mad_port_private *port_priv,
1944	       struct ib_mad_qp_info *qp_info,
1945	       struct ib_wc *wc,
1946	       u32 port_num,
1947	       struct ib_mad_private *recv,
1948	       struct ib_mad_private *response)
1949{
1950	enum smi_forward_action retsmi;
1951	struct opa_smp *smp = (struct opa_smp *)recv->mad;
1952
1953	trace_ib_mad_handle_opa_smi(smp);
1954
1955	if (opa_smi_handle_dr_smp_recv(smp,
1956				   rdma_cap_ib_switch(port_priv->device),
1957				   port_num,
1958				   port_priv->device->phys_port_cnt) ==
1959				   IB_SMI_DISCARD)
1960		return IB_SMI_DISCARD;
1961
1962	retsmi = opa_smi_check_forward_dr_smp(smp);
1963	if (retsmi == IB_SMI_LOCAL)
1964		return IB_SMI_HANDLE;
1965
1966	if (retsmi == IB_SMI_SEND) { /* don't forward */
1967		if (opa_smi_handle_dr_smp_send(smp,
1968					   rdma_cap_ib_switch(port_priv->device),
1969					   port_num) == IB_SMI_DISCARD)
1970			return IB_SMI_DISCARD;
1971
1972		if (opa_smi_check_local_smp(smp, port_priv->device) ==
1973		    IB_SMI_DISCARD)
1974			return IB_SMI_DISCARD;
1975
1976	} else if (rdma_cap_ib_switch(port_priv->device)) {
1977		/* forward case for switches */
1978		memcpy(response, recv, mad_priv_size(response));
1979		response->header.recv_wc.wc = &response->header.wc;
1980		response->header.recv_wc.recv_buf.opa_mad =
1981				(struct opa_mad *)response->mad;
1982		response->header.recv_wc.recv_buf.grh = &response->grh;
1983
1984		agent_send_response((const struct ib_mad_hdr *)response->mad,
1985				    &response->grh, wc,
1986				    port_priv->device,
1987				    opa_smi_get_fwd_port(smp),
1988				    qp_info->qp->qp_num,
1989				    recv->header.wc.byte_len,
1990				    true);
1991
1992		return IB_SMI_DISCARD;
1993	}
1994
1995	return IB_SMI_HANDLE;
1996}
1997
1998static enum smi_action
1999handle_smi(struct ib_mad_port_private *port_priv,
2000	   struct ib_mad_qp_info *qp_info,
2001	   struct ib_wc *wc,
2002	   u32 port_num,
2003	   struct ib_mad_private *recv,
2004	   struct ib_mad_private *response,
2005	   bool opa)
2006{
2007	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2008
2009	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2010	    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
2011		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2012				      response);
2013
2014	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2015}
2016
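/*
 * Receive completion handler for a MAD QP: unmap the buffer, validate the
 * MAD, run SMP processing for directed-route packets, give the driver
 * "right of first refusal" through ops.process_mad, and then hand the MAD
 * to the matching agent, answer unclaimed Get/Set requests, or drop it.
 * A replacement receive buffer is always reposted at the end.
 */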
2017static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2018{
2019	struct ib_mad_port_private *port_priv = cq->cq_context;
2020	struct ib_mad_list_head *mad_list =
2021		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2022	struct ib_mad_qp_info *qp_info;
2023	struct ib_mad_private_header *mad_priv_hdr;
2024	struct ib_mad_private *recv, *response = NULL;
2025	struct ib_mad_agent_private *mad_agent;
2026	u32 port_num;
2027	int ret = IB_MAD_RESULT_SUCCESS;
2028	size_t mad_size;
2029	u16 resp_mad_pkey_index = 0;
2030	bool opa;
2031
2032	if (list_empty_careful(&port_priv->port_list))
2033		return;
2034
2035	if (wc->status != IB_WC_SUCCESS) {
2036		/*
2037		 * Receive errors indicate that the QP has entered the error
2038		 * state - error handling/shutdown code will cleanup
2039		 */
2040		return;
2041	}
2042
2043	qp_info = mad_list->mad_queue->qp_info;
2044	dequeue_mad(mad_list);
2045
2046	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2047			       qp_info->port_priv->port_num);
2048
2049	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2050				    mad_list);
2051	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2052	ib_dma_unmap_single(port_priv->device,
2053			    recv->header.mapping,
2054			    mad_priv_dma_size(recv),
2055			    DMA_FROM_DEVICE);
2056
2057	/* Set up the MAD receive work completion from the "normal" work completion */
2058	recv->header.wc = *wc;
2059	recv->header.recv_wc.wc = &recv->header.wc;
2060
2061	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2062		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2063		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2064	} else {
2065		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2066		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2067	}
2068
2069	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2070	recv->header.recv_wc.recv_buf.grh = &recv->grh;
2071
2072	/* Validate MAD */
2073	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2074		goto out;
2075
2076	trace_ib_mad_recv_done_handler(qp_info, wc,
2077				       (struct ib_mad_hdr *)recv->mad);
2078
2079	mad_size = recv->mad_size;
2080	response = alloc_mad_private(mad_size, GFP_KERNEL);
2081	if (!response)
2082		goto out;
 
2083
2084	if (rdma_cap_ib_switch(port_priv->device))
2085		port_num = wc->port_num;
2086	else
2087		port_num = port_priv->port_num;
2088
2089	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2090	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2091		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2092			       response, opa)
2093		    == IB_SMI_DISCARD)
2094			goto out;
2095	}
2096
2097	/* Give driver "right of first refusal" on incoming MAD */
2098	if (port_priv->device->ops.process_mad) {
2099		ret = port_priv->device->ops.process_mad(
2100			port_priv->device, 0, port_priv->port_num, wc,
2101			&recv->grh, (const struct ib_mad *)recv->mad,
2102			(struct ib_mad *)response->mad, &mad_size,
2103			&resp_mad_pkey_index);
 
 
2104
2105		if (opa)
2106			wc->pkey_index = resp_mad_pkey_index;
2107
2108		if (ret & IB_MAD_RESULT_SUCCESS) {
2109			if (ret & IB_MAD_RESULT_CONSUMED)
2110				goto out;
2111			if (ret & IB_MAD_RESULT_REPLY) {
2112				agent_send_response((const struct ib_mad_hdr *)response->mad,
2113						    &recv->grh, wc,
2114						    port_priv->device,
2115						    port_num,
2116						    qp_info->qp->qp_num,
2117						    mad_size, opa);
2118				goto out;
2119			}
2120		}
2121	}
2122
2123	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2124	if (mad_agent) {
2125		trace_ib_mad_recv_done_agent(mad_agent);
2126		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2127		/*
2128		 * recv is consumed by ib_mad_complete_recv(): it is freed there
2129		 * in its error paths or handed to the client via recv_handler
2130		 */
2131		recv = NULL;
2132	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2133		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
2134		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2135				    port_priv->device, port_num,
2136				    qp_info->qp->qp_num, mad_size, opa);
2137	}
2138
2139out:
2140	/* Post another receive request for this QP */
2141	if (response) {
2142		ib_mad_post_receive_mads(qp_info, response);
2143		kfree(recv);
2144	} else
2145		ib_mad_post_receive_mads(qp_info, recv);
2146}
2147
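/*
 * Called after removing a send: cancel the delayed timeout work when the
 * wait list is empty, or pull it forward when the new head of the list
 * times out earlier than currently scheduled.
 */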
2148static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2149{
2150	struct ib_mad_send_wr_private *mad_send_wr;
2151	unsigned long delay;
2152
2153	if (list_empty(&mad_agent_priv->wait_list)) {
2154		cancel_delayed_work(&mad_agent_priv->timed_work);
2155	} else {
2156		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2157					 struct ib_mad_send_wr_private,
2158					 agent_list);
2159
2160		if (time_after(mad_agent_priv->timeout,
2161			       mad_send_wr->timeout)) {
2162			mad_agent_priv->timeout = mad_send_wr->timeout;
2163			delay = mad_send_wr->timeout - jiffies;
2164			if ((long)delay <= 0)
2165				delay = 1;
2166			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2167					 &mad_agent_priv->timed_work, delay);
2168		}
2169	}
2170}
2171
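/*
 * Move a send that is now waiting for its response onto the wait list,
 * keeping the list ordered by absolute timeout, and reschedule the
 * timeout work if this entry became the new head of the list.
 */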
2172static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2173{
2174	struct ib_mad_agent_private *mad_agent_priv;
2175	struct ib_mad_send_wr_private *temp_mad_send_wr;
2176	struct list_head *list_item;
2177	unsigned long delay;
2178
2179	mad_agent_priv = mad_send_wr->mad_agent_priv;
2180	list_del(&mad_send_wr->agent_list);
2181
2182	delay = mad_send_wr->timeout;
2183	mad_send_wr->timeout += jiffies;
2184
2185	if (delay) {
2186		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2187			temp_mad_send_wr = list_entry(list_item,
2188						struct ib_mad_send_wr_private,
2189						agent_list);
2190			if (time_after(mad_send_wr->timeout,
2191				       temp_mad_send_wr->timeout))
2192				break;
2193		}
2194	} else {
2195		list_item = &mad_agent_priv->wait_list;
2196	}
2197
 
2198	list_add(&mad_send_wr->agent_list, list_item);
2199
2200	/* Reschedule a work item if we have a shorter timeout */
2201	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2202		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2203				 &mad_agent_priv->timed_work, delay);
2204}
2205
2206void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2207			  unsigned long timeout_ms)
2208{
2209	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2210	wait_for_response(mad_send_wr);
2211}
2212
2213/*
2214 * Process a send work completion
2215 */
2216void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2217			     struct ib_mad_send_wc *mad_send_wc)
2218{
2219	struct ib_mad_agent_private	*mad_agent_priv;
2220	unsigned long			flags;
2221	int				ret;
2222
2223	mad_agent_priv = mad_send_wr->mad_agent_priv;
2224	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2225	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2226		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2227		if (ret == IB_RMPP_RESULT_CONSUMED)
2228			goto done;
2229	} else
2230		ret = IB_RMPP_RESULT_UNHANDLED;
2231
2232	if (mad_send_wc->status != IB_WC_SUCCESS &&
2233	    mad_send_wr->status == IB_WC_SUCCESS) {
2234		mad_send_wr->status = mad_send_wc->status;
2235		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2236	}
2237
2238	if (--mad_send_wr->refcount > 0) {
2239		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2240		    mad_send_wr->status == IB_WC_SUCCESS) {
2241			wait_for_response(mad_send_wr);
2242		}
2243		goto done;
2244	}
2245
2246	/* Remove send from MAD agent and notify client of completion */
2247	list_del(&mad_send_wr->agent_list);
2248	adjust_timeout(mad_agent_priv);
2249	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2250
2251	if (mad_send_wr->status != IB_WC_SUCCESS)
2252		mad_send_wc->status = mad_send_wr->status;
2253	if (ret == IB_RMPP_RESULT_INTERNAL)
2254		ib_rmpp_send_handler(mad_send_wc);
2255	else
2256		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2257						   mad_send_wc);
2258
2259	/* Release reference on agent taken when sending */
2260	deref_mad_agent(mad_agent_priv);
2261	return;
2262done:
2263	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2264}
2265
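/*
 * Send completion handler for a MAD QP: unmap the MAD buffers, complete
 * the send work request towards the agent, and post the oldest entry from
 * the overflow list if the send queue had been over its limit.  A failed
 * repost is reported as IB_WC_LOC_QP_OP_ERR.
 */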
2266static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2267{
2268	struct ib_mad_port_private *port_priv = cq->cq_context;
2269	struct ib_mad_list_head *mad_list =
2270		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2271	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
2272	struct ib_mad_qp_info		*qp_info;
2273	struct ib_mad_queue		*send_queue;
 
2274	struct ib_mad_send_wc		mad_send_wc;
2275	unsigned long flags;
2276	int ret;
2277
2278	if (list_empty_careful(&port_priv->port_list))
2279		return;
2280
2281	if (wc->status != IB_WC_SUCCESS) {
2282		if (!ib_mad_send_error(port_priv, wc))
2283			return;
2284	}
2285
2286	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2287				   mad_list);
2288	send_queue = mad_list->mad_queue;
2289	qp_info = send_queue->qp_info;
2290
2291	trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
2292	trace_ib_mad_send_done_handler(mad_send_wr, wc);
2293
2294retry:
2295	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2296			    mad_send_wr->header_mapping,
2297			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2298	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2299			    mad_send_wr->payload_mapping,
2300			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2301	queued_send_wr = NULL;
2302	spin_lock_irqsave(&send_queue->lock, flags);
2303	list_del(&mad_list->list);
2304
2305	/* Move queued send to the send queue */
2306	if (send_queue->count-- > send_queue->max_active) {
2307		mad_list = container_of(qp_info->overflow_list.next,
2308					struct ib_mad_list_head, list);
2309		queued_send_wr = container_of(mad_list,
2310					struct ib_mad_send_wr_private,
2311					mad_list);
2312		list_move_tail(&mad_list->list, &send_queue->list);
2313	}
2314	spin_unlock_irqrestore(&send_queue->lock, flags);
2315
2316	mad_send_wc.send_buf = &mad_send_wr->send_buf;
2317	mad_send_wc.status = wc->status;
2318	mad_send_wc.vendor_err = wc->vendor_err;
2319	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2320
2321	if (queued_send_wr) {
2322		trace_ib_mad_send_done_resend(queued_send_wr, qp_info);
2323		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2324				   NULL);
2325		if (ret) {
2326			dev_err(&port_priv->device->dev,
2327				"ib_post_send failed: %d\n", ret);
2328			mad_send_wr = queued_send_wr;
2329			wc->status = IB_WC_LOC_QP_OP_ERR;
2330			goto retry;
2331		}
2332	}
2333}
2334
2335static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2336{
2337	struct ib_mad_send_wr_private *mad_send_wr;
2338	struct ib_mad_list_head *mad_list;
2339	unsigned long flags;
2340
2341	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2342	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2343		mad_send_wr = container_of(mad_list,
2344					   struct ib_mad_send_wr_private,
2345					   mad_list);
2346		mad_send_wr->retry = 1;
2347	}
2348	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2349}
2350
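/*
 * Handle a send completion with an error status: flushed sends marked for
 * retry are simply reposted, any other error means the QP dropped into
 * SQE, so it is moved back to RTS and queued sends are marked for retry.
 * Returns false when a successful repost fully absorbed the completion.
 */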
2351static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2352		struct ib_wc *wc)
2353{
2354	struct ib_mad_list_head *mad_list =
2355		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2356	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2357	struct ib_mad_send_wr_private *mad_send_wr;
2358	int ret;
2359
2360	/*
2361	 * Send errors will transition the QP to SQE - move
2362	 * QP to RTS and repost flushed work requests
2363	 */
2364	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2365				   mad_list);
2366	if (wc->status == IB_WC_WR_FLUSH_ERR) {
2367		if (mad_send_wr->retry) {
2368			/* Repost send */
 
 
2369			mad_send_wr->retry = 0;
2370			trace_ib_mad_error_handler(mad_send_wr, qp_info);
2371			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2372					   NULL);
2373			if (!ret)
2374				return false;
2375		}
2376	} else {
2377		struct ib_qp_attr *attr;
2378
2379		/* Transition QP to RTS and fail offending send */
2380		attr = kmalloc(sizeof *attr, GFP_KERNEL);
2381		if (attr) {
2382			attr->qp_state = IB_QPS_RTS;
2383			attr->cur_qp_state = IB_QPS_SQE;
2384			ret = ib_modify_qp(qp_info->qp, attr,
2385					   IB_QP_STATE | IB_QP_CUR_STATE);
2386			kfree(attr);
2387			if (ret)
2388				dev_err(&port_priv->device->dev,
2389					"%s - ib_modify_qp to RTS: %d\n",
2390					__func__, ret);
2391			else
2392				mark_sends_for_retry(qp_info);
2393		}
2394	}
2395
2396	return true;
2397}
2398
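/*
 * Called when an agent unregisters: mark every send still on the send
 * list as flushed (it completes through the normal path) and immediately
 * report everything on the wait list with IB_WC_WR_FLUSH_ERR.
 */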
2399static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2400{
2401	unsigned long flags;
2402	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2403	struct ib_mad_send_wc mad_send_wc;
2404	struct list_head cancel_list;
2405
2406	INIT_LIST_HEAD(&cancel_list);
2407
2408	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2409	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2410				 &mad_agent_priv->send_list, agent_list) {
2411		if (mad_send_wr->status == IB_WC_SUCCESS) {
2412			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2413			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2414		}
2415	}
2416
2417	/* Empty wait list to prevent receives from finding a request */
2418	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2419	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2420
2421	/* Report all cancelled requests */
2422	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2423	mad_send_wc.vendor_err = 0;
2424
2425	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2426				 &cancel_list, agent_list) {
2427		mad_send_wc.send_buf = &mad_send_wr->send_buf;
2428		list_del(&mad_send_wr->agent_list);
2429		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2430						   &mad_send_wc);
2431		deref_mad_agent(mad_agent_priv);
2432	}
2433}
2434
2435static struct ib_mad_send_wr_private*
2436find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2437	     struct ib_mad_send_buf *send_buf)
2438{
2439	struct ib_mad_send_wr_private *mad_send_wr;
2440
2441	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2442			    agent_list) {
2443		if (&mad_send_wr->send_buf == send_buf)
2444			return mad_send_wr;
2445	}
2446
2447	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2448			    agent_list) {
2449		if (is_rmpp_data_mad(mad_agent_priv,
2450				     mad_send_wr->send_buf.mad) &&
2451		    &mad_send_wr->send_buf == send_buf)
2452			return mad_send_wr;
2453	}
2454	return NULL;
2455}
2456
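/*
 * Adjust the timeout of an outstanding send identified by its send_buf.
 * A timeout of zero cancels the request, which is then reported back to
 * the client as IB_WC_WR_FLUSH_ERR.
 */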
2457int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2458{
2459	struct ib_mad_agent_private *mad_agent_priv;
2460	struct ib_mad_send_wr_private *mad_send_wr;
2461	unsigned long flags;
2462	int active;
2463
2464	if (!send_buf)
2465		return -EINVAL;
2466
2467	mad_agent_priv = container_of(send_buf->mad_agent,
2468				      struct ib_mad_agent_private, agent);
2469	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2470	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2471	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2472		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2473		return -EINVAL;
2474	}
2475
2476	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2477	if (!timeout_ms) {
2478		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2479		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2480	}
2481
2482	mad_send_wr->send_buf.timeout_ms = timeout_ms;
2483	if (active)
2484		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2485	else
2486		ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2487
2488	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2489	return 0;
2490}
2491EXPORT_SYMBOL(ib_modify_mad);
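/*
 * Minimal usage sketch (not part of this file): a client that posted a
 * request with ib_post_send_mad() can change the remaining wait for its
 * response,
 *
 *	ib_modify_mad(send_buf, 2000);
 *
 * or cancel it outright with
 *
 *	ib_modify_mad(send_buf, 0);
 *
 * after which the request completes with status IB_WC_WR_FLUSH_ERR.
 */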
2492
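/*
 * Work handler for MADs that were answered locally (for example SMPs
 * consumed by an agent on the same port) without being posted to the
 * hardware: build a synthetic work completion, deliver the response to
 * the receiving agent, then complete the originating send.
 */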
2493static void local_completions(struct work_struct *work)
2494{
2495	struct ib_mad_agent_private *mad_agent_priv;
2496	struct ib_mad_local_private *local;
2497	struct ib_mad_agent_private *recv_mad_agent;
2498	unsigned long flags;
2499	int free_mad;
2500	struct ib_wc wc;
2501	struct ib_mad_send_wc mad_send_wc;
2502	bool opa;
2503
2504	mad_agent_priv =
2505		container_of(work, struct ib_mad_agent_private, local_work);
2506
2507	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2508			       mad_agent_priv->qp_info->port_priv->port_num);
2509
2510	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2511	while (!list_empty(&mad_agent_priv->local_list)) {
2512		local = list_entry(mad_agent_priv->local_list.next,
2513				   struct ib_mad_local_private,
2514				   completion_list);
2515		list_del(&local->completion_list);
2516		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2517		free_mad = 0;
2518		if (local->mad_priv) {
2519			u8 base_version;
2520			recv_mad_agent = local->recv_mad_agent;
2521			if (!recv_mad_agent) {
2522				dev_err(&mad_agent_priv->agent.device->dev,
2523					"No receive MAD agent for local completion\n");
2524				free_mad = 1;
2525				goto local_send_completion;
2526			}
2527
2528			/*
2529			 * Defined behavior is to complete response
2530			 * before request
2531			 */
2532			build_smp_wc(recv_mad_agent->agent.qp,
2533				     local->mad_send_wr->send_wr.wr.wr_cqe,
2534				     be16_to_cpu(IB_LID_PERMISSIVE),
2535				     local->mad_send_wr->send_wr.pkey_index,
2536				     recv_mad_agent->agent.port_num, &wc);
2537
2538			local->mad_priv->header.recv_wc.wc = &wc;
2539
2540			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2541			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2542				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2543				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2544			} else {
2545				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2546				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2547			}
2548
2549			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2550			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2551				 &local->mad_priv->header.recv_wc.rmpp_list);
2552			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2553			local->mad_priv->header.recv_wc.recv_buf.mad =
2554						(struct ib_mad *)local->mad_priv->mad;
2555			recv_mad_agent->agent.recv_handler(
2556						&recv_mad_agent->agent,
2557						&local->mad_send_wr->send_buf,
2558						&local->mad_priv->header.recv_wc);
2559			spin_lock_irqsave(&recv_mad_agent->lock, flags);
2560			deref_mad_agent(recv_mad_agent);
2561			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2562		}
2563
2564local_send_completion:
2565		/* Complete send */
2566		mad_send_wc.status = IB_WC_SUCCESS;
2567		mad_send_wc.vendor_err = 0;
2568		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2569		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2570						   &mad_send_wc);
2571
2572		spin_lock_irqsave(&mad_agent_priv->lock, flags);
2573		deref_mad_agent(mad_agent_priv);
2574		if (free_mad)
2575			kfree(local->mad_priv);
2576		kfree(local);
2577	}
2578	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2579}
2580
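/*
 * Retransmit a request whose response timed out, provided retries remain;
 * RMPP sends are restarted through ib_retry_rmpp().  On success the
 * request goes back on the send list.
 */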
2581static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2582{
2583	int ret;
2584
2585	if (!mad_send_wr->retries_left)
2586		return -ETIMEDOUT;
2587
2588	mad_send_wr->retries_left--;
2589	mad_send_wr->send_buf.retries++;
2590
2591	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2592
2593	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2594		ret = ib_retry_rmpp(mad_send_wr);
2595		switch (ret) {
2596		case IB_RMPP_RESULT_UNHANDLED:
2597			ret = ib_send_mad(mad_send_wr);
2598			break;
2599		case IB_RMPP_RESULT_CONSUMED:
2600			ret = 0;
2601			break;
2602		default:
2603			ret = -ECOMM;
2604			break;
2605		}
2606	} else
2607		ret = ib_send_mad(mad_send_wr);
2608
2609	if (!ret) {
2610		mad_send_wr->refcount++;
2611		list_add_tail(&mad_send_wr->agent_list,
2612			      &mad_send_wr->mad_agent_priv->send_list);
2613	}
2614	return ret;
2615}
2616
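/*
 * Delayed-work handler for response timeouts: expired requests at the
 * head of the wait list are retried when possible, otherwise reported to
 * the client with IB_WC_RESP_TIMEOUT_ERR (or their recorded error
 * status).  The work is rescheduled for the next pending timeout.
 */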
2617static void timeout_sends(struct work_struct *work)
2618{
2619	struct ib_mad_agent_private *mad_agent_priv;
2620	struct ib_mad_send_wr_private *mad_send_wr;
2621	struct ib_mad_send_wc mad_send_wc;
2622	unsigned long flags, delay;
2623
2624	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2625				      timed_work.work);
2626	mad_send_wc.vendor_err = 0;
2627
2628	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2629	while (!list_empty(&mad_agent_priv->wait_list)) {
2630		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2631					 struct ib_mad_send_wr_private,
2632					 agent_list);
2633
2634		if (time_after(mad_send_wr->timeout, jiffies)) {
2635			delay = mad_send_wr->timeout - jiffies;
2636			if ((long)delay <= 0)
2637				delay = 1;
2638			queue_delayed_work(mad_agent_priv->qp_info->
2639					   port_priv->wq,
2640					   &mad_agent_priv->timed_work, delay);
2641			break;
2642		}
2643
2644		list_del(&mad_send_wr->agent_list);
2645		if (mad_send_wr->status == IB_WC_SUCCESS &&
2646		    !retry_send(mad_send_wr))
2647			continue;
2648
2649		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2650
2651		if (mad_send_wr->status == IB_WC_SUCCESS)
2652			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2653		else
2654			mad_send_wc.status = mad_send_wr->status;
2655		mad_send_wc.send_buf = &mad_send_wr->send_buf;
2656		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2657						   &mad_send_wc);
2658
2659		deref_mad_agent(mad_agent_priv);
2660		spin_lock_irqsave(&mad_agent_priv->lock, flags);
2661	}
2662	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2663}
2664
2665/*
2666 * Allocate receive MADs and post receive WRs for them
2667 */
2668static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2669				    struct ib_mad_private *mad)
2670{
2671	unsigned long flags;
2672	int post, ret;
2673	struct ib_mad_private *mad_priv;
2674	struct ib_sge sg_list;
2675	struct ib_recv_wr recv_wr;
2676	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2677
2678	/* Initialize common scatter list fields */
2679	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2680
2681	/* Initialize common receive WR fields */
2682	recv_wr.next = NULL;
2683	recv_wr.sg_list = &sg_list;
2684	recv_wr.num_sge = 1;
2685
2686	do {
2687		/* Allocate and map receive buffer */
2688		if (mad) {
2689			mad_priv = mad;
2690			mad = NULL;
2691		} else {
2692			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2693						     GFP_ATOMIC);
2694			if (!mad_priv) {
 
 
2695				ret = -ENOMEM;
2696				break;
2697			}
2698		}
2699		sg_list.length = mad_priv_dma_size(mad_priv);
2700		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2701						 &mad_priv->grh,
2702						 mad_priv_dma_size(mad_priv),
2703						 DMA_FROM_DEVICE);
2704		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2705						  sg_list.addr))) {
2706			kfree(mad_priv);
2707			ret = -ENOMEM;
2708			break;
2709		}
2710		mad_priv->header.mapping = sg_list.addr;
2711		mad_priv->header.mad_list.mad_queue = recv_queue;
2712		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2713		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2714
2715		/* Post receive WR */
2716		spin_lock_irqsave(&recv_queue->lock, flags);
2717		post = (++recv_queue->count < recv_queue->max_active);
2718		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2719		spin_unlock_irqrestore(&recv_queue->lock, flags);
2720		ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
2721		if (ret) {
2722			spin_lock_irqsave(&recv_queue->lock, flags);
2723			list_del(&mad_priv->header.mad_list.list);
2724			recv_queue->count--;
2725			spin_unlock_irqrestore(&recv_queue->lock, flags);
2726			ib_dma_unmap_single(qp_info->port_priv->device,
2727					    mad_priv->header.mapping,
2728					    mad_priv_dma_size(mad_priv),
2729					    DMA_FROM_DEVICE);
2730			kfree(mad_priv);
2731			dev_err(&qp_info->port_priv->device->dev,
2732				"ib_post_recv failed: %d\n", ret);
2733			break;
2734		}
2735	} while (post);
2736
2737	return ret;
2738}
2739
2740/*
2741 * Return all the posted receive MADs
2742 */
2743static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2744{
2745	struct ib_mad_private_header *mad_priv_hdr;
2746	struct ib_mad_private *recv;
2747	struct ib_mad_list_head *mad_list;
2748
2749	if (!qp_info->qp)
2750		return;
2751
2752	while (!list_empty(&qp_info->recv_queue.list)) {
2753
2754		mad_list = list_entry(qp_info->recv_queue.list.next,
2755				      struct ib_mad_list_head, list);
2756		mad_priv_hdr = container_of(mad_list,
2757					    struct ib_mad_private_header,
2758					    mad_list);
2759		recv = container_of(mad_priv_hdr, struct ib_mad_private,
2760				    header);
2761
2762		/* Remove from posted receive MAD list */
2763		list_del(&mad_list->list);
2764
2765		ib_dma_unmap_single(qp_info->port_priv->device,
2766				    recv->header.mapping,
2767				    mad_priv_dma_size(recv),
2768				    DMA_FROM_DEVICE);
2769		kfree(recv);
2770	}
2771
2772	qp_info->recv_queue.count = 0;
2773}
2774
2775/*
2776 * Start the port
2777 */
2778static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2779{
2780	int ret, i;
2781	struct ib_qp_attr *attr;
2782	struct ib_qp *qp;
2783	u16 pkey_index;
2784
2785	attr = kmalloc(sizeof *attr, GFP_KERNEL);
2786	if (!attr)
2787		return -ENOMEM;
 
2788
2789	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2790			   IB_DEFAULT_PKEY_FULL, &pkey_index);
2791	if (ret)
2792		pkey_index = 0;
2793
2794	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2795		qp = port_priv->qp_info[i].qp;
2796		if (!qp)
2797			continue;
2798
2799		/*
2800		 * PKey index for QP1 is irrelevant but
2801		 * one is needed for the Reset to Init transition
2802		 */
2803		attr->qp_state = IB_QPS_INIT;
2804		attr->pkey_index = pkey_index;
2805		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2806		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2807					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
2808		if (ret) {
2809			dev_err(&port_priv->device->dev,
2810				"Couldn't change QP%d state to INIT: %d\n",
2811				i, ret);
2812			goto out;
2813		}
2814
2815		attr->qp_state = IB_QPS_RTR;
2816		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2817		if (ret) {
2818			dev_err(&port_priv->device->dev,
2819				"Couldn't change QP%d state to RTR: %d\n",
2820				i, ret);
2821			goto out;
2822		}
2823
2824		attr->qp_state = IB_QPS_RTS;
2825		attr->sq_psn = IB_MAD_SEND_Q_PSN;
2826		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2827		if (ret) {
2828			dev_err(&port_priv->device->dev,
2829				"Couldn't change QP%d state to RTS: %d\n",
2830				i, ret);
2831			goto out;
2832		}
2833	}
2834
2835	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2836	if (ret) {
2837		dev_err(&port_priv->device->dev,
2838			"Failed to request completion notification: %d\n",
2839			ret);
2840		goto out;
2841	}
2842
2843	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2844		if (!port_priv->qp_info[i].qp)
2845			continue;
2846
2847		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2848		if (ret) {
2849			dev_err(&port_priv->device->dev,
2850				"Couldn't post receive WRs\n");
2851			goto out;
2852		}
2853	}
2854out:
2855	kfree(attr);
2856	return ret;
2857}
2858
2859static void qp_event_handler(struct ib_event *event, void *qp_context)
2860{
2861	struct ib_mad_qp_info	*qp_info = qp_context;
2862
2863	/* It's worse than that! He's dead, Jim! */
2864	dev_err(&qp_info->port_priv->device->dev,
2865		"Fatal error (%d) on MAD QP (%u)\n",
2866		event->event, qp_info->qp->qp_num);
2867}
2868
2869static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2870			   struct ib_mad_queue *mad_queue)
2871{
2872	mad_queue->qp_info = qp_info;
2873	mad_queue->count = 0;
2874	spin_lock_init(&mad_queue->lock);
2875	INIT_LIST_HEAD(&mad_queue->list);
2876}
2877
2878static void init_mad_qp(struct ib_mad_port_private *port_priv,
2879			struct ib_mad_qp_info *qp_info)
2880{
2881	qp_info->port_priv = port_priv;
2882	init_mad_queue(qp_info, &qp_info->send_queue);
2883	init_mad_queue(qp_info, &qp_info->recv_queue);
2884	INIT_LIST_HEAD(&qp_info->overflow_list);
2885}
2886
2887static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2888			 enum ib_qp_type qp_type)
2889{
2890	struct ib_qp_init_attr	qp_init_attr;
2891	int ret;
2892
2893	memset(&qp_init_attr, 0, sizeof qp_init_attr);
2894	qp_init_attr.send_cq = qp_info->port_priv->cq;
2895	qp_init_attr.recv_cq = qp_info->port_priv->cq;
2896	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2897	qp_init_attr.cap.max_send_wr = mad_sendq_size;
2898	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
2899	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2900	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2901	qp_init_attr.qp_type = qp_type;
2902	qp_init_attr.port_num = qp_info->port_priv->port_num;
2903	qp_init_attr.qp_context = qp_info;
2904	qp_init_attr.event_handler = qp_event_handler;
2905	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2906	if (IS_ERR(qp_info->qp)) {
2907		dev_err(&qp_info->port_priv->device->dev,
2908			"Couldn't create ib_mad QP%d\n",
2909			get_spl_qp_index(qp_type));
2910		ret = PTR_ERR(qp_info->qp);
2911		goto error;
2912	}
2913	/* Use minimum queue sizes unless the CQ is resized */
2914	qp_info->send_queue.max_active = mad_sendq_size;
2915	qp_info->recv_queue.max_active = mad_recvq_size;
2916	return 0;
2917
2918error:
2919	return ret;
2920}
2921
2922static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2923{
2924	if (!qp_info->qp)
2925		return;
2926
2927	ib_destroy_qp(qp_info->qp);
 
2928}
2929
2930/*
2931 * Open the port
2932 * Create the QP, PD, MR, and CQ if needed
2933 */
2934static int ib_mad_port_open(struct ib_device *device,
2935			    u32 port_num)
2936{
2937	int ret, cq_size;
2938	struct ib_mad_port_private *port_priv;
2939	unsigned long flags;
2940	char name[sizeof "ib_mad123"];
2941	int has_smi;
2942
2943	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
2944		return -EFAULT;
2945
2946	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
2947		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
2948		return -EFAULT;
2949
2950	/* Create new device info */
2951	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2952	if (!port_priv)
 
2953		return -ENOMEM;
 
2954
2955	port_priv->device = device;
2956	port_priv->port_num = port_num;
2957	spin_lock_init(&port_priv->reg_lock);
 
2958	init_mad_qp(port_priv, &port_priv->qp_info[0]);
2959	init_mad_qp(port_priv, &port_priv->qp_info[1]);
2960
2961	cq_size = mad_sendq_size + mad_recvq_size;
2962	has_smi = rdma_cap_ib_smi(device, port_num);
2963	if (has_smi)
2964		cq_size *= 2;
2965
2966	port_priv->pd = ib_alloc_pd(device, 0);
2967	if (IS_ERR(port_priv->pd)) {
2968		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
2969		ret = PTR_ERR(port_priv->pd);
2970		goto error3;
2971	}
2972
2973	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
2974			IB_POLL_UNBOUND_WORKQUEUE);
2975	if (IS_ERR(port_priv->cq)) {
2976		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
2977		ret = PTR_ERR(port_priv->cq);
2978		goto error4;
2979	}
2980
2981	if (has_smi) {
2982		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2983		if (ret)
2984			goto error6;
2985	}
2986	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2987	if (ret)
2988		goto error7;
2989
2990	snprintf(name, sizeof(name), "ib_mad%u", port_num);
2991	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2992	if (!port_priv->wq) {
2993		ret = -ENOMEM;
2994		goto error8;
2995	}
2996
2997	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2998	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2999	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3000
3001	ret = ib_mad_port_start(port_priv);
3002	if (ret) {
3003		dev_err(&device->dev, "Couldn't start port\n");
3004		goto error9;
3005	}
3006
3007	return 0;
3008
3009error9:
3010	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3011	list_del_init(&port_priv->port_list);
3012	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3013
3014	destroy_workqueue(port_priv->wq);
3015error8:
3016	destroy_mad_qp(&port_priv->qp_info[1]);
3017error7:
3018	destroy_mad_qp(&port_priv->qp_info[0]);
3019error6:
 
 
3020	ib_free_cq(port_priv->cq);
3021	cleanup_recv_queue(&port_priv->qp_info[1]);
3022	cleanup_recv_queue(&port_priv->qp_info[0]);
3023error4:
3024	ib_dealloc_pd(port_priv->pd);
3025error3:
3026	kfree(port_priv);
3027
3028	return ret;
3029}
3030
3031/*
3032 * Close the port
3033 * If there are no classes using the port, free the port
3034 * resources (CQ, MR, PD, QP) and remove the port's info structure
3035 */
3036static int ib_mad_port_close(struct ib_device *device, u32 port_num)
3037{
3038	struct ib_mad_port_private *port_priv;
3039	unsigned long flags;
3040
3041	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3042	port_priv = __ib_get_mad_port(device, port_num);
3043	if (port_priv == NULL) {
3044		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3045		dev_err(&device->dev, "Port %u not found\n", port_num);
3046		return -ENODEV;
3047	}
3048	list_del_init(&port_priv->port_list);
3049	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3050
3051	destroy_workqueue(port_priv->wq);
3052	destroy_mad_qp(&port_priv->qp_info[1]);
3053	destroy_mad_qp(&port_priv->qp_info[0]);
3054	ib_free_cq(port_priv->cq);
3055	ib_dealloc_pd(port_priv->pd);
 
3056	cleanup_recv_queue(&port_priv->qp_info[1]);
3057	cleanup_recv_queue(&port_priv->qp_info[0]);
3058	/* XXX: Handle deallocation of MAD registration tables */
3059
3060	kfree(port_priv);
3061
3062	return 0;
3063}
3064
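/*
 * Client "add" callback: bring up MAD services (QP0/QP1, CQ, PD, work
 * queue and the agent port) on every port of the device that supports
 * MADs; returns -EOPNOTSUPP when no port does.
 */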
3065static int ib_mad_init_device(struct ib_device *device)
3066{
3067	int start, i;
3068	unsigned int count = 0;
3069	int ret;
3070
3071	start = rdma_start_port(device);
3072
3073	for (i = start; i <= rdma_end_port(device); i++) {
3074		if (!rdma_cap_ib_mad(device, i))
3075			continue;
3076
3077		ret = ib_mad_port_open(device, i);
3078		if (ret) {
3079			dev_err(&device->dev, "Couldn't open port %d\n", i);
3080			goto error;
3081		}
3082		ret = ib_agent_port_open(device, i);
3083		if (ret) {
3084			dev_err(&device->dev,
3085				"Couldn't open port %d for agents\n", i);
3086			goto error_agent;
3087		}
3088		count++;
3089	}
3090	if (!count)
3091		return -EOPNOTSUPP;
3092
3093	return 0;
3094
3095error_agent:
3096	if (ib_mad_port_close(device, i))
3097		dev_err(&device->dev, "Couldn't close port %d\n", i);
3098
3099error:
3100	while (--i >= start) {
3101		if (!rdma_cap_ib_mad(device, i))
3102			continue;
3103
3104		if (ib_agent_port_close(device, i))
3105			dev_err(&device->dev,
3106				"Couldn't close port %d for agents\n", i);
3107		if (ib_mad_port_close(device, i))
3108			dev_err(&device->dev, "Couldn't close port %d\n", i);
3109	}
3110	return ret;
3111}
3112
3113static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3114{
3115	unsigned int i;
3116
3117	rdma_for_each_port (device, i) {
3118		if (!rdma_cap_ib_mad(device, i))
3119			continue;
3120
3121		if (ib_agent_port_close(device, i))
3122			dev_err(&device->dev,
3123				"Couldn't close port %u for agents\n", i);
3124		if (ib_mad_port_close(device, i))
3125			dev_err(&device->dev, "Couldn't close port %u\n", i);
3126	}
3127}
3128
3129static struct ib_client mad_client = {
3130	.name   = "mad",
3131	.add = ib_mad_init_device,
3132	.remove = ib_mad_remove_device
3133};
3134
3135int ib_mad_init(void)
3136{
3137	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3138	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3139
3140	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3141	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3142
3143	INIT_LIST_HEAD(&ib_mad_port_list);
3144
3145	if (ib_register_client(&mad_client)) {
3146		pr_err("Couldn't register ib_mad client\n");
3147		return -EINVAL;
3148	}
3149
3150	return 0;
3151}
3152
3153void ib_mad_cleanup(void)
3154{
3155	ib_unregister_client(&mad_client);
3156}
v4.6
   1/*
   2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
   3 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
   4 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
   5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
   6 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
   7 *
   8 * This software is available to you under a choice of one of two
   9 * licenses.  You may choose to be licensed under the terms of the GNU
  10 * General Public License (GPL) Version 2, available from the file
  11 * COPYING in the main directory of this source tree, or the
  12 * OpenIB.org BSD license below:
  13 *
  14 *     Redistribution and use in source and binary forms, with or
  15 *     without modification, are permitted provided that the following
  16 *     conditions are met:
  17 *
  18 *      - Redistributions of source code must retain the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer.
  21 *
  22 *      - Redistributions in binary form must reproduce the above
  23 *        copyright notice, this list of conditions and the following
  24 *        disclaimer in the documentation and/or other materials
  25 *        provided with the distribution.
  26 *
  27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  34 * SOFTWARE.
  35 *
  36 */
  37
  38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  39
  40#include <linux/dma-mapping.h>
  41#include <linux/slab.h>
  42#include <linux/module.h>
 
 
  43#include <rdma/ib_cache.h>
  44
  45#include "mad_priv.h"
 
  46#include "mad_rmpp.h"
  47#include "smi.h"
  48#include "opa_smi.h"
  49#include "agent.h"
  50
  51MODULE_LICENSE("Dual BSD/GPL");
  52MODULE_DESCRIPTION("kernel IB MAD API");
  53MODULE_AUTHOR("Hal Rosenstock");
  54MODULE_AUTHOR("Sean Hefty");
  55
  56static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
  57static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
  58
  59module_param_named(send_queue_size, mad_sendq_size, int, 0444);
  60MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
  61module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
  62MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
  63
 
 
  64static struct list_head ib_mad_port_list;
  65static u32 ib_mad_client_id = 0;
  66
  67/* Port list lock */
  68static DEFINE_SPINLOCK(ib_mad_port_list_lock);
  69
  70/* Forward declarations */
  71static int method_in_use(struct ib_mad_mgmt_method_table **method,
  72			 struct ib_mad_reg_req *mad_reg_req);
  73static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
  74static struct ib_mad_agent_private *find_mad_agent(
  75					struct ib_mad_port_private *port_priv,
  76					const struct ib_mad_hdr *mad);
  77static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
  78				    struct ib_mad_private *mad);
  79static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
  80static void timeout_sends(struct work_struct *work);
  81static void local_completions(struct work_struct *work);
  82static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
  83			      struct ib_mad_agent_private *agent_priv,
  84			      u8 mgmt_class);
  85static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
  86			   struct ib_mad_agent_private *agent_priv);
  87static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
  88			      struct ib_wc *wc);
  89static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
  90
  91/*
  92 * Returns a ib_mad_port_private structure or NULL for a device/port
  93 * Assumes ib_mad_port_list_lock is being held
  94 */
  95static inline struct ib_mad_port_private *
  96__ib_get_mad_port(struct ib_device *device, int port_num)
  97{
  98	struct ib_mad_port_private *entry;
  99
 100	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
 101		if (entry->device == device && entry->port_num == port_num)
 102			return entry;
 103	}
 104	return NULL;
 105}
 106
 107/*
 108 * Wrapper function to return a ib_mad_port_private structure or NULL
 109 * for a device/port
 110 */
 111static inline struct ib_mad_port_private *
 112ib_get_mad_port(struct ib_device *device, int port_num)
 113{
 114	struct ib_mad_port_private *entry;
 115	unsigned long flags;
 116
 117	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 118	entry = __ib_get_mad_port(device, port_num);
 119	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 120
 121	return entry;
 122}
 123
 124static inline u8 convert_mgmt_class(u8 mgmt_class)
 125{
 126	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
 127	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
 128		0 : mgmt_class;
 129}
 130
 131static int get_spl_qp_index(enum ib_qp_type qp_type)
 132{
 133	switch (qp_type)
 134	{
 135	case IB_QPT_SMI:
 136		return 0;
 137	case IB_QPT_GSI:
 138		return 1;
 139	default:
 140		return -1;
 141	}
 142}
 143
 144static int vendor_class_index(u8 mgmt_class)
 145{
 146	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
 147}
 148
 149static int is_vendor_class(u8 mgmt_class)
 150{
 151	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
 152	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
 153		return 0;
 154	return 1;
 155}
 156
 157static int is_vendor_oui(char *oui)
 158{
 159	if (oui[0] || oui[1] || oui[2])
 160		return 1;
 161	return 0;
 162}
 163
 164static int is_vendor_method_in_use(
 165		struct ib_mad_mgmt_vendor_class *vendor_class,
 166		struct ib_mad_reg_req *mad_reg_req)
 167{
 168	struct ib_mad_mgmt_method_table *method;
 169	int i;
 170
 171	for (i = 0; i < MAX_MGMT_OUI; i++) {
 172		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
 173			method = vendor_class->method_table[i];
 174			if (method) {
 175				if (method_in_use(&method, mad_reg_req))
 176					return 1;
 177				else
 178					break;
 179			}
 180		}
 181	}
 182	return 0;
 183}
 184
 185int ib_response_mad(const struct ib_mad_hdr *hdr)
 186{
 187	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
 188		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
 189		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
 190		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
 191}
 192EXPORT_SYMBOL(ib_response_mad);
 193
 194/*
 195 * ib_register_mad_agent - Register to send/receive MADs
 196 */
 197struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 198					   u8 port_num,
 199					   enum ib_qp_type qp_type,
 200					   struct ib_mad_reg_req *mad_reg_req,
 201					   u8 rmpp_version,
 202					   ib_mad_send_handler send_handler,
 203					   ib_mad_recv_handler recv_handler,
 204					   void *context,
 205					   u32 registration_flags)
 206{
 207	struct ib_mad_port_private *port_priv;
 208	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
 209	struct ib_mad_agent_private *mad_agent_priv;
 210	struct ib_mad_reg_req *reg_req = NULL;
 211	struct ib_mad_mgmt_class_table *class;
 212	struct ib_mad_mgmt_vendor_class_table *vendor;
 213	struct ib_mad_mgmt_vendor_class *vendor_class;
 214	struct ib_mad_mgmt_method_table *method;
 215	int ret2, qpn;
 216	unsigned long flags;
 217	u8 mgmt_class, vclass;
 218
 219	/* Validate parameters */
 220	qpn = get_spl_qp_index(qp_type);
 221	if (qpn == -1) {
 222		dev_notice(&device->dev,
 223			   "ib_register_mad_agent: invalid QP Type %d\n",
 224			   qp_type);
 225		goto error1;
 226	}
 227
 228	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
 229		dev_notice(&device->dev,
 230			   "ib_register_mad_agent: invalid RMPP Version %u\n",
 231			   rmpp_version);
 232		goto error1;
 233	}
 234
 235	/* Validate MAD registration request if supplied */
 236	if (mad_reg_req) {
 237		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
 238			dev_notice(&device->dev,
 239				   "ib_register_mad_agent: invalid Class Version %u\n",
 240				   mad_reg_req->mgmt_class_version);
 
 241			goto error1;
 242		}
 243		if (!recv_handler) {
 244			dev_notice(&device->dev,
 245				   "ib_register_mad_agent: no recv_handler\n");
 246			goto error1;
 247		}
 248		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
 249			/*
 250			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
 251			 * one in this range currently allowed
 252			 */
 253			if (mad_reg_req->mgmt_class !=
 254			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
 255				dev_notice(&device->dev,
 256					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
 257					   mad_reg_req->mgmt_class);
 258				goto error1;
 259			}
 260		} else if (mad_reg_req->mgmt_class == 0) {
 261			/*
 262			 * Class 0 is reserved in IBA and is used for
 263			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 264			 */
 265			dev_notice(&device->dev,
 266				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
 
 267			goto error1;
 268		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
 269			/*
 270			 * If class is in "new" vendor range,
 271			 * ensure supplied OUI is not zero
 272			 */
 273			if (!is_vendor_oui(mad_reg_req->oui)) {
 274				dev_notice(&device->dev,
 275					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
 276					   mad_reg_req->mgmt_class);
 
 277				goto error1;
 278			}
 279		}
 280		/* Make sure class supplied is consistent with RMPP */
 281		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
 282			if (rmpp_version) {
 283				dev_notice(&device->dev,
 284					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
 285					   mad_reg_req->mgmt_class);
 286				goto error1;
 287			}
 288		}
 289
 290		/* Make sure class supplied is consistent with QP type */
 291		if (qp_type == IB_QPT_SMI) {
 292			if ((mad_reg_req->mgmt_class !=
 293					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
 294			    (mad_reg_req->mgmt_class !=
 295					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
 296				dev_notice(&device->dev,
 297					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
 298					   mad_reg_req->mgmt_class);
 299				goto error1;
 300			}
 301		} else {
 302			if ((mad_reg_req->mgmt_class ==
 303					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
 304			    (mad_reg_req->mgmt_class ==
 305					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
 306				dev_notice(&device->dev,
 307					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
 308					   mad_reg_req->mgmt_class);
 309				goto error1;
 310			}
 311		}
 312	} else {
 313		/* No registration request supplied */
 314		if (!send_handler)
 315			goto error1;
 316		if (registration_flags & IB_MAD_USER_RMPP)
 317			goto error1;
 318	}
 319
 320	/* Validate device and port */
 321	port_priv = ib_get_mad_port(device, port_num);
 322	if (!port_priv) {
 323		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
 324		ret = ERR_PTR(-ENODEV);
 325		goto error1;
 326	}
 327
 328	/* Verify the QP requested is supported.  For example, Ethernet devices
 329	 * will not have QP0 */
 330	if (!port_priv->qp_info[qpn].qp) {
 331		dev_notice(&device->dev,
 332			   "ib_register_mad_agent: QP %d not supported\n", qpn);
 333		ret = ERR_PTR(-EPROTONOSUPPORT);
 334		goto error1;
 335	}
 336
 337	/* Allocate structures */
 338	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
 339	if (!mad_agent_priv) {
 340		ret = ERR_PTR(-ENOMEM);
 341		goto error1;
 342	}
 343
 344	if (mad_reg_req) {
 345		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
 346		if (!reg_req) {
 347			ret = ERR_PTR(-ENOMEM);
 348			goto error3;
 349		}
 350	}
 351
 352	/* Now, fill in the various structures */
 353	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
 354	mad_agent_priv->reg_req = reg_req;
 355	mad_agent_priv->agent.rmpp_version = rmpp_version;
 356	mad_agent_priv->agent.device = device;
 357	mad_agent_priv->agent.recv_handler = recv_handler;
 358	mad_agent_priv->agent.send_handler = send_handler;
 359	mad_agent_priv->agent.context = context;
 360	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
 361	mad_agent_priv->agent.port_num = port_num;
 362	mad_agent_priv->agent.flags = registration_flags;
 363	spin_lock_init(&mad_agent_priv->lock);
 364	INIT_LIST_HEAD(&mad_agent_priv->send_list);
 365	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
 366	INIT_LIST_HEAD(&mad_agent_priv->done_list);
 367	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
 368	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
 369	INIT_LIST_HEAD(&mad_agent_priv->local_list);
 370	INIT_WORK(&mad_agent_priv->local_work, local_completions);
 371	atomic_set(&mad_agent_priv->refcount, 1);
 372	init_completion(&mad_agent_priv->comp);
 373
 374	spin_lock_irqsave(&port_priv->reg_lock, flags);
 375	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
 376
 377	/*
 378	 * Make sure MAD registration (if supplied)
 379	 * is non overlapping with any existing ones
 380	 */
 381	if (mad_reg_req) {
 382		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
 383		if (!is_vendor_class(mgmt_class)) {
 384			class = port_priv->version[mad_reg_req->
 385						   mgmt_class_version].class;
 386			if (class) {
 387				method = class->method_table[mgmt_class];
 388				if (method) {
 389					if (method_in_use(&method,
 390							   mad_reg_req))
 391						goto error4;
 392				}
 393			}
 394			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
 395						  mgmt_class);
 396		} else {
 397			/* "New" vendor class range */
 398			vendor = port_priv->version[mad_reg_req->
 399						    mgmt_class_version].vendor;
 400			if (vendor) {
 401				vclass = vendor_class_index(mgmt_class);
 402				vendor_class = vendor->vendor_class[vclass];
 403				if (vendor_class) {
 404					if (is_vendor_method_in_use(
 405							vendor_class,
 406							mad_reg_req))
 407						goto error4;
 408				}
 409			}
 410			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
 411		}
 412		if (ret2) {
 413			ret = ERR_PTR(ret2);
 414			goto error4;
 415		}
 416	}
 417
 418	/* Add mad agent into port's agent list */
 419	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
 420	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 421
 422	return &mad_agent_priv->agent;
 423
 424error4:
 425	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 426	kfree(reg_req);
 427error3:
 428	kfree(mad_agent_priv);
 429error1:
 430	return ret;
 431}
 432EXPORT_SYMBOL(ib_register_mad_agent);
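/*
 * Editor's sketch (not part of this file): a minimal client registration
 * against ib_register_mad_agent() above.  The handler and function names,
 * the choice of the SA class/version and of the GET method are assumptions
 * for illustration only.
 */
static void example_send_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_buf *send_buf,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	/* Process the MAD, then return the receive buffers to the core */
	ib_free_recv_mad(mad_recv_wc);
}

static struct ib_mad_agent *example_register(struct ib_device *device,
					     u8 port_num, void *context)
{
	struct ib_mad_reg_req reg_req = {};

	reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	reg_req.mgmt_class_version = 2;
	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);

	/* Pair with ib_unregister_mad_agent() on teardown */
	return ib_register_mad_agent(device, port_num, IB_QPT_GSI, &reg_req,
				     0 /* no RMPP */, example_send_handler,
				     example_recv_handler, context, 0);
}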
 433
 434static inline int is_snooping_sends(int mad_snoop_flags)
 435{
 436	return (mad_snoop_flags &
 437		(/*IB_MAD_SNOOP_POSTED_SENDS |
 438		 IB_MAD_SNOOP_RMPP_SENDS |*/
 439		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
 440		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
 441}
 442
 443static inline int is_snooping_recvs(int mad_snoop_flags)
 444{
 445	return (mad_snoop_flags &
 446		(IB_MAD_SNOOP_RECVS /*|
 447		 IB_MAD_SNOOP_RMPP_RECVS*/));
 448}
 449
 450static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
 451				struct ib_mad_snoop_private *mad_snoop_priv)
 452{
 453	struct ib_mad_snoop_private **new_snoop_table;
 454	unsigned long flags;
 455	int i;
 456
 457	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 458	/* Check for empty slot in array. */
 459	for (i = 0; i < qp_info->snoop_table_size; i++)
 460		if (!qp_info->snoop_table[i])
 461			break;
 462
 463	if (i == qp_info->snoop_table_size) {
 464		/* Grow table. */
 465		new_snoop_table = krealloc(qp_info->snoop_table,
 466					   sizeof mad_snoop_priv *
 467					   (qp_info->snoop_table_size + 1),
 468					   GFP_ATOMIC);
 469		if (!new_snoop_table) {
 470			i = -ENOMEM;
 471			goto out;
 472		}
 473
 474		qp_info->snoop_table = new_snoop_table;
 475		qp_info->snoop_table_size++;
 476	}
 477	qp_info->snoop_table[i] = mad_snoop_priv;
 478	atomic_inc(&qp_info->snoop_count);
 479out:
 480	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 481	return i;
 482}
 483
 484struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
 485					   u8 port_num,
 486					   enum ib_qp_type qp_type,
 487					   int mad_snoop_flags,
 488					   ib_mad_snoop_handler snoop_handler,
 489					   ib_mad_recv_handler recv_handler,
 490					   void *context)
 491{
 492	struct ib_mad_port_private *port_priv;
 493	struct ib_mad_agent *ret;
 494	struct ib_mad_snoop_private *mad_snoop_priv;
 495	int qpn;
 496
 497	/* Validate parameters */
 498	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
 499	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
 500		ret = ERR_PTR(-EINVAL);
 501		goto error1;
 502	}
 503	qpn = get_spl_qp_index(qp_type);
 504	if (qpn == -1) {
 505		ret = ERR_PTR(-EINVAL);
 506		goto error1;
 507	}
 508	port_priv = ib_get_mad_port(device, port_num);
 509	if (!port_priv) {
 510		ret = ERR_PTR(-ENODEV);
 511		goto error1;
 512	}
 513	/* Allocate structures */
 514	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
 515	if (!mad_snoop_priv) {
 516		ret = ERR_PTR(-ENOMEM);
 517		goto error1;
 518	}
 519
 520	/* Now, fill in the various structures */
 521	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
 522	mad_snoop_priv->agent.device = device;
 523	mad_snoop_priv->agent.recv_handler = recv_handler;
 524	mad_snoop_priv->agent.snoop_handler = snoop_handler;
 525	mad_snoop_priv->agent.context = context;
 526	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
 527	mad_snoop_priv->agent.port_num = port_num;
 528	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
 529	init_completion(&mad_snoop_priv->comp);
 530	mad_snoop_priv->snoop_index = register_snoop_agent(
 531						&port_priv->qp_info[qpn],
 532						mad_snoop_priv);
 533	if (mad_snoop_priv->snoop_index < 0) {
 534		ret = ERR_PTR(mad_snoop_priv->snoop_index);
 535		goto error2;
 536	}
 537
 538	atomic_set(&mad_snoop_priv->refcount, 1);
 539	return &mad_snoop_priv->agent;
 540
 541error2:
 542	kfree(mad_snoop_priv);
 543error1:
 544	return ret;
 545}
 546EXPORT_SYMBOL(ib_register_mad_snoop);
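/*
 * Editor's sketch (not part of this file): registering a snoop agent that
 * observes send completions and receives on the GSI QP.  Handler names are
 * assumptions; a snooper only observes and must not free or complete the
 * buffers it is shown.
 */
static void example_snoop_send(struct ib_mad_agent *agent,
			       struct ib_mad_send_buf *send_buf,
			       struct ib_mad_send_wc *mad_send_wc)
{
	/* Observe only; the sending agent still owns send_buf */
}

static void example_snoop_recv(struct ib_mad_agent *agent,
			       struct ib_mad_send_buf *send_buf,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	/* Observe only; do not call ib_free_recv_mad() here */
}

static struct ib_mad_agent *example_register_snoop(struct ib_device *device,
						   u8 port_num, void *context)
{
	return ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
				     IB_MAD_SNOOP_SEND_COMPLETIONS |
				     IB_MAD_SNOOP_RECVS,
				     example_snoop_send, example_snoop_recv,
				     context);
}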
 547
 548static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 549{
 550	if (atomic_dec_and_test(&mad_agent_priv->refcount))
 551		complete(&mad_agent_priv->comp);
 552}
 553
 554static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
 555{
 556	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
 557		complete(&mad_snoop_priv->comp);
 558}
 559
 560static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 561{
 562	struct ib_mad_port_private *port_priv;
 563	unsigned long flags;
 564
 565	/* Note that we could still be handling received MADs */
 566
 567	/*
 568	 * Canceling all sends results in dropping received response
 569	 * MADs, preventing us from queuing additional work
 570	 */
 571	cancel_mads(mad_agent_priv);
 572	port_priv = mad_agent_priv->qp_info->port_priv;
 573	cancel_delayed_work(&mad_agent_priv->timed_work);
 574
 575	spin_lock_irqsave(&port_priv->reg_lock, flags);
 576	remove_mad_reg_req(mad_agent_priv);
 577	list_del(&mad_agent_priv->agent_list);
 578	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 579
 580	flush_workqueue(port_priv->wq);
 581	ib_cancel_rmpp_recvs(mad_agent_priv);
 582
 583	deref_mad_agent(mad_agent_priv);
 584	wait_for_completion(&mad_agent_priv->comp);
 585
 586	kfree(mad_agent_priv->reg_req);
 587	kfree(mad_agent_priv);
 588}
 589
 590static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
 591{
 592	struct ib_mad_qp_info *qp_info;
 593	unsigned long flags;
 594
 595	qp_info = mad_snoop_priv->qp_info;
 596	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 597	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
 598	atomic_dec(&qp_info->snoop_count);
 599	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 600
 601	deref_snoop_agent(mad_snoop_priv);
 602	wait_for_completion(&mad_snoop_priv->comp);
 603
 604	kfree(mad_snoop_priv);
 605}
 606
 607/*
 608 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 609 */
 610int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 611{
 612	struct ib_mad_agent_private *mad_agent_priv;
 613	struct ib_mad_snoop_private *mad_snoop_priv;
 614
 615	/* If the TID is zero, the agent can only snoop. */
 616	if (mad_agent->hi_tid) {
 617		mad_agent_priv = container_of(mad_agent,
 618					      struct ib_mad_agent_private,
 619					      agent);
 620		unregister_mad_agent(mad_agent_priv);
 621	} else {
 622		mad_snoop_priv = container_of(mad_agent,
 623					      struct ib_mad_snoop_private,
 624					      agent);
 625		unregister_mad_snoop(mad_snoop_priv);
 626	}
 627	return 0;
 628}
 629EXPORT_SYMBOL(ib_unregister_mad_agent);
 630
 631static void dequeue_mad(struct ib_mad_list_head *mad_list)
 632{
 633	struct ib_mad_queue *mad_queue;
 634	unsigned long flags;
 635
 636	BUG_ON(!mad_list->mad_queue);
 637	mad_queue = mad_list->mad_queue;
 638	spin_lock_irqsave(&mad_queue->lock, flags);
 639	list_del(&mad_list->list);
 640	mad_queue->count--;
 641	spin_unlock_irqrestore(&mad_queue->lock, flags);
 642}
 643
 644static void snoop_send(struct ib_mad_qp_info *qp_info,
 645		       struct ib_mad_send_buf *send_buf,
 646		       struct ib_mad_send_wc *mad_send_wc,
 647		       int mad_snoop_flags)
 648{
 649	struct ib_mad_snoop_private *mad_snoop_priv;
 650	unsigned long flags;
 651	int i;
 652
 653	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 654	for (i = 0; i < qp_info->snoop_table_size; i++) {
 655		mad_snoop_priv = qp_info->snoop_table[i];
 656		if (!mad_snoop_priv ||
 657		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
 658			continue;
 659
 660		atomic_inc(&mad_snoop_priv->refcount);
 661		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 662		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
 663						    send_buf, mad_send_wc);
 664		deref_snoop_agent(mad_snoop_priv);
 665		spin_lock_irqsave(&qp_info->snoop_lock, flags);
 666	}
 667	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 668}
 669
 670static void snoop_recv(struct ib_mad_qp_info *qp_info,
 671		       struct ib_mad_recv_wc *mad_recv_wc,
 672		       int mad_snoop_flags)
 673{
 674	struct ib_mad_snoop_private *mad_snoop_priv;
 675	unsigned long flags;
 676	int i;
 677
 678	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 679	for (i = 0; i < qp_info->snoop_table_size; i++) {
 680		mad_snoop_priv = qp_info->snoop_table[i];
 681		if (!mad_snoop_priv ||
 682		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
 683			continue;
 684
 685		atomic_inc(&mad_snoop_priv->refcount);
 686		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 687		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
 688						   mad_recv_wc);
 689		deref_snoop_agent(mad_snoop_priv);
 690		spin_lock_irqsave(&qp_info->snoop_lock, flags);
 691	}
 692	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 693}
 694
 695static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
 696		u16 pkey_index, u8 port_num, struct ib_wc *wc)
 697{
 698	memset(wc, 0, sizeof *wc);
 699	wc->wr_cqe = cqe;
 700	wc->status = IB_WC_SUCCESS;
 701	wc->opcode = IB_WC_RECV;
 702	wc->pkey_index = pkey_index;
 703	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
 704	wc->src_qp = IB_QP0;
 705	wc->qp = qp;
 706	wc->slid = slid;
 707	wc->sl = 0;
 708	wc->dlid_path_bits = 0;
 709	wc->port_num = port_num;
 710}
 711
 712static size_t mad_priv_size(const struct ib_mad_private *mp)
 713{
 714	return sizeof(struct ib_mad_private) + mp->mad_size;
 715}
 716
 717static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
 718{
 719	size_t size = sizeof(struct ib_mad_private) + mad_size;
 720	struct ib_mad_private *ret = kzalloc(size, flags);
 721
 722	if (ret)
 723		ret->mad_size = mad_size;
 724
 725	return ret;
 726}
 727
 728static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
 729{
 730	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
 731}
 732
 733static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
 734{
 735	return sizeof(struct ib_grh) + mp->mad_size;
 736}
 737
 738/*
 739 * Return 0 if SMP is to be sent
 740 * Return 1 if SMP was consumed locally (whether or not solicited)
 741 * Return < 0 if error
 742 */
 743static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 744				  struct ib_mad_send_wr_private *mad_send_wr)
 745{
 746	int ret = 0;
 747	struct ib_smp *smp = mad_send_wr->send_buf.mad;
 748	struct opa_smp *opa_smp = (struct opa_smp *)smp;
 749	unsigned long flags;
 750	struct ib_mad_local_private *local;
 751	struct ib_mad_private *mad_priv;
 752	struct ib_mad_port_private *port_priv;
 753	struct ib_mad_agent_private *recv_mad_agent = NULL;
 754	struct ib_device *device = mad_agent_priv->agent.device;
 755	u8 port_num;
 756	struct ib_wc mad_wc;
 757	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
 758	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
 759	u16 out_mad_pkey_index = 0;
 760	u16 drslid;
 761	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
 762				    mad_agent_priv->qp_info->port_priv->port_num);
 763
 764	if (rdma_cap_ib_switch(device) &&
 765	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
 766		port_num = send_wr->port_num;
 767	else
 768		port_num = mad_agent_priv->agent.port_num;
 769
 770	/*
 771	 * Directed route handling starts if the initial LID routed part of
 772	 * a request or the ending LID routed part of a response is empty.
 773	 * If we are at the start of the LID routed part, don't update the
 774	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
 775	 */
 776	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
 777		u32 opa_drslid;
 778
 779		if ((opa_get_smp_direction(opa_smp)
 780		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
 781		     OPA_LID_PERMISSIVE &&
 782		     opa_smi_handle_dr_smp_send(opa_smp,
 783						rdma_cap_ib_switch(device),
 784						port_num) == IB_SMI_DISCARD) {
 785			ret = -EINVAL;
 786			dev_err(&device->dev, "OPA Invalid directed route\n");
 787			goto out;
 788		}
 789		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
 790		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
 791		    opa_drslid & 0xffff0000) {
 792			ret = -EINVAL;
 793			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
 794			       opa_drslid);
 795			goto out;
 796		}
 797		drslid = (u16)(opa_drslid & 0x0000ffff);
 798
 799		/* Check to post send on QP or process locally */
 800		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
 801		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
 802			goto out;
 803	} else {
 804		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
 805		     IB_LID_PERMISSIVE &&
 806		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
 807		     IB_SMI_DISCARD) {
 808			ret = -EINVAL;
 809			dev_err(&device->dev, "Invalid directed route\n");
 810			goto out;
 811		}
 812		drslid = be16_to_cpu(smp->dr_slid);
 813
 814		/* Check to post send on QP or process locally */
 815		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
 816		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
 817			goto out;
 818	}
 819
 820	local = kmalloc(sizeof *local, GFP_ATOMIC);
 821	if (!local) {
 822		ret = -ENOMEM;
 823		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
 824		goto out;
 825	}
 826	local->mad_priv = NULL;
 827	local->recv_mad_agent = NULL;
 828	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
 829	if (!mad_priv) {
 830		ret = -ENOMEM;
 831		dev_err(&device->dev, "No memory for local response MAD\n");
 832		kfree(local);
 833		goto out;
 834	}
 835
 836	build_smp_wc(mad_agent_priv->agent.qp,
 837		     send_wr->wr.wr_cqe, drslid,
 838		     send_wr->pkey_index,
 839		     send_wr->port_num, &mad_wc);
 840
 841	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
 842		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
 843					+ mad_send_wr->send_buf.data_len
 844					+ sizeof(struct ib_grh);
 845	}
 846
 847	/* No GRH for DR SMP */
 848	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
 849				  (const struct ib_mad_hdr *)smp, mad_size,
 850				  (struct ib_mad_hdr *)mad_priv->mad,
 851				  &mad_size, &out_mad_pkey_index);
 852	switch (ret)
 853	{
 854	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
 855		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
 856		    mad_agent_priv->agent.recv_handler) {
 857			local->mad_priv = mad_priv;
 858			local->recv_mad_agent = mad_agent_priv;
 859			/*
 860			 * Reference MAD agent until receive
 861			 * side of local completion handled
 862			 */
 863			atomic_inc(&mad_agent_priv->refcount);
 864		} else
 865			kfree(mad_priv);
 866		break;
 867	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
 868		kfree(mad_priv);
 869		break;
 870	case IB_MAD_RESULT_SUCCESS:
 871		/* Treat like an incoming receive MAD */
 872		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
 873					    mad_agent_priv->agent.port_num);
 874		if (port_priv) {
 875			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
 876			recv_mad_agent = find_mad_agent(port_priv,
 877						        (const struct ib_mad_hdr *)mad_priv->mad);
 878		}
 879		if (!port_priv || !recv_mad_agent) {
 880			/*
 881			 * No receiving agent so drop packet and
 882			 * generate send completion.
 883			 */
 884			kfree(mad_priv);
 885			break;
 886		}
 887		local->mad_priv = mad_priv;
 888		local->recv_mad_agent = recv_mad_agent;
 889		break;
 890	default:
 891		kfree(mad_priv);
 892		kfree(local);
 893		ret = -EINVAL;
 894		goto out;
 895	}
 896
 897	local->mad_send_wr = mad_send_wr;
 898	if (opa) {
 899		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
 900		local->return_wc_byte_len = mad_size;
 901	}
 902	/* Reference MAD agent until send side of local completion handled */
 903	atomic_inc(&mad_agent_priv->refcount);
 904	/* Queue local completion to local list */
 905	spin_lock_irqsave(&mad_agent_priv->lock, flags);
 906	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
 907	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 908	queue_work(mad_agent_priv->qp_info->port_priv->wq,
 909		   &mad_agent_priv->local_work);
 910	ret = 1;
 911out:
 912	return ret;
 913}
 914
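/*
 * Editor's note, worked example (values illustrative): for a 2048-byte IB
 * MAD with hdr_len = IB_MGMT_SA_HDR (56) and data_len = 200, seg_size is
 * 1992, so the helper below returns 1992 - (200 % 1992) = 1792 bytes of
 * padding to fill out the single data segment.
 */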
 915static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
 916{
 917	int seg_size, pad;
 918
 919	seg_size = mad_size - hdr_len;
 920	if (data_len && seg_size) {
 921		pad = seg_size - data_len % seg_size;
 922		return pad == seg_size ? 0 : pad;
 923	} else
 924		return seg_size;
 925}
 926
 927static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
 928{
 929	struct ib_rmpp_segment *s, *t;
 930
 931	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
 932		list_del(&s->list);
 933		kfree(s);
 934	}
 935}
 936
 937static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 938				size_t mad_size, gfp_t gfp_mask)
 939{
 940	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
 941	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
 942	struct ib_rmpp_segment *seg = NULL;
 943	int left, seg_size, pad;
 944
 945	send_buf->seg_size = mad_size - send_buf->hdr_len;
 946	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
 947	seg_size = send_buf->seg_size;
 948	pad = send_wr->pad;
 949
 950	/* Allocate data segments. */
 951	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
 952		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
 953		if (!seg) {
 954			dev_err(&send_buf->mad_agent->device->dev,
 955				"alloc_send_rmpp_list: RMPP mem alloc failed for len %zd, gfp %#x\n",
 956				sizeof (*seg) + seg_size, gfp_mask);
 957			free_send_rmpp_list(send_wr);
 958			return -ENOMEM;
 959		}
 960		seg->num = ++send_buf->seg_count;
 961		list_add_tail(&seg->list, &send_wr->rmpp_list);
 962	}
 963
 964	/* Zero any padding */
 965	if (pad)
 966		memset(seg->data + seg_size - pad, 0, pad);
 967
 968	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
 969					  agent.rmpp_version;
 970	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
 971	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
 972
 973	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
 974					struct ib_rmpp_segment, list);
 975	send_wr->last_ack_seg = send_wr->cur_seg;
 976	return 0;
 977}
 978
 979int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
 980{
 981	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
 982}
 983EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
 984
 985struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
 986					    u32 remote_qpn, u16 pkey_index,
 987					    int rmpp_active,
 988					    int hdr_len, int data_len,
 989					    gfp_t gfp_mask,
 990					    u8 base_version)
 991{
 992	struct ib_mad_agent_private *mad_agent_priv;
 993	struct ib_mad_send_wr_private *mad_send_wr;
 994	int pad, message_size, ret, size;
 995	void *buf;
 996	size_t mad_size;
 997	bool opa;
 998
 999	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
1000				      agent);
1001
1002	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
1003
1004	if (opa && base_version == OPA_MGMT_BASE_VERSION)
1005		mad_size = sizeof(struct opa_mad);
1006	else
1007		mad_size = sizeof(struct ib_mad);
1008
1009	pad = get_pad_size(hdr_len, data_len, mad_size);
1010	message_size = hdr_len + data_len + pad;
1011
1012	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1013		if (!rmpp_active && message_size > mad_size)
1014			return ERR_PTR(-EINVAL);
1015	} else
1016		if (rmpp_active || message_size > mad_size)
1017			return ERR_PTR(-EINVAL);
1018
1019	size = rmpp_active ? hdr_len : mad_size;
1020	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1021	if (!buf)
1022		return ERR_PTR(-ENOMEM);
1023
1024	mad_send_wr = buf + size;
1025	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1026	mad_send_wr->send_buf.mad = buf;
1027	mad_send_wr->send_buf.hdr_len = hdr_len;
1028	mad_send_wr->send_buf.data_len = data_len;
1029	mad_send_wr->pad = pad;
1030
1031	mad_send_wr->mad_agent_priv = mad_agent_priv;
1032	mad_send_wr->sg_list[0].length = hdr_len;
1033	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
1034
1035	/* OPA MADs don't have to be the full 2048 bytes */
1036	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1037	    data_len < mad_size - hdr_len)
1038		mad_send_wr->sg_list[1].length = data_len;
1039	else
1040		mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1041
1042	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
1043
1044	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1045
1046	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1047	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
1048	mad_send_wr->send_wr.wr.num_sge = 2;
1049	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
1050	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
1051	mad_send_wr->send_wr.remote_qpn = remote_qpn;
1052	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
1053	mad_send_wr->send_wr.pkey_index = pkey_index;
1054
1055	if (rmpp_active) {
1056		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1057		if (ret) {
1058			kfree(buf);
1059			return ERR_PTR(ret);
1060		}
1061	}
1062
1063	mad_send_wr->send_buf.mad_agent = mad_agent;
1064	atomic_inc(&mad_agent_priv->refcount);
1065	return &mad_send_wr->send_buf;
1066}
1067EXPORT_SYMBOL(ib_create_send_mad);
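/*
 * Editor's sketch (not part of this file): allocating a non-RMPP send
 * buffer for an SA request with ib_create_send_mad() above.  The remote
 * QPN (QP1) and pkey index 0 are illustrative assumptions.
 */
static struct ib_mad_send_buf *example_alloc_sa_mad(struct ib_mad_agent *agent)
{
	return ib_create_send_mad(agent, 1 /* QP1 */, 0 /* pkey_index */,
				  0 /* no RMPP */, IB_MGMT_SA_HDR,
				  IB_MGMT_SA_DATA, GFP_KERNEL,
				  IB_MGMT_BASE_VERSION);
}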
1068
1069int ib_get_mad_data_offset(u8 mgmt_class)
1070{
1071	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1072		return IB_MGMT_SA_HDR;
1073	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1074		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1075		 (mgmt_class == IB_MGMT_CLASS_BIS))
1076		return IB_MGMT_DEVICE_HDR;
1077	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1078		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1079		return IB_MGMT_VENDOR_HDR;
1080	else
1081		return IB_MGMT_MAD_HDR;
1082}
1083EXPORT_SYMBOL(ib_get_mad_data_offset);
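/*
 * Editor's note: for non-RMPP buffers the offset above is typically what a
 * caller passes as hdr_len to ib_create_send_mad(), so the class payload
 * starts at (u8 *)send_buf->mad + ib_get_mad_data_offset(mgmt_class).
 */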
1084
1085int ib_is_mad_class_rmpp(u8 mgmt_class)
1086{
1087	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1088	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1089	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1090	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
1091	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1092	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1093		return 1;
1094	return 0;
1095}
1096EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1097
1098void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1099{
1100	struct ib_mad_send_wr_private *mad_send_wr;
1101	struct list_head *list;
1102
1103	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1104				   send_buf);
1105	list = &mad_send_wr->cur_seg->list;
1106
1107	if (mad_send_wr->cur_seg->num < seg_num) {
1108		list_for_each_entry(mad_send_wr->cur_seg, list, list)
1109			if (mad_send_wr->cur_seg->num == seg_num)
1110				break;
1111	} else if (mad_send_wr->cur_seg->num > seg_num) {
1112		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1113			if (mad_send_wr->cur_seg->num == seg_num)
1114				break;
1115	}
1116	return mad_send_wr->cur_seg->data;
1117}
1118EXPORT_SYMBOL(ib_get_rmpp_segment);
1119
1120static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1121{
1122	if (mad_send_wr->send_buf.seg_count)
1123		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1124					   mad_send_wr->seg_num);
1125	else
1126		return mad_send_wr->send_buf.mad +
1127		       mad_send_wr->send_buf.hdr_len;
1128}
1129
1130void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1131{
1132	struct ib_mad_agent_private *mad_agent_priv;
1133	struct ib_mad_send_wr_private *mad_send_wr;
1134
1135	mad_agent_priv = container_of(send_buf->mad_agent,
1136				      struct ib_mad_agent_private, agent);
1137	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1138				   send_buf);
1139
1140	free_send_rmpp_list(mad_send_wr);
1141	kfree(send_buf->mad);
1142	deref_mad_agent(mad_agent_priv);
1143}
1144EXPORT_SYMBOL(ib_free_send_mad);
1145
1146int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1147{
1148	struct ib_mad_qp_info *qp_info;
1149	struct list_head *list;
1150	struct ib_send_wr *bad_send_wr;
1151	struct ib_mad_agent *mad_agent;
1152	struct ib_sge *sge;
1153	unsigned long flags;
1154	int ret;
1155
1156	/* Set WR ID to find mad_send_wr upon completion */
1157	qp_info = mad_send_wr->mad_agent_priv->qp_info;
1158	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1159	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1160	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1161
1162	mad_agent = mad_send_wr->send_buf.mad_agent;
1163	sge = mad_send_wr->sg_list;
1164	sge[0].addr = ib_dma_map_single(mad_agent->device,
1165					mad_send_wr->send_buf.mad,
1166					sge[0].length,
1167					DMA_TO_DEVICE);
1168	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1169		return -ENOMEM;
1170
1171	mad_send_wr->header_mapping = sge[0].addr;
1172
1173	sge[1].addr = ib_dma_map_single(mad_agent->device,
1174					ib_get_payload(mad_send_wr),
1175					sge[1].length,
1176					DMA_TO_DEVICE);
1177	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1178		ib_dma_unmap_single(mad_agent->device,
1179				    mad_send_wr->header_mapping,
1180				    sge[0].length, DMA_TO_DEVICE);
1181		return -ENOMEM;
1182	}
1183	mad_send_wr->payload_mapping = sge[1].addr;
1184
1185	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1186	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1187		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1188				   &bad_send_wr);
1189		list = &qp_info->send_queue.list;
1190	} else {
1191		ret = 0;
1192		list = &qp_info->overflow_list;
1193	}
1194
1195	if (!ret) {
1196		qp_info->send_queue.count++;
1197		list_add_tail(&mad_send_wr->mad_list.list, list);
1198	}
1199	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1200	if (ret) {
1201		ib_dma_unmap_single(mad_agent->device,
1202				    mad_send_wr->header_mapping,
1203				    sge[0].length, DMA_TO_DEVICE);
1204		ib_dma_unmap_single(mad_agent->device,
1205				    mad_send_wr->payload_mapping,
1206				    sge[1].length, DMA_TO_DEVICE);
1207	}
1208	return ret;
1209}
1210
1211/*
1212 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1213 *  with the registered client
1214 */
1215int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1216		     struct ib_mad_send_buf **bad_send_buf)
1217{
1218	struct ib_mad_agent_private *mad_agent_priv;
1219	struct ib_mad_send_buf *next_send_buf;
1220	struct ib_mad_send_wr_private *mad_send_wr;
1221	unsigned long flags;
1222	int ret = -EINVAL;
1223
1224	/* Walk list of send WRs and post each on send list */
1225	for (; send_buf; send_buf = next_send_buf) {
1226
1227		mad_send_wr = container_of(send_buf,
1228					   struct ib_mad_send_wr_private,
1229					   send_buf);
1230		mad_agent_priv = mad_send_wr->mad_agent_priv;
1231
1232		if (!send_buf->mad_agent->send_handler ||
1233		    (send_buf->timeout_ms &&
1234		     !send_buf->mad_agent->recv_handler)) {
1235			ret = -EINVAL;
1236			goto error;
1237		}
1238
1239		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1240			if (mad_agent_priv->agent.rmpp_version) {
1241				ret = -EINVAL;
1242				goto error;
1243			}
1244		}
1245
1246		/*
1247		 * Save pointer to next work request to post in case the
1248		 * current one completes, and the user modifies the work
1249		 * request associated with the completion
1250		 */
1251		next_send_buf = send_buf->next;
1252		mad_send_wr->send_wr.ah = send_buf->ah;
1253
1254		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1255		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1256			ret = handle_outgoing_dr_smp(mad_agent_priv,
1257						     mad_send_wr);
1258			if (ret < 0)		/* error */
1259				goto error;
1260			else if (ret == 1)	/* locally consumed */
1261				continue;
1262		}
1263
1264		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1265		/* Timeout will be updated after send completes */
1266		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1267		mad_send_wr->max_retries = send_buf->retries;
1268		mad_send_wr->retries_left = send_buf->retries;
1269		send_buf->retries = 0;
1270		/* Reference for work request to QP + response */
1271		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1272		mad_send_wr->status = IB_WC_SUCCESS;
1273
1274		/* Reference MAD agent until send completes */
1275		atomic_inc(&mad_agent_priv->refcount);
1276		spin_lock_irqsave(&mad_agent_priv->lock, flags);
1277		list_add_tail(&mad_send_wr->agent_list,
1278			      &mad_agent_priv->send_list);
1279		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1280
1281		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1282			ret = ib_send_rmpp_mad(mad_send_wr);
1283			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1284				ret = ib_send_mad(mad_send_wr);
1285		} else
1286			ret = ib_send_mad(mad_send_wr);
1287		if (ret < 0) {
1288			/* Fail send request */
1289			spin_lock_irqsave(&mad_agent_priv->lock, flags);
1290			list_del(&mad_send_wr->agent_list);
1291			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1292			atomic_dec(&mad_agent_priv->refcount);
1293			goto error;
1294		}
1295	}
1296	return 0;
1297error:
1298	if (bad_send_buf)
1299		*bad_send_buf = send_buf;
1300	return ret;
1301}
1302EXPORT_SYMBOL(ib_post_send_mad);
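/*
 * Editor's sketch (not part of this file): posting a prepared send buffer.
 * The address handle and the already-filled payload are assumed to come
 * from the caller; on failure the buffer is still owned by the caller and
 * is freed here.
 */
static int example_post(struct ib_mad_send_buf *msg, struct ib_ah *ah)
{
	int ret;

	msg->ah = ah;
	msg->timeout_ms = 1000;	/* wait up to 1s for a response */
	msg->retries = 3;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
	return ret;
}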
1303
1304/*
1305 * ib_free_recv_mad - Returns data buffers used to receive
1306 *  a MAD to the access layer
1307 */
1308void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1309{
1310	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1311	struct ib_mad_private_header *mad_priv_hdr;
1312	struct ib_mad_private *priv;
1313	struct list_head free_list;
1314
1315	INIT_LIST_HEAD(&free_list);
1316	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1317
1318	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1319					&free_list, list) {
1320		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1321					   recv_buf);
1322		mad_priv_hdr = container_of(mad_recv_wc,
1323					    struct ib_mad_private_header,
1324					    recv_wc);
1325		priv = container_of(mad_priv_hdr, struct ib_mad_private,
1326				    header);
1327		kfree(priv);
1328	}
1329}
1330EXPORT_SYMBOL(ib_free_recv_mad);
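/*
 * Editor's note: an ordinary (non-snoop) agent's recv_handler is expected
 * to call ib_free_recv_mad() once per delivered ib_mad_recv_wc; the
 * rmpp_list splice above releases every buffer of a reassembled RMPP
 * transfer in a single call.
 */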
1331
1332struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1333					u8 rmpp_version,
1334					ib_mad_send_handler send_handler,
1335					ib_mad_recv_handler recv_handler,
1336					void *context)
1337{
1338	return ERR_PTR(-EINVAL);	/* XXX: for now */
1339}
1340EXPORT_SYMBOL(ib_redirect_mad_qp);
1341
1342int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1343		      struct ib_wc *wc)
1344{
1345	dev_err(&mad_agent->device->dev,
1346		"ib_process_mad_wc() not implemented yet\n");
1347	return 0;
1348}
1349EXPORT_SYMBOL(ib_process_mad_wc);
1350
1351static int method_in_use(struct ib_mad_mgmt_method_table **method,
1352			 struct ib_mad_reg_req *mad_reg_req)
1353{
1354	int i;
1355
1356	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1357		if ((*method)->agent[i]) {
1358			pr_err("Method %d already in use\n", i);
1359			return -EINVAL;
1360		}
1361	}
1362	return 0;
1363}
1364
1365static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1366{
1367	/* Allocate management method table */
1368	*method = kzalloc(sizeof **method, GFP_ATOMIC);
1369	if (!*method) {
1370		pr_err("No memory for ib_mad_mgmt_method_table\n");
1371		return -ENOMEM;
1372	}
1373
1374	return 0;
1375}
1376
1377/*
1378 * Check to see if there are any methods still in use
1379 */
1380static int check_method_table(struct ib_mad_mgmt_method_table *method)
1381{
1382	int i;
1383
1384	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1385		if (method->agent[i])
1386			return 1;
1387	return 0;
1388}
1389
1390/*
1391 * Check to see if there are any method tables for this class still in use
1392 */
1393static int check_class_table(struct ib_mad_mgmt_class_table *class)
1394{
1395	int i;
1396
1397	for (i = 0; i < MAX_MGMT_CLASS; i++)
1398		if (class->method_table[i])
1399			return 1;
1400	return 0;
1401}
1402
1403static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1404{
1405	int i;
1406
1407	for (i = 0; i < MAX_MGMT_OUI; i++)
1408		if (vendor_class->method_table[i])
1409			return 1;
1410	return 0;
1411}
1412
1413static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1414			   const char *oui)
1415{
1416	int i;
1417
1418	for (i = 0; i < MAX_MGMT_OUI; i++)
1419		/* Is there a matching OUI for this vendor class? */
1420		if (!memcmp(vendor_class->oui[i], oui, 3))
1421			return i;
1422
1423	return -1;
1424}
1425
1426static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1427{
1428	int i;
1429
1430	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1431		if (vendor->vendor_class[i])
1432			return 1;
1433
1434	return 0;
1435}
1436
1437static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1438				     struct ib_mad_agent_private *agent)
1439{
1440	int i;
1441
1442	/* Remove any methods for this mad agent */
1443	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1444		if (method->agent[i] == agent) {
1445			method->agent[i] = NULL;
1446		}
1447	}
1448}
1449
1450static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1451			      struct ib_mad_agent_private *agent_priv,
1452			      u8 mgmt_class)
1453{
1454	struct ib_mad_port_private *port_priv;
1455	struct ib_mad_mgmt_class_table **class;
1456	struct ib_mad_mgmt_method_table **method;
1457	int i, ret;
1458
1459	port_priv = agent_priv->qp_info->port_priv;
1460	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1461	if (!*class) {
1462		/* Allocate management class table for "new" class version */
1463		*class = kzalloc(sizeof **class, GFP_ATOMIC);
1464		if (!*class) {
1465			dev_err(&agent_priv->agent.device->dev,
1466				"No memory for ib_mad_mgmt_class_table\n");
1467			ret = -ENOMEM;
1468			goto error1;
1469		}
1470
1471		/* Allocate method table for this management class */
1472		method = &(*class)->method_table[mgmt_class];
1473		if ((ret = allocate_method_table(method)))
1474			goto error2;
1475	} else {
1476		method = &(*class)->method_table[mgmt_class];
1477		if (!*method) {
1478			/* Allocate method table for this management class */
1479			if ((ret = allocate_method_table(method)))
1480				goto error1;
1481		}
1482	}
1483
1484	/* Now, make sure methods are not already in use */
1485	if (method_in_use(method, mad_reg_req))
1486		goto error3;
1487
1488	/* Finally, add in methods being registered */
1489	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1490		(*method)->agent[i] = agent_priv;
1491
1492	return 0;
1493
1494error3:
1495	/* Remove any methods for this mad agent */
1496	remove_methods_mad_agent(*method, agent_priv);
1497	/* Now, check to see if there are any methods in use */
1498	if (!check_method_table(*method)) {
1499		/* If not, release management method table */
1500		kfree(*method);
1501		*method = NULL;
1502	}
1503	ret = -EINVAL;
1504	goto error1;
1505error2:
1506	kfree(*class);
1507	*class = NULL;
1508error1:
1509	return ret;
1510}
1511
1512static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1513			   struct ib_mad_agent_private *agent_priv)
1514{
1515	struct ib_mad_port_private *port_priv;
1516	struct ib_mad_mgmt_vendor_class_table **vendor_table;
1517	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1518	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1519	struct ib_mad_mgmt_method_table **method;
1520	int i, ret = -ENOMEM;
1521	u8 vclass;
1522
1523	/* "New" vendor (with OUI) class */
1524	vclass = vendor_class_index(mad_reg_req->mgmt_class);
1525	port_priv = agent_priv->qp_info->port_priv;
1526	vendor_table = &port_priv->version[
1527				mad_reg_req->mgmt_class_version].vendor;
1528	if (!*vendor_table) {
1529		/* Allocate mgmt vendor class table for "new" class version */
1530		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1531		if (!vendor) {
1532			dev_err(&agent_priv->agent.device->dev,
1533				"No memory for ib_mad_mgmt_vendor_class_table\n");
1534			goto error1;
1535		}
1536
1537		*vendor_table = vendor;
1538	}
1539	if (!(*vendor_table)->vendor_class[vclass]) {
1540		/* Allocate table for this management vendor class */
1541		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1542		if (!vendor_class) {
1543			dev_err(&agent_priv->agent.device->dev,
1544				"No memory for ib_mad_mgmt_vendor_class\n");
1545			goto error2;
1546		}
1547
1548		(*vendor_table)->vendor_class[vclass] = vendor_class;
1549	}
1550	for (i = 0; i < MAX_MGMT_OUI; i++) {
1551		/* Is there a matching OUI for this vendor class? */
1552		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1553			    mad_reg_req->oui, 3)) {
1554			method = &(*vendor_table)->vendor_class[
1555						vclass]->method_table[i];
1556			BUG_ON(!*method);
1557			goto check_in_use;
1558		}
1559	}
1560	for (i = 0; i < MAX_MGMT_OUI; i++) {
1561		/* OUI slot available? */
1562		if (!is_vendor_oui((*vendor_table)->vendor_class[
1563				vclass]->oui[i])) {
1564			method = &(*vendor_table)->vendor_class[
1565				vclass]->method_table[i];
1566			BUG_ON(*method);
1567			/* Allocate method table for this OUI */
1568			if ((ret = allocate_method_table(method)))
1569				goto error3;
1570			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1571			       mad_reg_req->oui, 3);
1572			goto check_in_use;
1573		}
1574	}
1575	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1576	goto error3;
1577
1578check_in_use:
1579	/* Now, make sure methods are not already in use */
1580	if (method_in_use(method, mad_reg_req))
1581		goto error4;
1582
1583	/* Finally, add in methods being registered */
1584	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1585		(*method)->agent[i] = agent_priv;
1586
1587	return 0;
1588
1589error4:
1590	/* Remove any methods for this mad agent */
1591	remove_methods_mad_agent(*method, agent_priv);
1592	/* Now, check to see if there are any methods in use */
1593	if (!check_method_table(*method)) {
1594		/* If not, release management method table */
1595		kfree(*method);
1596		*method = NULL;
1597	}
1598	ret = -EINVAL;
1599error3:
1600	if (vendor_class) {
1601		(*vendor_table)->vendor_class[vclass] = NULL;
1602		kfree(vendor_class);
1603	}
1604error2:
1605	if (vendor) {
1606		*vendor_table = NULL;
1607		kfree(vendor);
1608	}
1609error1:
1610	return ret;
1611}
1612
1613static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1614{
1615	struct ib_mad_port_private *port_priv;
1616	struct ib_mad_mgmt_class_table *class;
1617	struct ib_mad_mgmt_method_table *method;
1618	struct ib_mad_mgmt_vendor_class_table *vendor;
1619	struct ib_mad_mgmt_vendor_class *vendor_class;
1620	int index;
1621	u8 mgmt_class;
1622
1623	/*
1624	 * Was a MAD registration request supplied
1625	 * with the original registration?
1626	 */
1627	if (!agent_priv->reg_req) {
1628		goto out;
1629	}
1630
1631	port_priv = agent_priv->qp_info->port_priv;
1632	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1633	class = port_priv->version[
1634			agent_priv->reg_req->mgmt_class_version].class;
1635	if (!class)
1636		goto vendor_check;
1637
1638	method = class->method_table[mgmt_class];
1639	if (method) {
1640		/* Remove any methods for this mad agent */
1641		remove_methods_mad_agent(method, agent_priv);
1642		/* Now, check to see if there are any methods still in use */
1643		if (!check_method_table(method)) {
1644			/* If not, release management method table */
1645			kfree(method);
1646			class->method_table[mgmt_class] = NULL;
1647			/* Any management classes left? */
1648			if (!check_class_table(class)) {
1649				/* If not, release management class table */
1650				kfree(class);
1651				port_priv->version[
1652					agent_priv->reg_req->
1653					mgmt_class_version].class = NULL;
1654			}
1655		}
1656	}
1657
1658vendor_check:
1659	if (!is_vendor_class(mgmt_class))
1660		goto out;
1661
1662	/* normalize mgmt_class to vendor range 2 */
1663	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1664	vendor = port_priv->version[
1665			agent_priv->reg_req->mgmt_class_version].vendor;
1666
1667	if (!vendor)
1668		goto out;
1669
1670	vendor_class = vendor->vendor_class[mgmt_class];
1671	if (vendor_class) {
1672		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1673		if (index < 0)
1674			goto out;
1675		method = vendor_class->method_table[index];
1676		if (method) {
1677			/* Remove any methods for this mad agent */
1678			remove_methods_mad_agent(method, agent_priv);
1679			/*
1680			 * Now, check to see if there are
1681			 * any methods still in use
1682			 */
1683			if (!check_method_table(method)) {
1684				/* If not, release management method table */
1685				kfree(method);
1686				vendor_class->method_table[index] = NULL;
1687				memset(vendor_class->oui[index], 0, 3);
1688				/* Any OUIs left? */
1689				if (!check_vendor_class(vendor_class)) {
1690					/* If not, release vendor class table */
1691					kfree(vendor_class);
1692					vendor->vendor_class[mgmt_class] = NULL;
1693					/* Any other vendor classes left? */
1694					if (!check_vendor_table(vendor)) {
1695						kfree(vendor);
1696						port_priv->version[
1697							agent_priv->reg_req->
1698							mgmt_class_version].
1699							vendor = NULL;
1700					}
1701				}
1702			}
1703		}
1704	}
1705
1706out:
1707	return;
1708}
1709
1710static struct ib_mad_agent_private *
1711find_mad_agent(struct ib_mad_port_private *port_priv,
1712	       const struct ib_mad_hdr *mad_hdr)
1713{
1714	struct ib_mad_agent_private *mad_agent = NULL;
1715	unsigned long flags;
1716
1717	spin_lock_irqsave(&port_priv->reg_lock, flags);
1718	if (ib_response_mad(mad_hdr)) {
1719		u32 hi_tid;
1720		struct ib_mad_agent_private *entry;
1721
1722		/*
1723		 * Routing is based on high 32 bits of transaction ID
1724		 * of MAD.
1725		 */
1726		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1727		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1728			if (entry->agent.hi_tid == hi_tid) {
1729				mad_agent = entry;
1730				break;
1731			}
1732		}
1733	} else {
1734		struct ib_mad_mgmt_class_table *class;
1735		struct ib_mad_mgmt_method_table *method;
1736		struct ib_mad_mgmt_vendor_class_table *vendor;
1737		struct ib_mad_mgmt_vendor_class *vendor_class;
1738		const struct ib_vendor_mad *vendor_mad;
1739		int index;
1740
1741		/*
1742		 * Routing is based on version, class, and method
1743		 * For "newer" vendor MADs, also based on OUI
1744		 */
1745		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1746			goto out;
1747		if (!is_vendor_class(mad_hdr->mgmt_class)) {
1748			class = port_priv->version[
1749					mad_hdr->class_version].class;
1750			if (!class)
1751				goto out;
1752			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1753			    IB_MGMT_MAX_METHODS)
1754				goto out;
1755			method = class->method_table[convert_mgmt_class(
1756							mad_hdr->mgmt_class)];
1757			if (method)
1758				mad_agent = method->agent[mad_hdr->method &
1759							  ~IB_MGMT_METHOD_RESP];
1760		} else {
1761			vendor = port_priv->version[
1762					mad_hdr->class_version].vendor;
1763			if (!vendor)
1764				goto out;
1765			vendor_class = vendor->vendor_class[vendor_class_index(
1766						mad_hdr->mgmt_class)];
1767			if (!vendor_class)
1768				goto out;
1769			/* Find matching OUI */
1770			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1771			index = find_vendor_oui(vendor_class, vendor_mad->oui);
1772			if (index == -1)
1773				goto out;
1774			method = vendor_class->method_table[index];
1775			if (method) {
1776				mad_agent = method->agent[mad_hdr->method &
1777							  ~IB_MGMT_METHOD_RESP];
1778			}
1779		}
1780	}
1781
1782	if (mad_agent) {
1783		if (mad_agent->agent.recv_handler)
1784			atomic_inc(&mad_agent->refcount);
1785		else {
1786			dev_notice(&port_priv->device->dev,
1787				   "No receive handler for client %p on port %d\n",
1788				   &mad_agent->agent, port_priv->port_num);
1789			mad_agent = NULL;
1790		}
1791	}
1792out:
1793	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1794
1795	return mad_agent;
1796}
1797
1798static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1799			const struct ib_mad_qp_info *qp_info,
1800			bool opa)
1801{
1802	int valid = 0;
1803	u32 qp_num = qp_info->qp->qp_num;
1804
1805	/* Make sure MAD base version is understood */
1806	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1807	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1808		pr_err("MAD received with unsupported base version %d %s\n",
1809		       mad_hdr->base_version, opa ? "(opa)" : "");
1810		goto out;
1811	}
1812
1813	/* Filter SMI packets sent to other than QP0 */
1814	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1815	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1816		if (qp_num == 0)
1817			valid = 1;
1818	} else {
1819		/* CM attributes other than ClassPortInfo only use Send method */
1820		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1821		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1822		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
1823			goto out;
1824		/* Filter GSI packets sent to QP0 */
1825		if (qp_num != 0)
1826			valid = 1;
1827	}
1828
1829out:
1830	return valid;
1831}
1832
1833static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1834			    const struct ib_mad_hdr *mad_hdr)
1835{
1836	struct ib_rmpp_mad *rmpp_mad;
1837
1838	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1839	return !mad_agent_priv->agent.rmpp_version ||
1840		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1841		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1842				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
1843		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1844}
1845
1846static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1847				     const struct ib_mad_recv_wc *rwc)
1848{
1849	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1850		rwc->recv_buf.mad->mad_hdr.mgmt_class;
1851}
1852
1853static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1854				   const struct ib_mad_send_wr_private *wr,
1855				   const struct ib_mad_recv_wc *rwc)
1856{
1857	struct ib_ah_attr attr;
1858	u8 send_resp, rcv_resp;
1859	union ib_gid sgid;
1860	struct ib_device *device = mad_agent_priv->agent.device;
1861	u8 port_num = mad_agent_priv->agent.port_num;
1862	u8 lmc;
1863
1864	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1865	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1866
1867	if (send_resp == rcv_resp)
1868		/* both requests, or both responses. GIDs different */
1869		return 0;
1870
1871	if (ib_query_ah(wr->send_buf.ah, &attr))
1872		/* Assume not equal, to avoid false positives. */
1873		return 0;
1874
1875	if (!!(attr.ah_flags & IB_AH_GRH) !=
1876	    !!(rwc->wc->wc_flags & IB_WC_GRH))
1877		/* one has GID, other does not.  Assume different */
1878		return 0;
1879
1880	if (!send_resp && rcv_resp) {
1881		/* is request/response. */
1882		if (!(attr.ah_flags & IB_AH_GRH)) {
1883			if (ib_get_cached_lmc(device, port_num, &lmc))
1884				return 0;
1885			return (!lmc || !((attr.src_path_bits ^
1886					   rwc->wc->dlid_path_bits) &
1887					  ((1 << lmc) - 1)));
1888		} else {
1889			if (ib_get_cached_gid(device, port_num,
1890					      attr.grh.sgid_index, &sgid, NULL))
1891				return 0;
1892			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1893				       16);
1894		}
1895	}
1896
1897	if (!(attr.ah_flags & IB_AH_GRH))
1898		return attr.dlid == rwc->wc->slid;
1899	else
1900		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1901			       16);
1902}
1903
1904static inline int is_direct(u8 class)
1905{
1906	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1907}
1908
1909struct ib_mad_send_wr_private*
1910ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1911		 const struct ib_mad_recv_wc *wc)
1912{
1913	struct ib_mad_send_wr_private *wr;
1914	const struct ib_mad_hdr *mad_hdr;
1915
1916	mad_hdr = &wc->recv_buf.mad->mad_hdr;
1917
1918	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1919		if ((wr->tid == mad_hdr->tid) &&
1920		    rcv_has_same_class(wr, wc) &&
1921		    /*
1922		     * Don't check GID for direct routed MADs.
1923		     * These might have permissive LIDs.
1924		     */
1925		    (is_direct(mad_hdr->mgmt_class) ||
1926		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
1927			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1928	}
1929
1930	/*
1931	 * It's possible to receive the response before we've
1932	 * been notified that the send has completed
1933	 */
1934	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1935		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1936		    wr->tid == mad_hdr->tid &&
1937		    wr->timeout &&
1938		    rcv_has_same_class(wr, wc) &&
1939		    /*
1940		     * Don't check GID for direct routed MADs.
1941		     * These might have permissive LIDs.
1942		     */
1943		    (is_direct(mad_hdr->mgmt_class) ||
1944		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
1945			/* Verify request has not been canceled */
1946			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1947	}
1948	return NULL;
1949}
1950
1951void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1952{
1953	mad_send_wr->timeout = 0;
1954	if (mad_send_wr->refcount == 1)
1955		list_move_tail(&mad_send_wr->agent_list,
1956			      &mad_send_wr->mad_agent_priv->done_list);
1957}
1958
1959static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1960				 struct ib_mad_recv_wc *mad_recv_wc)
1961{
1962	struct ib_mad_send_wr_private *mad_send_wr;
1963	struct ib_mad_send_wc mad_send_wc;
1964	unsigned long flags;
1965
1966	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1967	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1968	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1969		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1970						      mad_recv_wc);
1971		if (!mad_recv_wc) {
1972			deref_mad_agent(mad_agent_priv);
1973			return;
1974		}
1975	}
1976
1977	/* Complete corresponding request */
1978	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1979		spin_lock_irqsave(&mad_agent_priv->lock, flags);
1980		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1981		if (!mad_send_wr) {
1982			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1983			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1984			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1985			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1986					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
1987				/* user rmpp is in effect
1988				 * and this is an active RMPP MAD
1989				 */
1990				mad_agent_priv->agent.recv_handler(
1991						&mad_agent_priv->agent, NULL,
1992						mad_recv_wc);
1993				atomic_dec(&mad_agent_priv->refcount);
1994			} else {
1995				/* not user rmpp, revert to normal behavior and
1996				 * drop the mad */
1997				ib_free_recv_mad(mad_recv_wc);
1998				deref_mad_agent(mad_agent_priv);
1999				return;
2000			}
2001		} else {
2002			ib_mark_mad_done(mad_send_wr);
2003			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2004
2005			/* Defined behavior is to complete response before request */
2006			mad_agent_priv->agent.recv_handler(
2007					&mad_agent_priv->agent,
2008					&mad_send_wr->send_buf,
2009					mad_recv_wc);
2010			atomic_dec(&mad_agent_priv->refcount);
2011
2012			mad_send_wc.status = IB_WC_SUCCESS;
2013			mad_send_wc.vendor_err = 0;
2014			mad_send_wc.send_buf = &mad_send_wr->send_buf;
2015			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2016		}
2017	} else {
2018		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
2019						   mad_recv_wc);
2020		deref_mad_agent(mad_agent_priv);
2021	}
2022}
2023
2024static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2025				     const struct ib_mad_qp_info *qp_info,
2026				     const struct ib_wc *wc,
2027				     int port_num,
2028				     struct ib_mad_private *recv,
2029				     struct ib_mad_private *response)
2030{
2031	enum smi_forward_action retsmi;
2032	struct ib_smp *smp = (struct ib_smp *)recv->mad;
2033
2034	if (smi_handle_dr_smp_recv(smp,
2035				   rdma_cap_ib_switch(port_priv->device),
2036				   port_num,
2037				   port_priv->device->phys_port_cnt) ==
2038				   IB_SMI_DISCARD)
2039		return IB_SMI_DISCARD;
2040
2041	retsmi = smi_check_forward_dr_smp(smp);
2042	if (retsmi == IB_SMI_LOCAL)
2043		return IB_SMI_HANDLE;
2044
2045	if (retsmi == IB_SMI_SEND) { /* don't forward */
2046		if (smi_handle_dr_smp_send(smp,
2047					   rdma_cap_ib_switch(port_priv->device),
2048					   port_num) == IB_SMI_DISCARD)
2049			return IB_SMI_DISCARD;
2050
2051		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2052			return IB_SMI_DISCARD;
2053	} else if (rdma_cap_ib_switch(port_priv->device)) {
2054		/* forward case for switches */
2055		memcpy(response, recv, mad_priv_size(response));
2056		response->header.recv_wc.wc = &response->header.wc;
2057		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2058		response->header.recv_wc.recv_buf.grh = &response->grh;
2059
2060		agent_send_response((const struct ib_mad_hdr *)response->mad,
2061				    &response->grh, wc,
2062				    port_priv->device,
2063				    smi_get_fwd_port(smp),
2064				    qp_info->qp->qp_num,
2065				    response->mad_size,
2066				    false);
2067
2068		return IB_SMI_DISCARD;
2069	}
2070	return IB_SMI_HANDLE;
2071}
2072
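/*
 * No agent claimed this MAD.  For Get/Set requests, build a GetResp in
 * the response buffer with status "unsupported method/attribute" so the
 * sender does not have to wait for a timeout.  Returns true if a
 * response was generated.
 */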
2073static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2074				    struct ib_mad_private *response,
2075				    size_t *resp_len, bool opa)
2076{
2077	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2078	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2079
2080	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2081	    recv_hdr->method == IB_MGMT_METHOD_SET) {
2082		memcpy(response, recv, mad_priv_size(response));
2083		response->header.recv_wc.wc = &response->header.wc;
2084		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2085		response->header.recv_wc.recv_buf.grh = &response->grh;
2086		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2087		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2088		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2089			resp_hdr->status |= IB_SMP_DIRECTION;
2090
2091		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2092			if (recv_hdr->mgmt_class ==
2093			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2094			    recv_hdr->mgmt_class ==
2095			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2096				*resp_len = opa_get_smp_header_size(
2097							(struct opa_smp *)recv->mad);
2098			else
2099				*resp_len = sizeof(struct ib_mad_hdr);
2100		}
2101
2102		return true;
2103	} else {
2104		return false;
2105	}
2106}
2107
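/*
 * OPA counterpart of handle_ib_smi(): the same directed-route handling,
 * but operating on the OPA SMP format.
 */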
2108static enum smi_action
2109handle_opa_smi(struct ib_mad_port_private *port_priv,
2110	       struct ib_mad_qp_info *qp_info,
2111	       struct ib_wc *wc,
2112	       int port_num,
2113	       struct ib_mad_private *recv,
2114	       struct ib_mad_private *response)
2115{
2116	enum smi_forward_action retsmi;
2117	struct opa_smp *smp = (struct opa_smp *)recv->mad;
2118
2119	if (opa_smi_handle_dr_smp_recv(smp,
2120				   rdma_cap_ib_switch(port_priv->device),
2121				   port_num,
2122				   port_priv->device->phys_port_cnt) ==
2123				   IB_SMI_DISCARD)
2124		return IB_SMI_DISCARD;
2125
2126	retsmi = opa_smi_check_forward_dr_smp(smp);
2127	if (retsmi == IB_SMI_LOCAL)
2128		return IB_SMI_HANDLE;
2129
2130	if (retsmi == IB_SMI_SEND) { /* don't forward */
2131		if (opa_smi_handle_dr_smp_send(smp,
2132					   rdma_cap_ib_switch(port_priv->device),
2133					   port_num) == IB_SMI_DISCARD)
2134			return IB_SMI_DISCARD;
2135
2136		if (opa_smi_check_local_smp(smp, port_priv->device) ==
2137		    IB_SMI_DISCARD)
2138			return IB_SMI_DISCARD;
2139
2140	} else if (rdma_cap_ib_switch(port_priv->device)) {
2141		/* forward case for switches */
2142		memcpy(response, recv, mad_priv_size(response));
2143		response->header.recv_wc.wc = &response->header.wc;
2144		response->header.recv_wc.recv_buf.opa_mad =
2145				(struct opa_mad *)response->mad;
2146		response->header.recv_wc.recv_buf.grh = &response->grh;
2147
2148		agent_send_response((const struct ib_mad_hdr *)response->mad,
2149				    &response->grh, wc,
2150				    port_priv->device,
2151				    opa_smi_get_fwd_port(smp),
2152				    qp_info->qp->qp_num,
2153				    recv->header.wc.byte_len,
2154				    true);
2155
2156		return IB_SMI_DISCARD;
2157	}
2158
2159	return IB_SMI_HANDLE;
2160}
2161
2162static enum smi_action
2163handle_smi(struct ib_mad_port_private *port_priv,
2164	   struct ib_mad_qp_info *qp_info,
2165	   struct ib_wc *wc,
2166	   int port_num,
2167	   struct ib_mad_private *recv,
2168	   struct ib_mad_private *response,
2169	   bool opa)
2170{
2171	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2172
2173	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2174	    mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
2175		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2176				      response);
2177
2178	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2179}
2180
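/*
 * Receive completion handler.  Unmaps the receive buffer, validates the
 * MAD, performs SMI and driver (process_mad) handling, and delivers the
 * MAD to the matching agent, then reposts a receive buffer for the QP.
 */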
2181static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2182{
2183	struct ib_mad_port_private *port_priv = cq->cq_context;
2184	struct ib_mad_list_head *mad_list =
2185		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2186	struct ib_mad_qp_info *qp_info;
2187	struct ib_mad_private_header *mad_priv_hdr;
2188	struct ib_mad_private *recv, *response = NULL;
2189	struct ib_mad_agent_private *mad_agent;
2190	int port_num;
2191	int ret = IB_MAD_RESULT_SUCCESS;
2192	size_t mad_size;
2193	u16 resp_mad_pkey_index = 0;
2194	bool opa;
2195
2196	if (list_empty_careful(&port_priv->port_list))
2197		return;
2198
2199	if (wc->status != IB_WC_SUCCESS) {
2200		/*
2201		 * Receive errors indicate that the QP has entered the error
2202		 * state - error handling/shutdown code will cleanup
2203		 */
2204		return;
2205	}
2206
2207	qp_info = mad_list->mad_queue->qp_info;
2208	dequeue_mad(mad_list);
2209
2210	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2211			       qp_info->port_priv->port_num);
2212
2213	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2214				    mad_list);
2215	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2216	ib_dma_unmap_single(port_priv->device,
2217			    recv->header.mapping,
2218			    mad_priv_dma_size(recv),
2219			    DMA_FROM_DEVICE);
2220
2221	/* Setup MAD receive work completion from "normal" work completion */
2222	recv->header.wc = *wc;
2223	recv->header.recv_wc.wc = &recv->header.wc;
2224
2225	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2226		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2227		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2228	} else {
2229		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2230		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2231	}
2232
2233	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2234	recv->header.recv_wc.recv_buf.grh = &recv->grh;
2235
2236	if (atomic_read(&qp_info->snoop_count))
2237		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2238
2239	/* Validate MAD */
2240	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2241		goto out;
2242
2243	mad_size = recv->mad_size;
2244	response = alloc_mad_private(mad_size, GFP_KERNEL);
2245	if (!response) {
2246		dev_err(&port_priv->device->dev,
2247			"%s: no memory for response buffer\n", __func__);
2248		goto out;
2249	}
2250
2251	if (rdma_cap_ib_switch(port_priv->device))
2252		port_num = wc->port_num;
2253	else
2254		port_num = port_priv->port_num;
2255
2256	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2257	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2258		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2259			       response, opa)
2260		    == IB_SMI_DISCARD)
2261			goto out;
2262	}
2263
2264	/* Give driver "right of first refusal" on incoming MAD */
2265	if (port_priv->device->process_mad) {
2266		ret = port_priv->device->process_mad(port_priv->device, 0,
2267						     port_priv->port_num,
2268						     wc, &recv->grh,
2269						     (const struct ib_mad_hdr *)recv->mad,
2270						     recv->mad_size,
2271						     (struct ib_mad_hdr *)response->mad,
2272						     &mad_size, &resp_mad_pkey_index);
2273
2274		if (opa)
2275			wc->pkey_index = resp_mad_pkey_index;
2276
2277		if (ret & IB_MAD_RESULT_SUCCESS) {
2278			if (ret & IB_MAD_RESULT_CONSUMED)
2279				goto out;
2280			if (ret & IB_MAD_RESULT_REPLY) {
2281				agent_send_response((const struct ib_mad_hdr *)response->mad,
2282						    &recv->grh, wc,
2283						    port_priv->device,
2284						    port_num,
2285						    qp_info->qp->qp_num,
2286						    mad_size, opa);
2287				goto out;
2288			}
2289		}
2290	}
2291
2292	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2293	if (mad_agent) {
2294		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2295		/*
2296		 * recv is freed up in error cases in ib_mad_complete_recv
2297		 * or via recv_handler in ib_mad_complete_recv()
2298		 */
2299		recv = NULL;
2300	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2301		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
2302		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2303				    port_priv->device, port_num,
2304				    qp_info->qp->qp_num, mad_size, opa);
2305	}
2306
2307out:
2308	/* Post another receive request for this QP */
2309	if (response) {
2310		ib_mad_post_receive_mads(qp_info, response);
2311		kfree(recv);
2312	} else
2313		ib_mad_post_receive_mads(qp_info, recv);
2314}
2315
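/*
 * If the agent's wait list is empty, cancel the delayed timeout work;
 * otherwise re-arm it when the new head of the list expires sooner than
 * the currently scheduled timeout.  Called with mad_agent_priv->lock
 * held.
 */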
2316static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2317{
2318	struct ib_mad_send_wr_private *mad_send_wr;
2319	unsigned long delay;
2320
2321	if (list_empty(&mad_agent_priv->wait_list)) {
2322		cancel_delayed_work(&mad_agent_priv->timed_work);
2323	} else {
2324		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2325					 struct ib_mad_send_wr_private,
2326					 agent_list);
2327
2328		if (time_after(mad_agent_priv->timeout,
2329			       mad_send_wr->timeout)) {
2330			mad_agent_priv->timeout = mad_send_wr->timeout;
2331			delay = mad_send_wr->timeout - jiffies;
2332			if ((long)delay <= 0)
2333				delay = 1;
2334			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2335					 &mad_agent_priv->timed_work, delay);
2336		}
2337	}
2338}
2339
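/*
 * Move a request that is now waiting for a response onto the agent's
 * wait list, kept sorted by absolute expiration time, and reschedule
 * the timeout work if this request expires first.
 */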
2340static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2341{
2342	struct ib_mad_agent_private *mad_agent_priv;
2343	struct ib_mad_send_wr_private *temp_mad_send_wr;
2344	struct list_head *list_item;
2345	unsigned long delay;
2346
2347	mad_agent_priv = mad_send_wr->mad_agent_priv;
2348	list_del(&mad_send_wr->agent_list);
2349
2350	delay = mad_send_wr->timeout;
2351	mad_send_wr->timeout += jiffies;
2352
2353	if (delay) {
2354		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2355			temp_mad_send_wr = list_entry(list_item,
2356						struct ib_mad_send_wr_private,
2357						agent_list);
2358			if (time_after(mad_send_wr->timeout,
2359				       temp_mad_send_wr->timeout))
2360				break;
2361		}
2362	} else
2364		list_item = &mad_agent_priv->wait_list;
2365	list_add(&mad_send_wr->agent_list, list_item);
2366
2367	/* Reschedule a work item if we have a shorter timeout */
2368	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2369		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2370				 &mad_agent_priv->timed_work, delay);
2371}
2372
2373void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2374			  int timeout_ms)
2375{
2376	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2377	wait_for_response(mad_send_wr);
2378}
2379
2380/*
2381 * Process a send work completion
2382 */
2383void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2384			     struct ib_mad_send_wc *mad_send_wc)
2385{
2386	struct ib_mad_agent_private	*mad_agent_priv;
2387	unsigned long			flags;
2388	int				ret;
2389
2390	mad_agent_priv = mad_send_wr->mad_agent_priv;
2391	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2392	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2393		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2394		if (ret == IB_RMPP_RESULT_CONSUMED)
2395			goto done;
2396	} else
2397		ret = IB_RMPP_RESULT_UNHANDLED;
2398
2399	if (mad_send_wc->status != IB_WC_SUCCESS &&
2400	    mad_send_wr->status == IB_WC_SUCCESS) {
2401		mad_send_wr->status = mad_send_wc->status;
2402		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2403	}
2404
2405	if (--mad_send_wr->refcount > 0) {
2406		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2407		    mad_send_wr->status == IB_WC_SUCCESS) {
2408			wait_for_response(mad_send_wr);
2409		}
2410		goto done;
2411	}
2412
2413	/* Remove send from MAD agent and notify client of completion */
2414	list_del(&mad_send_wr->agent_list);
2415	adjust_timeout(mad_agent_priv);
2416	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2417
2418	if (mad_send_wr->status != IB_WC_SUCCESS)
2419		mad_send_wc->status = mad_send_wr->status;
2420	if (ret == IB_RMPP_RESULT_INTERNAL)
2421		ib_rmpp_send_handler(mad_send_wc);
2422	else
2423		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2424						   mad_send_wc);
2425
2426	/* Release reference on agent taken when sending */
2427	deref_mad_agent(mad_agent_priv);
2428	return;
2429done:
2430	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2431}
2432
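/*
 * Send completion handler.  Unmaps the send buffers, completes the work
 * request, and if sends had overflowed the send queue, posts the next
 * queued send to the QP.
 */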
2433static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2434{
2435	struct ib_mad_port_private *port_priv = cq->cq_context;
2436	struct ib_mad_list_head *mad_list =
2437		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2438	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
2439	struct ib_mad_qp_info		*qp_info;
2440	struct ib_mad_queue		*send_queue;
2441	struct ib_send_wr		*bad_send_wr;
2442	struct ib_mad_send_wc		mad_send_wc;
2443	unsigned long flags;
2444	int ret;
2445
2446	if (list_empty_careful(&port_priv->port_list))
2447		return;
2448
2449	if (wc->status != IB_WC_SUCCESS) {
2450		if (!ib_mad_send_error(port_priv, wc))
2451			return;
2452	}
2453
2454	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2455				   mad_list);
2456	send_queue = mad_list->mad_queue;
2457	qp_info = send_queue->qp_info;
2458
2459retry:
2460	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2461			    mad_send_wr->header_mapping,
2462			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2463	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2464			    mad_send_wr->payload_mapping,
2465			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2466	queued_send_wr = NULL;
2467	spin_lock_irqsave(&send_queue->lock, flags);
2468	list_del(&mad_list->list);
2469
2470	/* Move queued send to the send queue */
2471	if (send_queue->count-- > send_queue->max_active) {
2472		mad_list = container_of(qp_info->overflow_list.next,
2473					struct ib_mad_list_head, list);
2474		queued_send_wr = container_of(mad_list,
2475					struct ib_mad_send_wr_private,
2476					mad_list);
2477		list_move_tail(&mad_list->list, &send_queue->list);
2478	}
2479	spin_unlock_irqrestore(&send_queue->lock, flags);
2480
2481	mad_send_wc.send_buf = &mad_send_wr->send_buf;
2482	mad_send_wc.status = wc->status;
2483	mad_send_wc.vendor_err = wc->vendor_err;
2484	if (atomic_read(&qp_info->snoop_count))
2485		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2486			   IB_MAD_SNOOP_SEND_COMPLETIONS);
2487	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2488
2489	if (queued_send_wr) {
2490		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2491				   &bad_send_wr);
2492		if (ret) {
2493			dev_err(&port_priv->device->dev,
2494				"ib_post_send failed: %d\n", ret);
2495			mad_send_wr = queued_send_wr;
2496			wc->status = IB_WC_LOC_QP_OP_ERR;
2497			goto retry;
2498		}
2499	}
2500}
2501
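/*
 * Flag every send still on the QP's send queue so that the flush
 * completions they generate (after a send error put the QP into SQE)
 * are reposted rather than reported to the client.
 */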
2502static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2503{
2504	struct ib_mad_send_wr_private *mad_send_wr;
2505	struct ib_mad_list_head *mad_list;
2506	unsigned long flags;
2507
2508	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2509	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2510		mad_send_wr = container_of(mad_list,
2511					   struct ib_mad_send_wr_private,
2512					   mad_list);
2513		mad_send_wr->retry = 1;
2514	}
2515	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2516}
2517
2518static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2519		struct ib_wc *wc)
2520{
2521	struct ib_mad_list_head *mad_list =
2522		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2523	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2524	struct ib_mad_send_wr_private *mad_send_wr;
2525	int ret;
2526
2527	/*
2528	 * Send errors will transition the QP to SQE - move
2529	 * QP to RTS and repost flushed work requests
2530	 */
2531	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2532				   mad_list);
2533	if (wc->status == IB_WC_WR_FLUSH_ERR) {
2534		if (mad_send_wr->retry) {
2535			/* Repost send */
2536			struct ib_send_wr *bad_send_wr;
2537
2538			mad_send_wr->retry = 0;
2539			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2540					&bad_send_wr);
2541			if (!ret)
2542				return false;
2543		}
2544	} else {
2545		struct ib_qp_attr *attr;
2546
2547		/* Transition QP to RTS and fail offending send */
2548		attr = kmalloc(sizeof *attr, GFP_KERNEL);
2549		if (attr) {
2550			attr->qp_state = IB_QPS_RTS;
2551			attr->cur_qp_state = IB_QPS_SQE;
2552			ret = ib_modify_qp(qp_info->qp, attr,
2553					   IB_QP_STATE | IB_QP_CUR_STATE);
2554			kfree(attr);
2555			if (ret)
2556				dev_err(&port_priv->device->dev,
2557					"%s - ib_modify_qp to RTS: %d\n",
2558					__func__, ret);
2559			else
2560				mark_sends_for_retry(qp_info);
2561		}
2562	}
2563
2564	return true;
2565}
2566
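/*
 * Cancel everything an agent has outstanding (used when the agent
 * unregisters): active sends are marked flushed and complete through
 * the normal send path, while requests waiting for a response are
 * pulled off the wait list and reported here with IB_WC_WR_FLUSH_ERR.
 */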
2567static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2568{
2569	unsigned long flags;
2570	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2571	struct ib_mad_send_wc mad_send_wc;
2572	struct list_head cancel_list;
2573
2574	INIT_LIST_HEAD(&cancel_list);
2575
2576	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2577	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2578				 &mad_agent_priv->send_list, agent_list) {
2579		if (mad_send_wr->status == IB_WC_SUCCESS) {
2580			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2581			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2582		}
2583	}
2584
2585	/* Empty wait list to prevent receives from finding a request */
2586	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2587	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2588
2589	/* Report all cancelled requests */
2590	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2591	mad_send_wc.vendor_err = 0;
2592
2593	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2594				 &cancel_list, agent_list) {
2595		mad_send_wc.send_buf = &mad_send_wr->send_buf;
2596		list_del(&mad_send_wr->agent_list);
2597		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2598						   &mad_send_wc);
2599		atomic_dec(&mad_agent_priv->refcount);
2600	}
2601}
2602
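/*
 * Look up the private send WR backing a client-visible send buffer:
 * first among requests waiting for a response, then among RMPP sends
 * still on the send list.
 */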
2603static struct ib_mad_send_wr_private*
2604find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2605	     struct ib_mad_send_buf *send_buf)
2606{
2607	struct ib_mad_send_wr_private *mad_send_wr;
2608
2609	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2610			    agent_list) {
2611		if (&mad_send_wr->send_buf == send_buf)
2612			return mad_send_wr;
2613	}
2614
2615	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2616			    agent_list) {
2617		if (is_rmpp_data_mad(mad_agent_priv,
2618				     mad_send_wr->send_buf.mad) &&
2619		    &mad_send_wr->send_buf == send_buf)
2620			return mad_send_wr;
2621	}
2622	return NULL;
2623}
2624
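/*
 * Update the timeout of an outstanding send.  A timeout_ms of 0 cancels
 * the request: it is marked flushed and completes to the client's send
 * handler with IB_WC_WR_FLUSH_ERR.  Returns -EINVAL if the send cannot
 * be found or has already completed or been cancelled.
 */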
2625int ib_modify_mad(struct ib_mad_agent *mad_agent,
2626		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2627{
2628	struct ib_mad_agent_private *mad_agent_priv;
2629	struct ib_mad_send_wr_private *mad_send_wr;
2630	unsigned long flags;
2631	int active;
2632
2633	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2634				      agent);
2635	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2636	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2637	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2638		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2639		return -EINVAL;
2640	}
2641
2642	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2643	if (!timeout_ms) {
2644		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2645		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2646	}
2647
2648	mad_send_wr->send_buf.timeout_ms = timeout_ms;
2649	if (active)
2650		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2651	else
2652		ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2653
2654	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2655	return 0;
2656}
2657EXPORT_SYMBOL(ib_modify_mad);
2658
2659void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2660		   struct ib_mad_send_buf *send_buf)
2661{
2662	ib_modify_mad(mad_agent, send_buf, 0);
2663}
2664EXPORT_SYMBOL(ib_cancel_mad);
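/*
 * Illustrative sketch (not part of the original file): a client holding
 * the struct ib_mad_send_buf returned by ib_create_send_mad() and
 * posted with ib_post_send_mad() can abandon the request as shown
 * below; the send then completes to the client's send_handler with
 * IB_WC_WR_FLUSH_ERR.  The helper name is hypothetical.
 *
 *	static void example_abort_request(struct ib_mad_agent *agent,
 *					  struct ib_mad_send_buf *msg)
 *	{
 *		ib_cancel_mad(agent, msg);
 *	}
 */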
2665
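/*
 * Work item that completes MADs addressed to the local port without
 * going on the wire: the request is looped back to the receiving agent,
 * if any, and the send is then completed back to the sender.
 */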
2666static void local_completions(struct work_struct *work)
2667{
2668	struct ib_mad_agent_private *mad_agent_priv;
2669	struct ib_mad_local_private *local;
2670	struct ib_mad_agent_private *recv_mad_agent;
2671	unsigned long flags;
2672	int free_mad;
2673	struct ib_wc wc;
2674	struct ib_mad_send_wc mad_send_wc;
2675	bool opa;
2676
2677	mad_agent_priv =
2678		container_of(work, struct ib_mad_agent_private, local_work);
2679
2680	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2681			       mad_agent_priv->qp_info->port_priv->port_num);
2682
2683	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2684	while (!list_empty(&mad_agent_priv->local_list)) {
2685		local = list_entry(mad_agent_priv->local_list.next,
2686				   struct ib_mad_local_private,
2687				   completion_list);
2688		list_del(&local->completion_list);
2689		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2690		free_mad = 0;
2691		if (local->mad_priv) {
2692			u8 base_version;
2693			recv_mad_agent = local->recv_mad_agent;
2694			if (!recv_mad_agent) {
2695				dev_err(&mad_agent_priv->agent.device->dev,
2696					"No receive MAD agent for local completion\n");
2697				free_mad = 1;
2698				goto local_send_completion;
2699			}
2700
2701			/*
2702			 * Defined behavior is to complete response
2703			 * before request
2704			 */
2705			build_smp_wc(recv_mad_agent->agent.qp,
2706				     local->mad_send_wr->send_wr.wr.wr_cqe,
2707				     be16_to_cpu(IB_LID_PERMISSIVE),
2708				     local->mad_send_wr->send_wr.pkey_index,
2709				     recv_mad_agent->agent.port_num, &wc);
2710
2711			local->mad_priv->header.recv_wc.wc = &wc;
2712
2713			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2714			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2715				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2716				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2717			} else {
2718				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2719				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2720			}
2721
2722			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2723			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2724				 &local->mad_priv->header.recv_wc.rmpp_list);
2725			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2726			local->mad_priv->header.recv_wc.recv_buf.mad =
2727						(struct ib_mad *)local->mad_priv->mad;
2728			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2729				snoop_recv(recv_mad_agent->qp_info,
2730					  &local->mad_priv->header.recv_wc,
2731					   IB_MAD_SNOOP_RECVS);
2732			recv_mad_agent->agent.recv_handler(
2733						&recv_mad_agent->agent,
2734						&local->mad_send_wr->send_buf,
2735						&local->mad_priv->header.recv_wc);
2736			spin_lock_irqsave(&recv_mad_agent->lock, flags);
2737			atomic_dec(&recv_mad_agent->refcount);
2738			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2739		}
2740
2741local_send_completion:
2742		/* Complete send */
2743		mad_send_wc.status = IB_WC_SUCCESS;
2744		mad_send_wc.vendor_err = 0;
2745		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2746		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2747			snoop_send(mad_agent_priv->qp_info,
2748				   &local->mad_send_wr->send_buf,
2749				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2750		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2751						   &mad_send_wc);
2752
2753		spin_lock_irqsave(&mad_agent_priv->lock, flags);
2754		atomic_dec(&mad_agent_priv->refcount);
2755		if (free_mad)
2756			kfree(local->mad_priv);
2757		kfree(local);
2758	}
2759	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2760}
2761
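/*
 * Resend a request whose timeout expired.  Returns -ETIMEDOUT once the
 * retry budget is exhausted; on success the request goes back onto the
 * agent's send list.
 */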
2762static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2763{
2764	int ret;
2765
2766	if (!mad_send_wr->retries_left)
2767		return -ETIMEDOUT;
2768
2769	mad_send_wr->retries_left--;
2770	mad_send_wr->send_buf.retries++;
2771
2772	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2773
2774	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2775		ret = ib_retry_rmpp(mad_send_wr);
2776		switch (ret) {
2777		case IB_RMPP_RESULT_UNHANDLED:
2778			ret = ib_send_mad(mad_send_wr);
2779			break;
2780		case IB_RMPP_RESULT_CONSUMED:
2781			ret = 0;
2782			break;
2783		default:
2784			ret = -ECOMM;
2785			break;
2786		}
2787	} else
2788		ret = ib_send_mad(mad_send_wr);
2789
2790	if (!ret) {
2791		mad_send_wr->refcount++;
2792		list_add_tail(&mad_send_wr->agent_list,
2793			      &mad_send_wr->mad_agent_priv->send_list);
2794	}
2795	return ret;
2796}
2797
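/*
 * Delayed work that walks the agent's wait list: an unexpired head
 * entry re-arms the work, expired entries are retried, and requests out
 * of retries are reported to the client as IB_WC_RESP_TIMEOUT_ERR (or
 * with their recorded error status).
 */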
2798static void timeout_sends(struct work_struct *work)
2799{
2800	struct ib_mad_agent_private *mad_agent_priv;
2801	struct ib_mad_send_wr_private *mad_send_wr;
2802	struct ib_mad_send_wc mad_send_wc;
2803	unsigned long flags, delay;
2804
2805	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2806				      timed_work.work);
2807	mad_send_wc.vendor_err = 0;
2808
2809	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2810	while (!list_empty(&mad_agent_priv->wait_list)) {
2811		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2812					 struct ib_mad_send_wr_private,
2813					 agent_list);
2814
2815		if (time_after(mad_send_wr->timeout, jiffies)) {
2816			delay = mad_send_wr->timeout - jiffies;
2817			if ((long)delay <= 0)
2818				delay = 1;
2819			queue_delayed_work(mad_agent_priv->qp_info->
2820					   port_priv->wq,
2821					   &mad_agent_priv->timed_work, delay);
2822			break;
2823		}
2824
2825		list_del(&mad_send_wr->agent_list);
2826		if (mad_send_wr->status == IB_WC_SUCCESS &&
2827		    !retry_send(mad_send_wr))
2828			continue;
2829
2830		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2831
2832		if (mad_send_wr->status == IB_WC_SUCCESS)
2833			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2834		else
2835			mad_send_wc.status = mad_send_wr->status;
2836		mad_send_wc.send_buf = &mad_send_wr->send_buf;
2837		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2838						   &mad_send_wc);
2839
2840		atomic_dec(&mad_agent_priv->refcount);
2841		spin_lock_irqsave(&mad_agent_priv->lock, flags);
2842	}
2843	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2844}
2845
2846/*
2847 * Allocate receive MADs and post receive WRs for them
2848 */
2849static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2850				    struct ib_mad_private *mad)
2851{
2852	unsigned long flags;
2853	int post, ret;
2854	struct ib_mad_private *mad_priv;
2855	struct ib_sge sg_list;
2856	struct ib_recv_wr recv_wr, *bad_recv_wr;
2857	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2858
2859	/* Initialize common scatter list fields */
2860	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2861
2862	/* Initialize common receive WR fields */
2863	recv_wr.next = NULL;
2864	recv_wr.sg_list = &sg_list;
2865	recv_wr.num_sge = 1;
2866
2867	do {
2868		/* Allocate and map receive buffer */
2869		if (mad) {
2870			mad_priv = mad;
2871			mad = NULL;
2872		} else {
2873			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2874						     GFP_ATOMIC);
2875			if (!mad_priv) {
2876				dev_err(&qp_info->port_priv->device->dev,
2877					"No memory for receive buffer\n");
2878				ret = -ENOMEM;
2879				break;
2880			}
2881		}
2882		sg_list.length = mad_priv_dma_size(mad_priv);
2883		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2884						 &mad_priv->grh,
2885						 mad_priv_dma_size(mad_priv),
2886						 DMA_FROM_DEVICE);
2887		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2888						  sg_list.addr))) {
2889			ret = -ENOMEM;
2890			break;
2891		}
2892		mad_priv->header.mapping = sg_list.addr;
2893		mad_priv->header.mad_list.mad_queue = recv_queue;
2894		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2895		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2896
2897		/* Post receive WR */
2898		spin_lock_irqsave(&recv_queue->lock, flags);
2899		post = (++recv_queue->count < recv_queue->max_active);
2900		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2901		spin_unlock_irqrestore(&recv_queue->lock, flags);
2902		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2903		if (ret) {
2904			spin_lock_irqsave(&recv_queue->lock, flags);
2905			list_del(&mad_priv->header.mad_list.list);
2906			recv_queue->count--;
2907			spin_unlock_irqrestore(&recv_queue->lock, flags);
2908			ib_dma_unmap_single(qp_info->port_priv->device,
2909					    mad_priv->header.mapping,
2910					    mad_priv_dma_size(mad_priv),
2911					    DMA_FROM_DEVICE);
2912			kfree(mad_priv);
2913			dev_err(&qp_info->port_priv->device->dev,
2914				"ib_post_recv failed: %d\n", ret);
2915			break;
2916		}
2917	} while (post);
2918
2919	return ret;
2920}
2921
2922/*
2923 * Return all the posted receive MADs
2924 */
2925static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2926{
2927	struct ib_mad_private_header *mad_priv_hdr;
2928	struct ib_mad_private *recv;
2929	struct ib_mad_list_head *mad_list;
2930
2931	if (!qp_info->qp)
2932		return;
2933
2934	while (!list_empty(&qp_info->recv_queue.list)) {
2935
2936		mad_list = list_entry(qp_info->recv_queue.list.next,
2937				      struct ib_mad_list_head, list);
2938		mad_priv_hdr = container_of(mad_list,
2939					    struct ib_mad_private_header,
2940					    mad_list);
2941		recv = container_of(mad_priv_hdr, struct ib_mad_private,
2942				    header);
2943
2944		/* Remove from posted receive MAD list */
2945		list_del(&mad_list->list);
2946
2947		ib_dma_unmap_single(qp_info->port_priv->device,
2948				    recv->header.mapping,
2949				    mad_priv_dma_size(recv),
2950				    DMA_FROM_DEVICE);
2951		kfree(recv);
2952	}
2953
2954	qp_info->recv_queue.count = 0;
2955}
2956
2957/*
2958 * Start the port
2959 */
2960static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2961{
2962	int ret, i;
2963	struct ib_qp_attr *attr;
2964	struct ib_qp *qp;
2965	u16 pkey_index;
2966
2967	attr = kmalloc(sizeof *attr, GFP_KERNEL);
2968	if (!attr) {
2969		dev_err(&port_priv->device->dev,
2970			"Couldn't kmalloc ib_qp_attr\n");
2971		return -ENOMEM;
2972	}
2973
2974	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2975			   IB_DEFAULT_PKEY_FULL, &pkey_index);
2976	if (ret)
2977		pkey_index = 0;
2978
2979	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2980		qp = port_priv->qp_info[i].qp;
2981		if (!qp)
2982			continue;
2983
2984		/*
2985		 * PKey index for QP1 is irrelevant but
2986		 * one is needed for the Reset to Init transition
2987		 */
2988		attr->qp_state = IB_QPS_INIT;
2989		attr->pkey_index = pkey_index;
2990		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2991		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2992					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
2993		if (ret) {
2994			dev_err(&port_priv->device->dev,
2995				"Couldn't change QP%d state to INIT: %d\n",
2996				i, ret);
2997			goto out;
2998		}
2999
3000		attr->qp_state = IB_QPS_RTR;
3001		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
3002		if (ret) {
3003			dev_err(&port_priv->device->dev,
3004				"Couldn't change QP%d state to RTR: %d\n",
3005				i, ret);
3006			goto out;
3007		}
3008
3009		attr->qp_state = IB_QPS_RTS;
3010		attr->sq_psn = IB_MAD_SEND_Q_PSN;
3011		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3012		if (ret) {
3013			dev_err(&port_priv->device->dev,
3014				"Couldn't change QP%d state to RTS: %d\n",
3015				i, ret);
3016			goto out;
3017		}
3018	}
3019
3020	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3021	if (ret) {
3022		dev_err(&port_priv->device->dev,
3023			"Failed to request completion notification: %d\n",
3024			ret);
3025		goto out;
3026	}
3027
3028	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3029		if (!port_priv->qp_info[i].qp)
3030			continue;
3031
3032		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3033		if (ret) {
3034			dev_err(&port_priv->device->dev,
3035				"Couldn't post receive WRs\n");
3036			goto out;
3037		}
3038	}
3039out:
3040	kfree(attr);
3041	return ret;
3042}
3043
3044static void qp_event_handler(struct ib_event *event, void *qp_context)
3045{
3046	struct ib_mad_qp_info	*qp_info = qp_context;
3047
3048	/* It's worse than that! He's dead, Jim! */
3049	dev_err(&qp_info->port_priv->device->dev,
3050		"Fatal error (%d) on MAD QP (%d)\n",
3051		event->event, qp_info->qp->qp_num);
3052}
3053
3054static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3055			   struct ib_mad_queue *mad_queue)
3056{
3057	mad_queue->qp_info = qp_info;
3058	mad_queue->count = 0;
3059	spin_lock_init(&mad_queue->lock);
3060	INIT_LIST_HEAD(&mad_queue->list);
3061}
3062
3063static void init_mad_qp(struct ib_mad_port_private *port_priv,
3064			struct ib_mad_qp_info *qp_info)
3065{
3066	qp_info->port_priv = port_priv;
3067	init_mad_queue(qp_info, &qp_info->send_queue);
3068	init_mad_queue(qp_info, &qp_info->recv_queue);
3069	INIT_LIST_HEAD(&qp_info->overflow_list);
3070	spin_lock_init(&qp_info->snoop_lock);
3071	qp_info->snoop_table = NULL;
3072	qp_info->snoop_table_size = 0;
3073	atomic_set(&qp_info->snoop_count, 0);
3074}
3075
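/*
 * Create one of the special QPs backing a MAD port (SMI for QP0, GSI
 * for QP1), sized according to the module's send/receive queue
 * parameters.
 */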
3076static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3077			 enum ib_qp_type qp_type)
3078{
3079	struct ib_qp_init_attr	qp_init_attr;
3080	int ret;
3081
3082	memset(&qp_init_attr, 0, sizeof qp_init_attr);
3083	qp_init_attr.send_cq = qp_info->port_priv->cq;
3084	qp_init_attr.recv_cq = qp_info->port_priv->cq;
3085	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3086	qp_init_attr.cap.max_send_wr = mad_sendq_size;
3087	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3088	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3089	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3090	qp_init_attr.qp_type = qp_type;
3091	qp_init_attr.port_num = qp_info->port_priv->port_num;
3092	qp_init_attr.qp_context = qp_info;
3093	qp_init_attr.event_handler = qp_event_handler;
3094	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3095	if (IS_ERR(qp_info->qp)) {
3096		dev_err(&qp_info->port_priv->device->dev,
3097			"Couldn't create ib_mad QP%d\n",
3098			get_spl_qp_index(qp_type));
3099		ret = PTR_ERR(qp_info->qp);
3100		goto error;
3101	}
3102	/* Use minimum queue sizes unless the CQ is resized */
3103	qp_info->send_queue.max_active = mad_sendq_size;
3104	qp_info->recv_queue.max_active = mad_recvq_size;
3105	return 0;
3106
3107error:
3108	return ret;
3109}
3110
3111static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3112{
3113	if (!qp_info->qp)
3114		return;
3115
3116	ib_destroy_qp(qp_info->qp);
3117	kfree(qp_info->snoop_table);
3118}
3119
3120/*
3121 * Open the port
3122 * Create the QP, PD, MR, and CQ if needed
3123 */
3124static int ib_mad_port_open(struct ib_device *device,
3125			    int port_num)
3126{
3127	int ret, cq_size;
3128	struct ib_mad_port_private *port_priv;
3129	unsigned long flags;
3130	char name[sizeof "ib_mad123"];
3131	int has_smi;
3132
3133	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3134		return -EFAULT;
3135
3136	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3137		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3138		return -EFAULT;
3139
3140	/* Create new device info */
3141	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
3142	if (!port_priv) {
3143		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
3144		return -ENOMEM;
3145	}
3146
3147	port_priv->device = device;
3148	port_priv->port_num = port_num;
3149	spin_lock_init(&port_priv->reg_lock);
3150	INIT_LIST_HEAD(&port_priv->agent_list);
3151	init_mad_qp(port_priv, &port_priv->qp_info[0]);
3152	init_mad_qp(port_priv, &port_priv->qp_info[1]);
3153
3154	cq_size = mad_sendq_size + mad_recvq_size;
3155	has_smi = rdma_cap_ib_smi(device, port_num);
3156	if (has_smi)
3157		cq_size *= 2;
3158
3159	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3160			IB_POLL_WORKQUEUE);
3161	if (IS_ERR(port_priv->cq)) {
3162		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3163		ret = PTR_ERR(port_priv->cq);
3164		goto error3;
3165	}
3166
3167	port_priv->pd = ib_alloc_pd(device);
3168	if (IS_ERR(port_priv->pd)) {
3169		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3170		ret = PTR_ERR(port_priv->pd);
3171		goto error4;
3172	}
3173
3174	if (has_smi) {
3175		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3176		if (ret)
3177			goto error6;
3178	}
3179	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3180	if (ret)
3181		goto error7;
3182
3183	snprintf(name, sizeof name, "ib_mad%d", port_num);
3184	port_priv->wq = create_singlethread_workqueue(name);
3185	if (!port_priv->wq) {
3186		ret = -ENOMEM;
3187		goto error8;
3188	}
3189
3190	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3191	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3192	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3193
3194	ret = ib_mad_port_start(port_priv);
3195	if (ret) {
3196		dev_err(&device->dev, "Couldn't start port\n");
3197		goto error9;
3198	}
3199
3200	return 0;
3201
3202error9:
3203	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3204	list_del_init(&port_priv->port_list);
3205	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3206
3207	destroy_workqueue(port_priv->wq);
3208error8:
3209	destroy_mad_qp(&port_priv->qp_info[1]);
3210error7:
3211	destroy_mad_qp(&port_priv->qp_info[0]);
3212error6:
3213	ib_dealloc_pd(port_priv->pd);
3214error4:
3215	ib_free_cq(port_priv->cq);
3216	cleanup_recv_queue(&port_priv->qp_info[1]);
3217	cleanup_recv_queue(&port_priv->qp_info[0]);
3218error3:
3219	kfree(port_priv);
3220
3221	return ret;
3222}
3223
3224/*
3225 * Close the port
3226 * If there are no classes using the port, free the port
3227 * resources (CQ, MR, PD, QP) and remove the port's info structure
3228 */
3229static int ib_mad_port_close(struct ib_device *device, int port_num)
3230{
3231	struct ib_mad_port_private *port_priv;
3232	unsigned long flags;
3233
3234	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3235	port_priv = __ib_get_mad_port(device, port_num);
3236	if (port_priv == NULL) {
3237		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3238		dev_err(&device->dev, "Port %d not found\n", port_num);
3239		return -ENODEV;
3240	}
3241	list_del_init(&port_priv->port_list);
3242	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3243
3244	destroy_workqueue(port_priv->wq);
3245	destroy_mad_qp(&port_priv->qp_info[1]);
3246	destroy_mad_qp(&port_priv->qp_info[0]);
3247	ib_dealloc_pd(port_priv->pd);
3248	ib_free_cq(port_priv->cq);
3249	cleanup_recv_queue(&port_priv->qp_info[1]);
3250	cleanup_recv_queue(&port_priv->qp_info[0]);
3251	/* XXX: Handle deallocation of MAD registration tables */
3252
3253	kfree(port_priv);
3254
3255	return 0;
3256}
3257
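/*
 * Client "add" callback: bring up the MAD and agent services on every
 * port of a newly registered device that supports IB management
 * datagrams, unwinding already-opened ports on failure.
 */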
3258static void ib_mad_init_device(struct ib_device *device)
3259{
3260	int start, i;
3261
3262	start = rdma_start_port(device);
3263
3264	for (i = start; i <= rdma_end_port(device); i++) {
3265		if (!rdma_cap_ib_mad(device, i))
3266			continue;
3267
3268		if (ib_mad_port_open(device, i)) {
3269			dev_err(&device->dev, "Couldn't open port %d\n", i);
3270			goto error;
3271		}
3272		if (ib_agent_port_open(device, i)) {
3273			dev_err(&device->dev,
3274				"Couldn't open port %d for agents\n", i);
3275			goto error_agent;
3276		}
3277	}
3278	return;
3279
3280error_agent:
3281	if (ib_mad_port_close(device, i))
3282		dev_err(&device->dev, "Couldn't close port %d\n", i);
3283
3284error:
3285	while (--i >= start) {
3286		if (!rdma_cap_ib_mad(device, i))
3287			continue;
3288
3289		if (ib_agent_port_close(device, i))
3290			dev_err(&device->dev,
3291				"Couldn't close port %d for agents\n", i);
3292		if (ib_mad_port_close(device, i))
3293			dev_err(&device->dev, "Couldn't close port %d\n", i);
3294	}
3295}
3296
3297static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3298{
3299	int i;
3300
3301	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3302		if (!rdma_cap_ib_mad(device, i))
3303			continue;
3304
3305		if (ib_agent_port_close(device, i))
3306			dev_err(&device->dev,
3307				"Couldn't close port %d for agents\n", i);
3308		if (ib_mad_port_close(device, i))
3309			dev_err(&device->dev, "Couldn't close port %d\n", i);
3310	}
3311}
3312
3313static struct ib_client mad_client = {
3314	.name   = "mad",
3315	.add = ib_mad_init_device,
3316	.remove = ib_mad_remove_device
3317};
3318
3319static int __init ib_mad_init_module(void)
3320{
3321	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3322	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3323
3324	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3325	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3326
3327	INIT_LIST_HEAD(&ib_mad_port_list);
3328
3329	if (ib_register_client(&mad_client)) {
3330		pr_err("Couldn't register ib_mad client\n");
3331		return -EINVAL;
3332	}
3333
3334	return 0;
3335}
3336
3337static void __exit ib_mad_cleanup_module(void)
3338{
3339	ib_unregister_client(&mad_client);
3340}
3341
3342module_init(ib_mad_init_module);
3343module_exit(ib_mad_cleanup_module);