   1/*
   2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
   3 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
   4 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
   5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
   6 *
   7 * This software is available to you under a choice of one of two
   8 * licenses.  You may choose to be licensed under the terms of the GNU
   9 * General Public License (GPL) Version 2, available from the file
  10 * COPYING in the main directory of this source tree, or the
  11 * OpenIB.org BSD license below:
  12 *
  13 *     Redistribution and use in source and binary forms, with or
  14 *     without modification, are permitted provided that the following
  15 *     conditions are met:
  16 *
  17 *      - Redistributions of source code must retain the above
  18 *        copyright notice, this list of conditions and the following
  19 *        disclaimer.
  20 *
  21 *      - Redistributions in binary form must reproduce the above
  22 *        copyright notice, this list of conditions and the following
  23 *        disclaimer in the documentation and/or other materials
  24 *        provided with the distribution.
  25 *
  26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33 * SOFTWARE.
  34 *
  35 */
  36#include <linux/dma-mapping.h>
  37#include <linux/slab.h>
  38#include <rdma/ib_cache.h>
  39
  40#include "mad_priv.h"
  41#include "mad_rmpp.h"
  42#include "smi.h"
  43#include "agent.h"
  44
  45MODULE_LICENSE("Dual BSD/GPL");
  46MODULE_DESCRIPTION("kernel IB MAD API");
  47MODULE_AUTHOR("Hal Rosenstock");
  48MODULE_AUTHOR("Sean Hefty");
  49
  50static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
  51static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
  52
  53module_param_named(send_queue_size, mad_sendq_size, int, 0444);
  54MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
  55module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
  56MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
  57
  58static struct kmem_cache *ib_mad_cache;
  59
  60static struct list_head ib_mad_port_list;
  61static u32 ib_mad_client_id = 0;
  62
  63/* Port list lock */
  64static DEFINE_SPINLOCK(ib_mad_port_list_lock);
  65
  66/* Forward declarations */
  67static int method_in_use(struct ib_mad_mgmt_method_table **method,
  68			 struct ib_mad_reg_req *mad_reg_req);
  69static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
  70static struct ib_mad_agent_private *find_mad_agent(
  71					struct ib_mad_port_private *port_priv,
  72					struct ib_mad *mad);
  73static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
  74				    struct ib_mad_private *mad);
  75static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
  76static void timeout_sends(struct work_struct *work);
  77static void local_completions(struct work_struct *work);
  78static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
  79			      struct ib_mad_agent_private *agent_priv,
  80			      u8 mgmt_class);
  81static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
  82			   struct ib_mad_agent_private *agent_priv);
  83
  84/*
  85 * Returns an ib_mad_port_private structure or NULL for a device/port
  86 * Assumes ib_mad_port_list_lock is being held
  87 */
  88static inline struct ib_mad_port_private *
  89__ib_get_mad_port(struct ib_device *device, int port_num)
  90{
  91	struct ib_mad_port_private *entry;
  92
  93	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
  94		if (entry->device == device && entry->port_num == port_num)
  95			return entry;
  96	}
  97	return NULL;
  98}
  99
 100/*
 101 * Wrapper function to return an ib_mad_port_private structure or NULL
 102 * for a device/port
 103 */
 104static inline struct ib_mad_port_private *
 105ib_get_mad_port(struct ib_device *device, int port_num)
 106{
 107	struct ib_mad_port_private *entry;
 108	unsigned long flags;
 109
 110	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 111	entry = __ib_get_mad_port(device, port_num);
 112	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 113
 114	return entry;
 115}
 116
 117static inline u8 convert_mgmt_class(u8 mgmt_class)
 118{
 119	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
 120	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
 121		0 : mgmt_class;
 122}
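/*
 * Editor's note: the directed-route class (0x81) is folded onto class 0,
 * which IBA reserves, so the class-indexed registration tables below need
 * only MAX_MGMT_CLASS entries; ib_register_mad_agent() enforces the same
 * aliasing when validating a registration request.
 */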
 123
 124static int get_spl_qp_index(enum ib_qp_type qp_type)
 125{
 126	switch (qp_type)
 127	{
 128	case IB_QPT_SMI:
 129		return 0;
 130	case IB_QPT_GSI:
 131		return 1;
 132	default:
 133		return -1;
 134	}
 135}
 136
 137static int vendor_class_index(u8 mgmt_class)
 138{
 139	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
 140}
 141
 142static int is_vendor_class(u8 mgmt_class)
 143{
 144	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
 145	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
 146		return 0;
 147	return 1;
 148}
 149
 150static int is_vendor_oui(char *oui)
 151{
 152	if (oui[0] || oui[1] || oui[2])
 153		return 1;
 154	return 0;
 155}
 156
 157static int is_vendor_method_in_use(
 158		struct ib_mad_mgmt_vendor_class *vendor_class,
 159		struct ib_mad_reg_req *mad_reg_req)
 160{
 161	struct ib_mad_mgmt_method_table *method;
 162	int i;
 163
 164	for (i = 0; i < MAX_MGMT_OUI; i++) {
 165		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
 166			method = vendor_class->method_table[i];
 167			if (method) {
 168				if (method_in_use(&method, mad_reg_req))
 169					return 1;
 170				else
 171					break;
 172			}
 173		}
 174	}
 175	return 0;
 176}
 177
 178int ib_response_mad(struct ib_mad *mad)
 179{
 180	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
 181		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
 182		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
 183		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
 184}
 185EXPORT_SYMBOL(ib_response_mad);
 186
 187/*
 188 * ib_register_mad_agent - Register to send/receive MADs
 189 */
 190struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 191					   u8 port_num,
 192					   enum ib_qp_type qp_type,
 193					   struct ib_mad_reg_req *mad_reg_req,
 194					   u8 rmpp_version,
 195					   ib_mad_send_handler send_handler,
 196					   ib_mad_recv_handler recv_handler,
 197					   void *context)
 198{
 199	struct ib_mad_port_private *port_priv;
 200	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
 201	struct ib_mad_agent_private *mad_agent_priv;
 202	struct ib_mad_reg_req *reg_req = NULL;
 203	struct ib_mad_mgmt_class_table *class;
 204	struct ib_mad_mgmt_vendor_class_table *vendor;
 205	struct ib_mad_mgmt_vendor_class *vendor_class;
 206	struct ib_mad_mgmt_method_table *method;
 207	int ret2, qpn;
 208	unsigned long flags;
 209	u8 mgmt_class, vclass;
 210
 211	/* Validate parameters */
 212	qpn = get_spl_qp_index(qp_type);
 213	if (qpn == -1)
 214		goto error1;
 215
 216	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
 217		goto error1;
 218
 219	/* Validate MAD registration request if supplied */
 220	if (mad_reg_req) {
 221		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
 222			goto error1;
 223		if (!recv_handler)
 224			goto error1;
 225		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
 226			/*
 227			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
 228			 * one in this range currently allowed
 229			 */
 230			if (mad_reg_req->mgmt_class !=
 231			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
 232				goto error1;
 233		} else if (mad_reg_req->mgmt_class == 0) {
 234			/*
 235			 * Class 0 is reserved in IBA and is used for
 236			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 237			 */
 238			goto error1;
 239		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
 240			/*
 241			 * If class is in "new" vendor range,
 242			 * ensure supplied OUI is not zero
 243			 */
 244			if (!is_vendor_oui(mad_reg_req->oui))
 245				goto error1;
 246		}
 247		/* Make sure class supplied is consistent with RMPP */
 248		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
 249			if (rmpp_version)
 250				goto error1;
 251		}
 252		/* Make sure class supplied is consistent with QP type */
 253		if (qp_type == IB_QPT_SMI) {
 254			if ((mad_reg_req->mgmt_class !=
 255					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
 256			    (mad_reg_req->mgmt_class !=
 257					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
 258				goto error1;
 259		} else {
 260			if ((mad_reg_req->mgmt_class ==
 261					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
 262			    (mad_reg_req->mgmt_class ==
 263					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
 264				goto error1;
 265		}
 266	} else {
 267		/* No registration request supplied */
 268		if (!send_handler)
 269			goto error1;
 270	}
 271
 272	/* Validate device and port */
 273	port_priv = ib_get_mad_port(device, port_num);
 274	if (!port_priv) {
 275		ret = ERR_PTR(-ENODEV);
 276		goto error1;
 277	}
 278
 279	/* Verify the QP requested is supported.  For example, Ethernet devices
 280	 * will not have QP0 */
 281	if (!port_priv->qp_info[qpn].qp) {
 282		ret = ERR_PTR(-EPROTONOSUPPORT);
 283		goto error1;
 284	}
 285
 286	/* Allocate structures */
 287	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
 288	if (!mad_agent_priv) {
 289		ret = ERR_PTR(-ENOMEM);
 290		goto error1;
 291	}
 292
 293	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
 294						 IB_ACCESS_LOCAL_WRITE);
 295	if (IS_ERR(mad_agent_priv->agent.mr)) {
 296		ret = ERR_PTR(-ENOMEM);
 297		goto error2;
 298	}
 299
 300	if (mad_reg_req) {
 301		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
 302		if (!reg_req) {
 303			ret = ERR_PTR(-ENOMEM);
 304			goto error3;
 305		}
 306	}
 307
 308	/* Now, fill in the various structures */
 309	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
 310	mad_agent_priv->reg_req = reg_req;
 311	mad_agent_priv->agent.rmpp_version = rmpp_version;
 312	mad_agent_priv->agent.device = device;
 313	mad_agent_priv->agent.recv_handler = recv_handler;
 314	mad_agent_priv->agent.send_handler = send_handler;
 315	mad_agent_priv->agent.context = context;
 316	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
 317	mad_agent_priv->agent.port_num = port_num;
 318	spin_lock_init(&mad_agent_priv->lock);
 319	INIT_LIST_HEAD(&mad_agent_priv->send_list);
 320	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
 321	INIT_LIST_HEAD(&mad_agent_priv->done_list);
 322	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
 323	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
 324	INIT_LIST_HEAD(&mad_agent_priv->local_list);
 325	INIT_WORK(&mad_agent_priv->local_work, local_completions);
 326	atomic_set(&mad_agent_priv->refcount, 1);
 327	init_completion(&mad_agent_priv->comp);
 328
 329	spin_lock_irqsave(&port_priv->reg_lock, flags);
 330	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
 331
 332	/*
 333	 * Make sure MAD registration (if supplied)
  334	 * does not overlap with any existing ones
 335	 */
 336	if (mad_reg_req) {
 337		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
 338		if (!is_vendor_class(mgmt_class)) {
 339			class = port_priv->version[mad_reg_req->
 340						   mgmt_class_version].class;
 341			if (class) {
 342				method = class->method_table[mgmt_class];
 343				if (method) {
 344					if (method_in_use(&method,
 345							   mad_reg_req))
 346						goto error4;
 347				}
 348			}
 349			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
 350						  mgmt_class);
 351		} else {
 352			/* "New" vendor class range */
 353			vendor = port_priv->version[mad_reg_req->
 354						    mgmt_class_version].vendor;
 355			if (vendor) {
 356				vclass = vendor_class_index(mgmt_class);
 357				vendor_class = vendor->vendor_class[vclass];
 358				if (vendor_class) {
 359					if (is_vendor_method_in_use(
 360							vendor_class,
 361							mad_reg_req))
 362						goto error4;
 363				}
 364			}
 365			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
 366		}
 367		if (ret2) {
 368			ret = ERR_PTR(ret2);
 369			goto error4;
 370		}
 371	}
 372
 373	/* Add mad agent into port's agent list */
 374	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
 375	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 376
 377	return &mad_agent_priv->agent;
 378
 379error4:
 380	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 381	kfree(reg_req);
 382error3:
 383	ib_dereg_mr(mad_agent_priv->agent.mr);
 384error2:
 385	kfree(mad_agent_priv);
 386error1:
 387	return ret;
 388}
 389EXPORT_SYMBOL(ib_register_mad_agent);
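/*
 * Editor's note: the fragment below is an illustrative sketch only and is
 * not part of mad.c.  It shows roughly how a kernel client of this API
 * might register a GSI agent; the "example_" names and the chosen
 * management class/methods are placeholders, not anything defined here.
 */
#if 0
static void example_send_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_wc *mad_send_wc)
{
	/* The client owns the send buffer until completion; release it */
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	/* Process the received MAD, then hand the buffers back */
	ib_free_recv_mad(mad_recv_wc);
}

static struct ib_mad_agent *example_register(struct ib_device *device,
					     u8 port_num)
{
	struct ib_mad_reg_req reg_req = {
		.mgmt_class	    = IB_MGMT_CLASS_PERF_MGMT,
		.mgmt_class_version = 1,
	};

	/* Methods this agent wants to receive unsolicited */
	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

	return ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				     &reg_req, 0 /* no RMPP */,
				     example_send_handler,
				     example_recv_handler, NULL);
}
#endif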
 390
 391static inline int is_snooping_sends(int mad_snoop_flags)
 392{
 393	return (mad_snoop_flags &
 394		(/*IB_MAD_SNOOP_POSTED_SENDS |
 395		 IB_MAD_SNOOP_RMPP_SENDS |*/
 396		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
 397		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
 398}
 399
 400static inline int is_snooping_recvs(int mad_snoop_flags)
 401{
 402	return (mad_snoop_flags &
 403		(IB_MAD_SNOOP_RECVS /*|
 404		 IB_MAD_SNOOP_RMPP_RECVS*/));
 405}
 406
 407static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
 408				struct ib_mad_snoop_private *mad_snoop_priv)
 409{
 410	struct ib_mad_snoop_private **new_snoop_table;
 411	unsigned long flags;
 412	int i;
 413
 414	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 415	/* Check for empty slot in array. */
 416	for (i = 0; i < qp_info->snoop_table_size; i++)
 417		if (!qp_info->snoop_table[i])
 418			break;
 419
 420	if (i == qp_info->snoop_table_size) {
 421		/* Grow table. */
 422		new_snoop_table = krealloc(qp_info->snoop_table,
 423					   sizeof mad_snoop_priv *
 424					   (qp_info->snoop_table_size + 1),
 425					   GFP_ATOMIC);
 426		if (!new_snoop_table) {
 427			i = -ENOMEM;
 428			goto out;
 429		}
 430
 431		qp_info->snoop_table = new_snoop_table;
 432		qp_info->snoop_table_size++;
 433	}
 434	qp_info->snoop_table[i] = mad_snoop_priv;
 435	atomic_inc(&qp_info->snoop_count);
 436out:
 437	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 438	return i;
 439}
 440
 441struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
 442					   u8 port_num,
 443					   enum ib_qp_type qp_type,
 444					   int mad_snoop_flags,
 445					   ib_mad_snoop_handler snoop_handler,
 446					   ib_mad_recv_handler recv_handler,
 447					   void *context)
 448{
 449	struct ib_mad_port_private *port_priv;
 450	struct ib_mad_agent *ret;
 451	struct ib_mad_snoop_private *mad_snoop_priv;
 452	int qpn;
 453
 454	/* Validate parameters */
 455	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
 456	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
 457		ret = ERR_PTR(-EINVAL);
 458		goto error1;
 459	}
 460	qpn = get_spl_qp_index(qp_type);
 461	if (qpn == -1) {
 462		ret = ERR_PTR(-EINVAL);
 463		goto error1;
 464	}
 465	port_priv = ib_get_mad_port(device, port_num);
 466	if (!port_priv) {
 467		ret = ERR_PTR(-ENODEV);
 468		goto error1;
 469	}
 470	/* Allocate structures */
 471	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
 472	if (!mad_snoop_priv) {
 473		ret = ERR_PTR(-ENOMEM);
 474		goto error1;
 475	}
 476
 477	/* Now, fill in the various structures */
 478	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
 479	mad_snoop_priv->agent.device = device;
 480	mad_snoop_priv->agent.recv_handler = recv_handler;
 481	mad_snoop_priv->agent.snoop_handler = snoop_handler;
 482	mad_snoop_priv->agent.context = context;
 483	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
 484	mad_snoop_priv->agent.port_num = port_num;
 485	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
 486	init_completion(&mad_snoop_priv->comp);
 487	mad_snoop_priv->snoop_index = register_snoop_agent(
 488						&port_priv->qp_info[qpn],
 489						mad_snoop_priv);
 490	if (mad_snoop_priv->snoop_index < 0) {
 491		ret = ERR_PTR(mad_snoop_priv->snoop_index);
 492		goto error2;
 493	}
 494
 495	atomic_set(&mad_snoop_priv->refcount, 1);
 496	return &mad_snoop_priv->agent;
 497
 498error2:
 499	kfree(mad_snoop_priv);
 500error1:
 501	return ret;
 502}
 503EXPORT_SYMBOL(ib_register_mad_snoop);
 504
 505static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 506{
 507	if (atomic_dec_and_test(&mad_agent_priv->refcount))
 508		complete(&mad_agent_priv->comp);
 509}
 510
 511static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
 512{
 513	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
 514		complete(&mad_snoop_priv->comp);
 515}
 516
 517static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 518{
 519	struct ib_mad_port_private *port_priv;
 520	unsigned long flags;
 521
 522	/* Note that we could still be handling received MADs */
 523
 524	/*
 525	 * Canceling all sends results in dropping received response
 526	 * MADs, preventing us from queuing additional work
 527	 */
 528	cancel_mads(mad_agent_priv);
 529	port_priv = mad_agent_priv->qp_info->port_priv;
 530	cancel_delayed_work(&mad_agent_priv->timed_work);
 531
 532	spin_lock_irqsave(&port_priv->reg_lock, flags);
 533	remove_mad_reg_req(mad_agent_priv);
 534	list_del(&mad_agent_priv->agent_list);
 535	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 536
 537	flush_workqueue(port_priv->wq);
 538	ib_cancel_rmpp_recvs(mad_agent_priv);
 539
 540	deref_mad_agent(mad_agent_priv);
 541	wait_for_completion(&mad_agent_priv->comp);
 542
 543	kfree(mad_agent_priv->reg_req);
 544	ib_dereg_mr(mad_agent_priv->agent.mr);
 545	kfree(mad_agent_priv);
 546}
 547
 548static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
 549{
 550	struct ib_mad_qp_info *qp_info;
 551	unsigned long flags;
 552
 553	qp_info = mad_snoop_priv->qp_info;
 554	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 555	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
 556	atomic_dec(&qp_info->snoop_count);
 557	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 558
 559	deref_snoop_agent(mad_snoop_priv);
 560	wait_for_completion(&mad_snoop_priv->comp);
 561
 562	kfree(mad_snoop_priv);
 563}
 564
 565/*
 566 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 567 */
 568int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 569{
 570	struct ib_mad_agent_private *mad_agent_priv;
 571	struct ib_mad_snoop_private *mad_snoop_priv;
 572
 573	/* If the TID is zero, the agent can only snoop. */
 574	if (mad_agent->hi_tid) {
 575		mad_agent_priv = container_of(mad_agent,
 576					      struct ib_mad_agent_private,
 577					      agent);
 578		unregister_mad_agent(mad_agent_priv);
 579	} else {
 580		mad_snoop_priv = container_of(mad_agent,
 581					      struct ib_mad_snoop_private,
 582					      agent);
 583		unregister_mad_snoop(mad_snoop_priv);
 584	}
 585	return 0;
 586}
 587EXPORT_SYMBOL(ib_unregister_mad_agent);
 588
 589static void dequeue_mad(struct ib_mad_list_head *mad_list)
 590{
 591	struct ib_mad_queue *mad_queue;
 592	unsigned long flags;
 593
 594	BUG_ON(!mad_list->mad_queue);
 595	mad_queue = mad_list->mad_queue;
 596	spin_lock_irqsave(&mad_queue->lock, flags);
 597	list_del(&mad_list->list);
 598	mad_queue->count--;
 599	spin_unlock_irqrestore(&mad_queue->lock, flags);
 600}
 601
 602static void snoop_send(struct ib_mad_qp_info *qp_info,
 603		       struct ib_mad_send_buf *send_buf,
 604		       struct ib_mad_send_wc *mad_send_wc,
 605		       int mad_snoop_flags)
 606{
 607	struct ib_mad_snoop_private *mad_snoop_priv;
 608	unsigned long flags;
 609	int i;
 610
 611	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 612	for (i = 0; i < qp_info->snoop_table_size; i++) {
 613		mad_snoop_priv = qp_info->snoop_table[i];
 614		if (!mad_snoop_priv ||
 615		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
 616			continue;
 617
 618		atomic_inc(&mad_snoop_priv->refcount);
 619		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 620		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
 621						    send_buf, mad_send_wc);
 622		deref_snoop_agent(mad_snoop_priv);
 623		spin_lock_irqsave(&qp_info->snoop_lock, flags);
 624	}
 625	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 626}
 627
 628static void snoop_recv(struct ib_mad_qp_info *qp_info,
 629		       struct ib_mad_recv_wc *mad_recv_wc,
 630		       int mad_snoop_flags)
 631{
 632	struct ib_mad_snoop_private *mad_snoop_priv;
 633	unsigned long flags;
 634	int i;
 635
 636	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 637	for (i = 0; i < qp_info->snoop_table_size; i++) {
 638		mad_snoop_priv = qp_info->snoop_table[i];
 639		if (!mad_snoop_priv ||
 640		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
 641			continue;
 642
 643		atomic_inc(&mad_snoop_priv->refcount);
 644		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 645		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
 646						   mad_recv_wc);
 647		deref_snoop_agent(mad_snoop_priv);
 648		spin_lock_irqsave(&qp_info->snoop_lock, flags);
 649	}
 650	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 651}
 652
 653static void build_smp_wc(struct ib_qp *qp,
 654			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
 655			 struct ib_wc *wc)
 656{
 657	memset(wc, 0, sizeof *wc);
 658	wc->wr_id = wr_id;
 659	wc->status = IB_WC_SUCCESS;
 660	wc->opcode = IB_WC_RECV;
 661	wc->pkey_index = pkey_index;
 662	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
 663	wc->src_qp = IB_QP0;
 664	wc->qp = qp;
 665	wc->slid = slid;
 666	wc->sl = 0;
 667	wc->dlid_path_bits = 0;
 668	wc->port_num = port_num;
 669}
 670
 671/*
 672 * Return 0 if SMP is to be sent
 673 * Return 1 if SMP was consumed locally (whether or not solicited)
 674 * Return < 0 if error
 675 */
 676static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 677				  struct ib_mad_send_wr_private *mad_send_wr)
 678{
 679	int ret = 0;
 680	struct ib_smp *smp = mad_send_wr->send_buf.mad;
 681	unsigned long flags;
 682	struct ib_mad_local_private *local;
 683	struct ib_mad_private *mad_priv;
 684	struct ib_mad_port_private *port_priv;
 685	struct ib_mad_agent_private *recv_mad_agent = NULL;
 686	struct ib_device *device = mad_agent_priv->agent.device;
 687	u8 port_num;
 688	struct ib_wc mad_wc;
 689	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
 690
 691	if (device->node_type == RDMA_NODE_IB_SWITCH &&
 692	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
 693		port_num = send_wr->wr.ud.port_num;
 694	else
 695		port_num = mad_agent_priv->agent.port_num;
 696
 697	/*
 698	 * Directed route handling starts if the initial LID routed part of
 699	 * a request or the ending LID routed part of a response is empty.
 700	 * If we are at the start of the LID routed part, don't update the
 701	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
 702	 */
 703	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
 704	     IB_LID_PERMISSIVE &&
 705	     smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
 706	     IB_SMI_DISCARD) {
 707		ret = -EINVAL;
 708		printk(KERN_ERR PFX "Invalid directed route\n");
 709		goto out;
 710	}
 711
 712	/* Check to post send on QP or process locally */
 713	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
 714	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
 715		goto out;
 716
 717	local = kmalloc(sizeof *local, GFP_ATOMIC);
 718	if (!local) {
 719		ret = -ENOMEM;
 720		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
 721		goto out;
 722	}
 723	local->mad_priv = NULL;
 724	local->recv_mad_agent = NULL;
 725	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
 726	if (!mad_priv) {
 727		ret = -ENOMEM;
 728		printk(KERN_ERR PFX "No memory for local response MAD\n");
 729		kfree(local);
 730		goto out;
 731	}
 732
 733	build_smp_wc(mad_agent_priv->agent.qp,
 734		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
 735		     send_wr->wr.ud.pkey_index,
 736		     send_wr->wr.ud.port_num, &mad_wc);
 737
 738	/* No GRH for DR SMP */
 739	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
 740				  (struct ib_mad *)smp,
 741				  (struct ib_mad *)&mad_priv->mad);
 742	switch (ret)
 743	{
 744	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
 745		if (ib_response_mad(&mad_priv->mad.mad) &&
 746		    mad_agent_priv->agent.recv_handler) {
 747			local->mad_priv = mad_priv;
 748			local->recv_mad_agent = mad_agent_priv;
 749			/*
 750			 * Reference MAD agent until receive
 751			 * side of local completion handled
 752			 */
 753			atomic_inc(&mad_agent_priv->refcount);
 754		} else
 755			kmem_cache_free(ib_mad_cache, mad_priv);
 756		break;
 757	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
 758		kmem_cache_free(ib_mad_cache, mad_priv);
 759		break;
 760	case IB_MAD_RESULT_SUCCESS:
 761		/* Treat like an incoming receive MAD */
 762		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
 763					    mad_agent_priv->agent.port_num);
 764		if (port_priv) {
 765			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
 766			recv_mad_agent = find_mad_agent(port_priv,
 767						        &mad_priv->mad.mad);
 768		}
 769		if (!port_priv || !recv_mad_agent) {
 770			/*
 771			 * No receiving agent so drop packet and
 772			 * generate send completion.
 773			 */
 774			kmem_cache_free(ib_mad_cache, mad_priv);
 775			break;
 776		}
 777		local->mad_priv = mad_priv;
 778		local->recv_mad_agent = recv_mad_agent;
 779		break;
 780	default:
 781		kmem_cache_free(ib_mad_cache, mad_priv);
 782		kfree(local);
 783		ret = -EINVAL;
 784		goto out;
 785	}
 786
 787	local->mad_send_wr = mad_send_wr;
 788	/* Reference MAD agent until send side of local completion handled */
 789	atomic_inc(&mad_agent_priv->refcount);
 790	/* Queue local completion to local list */
 791	spin_lock_irqsave(&mad_agent_priv->lock, flags);
 792	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
 793	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 794	queue_work(mad_agent_priv->qp_info->port_priv->wq,
 795		   &mad_agent_priv->local_work);
 796	ret = 1;
 797out:
 798	return ret;
 799}
 800
 801static int get_pad_size(int hdr_len, int data_len)
 802{
 803	int seg_size, pad;
 804
 805	seg_size = sizeof(struct ib_mad) - hdr_len;
 806	if (data_len && seg_size) {
 807		pad = seg_size - data_len % seg_size;
 808		return pad == seg_size ? 0 : pad;
 809	} else
 810		return seg_size;
 811}
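/*
 * Editor's note (worked example): with hdr_len = IB_MGMT_SA_HDR (56),
 * each 256-byte MAD carries seg_size = 200 data bytes, so data_len = 300
 * needs pad = 200 - (300 % 200) = 100 bytes of padding in the last segment.
 */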
 812
 813static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
 814{
 815	struct ib_rmpp_segment *s, *t;
 816
 817	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
 818		list_del(&s->list);
 819		kfree(s);
 820	}
 821}
 822
 823static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 824				gfp_t gfp_mask)
 825{
 826	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
 827	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
 828	struct ib_rmpp_segment *seg = NULL;
 829	int left, seg_size, pad;
 830
 831	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
 832	seg_size = send_buf->seg_size;
 833	pad = send_wr->pad;
 834
 835	/* Allocate data segments. */
 836	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
 837		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
 838		if (!seg) {
  839			printk(KERN_ERR "alloc_send_rmpp_list: RMPP mem "
 840			       "alloc failed for len %zd, gfp %#x\n",
 841			       sizeof (*seg) + seg_size, gfp_mask);
 842			free_send_rmpp_list(send_wr);
 843			return -ENOMEM;
 844		}
 845		seg->num = ++send_buf->seg_count;
 846		list_add_tail(&seg->list, &send_wr->rmpp_list);
 847	}
 848
 849	/* Zero any padding */
 850	if (pad)
 851		memset(seg->data + seg_size - pad, 0, pad);
 852
 853	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
 854					  agent.rmpp_version;
 855	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
 856	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
 857
 858	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
 859					struct ib_rmpp_segment, list);
 860	send_wr->last_ack_seg = send_wr->cur_seg;
 861	return 0;
 862}
 863
 864struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 865					    u32 remote_qpn, u16 pkey_index,
 866					    int rmpp_active,
 867					    int hdr_len, int data_len,
 868					    gfp_t gfp_mask)
 869{
 870	struct ib_mad_agent_private *mad_agent_priv;
 871	struct ib_mad_send_wr_private *mad_send_wr;
 872	int pad, message_size, ret, size;
 873	void *buf;
 874
 875	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
 876				      agent);
 877	pad = get_pad_size(hdr_len, data_len);
 878	message_size = hdr_len + data_len + pad;
 879
 880	if ((!mad_agent->rmpp_version &&
 881	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
 882	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
 883		return ERR_PTR(-EINVAL);
 884
 885	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
 886	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
 887	if (!buf)
 888		return ERR_PTR(-ENOMEM);
 889
 890	mad_send_wr = buf + size;
 891	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
 892	mad_send_wr->send_buf.mad = buf;
 893	mad_send_wr->send_buf.hdr_len = hdr_len;
 894	mad_send_wr->send_buf.data_len = data_len;
 895	mad_send_wr->pad = pad;
 896
 897	mad_send_wr->mad_agent_priv = mad_agent_priv;
 898	mad_send_wr->sg_list[0].length = hdr_len;
 899	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
 900	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
 901	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
 902
 903	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
 904	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
 905	mad_send_wr->send_wr.num_sge = 2;
 906	mad_send_wr->send_wr.opcode = IB_WR_SEND;
 907	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
 908	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
 909	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
 910	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
 911
 912	if (rmpp_active) {
 913		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
 914		if (ret) {
 915			kfree(buf);
 916			return ERR_PTR(ret);
 917		}
 918	}
 919
 920	mad_send_wr->send_buf.mad_agent = mad_agent;
 921	atomic_inc(&mad_agent_priv->refcount);
 922	return &mad_send_wr->send_buf;
 923}
 924EXPORT_SYMBOL(ib_create_send_mad);
 925
 926int ib_get_mad_data_offset(u8 mgmt_class)
 927{
 928	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
 929		return IB_MGMT_SA_HDR;
 930	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
 931		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
 932		 (mgmt_class == IB_MGMT_CLASS_BIS))
 933		return IB_MGMT_DEVICE_HDR;
 934	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
 935		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
 936		return IB_MGMT_VENDOR_HDR;
 937	else
 938		return IB_MGMT_MAD_HDR;
 939}
 940EXPORT_SYMBOL(ib_get_mad_data_offset);
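/*
 * Editor's note: the values returned above are per-class data offsets in
 * bytes (the common 24-byte MAD header plus any RMPP and class-specific
 * headers), i.e. where payload data begins within the 256-byte MAD.
 */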
 941
 942int ib_is_mad_class_rmpp(u8 mgmt_class)
 943{
 944	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
 945	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
 946	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
 947	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
 948	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
 949	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
 950		return 1;
 951	return 0;
 952}
 953EXPORT_SYMBOL(ib_is_mad_class_rmpp);
 954
 955void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
 956{
 957	struct ib_mad_send_wr_private *mad_send_wr;
 958	struct list_head *list;
 959
 960	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
 961				   send_buf);
 962	list = &mad_send_wr->cur_seg->list;
 963
 964	if (mad_send_wr->cur_seg->num < seg_num) {
 965		list_for_each_entry(mad_send_wr->cur_seg, list, list)
 966			if (mad_send_wr->cur_seg->num == seg_num)
 967				break;
 968	} else if (mad_send_wr->cur_seg->num > seg_num) {
 969		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
 970			if (mad_send_wr->cur_seg->num == seg_num)
 971				break;
 972	}
 973	return mad_send_wr->cur_seg->data;
 974}
 975EXPORT_SYMBOL(ib_get_rmpp_segment);
 976
 977static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
 978{
 979	if (mad_send_wr->send_buf.seg_count)
 980		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
 981					   mad_send_wr->seg_num);
 982	else
 983		return mad_send_wr->send_buf.mad +
 984		       mad_send_wr->send_buf.hdr_len;
 985}
 986
 987void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
 988{
 989	struct ib_mad_agent_private *mad_agent_priv;
 990	struct ib_mad_send_wr_private *mad_send_wr;
 991
 992	mad_agent_priv = container_of(send_buf->mad_agent,
 993				      struct ib_mad_agent_private, agent);
 994	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
 995				   send_buf);
 996
 997	free_send_rmpp_list(mad_send_wr);
 998	kfree(send_buf->mad);
 999	deref_mad_agent(mad_agent_priv);
1000}
1001EXPORT_SYMBOL(ib_free_send_mad);
1002
1003int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1004{
1005	struct ib_mad_qp_info *qp_info;
1006	struct list_head *list;
1007	struct ib_send_wr *bad_send_wr;
1008	struct ib_mad_agent *mad_agent;
1009	struct ib_sge *sge;
1010	unsigned long flags;
1011	int ret;
1012
1013	/* Set WR ID to find mad_send_wr upon completion */
1014	qp_info = mad_send_wr->mad_agent_priv->qp_info;
1015	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1016	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1017
1018	mad_agent = mad_send_wr->send_buf.mad_agent;
1019	sge = mad_send_wr->sg_list;
1020	sge[0].addr = ib_dma_map_single(mad_agent->device,
1021					mad_send_wr->send_buf.mad,
1022					sge[0].length,
1023					DMA_TO_DEVICE);
1024	mad_send_wr->header_mapping = sge[0].addr;
1025
1026	sge[1].addr = ib_dma_map_single(mad_agent->device,
1027					ib_get_payload(mad_send_wr),
1028					sge[1].length,
1029					DMA_TO_DEVICE);
1030	mad_send_wr->payload_mapping = sge[1].addr;
1031
1032	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1033	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1034		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1035				   &bad_send_wr);
1036		list = &qp_info->send_queue.list;
1037	} else {
1038		ret = 0;
1039		list = &qp_info->overflow_list;
1040	}
1041
1042	if (!ret) {
1043		qp_info->send_queue.count++;
1044		list_add_tail(&mad_send_wr->mad_list.list, list);
1045	}
1046	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1047	if (ret) {
1048		ib_dma_unmap_single(mad_agent->device,
1049				    mad_send_wr->header_mapping,
1050				    sge[0].length, DMA_TO_DEVICE);
1051		ib_dma_unmap_single(mad_agent->device,
1052				    mad_send_wr->payload_mapping,
1053				    sge[1].length, DMA_TO_DEVICE);
1054	}
1055	return ret;
1056}
1057
1058/*
1059 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1060 *  with the registered client
1061 */
1062int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1063		     struct ib_mad_send_buf **bad_send_buf)
1064{
1065	struct ib_mad_agent_private *mad_agent_priv;
1066	struct ib_mad_send_buf *next_send_buf;
1067	struct ib_mad_send_wr_private *mad_send_wr;
1068	unsigned long flags;
1069	int ret = -EINVAL;
1070
1071	/* Walk list of send WRs and post each on send list */
1072	for (; send_buf; send_buf = next_send_buf) {
1073
1074		mad_send_wr = container_of(send_buf,
1075					   struct ib_mad_send_wr_private,
1076					   send_buf);
1077		mad_agent_priv = mad_send_wr->mad_agent_priv;
1078
1079		if (!send_buf->mad_agent->send_handler ||
1080		    (send_buf->timeout_ms &&
1081		     !send_buf->mad_agent->recv_handler)) {
1082			ret = -EINVAL;
1083			goto error;
1084		}
1085
1086		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1087			if (mad_agent_priv->agent.rmpp_version) {
1088				ret = -EINVAL;
1089				goto error;
1090			}
1091		}
1092
1093		/*
1094		 * Save pointer to next work request to post in case the
1095		 * current one completes, and the user modifies the work
1096		 * request associated with the completion
1097		 */
1098		next_send_buf = send_buf->next;
1099		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1100
1101		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1102		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1103			ret = handle_outgoing_dr_smp(mad_agent_priv,
1104						     mad_send_wr);
1105			if (ret < 0)		/* error */
1106				goto error;
1107			else if (ret == 1)	/* locally consumed */
1108				continue;
1109		}
1110
1111		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1112		/* Timeout will be updated after send completes */
1113		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1114		mad_send_wr->max_retries = send_buf->retries;
1115		mad_send_wr->retries_left = send_buf->retries;
1116		send_buf->retries = 0;
1117		/* Reference for work request to QP + response */
1118		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1119		mad_send_wr->status = IB_WC_SUCCESS;
1120
1121		/* Reference MAD agent until send completes */
1122		atomic_inc(&mad_agent_priv->refcount);
1123		spin_lock_irqsave(&mad_agent_priv->lock, flags);
1124		list_add_tail(&mad_send_wr->agent_list,
1125			      &mad_agent_priv->send_list);
1126		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1127
1128		if (mad_agent_priv->agent.rmpp_version) {
1129			ret = ib_send_rmpp_mad(mad_send_wr);
1130			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1131				ret = ib_send_mad(mad_send_wr);
1132		} else
1133			ret = ib_send_mad(mad_send_wr);
1134		if (ret < 0) {
1135			/* Fail send request */
1136			spin_lock_irqsave(&mad_agent_priv->lock, flags);
1137			list_del(&mad_send_wr->agent_list);
1138			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1139			atomic_dec(&mad_agent_priv->refcount);
1140			goto error;
1141		}
1142	}
1143	return 0;
1144error:
1145	if (bad_send_buf)
1146		*bad_send_buf = send_buf;
1147	return ret;
1148}
1149EXPORT_SYMBOL(ib_post_send_mad);
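/*
 * Editor's note: the fragment below is an illustrative sketch only and is
 * not part of mad.c.  It shows a typical non-RMPP send using the calls
 * above: build a buffer with ib_create_send_mad(), fill in the MAD header,
 * attach an address handle and post it.  "example_send" and the class,
 * method, TID and timeout values are placeholders chosen by the caller.
 */
#if 0
static int example_send(struct ib_mad_agent *agent, struct ib_ah *ah,
			u32 remote_qpn, u16 pkey_index)
{
	struct ib_mad_send_buf *msg;
	struct ib_mad *mad;
	int ret;

	msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
				 0 /* no RMPP */, IB_MGMT_MAD_HDR,
				 IB_MGMT_MAD_DATA, GFP_KERNEL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	mad = msg->mad;
	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
	mad->mad_hdr.class_version = 1;
	mad->mad_hdr.method        = IB_MGMT_METHOD_GET;
	mad->mad_hdr.tid           = cpu_to_be64(1);	/* caller-chosen TID */

	msg->ah         = ah;		/* address handle for the target */
	msg->timeout_ms = 1000;		/* expect a response within 1s */
	msg->retries    = 3;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
	return ret;
}
#endif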
1150
1151/*
1152 * ib_free_recv_mad - Returns data buffers used to receive
1153 *  a MAD to the access layer
1154 */
1155void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1156{
1157	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1158	struct ib_mad_private_header *mad_priv_hdr;
1159	struct ib_mad_private *priv;
1160	struct list_head free_list;
1161
1162	INIT_LIST_HEAD(&free_list);
1163	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1164
1165	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1166					&free_list, list) {
1167		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1168					   recv_buf);
1169		mad_priv_hdr = container_of(mad_recv_wc,
1170					    struct ib_mad_private_header,
1171					    recv_wc);
1172		priv = container_of(mad_priv_hdr, struct ib_mad_private,
1173				    header);
1174		kmem_cache_free(ib_mad_cache, priv);
1175	}
1176}
1177EXPORT_SYMBOL(ib_free_recv_mad);
1178
1179struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1180					u8 rmpp_version,
1181					ib_mad_send_handler send_handler,
1182					ib_mad_recv_handler recv_handler,
1183					void *context)
1184{
1185	return ERR_PTR(-EINVAL);	/* XXX: for now */
1186}
1187EXPORT_SYMBOL(ib_redirect_mad_qp);
1188
1189int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1190		      struct ib_wc *wc)
1191{
1192	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1193	return 0;
1194}
1195EXPORT_SYMBOL(ib_process_mad_wc);
1196
1197static int method_in_use(struct ib_mad_mgmt_method_table **method,
1198			 struct ib_mad_reg_req *mad_reg_req)
1199{
1200	int i;
1201
1202	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1203		if ((*method)->agent[i]) {
1204			printk(KERN_ERR PFX "Method %d already in use\n", i);
1205			return -EINVAL;
1206		}
1207	}
1208	return 0;
1209}
1210
1211static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1212{
1213	/* Allocate management method table */
1214	*method = kzalloc(sizeof **method, GFP_ATOMIC);
1215	if (!*method) {
1216		printk(KERN_ERR PFX "No memory for "
1217		       "ib_mad_mgmt_method_table\n");
1218		return -ENOMEM;
1219	}
1220
1221	return 0;
1222}
1223
1224/*
1225 * Check to see if there are any methods still in use
1226 */
1227static int check_method_table(struct ib_mad_mgmt_method_table *method)
1228{
1229	int i;
1230
1231	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1232		if (method->agent[i])
1233			return 1;
1234	return 0;
1235}
1236
1237/*
1238 * Check to see if there are any method tables for this class still in use
1239 */
1240static int check_class_table(struct ib_mad_mgmt_class_table *class)
1241{
1242	int i;
1243
1244	for (i = 0; i < MAX_MGMT_CLASS; i++)
1245		if (class->method_table[i])
1246			return 1;
1247	return 0;
1248}
1249
1250static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1251{
1252	int i;
1253
1254	for (i = 0; i < MAX_MGMT_OUI; i++)
1255		if (vendor_class->method_table[i])
1256			return 1;
1257	return 0;
1258}
1259
1260static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1261			   char *oui)
1262{
1263	int i;
1264
1265	for (i = 0; i < MAX_MGMT_OUI; i++)
 1266		/* Is there a matching OUI for this vendor class? */
1267		if (!memcmp(vendor_class->oui[i], oui, 3))
1268			return i;
1269
1270	return -1;
1271}
1272
1273static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1274{
1275	int i;
1276
1277	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1278		if (vendor->vendor_class[i])
1279			return 1;
1280
1281	return 0;
1282}
1283
1284static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1285				     struct ib_mad_agent_private *agent)
1286{
1287	int i;
1288
1289	/* Remove any methods for this mad agent */
1290	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1291		if (method->agent[i] == agent) {
1292			method->agent[i] = NULL;
1293		}
1294	}
1295}
1296
1297static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1298			      struct ib_mad_agent_private *agent_priv,
1299			      u8 mgmt_class)
1300{
1301	struct ib_mad_port_private *port_priv;
1302	struct ib_mad_mgmt_class_table **class;
1303	struct ib_mad_mgmt_method_table **method;
1304	int i, ret;
1305
1306	port_priv = agent_priv->qp_info->port_priv;
1307	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1308	if (!*class) {
1309		/* Allocate management class table for "new" class version */
1310		*class = kzalloc(sizeof **class, GFP_ATOMIC);
1311		if (!*class) {
1312			printk(KERN_ERR PFX "No memory for "
1313			       "ib_mad_mgmt_class_table\n");
1314			ret = -ENOMEM;
1315			goto error1;
1316		}
1317
1318		/* Allocate method table for this management class */
1319		method = &(*class)->method_table[mgmt_class];
1320		if ((ret = allocate_method_table(method)))
1321			goto error2;
1322	} else {
1323		method = &(*class)->method_table[mgmt_class];
1324		if (!*method) {
1325			/* Allocate method table for this management class */
1326			if ((ret = allocate_method_table(method)))
1327				goto error1;
1328		}
1329	}
1330
1331	/* Now, make sure methods are not already in use */
1332	if (method_in_use(method, mad_reg_req))
1333		goto error3;
1334
1335	/* Finally, add in methods being registered */
1336	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1337		(*method)->agent[i] = agent_priv;
1338
1339	return 0;
1340
1341error3:
1342	/* Remove any methods for this mad agent */
1343	remove_methods_mad_agent(*method, agent_priv);
1344	/* Now, check to see if there are any methods in use */
1345	if (!check_method_table(*method)) {
1346		/* If not, release management method table */
1347		kfree(*method);
1348		*method = NULL;
1349	}
1350	ret = -EINVAL;
1351	goto error1;
1352error2:
1353	kfree(*class);
1354	*class = NULL;
1355error1:
1356	return ret;
1357}
1358
1359static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1360			   struct ib_mad_agent_private *agent_priv)
1361{
1362	struct ib_mad_port_private *port_priv;
1363	struct ib_mad_mgmt_vendor_class_table **vendor_table;
1364	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1365	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1366	struct ib_mad_mgmt_method_table **method;
1367	int i, ret = -ENOMEM;
1368	u8 vclass;
1369
1370	/* "New" vendor (with OUI) class */
1371	vclass = vendor_class_index(mad_reg_req->mgmt_class);
1372	port_priv = agent_priv->qp_info->port_priv;
1373	vendor_table = &port_priv->version[
1374				mad_reg_req->mgmt_class_version].vendor;
1375	if (!*vendor_table) {
1376		/* Allocate mgmt vendor class table for "new" class version */
1377		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1378		if (!vendor) {
1379			printk(KERN_ERR PFX "No memory for "
1380			       "ib_mad_mgmt_vendor_class_table\n");
1381			goto error1;
1382		}
1383
1384		*vendor_table = vendor;
1385	}
1386	if (!(*vendor_table)->vendor_class[vclass]) {
1387		/* Allocate table for this management vendor class */
1388		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1389		if (!vendor_class) {
1390			printk(KERN_ERR PFX "No memory for "
1391			       "ib_mad_mgmt_vendor_class\n");
1392			goto error2;
1393		}
1394
1395		(*vendor_table)->vendor_class[vclass] = vendor_class;
1396	}
1397	for (i = 0; i < MAX_MGMT_OUI; i++) {
 1398		/* Is there a matching OUI for this vendor class? */
1399		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1400			    mad_reg_req->oui, 3)) {
1401			method = &(*vendor_table)->vendor_class[
1402						vclass]->method_table[i];
1403			BUG_ON(!*method);
1404			goto check_in_use;
1405		}
1406	}
1407	for (i = 0; i < MAX_MGMT_OUI; i++) {
1408		/* OUI slot available ? */
1409		if (!is_vendor_oui((*vendor_table)->vendor_class[
1410				vclass]->oui[i])) {
1411			method = &(*vendor_table)->vendor_class[
1412				vclass]->method_table[i];
1413			BUG_ON(*method);
1414			/* Allocate method table for this OUI */
1415			if ((ret = allocate_method_table(method)))
1416				goto error3;
1417			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1418			       mad_reg_req->oui, 3);
1419			goto check_in_use;
1420		}
1421	}
1422	printk(KERN_ERR PFX "All OUI slots in use\n");
1423	goto error3;
1424
1425check_in_use:
1426	/* Now, make sure methods are not already in use */
1427	if (method_in_use(method, mad_reg_req))
1428		goto error4;
1429
1430	/* Finally, add in methods being registered */
1431	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1432		(*method)->agent[i] = agent_priv;
1433
1434	return 0;
1435
1436error4:
1437	/* Remove any methods for this mad agent */
1438	remove_methods_mad_agent(*method, agent_priv);
1439	/* Now, check to see if there are any methods in use */
1440	if (!check_method_table(*method)) {
1441		/* If not, release management method table */
1442		kfree(*method);
1443		*method = NULL;
1444	}
1445	ret = -EINVAL;
1446error3:
1447	if (vendor_class) {
1448		(*vendor_table)->vendor_class[vclass] = NULL;
1449		kfree(vendor_class);
1450	}
1451error2:
1452	if (vendor) {
1453		*vendor_table = NULL;
1454		kfree(vendor);
1455	}
1456error1:
1457	return ret;
1458}
1459
1460static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1461{
1462	struct ib_mad_port_private *port_priv;
1463	struct ib_mad_mgmt_class_table *class;
1464	struct ib_mad_mgmt_method_table *method;
1465	struct ib_mad_mgmt_vendor_class_table *vendor;
1466	struct ib_mad_mgmt_vendor_class *vendor_class;
1467	int index;
1468	u8 mgmt_class;
1469
1470	/*
1471	 * Was MAD registration request supplied
1472	 * with original registration ?
1473	 */
1474	if (!agent_priv->reg_req) {
1475		goto out;
1476	}
1477
1478	port_priv = agent_priv->qp_info->port_priv;
1479	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1480	class = port_priv->version[
1481			agent_priv->reg_req->mgmt_class_version].class;
1482	if (!class)
1483		goto vendor_check;
1484
1485	method = class->method_table[mgmt_class];
1486	if (method) {
1487		/* Remove any methods for this mad agent */
1488		remove_methods_mad_agent(method, agent_priv);
1489		/* Now, check to see if there are any methods still in use */
1490		if (!check_method_table(method)) {
1491			/* If not, release management method table */
1492			 kfree(method);
1493			 class->method_table[mgmt_class] = NULL;
1494			 /* Any management classes left ? */
1495			if (!check_class_table(class)) {
1496				/* If not, release management class table */
1497				kfree(class);
1498				port_priv->version[
1499					agent_priv->reg_req->
1500					mgmt_class_version].class = NULL;
1501			}
1502		}
1503	}
1504
1505vendor_check:
1506	if (!is_vendor_class(mgmt_class))
1507		goto out;
1508
1509	/* normalize mgmt_class to vendor range 2 */
1510	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1511	vendor = port_priv->version[
1512			agent_priv->reg_req->mgmt_class_version].vendor;
1513
1514	if (!vendor)
1515		goto out;
1516
1517	vendor_class = vendor->vendor_class[mgmt_class];
1518	if (vendor_class) {
1519		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1520		if (index < 0)
1521			goto out;
1522		method = vendor_class->method_table[index];
1523		if (method) {
1524			/* Remove any methods for this mad agent */
1525			remove_methods_mad_agent(method, agent_priv);
1526			/*
1527			 * Now, check to see if there are
1528			 * any methods still in use
1529			 */
1530			if (!check_method_table(method)) {
1531				/* If not, release management method table */
1532				kfree(method);
1533				vendor_class->method_table[index] = NULL;
1534				memset(vendor_class->oui[index], 0, 3);
1535				/* Any OUIs left ? */
1536				if (!check_vendor_class(vendor_class)) {
1537					/* If not, release vendor class table */
1538					kfree(vendor_class);
1539					vendor->vendor_class[mgmt_class] = NULL;
1540					/* Any other vendor classes left ? */
1541					if (!check_vendor_table(vendor)) {
1542						kfree(vendor);
1543						port_priv->version[
1544							agent_priv->reg_req->
1545							mgmt_class_version].
1546							vendor = NULL;
1547					}
1548				}
1549			}
1550		}
1551	}
1552
1553out:
1554	return;
1555}
1556
1557static struct ib_mad_agent_private *
1558find_mad_agent(struct ib_mad_port_private *port_priv,
1559	       struct ib_mad *mad)
1560{
1561	struct ib_mad_agent_private *mad_agent = NULL;
1562	unsigned long flags;
1563
1564	spin_lock_irqsave(&port_priv->reg_lock, flags);
1565	if (ib_response_mad(mad)) {
1566		u32 hi_tid;
1567		struct ib_mad_agent_private *entry;
1568
1569		/*
1570		 * Routing is based on high 32 bits of transaction ID
1571		 * of MAD.
1572		 */
1573		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1574		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1575			if (entry->agent.hi_tid == hi_tid) {
1576				mad_agent = entry;
1577				break;
1578			}
1579		}
1580	} else {
1581		struct ib_mad_mgmt_class_table *class;
1582		struct ib_mad_mgmt_method_table *method;
1583		struct ib_mad_mgmt_vendor_class_table *vendor;
1584		struct ib_mad_mgmt_vendor_class *vendor_class;
1585		struct ib_vendor_mad *vendor_mad;
1586		int index;
1587
1588		/*
1589		 * Routing is based on version, class, and method
1590		 * For "newer" vendor MADs, also based on OUI
1591		 */
1592		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1593			goto out;
1594		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1595			class = port_priv->version[
1596					mad->mad_hdr.class_version].class;
1597			if (!class)
1598				goto out;
1599			method = class->method_table[convert_mgmt_class(
1600							mad->mad_hdr.mgmt_class)];
1601			if (method)
1602				mad_agent = method->agent[mad->mad_hdr.method &
1603							  ~IB_MGMT_METHOD_RESP];
1604		} else {
1605			vendor = port_priv->version[
1606					mad->mad_hdr.class_version].vendor;
1607			if (!vendor)
1608				goto out;
1609			vendor_class = vendor->vendor_class[vendor_class_index(
1610						mad->mad_hdr.mgmt_class)];
1611			if (!vendor_class)
1612				goto out;
1613			/* Find matching OUI */
1614			vendor_mad = (struct ib_vendor_mad *)mad;
1615			index = find_vendor_oui(vendor_class, vendor_mad->oui);
1616			if (index == -1)
1617				goto out;
1618			method = vendor_class->method_table[index];
1619			if (method) {
1620				mad_agent = method->agent[mad->mad_hdr.method &
1621							  ~IB_MGMT_METHOD_RESP];
1622			}
1623		}
1624	}
1625
1626	if (mad_agent) {
1627		if (mad_agent->agent.recv_handler)
1628			atomic_inc(&mad_agent->refcount);
1629		else {
1630			printk(KERN_NOTICE PFX "No receive handler for client "
1631			       "%p on port %d\n",
1632			       &mad_agent->agent, port_priv->port_num);
1633			mad_agent = NULL;
1634		}
1635	}
1636out:
1637	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1638
1639	return mad_agent;
1640}
1641
1642static int validate_mad(struct ib_mad *mad, u32 qp_num)
1643{
1644	int valid = 0;
1645
1646	/* Make sure MAD base version is understood */
1647	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1648		printk(KERN_ERR PFX "MAD received with unsupported base "
1649		       "version %d\n", mad->mad_hdr.base_version);
1650		goto out;
1651	}
1652
1653	/* Filter SMI packets sent to other than QP0 */
1654	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1655	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1656		if (qp_num == 0)
1657			valid = 1;
1658	} else {
1659		/* Filter GSI packets sent to QP0 */
1660		if (qp_num != 0)
1661			valid = 1;
1662	}
1663
1664out:
1665	return valid;
1666}
1667
1668static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1669		       struct ib_mad_hdr *mad_hdr)
1670{
1671	struct ib_rmpp_mad *rmpp_mad;
1672
1673	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1674	return !mad_agent_priv->agent.rmpp_version ||
1675		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1676				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
1677		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1678}
1679
1680static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1681				     struct ib_mad_recv_wc *rwc)
1682{
1683	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1684		rwc->recv_buf.mad->mad_hdr.mgmt_class;
1685}
1686
1687static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1688				   struct ib_mad_send_wr_private *wr,
1689				   struct ib_mad_recv_wc *rwc )
1690{
1691	struct ib_ah_attr attr;
1692	u8 send_resp, rcv_resp;
1693	union ib_gid sgid;
1694	struct ib_device *device = mad_agent_priv->agent.device;
1695	u8 port_num = mad_agent_priv->agent.port_num;
1696	u8 lmc;
1697
1698	send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1699	rcv_resp = ib_response_mad(rwc->recv_buf.mad);
1700
1701	if (send_resp == rcv_resp)
1702		/* both requests, or both responses. GIDs different */
1703		return 0;
1704
1705	if (ib_query_ah(wr->send_buf.ah, &attr))
1706		/* Assume not equal, to avoid false positives. */
1707		return 0;
1708
1709	if (!!(attr.ah_flags & IB_AH_GRH) !=
1710	    !!(rwc->wc->wc_flags & IB_WC_GRH))
1711		/* one has GID, other does not.  Assume different */
1712		return 0;
1713
1714	if (!send_resp && rcv_resp) {
1715		/* is request/response. */
1716		if (!(attr.ah_flags & IB_AH_GRH)) {
1717			if (ib_get_cached_lmc(device, port_num, &lmc))
1718				return 0;
1719			return (!lmc || !((attr.src_path_bits ^
1720					   rwc->wc->dlid_path_bits) &
1721					  ((1 << lmc) - 1)));
1722		} else {
1723			if (ib_get_cached_gid(device, port_num,
1724					      attr.grh.sgid_index, &sgid))
1725				return 0;
1726			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1727				       16);
1728		}
1729	}
1730
1731	if (!(attr.ah_flags & IB_AH_GRH))
1732		return attr.dlid == rwc->wc->slid;
1733	else
1734		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1735			       16);
1736}
1737
1738static inline int is_direct(u8 class)
1739{
1740	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1741}
1742
1743struct ib_mad_send_wr_private*
1744ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1745		 struct ib_mad_recv_wc *wc)
1746{
1747	struct ib_mad_send_wr_private *wr;
1748	struct ib_mad *mad;
1749
1750	mad = (struct ib_mad *)wc->recv_buf.mad;
1751
1752	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1753		if ((wr->tid == mad->mad_hdr.tid) &&
1754		    rcv_has_same_class(wr, wc) &&
1755		    /*
1756		     * Don't check GID for direct routed MADs.
1757		     * These might have permissive LIDs.
1758		     */
1759		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1760		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
1761			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1762	}
1763
1764	/*
1765	 * It's possible to receive the response before we've
1766	 * been notified that the send has completed
1767	 */
1768	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1769		if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1770		    wr->tid == mad->mad_hdr.tid &&
1771		    wr->timeout &&
1772		    rcv_has_same_class(wr, wc) &&
1773		    /*
1774		     * Don't check GID for direct routed MADs.
1775		     * These might have permissive LIDs.
1776		     */
1777		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1778		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
1779			/* Verify request has not been canceled */
1780			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1781	}
1782	return NULL;
1783}
1784
1785void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1786{
1787	mad_send_wr->timeout = 0;
1788	if (mad_send_wr->refcount == 1)
1789		list_move_tail(&mad_send_wr->agent_list,
1790			      &mad_send_wr->mad_agent_priv->done_list);
1791}
1792
1793static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1794				 struct ib_mad_recv_wc *mad_recv_wc)
1795{
1796	struct ib_mad_send_wr_private *mad_send_wr;
1797	struct ib_mad_send_wc mad_send_wc;
1798	unsigned long flags;
 
1799
1800	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1801	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1802	if (mad_agent_priv->agent.rmpp_version) {
1803		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1804						      mad_recv_wc);
1805		if (!mad_recv_wc) {
1806			deref_mad_agent(mad_agent_priv);
1807			return;
1808		}
1809	}
1810
1811	/* Complete corresponding request */
1812	if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1813		spin_lock_irqsave(&mad_agent_priv->lock, flags);
1814		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1815		if (!mad_send_wr) {
1816			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1817			ib_free_recv_mad(mad_recv_wc);
1818			deref_mad_agent(mad_agent_priv);
1819			return;
1820		}
1821		ib_mark_mad_done(mad_send_wr);
1822		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1823
1824		/* Defined behavior is to complete response before request */
1825		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1826		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1827						   mad_recv_wc);
1828		atomic_dec(&mad_agent_priv->refcount);
 
1829
1830		mad_send_wc.status = IB_WC_SUCCESS;
1831		mad_send_wc.vendor_err = 0;
1832		mad_send_wc.send_buf = &mad_send_wr->send_buf;
1833		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
 
1834	} else {
1835		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1836						   mad_recv_wc);
1837		deref_mad_agent(mad_agent_priv);
1838	}
1839}
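
/*
 * Illustrative sketch (assumed client code, not part of this file): the
 * recv_handler invoked above owns mad_recv_wc and must release it with
 * ib_free_recv_mad() when it is done.  For solicited MADs the handler runs
 * before the matching send completion, and mad_recv_wc->wc->wr_id has been
 * set to point at the corresponding ib_mad_send_buf.  Names are hypothetical.
 */
static void example_recv_handler(struct ib_mad_agent *mad_agent,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad *mad = mad_recv_wc->recv_buf.mad;

	pr_debug("MAD method 0x%x, status 0x%x\n",
		 mad->mad_hdr.method, be16_to_cpu(mad->mad_hdr.status));

	/* hand the receive buffers back to the MAD layer */
	ib_free_recv_mad(mad_recv_wc);
}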
1840
1841static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1842				     struct ib_wc *wc)
1843{
1844	struct ib_mad_qp_info *qp_info;
1845	struct ib_mad_private_header *mad_priv_hdr;
1846	struct ib_mad_private *recv, *response = NULL;
1847	struct ib_mad_list_head *mad_list;
1848	struct ib_mad_agent_private *mad_agent;
1849	int port_num;
1850
1851	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1852	qp_info = mad_list->mad_queue->qp_info;
1853	dequeue_mad(mad_list);
1854
1855	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1856				    mad_list);
1857	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1858	ib_dma_unmap_single(port_priv->device,
1859			    recv->header.mapping,
1860			    sizeof(struct ib_mad_private) -
1861			      sizeof(struct ib_mad_private_header),
1862			    DMA_FROM_DEVICE);
1863
1864	/* Setup MAD receive work completion from "normal" work completion */
1865	recv->header.wc = *wc;
1866	recv->header.recv_wc.wc = &recv->header.wc;
1867	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1868	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1869	recv->header.recv_wc.recv_buf.grh = &recv->grh;
1870
1871	if (atomic_read(&qp_info->snoop_count))
1872		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1873
1874	/* Validate MAD */
1875	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1876		goto out;
1877
1878	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1879	if (!response) {
1880		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1881		       "for response buffer\n");
1882		goto out;
1883	}
1884
1885	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1886		port_num = wc->port_num;
1887	else
1888		port_num = port_priv->port_num;
1889
1890	if (recv->mad.mad.mad_hdr.mgmt_class ==
1891	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1892		enum smi_forward_action retsmi;
1893
1894		if (smi_handle_dr_smp_recv(&recv->mad.smp,
1895					   port_priv->device->node_type,
1896					   port_num,
1897					   port_priv->device->phys_port_cnt) ==
1898					   IB_SMI_DISCARD)
1899			goto out;
1900
1901		retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1902		if (retsmi == IB_SMI_LOCAL)
1903			goto local;
1904
1905		if (retsmi == IB_SMI_SEND) { /* don't forward */
1906			if (smi_handle_dr_smp_send(&recv->mad.smp,
1907						   port_priv->device->node_type,
1908						   port_num) == IB_SMI_DISCARD)
1909				goto out;
1910
1911			if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1912				goto out;
1913		} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1914			/* forward case for switches */
1915			memcpy(response, recv, sizeof(*response));
1916			response->header.recv_wc.wc = &response->header.wc;
1917			response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1918			response->header.recv_wc.recv_buf.grh = &response->grh;
1919
1920			agent_send_response(&response->mad.mad,
1921					    &response->grh, wc,
1922					    port_priv->device,
1923					    smi_get_fwd_port(&recv->mad.smp),
1924					    qp_info->qp->qp_num);
1925
1926			goto out;
1927		}
1928	}
1929
1930local:
1931	/* Give driver "right of first refusal" on incoming MAD */
1932	if (port_priv->device->process_mad) {
1933		int ret;
1934
1935		ret = port_priv->device->process_mad(port_priv->device, 0,
1936						     port_priv->port_num,
1937						     wc, &recv->grh,
1938						     &recv->mad.mad,
1939						     &response->mad.mad);
1940		if (ret & IB_MAD_RESULT_SUCCESS) {
1941			if (ret & IB_MAD_RESULT_CONSUMED)
1942				goto out;
1943			if (ret & IB_MAD_RESULT_REPLY) {
1944				agent_send_response(&response->mad.mad,
1945						    &recv->grh, wc,
1946						    port_priv->device,
1947						    port_num,
1948						    qp_info->qp->qp_num);
 
1949				goto out;
1950			}
1951		}
1952	}
1953
1954	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1955	if (mad_agent) {
 
1956		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1957		/*
1958		 * recv is now owned by ib_mad_complete_recv(): it is freed
1959		 * there on error or later by the client via ib_free_recv_mad()
1960		 */
1961		recv = NULL;
1962	}
1963
1964out:
1965	/* Post another receive request for this QP */
1966	if (response) {
1967		ib_mad_post_receive_mads(qp_info, response);
1968		if (recv)
1969			kmem_cache_free(ib_mad_cache, recv);
1970	} else
1971		ib_mad_post_receive_mads(qp_info, recv);
1972}
1973
1974static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1975{
1976	struct ib_mad_send_wr_private *mad_send_wr;
1977	unsigned long delay;
1978
1979	if (list_empty(&mad_agent_priv->wait_list)) {
1980		__cancel_delayed_work(&mad_agent_priv->timed_work);
1981	} else {
1982		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1983					 struct ib_mad_send_wr_private,
1984					 agent_list);
1985
1986		if (time_after(mad_agent_priv->timeout,
1987			       mad_send_wr->timeout)) {
1988			mad_agent_priv->timeout = mad_send_wr->timeout;
1989			__cancel_delayed_work(&mad_agent_priv->timed_work);
1990			delay = mad_send_wr->timeout - jiffies;
1991			if ((long)delay <= 0)
1992				delay = 1;
1993			queue_delayed_work(mad_agent_priv->qp_info->
1994					   port_priv->wq,
1995					   &mad_agent_priv->timed_work, delay);
1996		}
1997	}
1998}
1999
2000static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2001{
2002	struct ib_mad_agent_private *mad_agent_priv;
2003	struct ib_mad_send_wr_private *temp_mad_send_wr;
2004	struct list_head *list_item;
2005	unsigned long delay;
2006
2007	mad_agent_priv = mad_send_wr->mad_agent_priv;
2008	list_del(&mad_send_wr->agent_list);
2009
2010	delay = mad_send_wr->timeout;
2011	mad_send_wr->timeout += jiffies;
2012
2013	if (delay) {
2014		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2015			temp_mad_send_wr = list_entry(list_item,
2016						struct ib_mad_send_wr_private,
2017						agent_list);
2018			if (time_after(mad_send_wr->timeout,
2019				       temp_mad_send_wr->timeout))
2020				break;
2021		}
2022	}
2023	else
2024		list_item = &mad_agent_priv->wait_list;
2025	list_add(&mad_send_wr->agent_list, list_item);
2026
2027	/* Reschedule a work item if we have a shorter timeout */
2028	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
2029		__cancel_delayed_work(&mad_agent_priv->timed_work);
2030		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2031				   &mad_agent_priv->timed_work, delay);
2032	}
2033}
2034
2035void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2036			  int timeout_ms)
2037{
2038	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2039	wait_for_response(mad_send_wr);
2040}
2041
2042/*
2043 * Process a send work completion
2044 */
2045void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2046			     struct ib_mad_send_wc *mad_send_wc)
2047{
2048	struct ib_mad_agent_private	*mad_agent_priv;
2049	unsigned long			flags;
2050	int				ret;
2051
2052	mad_agent_priv = mad_send_wr->mad_agent_priv;
2053	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2054	if (mad_agent_priv->agent.rmpp_version) {
2055		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2056		if (ret == IB_RMPP_RESULT_CONSUMED)
2057			goto done;
2058	} else
2059		ret = IB_RMPP_RESULT_UNHANDLED;
2060
2061	if (mad_send_wc->status != IB_WC_SUCCESS &&
2062	    mad_send_wr->status == IB_WC_SUCCESS) {
2063		mad_send_wr->status = mad_send_wc->status;
2064		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2065	}
2066
2067	if (--mad_send_wr->refcount > 0) {
2068		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2069		    mad_send_wr->status == IB_WC_SUCCESS) {
2070			wait_for_response(mad_send_wr);
2071		}
2072		goto done;
2073	}
2074
2075	/* Remove send from MAD agent and notify client of completion */
2076	list_del(&mad_send_wr->agent_list);
2077	adjust_timeout(mad_agent_priv);
2078	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2079
2080	if (mad_send_wr->status != IB_WC_SUCCESS)
2081		mad_send_wc->status = mad_send_wr->status;
2082	if (ret == IB_RMPP_RESULT_INTERNAL)
2083		ib_rmpp_send_handler(mad_send_wc);
2084	else
2085		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2086						   mad_send_wc);
2087
2088	/* Release reference on agent taken when sending */
2089	deref_mad_agent(mad_agent_priv);
2090	return;
2091done:
2092	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2093}
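
/*
 * Illustrative sketch (assumed client code, not part of this file): a
 * minimal send_handler as invoked above.  The client still owns the send
 * buffer and typically frees it here with ib_free_send_mad().  Names are
 * hypothetical.
 */
static void example_send_handler(struct ib_mad_agent *mad_agent,
				 struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->status != IB_WC_SUCCESS)
		pr_debug("MAD send completed with status %d\n",
			 mad_send_wc->status);

	/* release the buffer that was allocated with ib_create_send_mad() */
	ib_free_send_mad(mad_send_wc->send_buf);
}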
2094
2095static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2096				     struct ib_wc *wc)
2097{
2098	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
2099	struct ib_mad_list_head		*mad_list;
2100	struct ib_mad_qp_info		*qp_info;
2101	struct ib_mad_queue		*send_queue;
2102	struct ib_send_wr		*bad_send_wr;
2103	struct ib_mad_send_wc		mad_send_wc;
2104	unsigned long flags;
2105	int ret;
2106
2107	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2108	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2109				   mad_list);
2110	send_queue = mad_list->mad_queue;
2111	qp_info = send_queue->qp_info;
2112
2113retry:
2114	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2115			    mad_send_wr->header_mapping,
2116			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2117	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2118			    mad_send_wr->payload_mapping,
2119			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2120	queued_send_wr = NULL;
2121	spin_lock_irqsave(&send_queue->lock, flags);
2122	list_del(&mad_list->list);
2123
2124	/* Move queued send to the send queue */
2125	if (send_queue->count-- > send_queue->max_active) {
2126		mad_list = container_of(qp_info->overflow_list.next,
2127					struct ib_mad_list_head, list);
2128		queued_send_wr = container_of(mad_list,
2129					struct ib_mad_send_wr_private,
2130					mad_list);
2131		list_move_tail(&mad_list->list, &send_queue->list);
2132	}
2133	spin_unlock_irqrestore(&send_queue->lock, flags);
2134
2135	mad_send_wc.send_buf = &mad_send_wr->send_buf;
2136	mad_send_wc.status = wc->status;
2137	mad_send_wc.vendor_err = wc->vendor_err;
2138	if (atomic_read(&qp_info->snoop_count))
2139		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2140			   IB_MAD_SNOOP_SEND_COMPLETIONS);
2141	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2142
2143	if (queued_send_wr) {
2144		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2145				   &bad_send_wr);
 
2146		if (ret) {
2147			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
 
2148			mad_send_wr = queued_send_wr;
2149			wc->status = IB_WC_LOC_QP_OP_ERR;
2150			goto retry;
2151		}
2152	}
2153}
2154
2155static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2156{
2157	struct ib_mad_send_wr_private *mad_send_wr;
2158	struct ib_mad_list_head *mad_list;
2159	unsigned long flags;
2160
2161	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2162	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2163		mad_send_wr = container_of(mad_list,
2164					   struct ib_mad_send_wr_private,
2165					   mad_list);
2166		mad_send_wr->retry = 1;
2167	}
2168	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2169}
2170
2171static void mad_error_handler(struct ib_mad_port_private *port_priv,
2172			      struct ib_wc *wc)
2173{
2174	struct ib_mad_list_head *mad_list;
2175	struct ib_mad_qp_info *qp_info;
 
2176	struct ib_mad_send_wr_private *mad_send_wr;
2177	int ret;
2178
2179	/* Determine if failure was a send or receive */
2180	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2181	qp_info = mad_list->mad_queue->qp_info;
2182	if (mad_list->mad_queue == &qp_info->recv_queue)
2183		/*
2184		 * Receive errors indicate that the QP has entered the error
2185		 * state - error handling/shutdown code will cleanup
2186		 */
2187		return;
2188
2189	/*
2190	 * Send errors will transition the QP to SQE - move
2191	 * QP to RTS and repost flushed work requests
2192	 */
2193	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2194				   mad_list);
2195	if (wc->status == IB_WC_WR_FLUSH_ERR) {
2196		if (mad_send_wr->retry) {
2197			/* Repost send */
2198			struct ib_send_wr *bad_send_wr;
2199
2200			mad_send_wr->retry = 0;
2201			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2202					&bad_send_wr);
2203			if (ret)
2204				ib_mad_send_done_handler(port_priv, wc);
2205		} else
2206			ib_mad_send_done_handler(port_priv, wc);
2207	} else {
2208		struct ib_qp_attr *attr;
2209
2210		/* Transition QP to RTS and fail offending send */
2211		attr = kmalloc(sizeof *attr, GFP_KERNEL);
2212		if (attr) {
2213			attr->qp_state = IB_QPS_RTS;
2214			attr->cur_qp_state = IB_QPS_SQE;
2215			ret = ib_modify_qp(qp_info->qp, attr,
2216					   IB_QP_STATE | IB_QP_CUR_STATE);
2217			kfree(attr);
2218			if (ret)
2219				printk(KERN_ERR PFX "mad_error_handler - "
2220				       "ib_modify_qp to RTS: %d\n", ret);
 
2221			else
2222				mark_sends_for_retry(qp_info);
2223		}
2224		ib_mad_send_done_handler(port_priv, wc);
2225	}
2226}
2227
2228/*
2229 * IB MAD completion callback
2230 */
2231static void ib_mad_completion_handler(struct work_struct *work)
2232{
2233	struct ib_mad_port_private *port_priv;
2234	struct ib_wc wc;
2235
2236	port_priv = container_of(work, struct ib_mad_port_private, work);
2237	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2238
2239	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2240		if (wc.status == IB_WC_SUCCESS) {
2241			switch (wc.opcode) {
2242			case IB_WC_SEND:
2243				ib_mad_send_done_handler(port_priv, &wc);
2244				break;
2245			case IB_WC_RECV:
2246				ib_mad_recv_done_handler(port_priv, &wc);
2247				break;
2248			default:
2249				BUG_ON(1);
2250				break;
2251			}
2252		} else
2253			mad_error_handler(port_priv, &wc);
2254	}
2255}
2256
2257static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2258{
2259	unsigned long flags;
2260	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2261	struct ib_mad_send_wc mad_send_wc;
2262	struct list_head cancel_list;
2263
2264	INIT_LIST_HEAD(&cancel_list);
2265
2266	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2267	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2268				 &mad_agent_priv->send_list, agent_list) {
2269		if (mad_send_wr->status == IB_WC_SUCCESS) {
2270			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2271			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2272		}
2273	}
2274
2275	/* Empty wait list to prevent receives from finding a request */
2276	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2277	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2278
2279	/* Report all cancelled requests */
2280	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2281	mad_send_wc.vendor_err = 0;
2282
2283	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2284				 &cancel_list, agent_list) {
2285		mad_send_wc.send_buf = &mad_send_wr->send_buf;
2286		list_del(&mad_send_wr->agent_list);
2287		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2288						   &mad_send_wc);
2289		atomic_dec(&mad_agent_priv->refcount);
2290	}
2291}
2292
2293static struct ib_mad_send_wr_private*
2294find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2295	     struct ib_mad_send_buf *send_buf)
2296{
2297	struct ib_mad_send_wr_private *mad_send_wr;
2298
2299	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2300			    agent_list) {
2301		if (&mad_send_wr->send_buf == send_buf)
2302			return mad_send_wr;
2303	}
2304
2305	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2306			    agent_list) {
2307		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
 
2308		    &mad_send_wr->send_buf == send_buf)
2309			return mad_send_wr;
2310	}
2311	return NULL;
2312}
2313
2314int ib_modify_mad(struct ib_mad_agent *mad_agent,
2315		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2316{
2317	struct ib_mad_agent_private *mad_agent_priv;
2318	struct ib_mad_send_wr_private *mad_send_wr;
2319	unsigned long flags;
2320	int active;
2321
2322	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2323				      agent);
2324	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2325	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2326	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2327		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2328		return -EINVAL;
2329	}
2330
2331	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2332	if (!timeout_ms) {
2333		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2334		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2335	}
2336
2337	mad_send_wr->send_buf.timeout_ms = timeout_ms;
2338	if (active)
2339		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2340	else
2341		ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2342
2343	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2344	return 0;
2345}
2346EXPORT_SYMBOL(ib_modify_mad);
2347
2348void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2349		   struct ib_mad_send_buf *send_buf)
2350{
2351	ib_modify_mad(mad_agent, send_buf, 0);
2352}
2353EXPORT_SYMBOL(ib_cancel_mad);
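
/*
 * Illustrative sketch (assumed caller, not part of this file): a client may
 * extend the timeout of an outstanding send with ib_modify_mad(), or abort
 * it with ib_cancel_mad(); a cancelled send is reported to the agent's
 * send_handler with status IB_WC_WR_FLUSH_ERR.  Names are hypothetical.
 */
static void example_cancel_send(struct ib_mad_agent *agent,
				struct ib_mad_send_buf *send_buf)
{
	/*
	 * Equivalent to ib_modify_mad(agent, send_buf, 0); the send then
	 * completes through the agent's send_handler as IB_WC_WR_FLUSH_ERR.
	 */
	ib_cancel_mad(agent, send_buf);
}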
2354
2355static void local_completions(struct work_struct *work)
2356{
2357	struct ib_mad_agent_private *mad_agent_priv;
2358	struct ib_mad_local_private *local;
2359	struct ib_mad_agent_private *recv_mad_agent;
2360	unsigned long flags;
2361	int free_mad;
2362	struct ib_wc wc;
2363	struct ib_mad_send_wc mad_send_wc;
 
2364
2365	mad_agent_priv =
2366		container_of(work, struct ib_mad_agent_private, local_work);
2367
2368	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2369	while (!list_empty(&mad_agent_priv->local_list)) {
2370		local = list_entry(mad_agent_priv->local_list.next,
2371				   struct ib_mad_local_private,
2372				   completion_list);
2373		list_del(&local->completion_list);
2374		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2375		free_mad = 0;
2376		if (local->mad_priv) {
 
2377			recv_mad_agent = local->recv_mad_agent;
2378			if (!recv_mad_agent) {
2379				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
 
2380				free_mad = 1;
2381				goto local_send_completion;
2382			}
2383
2384			/*
2385			 * Defined behavior is to complete response
2386			 * before request
2387			 */
2388			build_smp_wc(recv_mad_agent->agent.qp,
2389				     (unsigned long) local->mad_send_wr,
2390				     be16_to_cpu(IB_LID_PERMISSIVE),
2391				     0, recv_mad_agent->agent.port_num, &wc);
 
2392
2393			local->mad_priv->header.recv_wc.wc = &wc;
2394			local->mad_priv->header.recv_wc.mad_len =
2395						sizeof(struct ib_mad);
2396			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2397			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2398				 &local->mad_priv->header.recv_wc.rmpp_list);
2399			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2400			local->mad_priv->header.recv_wc.recv_buf.mad =
2401						&local->mad_priv->mad.mad;
2402			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2403				snoop_recv(recv_mad_agent->qp_info,
2404					  &local->mad_priv->header.recv_wc,
2405					   IB_MAD_SNOOP_RECVS);
2406			recv_mad_agent->agent.recv_handler(
2407						&recv_mad_agent->agent,
 
2408						&local->mad_priv->header.recv_wc);
2409			spin_lock_irqsave(&recv_mad_agent->lock, flags);
2410			atomic_dec(&recv_mad_agent->refcount);
2411			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2412		}
2413
2414local_send_completion:
2415		/* Complete send */
2416		mad_send_wc.status = IB_WC_SUCCESS;
2417		mad_send_wc.vendor_err = 0;
2418		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2419		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2420			snoop_send(mad_agent_priv->qp_info,
2421				   &local->mad_send_wr->send_buf,
2422				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2423		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2424						   &mad_send_wc);
2425
2426		spin_lock_irqsave(&mad_agent_priv->lock, flags);
2427		atomic_dec(&mad_agent_priv->refcount);
2428		if (free_mad)
2429			kmem_cache_free(ib_mad_cache, local->mad_priv);
2430		kfree(local);
2431	}
2432	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2433}
2434
2435static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2436{
2437	int ret;
2438
2439	if (!mad_send_wr->retries_left)
2440		return -ETIMEDOUT;
2441
2442	mad_send_wr->retries_left--;
2443	mad_send_wr->send_buf.retries++;
2444
2445	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2446
2447	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2448		ret = ib_retry_rmpp(mad_send_wr);
2449		switch (ret) {
2450		case IB_RMPP_RESULT_UNHANDLED:
2451			ret = ib_send_mad(mad_send_wr);
2452			break;
2453		case IB_RMPP_RESULT_CONSUMED:
2454			ret = 0;
2455			break;
2456		default:
2457			ret = -ECOMM;
2458			break;
2459		}
2460	} else
2461		ret = ib_send_mad(mad_send_wr);
2462
2463	if (!ret) {
2464		mad_send_wr->refcount++;
2465		list_add_tail(&mad_send_wr->agent_list,
2466			      &mad_send_wr->mad_agent_priv->send_list);
2467	}
2468	return ret;
2469}
2470
2471static void timeout_sends(struct work_struct *work)
2472{
2473	struct ib_mad_agent_private *mad_agent_priv;
2474	struct ib_mad_send_wr_private *mad_send_wr;
2475	struct ib_mad_send_wc mad_send_wc;
2476	unsigned long flags, delay;
2477
2478	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2479				      timed_work.work);
2480	mad_send_wc.vendor_err = 0;
2481
2482	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2483	while (!list_empty(&mad_agent_priv->wait_list)) {
2484		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2485					 struct ib_mad_send_wr_private,
2486					 agent_list);
2487
2488		if (time_after(mad_send_wr->timeout, jiffies)) {
2489			delay = mad_send_wr->timeout - jiffies;
2490			if ((long)delay <= 0)
2491				delay = 1;
2492			queue_delayed_work(mad_agent_priv->qp_info->
2493					   port_priv->wq,
2494					   &mad_agent_priv->timed_work, delay);
2495			break;
2496		}
2497
2498		list_del(&mad_send_wr->agent_list);
2499		if (mad_send_wr->status == IB_WC_SUCCESS &&
2500		    !retry_send(mad_send_wr))
2501			continue;
2502
2503		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2504
2505		if (mad_send_wr->status == IB_WC_SUCCESS)
2506			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2507		else
2508			mad_send_wc.status = mad_send_wr->status;
2509		mad_send_wc.send_buf = &mad_send_wr->send_buf;
2510		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2511						   &mad_send_wc);
2512
2513		atomic_dec(&mad_agent_priv->refcount);
2514		spin_lock_irqsave(&mad_agent_priv->lock, flags);
2515	}
2516	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2517}
2518
2519static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2520{
2521	struct ib_mad_port_private *port_priv = cq->cq_context;
2522	unsigned long flags;
2523
2524	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2525	if (!list_empty(&port_priv->port_list))
2526		queue_work(port_priv->wq, &port_priv->work);
2527	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2528}
2529
2530/*
2531 * Allocate receive MADs and post receive WRs for them
2532 */
2533static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2534				    struct ib_mad_private *mad)
2535{
2536	unsigned long flags;
2537	int post, ret;
2538	struct ib_mad_private *mad_priv;
2539	struct ib_sge sg_list;
2540	struct ib_recv_wr recv_wr, *bad_recv_wr;
2541	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2542
2543	/* Initialize common scatter list fields */
2544	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2545	sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2546
2547	/* Initialize common receive WR fields */
2548	recv_wr.next = NULL;
2549	recv_wr.sg_list = &sg_list;
2550	recv_wr.num_sge = 1;
2551
2552	do {
2553		/* Allocate and map receive buffer */
2554		if (mad) {
2555			mad_priv = mad;
2556			mad = NULL;
2557		} else {
2558			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
 
2559			if (!mad_priv) {
2560				printk(KERN_ERR PFX "No memory for receive buffer\n");
2561				ret = -ENOMEM;
2562				break;
2563			}
2564		}
 
2565		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2566						 &mad_priv->grh,
2567						 sizeof *mad_priv -
2568						   sizeof mad_priv->header,
2569						 DMA_FROM_DEVICE);
2570		mad_priv->header.mapping = sg_list.addr;
2571		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2572		mad_priv->header.mad_list.mad_queue = recv_queue;
2573
2574		/* Post receive WR */
2575		spin_lock_irqsave(&recv_queue->lock, flags);
2576		post = (++recv_queue->count < recv_queue->max_active);
2577		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2578		spin_unlock_irqrestore(&recv_queue->lock, flags);
2579		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2580		if (ret) {
2581			spin_lock_irqsave(&recv_queue->lock, flags);
2582			list_del(&mad_priv->header.mad_list.list);
2583			recv_queue->count--;
2584			spin_unlock_irqrestore(&recv_queue->lock, flags);
2585			ib_dma_unmap_single(qp_info->port_priv->device,
2586					    mad_priv->header.mapping,
2587					    sizeof *mad_priv -
2588					      sizeof mad_priv->header,
2589					    DMA_FROM_DEVICE);
2590			kmem_cache_free(ib_mad_cache, mad_priv);
2591			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
 
2592			break;
2593		}
2594	} while (post);
2595
2596	return ret;
2597}
2598
2599/*
2600 * Return all the posted receive MADs
2601 */
2602static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2603{
2604	struct ib_mad_private_header *mad_priv_hdr;
2605	struct ib_mad_private *recv;
2606	struct ib_mad_list_head *mad_list;
2607
2608	if (!qp_info->qp)
2609		return;
2610
2611	while (!list_empty(&qp_info->recv_queue.list)) {
2612
2613		mad_list = list_entry(qp_info->recv_queue.list.next,
2614				      struct ib_mad_list_head, list);
2615		mad_priv_hdr = container_of(mad_list,
2616					    struct ib_mad_private_header,
2617					    mad_list);
2618		recv = container_of(mad_priv_hdr, struct ib_mad_private,
2619				    header);
2620
2621		/* Remove from posted receive MAD list */
2622		list_del(&mad_list->list);
2623
2624		ib_dma_unmap_single(qp_info->port_priv->device,
2625				    recv->header.mapping,
2626				    sizeof(struct ib_mad_private) -
2627				      sizeof(struct ib_mad_private_header),
2628				    DMA_FROM_DEVICE);
2629		kmem_cache_free(ib_mad_cache, recv);
2630	}
2631
2632	qp_info->recv_queue.count = 0;
2633}
2634
2635/*
2636 * Start the port
2637 */
2638static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2639{
2640	int ret, i;
2641	struct ib_qp_attr *attr;
2642	struct ib_qp *qp;
 
2643
2644	attr = kmalloc(sizeof *attr, GFP_KERNEL);
2645	if (!attr) {
2646		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2647		return -ENOMEM;
2648	}
2649
2650	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2651		qp = port_priv->qp_info[i].qp;
2652		if (!qp)
2653			continue;
2654
2655		/*
2656		 * PKey index for QP1 is irrelevant but
2657		 * one is needed for the Reset to Init transition
2658		 */
2659		attr->qp_state = IB_QPS_INIT;
2660		attr->pkey_index = 0;
2661		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2662		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2663					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
2664		if (ret) {
2665			printk(KERN_ERR PFX "Couldn't change QP%d state to "
2666			       "INIT: %d\n", i, ret);
 
2667			goto out;
2668		}
2669
2670		attr->qp_state = IB_QPS_RTR;
2671		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2672		if (ret) {
2673			printk(KERN_ERR PFX "Couldn't change QP%d state to "
2674			       "RTR: %d\n", i, ret);
 
2675			goto out;
2676		}
2677
2678		attr->qp_state = IB_QPS_RTS;
2679		attr->sq_psn = IB_MAD_SEND_Q_PSN;
2680		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2681		if (ret) {
2682			printk(KERN_ERR PFX "Couldn't change QP%d state to "
2683			       "RTS: %d\n", i, ret);
 
2684			goto out;
2685		}
2686	}
2687
2688	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2689	if (ret) {
2690		printk(KERN_ERR PFX "Failed to request completion "
2691		       "notification: %d\n", ret);
 
2692		goto out;
2693	}
2694
2695	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2696		if (!port_priv->qp_info[i].qp)
2697			continue;
2698
2699		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2700		if (ret) {
2701			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
 
2702			goto out;
2703		}
2704	}
2705out:
2706	kfree(attr);
2707	return ret;
2708}
2709
2710static void qp_event_handler(struct ib_event *event, void *qp_context)
2711{
2712	struct ib_mad_qp_info	*qp_info = qp_context;
2713
2714	/* It's worse than that! He's dead, Jim! */
2715	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
 
2716		event->event, qp_info->qp->qp_num);
2717}
2718
2719static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2720			   struct ib_mad_queue *mad_queue)
2721{
2722	mad_queue->qp_info = qp_info;
2723	mad_queue->count = 0;
2724	spin_lock_init(&mad_queue->lock);
2725	INIT_LIST_HEAD(&mad_queue->list);
2726}
2727
2728static void init_mad_qp(struct ib_mad_port_private *port_priv,
2729			struct ib_mad_qp_info *qp_info)
2730{
2731	qp_info->port_priv = port_priv;
2732	init_mad_queue(qp_info, &qp_info->send_queue);
2733	init_mad_queue(qp_info, &qp_info->recv_queue);
2734	INIT_LIST_HEAD(&qp_info->overflow_list);
2735	spin_lock_init(&qp_info->snoop_lock);
2736	qp_info->snoop_table = NULL;
2737	qp_info->snoop_table_size = 0;
2738	atomic_set(&qp_info->snoop_count, 0);
2739}
2740
2741static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2742			 enum ib_qp_type qp_type)
2743{
2744	struct ib_qp_init_attr	qp_init_attr;
2745	int ret;
2746
2747	memset(&qp_init_attr, 0, sizeof qp_init_attr);
2748	qp_init_attr.send_cq = qp_info->port_priv->cq;
2749	qp_init_attr.recv_cq = qp_info->port_priv->cq;
2750	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2751	qp_init_attr.cap.max_send_wr = mad_sendq_size;
2752	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
2753	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2754	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2755	qp_init_attr.qp_type = qp_type;
2756	qp_init_attr.port_num = qp_info->port_priv->port_num;
2757	qp_init_attr.qp_context = qp_info;
2758	qp_init_attr.event_handler = qp_event_handler;
2759	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2760	if (IS_ERR(qp_info->qp)) {
2761		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2762		       get_spl_qp_index(qp_type));
 
2763		ret = PTR_ERR(qp_info->qp);
2764		goto error;
2765	}
2766	/* Use minimum queue sizes unless the CQ is resized */
2767	qp_info->send_queue.max_active = mad_sendq_size;
2768	qp_info->recv_queue.max_active = mad_recvq_size;
2769	return 0;
2770
2771error:
2772	return ret;
2773}
2774
2775static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2776{
2777	if (!qp_info->qp)
2778		return;
2779
2780	ib_destroy_qp(qp_info->qp);
2781	kfree(qp_info->snoop_table);
2782}
2783
2784/*
2785 * Open the port
2786 * Create the CQ, PD, MR, and QPs if needed
2787 */
2788static int ib_mad_port_open(struct ib_device *device,
2789			    int port_num)
2790{
2791	int ret, cq_size;
2792	struct ib_mad_port_private *port_priv;
2793	unsigned long flags;
2794	char name[sizeof "ib_mad123"];
2795	int has_smi;
2796
2797	/* Create new device info */
2798	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2799	if (!port_priv) {
2800		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2801		return -ENOMEM;
2802	}
2803
2804	port_priv->device = device;
2805	port_priv->port_num = port_num;
2806	spin_lock_init(&port_priv->reg_lock);
2807	INIT_LIST_HEAD(&port_priv->agent_list);
2808	init_mad_qp(port_priv, &port_priv->qp_info[0]);
2809	init_mad_qp(port_priv, &port_priv->qp_info[1]);
2810
2811	cq_size = mad_sendq_size + mad_recvq_size;
2812	has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
2813	if (has_smi)
2814		cq_size *= 2;
2815
2816	port_priv->cq = ib_create_cq(port_priv->device,
2817				     ib_mad_thread_completion_handler,
2818				     NULL, port_priv, cq_size, 0);
2819	if (IS_ERR(port_priv->cq)) {
2820		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2821		ret = PTR_ERR(port_priv->cq);
2822		goto error3;
2823	}
2824
2825	port_priv->pd = ib_alloc_pd(device);
2826	if (IS_ERR(port_priv->pd)) {
2827		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2828		ret = PTR_ERR(port_priv->pd);
2829		goto error4;
2830	}
2831
2832	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2833	if (IS_ERR(port_priv->mr)) {
2834		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2835		ret = PTR_ERR(port_priv->mr);
2836		goto error5;
 
2837	}
2838
2839	if (has_smi) {
2840		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2841		if (ret)
2842			goto error6;
2843	}
2844	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2845	if (ret)
2846		goto error7;
2847
2848	snprintf(name, sizeof name, "ib_mad%d", port_num);
2849	port_priv->wq = create_singlethread_workqueue(name);
2850	if (!port_priv->wq) {
2851		ret = -ENOMEM;
2852		goto error8;
2853	}
2854	INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2855
2856	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2857	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2858	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2859
2860	ret = ib_mad_port_start(port_priv);
2861	if (ret) {
2862		printk(KERN_ERR PFX "Couldn't start port\n");
2863		goto error9;
2864	}
2865
2866	return 0;
2867
2868error9:
2869	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2870	list_del_init(&port_priv->port_list);
2871	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2872
2873	destroy_workqueue(port_priv->wq);
2874error8:
2875	destroy_mad_qp(&port_priv->qp_info[1]);
2876error7:
2877	destroy_mad_qp(&port_priv->qp_info[0]);
2878error6:
2879	ib_dereg_mr(port_priv->mr);
2880error5:
2881	ib_dealloc_pd(port_priv->pd);
2882error4:
2883	ib_destroy_cq(port_priv->cq);
2884	cleanup_recv_queue(&port_priv->qp_info[1]);
2885	cleanup_recv_queue(&port_priv->qp_info[0]);
2886error3:
2887	kfree(port_priv);
2888
2889	return ret;
2890}
2891
2892/*
2893 * Close the port
2894 * If there are no classes using the port, free the port
2895 * resources (CQ, MR, PD, QP) and remove the port's info structure
2896 */
2897static int ib_mad_port_close(struct ib_device *device, int port_num)
2898{
2899	struct ib_mad_port_private *port_priv;
2900	unsigned long flags;
2901
2902	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2903	port_priv = __ib_get_mad_port(device, port_num);
2904	if (port_priv == NULL) {
2905		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2906		printk(KERN_ERR PFX "Port %d not found\n", port_num);
2907		return -ENODEV;
2908	}
2909	list_del_init(&port_priv->port_list);
2910	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2911
2912	destroy_workqueue(port_priv->wq);
2913	destroy_mad_qp(&port_priv->qp_info[1]);
2914	destroy_mad_qp(&port_priv->qp_info[0]);
2915	ib_dereg_mr(port_priv->mr);
2916	ib_dealloc_pd(port_priv->pd);
2917	ib_destroy_cq(port_priv->cq);
2918	cleanup_recv_queue(&port_priv->qp_info[1]);
2919	cleanup_recv_queue(&port_priv->qp_info[0]);
2920	/* XXX: Handle deallocation of MAD registration tables */
2921
2922	kfree(port_priv);
2923
2924	return 0;
2925}
2926
2927static void ib_mad_init_device(struct ib_device *device)
2928{
2929	int start, end, i;
2930
2931	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2932		return;
2933
2934	if (device->node_type == RDMA_NODE_IB_SWITCH) {
2935		start = 0;
2936		end   = 0;
2937	} else {
2938		start = 1;
2939		end   = device->phys_port_cnt;
2940	}
2941
2942	for (i = start; i <= end; i++) {
2943		if (ib_mad_port_open(device, i)) {
2944			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2945			       device->name, i);
2946			goto error;
2947		}
2948		if (ib_agent_port_open(device, i)) {
2949			printk(KERN_ERR PFX "Couldn't open %s port %d "
2950			       "for agents\n",
2951			       device->name, i);
2952			goto error_agent;
2953		}
2954	}
2955	return;
2956
2957error_agent:
2958	if (ib_mad_port_close(device, i))
2959		printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2960		       device->name, i);
2961
2962error:
2963	i--;
2964
2965	while (i >= start) {
2966		if (ib_agent_port_close(device, i))
2967			printk(KERN_ERR PFX "Couldn't close %s port %d "
2968			       "for agents\n",
2969			       device->name, i);
2970		if (ib_mad_port_close(device, i))
2971			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2972			       device->name, i);
2973		i--;
2974	}
2975}
2976
2977static void ib_mad_remove_device(struct ib_device *device)
2978{
2979	int i, num_ports, cur_port;
2980
2981	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2982		return;
 
2983
2984	if (device->node_type == RDMA_NODE_IB_SWITCH) {
2985		num_ports = 1;
2986		cur_port = 0;
2987	} else {
2988		num_ports = device->phys_port_cnt;
2989		cur_port = 1;
2990	}
2991	for (i = 0; i < num_ports; i++, cur_port++) {
2992		if (ib_agent_port_close(device, cur_port))
2993			printk(KERN_ERR PFX "Couldn't close %s port %d "
2994			       "for agents\n",
2995			       device->name, cur_port);
2996		if (ib_mad_port_close(device, cur_port))
2997			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2998			       device->name, cur_port);
2999	}
3000}
3001
3002static struct ib_client mad_client = {
3003	.name   = "mad",
3004	.add = ib_mad_init_device,
3005	.remove = ib_mad_remove_device
3006};
3007
3008static int __init ib_mad_init_module(void)
3009{
3010	int ret;
3011
3012	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3013	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3014
3015	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3016	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3017
3018	ib_mad_cache = kmem_cache_create("ib_mad",
3019					 sizeof(struct ib_mad_private),
3020					 0,
3021					 SLAB_HWCACHE_ALIGN,
3022					 NULL);
3023	if (!ib_mad_cache) {
3024		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
3025		ret = -ENOMEM;
3026		goto error1;
3027	}
3028
3029	INIT_LIST_HEAD(&ib_mad_port_list);
3030
3031	if (ib_register_client(&mad_client)) {
3032		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
3033		ret = -EINVAL;
3034		goto error2;
3035	}
3036
3037	return 0;
3038
3039error2:
3040	kmem_cache_destroy(ib_mad_cache);
3041error1:
3042	return ret;
3043}
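
/*
 * The queue sizes above come from the send_queue_size and recv_queue_size
 * module parameters (0444, so load-time only) and are clamped to the
 * IB_MAD_QP_MIN_SIZE..IB_MAD_QP_MAX_SIZE range.  A hypothetical example:
 *
 *	modprobe ib_mad send_queue_size=256 recv_queue_size=1024
 */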
3044
3045static void __exit ib_mad_cleanup_module(void)
3046{
3047	ib_unregister_client(&mad_client);
3048	kmem_cache_destroy(ib_mad_cache);
3049}
3050
3051module_init(ib_mad_init_module);
3052module_exit(ib_mad_cleanup_module);
v5.4
   1/*
   2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
   3 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
   4 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
   5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
   6 * Copyright (c) 2014,2018 Intel Corporation.  All rights reserved.
   7 *
   8 * This software is available to you under a choice of one of two
   9 * licenses.  You may choose to be licensed under the terms of the GNU
  10 * General Public License (GPL) Version 2, available from the file
  11 * COPYING in the main directory of this source tree, or the
  12 * OpenIB.org BSD license below:
  13 *
  14 *     Redistribution and use in source and binary forms, with or
  15 *     without modification, are permitted provided that the following
  16 *     conditions are met:
  17 *
  18 *      - Redistributions of source code must retain the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer.
  21 *
  22 *      - Redistributions in binary form must reproduce the above
  23 *        copyright notice, this list of conditions and the following
  24 *        disclaimer in the documentation and/or other materials
  25 *        provided with the distribution.
  26 *
  27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  34 * SOFTWARE.
  35 *
  36 */
  37
  38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  39
  40#include <linux/dma-mapping.h>
  41#include <linux/slab.h>
  42#include <linux/module.h>
  43#include <linux/security.h>
  44#include <linux/xarray.h>
  45#include <rdma/ib_cache.h>
  46
  47#include "mad_priv.h"
  48#include "core_priv.h"
  49#include "mad_rmpp.h"
  50#include "smi.h"
  51#include "opa_smi.h"
  52#include "agent.h"
  53
  54#define CREATE_TRACE_POINTS
  55#include <trace/events/ib_mad.h>
  56
  57#ifdef CONFIG_TRACEPOINTS
  58static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
  59			  struct ib_mad_qp_info *qp_info,
  60			  struct trace_event_raw_ib_mad_send_template *entry)
  61{
  62	u16 pkey;
  63	struct ib_device *dev = qp_info->port_priv->device;
  64	u8 pnum = qp_info->port_priv->port_num;
  65	struct ib_ud_wr *wr = &mad_send_wr->send_wr;
  66	struct rdma_ah_attr attr = {};
  67
  68	rdma_query_ah(wr->ah, &attr);
  69
  70	/* These are common */
  71	entry->sl = attr.sl;
  72	ib_query_pkey(dev, pnum, wr->pkey_index, &pkey);
  73	entry->pkey = pkey;
  74	entry->rqpn = wr->remote_qpn;
  75	entry->rqkey = wr->remote_qkey;
  76	entry->dlid = rdma_ah_get_dlid(&attr);
  77}
  78#endif
  79
  80static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
  81static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
  82
  83module_param_named(send_queue_size, mad_sendq_size, int, 0444);
  84MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
  85module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
  86MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
  87
  88/* Client ID 0 is used for snoop-only clients */
  89static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
  90static u32 ib_mad_client_next;
  91static struct list_head ib_mad_port_list;
 
  92
  93/* Port list lock */
  94static DEFINE_SPINLOCK(ib_mad_port_list_lock);
  95
  96/* Forward declarations */
  97static int method_in_use(struct ib_mad_mgmt_method_table **method,
  98			 struct ib_mad_reg_req *mad_reg_req);
  99static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
 100static struct ib_mad_agent_private *find_mad_agent(
 101					struct ib_mad_port_private *port_priv,
 102					const struct ib_mad_hdr *mad);
 103static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 104				    struct ib_mad_private *mad);
 105static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
 106static void timeout_sends(struct work_struct *work);
 107static void local_completions(struct work_struct *work);
 108static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 109			      struct ib_mad_agent_private *agent_priv,
 110			      u8 mgmt_class);
 111static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 112			   struct ib_mad_agent_private *agent_priv);
 113static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
 114			      struct ib_wc *wc);
 115static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
 116
 117/*
 118 * Returns a ib_mad_port_private structure or NULL for a device/port
 119 * Assumes ib_mad_port_list_lock is being held
 120 */
 121static inline struct ib_mad_port_private *
 122__ib_get_mad_port(struct ib_device *device, int port_num)
 123{
 124	struct ib_mad_port_private *entry;
 125
 126	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
 127		if (entry->device == device && entry->port_num == port_num)
 128			return entry;
 129	}
 130	return NULL;
 131}
 132
 133/*
 134 * Wrapper function to return a ib_mad_port_private structure or NULL
 135 * for a device/port
 136 */
 137static inline struct ib_mad_port_private *
 138ib_get_mad_port(struct ib_device *device, int port_num)
 139{
 140	struct ib_mad_port_private *entry;
 141	unsigned long flags;
 142
 143	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 144	entry = __ib_get_mad_port(device, port_num);
 145	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 146
 147	return entry;
 148}
 149
 150static inline u8 convert_mgmt_class(u8 mgmt_class)
 151{
 152	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
 153	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
 154		0 : mgmt_class;
 155}
 156
 157static int get_spl_qp_index(enum ib_qp_type qp_type)
 158{
 159	switch (qp_type)
 160	{
 161	case IB_QPT_SMI:
 162		return 0;
 163	case IB_QPT_GSI:
 164		return 1;
 165	default:
 166		return -1;
 167	}
 168}
 169
 170static int vendor_class_index(u8 mgmt_class)
 171{
 172	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
 173}
 174
 175static int is_vendor_class(u8 mgmt_class)
 176{
 177	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
 178	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
 179		return 0;
 180	return 1;
 181}
 182
 183static int is_vendor_oui(char *oui)
 184{
 185	if (oui[0] || oui[1] || oui[2])
 186		return 1;
 187	return 0;
 188}
 189
 190static int is_vendor_method_in_use(
 191		struct ib_mad_mgmt_vendor_class *vendor_class,
 192		struct ib_mad_reg_req *mad_reg_req)
 193{
 194	struct ib_mad_mgmt_method_table *method;
 195	int i;
 196
 197	for (i = 0; i < MAX_MGMT_OUI; i++) {
 198		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
 199			method = vendor_class->method_table[i];
 200			if (method) {
 201				if (method_in_use(&method, mad_reg_req))
 202					return 1;
 203				else
 204					break;
 205			}
 206		}
 207	}
 208	return 0;
 209}
 210
 211int ib_response_mad(const struct ib_mad_hdr *hdr)
 212{
 213	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
 214		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
 215		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
 216		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
 217}
 218EXPORT_SYMBOL(ib_response_mad);
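
/*
 * Illustrative sketch (assumed client code, not part of this file): a MAD
 * consumer can use ib_response_mad() to separate solicited responses from
 * unsolicited requests and traps.  Names are hypothetical.
 */
static void example_classify_mad(const struct ib_mad_recv_wc *mad_recv_wc)
{
	const struct ib_mad_hdr *hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	if (ib_response_mad(hdr))
		pr_debug("response, TID 0x%llx\n", be64_to_cpu(hdr->tid));
	else
		pr_debug("unsolicited MAD, method 0x%x\n", hdr->method);
}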
 219
 220/*
 221 * ib_register_mad_agent - Register to send/receive MADs
 222 *
 223 * Context: Process context.
 224 */
 225struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 226					   u8 port_num,
 227					   enum ib_qp_type qp_type,
 228					   struct ib_mad_reg_req *mad_reg_req,
 229					   u8 rmpp_version,
 230					   ib_mad_send_handler send_handler,
 231					   ib_mad_recv_handler recv_handler,
 232					   void *context,
 233					   u32 registration_flags)
 234{
 235	struct ib_mad_port_private *port_priv;
 236	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
 237	struct ib_mad_agent_private *mad_agent_priv;
 238	struct ib_mad_reg_req *reg_req = NULL;
 239	struct ib_mad_mgmt_class_table *class;
 240	struct ib_mad_mgmt_vendor_class_table *vendor;
 241	struct ib_mad_mgmt_vendor_class *vendor_class;
 242	struct ib_mad_mgmt_method_table *method;
 243	int ret2, qpn;
 
 244	u8 mgmt_class, vclass;
 245
 246	if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
 247	    (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
 248		return ERR_PTR(-EPROTONOSUPPORT);
 249
 250	/* Validate parameters */
 251	qpn = get_spl_qp_index(qp_type);
 252	if (qpn == -1) {
 253		dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
 254				    __func__, qp_type);
 255		goto error1;
 256	}
 257
 258	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
 259		dev_dbg_ratelimited(&device->dev,
 260				    "%s: invalid RMPP Version %u\n",
 261				    __func__, rmpp_version);
 262		goto error1;
 263	}
 264
 265	/* Validate MAD registration request if supplied */
 266	if (mad_reg_req) {
 267		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
 268			dev_dbg_ratelimited(&device->dev,
 269					    "%s: invalid Class Version %u\n",
 270					    __func__,
 271					    mad_reg_req->mgmt_class_version);
 272			goto error1;
 273		}
 274		if (!recv_handler) {
 275			dev_dbg_ratelimited(&device->dev,
 276					    "%s: no recv_handler\n", __func__);
 277			goto error1;
 278		}
 279		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
 280			/*
 281			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
 282			 * one in this range currently allowed
 283			 */
 284			if (mad_reg_req->mgmt_class !=
 285			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
 286				dev_dbg_ratelimited(&device->dev,
 287					"%s: Invalid Mgmt Class 0x%x\n",
 288					__func__, mad_reg_req->mgmt_class);
 289				goto error1;
 290			}
 291		} else if (mad_reg_req->mgmt_class == 0) {
 292			/*
 293			 * Class 0 is reserved in IBA and is used for
 294			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 295			 */
 296			dev_dbg_ratelimited(&device->dev,
 297					    "%s: Invalid Mgmt Class 0\n",
 298					    __func__);
 299			goto error1;
 300		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
 301			/*
 302			 * If class is in "new" vendor range,
 303			 * ensure supplied OUI is not zero
 304			 */
 305			if (!is_vendor_oui(mad_reg_req->oui)) {
 306				dev_dbg_ratelimited(&device->dev,
 307					"%s: No OUI specified for class 0x%x\n",
 308					__func__,
 309					mad_reg_req->mgmt_class);
 310				goto error1;
 311			}
 312		}
 313		/* Make sure class supplied is consistent with RMPP */
 314		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
 315			if (rmpp_version) {
 316				dev_dbg_ratelimited(&device->dev,
 317					"%s: RMPP version for non-RMPP class 0x%x\n",
 318					__func__, mad_reg_req->mgmt_class);
 319				goto error1;
 320			}
 321		}
 322
 323		/* Make sure class supplied is consistent with QP type */
 324		if (qp_type == IB_QPT_SMI) {
 325			if ((mad_reg_req->mgmt_class !=
 326					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
 327			    (mad_reg_req->mgmt_class !=
 328					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
 329				dev_dbg_ratelimited(&device->dev,
 330					"%s: Invalid SM QP type: class 0x%x\n",
 331					__func__, mad_reg_req->mgmt_class);
 332				goto error1;
 333			}
 334		} else {
 335			if ((mad_reg_req->mgmt_class ==
 336					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
 337			    (mad_reg_req->mgmt_class ==
 338					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
 339				dev_dbg_ratelimited(&device->dev,
 340					"%s: Invalid GS QP type: class 0x%x\n",
 341					__func__, mad_reg_req->mgmt_class);
 342				goto error1;
 343			}
 344		}
 345	} else {
 346		/* No registration request supplied */
 347		if (!send_handler)
 348			goto error1;
 349		if (registration_flags & IB_MAD_USER_RMPP)
 350			goto error1;
 351	}
 352
 353	/* Validate device and port */
 354	port_priv = ib_get_mad_port(device, port_num);
 355	if (!port_priv) {
 356		dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
 357				    __func__, port_num);
 358		ret = ERR_PTR(-ENODEV);
 359		goto error1;
 360	}
 361
 362	/* Verify the QP requested is supported. For example, Ethernet devices
 363	 * will not have QP0.
 364	 */
 365	if (!port_priv->qp_info[qpn].qp) {
 366		dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
 367				    __func__, qpn);
 368		ret = ERR_PTR(-EPROTONOSUPPORT);
 369		goto error1;
 370	}
 371
 372	/* Allocate structures */
 373	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
 374	if (!mad_agent_priv) {
 375		ret = ERR_PTR(-ENOMEM);
 376		goto error1;
 377	}
 378
 379	if (mad_reg_req) {
 380		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
 381		if (!reg_req) {
 382			ret = ERR_PTR(-ENOMEM);
 383			goto error3;
 384		}
 385	}
 386
 387	/* Now, fill in the various structures */
 388	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
 389	mad_agent_priv->reg_req = reg_req;
 390	mad_agent_priv->agent.rmpp_version = rmpp_version;
 391	mad_agent_priv->agent.device = device;
 392	mad_agent_priv->agent.recv_handler = recv_handler;
 393	mad_agent_priv->agent.send_handler = send_handler;
 394	mad_agent_priv->agent.context = context;
 395	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
 396	mad_agent_priv->agent.port_num = port_num;
 397	mad_agent_priv->agent.flags = registration_flags;
 398	spin_lock_init(&mad_agent_priv->lock);
 399	INIT_LIST_HEAD(&mad_agent_priv->send_list);
 400	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
 401	INIT_LIST_HEAD(&mad_agent_priv->done_list);
 402	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
 403	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
 404	INIT_LIST_HEAD(&mad_agent_priv->local_list);
 405	INIT_WORK(&mad_agent_priv->local_work, local_completions);
 406	atomic_set(&mad_agent_priv->refcount, 1);
 407	init_completion(&mad_agent_priv->comp);
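	/*
	 * The refcount/completion pair lets unregister_mad_agent() wait for
	 * every outstanding reference to this agent to be dropped before the
	 * structure is freed.
	 */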
 408
 409	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
 410	if (ret2) {
 411		ret = ERR_PTR(ret2);
 412		goto error4;
 413	}
 414
 415	/*
 416	 * The mlx4 driver uses the top byte to distinguish which virtual
 417	 * function generated the MAD, so we must avoid using it.
 418	 */
 419	ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
 420			mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
 421			&ib_mad_client_next, GFP_KERNEL);
 422	if (ret2 < 0) {
 423		ret = ERR_PTR(ret2);
 424		goto error5;
 425	}
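	/*
	 * hi_tid identifies this agent: response MADs carry it in the upper
	 * 32 bits of their transaction ID, and find_mad_agent() uses it to
	 * look the agent up in the ib_mad_clients xarray.
	 */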
 426
 427	/*
 428	 * Make sure the MAD registration (if supplied)
 429	 * does not overlap with any existing registrations
 430	 */
 431	spin_lock_irq(&port_priv->reg_lock);
 432	if (mad_reg_req) {
 433		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
 434		if (!is_vendor_class(mgmt_class)) {
 435			class = port_priv->version[mad_reg_req->
 436						   mgmt_class_version].class;
 437			if (class) {
 438				method = class->method_table[mgmt_class];
 439				if (method) {
 440					if (method_in_use(&method,
 441							   mad_reg_req))
 442						goto error6;
 443				}
 444			}
 445			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
 446						  mgmt_class);
 447		} else {
 448			/* "New" vendor class range */
 449			vendor = port_priv->version[mad_reg_req->
 450						    mgmt_class_version].vendor;
 451			if (vendor) {
 452				vclass = vendor_class_index(mgmt_class);
 453				vendor_class = vendor->vendor_class[vclass];
 454				if (vendor_class) {
 455					if (is_vendor_method_in_use(
 456							vendor_class,
 457							mad_reg_req))
 458						goto error6;
 459				}
 460			}
 461			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
 462		}
 463		if (ret2) {
 464			ret = ERR_PTR(ret2);
 465			goto error6;
 466		}
 467	}
 468	spin_unlock_irq(&port_priv->reg_lock);
 469
 470	trace_ib_mad_create_agent(mad_agent_priv);
 471	return &mad_agent_priv->agent;
 472error6:
 473	spin_unlock_irq(&port_priv->reg_lock);
 474	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 475error5:
 476	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 477error4:
 478	kfree(reg_req);
 479error3:
 480	kfree(mad_agent_priv);
 481error1:
 482	return ret;
 483}
 484EXPORT_SYMBOL(ib_register_mad_agent);
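/*
 * Minimal usage sketch (not part of this file); the handler and context
 * names below are illustrative assumptions, not kernel symbols:
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */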
 485
 486static inline int is_snooping_sends(int mad_snoop_flags)
 487{
 488	return (mad_snoop_flags &
 489		(/*IB_MAD_SNOOP_POSTED_SENDS |
 490		 IB_MAD_SNOOP_RMPP_SENDS |*/
 491		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
 492		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
 493}
 494
 495static inline int is_snooping_recvs(int mad_snoop_flags)
 496{
 497	return (mad_snoop_flags &
 498		(IB_MAD_SNOOP_RECVS /*|
 499		 IB_MAD_SNOOP_RMPP_RECVS*/));
 500}
 501
 502static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
 503				struct ib_mad_snoop_private *mad_snoop_priv)
 504{
 505	struct ib_mad_snoop_private **new_snoop_table;
 506	unsigned long flags;
 507	int i;
 508
 509	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 510	/* Check for empty slot in array. */
 511	for (i = 0; i < qp_info->snoop_table_size; i++)
 512		if (!qp_info->snoop_table[i])
 513			break;
 514
 515	if (i == qp_info->snoop_table_size) {
 516		/* Grow table. */
 517		new_snoop_table = krealloc(qp_info->snoop_table,
 518					   sizeof mad_snoop_priv *
 519					   (qp_info->snoop_table_size + 1),
 520					   GFP_ATOMIC);
 521		if (!new_snoop_table) {
 522			i = -ENOMEM;
 523			goto out;
 524		}
 525
 526		qp_info->snoop_table = new_snoop_table;
 527		qp_info->snoop_table_size++;
 528	}
 529	qp_info->snoop_table[i] = mad_snoop_priv;
 530	atomic_inc(&qp_info->snoop_count);
 531out:
 532	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 533	return i;
 534}
 535
 536struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
 537					   u8 port_num,
 538					   enum ib_qp_type qp_type,
 539					   int mad_snoop_flags,
 540					   ib_mad_snoop_handler snoop_handler,
 541					   ib_mad_recv_handler recv_handler,
 542					   void *context)
 543{
 544	struct ib_mad_port_private *port_priv;
 545	struct ib_mad_agent *ret;
 546	struct ib_mad_snoop_private *mad_snoop_priv;
 547	int qpn;
 548	int err;
 549
 550	/* Validate parameters */
 551	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
 552	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
 553		ret = ERR_PTR(-EINVAL);
 554		goto error1;
 555	}
 556	qpn = get_spl_qp_index(qp_type);
 557	if (qpn == -1) {
 558		ret = ERR_PTR(-EINVAL);
 559		goto error1;
 560	}
 561	port_priv = ib_get_mad_port(device, port_num);
 562	if (!port_priv) {
 563		ret = ERR_PTR(-ENODEV);
 564		goto error1;
 565	}
 566	/* Allocate structures */
 567	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
 568	if (!mad_snoop_priv) {
 569		ret = ERR_PTR(-ENOMEM);
 570		goto error1;
 571	}
 572
 573	/* Now, fill in the various structures */
 574	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
 575	mad_snoop_priv->agent.device = device;
 576	mad_snoop_priv->agent.recv_handler = recv_handler;
 577	mad_snoop_priv->agent.snoop_handler = snoop_handler;
 578	mad_snoop_priv->agent.context = context;
 579	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
 580	mad_snoop_priv->agent.port_num = port_num;
 581	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
 582	init_completion(&mad_snoop_priv->comp);
 583
 584	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
 585	if (err) {
 586		ret = ERR_PTR(err);
 587		goto error2;
 588	}
 589
 590	mad_snoop_priv->snoop_index = register_snoop_agent(
 591						&port_priv->qp_info[qpn],
 592						mad_snoop_priv);
 593	if (mad_snoop_priv->snoop_index < 0) {
 594		ret = ERR_PTR(mad_snoop_priv->snoop_index);
 595		goto error3;
 596	}
 597
 598	atomic_set(&mad_snoop_priv->refcount, 1);
 599	return &mad_snoop_priv->agent;
 600error3:
 601	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
 602error2:
 603	kfree(mad_snoop_priv);
 604error1:
 605	return ret;
 606}
 607EXPORT_SYMBOL(ib_register_mad_snoop);
 608
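/*
 * Drop a reference on an agent/snoop agent; the final put completes @comp so
 * that unregister_mad_agent()/unregister_mad_snoop() can finish tearing the
 * structure down.
 */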
 609static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 610{
 611	if (atomic_dec_and_test(&mad_agent_priv->refcount))
 612		complete(&mad_agent_priv->comp);
 613}
 614
 615static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
 616{
 617	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
 618		complete(&mad_snoop_priv->comp);
 619}
 620
 621static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 622{
 623	struct ib_mad_port_private *port_priv;
 624
 625	/* Note that we could still be handling received MADs */
 626	trace_ib_mad_unregister_agent(mad_agent_priv);
 627
 628	/*
 629	 * Canceling all sends results in dropping received response
 630	 * MADs, preventing us from queuing additional work
 631	 */
 632	cancel_mads(mad_agent_priv);
 633	port_priv = mad_agent_priv->qp_info->port_priv;
 634	cancel_delayed_work(&mad_agent_priv->timed_work);
 635
 636	spin_lock_irq(&port_priv->reg_lock);
 637	remove_mad_reg_req(mad_agent_priv);
 638	spin_unlock_irq(&port_priv->reg_lock);
 639	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 640
 641	flush_workqueue(port_priv->wq);
 642	ib_cancel_rmpp_recvs(mad_agent_priv);
 643
 644	deref_mad_agent(mad_agent_priv);
 645	wait_for_completion(&mad_agent_priv->comp);
 646
 647	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 648
 649	kfree(mad_agent_priv->reg_req);
 650	kfree_rcu(mad_agent_priv, rcu);
 651}
 652
 653static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
 654{
 655	struct ib_mad_qp_info *qp_info;
 656	unsigned long flags;
 657
 658	qp_info = mad_snoop_priv->qp_info;
 659	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 660	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
 661	atomic_dec(&qp_info->snoop_count);
 662	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 663
 664	deref_snoop_agent(mad_snoop_priv);
 665	wait_for_completion(&mad_snoop_priv->comp);
 666
 667	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
 668
 669	kfree(mad_snoop_priv);
 670}
 671
 672/*
 673 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 674 *
 675 * Context: Process context.
 676 */
 677void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 678{
 679	struct ib_mad_agent_private *mad_agent_priv;
 680	struct ib_mad_snoop_private *mad_snoop_priv;
 681
 682	/* If the TID is zero, the agent can only snoop. */
 683	if (mad_agent->hi_tid) {
 684		mad_agent_priv = container_of(mad_agent,
 685					      struct ib_mad_agent_private,
 686					      agent);
 687		unregister_mad_agent(mad_agent_priv);
 688	} else {
 689		mad_snoop_priv = container_of(mad_agent,
 690					      struct ib_mad_snoop_private,
 691					      agent);
 692		unregister_mad_snoop(mad_snoop_priv);
 693	}
 694}
 695EXPORT_SYMBOL(ib_unregister_mad_agent);
 696
 697static void dequeue_mad(struct ib_mad_list_head *mad_list)
 698{
 699	struct ib_mad_queue *mad_queue;
 700	unsigned long flags;
 701
 702	mad_queue = mad_list->mad_queue;
 703	spin_lock_irqsave(&mad_queue->lock, flags);
 704	list_del(&mad_list->list);
 705	mad_queue->count--;
 706	spin_unlock_irqrestore(&mad_queue->lock, flags);
 707}
 708
 709static void snoop_send(struct ib_mad_qp_info *qp_info,
 710		       struct ib_mad_send_buf *send_buf,
 711		       struct ib_mad_send_wc *mad_send_wc,
 712		       int mad_snoop_flags)
 713{
 714	struct ib_mad_snoop_private *mad_snoop_priv;
 715	unsigned long flags;
 716	int i;
 717
 718	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 719	for (i = 0; i < qp_info->snoop_table_size; i++) {
 720		mad_snoop_priv = qp_info->snoop_table[i];
 721		if (!mad_snoop_priv ||
 722		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
 723			continue;
 724
 725		atomic_inc(&mad_snoop_priv->refcount);
 726		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 727		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
 728						    send_buf, mad_send_wc);
 729		deref_snoop_agent(mad_snoop_priv);
 730		spin_lock_irqsave(&qp_info->snoop_lock, flags);
 731	}
 732	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 733}
 734
 735static void snoop_recv(struct ib_mad_qp_info *qp_info,
 736		       struct ib_mad_recv_wc *mad_recv_wc,
 737		       int mad_snoop_flags)
 738{
 739	struct ib_mad_snoop_private *mad_snoop_priv;
 740	unsigned long flags;
 741	int i;
 742
 743	spin_lock_irqsave(&qp_info->snoop_lock, flags);
 744	for (i = 0; i < qp_info->snoop_table_size; i++) {
 745		mad_snoop_priv = qp_info->snoop_table[i];
 746		if (!mad_snoop_priv ||
 747		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
 748			continue;
 749
 750		atomic_inc(&mad_snoop_priv->refcount);
 751		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 752		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
 753						   mad_recv_wc);
 754		deref_snoop_agent(mad_snoop_priv);
 755		spin_lock_irqsave(&qp_info->snoop_lock, flags);
 756	}
 757	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 758}
 759
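/*
 * Build a synthetic receive work completion so that a directed-route SMP
 * handled locally looks to the receive path as if it had arrived on QP0.
 */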
 760static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
 761		u16 pkey_index, u8 port_num, struct ib_wc *wc)
 762{
 763	memset(wc, 0, sizeof *wc);
 764	wc->wr_cqe = cqe;
 765	wc->status = IB_WC_SUCCESS;
 766	wc->opcode = IB_WC_RECV;
 767	wc->pkey_index = pkey_index;
 768	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
 769	wc->src_qp = IB_QP0;
 770	wc->qp = qp;
 771	wc->slid = slid;
 772	wc->sl = 0;
 773	wc->dlid_path_bits = 0;
 774	wc->port_num = port_num;
 775}
 776
 777static size_t mad_priv_size(const struct ib_mad_private *mp)
 778{
 779	return sizeof(struct ib_mad_private) + mp->mad_size;
 780}
 781
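/*
 * Each MAD buffer records its own mad_size, since OPA-capable ports use
 * 2048-byte MADs while plain IB ports use 256-byte MADs.
 */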
 782static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
 783{
 784	size_t size = sizeof(struct ib_mad_private) + mad_size;
 785	struct ib_mad_private *ret = kzalloc(size, flags);
 786
 787	if (ret)
 788		ret->mad_size = mad_size;
 789
 790	return ret;
 791}
 792
 793static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
 794{
 795	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
 796}
 797
 798static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
 799{
 800	return sizeof(struct ib_grh) + mp->mad_size;
 801}
 802
 803/*
 804 * Return 0 if SMP is to be sent
 805 * Return 1 if SMP was consumed locally (whether or not solicited)
 806 * Return < 0 if error
 807 */
 808static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 809				  struct ib_mad_send_wr_private *mad_send_wr)
 810{
 811	int ret = 0;
 812	struct ib_smp *smp = mad_send_wr->send_buf.mad;
 813	struct opa_smp *opa_smp = (struct opa_smp *)smp;
 814	unsigned long flags;
 815	struct ib_mad_local_private *local;
 816	struct ib_mad_private *mad_priv;
 817	struct ib_mad_port_private *port_priv;
 818	struct ib_mad_agent_private *recv_mad_agent = NULL;
 819	struct ib_device *device = mad_agent_priv->agent.device;
 820	u8 port_num;
 821	struct ib_wc mad_wc;
 822	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
 823	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
 824	u16 out_mad_pkey_index = 0;
 825	u16 drslid;
 826	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
 827				    mad_agent_priv->qp_info->port_priv->port_num);
 828
 829	if (rdma_cap_ib_switch(device) &&
 830	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
 831		port_num = send_wr->port_num;
 832	else
 833		port_num = mad_agent_priv->agent.port_num;
 834
 835	/*
 836	 * Directed route handling starts if the initial LID routed part of
 837	 * a request or the ending LID routed part of a response is empty.
 838	 * If we are at the start of the LID routed part, don't update the
 839	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
 840	 */
 841	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
 842		u32 opa_drslid;
 843
 844		trace_ib_mad_handle_out_opa_smi(opa_smp);
 845
 846		if ((opa_get_smp_direction(opa_smp)
 847		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
 848		     OPA_LID_PERMISSIVE &&
 849		     opa_smi_handle_dr_smp_send(opa_smp,
 850						rdma_cap_ib_switch(device),
 851						port_num) == IB_SMI_DISCARD) {
 852			ret = -EINVAL;
 853			dev_err(&device->dev, "OPA Invalid directed route\n");
 854			goto out;
 855		}
 856		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
 857		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
 858		    opa_drslid & 0xffff0000) {
 859			ret = -EINVAL;
 860			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
 861			       opa_drslid);
 862			goto out;
 863		}
 864		drslid = (u16)(opa_drslid & 0x0000ffff);
 865
 866		/* Decide whether to post the send on the QP or to process it locally */
 867		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
 868		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
 869			goto out;
 870	} else {
 871		trace_ib_mad_handle_out_ib_smi(smp);
 872
 873		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
 874		     IB_LID_PERMISSIVE &&
 875		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
 876		     IB_SMI_DISCARD) {
 877			ret = -EINVAL;
 878			dev_err(&device->dev, "Invalid directed route\n");
 879			goto out;
 880		}
 881		drslid = be16_to_cpu(smp->dr_slid);
 882
 883		/* Decide whether to post the send on the QP or to process it locally */
 884		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
 885		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
 886			goto out;
 887	}
 888
 889	local = kmalloc(sizeof *local, GFP_ATOMIC);
 890	if (!local) {
 891		ret = -ENOMEM;
 892		goto out;
 893	}
 894	local->mad_priv = NULL;
 895	local->recv_mad_agent = NULL;
 896	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
 897	if (!mad_priv) {
 898		ret = -ENOMEM;
 899		kfree(local);
 900		goto out;
 901	}
 902
 903	build_smp_wc(mad_agent_priv->agent.qp,
 904		     send_wr->wr.wr_cqe, drslid,
 905		     send_wr->pkey_index,
 906		     send_wr->port_num, &mad_wc);
 907
 908	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
 909		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
 910					+ mad_send_wr->send_buf.data_len
 911					+ sizeof(struct ib_grh);
 912	}
 913
 914	/* No GRH for DR SMP */
 915	ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
 916				      (const struct ib_mad_hdr *)smp, mad_size,
 917				      (struct ib_mad_hdr *)mad_priv->mad,
 918				      &mad_size, &out_mad_pkey_index);
 919	switch (ret)
 920	{
 921	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
 922		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
 923		    mad_agent_priv->agent.recv_handler) {
 924			local->mad_priv = mad_priv;
 925			local->recv_mad_agent = mad_agent_priv;
 926			/*
 927			 * Reference MAD agent until receive
 928			 * side of local completion handled
 929			 */
 930			atomic_inc(&mad_agent_priv->refcount);
 931		} else
 932			kfree(mad_priv);
 933		break;
 934	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
 935		kfree(mad_priv);
 936		break;
 937	case IB_MAD_RESULT_SUCCESS:
 938		/* Treat like an incoming receive MAD */
 939		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
 940					    mad_agent_priv->agent.port_num);
 941		if (port_priv) {
 942			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
 943			recv_mad_agent = find_mad_agent(port_priv,
 944						        (const struct ib_mad_hdr *)mad_priv->mad);
 945		}
 946		if (!port_priv || !recv_mad_agent) {
 947			/*
 948			 * No receiving agent so drop packet and
 949			 * generate send completion.
 950			 */
 951			kfree(mad_priv);
 952			break;
 953		}
 954		local->mad_priv = mad_priv;
 955		local->recv_mad_agent = recv_mad_agent;
 956		break;
 957	default:
 958		kfree(mad_priv);
 959		kfree(local);
 960		ret = -EINVAL;
 961		goto out;
 962	}
 963
 964	local->mad_send_wr = mad_send_wr;
 965	if (opa) {
 966		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
 967		local->return_wc_byte_len = mad_size;
 968	}
 969	/* Reference MAD agent until send side of local completion handled */
 970	atomic_inc(&mad_agent_priv->refcount);
 971	/* Queue local completion to local list */
 972	spin_lock_irqsave(&mad_agent_priv->lock, flags);
 973	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
 974	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 975	queue_work(mad_agent_priv->qp_info->port_priv->wq,
 976		   &mad_agent_priv->local_work);
 977	ret = 1;
 978out:
 979	return ret;
 980}
 981
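/*
 * Return the number of pad bytes needed so the payload fills whole RMPP
 * segments.  For example, a 256-byte IB MAD with a 56-byte SA header gives a
 * 200-byte segment; 300 bytes of data then needs 100 bytes of padding to
 * fill its second segment.
 */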
 982static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
 983{
 984	int seg_size, pad;
 985
 986	seg_size = mad_size - hdr_len;
 987	if (data_len && seg_size) {
 988		pad = seg_size - data_len % seg_size;
 989		return pad == seg_size ? 0 : pad;
 990	} else
 991		return seg_size;
 992}
 993
 994static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
 995{
 996	struct ib_rmpp_segment *s, *t;
 997
 998	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
 999		list_del(&s->list);
1000		kfree(s);
1001	}
1002}
1003
1004static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
1005				size_t mad_size, gfp_t gfp_mask)
1006{
1007	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
1008	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
1009	struct ib_rmpp_segment *seg = NULL;
1010	int left, seg_size, pad;
1011
1012	send_buf->seg_size = mad_size - send_buf->hdr_len;
1013	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
1014	seg_size = send_buf->seg_size;
1015	pad = send_wr->pad;
1016
1017	/* Allocate data segments. */
1018	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
1019		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
1020		if (!seg) {
1021			free_send_rmpp_list(send_wr);
1022			return -ENOMEM;
1023		}
1024		seg->num = ++send_buf->seg_count;
1025		list_add_tail(&seg->list, &send_wr->rmpp_list);
1026	}
1027
1028	/* Zero any padding */
1029	if (pad)
1030		memset(seg->data + seg_size - pad, 0, pad);
1031
1032	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
1033					  agent.rmpp_version;
1034	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
1035	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
1036
1037	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
1038					struct ib_rmpp_segment, list);
1039	send_wr->last_ack_seg = send_wr->cur_seg;
1040	return 0;
1041}
1042
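/*
 * True when the MAD layer segments and reassembles RMPP transfers on behalf
 * of this agent; agents registered with IB_MAD_USER_RMPP handle RMPP
 * themselves in userspace.
 */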
1043int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
1044{
1045	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
1046}
1047EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
1048
1049struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
1050					    u32 remote_qpn, u16 pkey_index,
1051					    int rmpp_active,
1052					    int hdr_len, int data_len,
1053					    gfp_t gfp_mask,
1054					    u8 base_version)
1055{
1056	struct ib_mad_agent_private *mad_agent_priv;
1057	struct ib_mad_send_wr_private *mad_send_wr;
1058	int pad, message_size, ret, size;
1059	void *buf;
1060	size_t mad_size;
1061	bool opa;
1062
1063	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
1064				      agent);
1065
1066	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
1067
1068	if (opa && base_version == OPA_MGMT_BASE_VERSION)
1069		mad_size = sizeof(struct opa_mad);
1070	else
1071		mad_size = sizeof(struct ib_mad);
1072
1073	pad = get_pad_size(hdr_len, data_len, mad_size);
1074	message_size = hdr_len + data_len + pad;
1075
1076	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1077		if (!rmpp_active && message_size > mad_size)
1078			return ERR_PTR(-EINVAL);
1079	} else
1080		if (rmpp_active || message_size > mad_size)
1081			return ERR_PTR(-EINVAL);
1082
1083	size = rmpp_active ? hdr_len : mad_size;
1084	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1085	if (!buf)
1086		return ERR_PTR(-ENOMEM);
1087
1088	mad_send_wr = buf + size;
1089	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1090	mad_send_wr->send_buf.mad = buf;
1091	mad_send_wr->send_buf.hdr_len = hdr_len;
1092	mad_send_wr->send_buf.data_len = data_len;
1093	mad_send_wr->pad = pad;
1094
1095	mad_send_wr->mad_agent_priv = mad_agent_priv;
1096	mad_send_wr->sg_list[0].length = hdr_len;
1097	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
1098
1099	/* OPA MADs don't have to be the full 2048 bytes */
1100	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1101	    data_len < mad_size - hdr_len)
1102		mad_send_wr->sg_list[1].length = data_len;
1103	else
1104		mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1105
1106	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
1107
1108	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1109
1110	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1111	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
1112	mad_send_wr->send_wr.wr.num_sge = 2;
1113	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
1114	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
1115	mad_send_wr->send_wr.remote_qpn = remote_qpn;
1116	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
1117	mad_send_wr->send_wr.pkey_index = pkey_index;
1118
1119	if (rmpp_active) {
1120		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1121		if (ret) {
1122			kfree(buf);
1123			return ERR_PTR(ret);
1124		}
1125	}
1126
1127	mad_send_wr->send_buf.mad_agent = mad_agent;
1128	atomic_inc(&mad_agent_priv->refcount);
1129	return &mad_send_wr->send_buf;
1130}
1131EXPORT_SYMBOL(ib_create_send_mad);
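/*
 * Illustrative pairing with ib_post_send_mad() (the agent, ah and timeout
 * values below are assumptions, not taken from this file):
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, 0, GFP_KERNEL,
 *				 IB_MGMT_BASE_VERSION);
 *	if (!IS_ERR(msg)) {
 *		msg->ah = ah;
 *		msg->timeout_ms = 100;
 *		msg->retries = 2;
 *		if (ib_post_send_mad(msg, NULL))
 *			ib_free_send_mad(msg);
 *	}
 */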
1132
1133int ib_get_mad_data_offset(u8 mgmt_class)
1134{
1135	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1136		return IB_MGMT_SA_HDR;
1137	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1138		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1139		 (mgmt_class == IB_MGMT_CLASS_BIS))
1140		return IB_MGMT_DEVICE_HDR;
1141	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1142		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1143		return IB_MGMT_VENDOR_HDR;
1144	else
1145		return IB_MGMT_MAD_HDR;
1146}
1147EXPORT_SYMBOL(ib_get_mad_data_offset);
1148
1149int ib_is_mad_class_rmpp(u8 mgmt_class)
1150{
1151	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1152	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1153	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1154	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
1155	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1156	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1157		return 1;
1158	return 0;
1159}
1160EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1161
1162void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1163{
1164	struct ib_mad_send_wr_private *mad_send_wr;
1165	struct list_head *list;
1166
1167	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1168				   send_buf);
1169	list = &mad_send_wr->cur_seg->list;
1170
1171	if (mad_send_wr->cur_seg->num < seg_num) {
1172		list_for_each_entry(mad_send_wr->cur_seg, list, list)
1173			if (mad_send_wr->cur_seg->num == seg_num)
1174				break;
1175	} else if (mad_send_wr->cur_seg->num > seg_num) {
1176		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1177			if (mad_send_wr->cur_seg->num == seg_num)
1178				break;
1179	}
1180	return mad_send_wr->cur_seg->data;
1181}
1182EXPORT_SYMBOL(ib_get_rmpp_segment);
1183
1184static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1185{
1186	if (mad_send_wr->send_buf.seg_count)
1187		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1188					   mad_send_wr->seg_num);
1189	else
1190		return mad_send_wr->send_buf.mad +
1191		       mad_send_wr->send_buf.hdr_len;
1192}
1193
1194void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1195{
1196	struct ib_mad_agent_private *mad_agent_priv;
1197	struct ib_mad_send_wr_private *mad_send_wr;
1198
1199	mad_agent_priv = container_of(send_buf->mad_agent,
1200				      struct ib_mad_agent_private, agent);
1201	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1202				   send_buf);
1203
1204	free_send_rmpp_list(mad_send_wr);
1205	kfree(send_buf->mad);
1206	deref_mad_agent(mad_agent_priv);
1207}
1208EXPORT_SYMBOL(ib_free_send_mad);
1209
1210int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1211{
1212	struct ib_mad_qp_info *qp_info;
1213	struct list_head *list;
1214	struct ib_mad_agent *mad_agent;
1215	struct ib_sge *sge;
1216	unsigned long flags;
1217	int ret;
1218
1219	/* Set the CQE so that mad_send_wr can be found upon completion */
1220	qp_info = mad_send_wr->mad_agent_priv->qp_info;
1221	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1222	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1223	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1224
1225	mad_agent = mad_send_wr->send_buf.mad_agent;
1226	sge = mad_send_wr->sg_list;
1227	sge[0].addr = ib_dma_map_single(mad_agent->device,
1228					mad_send_wr->send_buf.mad,
1229					sge[0].length,
1230					DMA_TO_DEVICE);
1231	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1232		return -ENOMEM;
1233
1234	mad_send_wr->header_mapping = sge[0].addr;
1235
1236	sge[1].addr = ib_dma_map_single(mad_agent->device,
1237					ib_get_payload(mad_send_wr),
1238					sge[1].length,
1239					DMA_TO_DEVICE);
1240	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1241		ib_dma_unmap_single(mad_agent->device,
1242				    mad_send_wr->header_mapping,
1243				    sge[0].length, DMA_TO_DEVICE);
1244		return -ENOMEM;
1245	}
1246	mad_send_wr->payload_mapping = sge[1].addr;
1247
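	/*
	 * If the send queue is already at its maximum active depth, park the
	 * work request on the overflow list; it is posted later when an
	 * earlier send completes.
	 */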
1248	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1249	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1250		trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
1251		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1252				   NULL);
1253		list = &qp_info->send_queue.list;
1254	} else {
1255		ret = 0;
1256		list = &qp_info->overflow_list;
1257	}
1258
1259	if (!ret) {
1260		qp_info->send_queue.count++;
1261		list_add_tail(&mad_send_wr->mad_list.list, list);
1262	}
1263	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1264	if (ret) {
1265		ib_dma_unmap_single(mad_agent->device,
1266				    mad_send_wr->header_mapping,
1267				    sge[0].length, DMA_TO_DEVICE);
1268		ib_dma_unmap_single(mad_agent->device,
1269				    mad_send_wr->payload_mapping,
1270				    sge[1].length, DMA_TO_DEVICE);
1271	}
1272	return ret;
1273}
1274
1275/*
1276 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1277 *  with the registered client
1278 */
1279int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1280		     struct ib_mad_send_buf **bad_send_buf)
1281{
1282	struct ib_mad_agent_private *mad_agent_priv;
1283	struct ib_mad_send_buf *next_send_buf;
1284	struct ib_mad_send_wr_private *mad_send_wr;
1285	unsigned long flags;
1286	int ret = -EINVAL;
1287
1288	/* Walk list of send WRs and post each on send list */
1289	for (; send_buf; send_buf = next_send_buf) {
1290		mad_send_wr = container_of(send_buf,
1291					   struct ib_mad_send_wr_private,
1292					   send_buf);
1293		mad_agent_priv = mad_send_wr->mad_agent_priv;
1294
1295		ret = ib_mad_enforce_security(mad_agent_priv,
1296					      mad_send_wr->send_wr.pkey_index);
1297		if (ret)
1298			goto error;
1299
1300		if (!send_buf->mad_agent->send_handler ||
1301		    (send_buf->timeout_ms &&
1302		     !send_buf->mad_agent->recv_handler)) {
1303			ret = -EINVAL;
1304			goto error;
1305		}
1306
1307		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1308			if (mad_agent_priv->agent.rmpp_version) {
1309				ret = -EINVAL;
1310				goto error;
1311			}
1312		}
1313
1314		/*
1315		 * Save pointer to next work request to post in case the
1316		 * current one completes, and the user modifies the work
1317		 * request associated with the completion
1318		 */
1319		next_send_buf = send_buf->next;
1320		mad_send_wr->send_wr.ah = send_buf->ah;
1321
1322		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1323		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1324			ret = handle_outgoing_dr_smp(mad_agent_priv,
1325						     mad_send_wr);
1326			if (ret < 0)		/* error */
1327				goto error;
1328			else if (ret == 1)	/* locally consumed */
1329				continue;
1330		}
1331
1332		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1333		/* Timeout will be updated after send completes */
1334		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1335		mad_send_wr->max_retries = send_buf->retries;
1336		mad_send_wr->retries_left = send_buf->retries;
1337		send_buf->retries = 0;
1338		/* Reference for work request to QP + response */
1339		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1340		mad_send_wr->status = IB_WC_SUCCESS;
1341
1342		/* Reference MAD agent until send completes */
1343		atomic_inc(&mad_agent_priv->refcount);
1344		spin_lock_irqsave(&mad_agent_priv->lock, flags);
1345		list_add_tail(&mad_send_wr->agent_list,
1346			      &mad_agent_priv->send_list);
1347		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1348
1349		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1350			ret = ib_send_rmpp_mad(mad_send_wr);
1351			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1352				ret = ib_send_mad(mad_send_wr);
1353		} else
1354			ret = ib_send_mad(mad_send_wr);
1355		if (ret < 0) {
1356			/* Fail send request */
1357			spin_lock_irqsave(&mad_agent_priv->lock, flags);
1358			list_del(&mad_send_wr->agent_list);
1359			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1360			atomic_dec(&mad_agent_priv->refcount);
1361			goto error;
1362		}
1363	}
1364	return 0;
1365error:
1366	if (bad_send_buf)
1367		*bad_send_buf = send_buf;
1368	return ret;
1369}
1370EXPORT_SYMBOL(ib_post_send_mad);
1371
1372/*
1373 * ib_free_recv_mad - Returns data buffers used to receive
1374 *  a MAD to the access layer
1375 */
1376void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1377{
1378	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1379	struct ib_mad_private_header *mad_priv_hdr;
1380	struct ib_mad_private *priv;
1381	struct list_head free_list;
1382
1383	INIT_LIST_HEAD(&free_list);
1384	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1385
1386	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1387					&free_list, list) {
1388		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1389					   recv_buf);
1390		mad_priv_hdr = container_of(mad_recv_wc,
1391					    struct ib_mad_private_header,
1392					    recv_wc);
1393		priv = container_of(mad_priv_hdr, struct ib_mad_private,
1394				    header);
1395		kfree(priv);
1396	}
1397}
1398EXPORT_SYMBOL(ib_free_recv_mad);
1399
1400struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1401					u8 rmpp_version,
1402					ib_mad_send_handler send_handler,
1403					ib_mad_recv_handler recv_handler,
1404					void *context)
1405{
1406	return ERR_PTR(-EINVAL);	/* XXX: for now */
1407}
1408EXPORT_SYMBOL(ib_redirect_mad_qp);
1409
1410int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1411		      struct ib_wc *wc)
1412{
1413	dev_err(&mad_agent->device->dev,
1414		"ib_process_mad_wc() not implemented yet\n");
1415	return 0;
1416}
1417EXPORT_SYMBOL(ib_process_mad_wc);
1418
1419static int method_in_use(struct ib_mad_mgmt_method_table **method,
1420			 struct ib_mad_reg_req *mad_reg_req)
1421{
1422	int i;
1423
1424	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1425		if ((*method)->agent[i]) {
1426			pr_err("Method %d already in use\n", i);
1427			return -EINVAL;
1428		}
1429	}
1430	return 0;
1431}
1432
1433static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1434{
1435	/* Allocate management method table */
1436	*method = kzalloc(sizeof **method, GFP_ATOMIC);
1437	return (*method) ? 0 : (-ENOMEM);
1438}
1439
1440/*
1441 * Check to see if there are any methods still in use
1442 */
1443static int check_method_table(struct ib_mad_mgmt_method_table *method)
1444{
1445	int i;
1446
1447	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1448		if (method->agent[i])
1449			return 1;
1450	return 0;
1451}
1452
1453/*
1454 * Check to see if there are any method tables for this class still in use
1455 */
1456static int check_class_table(struct ib_mad_mgmt_class_table *class)
1457{
1458	int i;
1459
1460	for (i = 0; i < MAX_MGMT_CLASS; i++)
1461		if (class->method_table[i])
1462			return 1;
1463	return 0;
1464}
1465
1466static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1467{
1468	int i;
1469
1470	for (i = 0; i < MAX_MGMT_OUI; i++)
1471		if (vendor_class->method_table[i])
1472			return 1;
1473	return 0;
1474}
1475
1476static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1477			   const char *oui)
1478{
1479	int i;
1480
1481	for (i = 0; i < MAX_MGMT_OUI; i++)
1482		/* Is there a matching OUI for this vendor class? */
1483		if (!memcmp(vendor_class->oui[i], oui, 3))
1484			return i;
1485
1486	return -1;
1487}
1488
1489static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1490{
1491	int i;
1492
1493	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1494		if (vendor->vendor_class[i])
1495			return 1;
1496
1497	return 0;
1498}
1499
1500static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1501				     struct ib_mad_agent_private *agent)
1502{
1503	int i;
1504
1505	/* Remove any methods for this mad agent */
1506	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1507		if (method->agent[i] == agent) {
1508			method->agent[i] = NULL;
1509		}
1510	}
1511}
1512
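/*
 * Record a non-vendor-OUI registration.  The lookup path is
 * port_priv->version[class_version].class->method_table[mgmt_class]->
 * agent[method], with the intermediate tables allocated on first use.
 */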
1513static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1514			      struct ib_mad_agent_private *agent_priv,
1515			      u8 mgmt_class)
1516{
1517	struct ib_mad_port_private *port_priv;
1518	struct ib_mad_mgmt_class_table **class;
1519	struct ib_mad_mgmt_method_table **method;
1520	int i, ret;
1521
1522	port_priv = agent_priv->qp_info->port_priv;
1523	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1524	if (!*class) {
1525		/* Allocate management class table for "new" class version */
1526		*class = kzalloc(sizeof **class, GFP_ATOMIC);
1527		if (!*class) {
1528			ret = -ENOMEM;
1529			goto error1;
1530		}
1531
1532		/* Allocate method table for this management class */
1533		method = &(*class)->method_table[mgmt_class];
1534		if ((ret = allocate_method_table(method)))
1535			goto error2;
1536	} else {
1537		method = &(*class)->method_table[mgmt_class];
1538		if (!*method) {
1539			/* Allocate method table for this management class */
1540			if ((ret = allocate_method_table(method)))
1541				goto error1;
1542		}
1543	}
1544
1545	/* Now, make sure methods are not already in use */
1546	if (method_in_use(method, mad_reg_req))
1547		goto error3;
1548
1549	/* Finally, add in methods being registered */
1550	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1551		(*method)->agent[i] = agent_priv;
1552
1553	return 0;
1554
1555error3:
1556	/* Remove any methods for this mad agent */
1557	remove_methods_mad_agent(*method, agent_priv);
1558	/* Now, check to see if there are any methods in use */
1559	if (!check_method_table(*method)) {
1560		/* If not, release management method table */
1561		kfree(*method);
1562		*method = NULL;
1563	}
1564	ret = -EINVAL;
1565	goto error1;
1566error2:
1567	kfree(*class);
1568	*class = NULL;
1569error1:
1570	return ret;
1571}
1572
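/*
 * Record a vendor-class (OUI-qualified) registration.  The lookup path is
 * port_priv->version[class_version].vendor->vendor_class[vclass]->
 * method_table[oui index]->agent[method], with the OUI stored alongside its
 * method table slot.
 */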
1573static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1574			   struct ib_mad_agent_private *agent_priv)
1575{
1576	struct ib_mad_port_private *port_priv;
1577	struct ib_mad_mgmt_vendor_class_table **vendor_table;
1578	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1579	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1580	struct ib_mad_mgmt_method_table **method;
1581	int i, ret = -ENOMEM;
1582	u8 vclass;
1583
1584	/* "New" vendor (with OUI) class */
1585	vclass = vendor_class_index(mad_reg_req->mgmt_class);
1586	port_priv = agent_priv->qp_info->port_priv;
1587	vendor_table = &port_priv->version[
1588				mad_reg_req->mgmt_class_version].vendor;
1589	if (!*vendor_table) {
1590		/* Allocate mgmt vendor class table for "new" class version */
1591		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1592		if (!vendor)
1593			goto error1;
1594
1595		*vendor_table = vendor;
1596	}
1597	if (!(*vendor_table)->vendor_class[vclass]) {
1598		/* Allocate table for this management vendor class */
1599		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1600		if (!vendor_class)
1601			goto error2;
1602
1603		(*vendor_table)->vendor_class[vclass] = vendor_class;
1604	}
1605	for (i = 0; i < MAX_MGMT_OUI; i++) {
1606		/* Is there a matching OUI for this vendor class? */
1607		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1608			    mad_reg_req->oui, 3)) {
1609			method = &(*vendor_table)->vendor_class[
1610						vclass]->method_table[i];
1611			if (!*method)
1612				goto error3;
1613			goto check_in_use;
1614		}
1615	}
1616	for (i = 0; i < MAX_MGMT_OUI; i++) {
1617		/* Is an OUI slot available? */
1618		if (!is_vendor_oui((*vendor_table)->vendor_class[
1619				vclass]->oui[i])) {
1620			method = &(*vendor_table)->vendor_class[
1621				vclass]->method_table[i];
1622			/* Allocate method table for this OUI */
1623			if (!*method) {
1624				ret = allocate_method_table(method);
1625				if (ret)
1626					goto error3;
1627			}
1628			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1629			       mad_reg_req->oui, 3);
1630			goto check_in_use;
1631		}
1632	}
1633	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1634	goto error3;
1635
1636check_in_use:
1637	/* Now, make sure methods are not already in use */
1638	if (method_in_use(method, mad_reg_req))
1639		goto error4;
1640
1641	/* Finally, add in methods being registered */
1642	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1643		(*method)->agent[i] = agent_priv;
1644
1645	return 0;
1646
1647error4:
1648	/* Remove any methods for this mad agent */
1649	remove_methods_mad_agent(*method, agent_priv);
1650	/* Now, check to see if there are any methods in use */
1651	if (!check_method_table(*method)) {
1652		/* If not, release management method table */
1653		kfree(*method);
1654		*method = NULL;
1655	}
1656	ret = -EINVAL;
1657error3:
1658	if (vendor_class) {
1659		(*vendor_table)->vendor_class[vclass] = NULL;
1660		kfree(vendor_class);
1661	}
1662error2:
1663	if (vendor) {
1664		*vendor_table = NULL;
1665		kfree(vendor);
1666	}
1667error1:
1668	return ret;
1669}
1670
1671static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1672{
1673	struct ib_mad_port_private *port_priv;
1674	struct ib_mad_mgmt_class_table *class;
1675	struct ib_mad_mgmt_method_table *method;
1676	struct ib_mad_mgmt_vendor_class_table *vendor;
1677	struct ib_mad_mgmt_vendor_class *vendor_class;
1678	int index;
1679	u8 mgmt_class;
1680
1681	/*
1682	 * Was a MAD registration request supplied
1683	 * with the original registration?
1684	 */
1685	if (!agent_priv->reg_req) {
1686		goto out;
1687	}
1688
1689	port_priv = agent_priv->qp_info->port_priv;
1690	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1691	class = port_priv->version[
1692			agent_priv->reg_req->mgmt_class_version].class;
1693	if (!class)
1694		goto vendor_check;
1695
1696	method = class->method_table[mgmt_class];
1697	if (method) {
1698		/* Remove any methods for this mad agent */
1699		remove_methods_mad_agent(method, agent_priv);
1700		/* Now, check to see if there are any methods still in use */
1701		if (!check_method_table(method)) {
1702			/* If not, release management method table */
1703			kfree(method);
1704			class->method_table[mgmt_class] = NULL;
1705			/* Any management classes left? */
1706			if (!check_class_table(class)) {
1707				/* If not, release management class table */
1708				kfree(class);
1709				port_priv->version[
1710					agent_priv->reg_req->
1711					mgmt_class_version].class = NULL;
1712			}
1713		}
1714	}
1715
1716vendor_check:
1717	if (!is_vendor_class(mgmt_class))
1718		goto out;
1719
1720	/* normalize mgmt_class to vendor range 2 */
1721	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1722	vendor = port_priv->version[
1723			agent_priv->reg_req->mgmt_class_version].vendor;
1724
1725	if (!vendor)
1726		goto out;
1727
1728	vendor_class = vendor->vendor_class[mgmt_class];
1729	if (vendor_class) {
1730		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1731		if (index < 0)
1732			goto out;
1733		method = vendor_class->method_table[index];
1734		if (method) {
1735			/* Remove any methods for this mad agent */
1736			remove_methods_mad_agent(method, agent_priv);
1737			/*
1738			 * Now, check to see if there are
1739			 * any methods still in use
1740			 */
1741			if (!check_method_table(method)) {
1742				/* If not, release management method table */
1743				kfree(method);
1744				vendor_class->method_table[index] = NULL;
1745				memset(vendor_class->oui[index], 0, 3);
1746				/* Any OUIs left? */
1747				if (!check_vendor_class(vendor_class)) {
1748					/* If not, release vendor class table */
1749					kfree(vendor_class);
1750					vendor->vendor_class[mgmt_class] = NULL;
1751					/* Any other vendor classes left? */
1752					if (!check_vendor_table(vendor)) {
1753						kfree(vendor);
1754						port_priv->version[
1755							agent_priv->reg_req->
1756							mgmt_class_version].
1757							vendor = NULL;
1758					}
1759				}
1760			}
1761		}
1762	}
1763
1764out:
1765	return;
1766}
1767
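/*
 * Find the agent that should receive an incoming MAD: responses are routed
 * via the hi_tid in the transaction ID, requests by walking the class and
 * vendor method tables registered for this port.
 */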
1768static struct ib_mad_agent_private *
1769find_mad_agent(struct ib_mad_port_private *port_priv,
1770	       const struct ib_mad_hdr *mad_hdr)
1771{
1772	struct ib_mad_agent_private *mad_agent = NULL;
1773	unsigned long flags;
1774
1775	if (ib_response_mad(mad_hdr)) {
1776		u32 hi_tid;
1777
1778		/*
1779		 * Routing is based on high 32 bits of transaction ID
1780		 * of MAD.
1781		 */
1782		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1783		rcu_read_lock();
1784		mad_agent = xa_load(&ib_mad_clients, hi_tid);
1785		if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
1786			mad_agent = NULL;
1787		rcu_read_unlock();
1788	} else {
1789		struct ib_mad_mgmt_class_table *class;
1790		struct ib_mad_mgmt_method_table *method;
1791		struct ib_mad_mgmt_vendor_class_table *vendor;
1792		struct ib_mad_mgmt_vendor_class *vendor_class;
1793		const struct ib_vendor_mad *vendor_mad;
1794		int index;
1795
1796		spin_lock_irqsave(&port_priv->reg_lock, flags);
1797		/*
1798		 * Routing is based on version, class, and method
1799		 * For "newer" vendor MADs, also based on OUI
1800		 */
1801		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1802			goto out;
1803		if (!is_vendor_class(mad_hdr->mgmt_class)) {
1804			class = port_priv->version[
1805					mad_hdr->class_version].class;
1806			if (!class)
1807				goto out;
1808			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1809			    ARRAY_SIZE(class->method_table))
1810				goto out;
1811			method = class->method_table[convert_mgmt_class(
1812							mad_hdr->mgmt_class)];
1813			if (method)
1814				mad_agent = method->agent[mad_hdr->method &
1815							  ~IB_MGMT_METHOD_RESP];
1816		} else {
1817			vendor = port_priv->version[
1818					mad_hdr->class_version].vendor;
1819			if (!vendor)
1820				goto out;
1821			vendor_class = vendor->vendor_class[vendor_class_index(
1822						mad_hdr->mgmt_class)];
1823			if (!vendor_class)
1824				goto out;
1825			/* Find matching OUI */
1826			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1827			index = find_vendor_oui(vendor_class, vendor_mad->oui);
1828			if (index == -1)
1829				goto out;
1830			method = vendor_class->method_table[index];
1831			if (method) {
1832				mad_agent = method->agent[mad_hdr->method &
1833							  ~IB_MGMT_METHOD_RESP];
1834			}
1835		}
1836		if (mad_agent)
1837			atomic_inc(&mad_agent->refcount);
1838out:
1839		spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1840	}
1841
1842	if (mad_agent && !mad_agent->agent.recv_handler) {
1843		dev_notice(&port_priv->device->dev,
1844			   "No receive handler for client %p on port %d\n",
1845			   &mad_agent->agent, port_priv->port_num);
1846		deref_mad_agent(mad_agent);
1847		mad_agent = NULL;
1848	}
1849
1850	return mad_agent;
1851}
1852
1853static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1854			const struct ib_mad_qp_info *qp_info,
1855			bool opa)
1856{
1857	int valid = 0;
1858	u32 qp_num = qp_info->qp->qp_num;
1859
1860	/* Make sure MAD base version is understood */
1861	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1862	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1863		pr_err("MAD received with unsupported base version %d %s\n",
1864		       mad_hdr->base_version, opa ? "(opa)" : "");
1865		goto out;
1866	}
1867
1868	/* Filter SMI packets sent to other than QP0 */
1869	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1870	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1871		if (qp_num == 0)
1872			valid = 1;
1873	} else {
1874		/* CM attributes other than ClassPortInfo only use Send method */
1875		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1876		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1877		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
1878			goto out;
1879		/* Filter GSI packets sent to QP0 */
1880		if (qp_num != 0)
1881			valid = 1;
1882	}
1883
1884out:
1885	return valid;
1886}
1887
1888static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1889			    const struct ib_mad_hdr *mad_hdr)
1890{
1891	struct ib_rmpp_mad *rmpp_mad;
1892
1893	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1894	return !mad_agent_priv->agent.rmpp_version ||
1895		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1896		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1897				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
1898		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1899}
1900
1901static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1902				     const struct ib_mad_recv_wc *rwc)
1903{
1904	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1905		rwc->recv_buf.mad->mad_hdr.mgmt_class;
1906}
1907
1908static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1909				   const struct ib_mad_send_wr_private *wr,
1910				   const struct ib_mad_recv_wc *rwc )
1911{
1912	struct rdma_ah_attr attr;
1913	u8 send_resp, rcv_resp;
1914	union ib_gid sgid;
1915	struct ib_device *device = mad_agent_priv->agent.device;
1916	u8 port_num = mad_agent_priv->agent.port_num;
1917	u8 lmc;
1918	bool has_grh;
1919
1920	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1921	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1922
1923	if (send_resp == rcv_resp)
1924		/* both requests, or both responses. GIDs different */
1925		return 0;
1926
1927	if (rdma_query_ah(wr->send_buf.ah, &attr))
1928		/* Assume not equal, to avoid false positives. */
1929		return 0;
1930
1931	has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
1932	if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
1933		/* one has GID, other does not.  Assume different */
1934		return 0;
1935
1936	if (!send_resp && rcv_resp) {
1937		/* This is a request/response pair. */
1938		if (!has_grh) {
1939			if (ib_get_cached_lmc(device, port_num, &lmc))
1940				return 0;
1941			return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
1942					   rwc->wc->dlid_path_bits) &
1943					  ((1 << lmc) - 1)));
1944		} else {
1945			const struct ib_global_route *grh =
1946					rdma_ah_read_grh(&attr);
1947
1948			if (rdma_query_gid(device, port_num,
1949					   grh->sgid_index, &sgid))
1950				return 0;
1951			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1952				       16);
1953		}
1954	}
1955
1956	if (!has_grh)
1957		return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
1958	else
1959		return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
1960			       rwc->recv_buf.grh->sgid.raw,
1961			       16);
1962}
1963
1964static inline int is_direct(u8 class)
1965{
1966	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1967}
1968
1969struct ib_mad_send_wr_private*
1970ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1971		 const struct ib_mad_recv_wc *wc)
1972{
1973	struct ib_mad_send_wr_private *wr;
1974	const struct ib_mad_hdr *mad_hdr;
1975
1976	mad_hdr = &wc->recv_buf.mad->mad_hdr;
1977
1978	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1979		if ((wr->tid == mad_hdr->tid) &&
1980		    rcv_has_same_class(wr, wc) &&
1981		    /*
1982		     * Don't check GID for direct routed MADs.
1983		     * These might have permissive LIDs.
1984		     */
1985		    (is_direct(mad_hdr->mgmt_class) ||
1986		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
1987			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1988	}
1989
1990	/*
1991	 * It's possible to receive the response before we've
1992	 * been notified that the send has completed
1993	 */
1994	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1995		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1996		    wr->tid == mad_hdr->tid &&
1997		    wr->timeout &&
1998		    rcv_has_same_class(wr, wc) &&
1999		    /*
2000		     * Don't check GID for direct routed MADs.
2001		     * These might have permissive LIDs.
2002		     */
2003		    (is_direct(mad_hdr->mgmt_class) ||
2004		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
2005			/* Verify request has not been canceled */
2006			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
2007	}
2008	return NULL;
2009}
2010
2011void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
2012{
2013	mad_send_wr->timeout = 0;
2014	if (mad_send_wr->refcount == 1)
2015		list_move_tail(&mad_send_wr->agent_list,
2016			      &mad_send_wr->mad_agent_priv->done_list);
2017}
2018
2019static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
2020				 struct ib_mad_recv_wc *mad_recv_wc)
2021{
2022	struct ib_mad_send_wr_private *mad_send_wr;
2023	struct ib_mad_send_wc mad_send_wc;
2024	unsigned long flags;
2025	int ret;
2026
2027	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
2028	ret = ib_mad_enforce_security(mad_agent_priv,
2029				      mad_recv_wc->wc->pkey_index);
2030	if (ret) {
2031		ib_free_recv_mad(mad_recv_wc);
2032		deref_mad_agent(mad_agent_priv);
2033		return;
2034	}
2035
2036	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
2037	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2038		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
2039						      mad_recv_wc);
2040		if (!mad_recv_wc) {
2041			deref_mad_agent(mad_agent_priv);
2042			return;
2043		}
2044	}
2045
2046	/* Complete corresponding request */
2047	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
2048		spin_lock_irqsave(&mad_agent_priv->lock, flags);
2049		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
2050		if (!mad_send_wr) {
2051			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2052			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
2053			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
2054			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
2055					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
2056				/* user rmpp is in effect
2057				 * and this is an active RMPP MAD
2058				 */
2059				mad_agent_priv->agent.recv_handler(
2060						&mad_agent_priv->agent, NULL,
2061						mad_recv_wc);
2062				atomic_dec(&mad_agent_priv->refcount);
2063			} else {
2064				/* not user rmpp, revert to normal behavior and
2065				 * drop the mad */
2066				ib_free_recv_mad(mad_recv_wc);
2067				deref_mad_agent(mad_agent_priv);
2068				return;
2069			}
2070		} else {
2071			ib_mark_mad_done(mad_send_wr);
2072			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2073
2074			/* Defined behavior is to complete response before request */
2075			mad_agent_priv->agent.recv_handler(
2076					&mad_agent_priv->agent,
2077					&mad_send_wr->send_buf,
2078					mad_recv_wc);
2079			atomic_dec(&mad_agent_priv->refcount);
2080
2081			mad_send_wc.status = IB_WC_SUCCESS;
2082			mad_send_wc.vendor_err = 0;
2083			mad_send_wc.send_buf = &mad_send_wr->send_buf;
2084			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2085		}
2086	} else {
2087		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
2088						   mad_recv_wc);
2089		deref_mad_agent(mad_agent_priv);
2090	}
2091
2092	return;
2093}
2094
2095static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2096				     const struct ib_mad_qp_info *qp_info,
2097				     const struct ib_wc *wc,
2098				     int port_num,
2099				     struct ib_mad_private *recv,
2100				     struct ib_mad_private *response)
2101{
2102	enum smi_forward_action retsmi;
2103	struct ib_smp *smp = (struct ib_smp *)recv->mad;
2104
2105	trace_ib_mad_handle_ib_smi(smp);
2106
2107	if (smi_handle_dr_smp_recv(smp,
2108				   rdma_cap_ib_switch(port_priv->device),
2109				   port_num,
2110				   port_priv->device->phys_port_cnt) ==
2111				   IB_SMI_DISCARD)
2112		return IB_SMI_DISCARD;
2113
2114	retsmi = smi_check_forward_dr_smp(smp);
2115	if (retsmi == IB_SMI_LOCAL)
2116		return IB_SMI_HANDLE;
2117
2118	if (retsmi == IB_SMI_SEND) { /* don't forward */
2119		if (smi_handle_dr_smp_send(smp,
2120					   rdma_cap_ib_switch(port_priv->device),
2121					   port_num) == IB_SMI_DISCARD)
2122			return IB_SMI_DISCARD;
2123
2124		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2125			return IB_SMI_DISCARD;
2126	} else if (rdma_cap_ib_switch(port_priv->device)) {
2127		/* forward case for switches */
2128		memcpy(response, recv, mad_priv_size(response));
2129		response->header.recv_wc.wc = &response->header.wc;
2130		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2131		response->header.recv_wc.recv_buf.grh = &response->grh;
2132
2133		agent_send_response((const struct ib_mad_hdr *)response->mad,
2134				    &response->grh, wc,
2135				    port_priv->device,
2136				    smi_get_fwd_port(smp),
2137				    qp_info->qp->qp_num,
2138				    response->mad_size,
2139				    false);
2140
2141		return IB_SMI_DISCARD;
2142	}
2143	return IB_SMI_HANDLE;
2144}
2145
2146static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2147				    struct ib_mad_private *response,
2148				    size_t *resp_len, bool opa)
2149{
2150	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2151	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2152
2153	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2154	    recv_hdr->method == IB_MGMT_METHOD_SET) {
2155		memcpy(response, recv, mad_priv_size(response));
2156		response->header.recv_wc.wc = &response->header.wc;
2157		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2158		response->header.recv_wc.recv_buf.grh = &response->grh;
2159		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2160		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2161		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2162			resp_hdr->status |= IB_SMP_DIRECTION;
2163
2164		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2165			if (recv_hdr->mgmt_class ==
2166			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2167			    recv_hdr->mgmt_class ==
2168			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2169				*resp_len = opa_get_smp_header_size(
2170							(struct opa_smp *)recv->mad);
2171			else
2172				*resp_len = sizeof(struct ib_mad_hdr);
2173		}
2174
2175		return true;
2176	} else {
2177		return false;
2178	}
2179}
2180
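/* OPA counterpart of handle_ib_smi() for OPA directed route SMPs */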
2181static enum smi_action
2182handle_opa_smi(struct ib_mad_port_private *port_priv,
2183	       struct ib_mad_qp_info *qp_info,
2184	       struct ib_wc *wc,
2185	       int port_num,
2186	       struct ib_mad_private *recv,
2187	       struct ib_mad_private *response)
2188{
2189	enum smi_forward_action retsmi;
2190	struct opa_smp *smp = (struct opa_smp *)recv->mad;
2191
2192	trace_ib_mad_handle_opa_smi(smp);
2193
2194	if (opa_smi_handle_dr_smp_recv(smp,
2195				   rdma_cap_ib_switch(port_priv->device),
2196				   port_num,
2197				   port_priv->device->phys_port_cnt) ==
2198				   IB_SMI_DISCARD)
2199		return IB_SMI_DISCARD;
2200
2201	retsmi = opa_smi_check_forward_dr_smp(smp);
2202	if (retsmi == IB_SMI_LOCAL)
2203		return IB_SMI_HANDLE;
2204
2205	if (retsmi == IB_SMI_SEND) { /* don't forward */
2206		if (opa_smi_handle_dr_smp_send(smp,
2207					   rdma_cap_ib_switch(port_priv->device),
2208					   port_num) == IB_SMI_DISCARD)
2209			return IB_SMI_DISCARD;
2210
2211		if (opa_smi_check_local_smp(smp, port_priv->device) ==
2212		    IB_SMI_DISCARD)
2213			return IB_SMI_DISCARD;
2214
2215	} else if (rdma_cap_ib_switch(port_priv->device)) {
2216		/* forward case for switches */
2217		memcpy(response, recv, mad_priv_size(response));
2218		response->header.recv_wc.wc = &response->header.wc;
2219		response->header.recv_wc.recv_buf.opa_mad =
2220				(struct opa_mad *)response->mad;
2221		response->header.recv_wc.recv_buf.grh = &response->grh;
2222
2223		agent_send_response((const struct ib_mad_hdr *)response->mad,
2224				    &response->grh, wc,
2225				    port_priv->device,
2226				    opa_smi_get_fwd_port(smp),
2227				    qp_info->qp->qp_num,
2228				    recv->header.wc.byte_len,
2229				    true);
2230
2231		return IB_SMI_DISCARD;
2232	}
2233
2234	return IB_SMI_HANDLE;
2235}
2236
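/*
 * Dispatch a directed route SMP to the OPA or IB handler based on the
 * MAD base and class versions.
 */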
2237static enum smi_action
2238handle_smi(struct ib_mad_port_private *port_priv,
2239	   struct ib_mad_qp_info *qp_info,
2240	   struct ib_wc *wc,
2241	   int port_num,
2242	   struct ib_mad_private *recv,
2243	   struct ib_mad_private *response,
2244	   bool opa)
2245{
2246	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2247
2248	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2249	    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
2250		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2251				      response);
2252
2253	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2254}
2255
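/*
 * Completion handler for receive work requests: unmap the buffer, build
 * the MAD receive work completion, validate the MAD, give the SMI code
 * and the driver's process_mad() a first look, then hand the MAD to the
 * matching agent (or generate an "unsupported" response), and finally
 * repost a receive buffer on the QP.
 */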
2256static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2257{
2258	struct ib_mad_port_private *port_priv = cq->cq_context;
2259	struct ib_mad_list_head *mad_list =
2260		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2261	struct ib_mad_qp_info *qp_info;
2262	struct ib_mad_private_header *mad_priv_hdr;
2263	struct ib_mad_private *recv, *response = NULL;
2264	struct ib_mad_agent_private *mad_agent;
2265	int port_num;
2266	int ret = IB_MAD_RESULT_SUCCESS;
2267	size_t mad_size;
2268	u16 resp_mad_pkey_index = 0;
2269	bool opa;
2270
2271	if (list_empty_careful(&port_priv->port_list))
2272		return;
2273
2274	if (wc->status != IB_WC_SUCCESS) {
2275		/*
2276		 * Receive errors indicate that the QP has entered the error
2277		 * state - error handling/shutdown code will cleanup
2278		 */
2279		return;
2280	}
2281
2282	qp_info = mad_list->mad_queue->qp_info;
2283	dequeue_mad(mad_list);
2284
2285	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2286			       qp_info->port_priv->port_num);
2287
2288	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2289				    mad_list);
2290	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2291	ib_dma_unmap_single(port_priv->device,
2292			    recv->header.mapping,
2293			    mad_priv_dma_size(recv),
2294			    DMA_FROM_DEVICE);
2295
2296	/* Setup MAD receive work completion from "normal" work completion */
2297	recv->header.wc = *wc;
2298	recv->header.recv_wc.wc = &recv->header.wc;
2299
2300	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2301		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2302		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2303	} else {
2304		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2305		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2306	}
2307
2308	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2309	recv->header.recv_wc.recv_buf.grh = &recv->grh;
2310
2311	if (atomic_read(&qp_info->snoop_count))
2312		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2313
2314	/* Validate MAD */
2315	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2316		goto out;
2317
2318	trace_ib_mad_recv_done_handler(qp_info, wc,
2319				       (struct ib_mad_hdr *)recv->mad);
2320
2321	mad_size = recv->mad_size;
2322	response = alloc_mad_private(mad_size, GFP_KERNEL);
2323	if (!response)
2324		goto out;
2325
2326	if (rdma_cap_ib_switch(port_priv->device))
2327		port_num = wc->port_num;
2328	else
2329		port_num = port_priv->port_num;
2330
2331	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2332	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2333		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2334			       response, opa)
2335		    == IB_SMI_DISCARD)
2336			goto out;
2337	}
2338
2339	/* Give driver "right of first refusal" on incoming MAD */
2340	if (port_priv->device->ops.process_mad) {
2341		ret = port_priv->device->ops.process_mad(
2342			port_priv->device, 0, port_priv->port_num, wc,
2343			&recv->grh, (const struct ib_mad_hdr *)recv->mad,
2344			recv->mad_size, (struct ib_mad_hdr *)response->mad,
2345			&mad_size, &resp_mad_pkey_index);
2346
2347		if (opa)
2348			wc->pkey_index = resp_mad_pkey_index;
2349
2350		if (ret & IB_MAD_RESULT_SUCCESS) {
2351			if (ret & IB_MAD_RESULT_CONSUMED)
2352				goto out;
2353			if (ret & IB_MAD_RESULT_REPLY) {
2354				agent_send_response((const struct ib_mad_hdr *)response->mad,
2355						    &recv->grh, wc,
2356						    port_priv->device,
2357						    port_num,
2358						    qp_info->qp->qp_num,
2359						    mad_size, opa);
2360				goto out;
2361			}
2362		}
2363	}
2364
2365	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2366	if (mad_agent) {
2367		trace_ib_mad_recv_done_agent(mad_agent);
2368		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2369		/*
2370		 * recv is freed up in error cases in ib_mad_complete_recv
2371		 * or via recv_handler in ib_mad_complete_recv()
2372		 */
2373		recv = NULL;
2374	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2375		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
2376		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2377				    port_priv->device, port_num,
2378				    qp_info->qp->qp_num, mad_size, opa);
2379	}
2380
2381out:
2382	/* Post another receive request for this QP */
2383	if (response) {
2384		ib_mad_post_receive_mads(qp_info, response);
2385		kfree(recv);
2386	} else
2387		ib_mad_post_receive_mads(qp_info, recv);
2388}
2389
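/*
 * Re-arm (or cancel) the agent's timeout work based on the earliest
 * entry remaining on its wait list.
 */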
2390static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2391{
2392	struct ib_mad_send_wr_private *mad_send_wr;
2393	unsigned long delay;
2394
2395	if (list_empty(&mad_agent_priv->wait_list)) {
2396		cancel_delayed_work(&mad_agent_priv->timed_work);
2397	} else {
2398		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2399					 struct ib_mad_send_wr_private,
2400					 agent_list);
2401
2402		if (time_after(mad_agent_priv->timeout,
2403			       mad_send_wr->timeout)) {
2404			mad_agent_priv->timeout = mad_send_wr->timeout;
2405			delay = mad_send_wr->timeout - jiffies;
2406			if ((long)delay <= 0)
2407				delay = 1;
2408			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2409					 &mad_agent_priv->timed_work, delay);
2410		}
2411	}
2412}
2413
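/*
 * Move a sent MAD onto the agent's wait list, ordered by absolute
 * timeout, and reschedule the timeout work if it is now the earliest
 * entry.
 */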
2414static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2415{
2416	struct ib_mad_agent_private *mad_agent_priv;
2417	struct ib_mad_send_wr_private *temp_mad_send_wr;
2418	struct list_head *list_item;
2419	unsigned long delay;
2420
2421	mad_agent_priv = mad_send_wr->mad_agent_priv;
2422	list_del(&mad_send_wr->agent_list);
2423
2424	delay = mad_send_wr->timeout;
2425	mad_send_wr->timeout += jiffies;
2426
2427	if (delay) {
2428		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2429			temp_mad_send_wr = list_entry(list_item,
2430						struct ib_mad_send_wr_private,
2431						agent_list);
2432			if (time_after(mad_send_wr->timeout,
2433				       temp_mad_send_wr->timeout))
2434				break;
2435		}
2436	}
2437	else
2438		list_item = &mad_agent_priv->wait_list;
2439	list_add(&mad_send_wr->agent_list, list_item);
2440
2441	/* Reschedule a work item if we have a shorter timeout */
2442	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2443		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2444				 &mad_agent_priv->timed_work, delay);
2445}
2446
2447void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2448			  unsigned long timeout_ms)
2449{
2450	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2451	wait_for_response(mad_send_wr);
2452}
2453
2454/*
2455 * Process a send work completion
2456 */
2457void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2458			     struct ib_mad_send_wc *mad_send_wc)
2459{
2460	struct ib_mad_agent_private	*mad_agent_priv;
2461	unsigned long			flags;
2462	int				ret;
2463
2464	mad_agent_priv = mad_send_wr->mad_agent_priv;
2465	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2466	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2467		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2468		if (ret == IB_RMPP_RESULT_CONSUMED)
2469			goto done;
2470	} else
2471		ret = IB_RMPP_RESULT_UNHANDLED;
2472
2473	if (mad_send_wc->status != IB_WC_SUCCESS &&
2474	    mad_send_wr->status == IB_WC_SUCCESS) {
2475		mad_send_wr->status = mad_send_wc->status;
2476		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2477	}
2478
2479	if (--mad_send_wr->refcount > 0) {
2480		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2481		    mad_send_wr->status == IB_WC_SUCCESS) {
2482			wait_for_response(mad_send_wr);
2483		}
2484		goto done;
2485	}
2486
2487	/* Remove send from MAD agent and notify client of completion */
2488	list_del(&mad_send_wr->agent_list);
2489	adjust_timeout(mad_agent_priv);
2490	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2491
2492	if (mad_send_wr->status != IB_WC_SUCCESS)
2493		mad_send_wc->status = mad_send_wr->status;
2494	if (ret == IB_RMPP_RESULT_INTERNAL)
2495		ib_rmpp_send_handler(mad_send_wc);
2496	else
2497		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2498						   mad_send_wc);
2499
2500	/* Release reference on agent taken when sending */
2501	deref_mad_agent(mad_agent_priv);
2502	return;
2503done:
2504	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2505}
2506
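/*
 * Completion handler for send work requests: unmap the header and
 * payload buffers, complete the send, and if another send was waiting
 * on the overflow list, post it to the QP.
 */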
2507static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2508{
2509	struct ib_mad_port_private *port_priv = cq->cq_context;
2510	struct ib_mad_list_head *mad_list =
2511		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2512	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
2513	struct ib_mad_qp_info		*qp_info;
2514	struct ib_mad_queue		*send_queue;
2515	struct ib_mad_send_wc		mad_send_wc;
2516	unsigned long flags;
2517	int ret;
2518
2519	if (list_empty_careful(&port_priv->port_list))
2520		return;
2521
2522	if (wc->status != IB_WC_SUCCESS) {
2523		if (!ib_mad_send_error(port_priv, wc))
2524			return;
2525	}
2526
2527	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2528				   mad_list);
2529	send_queue = mad_list->mad_queue;
2530	qp_info = send_queue->qp_info;
2531
2532	trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
2533	trace_ib_mad_send_done_handler(mad_send_wr, wc);
2534
2535retry:
2536	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2537			    mad_send_wr->header_mapping,
2538			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2539	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2540			    mad_send_wr->payload_mapping,
2541			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2542	queued_send_wr = NULL;
2543	spin_lock_irqsave(&send_queue->lock, flags);
2544	list_del(&mad_list->list);
2545
2546	/* Move queued send to the send queue */
2547	if (send_queue->count-- > send_queue->max_active) {
2548		mad_list = container_of(qp_info->overflow_list.next,
2549					struct ib_mad_list_head, list);
2550		queued_send_wr = container_of(mad_list,
2551					struct ib_mad_send_wr_private,
2552					mad_list);
2553		list_move_tail(&mad_list->list, &send_queue->list);
2554	}
2555	spin_unlock_irqrestore(&send_queue->lock, flags);
2556
2557	mad_send_wc.send_buf = &mad_send_wr->send_buf;
2558	mad_send_wc.status = wc->status;
2559	mad_send_wc.vendor_err = wc->vendor_err;
2560	if (atomic_read(&qp_info->snoop_count))
2561		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2562			   IB_MAD_SNOOP_SEND_COMPLETIONS);
2563	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2564
2565	if (queued_send_wr) {
2566		trace_ib_mad_send_done_resend(queued_send_wr, qp_info);
2567		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2568				   NULL);
2569		if (ret) {
2570			dev_err(&port_priv->device->dev,
2571				"ib_post_send failed: %d\n", ret);
2572			mad_send_wr = queued_send_wr;
2573			wc->status = IB_WC_LOC_QP_OP_ERR;
2574			goto retry;
2575		}
2576	}
2577}
2578
2579static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2580{
2581	struct ib_mad_send_wr_private *mad_send_wr;
2582	struct ib_mad_list_head *mad_list;
2583	unsigned long flags;
2584
2585	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2586	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2587		mad_send_wr = container_of(mad_list,
2588					   struct ib_mad_send_wr_private,
2589					   mad_list);
2590		mad_send_wr->retry = 1;
2591	}
2592	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2593}
2594
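/*
 * Handle a send completion error.  Flushed sends that were marked for
 * retry are simply reposted; other errors move the QP from SQE back to
 * RTS and mark the remaining sends for retry.  Returns true if the
 * failed send should still be completed back to its sender.
 */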
2595static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2596		struct ib_wc *wc)
2597{
2598	struct ib_mad_list_head *mad_list =
2599		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2600	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2601	struct ib_mad_send_wr_private *mad_send_wr;
2602	int ret;
2603
2604	/*
2605	 * Send errors will transition the QP to SQE - move
2606	 * QP to RTS and repost flushed work requests
2607	 */
2608	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2609				   mad_list);
2610	if (wc->status == IB_WC_WR_FLUSH_ERR) {
2611		if (mad_send_wr->retry) {
2612			/* Repost send */
2613			mad_send_wr->retry = 0;
2614			trace_ib_mad_error_handler(mad_send_wr, qp_info);
2615			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2616					   NULL);
2617			if (!ret)
2618				return false;
2619		}
2620	} else {
2621		struct ib_qp_attr *attr;
2622
2623		/* Transition QP to RTS and fail offending send */
2624		attr = kmalloc(sizeof *attr, GFP_KERNEL);
2625		if (attr) {
2626			attr->qp_state = IB_QPS_RTS;
2627			attr->cur_qp_state = IB_QPS_SQE;
2628			ret = ib_modify_qp(qp_info->qp, attr,
2629					   IB_QP_STATE | IB_QP_CUR_STATE);
2630			kfree(attr);
2631			if (ret)
2632				dev_err(&port_priv->device->dev,
2633					"%s - ib_modify_qp to RTS: %d\n",
2634					__func__, ret);
2635			else
2636				mark_sends_for_retry(qp_info);
2637		}
2638	}
2639
2640	return true;
2641}
2642
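/*
 * Cancel all outstanding sends for an agent: mark active sends as
 * flushed and report every request on the wait list to the client with
 * IB_WC_WR_FLUSH_ERR status.
 */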
2643static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2644{
2645	unsigned long flags;
2646	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2647	struct ib_mad_send_wc mad_send_wc;
2648	struct list_head cancel_list;
2649
2650	INIT_LIST_HEAD(&cancel_list);
2651
2652	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2653	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2654				 &mad_agent_priv->send_list, agent_list) {
2655		if (mad_send_wr->status == IB_WC_SUCCESS) {
2656			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2657			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2658		}
2659	}
2660
2661	/* Empty wait list to prevent receives from finding a request */
2662	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2663	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2664
2665	/* Report all cancelled requests */
2666	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2667	mad_send_wc.vendor_err = 0;
2668
2669	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2670				 &cancel_list, agent_list) {
2671		mad_send_wc.send_buf = &mad_send_wr->send_buf;
2672		list_del(&mad_send_wr->agent_list);
2673		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2674						   &mad_send_wc);
2675		atomic_dec(&mad_agent_priv->refcount);
2676	}
2677}
2678
2679static struct ib_mad_send_wr_private*
2680find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2681	     struct ib_mad_send_buf *send_buf)
2682{
2683	struct ib_mad_send_wr_private *mad_send_wr;
2684
2685	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2686			    agent_list) {
2687		if (&mad_send_wr->send_buf == send_buf)
2688			return mad_send_wr;
2689	}
2690
2691	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2692			    agent_list) {
2693		if (is_rmpp_data_mad(mad_agent_priv,
2694				     mad_send_wr->send_buf.mad) &&
2695		    &mad_send_wr->send_buf == send_buf)
2696			return mad_send_wr;
2697	}
2698	return NULL;
2699}
2700
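/*
 * Modify the timeout of an outstanding send.  A timeout of zero cancels
 * the MAD; the request is then completed back to the client with
 * IB_WC_WR_FLUSH_ERR status.
 */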
2701int ib_modify_mad(struct ib_mad_agent *mad_agent,
2702		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2703{
2704	struct ib_mad_agent_private *mad_agent_priv;
2705	struct ib_mad_send_wr_private *mad_send_wr;
2706	unsigned long flags;
2707	int active;
2708
2709	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2710				      agent);
2711	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2712	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2713	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2714		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2715		return -EINVAL;
2716	}
2717
2718	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2719	if (!timeout_ms) {
2720		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2721		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2722	}
2723
2724	mad_send_wr->send_buf.timeout_ms = timeout_ms;
2725	if (active)
2726		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2727	else
2728		ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2729
2730	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2731	return 0;
2732}
2733EXPORT_SYMBOL(ib_modify_mad);
2734
2735void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2736		   struct ib_mad_send_buf *send_buf)
2737{
2738	ib_modify_mad(mad_agent, send_buf, 0);
2739}
2740EXPORT_SYMBOL(ib_cancel_mad);
2741
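/*
 * Work handler for MADs that were processed locally (e.g. SMPs answered
 * by the local agent without going on the wire): deliver the queued
 * receive to the receiving agent, then complete the original send.
 */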
2742static void local_completions(struct work_struct *work)
2743{
2744	struct ib_mad_agent_private *mad_agent_priv;
2745	struct ib_mad_local_private *local;
2746	struct ib_mad_agent_private *recv_mad_agent;
2747	unsigned long flags;
2748	int free_mad;
2749	struct ib_wc wc;
2750	struct ib_mad_send_wc mad_send_wc;
2751	bool opa;
2752
2753	mad_agent_priv =
2754		container_of(work, struct ib_mad_agent_private, local_work);
2755
2756	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2757			       mad_agent_priv->qp_info->port_priv->port_num);
2758
2759	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2760	while (!list_empty(&mad_agent_priv->local_list)) {
2761		local = list_entry(mad_agent_priv->local_list.next,
2762				   struct ib_mad_local_private,
2763				   completion_list);
2764		list_del(&local->completion_list);
2765		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2766		free_mad = 0;
2767		if (local->mad_priv) {
2768			u8 base_version;
2769			recv_mad_agent = local->recv_mad_agent;
2770			if (!recv_mad_agent) {
2771				dev_err(&mad_agent_priv->agent.device->dev,
2772					"No receive MAD agent for local completion\n");
2773				free_mad = 1;
2774				goto local_send_completion;
2775			}
2776
2777			/*
2778			 * Defined behavior is to complete response
2779			 * before request
2780			 */
2781			build_smp_wc(recv_mad_agent->agent.qp,
2782				     local->mad_send_wr->send_wr.wr.wr_cqe,
2783				     be16_to_cpu(IB_LID_PERMISSIVE),
2784				     local->mad_send_wr->send_wr.pkey_index,
2785				     recv_mad_agent->agent.port_num, &wc);
2786
2787			local->mad_priv->header.recv_wc.wc = &wc;
2788
2789			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2790			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2791				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2792				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2793			} else {
2794				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2795				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2796			}
2797
2798			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2799			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2800				 &local->mad_priv->header.recv_wc.rmpp_list);
2801			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2802			local->mad_priv->header.recv_wc.recv_buf.mad =
2803						(struct ib_mad *)local->mad_priv->mad;
2804			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2805				snoop_recv(recv_mad_agent->qp_info,
2806					  &local->mad_priv->header.recv_wc,
2807					   IB_MAD_SNOOP_RECVS);
2808			recv_mad_agent->agent.recv_handler(
2809						&recv_mad_agent->agent,
2810						&local->mad_send_wr->send_buf,
2811						&local->mad_priv->header.recv_wc);
2812			spin_lock_irqsave(&recv_mad_agent->lock, flags);
2813			atomic_dec(&recv_mad_agent->refcount);
2814			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2815		}
2816
2817local_send_completion:
2818		/* Complete send */
2819		mad_send_wc.status = IB_WC_SUCCESS;
2820		mad_send_wc.vendor_err = 0;
2821		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2822		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2823			snoop_send(mad_agent_priv->qp_info,
2824				   &local->mad_send_wr->send_buf,
2825				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2826		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2827						   &mad_send_wc);
2828
2829		spin_lock_irqsave(&mad_agent_priv->lock, flags);
2830		atomic_dec(&mad_agent_priv->refcount);
2831		if (free_mad)
2832			kfree(local->mad_priv);
2833		kfree(local);
2834	}
2835	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2836}
2837
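/*
 * Retry a timed-out send if it has retries left: recompute the timeout,
 * resend (through the RMPP layer when applicable) and put the request
 * back on the send list.  Returns 0 on success.
 */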
2838static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2839{
2840	int ret;
2841
2842	if (!mad_send_wr->retries_left)
2843		return -ETIMEDOUT;
2844
2845	mad_send_wr->retries_left--;
2846	mad_send_wr->send_buf.retries++;
2847
2848	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2849
2850	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2851		ret = ib_retry_rmpp(mad_send_wr);
2852		switch (ret) {
2853		case IB_RMPP_RESULT_UNHANDLED:
2854			ret = ib_send_mad(mad_send_wr);
2855			break;
2856		case IB_RMPP_RESULT_CONSUMED:
2857			ret = 0;
2858			break;
2859		default:
2860			ret = -ECOMM;
2861			break;
2862		}
2863	} else
2864		ret = ib_send_mad(mad_send_wr);
2865
2866	if (!ret) {
2867		mad_send_wr->refcount++;
2868		list_add_tail(&mad_send_wr->agent_list,
2869			      &mad_send_wr->mad_agent_priv->send_list);
2870	}
2871	return ret;
2872}
2873
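/*
 * Delayed work handler that walks the agent's wait list, retries
 * requests that still have retries left, and reports the rest to the
 * client as IB_WC_RESP_TIMEOUT_ERR.  Re-arms itself for the next
 * pending timeout.
 */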
2874static void timeout_sends(struct work_struct *work)
2875{
2876	struct ib_mad_agent_private *mad_agent_priv;
2877	struct ib_mad_send_wr_private *mad_send_wr;
2878	struct ib_mad_send_wc mad_send_wc;
2879	unsigned long flags, delay;
2880
2881	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2882				      timed_work.work);
2883	mad_send_wc.vendor_err = 0;
2884
2885	spin_lock_irqsave(&mad_agent_priv->lock, flags);
2886	while (!list_empty(&mad_agent_priv->wait_list)) {
2887		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2888					 struct ib_mad_send_wr_private,
2889					 agent_list);
2890
2891		if (time_after(mad_send_wr->timeout, jiffies)) {
2892			delay = mad_send_wr->timeout - jiffies;
2893			if ((long)delay <= 0)
2894				delay = 1;
2895			queue_delayed_work(mad_agent_priv->qp_info->
2896					   port_priv->wq,
2897					   &mad_agent_priv->timed_work, delay);
2898			break;
2899		}
2900
2901		list_del(&mad_send_wr->agent_list);
2902		if (mad_send_wr->status == IB_WC_SUCCESS &&
2903		    !retry_send(mad_send_wr))
2904			continue;
2905
2906		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2907
2908		if (mad_send_wr->status == IB_WC_SUCCESS)
2909			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2910		else
2911			mad_send_wc.status = mad_send_wr->status;
2912		mad_send_wc.send_buf = &mad_send_wr->send_buf;
2913		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2914						   &mad_send_wc);
2915
2916		atomic_dec(&mad_agent_priv->refcount);
2917		spin_lock_irqsave(&mad_agent_priv->lock, flags);
2918	}
2919	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2920}
2921
2922/*
2923 * Allocate receive MADs and post receive WRs for them
2924 */
2925static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2926				    struct ib_mad_private *mad)
2927{
2928	unsigned long flags;
2929	int post, ret;
2930	struct ib_mad_private *mad_priv;
2931	struct ib_sge sg_list;
2932	struct ib_recv_wr recv_wr;
2933	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2934
2935	/* Initialize common scatter list fields */
2936	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2937
2938	/* Initialize common receive WR fields */
2939	recv_wr.next = NULL;
2940	recv_wr.sg_list = &sg_list;
2941	recv_wr.num_sge = 1;
2942
2943	do {
2944		/* Allocate and map receive buffer */
2945		if (mad) {
2946			mad_priv = mad;
2947			mad = NULL;
2948		} else {
2949			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2950						     GFP_ATOMIC);
2951			if (!mad_priv) {
2952				ret = -ENOMEM;
2953				break;
2954			}
2955		}
2956		sg_list.length = mad_priv_dma_size(mad_priv);
2957		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2958						 &mad_priv->grh,
2959						 mad_priv_dma_size(mad_priv),
2960						 DMA_FROM_DEVICE);
2961		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2962						  sg_list.addr))) {
2963			ret = -ENOMEM;
2964			break;
2965		}
2966		mad_priv->header.mapping = sg_list.addr;
2967		mad_priv->header.mad_list.mad_queue = recv_queue;
2968		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2969		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2970
2971		/* Post receive WR */
2972		spin_lock_irqsave(&recv_queue->lock, flags);
2973		post = (++recv_queue->count < recv_queue->max_active);
2974		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2975		spin_unlock_irqrestore(&recv_queue->lock, flags);
2976		ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
2977		if (ret) {
2978			spin_lock_irqsave(&recv_queue->lock, flags);
2979			list_del(&mad_priv->header.mad_list.list);
2980			recv_queue->count--;
2981			spin_unlock_irqrestore(&recv_queue->lock, flags);
2982			ib_dma_unmap_single(qp_info->port_priv->device,
2983					    mad_priv->header.mapping,
2984					    mad_priv_dma_size(mad_priv),
2985					    DMA_FROM_DEVICE);
2986			kfree(mad_priv);
2987			dev_err(&qp_info->port_priv->device->dev,
2988				"ib_post_recv failed: %d\n", ret);
2989			break;
2990		}
2991	} while (post);
2992
2993	return ret;
2994}
2995
2996/*
2997 * Return all the posted receive MADs
2998 */
2999static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
3000{
3001	struct ib_mad_private_header *mad_priv_hdr;
3002	struct ib_mad_private *recv;
3003	struct ib_mad_list_head *mad_list;
3004
3005	if (!qp_info->qp)
3006		return;
3007
3008	while (!list_empty(&qp_info->recv_queue.list)) {
3009
3010		mad_list = list_entry(qp_info->recv_queue.list.next,
3011				      struct ib_mad_list_head, list);
3012		mad_priv_hdr = container_of(mad_list,
3013					    struct ib_mad_private_header,
3014					    mad_list);
3015		recv = container_of(mad_priv_hdr, struct ib_mad_private,
3016				    header);
3017
3018		/* Remove from posted receive MAD list */
3019		list_del(&mad_list->list);
3020
3021		ib_dma_unmap_single(qp_info->port_priv->device,
3022				    recv->header.mapping,
3023				    mad_priv_dma_size(recv),
3024				    DMA_FROM_DEVICE);
3025		kfree(recv);
3026	}
3027
3028	qp_info->recv_queue.count = 0;
3029}
3030
3031/*
3032 * Start the port
3033 */
3034static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
3035{
3036	int ret, i;
3037	struct ib_qp_attr *attr;
3038	struct ib_qp *qp;
3039	u16 pkey_index;
3040
3041	attr = kmalloc(sizeof *attr, GFP_KERNEL);
3042	if (!attr)
3043		return -ENOMEM;
3044
3045	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
3046			   IB_DEFAULT_PKEY_FULL, &pkey_index);
3047	if (ret)
3048		pkey_index = 0;
3049
3050	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3051		qp = port_priv->qp_info[i].qp;
3052		if (!qp)
3053			continue;
3054
3055		/*
3056		 * PKey index for QP1 is irrelevant but
3057		 * one is needed for the Reset to Init transition
3058		 */
3059		attr->qp_state = IB_QPS_INIT;
3060		attr->pkey_index = pkey_index;
3061		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
3062		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
3063					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
3064		if (ret) {
3065			dev_err(&port_priv->device->dev,
3066				"Couldn't change QP%d state to INIT: %d\n",
3067				i, ret);
3068			goto out;
3069		}
3070
3071		attr->qp_state = IB_QPS_RTR;
3072		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
3073		if (ret) {
3074			dev_err(&port_priv->device->dev,
3075				"Couldn't change QP%d state to RTR: %d\n",
3076				i, ret);
3077			goto out;
3078		}
3079
3080		attr->qp_state = IB_QPS_RTS;
3081		attr->sq_psn = IB_MAD_SEND_Q_PSN;
3082		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3083		if (ret) {
3084			dev_err(&port_priv->device->dev,
3085				"Couldn't change QP%d state to RTS: %d\n",
3086				i, ret);
3087			goto out;
3088		}
3089	}
3090
3091	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3092	if (ret) {
3093		dev_err(&port_priv->device->dev,
3094			"Failed to request completion notification: %d\n",
3095			ret);
3096		goto out;
3097	}
3098
3099	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3100		if (!port_priv->qp_info[i].qp)
3101			continue;
3102
3103		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3104		if (ret) {
3105			dev_err(&port_priv->device->dev,
3106				"Couldn't post receive WRs\n");
3107			goto out;
3108		}
3109	}
3110out:
3111	kfree(attr);
3112	return ret;
3113}
3114
3115static void qp_event_handler(struct ib_event *event, void *qp_context)
3116{
3117	struct ib_mad_qp_info	*qp_info = qp_context;
3118
3119	/* It's worse than that! He's dead, Jim! */
3120	dev_err(&qp_info->port_priv->device->dev,
3121		"Fatal error (%d) on MAD QP (%d)\n",
3122		event->event, qp_info->qp->qp_num);
3123}
3124
3125static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3126			   struct ib_mad_queue *mad_queue)
3127{
3128	mad_queue->qp_info = qp_info;
3129	mad_queue->count = 0;
3130	spin_lock_init(&mad_queue->lock);
3131	INIT_LIST_HEAD(&mad_queue->list);
3132}
3133
3134static void init_mad_qp(struct ib_mad_port_private *port_priv,
3135			struct ib_mad_qp_info *qp_info)
3136{
3137	qp_info->port_priv = port_priv;
3138	init_mad_queue(qp_info, &qp_info->send_queue);
3139	init_mad_queue(qp_info, &qp_info->recv_queue);
3140	INIT_LIST_HEAD(&qp_info->overflow_list);
3141	spin_lock_init(&qp_info->snoop_lock);
3142	qp_info->snoop_table = NULL;
3143	qp_info->snoop_table_size = 0;
3144	atomic_set(&qp_info->snoop_count, 0);
3145}
3146
3147static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3148			 enum ib_qp_type qp_type)
3149{
3150	struct ib_qp_init_attr	qp_init_attr;
3151	int ret;
3152
3153	memset(&qp_init_attr, 0, sizeof qp_init_attr);
3154	qp_init_attr.send_cq = qp_info->port_priv->cq;
3155	qp_init_attr.recv_cq = qp_info->port_priv->cq;
3156	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3157	qp_init_attr.cap.max_send_wr = mad_sendq_size;
3158	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3159	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3160	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3161	qp_init_attr.qp_type = qp_type;
3162	qp_init_attr.port_num = qp_info->port_priv->port_num;
3163	qp_init_attr.qp_context = qp_info;
3164	qp_init_attr.event_handler = qp_event_handler;
3165	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3166	if (IS_ERR(qp_info->qp)) {
3167		dev_err(&qp_info->port_priv->device->dev,
3168			"Couldn't create ib_mad QP%d\n",
3169			get_spl_qp_index(qp_type));
3170		ret = PTR_ERR(qp_info->qp);
3171		goto error;
3172	}
3173	/* Use minimum queue sizes unless the CQ is resized */
3174	qp_info->send_queue.max_active = mad_sendq_size;
3175	qp_info->recv_queue.max_active = mad_recvq_size;
3176	return 0;
3177
3178error:
3179	return ret;
3180}
3181
3182static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3183{
3184	if (!qp_info->qp)
3185		return;
3186
3187	ib_destroy_qp(qp_info->qp);
3188	kfree(qp_info->snoop_table);
3189}
3190
3191/*
3192 * Open the port
3193 * Create the QP, PD, MR, and CQ if needed
3194 */
3195static int ib_mad_port_open(struct ib_device *device,
3196			    int port_num)
3197{
3198	int ret, cq_size;
3199	struct ib_mad_port_private *port_priv;
3200	unsigned long flags;
3201	char name[sizeof "ib_mad123"];
3202	int has_smi;
3203
3204	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3205		return -EFAULT;
3206
3207	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3208		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3209		return -EFAULT;
3210
3211	/* Create new device info */
3212	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
3213	if (!port_priv)
3214		return -ENOMEM;
3215
3216	port_priv->device = device;
3217	port_priv->port_num = port_num;
3218	spin_lock_init(&port_priv->reg_lock);
3219	init_mad_qp(port_priv, &port_priv->qp_info[0]);
3220	init_mad_qp(port_priv, &port_priv->qp_info[1]);
3221
3222	cq_size = mad_sendq_size + mad_recvq_size;
3223	has_smi = rdma_cap_ib_smi(device, port_num);
3224	if (has_smi)
3225		cq_size *= 2;
3226
3227	port_priv->pd = ib_alloc_pd(device, 0);
3228	if (IS_ERR(port_priv->pd)) {
3229		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3230		ret = PTR_ERR(port_priv->pd);
3231		goto error3;
3232	}
3233
3234	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3235			IB_POLL_UNBOUND_WORKQUEUE);
3236	if (IS_ERR(port_priv->cq)) {
3237		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3238		ret = PTR_ERR(port_priv->cq);
3239		goto error4;
3240	}
3241
3242	if (has_smi) {
3243		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3244		if (ret)
3245			goto error6;
3246	}
3247	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3248	if (ret)
3249		goto error7;
3250
3251	snprintf(name, sizeof name, "ib_mad%d", port_num);
3252	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3253	if (!port_priv->wq) {
3254		ret = -ENOMEM;
3255		goto error8;
3256	}
3257
3258	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3259	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3260	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3261
3262	ret = ib_mad_port_start(port_priv);
3263	if (ret) {
3264		dev_err(&device->dev, "Couldn't start port\n");
3265		goto error9;
3266	}
3267
3268	return 0;
3269
3270error9:
3271	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3272	list_del_init(&port_priv->port_list);
3273	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3274
3275	destroy_workqueue(port_priv->wq);
3276error8:
3277	destroy_mad_qp(&port_priv->qp_info[1]);
3278error7:
3279	destroy_mad_qp(&port_priv->qp_info[0]);
3280error6:
3281	ib_free_cq(port_priv->cq);
3282	cleanup_recv_queue(&port_priv->qp_info[1]);
3283	cleanup_recv_queue(&port_priv->qp_info[0]);
3284error4:
3285	ib_dealloc_pd(port_priv->pd);
3286error3:
3287	kfree(port_priv);
3288
3289	return ret;
3290}
3291
3292/*
3293 * Close the port
3294 * If there are no classes using the port, free the port
3295 * resources (CQ, MR, PD, QP) and remove the port's info structure
3296 */
3297static int ib_mad_port_close(struct ib_device *device, int port_num)
3298{
3299	struct ib_mad_port_private *port_priv;
3300	unsigned long flags;
3301
3302	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3303	port_priv = __ib_get_mad_port(device, port_num);
3304	if (port_priv == NULL) {
3305		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3306		dev_err(&device->dev, "Port %d not found\n", port_num);
3307		return -ENODEV;
3308	}
3309	list_del_init(&port_priv->port_list);
3310	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3311
3312	destroy_workqueue(port_priv->wq);
3313	destroy_mad_qp(&port_priv->qp_info[1]);
3314	destroy_mad_qp(&port_priv->qp_info[0]);
3315	ib_free_cq(port_priv->cq);
3316	ib_dealloc_pd(port_priv->pd);
3317	cleanup_recv_queue(&port_priv->qp_info[1]);
3318	cleanup_recv_queue(&port_priv->qp_info[0]);
3319	/* XXX: Handle deallocation of MAD registration tables */
3320
3321	kfree(port_priv);
3322
3323	return 0;
3324}
3325
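/*
 * Client "add" callback: bring up the MAD and agent services on every
 * port of the device that supports MADs, unwinding on failure.
 */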
3326static void ib_mad_init_device(struct ib_device *device)
3327{
3328	int start, i;
3329
3330	start = rdma_start_port(device);
3331
3332	for (i = start; i <= rdma_end_port(device); i++) {
3333		if (!rdma_cap_ib_mad(device, i))
3334			continue;
3335
3336		if (ib_mad_port_open(device, i)) {
3337			dev_err(&device->dev, "Couldn't open port %d\n", i);
3338			goto error;
3339		}
3340		if (ib_agent_port_open(device, i)) {
3341			dev_err(&device->dev,
3342				"Couldn't open port %d for agents\n", i);
3343			goto error_agent;
3344		}
3345	}
3346	return;
3347
3348error_agent:
3349	if (ib_mad_port_close(device, i))
3350		dev_err(&device->dev, "Couldn't close port %d\n", i);
3351
3352error:
3353	while (--i >= start) {
3354		if (!rdma_cap_ib_mad(device, i))
3355			continue;
3356
3357		if (ib_agent_port_close(device, i))
3358			dev_err(&device->dev,
3359				"Couldn't close port %d for agents\n", i);
3360		if (ib_mad_port_close(device, i))
3361			dev_err(&device->dev, "Couldn't close port %d\n", i);
3362	}
3363}
3364
3365static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3366{
3367	unsigned int i;
3368
3369	rdma_for_each_port (device, i) {
3370		if (!rdma_cap_ib_mad(device, i))
3371			continue;
3372
3373		if (ib_agent_port_close(device, i))
3374			dev_err(&device->dev,
3375				"Couldn't close port %d for agents\n", i);
3376		if (ib_mad_port_close(device, i))
3377			dev_err(&device->dev, "Couldn't close port %d\n", i);
3378	}
3379}
3380
3381static struct ib_client mad_client = {
3382	.name   = "mad",
3383	.add = ib_mad_init_device,
3384	.remove = ib_mad_remove_device
3385};
3386
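/*
 * Clamp the send/receive queue size parameters to the supported range,
 * initialize the port list, and register the MAD client with the IB
 * core.
 */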
3387int ib_mad_init(void)
3388{
3389	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3390	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3391
3392	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3393	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3394
3395	INIT_LIST_HEAD(&ib_mad_port_list);
3396
3397	if (ib_register_client(&mad_client)) {
3398		pr_err("Couldn't register ib_mad client\n");
3399		return -EINVAL;
3400	}
3401
3402	return 0;
3403}
3404
3405void ib_mad_cleanup(void)
3406{
3407	ib_unregister_client(&mad_client);
3408}