/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
#include <rdma/rdma_cm.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
	u16		     pkey_index;
	u8		     src_path_mask;
};

enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t                   classport_lock; /* protects class port info set */
	spinlock_t           ah_lock;
	u32		     port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *sa_query, int status,
			 struct ib_sa_mad *mad);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004
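
/*
 * Internal ib_sa_query::flags bits: IB_SA_ENABLE_LOCAL_SERVICE routes a
 * path query through the RDMA_NL_GROUP_LS netlink group before falling
 * back to a MAD, IB_SA_CANCEL marks a netlink-queued query for
 * cancellation by the timeout worker, and IB_SA_QUERY_OPA selects OPA
 * rather than IB MAD formatting.
 */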
struct ib_sa_path_query {
	void (*callback)(int status, struct sa_path_rec *rec,
			 unsigned int num_paths, void *context);
	void *context;
	struct ib_sa_query sa_query;
	struct sa_path_rec *conv_pr;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
		.len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
		.len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
		.len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};

static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};
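
/*
 * The SA module is itself an IB client: the core invokes ib_sa_add_one()
 * and ib_sa_remove_one() for each IB device as it comes and goes, and
 * per-port state lives in the ib_sa_port array of the ib_sa_device
 * attached as client data.
 */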

static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_path_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct sa_path_rec, field),	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(ib.dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(ib.slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(ib.raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define OPA_PATH_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct sa_path_rec, field), \
	.struct_size_bytes   = \
		sizeof_field(struct sa_path_rec, field),	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field opa_path_rec_table[] = {
	{ OPA_PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ OPA_PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_PATH_REC_FIELD(opa.dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_PATH_REC_FIELD(opa.slid),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_PATH_REC_FIELD(opa.raw_traffic),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ OPA_PATH_REC_FIELD(flow_label),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ OPA_PATH_REC_FIELD(hop_limit),
	  .offset_words = 12,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(traffic_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(reversible),
	  .offset_words = 13,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(numb_path),
	  .offset_words = 13,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ OPA_PATH_REC_FIELD(pkey),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_PATH_REC_FIELD(opa.l2_8B),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_10B),
	  .offset_words = 14,
	  .offset_bits  = 1,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_9B),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_16B),
	  .offset_words = 14,
	  .offset_bits  = 3,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 4,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_type),
	  .offset_words = 14,
	  .offset_bits  = 6,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_priority),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 3 },
	{ OPA_PATH_REC_FIELD(sl),
	  .offset_words = 14,
	  .offset_bits  = 19,
	  .size_bits    = 5 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(mtu_selector),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(mtu),
	  .offset_words = 15,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(rate_selector),
	  .offset_words = 15,
	  .offset_bits  = 8,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(rate),
	  .offset_words = 15,
	  .offset_bits  = 10,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(packet_life_time),
	  .offset_words = 15,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(preference),
	  .offset_words = 15,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_sa_mcmember_rec, field),	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_class_port_info, field),	\
	.field_name          = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
};

#define OPA_CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct opa_class_port_info, field),	\
	.struct_size_bytes   = \
		sizeof_field(struct opa_class_port_info, field),	\
	.field_name          = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 18,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 18,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
	  .offset_words = 19,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 19,
	  .offset_bits  = 8,
	  .size_bits    = 24 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_sa_guidinfo_rec, field),	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
};

#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	strscpy_pad(header->device_name,
		    dev_name(&query->port->agent->device->dev),
		    LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_ALL;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}
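
/*
 * Multicast the path query to the RDMA_NL_GROUP_LS netlink group
 * (typically serviced by a user-space daemon such as ibacm; the caller
 * checks for listeners via rdma_nl_chk_listeners()) and queue it on
 * ib_nl_request_list so that ib_nl_request_timeout() can fall back to
 * a MAD if no response arrives within sa_local_svc_timeout_ms.
 */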
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	struct ib_sa_mad *mad;
	int len;
	unsigned long flags;
	unsigned long delay;
	gfp_t gfp_flag;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
		GFP_NOWAIT;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);

	if (ret)
		goto out;

	/* Put the request on the list. */
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);

out:
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM];
	struct ib_sa_path_query *path_query;
	struct ib_path_rec_data *rec_data;
	struct ib_mad_send_wc mad_send_wc;
	const struct nlattr *head, *curr;
	struct ib_sa_mad *mad = NULL;
	int len, rem, status = -EIO;
	unsigned int num_prs = 0;
	u32 mask = 0;

	if (!query->callback)
		goto out;

	path_query = container_of(query, struct ib_sa_path_query, sa_query);
	mad = query->mad_buf->mad;

	head = (const struct nlattr *) nlmsg_data(nlh);
	len = nlmsg_len(nlh);
	switch (query->path_use) {
	case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
		mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
		break;

	case LS_RESOLVE_PATH_USE_ALL:
		mask = IB_PATH_PRIMARY;
		break;

	case LS_RESOLVE_PATH_USE_GMP:
	default:
		mask = IB_PATH_PRIMARY | IB_PATH_GMP |
			IB_PATH_BIDIRECTIONAL;
		break;
	}

	nla_for_each_attr(curr, head, len, rem) {
		if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
			continue;

		rec_data = nla_data(curr);
		if ((rec_data->flags & mask) != mask)
			continue;

		if ((query->flags & IB_SA_QUERY_OPA) ||
		    path_query->conv_pr) {
			mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
			memcpy(mad->data, rec_data->path_rec,
			       sizeof(rec_data->path_rec));
			query->callback(query, 0, mad);
			goto out;
		}

		status = 0;
		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  rec_data->path_rec, &recs[num_prs]);
		recs[num_prs].flags = rec_data->flags;
		recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB;
		sa_path_set_dmac_zero(&recs[num_prs]);

		num_prs++;
		if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
			break;
	}

	if (!status) {
		mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
		path_query->callback(status, recs, num_prs,
				     path_query->context);
	} else
		query->callback(query, status, mad);

out:
	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return 0;
}
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

	return 1;
}

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	unsigned long flags;
	struct ib_sa_query *query = NULL, *iter;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(iter, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == iter->seq) {
			if (!ib_sa_query_cancelled(iter)) {
				list_del(&iter->list);
				query = iter;
			}
			break;
		}
	}

	if (!query) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return 0;
}

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	rdma_destroy_ah(sm_ah->ah, 0);
	kfree(sm_ah);
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id: ID of query to cancel
 * @query: query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_send_buf *mad_buf;

	xa_lock_irqsave(&queries, flags);
	if (xa_load(&queries, id) != query) {
		xa_unlock_irqrestore(&queries, flags);
		return;
	}
	mad_buf = query->mad_buf;
	xa_unlock_irqrestore(&queries, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
				   struct sa_path_rec *rec,
				   struct rdma_ah_attr *ah_attr,
				   const struct ib_gid_attr *gid_attr)
{
	enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);

	if (!gid_attr) {
		gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
						 port_num, NULL);
		if (IS_ERR(gid_attr))
			return PTR_ERR(gid_attr);
	} else
		rdma_hold_gid_attr(gid_attr);

	rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
				be32_to_cpu(rec->flow_label),
				rec->hop_limit,	rec->traffic_class,
				gid_attr);
	return 0;
}

/**
 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
 *   an SA path record.
 * @device: Device with which the ah attributes are associated.
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialize from the path record.
 * @gid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success,
 * (a) for the IB link layer, ah_attr optionally holds a reference to the
 * SGID attribute when a GRH is present;
 * (b) for the RoCE link layer, ah_attr holds a reference to the SGID
 * attribute.
 * The user must invoke rdma_destroy_ah_attr() to release any SGID
 * attribute reference initialized by ib_init_ah_attr_from_path().
 */
int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
			      struct sa_path_rec *rec,
			      struct rdma_ah_attr *ah_attr,
			      const struct ib_gid_attr *gid_attr)
{
	int ret = 0;

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = rdma_ah_find_type(device, port_num);
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);

	if (sa_path_is_roce(rec)) {
		ret = roce_resolve_route_from_path(rec, gid_attr);
		if (ret)
			return ret;

		memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
	} else {
		rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
		if (sa_path_is_opa(rec) &&
		    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
			rdma_ah_set_make_grd(ah_attr, true);

		rdma_ah_set_path_bits(ah_attr,
				      be32_to_cpu(sa_path_get_slid(rec)) &
				      get_src_path_mask(device, port_num));
	}

	if (rec->hop_limit > 0 || sa_path_is_roce(rec))
		ret = init_ah_attr_grh_fields(device, port_num,
					      rec, ah_attr, gid_attr);
	return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);
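
/*
 * Illustrative caller sketch (device, port_num, rec, pd and ah are
 * hypothetical caller state): resolve a path record into AH attributes,
 * then drop the SGID reference as the kernel-doc above requires.
 *
 *	struct rdma_ah_attr ah_attr;
 *
 *	if (!ib_init_ah_attr_from_path(device, port_num, rec, &ah_attr,
 *				       NULL)) {
 *		ah = rdma_create_ah(pd, &ah_attr, 0);
 *		rdma_destroy_ah_attr(&ah_attr);
 *	}
 */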

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct rdma_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	/*
	 * Always check if sm_ah has valid dlid assigned,
	 * before querying for class port info
	 */
	if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
	    !rdma_is_valid_unicast_lid(&ah_attr)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -EAGAIN;
	}
	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    ((query->flags & IB_SA_QUERY_OPA) ?
					     OPA_MGMT_BASE_VERSION :
					     IB_MGMT_BASE_VERSION));
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
	struct ib_sa_mad *mad = query->mad_buf->mad;
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	if (query->flags & IB_SA_QUERY_OPA) {
		mad->mad_hdr.base_version  = OPA_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
	} else {
		mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
	}
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid           =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}
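
/*
 * send_mad() first tries the netlink local service when it is enabled
 * for the query and a listener exists on RDMA_NL_GROUP_LS, and only
 * then posts the MAD.  Note that the caller's timeout_ms budget is
 * divided across up to ten MAD-layer retries below.
 */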
static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
		    gfp_t gfp_mask)
{
	unsigned long flags;
	int ret, id;
	const int nmbr_sa_query_retries = 10;

	xa_lock_irqsave(&queries, flags);
	ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
	xa_unlock_irqrestore(&queries, flags);
	if (ret < 0)
		return ret;

	query->mad_buf->timeout_ms  = timeout_ms / nmbr_sa_query_retries;
	query->mad_buf->retries = nmbr_sa_query_retries;
	if (!query->mad_buf->timeout_ms) {
		/* Special case, very small timeout_ms */
		query->mad_buf->timeout_ms = 1;
		query->mad_buf->retries = timeout_ms;
	}
	query->mad_buf->context[0] = query;
	query->id = id;

	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
	    (!(query->flags & IB_SA_QUERY_OPA))) {
		if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		xa_lock_irqsave(&queries, flags);
		__xa_erase(&queries, id);
		xa_unlock_irqrestore(&queries, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
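
/*
 * Illustrative round trip (mad is a hypothetical struct ib_sa_mad *):
 * the wire-format PathRecord in an SA MAD's data area can be unpacked
 * into the host-order struct and packed back.
 *
 *	struct sa_path_rec rec;
 *
 *	ib_sa_unpack_path(mad->data, &rec);
 *	...
 *	ib_sa_pack_path(&rec, mad->data);
 */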

static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
					 struct ib_sa_device *sa_dev,
					 u32 port_num)
{
	struct ib_sa_port *port;
	unsigned long flags;
	bool ret = false;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->classport_lock, flags);
	if (!port->classport_info.valid)
		goto ret;

	if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
		ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
			OPA_CLASS_PORT_INFO_PR_SUPPORT;
ret:
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}

enum opa_pr_supported {
	PR_NOT_SUPPORTED,
	PR_OPA_SUPPORTED,
	PR_IB_SUPPORTED
};

/*
 * opa_pr_query_possible - Check if the current PR query can be an OPA query.
 *
 * Returns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
				 struct ib_sa_device *sa_dev,
				 struct ib_device *device, u32 port_num)
{
	struct ib_port_attr port_attr;

	if (ib_query_port(device, port_num, &port_attr))
		return PR_NOT_SUPPORTED;

	if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
		return PR_OPA_SUPPORTED;

	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		return PR_NOT_SUPPORTED;
	else
		return PR_IB_SUPPORTED;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status, struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);
	struct sa_path_rec rec = {};

	if (!mad) {
		query->callback(status, NULL, 0, query->context);
		return;
	}

	if (sa_query->flags & IB_SA_QUERY_OPA) {
		ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			  mad->data, &rec);
		rec.rec_type = SA_PATH_REC_TYPE_OPA;
		query->callback(status, &rec, 1, query->context);
		return;
	}

	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
		  mad->data, &rec);
	rec.rec_type = SA_PATH_REC_TYPE_IB;
	sa_path_set_dmac_zero(&rec);

	if (query->conv_pr) {
		struct sa_path_rec opa;

		memset(&opa, 0, sizeof(struct sa_path_rec));
		sa_convert_path_ib_to_opa(&opa, &rec);
		query->callback(status, &opa, 1, query->context);
	} else {
		query->callback(status, &rec, 1, query->context);
	}
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	kfree(query->conv_pr);
	kfree(query);
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Path Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u32 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       unsigned long timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					unsigned int num_paths, void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	enum opa_pr_supported status;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
	    (rec->rec_type != SA_PATH_REC_TYPE_OPA))
		return -EINVAL;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
		status = opa_pr_query_possible(client, sa_dev, device, port_num);
		if (status == PR_NOT_SUPPORTED) {
			ret = -EINVAL;
			goto err1;
		} else if (status == PR_OPA_SUPPORTED) {
			query->sa_query.flags |= IB_SA_QUERY_OPA;
		} else {
			query->conv_pr =
				kmalloc(sizeof(*query->conv_pr), gfp_mask);
			if (!query->conv_pr) {
				ret = -ENOMEM;
				goto err1;
			}
		}
	}

	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err2;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	if (query->sa_query.flags & IB_SA_QUERY_OPA) {
		ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			rec, mad->data);
	} else if (query->conv_pr) {
		sa_convert_path_opa_to_ib(query->conv_pr, rec);
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			query->conv_pr, mad->data);
	} else {
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			rec, mad->data);
	}

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
						query->conv_pr : rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err3;

	return ret;

err3:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err2:
	kfree(query->conv_pr);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
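
/*
 * Hypothetical caller sketch (my_client, my_path_done, ctx, rec and
 * query are illustrative names, loosely modeled on how ULPs use this
 * API; they are not defined in this file):
 *
 *	static void my_path_done(int status, struct sa_path_rec *resp,
 *				 unsigned int num_paths, void *context)
 *	{
 *		...			(resp is valid only when status == 0)
 *	}
 *
 *	ib_sa_register_client(&my_client);
 *	id = ib_sa_path_rec_get(&my_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				1000, GFP_KERNEL, my_path_done, ctx, &query);
 *
 * A non-negative return is the query ID, later usable with
 * ib_sa_cancel_query(id, query).
 */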

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status, struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u32 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     unsigned long timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

1711/* Support GuidInfoRecord */
1712static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
1713					int status, struct ib_sa_mad *mad)
 
1714{
1715	struct ib_sa_guidinfo_query *query =
1716		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
1717
1718	if (mad) {
1719		struct ib_sa_guidinfo_rec rec;
1720
1721		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
1722			  mad->data, &rec);
1723		query->callback(status, &rec, query->context);
1724	} else
1725		query->callback(status, NULL, query->context);
1726}
1727
1728static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
1729{
1730	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
1731}
1732
1733int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
1734			      struct ib_device *device, u32 port_num,
1735			      struct ib_sa_guidinfo_rec *rec,
1736			      ib_sa_comp_mask comp_mask, u8 method,
1737			      unsigned long timeout_ms, gfp_t gfp_mask,
1738			      void (*callback)(int status,
1739					       struct ib_sa_guidinfo_rec *resp,
1740					       void *context),
1741			      void *context,
1742			      struct ib_sa_query **sa_query)
1743{
1744	struct ib_sa_guidinfo_query *query;
1745	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1746	struct ib_sa_port *port;
1747	struct ib_mad_agent *agent;
1748	struct ib_sa_mad *mad;
1749	int ret;
1750
1751	if (!sa_dev)
1752		return -ENODEV;
1753
1754	if (method != IB_MGMT_METHOD_GET &&
1755	    method != IB_MGMT_METHOD_SET &&
1756	    method != IB_SA_METHOD_DELETE) {
1757		return -EINVAL;
1758	}
1759
1760	port  = &sa_dev->port[port_num - sa_dev->start_port];
1761	agent = port->agent;
1762
1763	query = kzalloc(sizeof(*query), gfp_mask);
1764	if (!query)
1765		return -ENOMEM;
1766
1767	query->sa_query.port = port;
1768	ret = alloc_mad(&query->sa_query, gfp_mask);
1769	if (ret)
1770		goto err1;
1771
1772	ib_sa_client_get(client);
1773	query->sa_query.client = client;
1774	query->callback        = callback;
1775	query->context         = context;
1776
1777	mad = query->sa_query.mad_buf->mad;
1778	init_mad(&query->sa_query, agent);
1779
1780	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
1781	query->sa_query.release  = ib_sa_guidinfo_rec_release;
1782
1783	mad->mad_hdr.method	 = method;
1784	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
1785	mad->sa_hdr.comp_mask	 = comp_mask;
1786
1787	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
1788		mad->data);
1789
1790	*sa_query = &query->sa_query;
1791
1792	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1793	if (ret < 0)
1794		goto err2;
1795
1796	return ret;
1797
1798err2:
1799	*sa_query = NULL;
1800	ib_sa_client_put(query->sa_query.client);
1801	free_mad(&query->sa_query);
1802
1803err1:
1804	kfree(query);
1805	return ret;
1806}
1807EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
1808
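/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * "my_sa_client", "my_gi_done", "done" and the 3000 msec timeout are
 * assumed names/values for the example.
 *
 *	static void my_gi_done(int status, struct ib_sa_guidinfo_rec *resp,
 *			       void *context)
 *	{
 *		if (!status && resp)
 *			pr_info("GUID block %d\n", resp->block_num);
 *		complete(context);
 *	}
 *
 *	ret = ib_sa_guid_info_rec_query(&my_sa_client, device, port_num,
 *					&rec, IB_SA_GUIDINFO_REC_LID |
 *					IB_SA_GUIDINFO_REC_BLOCK_NUM,
 *					IB_MGMT_METHOD_GET, 3000, GFP_KERNEL,
 *					my_gi_done, &done, &query);
 */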
1809struct ib_classport_info_context {
1810	struct completion	done;
1811	struct ib_sa_query	*sa_query;
1812};
1813
1814static void ib_classportinfo_cb(void *context)
1815{
1816	struct ib_classport_info_context *cb_ctx = context;
1817
1818	complete(&cb_ctx->done);
1819}
1820
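/*
 * Common completion handler for ClassPortInfo queries: unpack the OPA
 * or IB response and, on the first successful reply, cache it in the
 * per-port classport_info under classport_lock, then run the
 * submitter's callback.
 */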
1821static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
1822					      int status, struct ib_sa_mad *mad)
1823{
1824	unsigned long flags;
1825	struct ib_sa_classport_info_query *query =
1826		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
1827	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;
1828
1829	if (mad) {
1830		if (sa_query->flags & IB_SA_QUERY_OPA) {
1831			struct opa_class_port_info rec;
1832
1833			ib_unpack(opa_classport_info_rec_table,
1834				  ARRAY_SIZE(opa_classport_info_rec_table),
1835				  mad->data, &rec);
1836
1837			spin_lock_irqsave(&sa_query->port->classport_lock,
1838					  flags);
1839			if (!status && !info->valid) {
1840				memcpy(&info->data.opa, &rec,
1841				       sizeof(info->data.opa));
1842
1843				info->valid = true;
1844				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
1845			}
1846			spin_unlock_irqrestore(&sa_query->port->classport_lock,
1847					       flags);
1848
1849		} else {
1850			struct ib_class_port_info rec;
1851
1852			ib_unpack(ib_classport_info_rec_table,
1853				  ARRAY_SIZE(ib_classport_info_rec_table),
1854				  mad->data, &rec);
1855
1856			spin_lock_irqsave(&sa_query->port->classport_lock,
1857					  flags);
1858			if (!status && !info->valid) {
1859				memcpy(&info->data.ib, &rec,
1860				       sizeof(info->data.ib));
1861
1862				info->valid = true;
1863				info->data.type = RDMA_CLASS_PORT_INFO_IB;
1864			}
1865			spin_unlock_irqrestore(&sa_query->port->classport_lock,
1866					       flags);
1867		}
1868	}
1869	query->callback(query->context);
1870}
1871
1872static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
1873{
1874	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
1875			   sa_query));
1876}
1877
1878static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
1879					  unsigned long timeout_ms,
1880					  void (*callback)(void *context),
1881					  void *context,
1882					  struct ib_sa_query **sa_query)
1883{
1884	struct ib_mad_agent *agent;
1885	struct ib_sa_classport_info_query *query;
1886	struct ib_sa_mad *mad;
1887	gfp_t gfp_mask = GFP_KERNEL;
1888	int ret;
1889
1890	agent = port->agent;
1891
1892	query = kzalloc(sizeof(*query), gfp_mask);
1893	if (!query)
1894		return -ENOMEM;
1895
1896	query->sa_query.port = port;
1897	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
1898						 port->port_num) ?
1899				 IB_SA_QUERY_OPA : 0;
1900	ret = alloc_mad(&query->sa_query, gfp_mask);
1901	if (ret)
1902		goto err_free;
1903
1904	query->callback = callback;
1905	query->context = context;
1906
1907	mad = query->sa_query.mad_buf->mad;
1908	init_mad(&query->sa_query, agent);
1909
1910	query->sa_query.callback = ib_sa_classport_info_rec_callback;
1911	query->sa_query.release  = ib_sa_classport_info_rec_release;
1912	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
1913	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
1914	mad->sa_hdr.comp_mask	 = 0;
1915	*sa_query = &query->sa_query;
1916
1917	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1918	if (ret < 0)
1919		goto err_free_mad;
1920
1921	return ret;
1922
1923err_free_mad:
1924	*sa_query = NULL;
1925	free_mad(&query->sa_query);
1926
1927err_free:
1928	kfree(query);
1929	return ret;
1930}
1931
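/*
 * Delayed work that refreshes the cached ClassPortInfo.  The query is
 * issued and waited for via a completion; if the cache is still not
 * valid afterwards, the work re-arms itself up to
 * IB_SA_CPI_MAX_RETRY_CNT times, IB_SA_CPI_RETRY_WAIT msecs apart.
 */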
1932static void update_ib_cpi(struct work_struct *work)
1933{
1934	struct ib_sa_port *port =
1935		container_of(work, struct ib_sa_port, ib_cpi_work.work);
1936	struct ib_classport_info_context *cb_context;
1937	unsigned long flags;
1938	int ret;
1939
1940	/*
1941	 * If the classport info is valid, nothing to do here.
1942	 */
1943	spin_lock_irqsave(&port->classport_lock, flags);
1944	if (port->classport_info.valid) {
1945		spin_unlock_irqrestore(&port->classport_lock, flags);
1946		return;
1947	}
1948	spin_unlock_irqrestore(&port->classport_lock, flags);
1949
1950	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
1951	if (!cb_context)
1952		goto err_nomem;
1953
1954	init_completion(&cb_context->done);
1955
1956	ret = ib_sa_classport_info_rec_query(port, 3000,
1957					     ib_classportinfo_cb, cb_context,
1958					     &cb_context->sa_query);
1959	if (ret < 0)
1960		goto free_cb_err;
1961	wait_for_completion(&cb_context->done);
1962free_cb_err:
1963	kfree(cb_context);
1964	spin_lock_irqsave(&port->classport_lock, flags);
1965
1966	/* If the classport info is still not valid, the query must have
1967	 * failed for some reason. Retry issuing the query.
1968	 */
1969	if (!port->classport_info.valid) {
1970		port->classport_info.retry_cnt++;
1971		if (port->classport_info.retry_cnt <=
1972		    IB_SA_CPI_MAX_RETRY_CNT) {
1973			unsigned long delay =
1974				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
1975
1976			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
1977		}
1978	}
1979	spin_unlock_irqrestore(&port->classport_lock, flags);
1980
1981err_nomem:
1982	return;
1983}
1984
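/*
 * MAD-layer send completion.  A successful send is reported through
 * recv_handler() instead; timeouts, flushes and other errors are
 * mapped to -ETIMEDOUT/-EINTR/-EIO here.  Either way the query is
 * dropped from the XArray and its resources are released.
 */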
1985static void send_handler(struct ib_mad_agent *agent,
1986			 struct ib_mad_send_wc *mad_send_wc)
1987{
1988	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
1989	unsigned long flags;
1990
1991	if (query->callback)
1992		switch (mad_send_wc->status) {
1993		case IB_WC_SUCCESS:
1994			/* No callback -- already got recv */
1995			break;
1996		case IB_WC_RESP_TIMEOUT_ERR:
1997			query->callback(query, -ETIMEDOUT, NULL);
1998			break;
1999		case IB_WC_WR_FLUSH_ERR:
2000			query->callback(query, -EINTR, NULL);
2001			break;
2002		default:
2003			query->callback(query, -EIO, NULL);
2004			break;
2005		}
2006
2007	xa_lock_irqsave(&queries, flags);
2008	__xa_erase(&queries, query->id);
2009	xa_unlock_irqrestore(&queries, flags);
2010
2011	free_mad(query);
2012	if (query->client)
2013		ib_sa_client_put(query->client);
2014	query->release(query);
2015}
2016
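/*
 * MAD-layer receive completion: pass the raw SA response MAD (or an
 * error) to the query's callback; the record-type specific callback
 * takes care of unpacking.
 */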
2017static void recv_handler(struct ib_mad_agent *mad_agent,
2018			 struct ib_mad_send_buf *send_buf,
2019			 struct ib_mad_recv_wc *mad_recv_wc)
2020{
2021	struct ib_sa_query *query;
2022
2023	if (!send_buf)
2024		return;
2025
2026	query = send_buf->context[0];
2027	if (query->callback) {
2028		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
2029			query->callback(query,
2030					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
2031					-EINVAL : 0,
2032					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
2033		else
2034			query->callback(query, -EIO, NULL);
2035	}
2036
2037	ib_free_recv_mad(mad_recv_wc);
2038}
2039
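/*
 * Work item that rebuilds the cached SM address handle after a port
 * event: re-read the SM LID/SL and the default P_Key index, request a
 * GRH where the fabric requires one, and swap the new AH in under
 * ah_lock.
 */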
2040static void update_sm_ah(struct work_struct *work)
2041{
2042	struct ib_sa_port *port =
2043		container_of(work, struct ib_sa_port, update_task);
2044	struct ib_sa_sm_ah *new_ah;
2045	struct ib_port_attr port_attr;
2046	struct rdma_ah_attr   ah_attr;
2047	bool grh_required;
2048
2049	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
2050		pr_warn("Couldn't query port\n");
2051		return;
2052	}
2053
2054	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
2055	if (!new_ah)
2056		return;
2057
2058	kref_init(&new_ah->ref);
2059	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
2060
2061	new_ah->pkey_index = 0;
2062	if (ib_find_pkey(port->agent->device, port->port_num,
2063			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
2064		pr_err("Couldn't find index for default PKey\n");
2065
2066	memset(&ah_attr, 0, sizeof(ah_attr));
2067	ah_attr.type = rdma_ah_find_type(port->agent->device,
2068					 port->port_num);
2069	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
2070	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
2071	rdma_ah_set_port_num(&ah_attr, port->port_num);
2072
2073	grh_required = rdma_is_grh_required(port->agent->device,
2074					    port->port_num);
2075
2076	/*
2077	 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
2078	 * differentiated from a permissive LID of 0xFFFF.  We set the
2079	 * grh_required flag here so the SA can program the DGID in the
2080	 * address handle appropriately.
2081	 */
2082	if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
2083	    (grh_required ||
2084	     port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
2085		rdma_ah_set_make_grd(&ah_attr, true);
2086
2087	if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
2088		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
2089		rdma_ah_set_subnet_prefix(&ah_attr,
2090					  cpu_to_be64(port_attr.subnet_prefix));
2091		rdma_ah_set_interface_id(&ah_attr,
2092					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
2093	}
2094
2095	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
2096				    RDMA_CREATE_AH_SLEEPABLE);
2097	if (IS_ERR(new_ah->ah)) {
2098		pr_warn("Couldn't create new SM AH\n");
2099		kfree(new_ah);
2100		return;
2101	}
2102
2103	spin_lock_irq(&port->ah_lock);
2104	if (port->sm_ah)
2105		kref_put(&port->sm_ah->ref, free_sm_ah);
2106	port->sm_ah = new_ah;
2107	spin_unlock_irq(&port->ah_lock);
2108}
2109
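/*
 * Device event handler.  Any event that may invalidate the SM address
 * handle drops it under ah_lock; SM change, client reregister, LID
 * change and port active additionally invalidate the ClassPortInfo
 * cache and schedule its refresh before re-priming the SM AH.
 */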
2110static void ib_sa_event(struct ib_event_handler *handler,
2111			struct ib_event *event)
2112{
2113	if (event->event == IB_EVENT_PORT_ERR    ||
2114	    event->event == IB_EVENT_PORT_ACTIVE ||
2115	    event->event == IB_EVENT_LID_CHANGE  ||
2116	    event->event == IB_EVENT_PKEY_CHANGE ||
2117	    event->event == IB_EVENT_SM_CHANGE   ||
2118	    event->event == IB_EVENT_CLIENT_REREGISTER) {
2119		unsigned long flags;
2120		struct ib_sa_device *sa_dev =
2121			container_of(handler, typeof(*sa_dev), event_handler);
2122		u32 port_num = event->element.port_num - sa_dev->start_port;
2123		struct ib_sa_port *port = &sa_dev->port[port_num];
2124
2125		if (!rdma_cap_ib_sa(handler->device, port->port_num))
2126			return;
2127
2128		spin_lock_irqsave(&port->ah_lock, flags);
2129		if (port->sm_ah)
2130			kref_put(&port->sm_ah->ref, free_sm_ah);
2131		port->sm_ah = NULL;
2132		spin_unlock_irqrestore(&port->ah_lock, flags);
2133
2134		if (event->event == IB_EVENT_SM_CHANGE ||
2135		    event->event == IB_EVENT_CLIENT_REREGISTER ||
2136		    event->event == IB_EVENT_LID_CHANGE ||
2137		    event->event == IB_EVENT_PORT_ACTIVE) {
2138			unsigned long delay =
2139				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
2140
2141			spin_lock_irqsave(&port->classport_lock, flags);
2142			port->classport_info.valid = false;
2143			port->classport_info.retry_cnt = 0;
2144			spin_unlock_irqrestore(&port->classport_lock, flags);
2145			queue_delayed_work(ib_wq,
2146					   &port->ib_cpi_work, delay);
2147		}
2148		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
2149	}
2150}
2151
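/*
 * Per-device setup: allocate one ib_sa_port per port, register a GSI
 * MAD agent on every SA-capable port, then hook up the event handler
 * before priming the SM AH caches so no port state change is missed.
 */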
2152static int ib_sa_add_one(struct ib_device *device)
2153{
2154	struct ib_sa_device *sa_dev;
2155	int s, e, i;
2156	int count = 0;
2157	int ret;
2158
2159	s = rdma_start_port(device);
2160	e = rdma_end_port(device);
2161
2162	sa_dev = kzalloc(struct_size(sa_dev, port,
2163				     size_add(size_sub(e, s), 1)),
2164			 GFP_KERNEL);
2165	if (!sa_dev)
2166		return -ENOMEM;
2167
2168	sa_dev->start_port = s;
2169	sa_dev->end_port   = e;
2170
2171	for (i = 0; i <= e - s; ++i) {
2172		spin_lock_init(&sa_dev->port[i].ah_lock);
2173		if (!rdma_cap_ib_sa(device, i + 1))
2174			continue;
2175
2176		sa_dev->port[i].sm_ah    = NULL;
2177		sa_dev->port[i].port_num = i + s;
2178
2179		spin_lock_init(&sa_dev->port[i].classport_lock);
2180		sa_dev->port[i].classport_info.valid = false;
2181
2182		sa_dev->port[i].agent =
2183			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
2184					      NULL, 0, send_handler,
2185					      recv_handler, sa_dev, 0);
2186		if (IS_ERR(sa_dev->port[i].agent)) {
2187			ret = PTR_ERR(sa_dev->port[i].agent);
2188			goto err;
2189		}
2190
2191		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
2192		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
2193				  update_ib_cpi);
2194
2195		count++;
2196	}
2197
2198	if (!count) {
2199		ret = -EOPNOTSUPP;
2200		goto free;
2201	}
2202
2203	ib_set_client_data(device, &sa_client, sa_dev);
2204
2205	/*
2206	 * We register our event handler after everything is set up,
2207	 * and then update our cached info after the event handler is
2208	 * registered to avoid any problems if a port changes state
2209	 * during our initialization.
2210	 */
2211
2212	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
2213	ib_register_event_handler(&sa_dev->event_handler);
2214
2215	for (i = 0; i <= e - s; ++i) {
2216		if (rdma_cap_ib_sa(device, i + 1))
2217			update_sm_ah(&sa_dev->port[i].update_task);
2218	}
2219
2220	return 0;
2221
2222err:
2223	while (--i >= 0) {
2224		if (rdma_cap_ib_sa(device, i + 1))
2225			ib_unregister_mad_agent(sa_dev->port[i].agent);
2226	}
2227free:
2228	kfree(sa_dev);
2229	return ret;
2230}
2231
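/*
 * Per-device teardown: unregister the event handler, flush pending
 * work, then cancel the ClassPortInfo work and unregister the MAD
 * agent on each SA-capable port before freeing the device state.
 */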
2232static void ib_sa_remove_one(struct ib_device *device, void *client_data)
2233{
2234	struct ib_sa_device *sa_dev = client_data;
2235	int i;
2236
2237	ib_unregister_event_handler(&sa_dev->event_handler);
2238	flush_workqueue(ib_wq);
2239
2240	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
2241		if (rdma_cap_ib_sa(device, i + 1)) {
2242			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
2243			ib_unregister_mad_agent(sa_dev->port[i].agent);
2244			if (sa_dev->port[i].sm_ah)
2245				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
2246		}
2247
2248	}
2249
2250	kfree(sa_dev);
2251}
2252
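/*
 * Module init: seed the transaction ID, register the "sa" client,
 * bring up multicast handling and the ordered workqueue that services
 * netlink local-service timeouts.
 */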
2253int ib_sa_init(void)
2254{
2255	int ret;
2256
2257	get_random_bytes(&tid, sizeof tid);
2258
2259	atomic_set(&ib_nl_sa_request_seq, 0);
2260
2261	ret = ib_register_client(&sa_client);
2262	if (ret) {
2263		pr_err("Couldn't register ib_sa client\n");
2264		goto err1;
2265	}
2266
2267	ret = mcast_init();
2268	if (ret) {
2269		pr_err("Couldn't initialize multicast handling\n");
2270		goto err2;
2271	}
2272
2273	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
2274	if (!ib_nl_wq) {
2275		ret = -ENOMEM;
2276		goto err3;
2277	}
2278
2279	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
2280
2281	return 0;
2282
2283err3:
2284	mcast_cleanup();
2285err2:
2286	ib_unregister_client(&sa_client);
2287err1:
2288	return ret;
2289}
2290
2291void ib_sa_cleanup(void)
2292{
2293	cancel_delayed_work(&ib_nl_timed_work);
2294	destroy_workqueue(ib_nl_wq);
2295	mcast_cleanup();
2296	ib_unregister_client(&sa_client);
2297	WARN_ON(!xa_empty(&queries));
2298}
v5.4
   1/*
   2 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
   4 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include <linux/module.h>
  36#include <linux/init.h>
  37#include <linux/err.h>
  38#include <linux/random.h>
  39#include <linux/spinlock.h>
  40#include <linux/slab.h>
  41#include <linux/dma-mapping.h>
  42#include <linux/kref.h>
  43#include <linux/xarray.h>
  44#include <linux/workqueue.h>
  45#include <uapi/linux/if_ether.h>
  46#include <rdma/ib_pack.h>
  47#include <rdma/ib_cache.h>
  48#include <rdma/rdma_netlink.h>
  49#include <net/netlink.h>
  50#include <uapi/rdma/ib_user_sa.h>
  51#include <rdma/ib_marshall.h>
  52#include <rdma/ib_addr.h>
  53#include <rdma/opa_addr.h>
 
  54#include "sa.h"
  55#include "core_priv.h"
  56
  57#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
  58#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
  59#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
  60#define IB_SA_CPI_MAX_RETRY_CNT			3
  61#define IB_SA_CPI_RETRY_WAIT			1000 /*msecs */
  62static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
  63
  64struct ib_sa_sm_ah {
  65	struct ib_ah        *ah;
  66	struct kref          ref;
  67	u16		     pkey_index;
  68	u8		     src_path_mask;
  69};
  70
  71enum rdma_class_port_info_type {
  72	RDMA_CLASS_PORT_INFO_IB,
  73	RDMA_CLASS_PORT_INFO_OPA
  74};
  75
  76struct rdma_class_port_info {
  77	enum rdma_class_port_info_type type;
  78	union {
  79		struct ib_class_port_info ib;
  80		struct opa_class_port_info opa;
  81	};
  82};
  83
  84struct ib_sa_classport_cache {
  85	bool valid;
  86	int retry_cnt;
  87	struct rdma_class_port_info data;
  88};
  89
  90struct ib_sa_port {
  91	struct ib_mad_agent *agent;
  92	struct ib_sa_sm_ah  *sm_ah;
  93	struct work_struct   update_task;
  94	struct ib_sa_classport_cache classport_info;
  95	struct delayed_work ib_cpi_work;
  96	spinlock_t                   classport_lock; /* protects class port info set */
  97	spinlock_t           ah_lock;
  98	u8                   port_num;
  99};
 100
 101struct ib_sa_device {
 102	int                     start_port, end_port;
 103	struct ib_event_handler event_handler;
 104	struct ib_sa_port port[0];
 105};
 106
 107struct ib_sa_query {
 108	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
 
 109	void (*release)(struct ib_sa_query *);
 110	struct ib_sa_client    *client;
 111	struct ib_sa_port      *port;
 112	struct ib_mad_send_buf *mad_buf;
 113	struct ib_sa_sm_ah     *sm_ah;
 114	int			id;
 115	u32			flags;
 116	struct list_head	list; /* Local svc request list */
 117	u32			seq; /* Local svc request sequence number */
 118	unsigned long		timeout; /* Local svc timeout */
 119	u8			path_use; /* How will the pathrecord be used */
 120};
 121
 122#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
 123#define IB_SA_CANCEL			0x00000002
 124#define IB_SA_QUERY_OPA			0x00000004
 125
 126struct ib_sa_service_query {
 127	void (*callback)(int, struct ib_sa_service_rec *, void *);
 128	void *context;
 129	struct ib_sa_query sa_query;
 130};
 131
 132struct ib_sa_path_query {
 133	void (*callback)(int, struct sa_path_rec *, void *);
 
 134	void *context;
 135	struct ib_sa_query sa_query;
 136	struct sa_path_rec *conv_pr;
 137};
 138
 139struct ib_sa_guidinfo_query {
 140	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
 141	void *context;
 142	struct ib_sa_query sa_query;
 143};
 144
 145struct ib_sa_classport_info_query {
 146	void (*callback)(void *);
 147	void *context;
 148	struct ib_sa_query sa_query;
 149};
 150
 151struct ib_sa_mcmember_query {
 152	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
 153	void *context;
 154	struct ib_sa_query sa_query;
 155};
 156
 157static LIST_HEAD(ib_nl_request_list);
 158static DEFINE_SPINLOCK(ib_nl_request_lock);
 159static atomic_t ib_nl_sa_request_seq;
 160static struct workqueue_struct *ib_nl_wq;
 161static struct delayed_work ib_nl_timed_work;
 162static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
 163	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
 164		.len = sizeof(struct ib_path_rec_data)},
 165	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
 166	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
 167	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
 168		.len = sizeof(struct rdma_nla_ls_gid)},
 169	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
 170		.len = sizeof(struct rdma_nla_ls_gid)},
 171	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
 172	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
 173	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
 174};
 175
 176
 177static void ib_sa_add_one(struct ib_device *device);
 178static void ib_sa_remove_one(struct ib_device *device, void *client_data);
 179
 180static struct ib_client sa_client = {
 181	.name   = "sa",
 182	.add    = ib_sa_add_one,
 183	.remove = ib_sa_remove_one
 184};
 185
 186static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
 187
 188static DEFINE_SPINLOCK(tid_lock);
 189static u32 tid;
 190
 191#define PATH_REC_FIELD(field) \
 192	.struct_offset_bytes = offsetof(struct sa_path_rec, field),	\
 193	.struct_size_bytes   = sizeof((struct sa_path_rec *)0)->field,	\
 194	.field_name          = "sa_path_rec:" #field
 195
 196static const struct ib_field path_rec_table[] = {
 197	{ PATH_REC_FIELD(service_id),
 198	  .offset_words = 0,
 199	  .offset_bits  = 0,
 200	  .size_bits    = 64 },
 201	{ PATH_REC_FIELD(dgid),
 202	  .offset_words = 2,
 203	  .offset_bits  = 0,
 204	  .size_bits    = 128 },
 205	{ PATH_REC_FIELD(sgid),
 206	  .offset_words = 6,
 207	  .offset_bits  = 0,
 208	  .size_bits    = 128 },
 209	{ PATH_REC_FIELD(ib.dlid),
 210	  .offset_words = 10,
 211	  .offset_bits  = 0,
 212	  .size_bits    = 16 },
 213	{ PATH_REC_FIELD(ib.slid),
 214	  .offset_words = 10,
 215	  .offset_bits  = 16,
 216	  .size_bits    = 16 },
 217	{ PATH_REC_FIELD(ib.raw_traffic),
 218	  .offset_words = 11,
 219	  .offset_bits  = 0,
 220	  .size_bits    = 1 },
 221	{ RESERVED,
 222	  .offset_words = 11,
 223	  .offset_bits  = 1,
 224	  .size_bits    = 3 },
 225	{ PATH_REC_FIELD(flow_label),
 226	  .offset_words = 11,
 227	  .offset_bits  = 4,
 228	  .size_bits    = 20 },
 229	{ PATH_REC_FIELD(hop_limit),
 230	  .offset_words = 11,
 231	  .offset_bits  = 24,
 232	  .size_bits    = 8 },
 233	{ PATH_REC_FIELD(traffic_class),
 234	  .offset_words = 12,
 235	  .offset_bits  = 0,
 236	  .size_bits    = 8 },
 237	{ PATH_REC_FIELD(reversible),
 238	  .offset_words = 12,
 239	  .offset_bits  = 8,
 240	  .size_bits    = 1 },
 241	{ PATH_REC_FIELD(numb_path),
 242	  .offset_words = 12,
 243	  .offset_bits  = 9,
 244	  .size_bits    = 7 },
 245	{ PATH_REC_FIELD(pkey),
 246	  .offset_words = 12,
 247	  .offset_bits  = 16,
 248	  .size_bits    = 16 },
 249	{ PATH_REC_FIELD(qos_class),
 250	  .offset_words = 13,
 251	  .offset_bits  = 0,
 252	  .size_bits    = 12 },
 253	{ PATH_REC_FIELD(sl),
 254	  .offset_words = 13,
 255	  .offset_bits  = 12,
 256	  .size_bits    = 4 },
 257	{ PATH_REC_FIELD(mtu_selector),
 258	  .offset_words = 13,
 259	  .offset_bits  = 16,
 260	  .size_bits    = 2 },
 261	{ PATH_REC_FIELD(mtu),
 262	  .offset_words = 13,
 263	  .offset_bits  = 18,
 264	  .size_bits    = 6 },
 265	{ PATH_REC_FIELD(rate_selector),
 266	  .offset_words = 13,
 267	  .offset_bits  = 24,
 268	  .size_bits    = 2 },
 269	{ PATH_REC_FIELD(rate),
 270	  .offset_words = 13,
 271	  .offset_bits  = 26,
 272	  .size_bits    = 6 },
 273	{ PATH_REC_FIELD(packet_life_time_selector),
 274	  .offset_words = 14,
 275	  .offset_bits  = 0,
 276	  .size_bits    = 2 },
 277	{ PATH_REC_FIELD(packet_life_time),
 278	  .offset_words = 14,
 279	  .offset_bits  = 2,
 280	  .size_bits    = 6 },
 281	{ PATH_REC_FIELD(preference),
 282	  .offset_words = 14,
 283	  .offset_bits  = 8,
 284	  .size_bits    = 8 },
 285	{ RESERVED,
 286	  .offset_words = 14,
 287	  .offset_bits  = 16,
 288	  .size_bits    = 48 },
 289};
 290
 291#define OPA_PATH_REC_FIELD(field) \
 292	.struct_offset_bytes = \
 293		offsetof(struct sa_path_rec, field), \
 294	.struct_size_bytes   = \
 295		sizeof((struct sa_path_rec *)0)->field,	\
 296	.field_name          = "sa_path_rec:" #field
 297
 298static const struct ib_field opa_path_rec_table[] = {
 299	{ OPA_PATH_REC_FIELD(service_id),
 300	  .offset_words = 0,
 301	  .offset_bits  = 0,
 302	  .size_bits    = 64 },
 303	{ OPA_PATH_REC_FIELD(dgid),
 304	  .offset_words = 2,
 305	  .offset_bits  = 0,
 306	  .size_bits    = 128 },
 307	{ OPA_PATH_REC_FIELD(sgid),
 308	  .offset_words = 6,
 309	  .offset_bits  = 0,
 310	  .size_bits    = 128 },
 311	{ OPA_PATH_REC_FIELD(opa.dlid),
 312	  .offset_words = 10,
 313	  .offset_bits  = 0,
 314	  .size_bits    = 32 },
 315	{ OPA_PATH_REC_FIELD(opa.slid),
 316	  .offset_words = 11,
 317	  .offset_bits  = 0,
 318	  .size_bits    = 32 },
 319	{ OPA_PATH_REC_FIELD(opa.raw_traffic),
 320	  .offset_words = 12,
 321	  .offset_bits  = 0,
 322	  .size_bits    = 1 },
 323	{ RESERVED,
 324	  .offset_words = 12,
 325	  .offset_bits  = 1,
 326	  .size_bits    = 3 },
 327	{ OPA_PATH_REC_FIELD(flow_label),
 328	  .offset_words = 12,
 329	  .offset_bits  = 4,
 330	  .size_bits    = 20 },
 331	{ OPA_PATH_REC_FIELD(hop_limit),
 332	  .offset_words = 12,
 333	  .offset_bits  = 24,
 334	  .size_bits    = 8 },
 335	{ OPA_PATH_REC_FIELD(traffic_class),
 336	  .offset_words = 13,
 337	  .offset_bits  = 0,
 338	  .size_bits    = 8 },
 339	{ OPA_PATH_REC_FIELD(reversible),
 340	  .offset_words = 13,
 341	  .offset_bits  = 8,
 342	  .size_bits    = 1 },
 343	{ OPA_PATH_REC_FIELD(numb_path),
 344	  .offset_words = 13,
 345	  .offset_bits  = 9,
 346	  .size_bits    = 7 },
 347	{ OPA_PATH_REC_FIELD(pkey),
 348	  .offset_words = 13,
 349	  .offset_bits  = 16,
 350	  .size_bits    = 16 },
 351	{ OPA_PATH_REC_FIELD(opa.l2_8B),
 352	  .offset_words = 14,
 353	  .offset_bits  = 0,
 354	  .size_bits    = 1 },
 355	{ OPA_PATH_REC_FIELD(opa.l2_10B),
 356	  .offset_words = 14,
 357	  .offset_bits  = 1,
 358	  .size_bits    = 1 },
 359	{ OPA_PATH_REC_FIELD(opa.l2_9B),
 360	  .offset_words = 14,
 361	  .offset_bits  = 2,
 362	  .size_bits    = 1 },
 363	{ OPA_PATH_REC_FIELD(opa.l2_16B),
 364	  .offset_words = 14,
 365	  .offset_bits  = 3,
 366	  .size_bits    = 1 },
 367	{ RESERVED,
 368	  .offset_words = 14,
 369	  .offset_bits  = 4,
 370	  .size_bits    = 2 },
 371	{ OPA_PATH_REC_FIELD(opa.qos_type),
 372	  .offset_words = 14,
 373	  .offset_bits  = 6,
 374	  .size_bits    = 2 },
 375	{ OPA_PATH_REC_FIELD(opa.qos_priority),
 376	  .offset_words = 14,
 377	  .offset_bits  = 8,
 378	  .size_bits    = 8 },
 379	{ RESERVED,
 380	  .offset_words = 14,
 381	  .offset_bits  = 16,
 382	  .size_bits    = 3 },
 383	{ OPA_PATH_REC_FIELD(sl),
 384	  .offset_words = 14,
 385	  .offset_bits  = 19,
 386	  .size_bits    = 5 },
 387	{ RESERVED,
 388	  .offset_words = 14,
 389	  .offset_bits  = 24,
 390	  .size_bits    = 8 },
 391	{ OPA_PATH_REC_FIELD(mtu_selector),
 392	  .offset_words = 15,
 393	  .offset_bits  = 0,
 394	  .size_bits    = 2 },
 395	{ OPA_PATH_REC_FIELD(mtu),
 396	  .offset_words = 15,
 397	  .offset_bits  = 2,
 398	  .size_bits    = 6 },
 399	{ OPA_PATH_REC_FIELD(rate_selector),
 400	  .offset_words = 15,
 401	  .offset_bits  = 8,
 402	  .size_bits    = 2 },
 403	{ OPA_PATH_REC_FIELD(rate),
 404	  .offset_words = 15,
 405	  .offset_bits  = 10,
 406	  .size_bits    = 6 },
 407	{ OPA_PATH_REC_FIELD(packet_life_time_selector),
 408	  .offset_words = 15,
 409	  .offset_bits  = 16,
 410	  .size_bits    = 2 },
 411	{ OPA_PATH_REC_FIELD(packet_life_time),
 412	  .offset_words = 15,
 413	  .offset_bits  = 18,
 414	  .size_bits    = 6 },
 415	{ OPA_PATH_REC_FIELD(preference),
 416	  .offset_words = 15,
 417	  .offset_bits  = 24,
 418	  .size_bits    = 8 },
 419};
 420
 421#define MCMEMBER_REC_FIELD(field) \
 422	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
 423	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
 424	.field_name          = "sa_mcmember_rec:" #field
 425
 426static const struct ib_field mcmember_rec_table[] = {
 427	{ MCMEMBER_REC_FIELD(mgid),
 428	  .offset_words = 0,
 429	  .offset_bits  = 0,
 430	  .size_bits    = 128 },
 431	{ MCMEMBER_REC_FIELD(port_gid),
 432	  .offset_words = 4,
 433	  .offset_bits  = 0,
 434	  .size_bits    = 128 },
 435	{ MCMEMBER_REC_FIELD(qkey),
 436	  .offset_words = 8,
 437	  .offset_bits  = 0,
 438	  .size_bits    = 32 },
 439	{ MCMEMBER_REC_FIELD(mlid),
 440	  .offset_words = 9,
 441	  .offset_bits  = 0,
 442	  .size_bits    = 16 },
 443	{ MCMEMBER_REC_FIELD(mtu_selector),
 444	  .offset_words = 9,
 445	  .offset_bits  = 16,
 446	  .size_bits    = 2 },
 447	{ MCMEMBER_REC_FIELD(mtu),
 448	  .offset_words = 9,
 449	  .offset_bits  = 18,
 450	  .size_bits    = 6 },
 451	{ MCMEMBER_REC_FIELD(traffic_class),
 452	  .offset_words = 9,
 453	  .offset_bits  = 24,
 454	  .size_bits    = 8 },
 455	{ MCMEMBER_REC_FIELD(pkey),
 456	  .offset_words = 10,
 457	  .offset_bits  = 0,
 458	  .size_bits    = 16 },
 459	{ MCMEMBER_REC_FIELD(rate_selector),
 460	  .offset_words = 10,
 461	  .offset_bits  = 16,
 462	  .size_bits    = 2 },
 463	{ MCMEMBER_REC_FIELD(rate),
 464	  .offset_words = 10,
 465	  .offset_bits  = 18,
 466	  .size_bits    = 6 },
 467	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
 468	  .offset_words = 10,
 469	  .offset_bits  = 24,
 470	  .size_bits    = 2 },
 471	{ MCMEMBER_REC_FIELD(packet_life_time),
 472	  .offset_words = 10,
 473	  .offset_bits  = 26,
 474	  .size_bits    = 6 },
 475	{ MCMEMBER_REC_FIELD(sl),
 476	  .offset_words = 11,
 477	  .offset_bits  = 0,
 478	  .size_bits    = 4 },
 479	{ MCMEMBER_REC_FIELD(flow_label),
 480	  .offset_words = 11,
 481	  .offset_bits  = 4,
 482	  .size_bits    = 20 },
 483	{ MCMEMBER_REC_FIELD(hop_limit),
 484	  .offset_words = 11,
 485	  .offset_bits  = 24,
 486	  .size_bits    = 8 },
 487	{ MCMEMBER_REC_FIELD(scope),
 488	  .offset_words = 12,
 489	  .offset_bits  = 0,
 490	  .size_bits    = 4 },
 491	{ MCMEMBER_REC_FIELD(join_state),
 492	  .offset_words = 12,
 493	  .offset_bits  = 4,
 494	  .size_bits    = 4 },
 495	{ MCMEMBER_REC_FIELD(proxy_join),
 496	  .offset_words = 12,
 497	  .offset_bits  = 8,
 498	  .size_bits    = 1 },
 499	{ RESERVED,
 500	  .offset_words = 12,
 501	  .offset_bits  = 9,
 502	  .size_bits    = 23 },
 503};
 504
 505#define SERVICE_REC_FIELD(field) \
 506	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
 507	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
 508	.field_name          = "sa_service_rec:" #field
 509
 510static const struct ib_field service_rec_table[] = {
 511	{ SERVICE_REC_FIELD(id),
 512	  .offset_words = 0,
 513	  .offset_bits  = 0,
 514	  .size_bits    = 64 },
 515	{ SERVICE_REC_FIELD(gid),
 516	  .offset_words = 2,
 517	  .offset_bits  = 0,
 518	  .size_bits    = 128 },
 519	{ SERVICE_REC_FIELD(pkey),
 520	  .offset_words = 6,
 521	  .offset_bits  = 0,
 522	  .size_bits    = 16 },
 523	{ SERVICE_REC_FIELD(lease),
 524	  .offset_words = 7,
 525	  .offset_bits  = 0,
 526	  .size_bits    = 32 },
 527	{ SERVICE_REC_FIELD(key),
 528	  .offset_words = 8,
 529	  .offset_bits  = 0,
 530	  .size_bits    = 128 },
 531	{ SERVICE_REC_FIELD(name),
 532	  .offset_words = 12,
 533	  .offset_bits  = 0,
 534	  .size_bits    = 64*8 },
 535	{ SERVICE_REC_FIELD(data8),
 536	  .offset_words = 28,
 537	  .offset_bits  = 0,
 538	  .size_bits    = 16*8 },
 539	{ SERVICE_REC_FIELD(data16),
 540	  .offset_words = 32,
 541	  .offset_bits  = 0,
 542	  .size_bits    = 8*16 },
 543	{ SERVICE_REC_FIELD(data32),
 544	  .offset_words = 36,
 545	  .offset_bits  = 0,
 546	  .size_bits    = 4*32 },
 547	{ SERVICE_REC_FIELD(data64),
 548	  .offset_words = 40,
 549	  .offset_bits  = 0,
 550	  .size_bits    = 2*64 },
 551};
 552
 553#define CLASSPORTINFO_REC_FIELD(field) \
 554	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
 555	.struct_size_bytes   = sizeof((struct ib_class_port_info *)0)->field,	\
 556	.field_name          = "ib_class_port_info:" #field
 557
 558static const struct ib_field ib_classport_info_rec_table[] = {
 559	{ CLASSPORTINFO_REC_FIELD(base_version),
 560	  .offset_words = 0,
 561	  .offset_bits  = 0,
 562	  .size_bits    = 8 },
 563	{ CLASSPORTINFO_REC_FIELD(class_version),
 564	  .offset_words = 0,
 565	  .offset_bits  = 8,
 566	  .size_bits    = 8 },
 567	{ CLASSPORTINFO_REC_FIELD(capability_mask),
 568	  .offset_words = 0,
 569	  .offset_bits  = 16,
 570	  .size_bits    = 16 },
 571	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
 572	  .offset_words = 1,
 573	  .offset_bits  = 0,
 574	  .size_bits    = 32 },
 575	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
 576	  .offset_words = 2,
 577	  .offset_bits  = 0,
 578	  .size_bits    = 128 },
 579	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
 580	  .offset_words = 6,
 581	  .offset_bits  = 0,
 582	  .size_bits    = 32 },
 583	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
 584	  .offset_words = 7,
 585	  .offset_bits  = 0,
 586	  .size_bits    = 16 },
 587	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
 588	  .offset_words = 7,
 589	  .offset_bits  = 16,
 590	  .size_bits    = 16 },
 591
 592	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
 593	  .offset_words = 8,
 594	  .offset_bits  = 0,
 595	  .size_bits    = 32 },
 596	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
 597	  .offset_words = 9,
 598	  .offset_bits  = 0,
 599	  .size_bits    = 32 },
 600
 601	{ CLASSPORTINFO_REC_FIELD(trap_gid),
 602	  .offset_words = 10,
 603	  .offset_bits  = 0,
 604	  .size_bits    = 128 },
 605	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
 606	  .offset_words = 14,
 607	  .offset_bits  = 0,
 608	  .size_bits    = 32 },
 609
 610	{ CLASSPORTINFO_REC_FIELD(trap_lid),
 611	  .offset_words = 15,
 612	  .offset_bits  = 0,
 613	  .size_bits    = 16 },
 614	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
 615	  .offset_words = 15,
 616	  .offset_bits  = 16,
 617	  .size_bits    = 16 },
 618
 619	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
 620	  .offset_words = 16,
 621	  .offset_bits  = 0,
 622	  .size_bits    = 32 },
 623	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
 624	  .offset_words = 17,
 625	  .offset_bits  = 0,
 626	  .size_bits    = 32 },
 627};
 628
 629#define OPA_CLASSPORTINFO_REC_FIELD(field) \
 630	.struct_offset_bytes =\
 631		offsetof(struct opa_class_port_info, field),	\
 632	.struct_size_bytes   = \
 633		sizeof((struct opa_class_port_info *)0)->field,	\
 634	.field_name          = "opa_class_port_info:" #field
 635
 636static const struct ib_field opa_classport_info_rec_table[] = {
 637	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
 638	  .offset_words = 0,
 639	  .offset_bits  = 0,
 640	  .size_bits    = 8 },
 641	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
 642	  .offset_words = 0,
 643	  .offset_bits  = 8,
 644	  .size_bits    = 8 },
 645	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
 646	  .offset_words = 0,
 647	  .offset_bits  = 16,
 648	  .size_bits    = 16 },
 649	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
 650	  .offset_words = 1,
 651	  .offset_bits  = 0,
 652	  .size_bits    = 32 },
 653	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
 654	  .offset_words = 2,
 655	  .offset_bits  = 0,
 656	  .size_bits    = 128 },
 657	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
 658	  .offset_words = 6,
 659	  .offset_bits  = 0,
 660	  .size_bits    = 32 },
 661	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
 662	  .offset_words = 7,
 663	  .offset_bits  = 0,
 664	  .size_bits    = 32 },
 665	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
 666	  .offset_words = 8,
 667	  .offset_bits  = 0,
 668	  .size_bits    = 32 },
 669	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
 670	  .offset_words = 9,
 671	  .offset_bits  = 0,
 672	  .size_bits    = 32 },
 673	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
 674	  .offset_words = 10,
 675	  .offset_bits  = 0,
 676	  .size_bits    = 128 },
 677	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
 678	  .offset_words = 14,
 679	  .offset_bits  = 0,
 680	  .size_bits    = 32 },
 681	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
 682	  .offset_words = 15,
 683	  .offset_bits  = 0,
 684	  .size_bits    = 32 },
 685	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
 686	  .offset_words = 16,
 687	  .offset_bits  = 0,
 688	  .size_bits    = 32 },
 689	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
 690	  .offset_words = 17,
 691	  .offset_bits  = 0,
 692	  .size_bits    = 32 },
 693	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
 694	  .offset_words = 18,
 695	  .offset_bits  = 0,
 696	  .size_bits    = 16 },
 697	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
 698	  .offset_words = 18,
 699	  .offset_bits  = 16,
 700	  .size_bits    = 16 },
 701	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
 702	  .offset_words = 19,
 703	  .offset_bits  = 0,
 704	  .size_bits    = 8 },
 705	{ RESERVED,
 706	  .offset_words = 19,
 707	  .offset_bits  = 8,
 708	  .size_bits    = 24 },
 709};
 710
 711#define GUIDINFO_REC_FIELD(field) \
 712	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
 713	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
 714	.field_name          = "sa_guidinfo_rec:" #field
 715
 716static const struct ib_field guidinfo_rec_table[] = {
 717	{ GUIDINFO_REC_FIELD(lid),
 718	  .offset_words = 0,
 719	  .offset_bits  = 0,
 720	  .size_bits    = 16 },
 721	{ GUIDINFO_REC_FIELD(block_num),
 722	  .offset_words = 0,
 723	  .offset_bits  = 16,
 724	  .size_bits    = 8 },
 725	{ GUIDINFO_REC_FIELD(res1),
 726	  .offset_words = 0,
 727	  .offset_bits  = 24,
 728	  .size_bits    = 8 },
 729	{ GUIDINFO_REC_FIELD(res2),
 730	  .offset_words = 1,
 731	  .offset_bits  = 0,
 732	  .size_bits    = 32 },
 733	{ GUIDINFO_REC_FIELD(guid_info_list),
 734	  .offset_words = 2,
 735	  .offset_bits  = 0,
 736	  .size_bits    = 512 },
 737};
 738
 
 
 739static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
 740{
 741	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
 742}
 743
 744static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
 745{
 746	return (query->flags & IB_SA_CANCEL);
 747}
 748
 749static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 750				     struct ib_sa_query *query)
 751{
 752	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
 753	struct ib_sa_mad *mad = query->mad_buf->mad;
 754	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
 755	u16 val16;
 756	u64 val64;
 757	struct rdma_ls_resolve_header *header;
 758
 759	query->mad_buf->context[1] = NULL;
 760
 761	/* Construct the family header first */
 762	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
 763	memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
 764	       LS_DEVICE_NAME_MAX);
 
 765	header->port_num = query->port->port_num;
 766
 767	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
 768	    sa_rec->reversible != 0)
 769		query->path_use = LS_RESOLVE_PATH_USE_GMP;
 770	else
 771		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
 772	header->path_use = query->path_use;
 773
 774	/* Now build the attributes */
 775	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
 776		val64 = be64_to_cpu(sa_rec->service_id);
 777		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
 778			sizeof(val64), &val64);
 779	}
 780	if (comp_mask & IB_SA_PATH_REC_DGID)
 781		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
 782			sizeof(sa_rec->dgid), &sa_rec->dgid);
 783	if (comp_mask & IB_SA_PATH_REC_SGID)
 784		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
 785			sizeof(sa_rec->sgid), &sa_rec->sgid);
 786	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
 787		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
 788			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);
 789
 790	if (comp_mask & IB_SA_PATH_REC_PKEY) {
 791		val16 = be16_to_cpu(sa_rec->pkey);
 792		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
 793			sizeof(val16), &val16);
 794	}
 795	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
 796		val16 = be16_to_cpu(sa_rec->qos_class);
 797		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
 798			sizeof(val16), &val16);
 799	}
 800}
 801
 802static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
 803{
 804	int len = 0;
 805
 806	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
 807		len += nla_total_size(sizeof(u64));
 808	if (comp_mask & IB_SA_PATH_REC_DGID)
 809		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
 810	if (comp_mask & IB_SA_PATH_REC_SGID)
 811		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
 812	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
 813		len += nla_total_size(sizeof(u8));
 814	if (comp_mask & IB_SA_PATH_REC_PKEY)
 815		len += nla_total_size(sizeof(u16));
 816	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
 817		len += nla_total_size(sizeof(u16));
 818
 819	/*
 820	 * Make sure that at least some of the required comp_mask bits are
 821	 * set.
 822	 */
 823	if (WARN_ON(len == 0))
 824		return len;
 825
 826	/* Add the family header */
 827	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));
 828
 829	return len;
 830}
 831
 832static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 833{
 834	struct sk_buff *skb = NULL;
 835	struct nlmsghdr *nlh;
 836	void *data;
 837	struct ib_sa_mad *mad;
 838	int len;
 
 
 
 
 
 
 
 839
 840	mad = query->mad_buf->mad;
 841	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
 842	if (len <= 0)
 843		return -EMSGSIZE;
 844
 845	skb = nlmsg_new(len, gfp_mask);
 846	if (!skb)
 847		return -ENOMEM;
 848
 849	/* Put nlmsg header only for now */
 850	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
 851			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
 852	if (!data) {
 853		nlmsg_free(skb);
 854		return -EMSGSIZE;
 855	}
 856
 857	/* Add attributes */
 858	ib_nl_set_path_rec_attrs(skb, query);
 859
 860	/* Repair the nlmsg header length */
 861	nlmsg_end(skb, nlh);
 862
 863	return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
 864}
 865
 866static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 867{
 868	unsigned long flags;
 869	unsigned long delay;
 870	int ret;
 871
 872	INIT_LIST_HEAD(&query->list);
 873	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
 874
 875	/* Put the request on the list first.*/
 876	spin_lock_irqsave(&ib_nl_request_lock, flags);
 877	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
 878	query->timeout = delay + jiffies;
 879	list_add_tail(&query->list, &ib_nl_request_list);
 880	/* Start the timeout if this is the only request */
 881	if (ib_nl_request_list.next == &query->list)
 882		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
 
 
 883	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 884
 885	ret = ib_nl_send_msg(query, gfp_mask);
 886	if (ret) {
 887		ret = -EIO;
 888		/* Remove the request */
 889		spin_lock_irqsave(&ib_nl_request_lock, flags);
 890		list_del(&query->list);
 891		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 892	}
 893
 894	return ret;
 895}
 896
 897static int ib_nl_cancel_request(struct ib_sa_query *query)
 898{
 899	unsigned long flags;
 900	struct ib_sa_query *wait_query;
 901	int found = 0;
 902
 903	spin_lock_irqsave(&ib_nl_request_lock, flags);
 904	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
 905		/* Let the timeout to take care of the callback */
 906		if (query == wait_query) {
 907			query->flags |= IB_SA_CANCEL;
 908			query->timeout = jiffies;
 909			list_move(&query->list, &ib_nl_request_list);
 910			found = 1;
 911			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
 912			break;
 913		}
 914	}
 915	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 916
 917	return found;
 918}
 919
 920static void send_handler(struct ib_mad_agent *agent,
 921			 struct ib_mad_send_wc *mad_send_wc);
 922
 923static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
 924					   const struct nlmsghdr *nlh)
 925{
 
 
 
 926	struct ib_mad_send_wc mad_send_wc;
 
 927	struct ib_sa_mad *mad = NULL;
 928	const struct nlattr *head, *curr;
 929	struct ib_path_rec_data  *rec;
 930	int len, rem;
 931	u32 mask = 0;
 932	int status = -EIO;
 933
 934	if (query->callback) {
 935		head = (const struct nlattr *) nlmsg_data(nlh);
 936		len = nlmsg_len(nlh);
 937		switch (query->path_use) {
 938		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
 939			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 940			break;
 
 941
 942		case LS_RESOLVE_PATH_USE_ALL:
 943		case LS_RESOLVE_PATH_USE_GMP:
 944		default:
 945			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
 946				IB_PATH_BIDIRECTIONAL;
 947			break;
 948		}
 949		nla_for_each_attr(curr, head, len, rem) {
 950			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
 951				rec = nla_data(curr);
 952				/*
 953				 * Get the first one. In the future, we may
 954				 * need to get up to 6 pathrecords.
 955				 */
 956				if ((rec->flags & mask) == mask) {
 957					mad = query->mad_buf->mad;
 958					mad->mad_hdr.method |=
 959						IB_MGMT_METHOD_RESP;
 960					memcpy(mad->data, rec->path_rec,
 961					       sizeof(rec->path_rec));
 962					status = 0;
 963					break;
 964				}
 965			}
 966		}
 967		query->callback(query, status, mad);
 968	}
 969
 
 970	mad_send_wc.send_buf = query->mad_buf;
 971	mad_send_wc.status = IB_WC_SUCCESS;
 972	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
 973}
 974
 975static void ib_nl_request_timeout(struct work_struct *work)
 976{
 977	unsigned long flags;
 978	struct ib_sa_query *query;
 979	unsigned long delay;
 980	struct ib_mad_send_wc mad_send_wc;
 981	int ret;
 982
 983	spin_lock_irqsave(&ib_nl_request_lock, flags);
 984	while (!list_empty(&ib_nl_request_list)) {
 985		query = list_entry(ib_nl_request_list.next,
 986				   struct ib_sa_query, list);
 987
 988		if (time_after(query->timeout, jiffies)) {
 989			delay = query->timeout - jiffies;
 990			if ((long)delay <= 0)
 991				delay = 1;
 992			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
 993			break;
 994		}
 995
 996		list_del(&query->list);
 997		ib_sa_disable_local_svc(query);
 998		/* Hold the lock to protect against query cancellation */
 999		if (ib_sa_query_cancelled(query))
1000			ret = -1;
1001		else
1002			ret = ib_post_send_mad(query->mad_buf, NULL);
1003		if (ret) {
1004			mad_send_wc.send_buf = query->mad_buf;
1005			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
1006			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1007			send_handler(query->port->agent, &mad_send_wc);
1008			spin_lock_irqsave(&ib_nl_request_lock, flags);
1009		}
1010	}
1011	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1012}
1013
1014int ib_nl_handle_set_timeout(struct sk_buff *skb,
1015			     struct nlmsghdr *nlh,
1016			     struct netlink_ext_ack *extack)
1017{
1018	int timeout, delta, abs_delta;
1019	const struct nlattr *attr;
1020	unsigned long flags;
1021	struct ib_sa_query *query;
1022	long delay = 0;
1023	struct nlattr *tb[LS_NLA_TYPE_MAX];
1024	int ret;
1025
1026	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
1027	    !(NETLINK_CB(skb).sk))
1028		return -EPERM;
1029
1030	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
1031				   nlmsg_len(nlh), ib_nl_policy, NULL);
1032	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
1033	if (ret || !attr)
1034		goto settimeout_out;
1035
1036	timeout = *(int *) nla_data(attr);
1037	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
1038		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
1039	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
1040		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
1041
1042	delta = timeout - sa_local_svc_timeout_ms;
1043	if (delta < 0)
1044		abs_delta = -delta;
1045	else
1046		abs_delta = delta;
1047
1048	if (delta != 0) {
1049		spin_lock_irqsave(&ib_nl_request_lock, flags);
1050		sa_local_svc_timeout_ms = timeout;
1051		list_for_each_entry(query, &ib_nl_request_list, list) {
1052			if (delta < 0 && abs_delta > query->timeout)
1053				query->timeout = 0;
1054			else
1055				query->timeout += delta;
1056
1057			/* Get the new delay from the first entry */
1058			if (!delay) {
1059				delay = query->timeout - jiffies;
1060				if (delay <= 0)
1061					delay = 1;
1062			}
1063		}
1064		if (delay)
1065			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
1066					 (unsigned long)delay);
1067		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1068	}
1069
1070settimeout_out:
1071	return skb->len;
1072}
1073
1074static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
1075{
1076	struct nlattr *tb[LS_NLA_TYPE_MAX];
1077	int ret;
1078
1079	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
1080		return 0;
1081
1082	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
1083				   nlmsg_len(nlh), ib_nl_policy, NULL);
1084	if (ret)
1085		return 0;
1086
1087	return 1;
1088}
1089
1090int ib_nl_handle_resolve_resp(struct sk_buff *skb,
1091			      struct nlmsghdr *nlh,
1092			      struct netlink_ext_ack *extack)
1093{
1094	unsigned long flags;
1095	struct ib_sa_query *query;
1096	struct ib_mad_send_buf *send_buf;
1097	struct ib_mad_send_wc mad_send_wc;
1098	int found = 0;
1099	int ret;
1100
1101	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
1102	    !(NETLINK_CB(skb).sk))
1103		return -EPERM;
1104
1105	spin_lock_irqsave(&ib_nl_request_lock, flags);
1106	list_for_each_entry(query, &ib_nl_request_list, list) {
1107		/*
1108		 * If the query is cancelled, let the timeout routine
1109		 * take care of it.
1110		 */
1111		if (nlh->nlmsg_seq == query->seq) {
1112			found = !ib_sa_query_cancelled(query);
1113			if (found)
1114				list_del(&query->list);
 
1115			break;
1116		}
1117	}
1118
1119	if (!found) {
1120		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1121		goto resp_out;
1122	}
1123
1124	send_buf = query->mad_buf;
1125
1126	if (!ib_nl_is_good_resolve_resp(nlh)) {
1127		/* if the result is a failure, send out the packet via IB */
1128		ib_sa_disable_local_svc(query);
1129		ret = ib_post_send_mad(query->mad_buf, NULL);
1130		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1131		if (ret) {
1132			mad_send_wc.send_buf = send_buf;
1133			mad_send_wc.status = IB_WC_GENERAL_ERR;
1134			send_handler(query->port->agent, &mad_send_wc);
1135		}
1136	} else {
1137		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1138		ib_nl_process_good_resolve_rsp(query, nlh);
1139	}
1140
1141resp_out:
1142	return skb->len;
1143}
1144
1145static void free_sm_ah(struct kref *kref)
1146{
1147	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
1148
1149	rdma_destroy_ah(sm_ah->ah, 0);
1150	kfree(sm_ah);
1151}
1152
1153void ib_sa_register_client(struct ib_sa_client *client)
1154{
1155	atomic_set(&client->users, 1);
1156	init_completion(&client->comp);
1157}
1158EXPORT_SYMBOL(ib_sa_register_client);
1159
1160void ib_sa_unregister_client(struct ib_sa_client *client)
1161{
1162	ib_sa_client_put(client);
1163	wait_for_completion(&client->comp);
1164}
1165EXPORT_SYMBOL(ib_sa_unregister_client);
1166
1167/**
1168 * ib_sa_cancel_query - try to cancel an SA query
1169 * @id:ID of query to cancel
1170 * @query:query pointer to cancel
1171 *
1172 * Try to cancel an SA query.  If the id and query don't match up or
1173 * the query has already completed, nothing is done.  Otherwise the
1174 * query is canceled and will complete with a status of -EINTR.
1175 */
1176void ib_sa_cancel_query(int id, struct ib_sa_query *query)
1177{
1178	unsigned long flags;
1179	struct ib_mad_agent *agent;
1180	struct ib_mad_send_buf *mad_buf;
1181
1182	xa_lock_irqsave(&queries, flags);
1183	if (xa_load(&queries, id) != query) {
1184		xa_unlock_irqrestore(&queries, flags);
1185		return;
1186	}
1187	agent = query->port->agent;
1188	mad_buf = query->mad_buf;
1189	xa_unlock_irqrestore(&queries, flags);
1190
1191	/*
1192	 * If the query is still on the netlink request list, schedule
1193	 * it to be cancelled by the timeout routine. Otherwise, it has been
1194	 * sent to the MAD layer and has to be cancelled from there.
1195	 */
1196	if (!ib_nl_cancel_request(query))
1197		ib_cancel_mad(agent, mad_buf);
1198}
1199EXPORT_SYMBOL(ib_sa_cancel_query);
1200
1201static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
1202{
1203	struct ib_sa_device *sa_dev;
1204	struct ib_sa_port   *port;
1205	unsigned long flags;
1206	u8 src_path_mask;
1207
1208	sa_dev = ib_get_client_data(device, &sa_client);
1209	if (!sa_dev)
1210		return 0x7f;
1211
1212	port  = &sa_dev->port[port_num - sa_dev->start_port];
1213	spin_lock_irqsave(&port->ah_lock, flags);
1214	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
1215	spin_unlock_irqrestore(&port->ah_lock, flags);
1216
1217	return src_path_mask;
1218}
1219
1220static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
1221				   struct sa_path_rec *rec,
1222				   struct rdma_ah_attr *ah_attr,
1223				   const struct ib_gid_attr *gid_attr)
1224{
1225	enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);
1226
1227	if (!gid_attr) {
1228		gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
1229						 port_num, NULL);
1230		if (IS_ERR(gid_attr))
1231			return PTR_ERR(gid_attr);
1232	} else
1233		rdma_hold_gid_attr(gid_attr);
1234
1235	rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
1236				be32_to_cpu(rec->flow_label),
1237				rec->hop_limit,	rec->traffic_class,
1238				gid_attr);
1239	return 0;
1240}
1241
1242/**
1243 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
1244 *   an SA path record.
1245 * @device: Device associated ah attributes initialization.
1246 * @port_num: Port on the specified device.
1247 * @rec: path record entry to use for ah attributes initialization.
1248 * @ah_attr: address handle attributes to initialization from path record.
1249 * @sgid_attr: SGID attribute to consider during initialization.
1250 *
1251 * When ib_init_ah_attr_from_path() returns success,
1252 * (a) for IB link layer it optionally contains a reference to SGID attribute
1253 * when GRH is present for IB link layer.
1254 * (b) for RoCE link layer it contains a reference to SGID attribute.
1255 * User must invoke rdma_destroy_ah_attr() to release reference to SGID
1256 * attributes which are initialized using ib_init_ah_attr_from_path().
1257 */
1258int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
1259			      struct sa_path_rec *rec,
1260			      struct rdma_ah_attr *ah_attr,
1261			      const struct ib_gid_attr *gid_attr)
1262{
1263	int ret = 0;
1264
1265	memset(ah_attr, 0, sizeof(*ah_attr));
1266	ah_attr->type = rdma_ah_find_type(device, port_num);
1267	rdma_ah_set_sl(ah_attr, rec->sl);
1268	rdma_ah_set_port_num(ah_attr, port_num);
1269	rdma_ah_set_static_rate(ah_attr, rec->rate);
1270
1271	if (sa_path_is_roce(rec)) {
1272		ret = roce_resolve_route_from_path(rec, gid_attr);
1273		if (ret)
1274			return ret;
1275
1276		memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
1277	} else {
1278		rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
1279		if (sa_path_is_opa(rec) &&
1280		    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
1281			rdma_ah_set_make_grd(ah_attr, true);
1282
1283		rdma_ah_set_path_bits(ah_attr,
1284				      be32_to_cpu(sa_path_get_slid(rec)) &
1285				      get_src_path_mask(device, port_num));
1286	}
1287
1288	if (rec->hop_limit > 0 || sa_path_is_roce(rec))
1289		ret = init_ah_attr_grh_fields(device, port_num,
1290					      rec, ah_attr, gid_attr);
1291	return ret;
1292}
1293EXPORT_SYMBOL(ib_init_ah_attr_from_path);
1294
1295static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
1296{
1297	struct rdma_ah_attr ah_attr;
1298	unsigned long flags;
1299
1300	spin_lock_irqsave(&query->port->ah_lock, flags);
1301	if (!query->port->sm_ah) {
1302		spin_unlock_irqrestore(&query->port->ah_lock, flags);
1303		return -EAGAIN;
1304	}
1305	kref_get(&query->port->sm_ah->ref);
1306	query->sm_ah = query->port->sm_ah;
1307	spin_unlock_irqrestore(&query->port->ah_lock, flags);
1308
1309	/*
1310	 * Always check if sm_ah has valid dlid assigned,
1311	 * before querying for class port info
1312	 */
1313	if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
1314	    !rdma_is_valid_unicast_lid(&ah_attr)) {
1315		kref_put(&query->sm_ah->ref, free_sm_ah);
1316		return -EAGAIN;
1317	}
1318	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
1319					    query->sm_ah->pkey_index,
1320					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
1321					    gfp_mask,
1322					    ((query->flags & IB_SA_QUERY_OPA) ?
1323					     OPA_MGMT_BASE_VERSION :
1324					     IB_MGMT_BASE_VERSION));
1325	if (IS_ERR(query->mad_buf)) {
1326		kref_put(&query->sm_ah->ref, free_sm_ah);
1327		return -ENOMEM;
1328	}
1329
1330	query->mad_buf->ah = query->sm_ah->ah;
1331
1332	return 0;
1333}
1334
1335static void free_mad(struct ib_sa_query *query)
1336{
1337	ib_free_send_mad(query->mad_buf);
1338	kref_put(&query->sm_ah->ref, free_sm_ah);
1339}
1340
1341static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
1342{
1343	struct ib_sa_mad *mad = query->mad_buf->mad;
1344	unsigned long flags;
1345
1346	memset(mad, 0, sizeof *mad);
1347
1348	if (query->flags & IB_SA_QUERY_OPA) {
1349		mad->mad_hdr.base_version  = OPA_MGMT_BASE_VERSION;
1350		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
1351	} else {
1352		mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
1353		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
1354	}
1355	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
1356	spin_lock_irqsave(&tid_lock, flags);
1357	mad->mad_hdr.tid           =
1358		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
1359	spin_unlock_irqrestore(&tid_lock, flags);
1360}
1361
1362static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
1363		    gfp_t gfp_mask)
1364{
1365	unsigned long flags;
1366	int ret, id;
1367
1368	xa_lock_irqsave(&queries, flags);
1369	ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
1370	xa_unlock_irqrestore(&queries, flags);
1371	if (ret < 0)
1372		return ret;
1373
1374	query->mad_buf->timeout_ms  = timeout_ms;
1375	query->mad_buf->context[0] = query;
1376	query->id = id;
1377
1378	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
1379	    (!(query->flags & IB_SA_QUERY_OPA))) {
1380		if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
1381			if (!ib_nl_make_request(query, gfp_mask))
1382				return id;
1383		}
1384		ib_sa_disable_local_svc(query);
1385	}
1386
1387	ret = ib_post_send_mad(query->mad_buf, NULL);
1388	if (ret) {
1389		xa_lock_irqsave(&queries, flags);
1390		__xa_erase(&queries, id);
1391		xa_unlock_irqrestore(&queries, flags);
1392	}
1393
1394	/*
1395	 * It's not safe to dereference query any more, because the
1396	 * send may already have completed and freed the query in
1397	 * another context.
1398	 */
1399	return ret ? ret : id;
1400}
1401
1402void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
1403{
1404	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
1405}
1406EXPORT_SYMBOL(ib_sa_unpack_path);
1407
1408void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
1409{
1410	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
1411}
1412EXPORT_SYMBOL(ib_sa_pack_path);
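/*
 * Illustrative sketch only: round-tripping a path record through the wire
 * format with the two helpers above.  IB_MGMT_SA_DATA (from <rdma/ib_mad.h>)
 * is larger than the 64-byte packed IB PathRecord, so it is a convenient
 * buffer size here; pack writes the big-endian wire layout, unpack recovers
 * a host-order struct sa_path_rec.  "in" is assumed to be filled by the
 * caller.
 *
 *	struct sa_path_rec in = {}, out;
 *	u8 wire[IB_MGMT_SA_DATA];
 *
 *	ib_sa_pack_path(&in, wire);
 *	ib_sa_unpack_path(wire, &out);
 */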
1413
1414static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
1415					 struct ib_device *device,
1416					 u8 port_num)
1417{
1418	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1419	struct ib_sa_port *port;
1420	unsigned long flags;
1421	bool ret = false;
1422
1423	if (!sa_dev)
1424		return ret;
1425
1426	port = &sa_dev->port[port_num - sa_dev->start_port];
1427	spin_lock_irqsave(&port->classport_lock, flags);
1428	if (!port->classport_info.valid)
1429		goto ret;
1430
1431	if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
1432		ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
1433			OPA_CLASS_PORT_INFO_PR_SUPPORT;
1434ret:
1435	spin_unlock_irqrestore(&port->classport_lock, flags);
1436	return ret;
1437}
1438
1439enum opa_pr_supported {
1440	PR_NOT_SUPPORTED,
1441	PR_OPA_SUPPORTED,
1442	PR_IB_SUPPORTED
1443};
1444
1445/**
1446 * opa_pr_query_possible - Check if the current PR query can be an OPA query.
1447 * Returns PR_NOT_SUPPORTED if a path record query is not
1448 * possible, PR_OPA_SUPPORTED if an OPA path record query
1449 * is possible and PR_IB_SUPPORTED if an IB path record
1450 * query is possible.
1451 */
1452static int opa_pr_query_possible(struct ib_sa_client *client,
1453				 struct ib_device *device,
1454				 u8 port_num,
1455				 struct sa_path_rec *rec)
1456{
1457	struct ib_port_attr port_attr;
1458
1459	if (ib_query_port(device, port_num, &port_attr))
1460		return PR_NOT_SUPPORTED;
1461
1462	if (ib_sa_opa_pathrecord_support(client, device, port_num))
1463		return PR_OPA_SUPPORTED;
1464
1465	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
1466		return PR_NOT_SUPPORTED;
1467	else
1468		return PR_IB_SUPPORTED;
1469}
1470
1471static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
1472				    int status,
1473				    struct ib_sa_mad *mad)
1474{
1475	struct ib_sa_path_query *query =
1476		container_of(sa_query, struct ib_sa_path_query, sa_query);
1477
1478	if (mad) {
1479		struct sa_path_rec rec;
1480
1481		if (sa_query->flags & IB_SA_QUERY_OPA) {
1482			ib_unpack(opa_path_rec_table,
1483				  ARRAY_SIZE(opa_path_rec_table),
1484				  mad->data, &rec);
1485			rec.rec_type = SA_PATH_REC_TYPE_OPA;
1486			query->callback(status, &rec, query->context);
1487		} else {
1488			ib_unpack(path_rec_table,
1489				  ARRAY_SIZE(path_rec_table),
1490				  mad->data, &rec);
1491			rec.rec_type = SA_PATH_REC_TYPE_IB;
1492			sa_path_set_dmac_zero(&rec);
1493
1494			if (query->conv_pr) {
1495				struct sa_path_rec opa;
1496
1497				memset(&opa, 0, sizeof(struct sa_path_rec));
1498				sa_convert_path_ib_to_opa(&opa, &rec);
1499				query->callback(status, &opa, query->context);
1500			} else {
1501				query->callback(status, &rec, query->context);
1502			}
1503		}
1504	} else
1505		query->callback(status, NULL, query->context);
1506}
1507
1508static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
1509{
1510	struct ib_sa_path_query *query =
1511		container_of(sa_query, struct ib_sa_path_query, sa_query);
1512
1513	kfree(query->conv_pr);
1514	kfree(query);
1515}
1516
1517/**
1518 * ib_sa_path_rec_get - Start a Path get query
1519 * @client: SA client
1520 * @device: device to send query on
1521 * @port_num: port number to send query on
1522 * @rec: Path Record to send in query
1523 * @comp_mask: component mask to send in query
1524 * @timeout_ms: time to wait for response
1525 * @gfp_mask: GFP mask to use for internal allocations
1526 * @callback: function called when query completes, times out or is
1527 * canceled
1528 * @context: opaque user context passed to callback
1529 * @sa_query: query context, used to cancel query
1530 *
1531 * Send a Path Record Get query to the SA to look up a path.  The
1532 * callback function will be called when the query completes (or
1533 * fails); status is 0 for a successful response, -EINTR if the query
1534 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
1535 * occurred sending the query.  The resp parameter of the callback is
1536 * only valid if status is 0.
1537 *
1538 * If the return value of ib_sa_path_rec_get() is negative, it is an
1539 * error code.  Otherwise it is a query ID that can be used to cancel
1540 * the query.
1541 */
1542int ib_sa_path_rec_get(struct ib_sa_client *client,
1543		       struct ib_device *device, u8 port_num,
1544		       struct sa_path_rec *rec,
1545		       ib_sa_comp_mask comp_mask,
1546		       unsigned long timeout_ms, gfp_t gfp_mask,
1547		       void (*callback)(int status,
1548					struct sa_path_rec *resp,
1549					void *context),
1550		       void *context,
1551		       struct ib_sa_query **sa_query)
1552{
1553	struct ib_sa_path_query *query;
1554	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1555	struct ib_sa_port   *port;
1556	struct ib_mad_agent *agent;
1557	struct ib_sa_mad *mad;
1558	enum opa_pr_supported status;
1559	int ret;
1560
1561	if (!sa_dev)
1562		return -ENODEV;
1563
1564	if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
1565	    (rec->rec_type != SA_PATH_REC_TYPE_OPA))
1566		return -EINVAL;
1567
1568	port  = &sa_dev->port[port_num - sa_dev->start_port];
1569	agent = port->agent;
1570
1571	query = kzalloc(sizeof(*query), gfp_mask);
1572	if (!query)
1573		return -ENOMEM;
1574
1575	query->sa_query.port     = port;
1576	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
1577		status = opa_pr_query_possible(client, device, port_num, rec);
1578		if (status == PR_NOT_SUPPORTED) {
1579			ret = -EINVAL;
1580			goto err1;
1581		} else if (status == PR_OPA_SUPPORTED) {
1582			query->sa_query.flags |= IB_SA_QUERY_OPA;
1583		} else {
1584			query->conv_pr =
1585				kmalloc(sizeof(*query->conv_pr), gfp_mask);
1586			if (!query->conv_pr) {
1587				ret = -ENOMEM;
1588				goto err1;
1589			}
1590		}
1591	}
1592
1593	ret = alloc_mad(&query->sa_query, gfp_mask);
1594	if (ret)
1595		goto err2;
1596
1597	ib_sa_client_get(client);
1598	query->sa_query.client = client;
1599	query->callback        = callback;
1600	query->context         = context;
1601
1602	mad = query->sa_query.mad_buf->mad;
1603	init_mad(&query->sa_query, agent);
1604
1605	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
1606	query->sa_query.release  = ib_sa_path_rec_release;
1607	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
1608	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
1609	mad->sa_hdr.comp_mask	 = comp_mask;
1610
1611	if (query->sa_query.flags & IB_SA_QUERY_OPA) {
1612		ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
1613			rec, mad->data);
1614	} else if (query->conv_pr) {
1615		sa_convert_path_opa_to_ib(query->conv_pr, rec);
1616		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
1617			query->conv_pr, mad->data);
1618	} else {
1619		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
1620			rec, mad->data);
1621	}
1622
1623	*sa_query = &query->sa_query;
1624
1625	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
1626	query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
1627						query->conv_pr : rec;
1628
1629	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1630	if (ret < 0)
1631		goto err3;
1632
1633	return ret;
1634
1635err3:
1636	*sa_query = NULL;
1637	ib_sa_client_put(query->sa_query.client);
1638	free_mad(&query->sa_query);
1639err2:
1640	kfree(query->conv_pr);
1641err1:
1642	kfree(query);
1643	return ret;
1644}
1645EXPORT_SYMBOL(ib_sa_path_rec_get);
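/*
 * Illustrative sketch only (my_* names are hypothetical): a minimal
 * synchronous wrapper around ib_sa_path_rec_get().  The callback runs from
 * MAD completion context, so it only signals a completion here.  A real
 * fabric may want additional components (e.g. pkey or numb_path) in the
 * comp_mask, and a real caller would keep the returned id around for
 * ib_sa_cancel_query().
 *
 *	static void my_path_done(int status, struct sa_path_rec *resp,
 *				 void *context)
 *	{
 *		struct completion *done = context;
 *
 *		if (!status)
 *			pr_info("path resolved, sl %u\n", resp->sl);
 *		complete(done);
 *	}
 *
 *	static int my_resolve_path(struct ib_sa_client *client,
 *				   struct ib_device *dev, u8 port_num,
 *				   union ib_gid *sgid, union ib_gid *dgid)
 *	{
 *		DECLARE_COMPLETION_ONSTACK(done);
 *		struct ib_sa_query *query;
 *		struct sa_path_rec rec = {};
 *		int id;
 *
 *		rec.rec_type = SA_PATH_REC_TYPE_IB;
 *		rec.sgid = *sgid;
 *		rec.dgid = *dgid;
 *		id = ib_sa_path_rec_get(client, dev, port_num, &rec,
 *					IB_SA_PATH_REC_SGID |
 *					IB_SA_PATH_REC_DGID,
 *					3000, GFP_KERNEL,
 *					my_path_done, &done, &query);
 *		if (id < 0)
 *			return id;
 *		wait_for_completion(&done);
 *		return 0;
 *	}
 */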
1646
1647static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
1648				    int status,
1649				    struct ib_sa_mad *mad)
1650{
1651	struct ib_sa_service_query *query =
1652		container_of(sa_query, struct ib_sa_service_query, sa_query);
1653
1654	if (mad) {
1655		struct ib_sa_service_rec rec;
1656
1657		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
1658			  mad->data, &rec);
1659		query->callback(status, &rec, query->context);
1660	} else
1661		query->callback(status, NULL, query->context);
1662}
1663
1664static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
1665{
1666	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
1667}
1668
1669/**
1670 * ib_sa_service_rec_query - Start Service Record operation
1671 * @client: SA client
1672 * @device: device to send request on
1673 * @port_num: port number to send request on
1674 * @method: SA method - should be get, set, or delete
1675 * @rec: Service Record to send in request
1676 * @comp_mask: component mask to send in request
1677 * @timeout_ms: time to wait for response
1678 * @gfp_mask: GFP mask to use for internal allocations
1679 * @callback: function called when request completes, times out or is
1680 * canceled
1681 * @context: opaque user context passed to callback
1682 * @sa_query: request context, used to cancel request
1683 *
1684 * Send a Service Record set/get/delete to the SA to register,
1685 * unregister or query a service record.
1686 * The callback function will be called when the request completes (or
1687 * fails); status is 0 for a successful response, -EINTR if the query
1688 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
1689 * occurred sending the query.  The resp parameter of the callback is
1690 * only valid if status is 0.
1691 *
1692 * If the return value of ib_sa_service_rec_query() is negative, it is an
1693 * error code.  Otherwise it is a request ID that can be used to cancel
1694 * the query.
1695 */
1696int ib_sa_service_rec_query(struct ib_sa_client *client,
1697			    struct ib_device *device, u8 port_num, u8 method,
1698			    struct ib_sa_service_rec *rec,
1699			    ib_sa_comp_mask comp_mask,
1700			    unsigned long timeout_ms, gfp_t gfp_mask,
1701			    void (*callback)(int status,
1702					     struct ib_sa_service_rec *resp,
1703					     void *context),
1704			    void *context,
1705			    struct ib_sa_query **sa_query)
1706{
1707	struct ib_sa_service_query *query;
1708	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1709	struct ib_sa_port   *port;
1710	struct ib_mad_agent *agent;
1711	struct ib_sa_mad *mad;
1712	int ret;
1713
1714	if (!sa_dev)
1715		return -ENODEV;
1716
1717	port  = &sa_dev->port[port_num - sa_dev->start_port];
1718	agent = port->agent;
1719
1720	if (method != IB_MGMT_METHOD_GET &&
1721	    method != IB_MGMT_METHOD_SET &&
1722	    method != IB_SA_METHOD_DELETE)
1723		return -EINVAL;
1724
1725	query = kzalloc(sizeof(*query), gfp_mask);
1726	if (!query)
1727		return -ENOMEM;
1728
1729	query->sa_query.port     = port;
1730	ret = alloc_mad(&query->sa_query, gfp_mask);
1731	if (ret)
1732		goto err1;
1733
1734	ib_sa_client_get(client);
1735	query->sa_query.client = client;
1736	query->callback        = callback;
1737	query->context         = context;
1738
1739	mad = query->sa_query.mad_buf->mad;
1740	init_mad(&query->sa_query, agent);
1741
1742	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
1743	query->sa_query.release  = ib_sa_service_rec_release;
1744	mad->mad_hdr.method	 = method;
1745	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
1746	mad->sa_hdr.comp_mask	 = comp_mask;
1747
1748	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
1749		rec, mad->data);
1750
1751	*sa_query = &query->sa_query;
1752
1753	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1754	if (ret < 0)
1755		goto err2;
1756
1757	return ret;
1758
1759err2:
1760	*sa_query = NULL;
1761	ib_sa_client_put(query->sa_query.client);
1762	free_mad(&query->sa_query);
1763
1764err1:
1765	kfree(query);
1766	return ret;
1767}
1768EXPORT_SYMBOL(ib_sa_service_rec_query);
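/*
 * Illustrative sketch only (my_* and MY_SERVICE_ID are hypothetical): a Set
 * registers the record and a Delete with the same masked components removes
 * it.  Only the ServiceID component is masked here; a real registration
 * would normally also set and mask the GID, P_Key and name fields.
 *
 *	struct ib_sa_service_rec rec = {};
 *	struct ib_sa_query *query;
 *	int id;
 *
 *	rec.id = MY_SERVICE_ID;
 *	id = ib_sa_service_rec_query(client, dev, port_num,
 *				     IB_MGMT_METHOD_SET, &rec,
 *				     IB_SA_SERVICE_REC_SERVICE_ID,
 *				     3000, GFP_KERNEL,
 *				     my_service_done, my_ctx, &query);
 */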
1769
1770static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
1771					int status,
1772					struct ib_sa_mad *mad)
1773{
1774	struct ib_sa_mcmember_query *query =
1775		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
1776
1777	if (mad) {
1778		struct ib_sa_mcmember_rec rec;
1779
1780		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1781			  mad->data, &rec);
1782		query->callback(status, &rec, query->context);
1783	} else
1784		query->callback(status, NULL, query->context);
1785}
1786
1787static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
1788{
1789	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
1790}
1791
1792int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
1793			     struct ib_device *device, u8 port_num,
1794			     u8 method,
1795			     struct ib_sa_mcmember_rec *rec,
1796			     ib_sa_comp_mask comp_mask,
1797			     unsigned long timeout_ms, gfp_t gfp_mask,
1798			     void (*callback)(int status,
1799					      struct ib_sa_mcmember_rec *resp,
1800					      void *context),
1801			     void *context,
1802			     struct ib_sa_query **sa_query)
1803{
1804	struct ib_sa_mcmember_query *query;
1805	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1806	struct ib_sa_port   *port;
1807	struct ib_mad_agent *agent;
1808	struct ib_sa_mad *mad;
1809	int ret;
1810
1811	if (!sa_dev)
1812		return -ENODEV;
1813
1814	port  = &sa_dev->port[port_num - sa_dev->start_port];
1815	agent = port->agent;
1816
1817	query = kzalloc(sizeof(*query), gfp_mask);
1818	if (!query)
1819		return -ENOMEM;
1820
1821	query->sa_query.port     = port;
1822	ret = alloc_mad(&query->sa_query, gfp_mask);
1823	if (ret)
1824		goto err1;
1825
1826	ib_sa_client_get(client);
1827	query->sa_query.client = client;
1828	query->callback        = callback;
1829	query->context         = context;
1830
1831	mad = query->sa_query.mad_buf->mad;
1832	init_mad(&query->sa_query, agent);
1833
1834	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
1835	query->sa_query.release  = ib_sa_mcmember_rec_release;
1836	mad->mad_hdr.method	 = method;
1837	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
1838	mad->sa_hdr.comp_mask	 = comp_mask;
1839
1840	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1841		rec, mad->data);
1842
1843	*sa_query = &query->sa_query;
1844
1845	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1846	if (ret < 0)
1847		goto err2;
1848
1849	return ret;
1850
1851err2:
1852	*sa_query = NULL;
1853	ib_sa_client_put(query->sa_query.client);
1854	free_mad(&query->sa_query);
1855
1856err1:
1857	kfree(query);
1858	return ret;
1859}
1860
1861/* Support GuidInfoRecord */
1862static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
1863					int status,
1864					struct ib_sa_mad *mad)
1865{
1866	struct ib_sa_guidinfo_query *query =
1867		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
1868
1869	if (mad) {
1870		struct ib_sa_guidinfo_rec rec;
1871
1872		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
1873			  mad->data, &rec);
1874		query->callback(status, &rec, query->context);
1875	} else
1876		query->callback(status, NULL, query->context);
1877}
1878
1879static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
1880{
1881	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
1882}
1883
1884int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
1885			      struct ib_device *device, u8 port_num,
1886			      struct ib_sa_guidinfo_rec *rec,
1887			      ib_sa_comp_mask comp_mask, u8 method,
1888			      unsigned long timeout_ms, gfp_t gfp_mask,
1889			      void (*callback)(int status,
1890					       struct ib_sa_guidinfo_rec *resp,
1891					       void *context),
1892			      void *context,
1893			      struct ib_sa_query **sa_query)
1894{
1895	struct ib_sa_guidinfo_query *query;
1896	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1897	struct ib_sa_port *port;
1898	struct ib_mad_agent *agent;
1899	struct ib_sa_mad *mad;
1900	int ret;
1901
1902	if (!sa_dev)
1903		return -ENODEV;
1904
1905	if (method != IB_MGMT_METHOD_GET &&
1906	    method != IB_MGMT_METHOD_SET &&
1907	    method != IB_SA_METHOD_DELETE) {
1908		return -EINVAL;
1909	}
1910
1911	port  = &sa_dev->port[port_num - sa_dev->start_port];
1912	agent = port->agent;
1913
1914	query = kzalloc(sizeof(*query), gfp_mask);
1915	if (!query)
1916		return -ENOMEM;
1917
1918	query->sa_query.port = port;
1919	ret = alloc_mad(&query->sa_query, gfp_mask);
1920	if (ret)
1921		goto err1;
1922
1923	ib_sa_client_get(client);
1924	query->sa_query.client = client;
1925	query->callback        = callback;
1926	query->context         = context;
1927
1928	mad = query->sa_query.mad_buf->mad;
1929	init_mad(&query->sa_query, agent);
1930
1931	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
1932	query->sa_query.release  = ib_sa_guidinfo_rec_release;
1933
1934	mad->mad_hdr.method	 = method;
1935	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
1936	mad->sa_hdr.comp_mask	 = comp_mask;
1937
1938	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
1939		mad->data);
1940
1941	*sa_query = &query->sa_query;
1942
1943	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1944	if (ret < 0)
1945		goto err2;
1946
1947	return ret;
1948
1949err2:
1950	*sa_query = NULL;
1951	ib_sa_client_put(query->sa_query.client);
1952	free_mad(&query->sa_query);
1953
1954err1:
1955	kfree(query);
1956	return ret;
1957}
1958EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
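/*
 * Illustrative sketch only (my_* names are hypothetical): a GuidInfoRecord
 * Set, e.g. to program an alias GUID block.  Note the argument order differs
 * from the other queries in this file: comp_mask precedes method.
 *
 *	id = ib_sa_guid_info_rec_query(client, dev, port_num, &guid_rec,
 *				       IB_SA_GUIDINFO_REC_LID |
 *				       IB_SA_GUIDINFO_REC_BLOCK_NUM |
 *				       IB_SA_GUIDINFO_REC_GID0,
 *				       IB_MGMT_METHOD_SET,
 *				       3000, GFP_KERNEL,
 *				       my_guidinfo_done, my_ctx, &query);
 */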
1959
1960bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
1961				    struct ib_device *device,
1962				    u8 port_num)
1963{
1964	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1965	struct ib_sa_port *port;
1966	bool ret = false;
1967	unsigned long flags;
1968
1969	if (!sa_dev)
1970		return ret;
1971
1972	port  = &sa_dev->port[port_num - sa_dev->start_port];
1973
1974	spin_lock_irqsave(&port->classport_lock, flags);
1975	if ((port->classport_info.valid) &&
1976	    (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
1977		ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
1978			& IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
1979	spin_unlock_irqrestore(&port->classport_lock, flags);
1980	return ret;
1981}
1982EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);
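/*
 * Illustrative sketch only: choosing the multicast JoinState based on the
 * capability reported above, as an IPoIB-style consumer would.  The values
 * are the IBTA MCMemberRecord JoinState bits (1 = FullMember,
 * 8 = SendOnlyFullMember).
 *
 *	u8 join_state = ib_sa_sendonly_fullmem_support(client, dev, port_num) ?
 *			8 : 1;
 */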
1983
1984struct ib_classport_info_context {
1985	struct completion	done;
1986	struct ib_sa_query	*sa_query;
1987};
1988
1989static void ib_classportinfo_cb(void *context)
1990{
1991	struct ib_classport_info_context *cb_ctx = context;
1992
1993	complete(&cb_ctx->done);
1994}
1995
1996static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
1997					      int status,
1998					      struct ib_sa_mad *mad)
1999{
2000	unsigned long flags;
2001	struct ib_sa_classport_info_query *query =
2002		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
2003	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;
2004
2005	if (mad) {
2006		if (sa_query->flags & IB_SA_QUERY_OPA) {
2007			struct opa_class_port_info rec;
2008
2009			ib_unpack(opa_classport_info_rec_table,
2010				  ARRAY_SIZE(opa_classport_info_rec_table),
2011				  mad->data, &rec);
2012
2013			spin_lock_irqsave(&sa_query->port->classport_lock,
2014					  flags);
2015			if (!status && !info->valid) {
2016				memcpy(&info->data.opa, &rec,
2017				       sizeof(info->data.opa));
2018
2019				info->valid = true;
2020				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
2021			}
2022			spin_unlock_irqrestore(&sa_query->port->classport_lock,
2023					       flags);
2024
2025		} else {
2026			struct ib_class_port_info rec;
2027
2028			ib_unpack(ib_classport_info_rec_table,
2029				  ARRAY_SIZE(ib_classport_info_rec_table),
2030				  mad->data, &rec);
2031
2032			spin_lock_irqsave(&sa_query->port->classport_lock,
2033					  flags);
2034			if (!status && !info->valid) {
2035				memcpy(&info->data.ib, &rec,
2036				       sizeof(info->data.ib));
2037
2038				info->valid = true;
2039				info->data.type = RDMA_CLASS_PORT_INFO_IB;
2040			}
2041			spin_unlock_irqrestore(&sa_query->port->classport_lock,
2042					       flags);
2043		}
2044	}
2045	query->callback(query->context);
2046}
2047
2048static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
2049{
2050	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
2051			   sa_query));
2052}
2053
2054static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
2055					  unsigned long timeout_ms,
2056					  void (*callback)(void *context),
2057					  void *context,
2058					  struct ib_sa_query **sa_query)
2059{
2060	struct ib_mad_agent *agent;
2061	struct ib_sa_classport_info_query *query;
2062	struct ib_sa_mad *mad;
2063	gfp_t gfp_mask = GFP_KERNEL;
2064	int ret;
2065
2066	agent = port->agent;
2067
2068	query = kzalloc(sizeof(*query), gfp_mask);
2069	if (!query)
2070		return -ENOMEM;
2071
2072	query->sa_query.port = port;
2073	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
2074						 port->port_num) ?
2075				 IB_SA_QUERY_OPA : 0;
2076	ret = alloc_mad(&query->sa_query, gfp_mask);
2077	if (ret)
2078		goto err_free;
2079
2080	query->callback = callback;
2081	query->context = context;
2082
2083	mad = query->sa_query.mad_buf->mad;
2084	init_mad(&query->sa_query, agent);
2085
2086	query->sa_query.callback = ib_sa_classport_info_rec_callback;
2087	query->sa_query.release  = ib_sa_classport_info_rec_release;
2088	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
2089	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
2090	mad->sa_hdr.comp_mask	 = 0;
2091	*sa_query = &query->sa_query;
2092
2093	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
2094	if (ret < 0)
2095		goto err_free_mad;
2096
2097	return ret;
2098
2099err_free_mad:
2100	*sa_query = NULL;
2101	free_mad(&query->sa_query);
2102
2103err_free:
2104	kfree(query);
2105	return ret;
2106}
2107
2108static void update_ib_cpi(struct work_struct *work)
2109{
2110	struct ib_sa_port *port =
2111		container_of(work, struct ib_sa_port, ib_cpi_work.work);
2112	struct ib_classport_info_context *cb_context;
2113	unsigned long flags;
2114	int ret;
2115
2116	/* If the classport info is valid, nothing
2117	 * to do here.
2118	 */
2119	spin_lock_irqsave(&port->classport_lock, flags);
2120	if (port->classport_info.valid) {
2121		spin_unlock_irqrestore(&port->classport_lock, flags);
2122		return;
2123	}
2124	spin_unlock_irqrestore(&port->classport_lock, flags);
2125
2126	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
2127	if (!cb_context)
2128		goto err_nomem;
2129
2130	init_completion(&cb_context->done);
2131
2132	ret = ib_sa_classport_info_rec_query(port, 3000,
2133					     ib_classportinfo_cb, cb_context,
2134					     &cb_context->sa_query);
2135	if (ret < 0)
2136		goto free_cb_err;
2137	wait_for_completion(&cb_context->done);
2138free_cb_err:
2139	kfree(cb_context);
2140	spin_lock_irqsave(&port->classport_lock, flags);
2141
2142	/* If the classport info is still not valid, the query should have
2143	 * failed for some reason. Retry issuing the query.
2144	 */
2145	if (!port->classport_info.valid) {
2146		port->classport_info.retry_cnt++;
2147		if (port->classport_info.retry_cnt <=
2148		    IB_SA_CPI_MAX_RETRY_CNT) {
2149			unsigned long delay =
2150				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
2151
2152			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
2153		}
2154	}
2155	spin_unlock_irqrestore(&port->classport_lock, flags);
2156
2157err_nomem:
2158	return;
2159}
2160
2161static void send_handler(struct ib_mad_agent *agent,
2162			 struct ib_mad_send_wc *mad_send_wc)
2163{
2164	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
2165	unsigned long flags;
2166
2167	if (query->callback)
2168		switch (mad_send_wc->status) {
2169		case IB_WC_SUCCESS:
2170			/* No callback -- already got recv */
2171			break;
2172		case IB_WC_RESP_TIMEOUT_ERR:
2173			query->callback(query, -ETIMEDOUT, NULL);
2174			break;
2175		case IB_WC_WR_FLUSH_ERR:
2176			query->callback(query, -EINTR, NULL);
2177			break;
2178		default:
2179			query->callback(query, -EIO, NULL);
2180			break;
2181		}
2182
2183	xa_lock_irqsave(&queries, flags);
2184	__xa_erase(&queries, query->id);
2185	xa_unlock_irqrestore(&queries, flags);
2186
2187	free_mad(query);
2188	if (query->client)
2189		ib_sa_client_put(query->client);
2190	query->release(query);
2191}
2192
2193static void recv_handler(struct ib_mad_agent *mad_agent,
2194			 struct ib_mad_send_buf *send_buf,
2195			 struct ib_mad_recv_wc *mad_recv_wc)
2196{
2197	struct ib_sa_query *query;
2198
2199	if (!send_buf)
2200		return;
2201
2202	query = send_buf->context[0];
2203	if (query->callback) {
2204		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
2205			query->callback(query,
2206					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
2207					-EINVAL : 0,
2208					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
2209		else
2210			query->callback(query, -EIO, NULL);
2211	}
2212
2213	ib_free_recv_mad(mad_recv_wc);
2214}
2215
2216static void update_sm_ah(struct work_struct *work)
2217{
2218	struct ib_sa_port *port =
2219		container_of(work, struct ib_sa_port, update_task);
2220	struct ib_sa_sm_ah *new_ah;
2221	struct ib_port_attr port_attr;
2222	struct rdma_ah_attr   ah_attr;
2223	bool grh_required;
2224
2225	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
2226		pr_warn("Couldn't query port\n");
2227		return;
2228	}
2229
2230	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
2231	if (!new_ah)
2232		return;
2233
2234	kref_init(&new_ah->ref);
2235	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
2236
2237	new_ah->pkey_index = 0;
2238	if (ib_find_pkey(port->agent->device, port->port_num,
2239			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
2240		pr_err("Couldn't find index for default PKey\n");
2241
2242	memset(&ah_attr, 0, sizeof(ah_attr));
2243	ah_attr.type = rdma_ah_find_type(port->agent->device,
2244					 port->port_num);
2245	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
2246	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
2247	rdma_ah_set_port_num(&ah_attr, port->port_num);
2248
2249	grh_required = rdma_is_grh_required(port->agent->device,
2250					    port->port_num);
2251
2252	/*
2253	 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
2254	 * differentiated from a permissive LID of 0xFFFF.  We set the
2255	 * grh_required flag here so the SA can program the DGID in the
2256	 * address handle appropriately.
2257	 */
2258	if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
2259	    (grh_required ||
2260	     port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
2261		rdma_ah_set_make_grd(&ah_attr, true);
2262
2263	if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
2264		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
2265		rdma_ah_set_subnet_prefix(&ah_attr,
2266					  cpu_to_be64(port_attr.subnet_prefix));
2267		rdma_ah_set_interface_id(&ah_attr,
2268					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
2269	}
2270
2271	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
2272				    RDMA_CREATE_AH_SLEEPABLE);
2273	if (IS_ERR(new_ah->ah)) {
2274		pr_warn("Couldn't create new SM AH\n");
2275		kfree(new_ah);
2276		return;
2277	}
2278
2279	spin_lock_irq(&port->ah_lock);
2280	if (port->sm_ah)
2281		kref_put(&port->sm_ah->ref, free_sm_ah);
2282	port->sm_ah = new_ah;
2283	spin_unlock_irq(&port->ah_lock);
2284}
2285
2286static void ib_sa_event(struct ib_event_handler *handler,
2287			struct ib_event *event)
2288{
2289	if (event->event == IB_EVENT_PORT_ERR    ||
2290	    event->event == IB_EVENT_PORT_ACTIVE ||
2291	    event->event == IB_EVENT_LID_CHANGE  ||
2292	    event->event == IB_EVENT_PKEY_CHANGE ||
2293	    event->event == IB_EVENT_SM_CHANGE   ||
2294	    event->event == IB_EVENT_CLIENT_REREGISTER) {
2295		unsigned long flags;
2296		struct ib_sa_device *sa_dev =
2297			container_of(handler, typeof(*sa_dev), event_handler);
2298		u8 port_num = event->element.port_num - sa_dev->start_port;
2299		struct ib_sa_port *port = &sa_dev->port[port_num];
2300
2301		if (!rdma_cap_ib_sa(handler->device, port->port_num))
2302			return;
2303
2304		spin_lock_irqsave(&port->ah_lock, flags);
2305		if (port->sm_ah)
2306			kref_put(&port->sm_ah->ref, free_sm_ah);
2307		port->sm_ah = NULL;
2308		spin_unlock_irqrestore(&port->ah_lock, flags);
2309
2310		if (event->event == IB_EVENT_SM_CHANGE ||
2311		    event->event == IB_EVENT_CLIENT_REREGISTER ||
2312		    event->event == IB_EVENT_LID_CHANGE ||
2313		    event->event == IB_EVENT_PORT_ACTIVE) {
2314			unsigned long delay =
2315				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
2316
2317			spin_lock_irqsave(&port->classport_lock, flags);
2318			port->classport_info.valid = false;
2319			port->classport_info.retry_cnt = 0;
2320			spin_unlock_irqrestore(&port->classport_lock, flags);
2321			queue_delayed_work(ib_wq,
2322					   &port->ib_cpi_work, delay);
2323		}
2324		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
2325	}
2326}
2327
2328static void ib_sa_add_one(struct ib_device *device)
2329{
2330	struct ib_sa_device *sa_dev;
2331	int s, e, i;
2332	int count = 0;
2333
2334	s = rdma_start_port(device);
2335	e = rdma_end_port(device);
2336
2337	sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
2338	if (!sa_dev)
2339		return;
2340
2341	sa_dev->start_port = s;
2342	sa_dev->end_port   = e;
2343
2344	for (i = 0; i <= e - s; ++i) {
2345		spin_lock_init(&sa_dev->port[i].ah_lock);
2346		if (!rdma_cap_ib_sa(device, i + 1))
2347			continue;
2348
2349		sa_dev->port[i].sm_ah    = NULL;
2350		sa_dev->port[i].port_num = i + s;
2351
2352		spin_lock_init(&sa_dev->port[i].classport_lock);
2353		sa_dev->port[i].classport_info.valid = false;
2354
2355		sa_dev->port[i].agent =
2356			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
2357					      NULL, 0, send_handler,
2358					      recv_handler, sa_dev, 0);
2359		if (IS_ERR(sa_dev->port[i].agent))
2360			goto err;
2361
2362		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
2363		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
2364				  update_ib_cpi);
2365
2366		count++;
2367	}
2368
2369	if (!count)
2370		goto free;
2371
2372	ib_set_client_data(device, &sa_client, sa_dev);
2373
2374	/*
2375	 * We register our event handler after everything is set up,
2376	 * and then update our cached info after the event handler is
2377	 * registered to avoid any problems if a port changes state
2378	 * during our initialization.
2379	 */
2380
2381	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
2382	ib_register_event_handler(&sa_dev->event_handler);
2383
2384	for (i = 0; i <= e - s; ++i) {
2385		if (rdma_cap_ib_sa(device, i + 1))
2386			update_sm_ah(&sa_dev->port[i].update_task);
2387	}
2388
2389	return;
2390
2391err:
2392	while (--i >= 0) {
2393		if (rdma_cap_ib_sa(device, i + 1))
2394			ib_unregister_mad_agent(sa_dev->port[i].agent);
2395	}
2396free:
2397	kfree(sa_dev);
2398	return;
2399}
2400
2401static void ib_sa_remove_one(struct ib_device *device, void *client_data)
2402{
2403	struct ib_sa_device *sa_dev = client_data;
2404	int i;
2405
2406	if (!sa_dev)
2407		return;
2408
2409	ib_unregister_event_handler(&sa_dev->event_handler);
2410	flush_workqueue(ib_wq);
2411
2412	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
2413		if (rdma_cap_ib_sa(device, i + 1)) {
2414			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
2415			ib_unregister_mad_agent(sa_dev->port[i].agent);
2416			if (sa_dev->port[i].sm_ah)
2417				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
2418		}
2419
2420	}
2421
2422	kfree(sa_dev);
2423}
2424
2425int ib_sa_init(void)
2426{
2427	int ret;
2428
2429	get_random_bytes(&tid, sizeof tid);
2430
2431	atomic_set(&ib_nl_sa_request_seq, 0);
2432
2433	ret = ib_register_client(&sa_client);
2434	if (ret) {
2435		pr_err("Couldn't register ib_sa client\n");
2436		goto err1;
2437	}
2438
2439	ret = mcast_init();
2440	if (ret) {
2441		pr_err("Couldn't initialize multicast handling\n");
2442		goto err2;
2443	}
2444
2445	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
2446	if (!ib_nl_wq) {
2447		ret = -ENOMEM;
2448		goto err3;
2449	}
2450
2451	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
2452
2453	return 0;
2454
2455err3:
2456	mcast_cleanup();
2457err2:
2458	ib_unregister_client(&sa_client);
2459err1:
2460	return ret;
2461}
2462
2463void ib_sa_cleanup(void)
2464{
2465	cancel_delayed_work(&ib_nl_timed_work);
2466	flush_workqueue(ib_nl_wq);
2467	destroy_workqueue(ib_nl_wq);
2468	mcast_cleanup();
2469	ib_unregister_client(&sa_client);
2470	WARN_ON(!xa_empty(&queries));
2471}