/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
#include <rdma/rdma_cm.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN     100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX     200000
#define IB_SA_CPI_MAX_RETRY_CNT         3
#define IB_SA_CPI_RETRY_WAIT            1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
        struct ib_ah *ah;
        struct kref   ref;
        u16           pkey_index;
        u8            src_path_mask;
};

enum rdma_class_port_info_type {
        RDMA_CLASS_PORT_INFO_IB,
        RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
        enum rdma_class_port_info_type type;
        union {
                struct ib_class_port_info ib;
                struct opa_class_port_info opa;
        };
};

struct ib_sa_classport_cache {
        bool valid;
        int retry_cnt;
        struct rdma_class_port_info data;
};

struct ib_sa_port {
        struct ib_mad_agent *agent;
        struct ib_sa_sm_ah  *sm_ah;
        struct work_struct   update_task;
        struct ib_sa_classport_cache classport_info;
        struct delayed_work ib_cpi_work;
        spinlock_t classport_lock; /* protects class port info set */
        spinlock_t ah_lock;
        u32 port_num;
};

struct ib_sa_device {
        int start_port, end_port;
        struct ib_event_handler event_handler;
        struct ib_sa_port port[];
};

struct ib_sa_query {
        void (*callback)(struct ib_sa_query *sa_query, int status,
                         struct ib_sa_mad *mad);
        void (*release)(struct ib_sa_query *);
        struct ib_sa_client    *client;
        struct ib_sa_port      *port;
        struct ib_mad_send_buf *mad_buf;
        struct ib_sa_sm_ah     *sm_ah;
        int            id;
        u32            flags;
        struct list_head list; /* Local svc request list */
        u32            seq;    /* Local svc request sequence number */
        unsigned long  timeout; /* Local svc timeout */
        u8             path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE      0x00000001
#define IB_SA_CANCEL                    0x00000002
#define IB_SA_QUERY_OPA                 0x00000004

struct ib_sa_path_query {
        void (*callback)(int status, struct sa_path_rec *rec,
                         unsigned int num_paths, void *context);
        void *context;
        struct ib_sa_query sa_query;
        struct sa_path_rec *conv_pr;
};

struct ib_sa_guidinfo_query {
        void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
        void (*callback)(void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
        void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
        [LS_NLA_TYPE_PATH_RECORD]  = {.type = NLA_BINARY,
                                      .len = sizeof(struct ib_path_rec_data)},
        [LS_NLA_TYPE_TIMEOUT]      = {.type = NLA_U32},
        [LS_NLA_TYPE_SERVICE_ID]   = {.type = NLA_U64},
        [LS_NLA_TYPE_DGID]         = {.type = NLA_BINARY,
                                      .len = sizeof(struct rdma_nla_ls_gid)},
        [LS_NLA_TYPE_SGID]         = {.type = NLA_BINARY,
                                      .len = sizeof(struct rdma_nla_ls_gid)},
        [LS_NLA_TYPE_TCLASS]       = {.type = NLA_U8},
        [LS_NLA_TYPE_PKEY]         = {.type = NLA_U16},
        [LS_NLA_TYPE_QOS_CLASS]    = {.type = NLA_U16},
};

static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
        .name   = "sa",
        .add    = ib_sa_add_one,
        .remove = ib_sa_remove_one
};

static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct sa_path_rec, field),     \
        .struct_size_bytes   = sizeof_field(struct sa_path_rec, field), \
        .field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
        { PATH_REC_FIELD(service_id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
        { PATH_REC_FIELD(dgid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { PATH_REC_FIELD(sgid),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { PATH_REC_FIELD(ib.dlid),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { PATH_REC_FIELD(ib.slid),
          .offset_words = 10,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { PATH_REC_FIELD(ib.raw_traffic),
          .offset_words = 11,
          .offset_bits  = 0,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 11,
          .offset_bits  = 1,
          .size_bits    = 3 },
        { PATH_REC_FIELD(flow_label),
          .offset_words = 11,
          .offset_bits  = 4,
          .size_bits    = 20 },
        { PATH_REC_FIELD(hop_limit),
          .offset_words = 11,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { PATH_REC_FIELD(traffic_class),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 8 },
        { PATH_REC_FIELD(reversible),
          .offset_words = 12,
          .offset_bits  = 8,
          .size_bits    = 1 },
        { PATH_REC_FIELD(numb_path),
          .offset_words = 12,
          .offset_bits  = 9,
          .size_bits    = 7 },
        { PATH_REC_FIELD(pkey),
          .offset_words = 12,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { PATH_REC_FIELD(qos_class),
          .offset_words = 13,
          .offset_bits  = 0,
          .size_bits    = 12 },
        { PATH_REC_FIELD(sl),
          .offset_words = 13,
          .offset_bits  = 12,
          .size_bits    = 4 },
        { PATH_REC_FIELD(mtu_selector),
          .offset_words = 13,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { PATH_REC_FIELD(mtu),
          .offset_words = 13,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { PATH_REC_FIELD(rate_selector),
          .offset_words = 13,
          .offset_bits  = 24,
          .size_bits    = 2 },
        { PATH_REC_FIELD(rate),
          .offset_words = 13,
          .offset_bits  = 26,
          .size_bits    = 6 },
        { PATH_REC_FIELD(packet_life_time_selector),
          .offset_words = 14,
          .offset_bits  = 0,
          .size_bits    = 2 },
        { PATH_REC_FIELD(packet_life_time),
          .offset_words = 14,
          .offset_bits  = 2,
          .size_bits    = 6 },
        { PATH_REC_FIELD(preference),
          .offset_words = 14,
          .offset_bits  = 8,
          .size_bits    = 8 },
        { RESERVED,
          .offset_words = 14,
          .offset_bits  = 16,
          .size_bits    = 48 },
};

#define OPA_PATH_REC_FIELD(field) \
        .struct_offset_bytes = \
                offsetof(struct sa_path_rec, field), \
        .struct_size_bytes   = \
                sizeof_field(struct sa_path_rec, field), \
        .field_name          = "sa_path_rec:" #field

static const struct ib_field opa_path_rec_table[] = {
        { OPA_PATH_REC_FIELD(service_id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
        { OPA_PATH_REC_FIELD(dgid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { OPA_PATH_REC_FIELD(sgid),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { OPA_PATH_REC_FIELD(opa.dlid),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { OPA_PATH_REC_FIELD(opa.slid),
          .offset_words = 11,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { OPA_PATH_REC_FIELD(opa.raw_traffic),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 12,
          .offset_bits  = 1,
          .size_bits    = 3 },
        { OPA_PATH_REC_FIELD(flow_label),
          .offset_words = 12,
          .offset_bits  = 4,
          .size_bits    = 20 },
        { OPA_PATH_REC_FIELD(hop_limit),
          .offset_words = 12,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { OPA_PATH_REC_FIELD(traffic_class),
          .offset_words = 13,
          .offset_bits  = 0,
          .size_bits    = 8 },
        { OPA_PATH_REC_FIELD(reversible),
          .offset_words = 13,
          .offset_bits  = 8,
          .size_bits    = 1 },
        { OPA_PATH_REC_FIELD(numb_path),
          .offset_words = 13,
          .offset_bits  = 9,
          .size_bits    = 7 },
        { OPA_PATH_REC_FIELD(pkey),
          .offset_words = 13,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { OPA_PATH_REC_FIELD(opa.l2_8B),
          .offset_words = 14,
          .offset_bits  = 0,
          .size_bits    = 1 },
        { OPA_PATH_REC_FIELD(opa.l2_10B),
          .offset_words = 14,
          .offset_bits  = 1,
          .size_bits    = 1 },
        { OPA_PATH_REC_FIELD(opa.l2_9B),
          .offset_words = 14,
          .offset_bits  = 2,
          .size_bits    = 1 },
        { OPA_PATH_REC_FIELD(opa.l2_16B),
          .offset_words = 14,
          .offset_bits  = 3,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 14,
          .offset_bits  = 4,
          .size_bits    = 2 },
        { OPA_PATH_REC_FIELD(opa.qos_type),
          .offset_words = 14,
          .offset_bits  = 6,
          .size_bits    = 2 },
        { OPA_PATH_REC_FIELD(opa.qos_priority),
          .offset_words = 14,
          .offset_bits  = 8,
          .size_bits    = 8 },
        { RESERVED,
          .offset_words = 14,
          .offset_bits  = 16,
          .size_bits    = 3 },
        { OPA_PATH_REC_FIELD(sl),
          .offset_words = 14,
          .offset_bits  = 19,
          .size_bits    = 5 },
        { RESERVED,
          .offset_words = 14,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { OPA_PATH_REC_FIELD(mtu_selector),
          .offset_words = 15,
          .offset_bits  = 0,
          .size_bits    = 2 },
        { OPA_PATH_REC_FIELD(mtu),
          .offset_words = 15,
          .offset_bits  = 2,
          .size_bits    = 6 },
        { OPA_PATH_REC_FIELD(rate_selector),
          .offset_words = 15,
          .offset_bits  = 8,
          .size_bits    = 2 },
        { OPA_PATH_REC_FIELD(rate),
          .offset_words = 15,
          .offset_bits  = 10,
          .size_bits    = 6 },
        { OPA_PATH_REC_FIELD(packet_life_time_selector),
          .offset_words = 15,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { OPA_PATH_REC_FIELD(packet_life_time),
          .offset_words = 15,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { OPA_PATH_REC_FIELD(preference),
          .offset_words = 15,
          .offset_bits  = 24,
          .size_bits    = 8 },
};

#define MCMEMBER_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),      \
        .struct_size_bytes   = sizeof_field(struct ib_sa_mcmember_rec, field),  \
        .field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
        { MCMEMBER_REC_FIELD(mgid),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { MCMEMBER_REC_FIELD(port_gid),
          .offset_words = 4,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { MCMEMBER_REC_FIELD(qkey),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { MCMEMBER_REC_FIELD(mlid),
          .offset_words = 9,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { MCMEMBER_REC_FIELD(mtu_selector),
          .offset_words = 9,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(mtu),
          .offset_words = 9,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(traffic_class),
          .offset_words = 9,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { MCMEMBER_REC_FIELD(pkey),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { MCMEMBER_REC_FIELD(rate_selector),
          .offset_words = 10,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(rate),
          .offset_words = 10,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(packet_life_time_selector),
          .offset_words = 10,
          .offset_bits  = 24,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(packet_life_time),
          .offset_words = 10,
          .offset_bits  = 26,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(sl),
          .offset_words = 11,
          .offset_bits  = 0,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(flow_label),
          .offset_words = 11,
          .offset_bits  = 4,
          .size_bits    = 20 },
        { MCMEMBER_REC_FIELD(hop_limit),
          .offset_words = 11,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { MCMEMBER_REC_FIELD(scope),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(join_state),
          .offset_words = 12,
          .offset_bits  = 4,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(proxy_join),
          .offset_words = 12,
          .offset_bits  = 8,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 12,
          .offset_bits  = 9,
          .size_bits    = 23 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_class_port_info, field),      \
        .struct_size_bytes   = sizeof_field(struct ib_class_port_info, field),  \
        .field_name          = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
        { CLASSPORTINFO_REC_FIELD(base_version),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 8 },
        { CLASSPORTINFO_REC_FIELD(class_version),
          .offset_words = 0,
          .offset_bits  = 8,
          .size_bits    = 8 },
        { CLASSPORTINFO_REC_FIELD(capability_mask),
          .offset_words = 0,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
          .offset_words = 1,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(redirect_gid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(redirect_lid),
          .offset_words = 7,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { CLASSPORTINFO_REC_FIELD(redirect_pkey),
          .offset_words = 7,
          .offset_bits  = 16,
          .size_bits    = 16 },

        { CLASSPORTINFO_REC_FIELD(redirect_qp),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(redirect_qkey),
          .offset_words = 9,
          .offset_bits  = 0,
          .size_bits    = 32 },

        { CLASSPORTINFO_REC_FIELD(trap_gid),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { CLASSPORTINFO_REC_FIELD(trap_tcslfl),
          .offset_words = 14,
          .offset_bits  = 0,
          .size_bits    = 32 },

        { CLASSPORTINFO_REC_FIELD(trap_lid),
          .offset_words = 15,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { CLASSPORTINFO_REC_FIELD(trap_pkey),
          .offset_words = 15,
          .offset_bits  = 16,
          .size_bits    = 16 },

        { CLASSPORTINFO_REC_FIELD(trap_hlqp),
          .offset_words = 16,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(trap_qkey),
          .offset_words = 17,
          .offset_bits  = 0,
          .size_bits    = 32 },
};

#define OPA_CLASSPORTINFO_REC_FIELD(field) \
        .struct_offset_bytes = \
                offsetof(struct opa_class_port_info, field), \
        .struct_size_bytes = \
                sizeof_field(struct opa_class_port_info, field), \
        .field_name = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
        { OPA_CLASSPORTINFO_REC_FIELD(base_version),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 8 },
        { OPA_CLASSPORTINFO_REC_FIELD(class_version),
          .offset_words = 0,
          .offset_bits  = 8,
          .size_bits    = 8 },
        { OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
          .offset_words = 0,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
          .offset_words = 1,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
          .offset_words = 7,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
          .offset_words = 9,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
          .offset_words = 14,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
          .offset_words = 15,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
          .offset_words = 16,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
          .offset_words = 17,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
          .offset_words = 18,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
          .offset_words = 18,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
          .offset_words = 19,
          .offset_bits  = 0,
          .size_bits    = 8 },
        { RESERVED,
          .offset_words = 19,
          .offset_bits  = 8,
          .size_bits    = 24 },
};

#define GUIDINFO_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),      \
        .struct_size_bytes   = sizeof_field(struct ib_sa_guidinfo_rec, field),  \
        .field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
        { GUIDINFO_REC_FIELD(lid),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { GUIDINFO_REC_FIELD(block_num),
          .offset_words = 0,
          .offset_bits  = 16,
          .size_bits    = 8 },
        { GUIDINFO_REC_FIELD(res1),
          .offset_words = 0,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { GUIDINFO_REC_FIELD(res2),
          .offset_words = 1,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { GUIDINFO_REC_FIELD(guid_info_list),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 512 },
};

#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
        query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
        return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
                                     struct ib_sa_query *query)
{
        struct sa_path_rec *sa_rec = query->mad_buf->context[1];
        struct ib_sa_mad *mad = query->mad_buf->mad;
        ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
        u16 val16;
        u64 val64;
        struct rdma_ls_resolve_header *header;

        query->mad_buf->context[1] = NULL;

        /* Construct the family header first */
        header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
        strscpy_pad(header->device_name,
                    dev_name(&query->port->agent->device->dev),
                    LS_DEVICE_NAME_MAX);
        header->port_num = query->port->port_num;

        if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
            sa_rec->reversible != 0)
                query->path_use = LS_RESOLVE_PATH_USE_ALL;
        else
                query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
        header->path_use = query->path_use;

        /* Now build the attributes */
        if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
                val64 = be64_to_cpu(sa_rec->service_id);
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
                        sizeof(val64), &val64);
        }
        if (comp_mask & IB_SA_PATH_REC_DGID)
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
                        sizeof(sa_rec->dgid), &sa_rec->dgid);
        if (comp_mask & IB_SA_PATH_REC_SGID)
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
                        sizeof(sa_rec->sgid), &sa_rec->sgid);
        if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
                        sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

        if (comp_mask & IB_SA_PATH_REC_PKEY) {
                val16 = be16_to_cpu(sa_rec->pkey);
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
                        sizeof(val16), &val16);
        }
        if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
                val16 = be16_to_cpu(sa_rec->qos_class);
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
                        sizeof(val16), &val16);
        }
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
        int len = 0;

        if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
                len += nla_total_size(sizeof(u64));
        if (comp_mask & IB_SA_PATH_REC_DGID)
                len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
        if (comp_mask & IB_SA_PATH_REC_SGID)
                len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
        if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
                len += nla_total_size(sizeof(u8));
        if (comp_mask & IB_SA_PATH_REC_PKEY)
                len += nla_total_size(sizeof(u16));
        if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
                len += nla_total_size(sizeof(u16));

        /*
         * Make sure that at least some of the required comp_mask bits are
         * set.
         */
        if (WARN_ON(len == 0))
                return len;

        /* Add the family header */
        len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

        return len;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
        void *data;
        struct ib_sa_mad *mad;
        int len;
        unsigned long flags;
        unsigned long delay;
        gfp_t gfp_flag;
        int ret;

        INIT_LIST_HEAD(&query->list);
        query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

        mad = query->mad_buf->mad;
        len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
        if (len <= 0)
                return -EMSGSIZE;

        skb = nlmsg_new(len, gfp_mask);
        if (!skb)
                return -ENOMEM;

        /* Put nlmsg header only for now */
        data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
                            RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
        if (!data) {
                nlmsg_free(skb);
                return -EMSGSIZE;
        }

        /* Add attributes */
        ib_nl_set_path_rec_attrs(skb, query);

        /* Repair the nlmsg header length */
        nlmsg_end(skb, nlh);

        gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
                GFP_NOWAIT;

        spin_lock_irqsave(&ib_nl_request_lock, flags);
        ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);

        if (ret)
                goto out;

        /* Put the request on the list. */
        delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
        query->timeout = delay + jiffies;
        list_add_tail(&query->list, &ib_nl_request_list);
        /* Start the timeout if this is the only request */
        if (ib_nl_request_list.next == &query->list)
                queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);

out:
        spin_unlock_irqrestore(&ib_nl_request_lock, flags);

        return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
        unsigned long flags;
        struct ib_sa_query *wait_query;
        int found = 0;

        spin_lock_irqsave(&ib_nl_request_lock, flags);
        list_for_each_entry(wait_query, &ib_nl_request_list, list) {
                /* Let the timeout routine take care of the callback */
                if (query == wait_query) {
                        query->flags |= IB_SA_CANCEL;
                        query->timeout = jiffies;
                        list_move(&query->list, &ib_nl_request_list);
                        found = 1;
                        mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
                        break;
                }
        }
        spin_unlock_irqrestore(&ib_nl_request_lock, flags);

        return found;
}

static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
                                           const struct nlmsghdr *nlh)
{
        struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM];
        struct ib_sa_path_query *path_query;
        struct ib_path_rec_data *rec_data;
        struct ib_mad_send_wc mad_send_wc;
        const struct nlattr *head, *curr;
        struct ib_sa_mad *mad = NULL;
        int len, rem, status = -EIO;
        unsigned int num_prs = 0;
        u32 mask = 0;

        if (!query->callback)
                goto out;

        path_query = container_of(query, struct ib_sa_path_query, sa_query);
        mad = query->mad_buf->mad;

        head = (const struct nlattr *) nlmsg_data(nlh);
        len = nlmsg_len(nlh);
        switch (query->path_use) {
        case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
                mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
                break;

        case LS_RESOLVE_PATH_USE_ALL:
                mask = IB_PATH_PRIMARY;
                break;

        case LS_RESOLVE_PATH_USE_GMP:
        default:
                mask = IB_PATH_PRIMARY | IB_PATH_GMP |
                        IB_PATH_BIDIRECTIONAL;
                break;
        }

        nla_for_each_attr(curr, head, len, rem) {
                if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
                        continue;

                rec_data = nla_data(curr);
                if ((rec_data->flags & mask) != mask)
                        continue;

                if ((query->flags & IB_SA_QUERY_OPA) ||
                    path_query->conv_pr) {
                        mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
                        memcpy(mad->data, rec_data->path_rec,
                               sizeof(rec_data->path_rec));
                        query->callback(query, 0, mad);
                        goto out;
                }

                status = 0;
                ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
                          rec_data->path_rec, &recs[num_prs]);
                recs[num_prs].flags = rec_data->flags;
                recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB;
                sa_path_set_dmac_zero(&recs[num_prs]);

                num_prs++;
                if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
                        break;
        }

        if (!status) {
                mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
                path_query->callback(status, recs, num_prs,
                                     path_query->context);
        } else
                query->callback(query, status, mad);

out:
        mad_send_wc.send_buf = query->mad_buf;
        mad_send_wc.status = IB_WC_SUCCESS;
        send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
        unsigned long flags;
        struct ib_sa_query *query;
        unsigned long delay;
        struct ib_mad_send_wc mad_send_wc;
        int ret;

        spin_lock_irqsave(&ib_nl_request_lock, flags);
        while (!list_empty(&ib_nl_request_list)) {
                query = list_entry(ib_nl_request_list.next,
                                   struct ib_sa_query, list);

                if (time_after(query->timeout, jiffies)) {
                        delay = query->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
                        queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
                        break;
                }

                list_del(&query->list);
                ib_sa_disable_local_svc(query);
                /* Hold the lock to protect against query cancellation */
                if (ib_sa_query_cancelled(query))
                        ret = -1;
                else
                        ret = ib_post_send_mad(query->mad_buf, NULL);
                if (ret) {
                        mad_send_wc.send_buf = query->mad_buf;
                        mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
                        spin_unlock_irqrestore(&ib_nl_request_lock, flags);
                        send_handler(query->port->agent, &mad_send_wc);
                        spin_lock_irqsave(&ib_nl_request_lock, flags);
                }
        }
        spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

int ib_nl_handle_set_timeout(struct sk_buff *skb,
                             struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack)
{
        int timeout, delta, abs_delta;
        const struct nlattr *attr;
        unsigned long flags;
        struct ib_sa_query *query;
        long delay = 0;
        struct nlattr *tb[LS_NLA_TYPE_MAX];
        int ret;

        if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
            !(NETLINK_CB(skb).sk))
                return -EPERM;

        ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
                                   nlmsg_len(nlh), ib_nl_policy, NULL);
        attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
        if (ret || !attr)
                goto settimeout_out;

        timeout = *(int *) nla_data(attr);
        if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
                timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
        if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
                timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

        delta = timeout - sa_local_svc_timeout_ms;
        if (delta < 0)
                abs_delta = -delta;
        else
                abs_delta = delta;

        if (delta != 0) {
                spin_lock_irqsave(&ib_nl_request_lock, flags);
                sa_local_svc_timeout_ms = timeout;
                list_for_each_entry(query, &ib_nl_request_list, list) {
                        if (delta < 0 && abs_delta > query->timeout)
                                query->timeout = 0;
                        else
                                query->timeout += delta;

                        /* Get the new delay from the first entry */
                        if (!delay) {
                                delay = query->timeout - jiffies;
                                if (delay <= 0)
                                        delay = 1;
                        }
                }
                if (delay)
                        mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
                                         (unsigned long)delay);
                spin_unlock_irqrestore(&ib_nl_request_lock, flags);
        }

settimeout_out:
        return 0;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
        struct nlattr *tb[LS_NLA_TYPE_MAX];
        int ret;

        if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
                return 0;

        ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
                                   nlmsg_len(nlh), ib_nl_policy, NULL);
        if (ret)
                return 0;

        return 1;
}

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
                              struct nlmsghdr *nlh,
                              struct netlink_ext_ack *extack)
{
        unsigned long flags;
        struct ib_sa_query *query = NULL, *iter;
        struct ib_mad_send_buf *send_buf;
        struct ib_mad_send_wc mad_send_wc;
        int ret;

        if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
            !(NETLINK_CB(skb).sk))
                return -EPERM;

        spin_lock_irqsave(&ib_nl_request_lock, flags);
        list_for_each_entry(iter, &ib_nl_request_list, list) {
                /*
                 * If the query is cancelled, let the timeout routine
                 * take care of it.
                 */
                if (nlh->nlmsg_seq == iter->seq) {
                        if (!ib_sa_query_cancelled(iter)) {
                                list_del(&iter->list);
                                query = iter;
                        }
                        break;
                }
        }

        if (!query) {
                spin_unlock_irqrestore(&ib_nl_request_lock, flags);
                goto resp_out;
        }

        send_buf = query->mad_buf;

        if (!ib_nl_is_good_resolve_resp(nlh)) {
                /* If the result is a failure, send out the packet via IB */
                ib_sa_disable_local_svc(query);
                ret = ib_post_send_mad(query->mad_buf, NULL);
                spin_unlock_irqrestore(&ib_nl_request_lock, flags);
                if (ret) {
                        mad_send_wc.send_buf = send_buf;
                        mad_send_wc.status = IB_WC_GENERAL_ERR;
                        send_handler(query->port->agent, &mad_send_wc);
                }
        } else {
                spin_unlock_irqrestore(&ib_nl_request_lock, flags);
                ib_nl_process_good_resolve_rsp(query, nlh);
        }

resp_out:
        return 0;
}

static void free_sm_ah(struct kref *kref)
{
        struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

        rdma_destroy_ah(sm_ah->ah, 0);
        kfree(sm_ah);
}

void ib_sa_register_client(struct ib_sa_client *client)
{
        atomic_set(&client->users, 1);
        init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
        ib_sa_client_put(client);
        wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
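
/*
 * Usage sketch (illustrative only; my_sa_client and the module hooks are
 * hypothetical): an SA consumer embeds a struct ib_sa_client and brackets
 * all of its queries between the register/unregister calls above:
 *
 *      static struct ib_sa_client my_sa_client;
 *
 *      static int __init my_module_init(void)
 *      {
 *              ib_sa_register_client(&my_sa_client);
 *              return 0;
 *      }
 *
 *      static void __exit my_module_exit(void)
 *      {
 *              // Blocks until every query issued against &my_sa_client
 *              // has dropped its reference (see client->comp above).
 *              ib_sa_unregister_client(&my_sa_client);
 *      }
 */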

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id: ID of query to cancel
 * @query: query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
        unsigned long flags;
        struct ib_mad_send_buf *mad_buf;

        xa_lock_irqsave(&queries, flags);
        if (xa_load(&queries, id) != query) {
                xa_unlock_irqrestore(&queries, flags);
                return;
        }
        mad_buf = query->mad_buf;
        xa_unlock_irqrestore(&queries, flags);

        /*
         * If the query is still on the netlink request list, schedule
         * it to be cancelled by the timeout routine. Otherwise, it has been
         * sent to the MAD layer and has to be cancelled from there.
         */
        if (!ib_nl_cancel_request(query))
                ib_cancel_mad(mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
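
/*
 * Usage sketch (illustrative): callers keep both the non-negative id
 * returned by a query call and the query pointer returned through the
 * **sa_query argument, and pass the pair back in to cancel:
 *
 *      id = ib_sa_path_rec_get(..., &query);
 *      if (id >= 0 && need_to_abort)
 *              ib_sa_cancel_query(id, query);
 *
 * The query's callback then completes with status == -EINTR.
 */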

static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
{
        struct ib_sa_device *sa_dev;
        struct ib_sa_port *port;
        unsigned long flags;
        u8 src_path_mask;

        sa_dev = ib_get_client_data(device, &sa_client);
        if (!sa_dev)
                return 0x7f;

        port = &sa_dev->port[port_num - sa_dev->start_port];
        spin_lock_irqsave(&port->ah_lock, flags);
        src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
        spin_unlock_irqrestore(&port->ah_lock, flags);

        return src_path_mask;
}

static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
                                   struct sa_path_rec *rec,
                                   struct rdma_ah_attr *ah_attr,
                                   const struct ib_gid_attr *gid_attr)
{
        enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);

        if (!gid_attr) {
                gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
                                                 port_num, NULL);
                if (IS_ERR(gid_attr))
                        return PTR_ERR(gid_attr);
        } else
                rdma_hold_gid_attr(gid_attr);

        rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
                                be32_to_cpu(rec->flow_label),
                                rec->hop_limit, rec->traffic_class,
                                gid_attr);
        return 0;
}

/**
 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
 *   an SA path record.
 * @device: Device with which the address handle attributes are associated.
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for the attribute initialization.
 * @ah_attr: address handle attributes to initialize from the path record.
 * @gid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success:
 * (a) for the IB link layer, ah_attr optionally holds a reference to an SGID
 *     attribute when a GRH is present.
 * (b) for the RoCE link layer, ah_attr holds a reference to an SGID attribute.
 * The user must invoke rdma_destroy_ah_attr() to release the reference to SGID
 * attributes which were initialized using ib_init_ah_attr_from_path().
 */
int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
                              struct sa_path_rec *rec,
                              struct rdma_ah_attr *ah_attr,
                              const struct ib_gid_attr *gid_attr)
{
        int ret = 0;

        memset(ah_attr, 0, sizeof(*ah_attr));
        ah_attr->type = rdma_ah_find_type(device, port_num);
        rdma_ah_set_sl(ah_attr, rec->sl);
        rdma_ah_set_port_num(ah_attr, port_num);
        rdma_ah_set_static_rate(ah_attr, rec->rate);

        if (sa_path_is_roce(rec)) {
                ret = roce_resolve_route_from_path(rec, gid_attr);
                if (ret)
                        return ret;

                memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
        } else {
                rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
                if (sa_path_is_opa(rec) &&
                    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
                        rdma_ah_set_make_grd(ah_attr, true);

                rdma_ah_set_path_bits(ah_attr,
                                      be32_to_cpu(sa_path_get_slid(rec)) &
                                      get_src_path_mask(device, port_num));
        }

        if (rec->hop_limit > 0 || sa_path_is_roce(rec))
                ret = init_ah_attr_grh_fields(device, port_num,
                                              rec, ah_attr, gid_attr);
        return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);
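
/*
 * Usage sketch (illustrative; pd, device, port_num and rec are assumed to
 * exist in the caller): turning a resolved path record into an AH. Note
 * the mandatory rdma_destroy_ah_attr() to drop the SGID attribute
 * reference taken on success:
 *
 *      struct rdma_ah_attr ah_attr;
 *      struct ib_ah *ah;
 *      int ret;
 *
 *      ret = ib_init_ah_attr_from_path(device, port_num, rec, &ah_attr,
 *                                      NULL);
 *      if (ret)
 *              return ret;
 *      ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *      rdma_destroy_ah_attr(&ah_attr);
 *      if (IS_ERR(ah))
 *              return PTR_ERR(ah);
 */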

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
        struct rdma_ah_attr ah_attr;
        unsigned long flags;

        spin_lock_irqsave(&query->port->ah_lock, flags);
        if (!query->port->sm_ah) {
                spin_unlock_irqrestore(&query->port->ah_lock, flags);
                return -EAGAIN;
        }
        kref_get(&query->port->sm_ah->ref);
        query->sm_ah = query->port->sm_ah;
        spin_unlock_irqrestore(&query->port->ah_lock, flags);

        /*
         * Always check if sm_ah has valid dlid assigned,
         * before querying for class port info
         */
        if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
            !rdma_is_valid_unicast_lid(&ah_attr)) {
                kref_put(&query->sm_ah->ref, free_sm_ah);
                return -EAGAIN;
        }
        query->mad_buf = ib_create_send_mad(query->port->agent, 1,
                                            query->sm_ah->pkey_index,
                                            0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
                                            gfp_mask,
                                            ((query->flags & IB_SA_QUERY_OPA) ?
                                             OPA_MGMT_BASE_VERSION :
                                             IB_MGMT_BASE_VERSION));
        if (IS_ERR(query->mad_buf)) {
                kref_put(&query->sm_ah->ref, free_sm_ah);
                return -ENOMEM;
        }

        query->mad_buf->ah = query->sm_ah->ah;

        return 0;
}

static void free_mad(struct ib_sa_query *query)
{
        ib_free_send_mad(query->mad_buf);
        kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
        struct ib_sa_mad *mad = query->mad_buf->mad;
        unsigned long flags;

        memset(mad, 0, sizeof(*mad));

        if (query->flags & IB_SA_QUERY_OPA) {
                mad->mad_hdr.base_version  = OPA_MGMT_BASE_VERSION;
                mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
        } else {
                mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
                mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
        }
        mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
        spin_lock_irqsave(&tid_lock, flags);
        mad->mad_hdr.tid =
                cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
        spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
                    gfp_t gfp_mask)
{
        unsigned long flags;
        int ret, id;
        const int nmbr_sa_query_retries = 10;

        xa_lock_irqsave(&queries, flags);
        ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
        xa_unlock_irqrestore(&queries, flags);
        if (ret < 0)
                return ret;

        query->mad_buf->timeout_ms = timeout_ms / nmbr_sa_query_retries;
        query->mad_buf->retries = nmbr_sa_query_retries;
        if (!query->mad_buf->timeout_ms) {
                /* Special case, very small timeout_ms */
                query->mad_buf->timeout_ms = 1;
                query->mad_buf->retries = timeout_ms;
        }
        query->mad_buf->context[0] = query;
        query->id = id;

        if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
            (!(query->flags & IB_SA_QUERY_OPA))) {
                if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
                        if (!ib_nl_make_request(query, gfp_mask))
                                return id;
                }
                ib_sa_disable_local_svc(query);
        }

        ret = ib_post_send_mad(query->mad_buf, NULL);
        if (ret) {
                xa_lock_irqsave(&queries, flags);
                __xa_erase(&queries, id);
                xa_unlock_irqrestore(&queries, flags);
        }

        /*
         * It's not safe to dereference query any more, because the
         * send may already have completed and freed the query in
         * another context.
         */
        return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
        ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
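
/*
 * Round-trip sketch (illustrative): ib_sa_pack_path()/ib_sa_unpack_path()
 * convert between struct sa_path_rec and the on-the-wire PathRecord layout
 * described by path_rec_table above. The buffer below is sized to the SA
 * MAD data area for clarity:
 *
 *      struct sa_path_rec in = {}, out = {};
 *      u8 wire[IB_MGMT_SA_DATA];
 *
 *      in.pkey = cpu_to_be16(0xffff);
 *      in.sl = 0;
 *      ib_sa_pack_path(&in, wire);
 *      ib_sa_unpack_path(wire, &out);
 *      // out now holds the same values as in, for the fields covered
 *      // by path_rec_table.
 */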

static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
                                         struct ib_sa_device *sa_dev,
                                         u32 port_num)
{
        struct ib_sa_port *port;
        unsigned long flags;
        bool ret = false;

        port = &sa_dev->port[port_num - sa_dev->start_port];
        spin_lock_irqsave(&port->classport_lock, flags);
        if (!port->classport_info.valid)
                goto ret;

        if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
                ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
                        OPA_CLASS_PORT_INFO_PR_SUPPORT;
ret:
        spin_unlock_irqrestore(&port->classport_lock, flags);
        return ret;
}

enum opa_pr_supported {
        PR_NOT_SUPPORTED,
        PR_OPA_SUPPORTED,
        PR_IB_SUPPORTED
};

/*
 * opa_pr_query_possible - Check if the current PR query can be an OPA query.
 *
 * Returns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
                                 struct ib_sa_device *sa_dev,
                                 struct ib_device *device, u32 port_num)
{
        struct ib_port_attr port_attr;

        if (ib_query_port(device, port_num, &port_attr))
                return PR_NOT_SUPPORTED;

        if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
                return PR_OPA_SUPPORTED;

        if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
                return PR_NOT_SUPPORTED;
        else
                return PR_IB_SUPPORTED;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
                                    int status, struct ib_sa_mad *mad)
{
        struct ib_sa_path_query *query =
                container_of(sa_query, struct ib_sa_path_query, sa_query);
        struct sa_path_rec rec = {};

        if (!mad) {
                query->callback(status, NULL, 0, query->context);
                return;
        }

        if (sa_query->flags & IB_SA_QUERY_OPA) {
                ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
                          mad->data, &rec);
                rec.rec_type = SA_PATH_REC_TYPE_OPA;
                query->callback(status, &rec, 1, query->context);
                return;
        }

        ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
                  mad->data, &rec);
        rec.rec_type = SA_PATH_REC_TYPE_IB;
        sa_path_set_dmac_zero(&rec);

        if (query->conv_pr) {
                struct sa_path_rec opa;

                memset(&opa, 0, sizeof(struct sa_path_rec));
                sa_convert_path_ib_to_opa(&opa, &rec);
                query->callback(status, &opa, 1, query->context);
        } else {
                query->callback(status, &rec, 1, query->context);
        }
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
        struct ib_sa_path_query *query =
                container_of(sa_query, struct ib_sa_path_query, sa_query);

        kfree(query->conv_pr);
        kfree(query);
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Path Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
                       struct ib_device *device, u32 port_num,
                       struct sa_path_rec *rec,
                       ib_sa_comp_mask comp_mask,
                       unsigned long timeout_ms, gfp_t gfp_mask,
                       void (*callback)(int status,
                                        struct sa_path_rec *resp,
                                        unsigned int num_paths, void *context),
                       void *context,
                       struct ib_sa_query **sa_query)
{
        struct ib_sa_path_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        enum opa_pr_supported status;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
            (rec->rec_type != SA_PATH_REC_TYPE_OPA))
                return -EINVAL;

        port = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
                status = opa_pr_query_possible(client, sa_dev, device, port_num);
                if (status == PR_NOT_SUPPORTED) {
                        ret = -EINVAL;
                        goto err1;
                } else if (status == PR_OPA_SUPPORTED) {
                        query->sa_query.flags |= IB_SA_QUERY_OPA;
                } else {
                        query->conv_pr =
                                kmalloc(sizeof(*query->conv_pr), gfp_mask);
                        if (!query->conv_pr) {
                                ret = -ENOMEM;
                                goto err1;
                        }
                }
        }

        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err2;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(&query->sa_query, agent);

        query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
        query->sa_query.release  = ib_sa_path_rec_release;
        mad->mad_hdr.method      = IB_MGMT_METHOD_GET;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_PATH_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        if (query->sa_query.flags & IB_SA_QUERY_OPA) {
                ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
                        rec, mad->data);
        } else if (query->conv_pr) {
                sa_convert_path_opa_to_ib(query->conv_pr, rec);
                ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
                        query->conv_pr, mad->data);
        } else {
                ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
                        rec, mad->data);
        }

        *sa_query = &query->sa_query;

        query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
        query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
                                                query->conv_pr : rec;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err3;

        return ret;

err3:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);
err2:
        kfree(query->conv_pr);
err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
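
/*
 * Usage sketch (illustrative): an asynchronous PathRecord lookup. The
 * callback runs from the MAD layer, and resp is only valid when
 * status == 0. my_sa_client, my_path_handler, my_ctx, local_gid and
 * remote_gid are hypothetical names:
 *
 *      static void my_path_handler(int status, struct sa_path_rec *resp,
 *                                  unsigned int num_paths, void *context)
 *      {
 *              if (status)
 *                      pr_err("path query failed: %d\n", status);
 *      }
 *
 *      struct sa_path_rec rec = {};
 *      struct ib_sa_query *query;
 *      int id;
 *
 *      rec.rec_type = SA_PATH_REC_TYPE_IB;
 *      rec.sgid = local_gid;
 *      rec.dgid = remote_gid;
 *      rec.numb_path = 1;
 *      id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *                              IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID |
 *                              IB_SA_PATH_REC_NUMB_PATH,
 *                              2000, GFP_KERNEL, my_path_handler, my_ctx,
 *                              &query);
 */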

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
                                        int status, struct ib_sa_mad *mad)
{
        struct ib_sa_mcmember_query *query =
                container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

        if (mad) {
                struct ib_sa_mcmember_rec rec;

                ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
                             struct ib_device *device, u32 port_num,
                             u8 method,
                             struct ib_sa_mcmember_rec *rec,
                             ib_sa_comp_mask comp_mask,
                             unsigned long timeout_ms, gfp_t gfp_mask,
                             void (*callback)(int status,
                                              struct ib_sa_mcmember_rec *resp,
                                              void *context),
                             void *context,
                             struct ib_sa_query **sa_query)
{
        struct ib_sa_mcmember_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(&query->sa_query, agent);

        query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
        query->sa_query.release  = ib_sa_mcmember_rec_release;
        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
                                        int status, struct ib_sa_mad *mad)
{
        struct ib_sa_guidinfo_query *query =
                container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

        if (mad) {
                struct ib_sa_guidinfo_rec rec;

                ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
                              struct ib_device *device, u32 port_num,
                              struct ib_sa_guidinfo_rec *rec,
                              ib_sa_comp_mask comp_mask, u8 method,
                              unsigned long timeout_ms, gfp_t gfp_mask,
                              void (*callback)(int status,
                                               struct ib_sa_guidinfo_rec *resp,
                                               void *context),
                              void *context,
                              struct ib_sa_query **sa_query)
{
        struct ib_sa_guidinfo_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        if (method != IB_MGMT_METHOD_GET &&
            method != IB_MGMT_METHOD_SET &&
            method != IB_SA_METHOD_DELETE) {
                return -EINVAL;
        }

        port = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(&query->sa_query, agent);

        query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
        query->sa_query.release  = ib_sa_guidinfo_rec_release;

        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
                mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

struct ib_classport_info_context {
        struct completion done;
        struct ib_sa_query *sa_query;
};

static void ib_classportinfo_cb(void *context)
{
        struct ib_classport_info_context *cb_ctx = context;

        complete(&cb_ctx->done);
}

static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
                                              int status, struct ib_sa_mad *mad)
{
        unsigned long flags;
        struct ib_sa_classport_info_query *query =
                container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
        struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

        if (mad) {
                if (sa_query->flags & IB_SA_QUERY_OPA) {
                        struct opa_class_port_info rec;

                        ib_unpack(opa_classport_info_rec_table,
                                  ARRAY_SIZE(opa_classport_info_rec_table),
                                  mad->data, &rec);

                        spin_lock_irqsave(&sa_query->port->classport_lock,
                                          flags);
                        if (!status && !info->valid) {
                                memcpy(&info->data.opa, &rec,
                                       sizeof(info->data.opa));

                                info->valid = true;
                                info->data.type = RDMA_CLASS_PORT_INFO_OPA;
                        }
                        spin_unlock_irqrestore(&sa_query->port->classport_lock,
                                               flags);

                } else {
                        struct ib_class_port_info rec;

                        ib_unpack(ib_classport_info_rec_table,
                                  ARRAY_SIZE(ib_classport_info_rec_table),
                                  mad->data, &rec);

                        spin_lock_irqsave(&sa_query->port->classport_lock,
                                          flags);
                        if (!status && !info->valid) {
                                memcpy(&info->data.ib, &rec,
                                       sizeof(info->data.ib));

                                info->valid = true;
                                info->data.type = RDMA_CLASS_PORT_INFO_IB;
                        }
                        spin_unlock_irqrestore(&sa_query->port->classport_lock,
                                               flags);
                }
        }
        query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_classport_info_query,
                           sa_query));
}

static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
                                          unsigned long timeout_ms,
                                          void (*callback)(void *context),
                                          void *context,
                                          struct ib_sa_query **sa_query)
{
        struct ib_mad_agent *agent;
        struct ib_sa_classport_info_query *query;
        struct ib_sa_mad *mad;
        gfp_t gfp_mask = GFP_KERNEL;
        int ret;

        agent = port->agent;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
                                                 port->port_num) ?
                                 IB_SA_QUERY_OPA : 0;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err_free;

        query->callback = callback;
        query->context = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(&query->sa_query, agent);

        query->sa_query.callback = ib_sa_classport_info_rec_callback;
        query->sa_query.release  = ib_sa_classport_info_rec_release;
        mad->mad_hdr.method      = IB_MGMT_METHOD_GET;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
        mad->sa_hdr.comp_mask    = 0;
        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err_free_mad;

        return ret;

err_free_mad:
        *sa_query = NULL;
        free_mad(&query->sa_query);

err_free:
        kfree(query);
        return ret;
}

static void update_ib_cpi(struct work_struct *work)
{
        struct ib_sa_port *port =
                container_of(work, struct ib_sa_port, ib_cpi_work.work);
        struct ib_classport_info_context *cb_context;
        unsigned long flags;
        int ret;

        /* If the classport info is valid, nothing
         * to do here.
         */
        spin_lock_irqsave(&port->classport_lock, flags);
        if (port->classport_info.valid) {
                spin_unlock_irqrestore(&port->classport_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&port->classport_lock, flags);

        cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
        if (!cb_context)
                goto err_nomem;

        init_completion(&cb_context->done);

        ret = ib_sa_classport_info_rec_query(port, 3000,
                                             ib_classportinfo_cb, cb_context,
                                             &cb_context->sa_query);
        if (ret < 0)
                goto free_cb_err;
        wait_for_completion(&cb_context->done);
free_cb_err:
        kfree(cb_context);
        spin_lock_irqsave(&port->classport_lock, flags);

        /* If the classport info is still not valid, the query should have
         * failed for some reason. Retry issuing the query.
         */
        if (!port->classport_info.valid) {
                port->classport_info.retry_cnt++;
                if (port->classport_info.retry_cnt <=
                    IB_SA_CPI_MAX_RETRY_CNT) {
                        unsigned long delay =
                                msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

                        queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
                }
        }
        spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
        return;
}
1984
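/*
 * MAD send completion handler. Maps the work completion status onto an
 * errno for the query callback (a successful send needs no callback
 * here because the receive path already delivered the response), then
 * erases the query from the XArray of outstanding queries and releases
 * its resources.
 */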
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	xa_lock_irqsave(&queries, flags);
	__xa_erase(&queries, query->id);
	xa_unlock_irqrestore(&queries, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}

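/*
 * MAD receive handler. A response without a matching send buffer is
 * dropped; otherwise the result is reported to the query callback as
 * 0 on success, -EINVAL for an SA-level error in the MAD header, or
 * -EIO for a failed work completion.
 */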
static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

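/*
 * Rebuild the cached address handle pointing at the Subnet Manager.
 * The new AH is swapped in under ah_lock; the old one (if any) is
 * released through its kref.
 */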
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr ah_attr;
	bool grh_required;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(port->agent->device,
					 port->port_num);
	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
	rdma_ah_set_port_num(&ah_attr, port->port_num);

	grh_required = rdma_is_grh_required(port->agent->device,
					    port->port_num);

	/*
	 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
	 * differentiated from a permissive LID of 0xFFFF. We set the
	 * grh_required flag here so the SA can program the DGID in the
	 * address handle appropriately.
	 */
	if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
	    (grh_required ||
	     port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
		rdma_ah_set_make_grd(&ah_attr, true);

	if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
		rdma_ah_set_subnet_prefix(&ah_attr,
					  cpu_to_be64(port_attr.subnet_prefix));
		rdma_ah_set_interface_id(&ah_attr,
					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
				    RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

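/*
 * Device event handler: any event that may change SM reachability
 * drops the cached SM address handle, invalidates the ClassPortInfo
 * cache where appropriate, and queues refresh work on ib_wq.
 */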
static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u32 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);
			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}

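/*
 * Client "add" hook: allocate per-port state for the device, register
 * a GSI MAD agent on every port that supports SA, and prime the SM AH
 * cache. Returns -EOPNOTSUPP when no port on the device speaks SA.
 */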
static int ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;
	int ret;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(struct_size(sa_dev, port,
				     size_add(size_sub(e, s), 1)),
			 GFP_KERNEL);
	if (!sa_dev)
		return -ENOMEM;

	sa_dev->start_port = s;
	sa_dev->end_port = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent)) {
			ret = PTR_ERR(sa_dev->port[i].agent);
			goto err;
		}

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	ib_register_event_handler(&sa_dev->event_handler);

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return 0;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return ret;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

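/*
 * Module-level init/cleanup for the SA query machinery: seed the MAD
 * transaction ID, register the SA client, bring up multicast handling
 * and create the ordered workqueue used for netlink local service
 * requests.
 */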
int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	WARN_ON(!xa_empty(&queries));
}
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 * Copyright (c) 2006 Intel Corporation. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/err.h>
38#include <linux/random.h>
39#include <linux/spinlock.h>
40#include <linux/slab.h>
41#include <linux/dma-mapping.h>
42#include <linux/kref.h>
43#include <linux/idr.h>
44#include <linux/workqueue.h>
45#include <uapi/linux/if_ether.h>
46#include <rdma/ib_pack.h>
47#include <rdma/ib_cache.h>
48#include <rdma/rdma_netlink.h>
49#include <net/netlink.h>
50#include <uapi/rdma/ib_user_sa.h>
51#include <rdma/ib_marshall.h>
52#include <rdma/ib_addr.h>
53#include <rdma/opa_addr.h>
54#include "sa.h"
55#include "core_priv.h"
56
57#define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100
58#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
59#define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000
60#define IB_SA_CPI_MAX_RETRY_CNT 3
61#define IB_SA_CPI_RETRY_WAIT 1000 /*msecs */
62static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
63
64struct ib_sa_sm_ah {
65 struct ib_ah *ah;
66 struct kref ref;
67 u16 pkey_index;
68 u8 src_path_mask;
69};
70
71enum rdma_class_port_info_type {
72 RDMA_CLASS_PORT_INFO_IB,
73 RDMA_CLASS_PORT_INFO_OPA
74};
75
76struct rdma_class_port_info {
77 enum rdma_class_port_info_type type;
78 union {
79 struct ib_class_port_info ib;
80 struct opa_class_port_info opa;
81 };
82};
83
84struct ib_sa_classport_cache {
85 bool valid;
86 int retry_cnt;
87 struct rdma_class_port_info data;
88};
89
90struct ib_sa_port {
91 struct ib_mad_agent *agent;
92 struct ib_sa_sm_ah *sm_ah;
93 struct work_struct update_task;
94 struct ib_sa_classport_cache classport_info;
95 struct delayed_work ib_cpi_work;
96 spinlock_t classport_lock; /* protects class port info set */
97 spinlock_t ah_lock;
98 u8 port_num;
99};
100
101struct ib_sa_device {
102 int start_port, end_port;
103 struct ib_event_handler event_handler;
104 struct ib_sa_port port[0];
105};
106
107struct ib_sa_query {
108 void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
109 void (*release)(struct ib_sa_query *);
110 struct ib_sa_client *client;
111 struct ib_sa_port *port;
112 struct ib_mad_send_buf *mad_buf;
113 struct ib_sa_sm_ah *sm_ah;
114 int id;
115 u32 flags;
116 struct list_head list; /* Local svc request list */
117 u32 seq; /* Local svc request sequence number */
118 unsigned long timeout; /* Local svc timeout */
119 u8 path_use; /* How will the pathrecord be used */
120};
121
122#define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001
123#define IB_SA_CANCEL 0x00000002
124#define IB_SA_QUERY_OPA 0x00000004
125
126struct ib_sa_service_query {
127 void (*callback)(int, struct ib_sa_service_rec *, void *);
128 void *context;
129 struct ib_sa_query sa_query;
130};
131
132struct ib_sa_path_query {
133 void (*callback)(int, struct sa_path_rec *, void *);
134 void *context;
135 struct ib_sa_query sa_query;
136 struct sa_path_rec *conv_pr;
137};
138
139struct ib_sa_guidinfo_query {
140 void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
141 void *context;
142 struct ib_sa_query sa_query;
143};
144
145struct ib_sa_classport_info_query {
146 void (*callback)(void *);
147 void *context;
148 struct ib_sa_query sa_query;
149};
150
151struct ib_sa_mcmember_query {
152 void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
153 void *context;
154 struct ib_sa_query sa_query;
155};
156
157static LIST_HEAD(ib_nl_request_list);
158static DEFINE_SPINLOCK(ib_nl_request_lock);
159static atomic_t ib_nl_sa_request_seq;
160static struct workqueue_struct *ib_nl_wq;
161static struct delayed_work ib_nl_timed_work;
162static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
163 [LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY,
164 .len = sizeof(struct ib_path_rec_data)},
165 [LS_NLA_TYPE_TIMEOUT] = {.type = NLA_U32},
166 [LS_NLA_TYPE_SERVICE_ID] = {.type = NLA_U64},
167 [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
168 .len = sizeof(struct rdma_nla_ls_gid)},
169 [LS_NLA_TYPE_SGID] = {.type = NLA_BINARY,
170 .len = sizeof(struct rdma_nla_ls_gid)},
171 [LS_NLA_TYPE_TCLASS] = {.type = NLA_U8},
172 [LS_NLA_TYPE_PKEY] = {.type = NLA_U16},
173 [LS_NLA_TYPE_QOS_CLASS] = {.type = NLA_U16},
174};
175
176
177static void ib_sa_add_one(struct ib_device *device);
178static void ib_sa_remove_one(struct ib_device *device, void *client_data);
179
180static struct ib_client sa_client = {
181 .name = "sa",
182 .add = ib_sa_add_one,
183 .remove = ib_sa_remove_one
184};
185
186static DEFINE_SPINLOCK(idr_lock);
187static DEFINE_IDR(query_idr);
188
189static DEFINE_SPINLOCK(tid_lock);
190static u32 tid;
191
192#define PATH_REC_FIELD(field) \
193 .struct_offset_bytes = offsetof(struct sa_path_rec, field), \
194 .struct_size_bytes = sizeof((struct sa_path_rec *)0)->field, \
195 .field_name = "sa_path_rec:" #field
196
197static const struct ib_field path_rec_table[] = {
198 { PATH_REC_FIELD(service_id),
199 .offset_words = 0,
200 .offset_bits = 0,
201 .size_bits = 64 },
202 { PATH_REC_FIELD(dgid),
203 .offset_words = 2,
204 .offset_bits = 0,
205 .size_bits = 128 },
206 { PATH_REC_FIELD(sgid),
207 .offset_words = 6,
208 .offset_bits = 0,
209 .size_bits = 128 },
210 { PATH_REC_FIELD(ib.dlid),
211 .offset_words = 10,
212 .offset_bits = 0,
213 .size_bits = 16 },
214 { PATH_REC_FIELD(ib.slid),
215 .offset_words = 10,
216 .offset_bits = 16,
217 .size_bits = 16 },
218 { PATH_REC_FIELD(ib.raw_traffic),
219 .offset_words = 11,
220 .offset_bits = 0,
221 .size_bits = 1 },
222 { RESERVED,
223 .offset_words = 11,
224 .offset_bits = 1,
225 .size_bits = 3 },
226 { PATH_REC_FIELD(flow_label),
227 .offset_words = 11,
228 .offset_bits = 4,
229 .size_bits = 20 },
230 { PATH_REC_FIELD(hop_limit),
231 .offset_words = 11,
232 .offset_bits = 24,
233 .size_bits = 8 },
234 { PATH_REC_FIELD(traffic_class),
235 .offset_words = 12,
236 .offset_bits = 0,
237 .size_bits = 8 },
238 { PATH_REC_FIELD(reversible),
239 .offset_words = 12,
240 .offset_bits = 8,
241 .size_bits = 1 },
242 { PATH_REC_FIELD(numb_path),
243 .offset_words = 12,
244 .offset_bits = 9,
245 .size_bits = 7 },
246 { PATH_REC_FIELD(pkey),
247 .offset_words = 12,
248 .offset_bits = 16,
249 .size_bits = 16 },
250 { PATH_REC_FIELD(qos_class),
251 .offset_words = 13,
252 .offset_bits = 0,
253 .size_bits = 12 },
254 { PATH_REC_FIELD(sl),
255 .offset_words = 13,
256 .offset_bits = 12,
257 .size_bits = 4 },
258 { PATH_REC_FIELD(mtu_selector),
259 .offset_words = 13,
260 .offset_bits = 16,
261 .size_bits = 2 },
262 { PATH_REC_FIELD(mtu),
263 .offset_words = 13,
264 .offset_bits = 18,
265 .size_bits = 6 },
266 { PATH_REC_FIELD(rate_selector),
267 .offset_words = 13,
268 .offset_bits = 24,
269 .size_bits = 2 },
270 { PATH_REC_FIELD(rate),
271 .offset_words = 13,
272 .offset_bits = 26,
273 .size_bits = 6 },
274 { PATH_REC_FIELD(packet_life_time_selector),
275 .offset_words = 14,
276 .offset_bits = 0,
277 .size_bits = 2 },
278 { PATH_REC_FIELD(packet_life_time),
279 .offset_words = 14,
280 .offset_bits = 2,
281 .size_bits = 6 },
282 { PATH_REC_FIELD(preference),
283 .offset_words = 14,
284 .offset_bits = 8,
285 .size_bits = 8 },
286 { RESERVED,
287 .offset_words = 14,
288 .offset_bits = 16,
289 .size_bits = 48 },
290};
291
292#define OPA_PATH_REC_FIELD(field) \
293 .struct_offset_bytes = \
294 offsetof(struct sa_path_rec, field), \
295 .struct_size_bytes = \
296 sizeof((struct sa_path_rec *)0)->field, \
297 .field_name = "sa_path_rec:" #field
298
299static const struct ib_field opa_path_rec_table[] = {
300 { OPA_PATH_REC_FIELD(service_id),
301 .offset_words = 0,
302 .offset_bits = 0,
303 .size_bits = 64 },
304 { OPA_PATH_REC_FIELD(dgid),
305 .offset_words = 2,
306 .offset_bits = 0,
307 .size_bits = 128 },
308 { OPA_PATH_REC_FIELD(sgid),
309 .offset_words = 6,
310 .offset_bits = 0,
311 .size_bits = 128 },
312 { OPA_PATH_REC_FIELD(opa.dlid),
313 .offset_words = 10,
314 .offset_bits = 0,
315 .size_bits = 32 },
316 { OPA_PATH_REC_FIELD(opa.slid),
317 .offset_words = 11,
318 .offset_bits = 0,
319 .size_bits = 32 },
320 { OPA_PATH_REC_FIELD(opa.raw_traffic),
321 .offset_words = 12,
322 .offset_bits = 0,
323 .size_bits = 1 },
324 { RESERVED,
325 .offset_words = 12,
326 .offset_bits = 1,
327 .size_bits = 3 },
328 { OPA_PATH_REC_FIELD(flow_label),
329 .offset_words = 12,
330 .offset_bits = 4,
331 .size_bits = 20 },
332 { OPA_PATH_REC_FIELD(hop_limit),
333 .offset_words = 12,
334 .offset_bits = 24,
335 .size_bits = 8 },
336 { OPA_PATH_REC_FIELD(traffic_class),
337 .offset_words = 13,
338 .offset_bits = 0,
339 .size_bits = 8 },
340 { OPA_PATH_REC_FIELD(reversible),
341 .offset_words = 13,
342 .offset_bits = 8,
343 .size_bits = 1 },
344 { OPA_PATH_REC_FIELD(numb_path),
345 .offset_words = 13,
346 .offset_bits = 9,
347 .size_bits = 7 },
348 { OPA_PATH_REC_FIELD(pkey),
349 .offset_words = 13,
350 .offset_bits = 16,
351 .size_bits = 16 },
352 { OPA_PATH_REC_FIELD(opa.l2_8B),
353 .offset_words = 14,
354 .offset_bits = 0,
355 .size_bits = 1 },
356 { OPA_PATH_REC_FIELD(opa.l2_10B),
357 .offset_words = 14,
358 .offset_bits = 1,
359 .size_bits = 1 },
360 { OPA_PATH_REC_FIELD(opa.l2_9B),
361 .offset_words = 14,
362 .offset_bits = 2,
363 .size_bits = 1 },
364 { OPA_PATH_REC_FIELD(opa.l2_16B),
365 .offset_words = 14,
366 .offset_bits = 3,
367 .size_bits = 1 },
368 { RESERVED,
369 .offset_words = 14,
370 .offset_bits = 4,
371 .size_bits = 2 },
372 { OPA_PATH_REC_FIELD(opa.qos_type),
373 .offset_words = 14,
374 .offset_bits = 6,
375 .size_bits = 2 },
376 { OPA_PATH_REC_FIELD(opa.qos_priority),
377 .offset_words = 14,
378 .offset_bits = 8,
379 .size_bits = 8 },
380 { RESERVED,
381 .offset_words = 14,
382 .offset_bits = 16,
383 .size_bits = 3 },
384 { OPA_PATH_REC_FIELD(sl),
385 .offset_words = 14,
386 .offset_bits = 19,
387 .size_bits = 5 },
388 { RESERVED,
389 .offset_words = 14,
390 .offset_bits = 24,
391 .size_bits = 8 },
392 { OPA_PATH_REC_FIELD(mtu_selector),
393 .offset_words = 15,
394 .offset_bits = 0,
395 .size_bits = 2 },
396 { OPA_PATH_REC_FIELD(mtu),
397 .offset_words = 15,
398 .offset_bits = 2,
399 .size_bits = 6 },
400 { OPA_PATH_REC_FIELD(rate_selector),
401 .offset_words = 15,
402 .offset_bits = 8,
403 .size_bits = 2 },
404 { OPA_PATH_REC_FIELD(rate),
405 .offset_words = 15,
406 .offset_bits = 10,
407 .size_bits = 6 },
408 { OPA_PATH_REC_FIELD(packet_life_time_selector),
409 .offset_words = 15,
410 .offset_bits = 16,
411 .size_bits = 2 },
412 { OPA_PATH_REC_FIELD(packet_life_time),
413 .offset_words = 15,
414 .offset_bits = 18,
415 .size_bits = 6 },
416 { OPA_PATH_REC_FIELD(preference),
417 .offset_words = 15,
418 .offset_bits = 24,
419 .size_bits = 8 },
420};
421
422#define MCMEMBER_REC_FIELD(field) \
423 .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
424 .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
425 .field_name = "sa_mcmember_rec:" #field
426
427static const struct ib_field mcmember_rec_table[] = {
428 { MCMEMBER_REC_FIELD(mgid),
429 .offset_words = 0,
430 .offset_bits = 0,
431 .size_bits = 128 },
432 { MCMEMBER_REC_FIELD(port_gid),
433 .offset_words = 4,
434 .offset_bits = 0,
435 .size_bits = 128 },
436 { MCMEMBER_REC_FIELD(qkey),
437 .offset_words = 8,
438 .offset_bits = 0,
439 .size_bits = 32 },
440 { MCMEMBER_REC_FIELD(mlid),
441 .offset_words = 9,
442 .offset_bits = 0,
443 .size_bits = 16 },
444 { MCMEMBER_REC_FIELD(mtu_selector),
445 .offset_words = 9,
446 .offset_bits = 16,
447 .size_bits = 2 },
448 { MCMEMBER_REC_FIELD(mtu),
449 .offset_words = 9,
450 .offset_bits = 18,
451 .size_bits = 6 },
452 { MCMEMBER_REC_FIELD(traffic_class),
453 .offset_words = 9,
454 .offset_bits = 24,
455 .size_bits = 8 },
456 { MCMEMBER_REC_FIELD(pkey),
457 .offset_words = 10,
458 .offset_bits = 0,
459 .size_bits = 16 },
460 { MCMEMBER_REC_FIELD(rate_selector),
461 .offset_words = 10,
462 .offset_bits = 16,
463 .size_bits = 2 },
464 { MCMEMBER_REC_FIELD(rate),
465 .offset_words = 10,
466 .offset_bits = 18,
467 .size_bits = 6 },
468 { MCMEMBER_REC_FIELD(packet_life_time_selector),
469 .offset_words = 10,
470 .offset_bits = 24,
471 .size_bits = 2 },
472 { MCMEMBER_REC_FIELD(packet_life_time),
473 .offset_words = 10,
474 .offset_bits = 26,
475 .size_bits = 6 },
476 { MCMEMBER_REC_FIELD(sl),
477 .offset_words = 11,
478 .offset_bits = 0,
479 .size_bits = 4 },
480 { MCMEMBER_REC_FIELD(flow_label),
481 .offset_words = 11,
482 .offset_bits = 4,
483 .size_bits = 20 },
484 { MCMEMBER_REC_FIELD(hop_limit),
485 .offset_words = 11,
486 .offset_bits = 24,
487 .size_bits = 8 },
488 { MCMEMBER_REC_FIELD(scope),
489 .offset_words = 12,
490 .offset_bits = 0,
491 .size_bits = 4 },
492 { MCMEMBER_REC_FIELD(join_state),
493 .offset_words = 12,
494 .offset_bits = 4,
495 .size_bits = 4 },
496 { MCMEMBER_REC_FIELD(proxy_join),
497 .offset_words = 12,
498 .offset_bits = 8,
499 .size_bits = 1 },
500 { RESERVED,
501 .offset_words = 12,
502 .offset_bits = 9,
503 .size_bits = 23 },
504};
505
506#define SERVICE_REC_FIELD(field) \
507 .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
508 .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
509 .field_name = "sa_service_rec:" #field
510
511static const struct ib_field service_rec_table[] = {
512 { SERVICE_REC_FIELD(id),
513 .offset_words = 0,
514 .offset_bits = 0,
515 .size_bits = 64 },
516 { SERVICE_REC_FIELD(gid),
517 .offset_words = 2,
518 .offset_bits = 0,
519 .size_bits = 128 },
520 { SERVICE_REC_FIELD(pkey),
521 .offset_words = 6,
522 .offset_bits = 0,
523 .size_bits = 16 },
524 { SERVICE_REC_FIELD(lease),
525 .offset_words = 7,
526 .offset_bits = 0,
527 .size_bits = 32 },
528 { SERVICE_REC_FIELD(key),
529 .offset_words = 8,
530 .offset_bits = 0,
531 .size_bits = 128 },
532 { SERVICE_REC_FIELD(name),
533 .offset_words = 12,
534 .offset_bits = 0,
535 .size_bits = 64*8 },
536 { SERVICE_REC_FIELD(data8),
537 .offset_words = 28,
538 .offset_bits = 0,
539 .size_bits = 16*8 },
540 { SERVICE_REC_FIELD(data16),
541 .offset_words = 32,
542 .offset_bits = 0,
543 .size_bits = 8*16 },
544 { SERVICE_REC_FIELD(data32),
545 .offset_words = 36,
546 .offset_bits = 0,
547 .size_bits = 4*32 },
548 { SERVICE_REC_FIELD(data64),
549 .offset_words = 40,
550 .offset_bits = 0,
551 .size_bits = 2*64 },
552};
553
554#define CLASSPORTINFO_REC_FIELD(field) \
555 .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
556 .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
557 .field_name = "ib_class_port_info:" #field
558
559static const struct ib_field ib_classport_info_rec_table[] = {
560 { CLASSPORTINFO_REC_FIELD(base_version),
561 .offset_words = 0,
562 .offset_bits = 0,
563 .size_bits = 8 },
564 { CLASSPORTINFO_REC_FIELD(class_version),
565 .offset_words = 0,
566 .offset_bits = 8,
567 .size_bits = 8 },
568 { CLASSPORTINFO_REC_FIELD(capability_mask),
569 .offset_words = 0,
570 .offset_bits = 16,
571 .size_bits = 16 },
572 { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
573 .offset_words = 1,
574 .offset_bits = 0,
575 .size_bits = 32 },
576 { CLASSPORTINFO_REC_FIELD(redirect_gid),
577 .offset_words = 2,
578 .offset_bits = 0,
579 .size_bits = 128 },
580 { CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
581 .offset_words = 6,
582 .offset_bits = 0,
583 .size_bits = 32 },
584 { CLASSPORTINFO_REC_FIELD(redirect_lid),
585 .offset_words = 7,
586 .offset_bits = 0,
587 .size_bits = 16 },
588 { CLASSPORTINFO_REC_FIELD(redirect_pkey),
589 .offset_words = 7,
590 .offset_bits = 16,
591 .size_bits = 16 },
592
593 { CLASSPORTINFO_REC_FIELD(redirect_qp),
594 .offset_words = 8,
595 .offset_bits = 0,
596 .size_bits = 32 },
597 { CLASSPORTINFO_REC_FIELD(redirect_qkey),
598 .offset_words = 9,
599 .offset_bits = 0,
600 .size_bits = 32 },
601
602 { CLASSPORTINFO_REC_FIELD(trap_gid),
603 .offset_words = 10,
604 .offset_bits = 0,
605 .size_bits = 128 },
606 { CLASSPORTINFO_REC_FIELD(trap_tcslfl),
607 .offset_words = 14,
608 .offset_bits = 0,
609 .size_bits = 32 },
610
611 { CLASSPORTINFO_REC_FIELD(trap_lid),
612 .offset_words = 15,
613 .offset_bits = 0,
614 .size_bits = 16 },
615 { CLASSPORTINFO_REC_FIELD(trap_pkey),
616 .offset_words = 15,
617 .offset_bits = 16,
618 .size_bits = 16 },
619
620 { CLASSPORTINFO_REC_FIELD(trap_hlqp),
621 .offset_words = 16,
622 .offset_bits = 0,
623 .size_bits = 32 },
624 { CLASSPORTINFO_REC_FIELD(trap_qkey),
625 .offset_words = 17,
626 .offset_bits = 0,
627 .size_bits = 32 },
628};
629
630#define OPA_CLASSPORTINFO_REC_FIELD(field) \
631 .struct_offset_bytes =\
632 offsetof(struct opa_class_port_info, field), \
633 .struct_size_bytes = \
634 sizeof((struct opa_class_port_info *)0)->field, \
635 .field_name = "opa_class_port_info:" #field
636
637static const struct ib_field opa_classport_info_rec_table[] = {
638 { OPA_CLASSPORTINFO_REC_FIELD(base_version),
639 .offset_words = 0,
640 .offset_bits = 0,
641 .size_bits = 8 },
642 { OPA_CLASSPORTINFO_REC_FIELD(class_version),
643 .offset_words = 0,
644 .offset_bits = 8,
645 .size_bits = 8 },
646 { OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
647 .offset_words = 0,
648 .offset_bits = 16,
649 .size_bits = 16 },
650 { OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
651 .offset_words = 1,
652 .offset_bits = 0,
653 .size_bits = 32 },
654 { OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
655 .offset_words = 2,
656 .offset_bits = 0,
657 .size_bits = 128 },
658 { OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
659 .offset_words = 6,
660 .offset_bits = 0,
661 .size_bits = 32 },
662 { OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
663 .offset_words = 7,
664 .offset_bits = 0,
665 .size_bits = 32 },
666 { OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
667 .offset_words = 8,
668 .offset_bits = 0,
669 .size_bits = 32 },
670 { OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
671 .offset_words = 9,
672 .offset_bits = 0,
673 .size_bits = 32 },
674 { OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
675 .offset_words = 10,
676 .offset_bits = 0,
677 .size_bits = 128 },
678 { OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
679 .offset_words = 14,
680 .offset_bits = 0,
681 .size_bits = 32 },
682 { OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
683 .offset_words = 15,
684 .offset_bits = 0,
685 .size_bits = 32 },
686 { OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
687 .offset_words = 16,
688 .offset_bits = 0,
689 .size_bits = 32 },
690 { OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
691 .offset_words = 17,
692 .offset_bits = 0,
693 .size_bits = 32 },
694 { OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
695 .offset_words = 18,
696 .offset_bits = 0,
697 .size_bits = 16 },
698 { OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
699 .offset_words = 18,
700 .offset_bits = 16,
701 .size_bits = 16 },
702 { OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
703 .offset_words = 19,
704 .offset_bits = 0,
705 .size_bits = 8 },
706 { RESERVED,
707 .offset_words = 19,
708 .offset_bits = 8,
709 .size_bits = 24 },
710};
711
712#define GUIDINFO_REC_FIELD(field) \
713 .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
714 .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
715 .field_name = "sa_guidinfo_rec:" #field
716
717static const struct ib_field guidinfo_rec_table[] = {
718 { GUIDINFO_REC_FIELD(lid),
719 .offset_words = 0,
720 .offset_bits = 0,
721 .size_bits = 16 },
722 { GUIDINFO_REC_FIELD(block_num),
723 .offset_words = 0,
724 .offset_bits = 16,
725 .size_bits = 8 },
726 { GUIDINFO_REC_FIELD(res1),
727 .offset_words = 0,
728 .offset_bits = 24,
729 .size_bits = 8 },
730 { GUIDINFO_REC_FIELD(res2),
731 .offset_words = 1,
732 .offset_bits = 0,
733 .size_bits = 32 },
734 { GUIDINFO_REC_FIELD(guid_info_list),
735 .offset_words = 2,
736 .offset_bits = 0,
737 .size_bits = 512 },
738};
739
740static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
741{
742 query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
743}
744
745static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
746{
747 return (query->flags & IB_SA_CANCEL);
748}
749
750static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
751 struct ib_sa_query *query)
752{
753 struct sa_path_rec *sa_rec = query->mad_buf->context[1];
754 struct ib_sa_mad *mad = query->mad_buf->mad;
755 ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
756 u16 val16;
757 u64 val64;
758 struct rdma_ls_resolve_header *header;
759
760 query->mad_buf->context[1] = NULL;
761
762 /* Construct the family header first */
763 header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
764 memcpy(header->device_name, query->port->agent->device->name,
765 LS_DEVICE_NAME_MAX);
766 header->port_num = query->port->port_num;
767
768 if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
769 sa_rec->reversible != 0)
770 query->path_use = LS_RESOLVE_PATH_USE_GMP;
771 else
772 query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
773 header->path_use = query->path_use;
774
775 /* Now build the attributes */
776 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
777 val64 = be64_to_cpu(sa_rec->service_id);
778 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
779 sizeof(val64), &val64);
780 }
781 if (comp_mask & IB_SA_PATH_REC_DGID)
782 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
783 sizeof(sa_rec->dgid), &sa_rec->dgid);
784 if (comp_mask & IB_SA_PATH_REC_SGID)
785 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
786 sizeof(sa_rec->sgid), &sa_rec->sgid);
787 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
788 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
789 sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);
790
791 if (comp_mask & IB_SA_PATH_REC_PKEY) {
792 val16 = be16_to_cpu(sa_rec->pkey);
793 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
794 sizeof(val16), &val16);
795 }
796 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
797 val16 = be16_to_cpu(sa_rec->qos_class);
798 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
799 sizeof(val16), &val16);
800 }
801}
802
803static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
804{
805 int len = 0;
806
807 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
808 len += nla_total_size(sizeof(u64));
809 if (comp_mask & IB_SA_PATH_REC_DGID)
810 len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
811 if (comp_mask & IB_SA_PATH_REC_SGID)
812 len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
813 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
814 len += nla_total_size(sizeof(u8));
815 if (comp_mask & IB_SA_PATH_REC_PKEY)
816 len += nla_total_size(sizeof(u16));
817 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
818 len += nla_total_size(sizeof(u16));
819
820 /*
821 * Make sure that at least some of the required comp_mask bits are
822 * set.
823 */
824 if (WARN_ON(len == 0))
825 return len;
826
827 /* Add the family header */
828 len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));
829
830 return len;
831}
832
833static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
834{
835 struct sk_buff *skb = NULL;
836 struct nlmsghdr *nlh;
837 void *data;
838 int ret = 0;
839 struct ib_sa_mad *mad;
840 int len;
841
842 mad = query->mad_buf->mad;
843 len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
844 if (len <= 0)
845 return -EMSGSIZE;
846
847 skb = nlmsg_new(len, gfp_mask);
848 if (!skb)
849 return -ENOMEM;
850
851 /* Put nlmsg header only for now */
852 data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
853 RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
854 if (!data) {
855 nlmsg_free(skb);
856 return -EMSGSIZE;
857 }
858
859 /* Add attributes */
860 ib_nl_set_path_rec_attrs(skb, query);
861
862 /* Repair the nlmsg header length */
863 nlmsg_end(skb, nlh);
864
865 ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
866 if (!ret)
867 ret = len;
868 else
869 ret = 0;
870
871 return ret;
872}
873
874static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
875{
876 unsigned long flags;
877 unsigned long delay;
878 int ret;
879
880 INIT_LIST_HEAD(&query->list);
881 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
882
883 /* Put the request on the list first.*/
884 spin_lock_irqsave(&ib_nl_request_lock, flags);
885 delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
886 query->timeout = delay + jiffies;
887 list_add_tail(&query->list, &ib_nl_request_list);
888 /* Start the timeout if this is the only request */
889 if (ib_nl_request_list.next == &query->list)
890 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
891 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
892
893 ret = ib_nl_send_msg(query, gfp_mask);
894 if (ret <= 0) {
895 ret = -EIO;
896 /* Remove the request */
897 spin_lock_irqsave(&ib_nl_request_lock, flags);
898 list_del(&query->list);
899 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
900 } else {
901 ret = 0;
902 }
903
904 return ret;
905}
906
907static int ib_nl_cancel_request(struct ib_sa_query *query)
908{
909 unsigned long flags;
910 struct ib_sa_query *wait_query;
911 int found = 0;
912
913 spin_lock_irqsave(&ib_nl_request_lock, flags);
914 list_for_each_entry(wait_query, &ib_nl_request_list, list) {
915 /* Let the timeout to take care of the callback */
916 if (query == wait_query) {
917 query->flags |= IB_SA_CANCEL;
918 query->timeout = jiffies;
919 list_move(&query->list, &ib_nl_request_list);
920 found = 1;
921 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
922 break;
923 }
924 }
925 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
926
927 return found;
928}
929
930static void send_handler(struct ib_mad_agent *agent,
931 struct ib_mad_send_wc *mad_send_wc);
932
933static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
934 const struct nlmsghdr *nlh)
935{
936 struct ib_mad_send_wc mad_send_wc;
937 struct ib_sa_mad *mad = NULL;
938 const struct nlattr *head, *curr;
939 struct ib_path_rec_data *rec;
940 int len, rem;
941 u32 mask = 0;
942 int status = -EIO;
943
944 if (query->callback) {
945 head = (const struct nlattr *) nlmsg_data(nlh);
946 len = nlmsg_len(nlh);
947 switch (query->path_use) {
948 case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
949 mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
950 break;
951
952 case LS_RESOLVE_PATH_USE_ALL:
953 case LS_RESOLVE_PATH_USE_GMP:
954 default:
955 mask = IB_PATH_PRIMARY | IB_PATH_GMP |
956 IB_PATH_BIDIRECTIONAL;
957 break;
958 }
959 nla_for_each_attr(curr, head, len, rem) {
960 if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
961 rec = nla_data(curr);
962 /*
963 * Get the first one. In the future, we may
964 * need to get up to 6 pathrecords.
965 */
966 if ((rec->flags & mask) == mask) {
967 mad = query->mad_buf->mad;
968 mad->mad_hdr.method |=
969 IB_MGMT_METHOD_RESP;
970 memcpy(mad->data, rec->path_rec,
971 sizeof(rec->path_rec));
972 status = 0;
973 break;
974 }
975 }
976 }
977 query->callback(query, status, mad);
978 }
979
980 mad_send_wc.send_buf = query->mad_buf;
981 mad_send_wc.status = IB_WC_SUCCESS;
982 send_handler(query->mad_buf->mad_agent, &mad_send_wc);
983}
984
985static void ib_nl_request_timeout(struct work_struct *work)
986{
987 unsigned long flags;
988 struct ib_sa_query *query;
989 unsigned long delay;
990 struct ib_mad_send_wc mad_send_wc;
991 int ret;
992
993 spin_lock_irqsave(&ib_nl_request_lock, flags);
994 while (!list_empty(&ib_nl_request_list)) {
995 query = list_entry(ib_nl_request_list.next,
996 struct ib_sa_query, list);
997
998 if (time_after(query->timeout, jiffies)) {
999 delay = query->timeout - jiffies;
1000 if ((long)delay <= 0)
1001 delay = 1;
1002 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
1003 break;
1004 }
1005
1006 list_del(&query->list);
1007 ib_sa_disable_local_svc(query);
1008 /* Hold the lock to protect against query cancellation */
1009 if (ib_sa_query_cancelled(query))
1010 ret = -1;
1011 else
1012 ret = ib_post_send_mad(query->mad_buf, NULL);
1013 if (ret) {
1014 mad_send_wc.send_buf = query->mad_buf;
1015 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
1016 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1017 send_handler(query->port->agent, &mad_send_wc);
1018 spin_lock_irqsave(&ib_nl_request_lock, flags);
1019 }
1020 }
1021 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1022}
1023
1024int ib_nl_handle_set_timeout(struct sk_buff *skb,
1025 struct nlmsghdr *nlh,
1026 struct netlink_ext_ack *extack)
1027{
1028 int timeout, delta, abs_delta;
1029 const struct nlattr *attr;
1030 unsigned long flags;
1031 struct ib_sa_query *query;
1032 long delay = 0;
1033 struct nlattr *tb[LS_NLA_TYPE_MAX];
1034 int ret;
1035
1036 if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
1037 !(NETLINK_CB(skb).sk))
1038 return -EPERM;
1039
1040 ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
1041 nlmsg_len(nlh), ib_nl_policy, NULL);
1042 attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
1043 if (ret || !attr)
1044 goto settimeout_out;
1045
1046 timeout = *(int *) nla_data(attr);
1047 if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
1048 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
1049 if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
1050 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
1051
1052 delta = timeout - sa_local_svc_timeout_ms;
1053 if (delta < 0)
1054 abs_delta = -delta;
1055 else
1056 abs_delta = delta;
1057
1058 if (delta != 0) {
1059 spin_lock_irqsave(&ib_nl_request_lock, flags);
1060 sa_local_svc_timeout_ms = timeout;
1061 list_for_each_entry(query, &ib_nl_request_list, list) {
1062 if (delta < 0 && abs_delta > query->timeout)
1063 query->timeout = 0;
1064 else
1065 query->timeout += delta;
1066
1067 /* Get the new delay from the first entry */
1068 if (!delay) {
1069 delay = query->timeout - jiffies;
1070 if (delay <= 0)
1071 delay = 1;
1072 }
1073 }
1074 if (delay)
1075 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
1076 (unsigned long)delay);
1077 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1078 }
1079
1080settimeout_out:
1081 return skb->len;
1082}
1083
1084static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
1085{
1086 struct nlattr *tb[LS_NLA_TYPE_MAX];
1087 int ret;
1088
1089 if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
1090 return 0;
1091
1092 ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
1093 nlmsg_len(nlh), ib_nl_policy, NULL);
1094 if (ret)
1095 return 0;
1096
1097 return 1;
1098}
1099
1100int ib_nl_handle_resolve_resp(struct sk_buff *skb,
1101 struct nlmsghdr *nlh,
1102 struct netlink_ext_ack *extack)
1103{
1104 unsigned long flags;
1105 struct ib_sa_query *query;
1106 struct ib_mad_send_buf *send_buf;
1107 struct ib_mad_send_wc mad_send_wc;
1108 int found = 0;
1109 int ret;
1110
1111 if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
1112 !(NETLINK_CB(skb).sk))
1113 return -EPERM;
1114
1115 spin_lock_irqsave(&ib_nl_request_lock, flags);
1116 list_for_each_entry(query, &ib_nl_request_list, list) {
1117 /*
1118 * If the query is cancelled, let the timeout routine
1119 * take care of it.
1120 */
1121 if (nlh->nlmsg_seq == query->seq) {
1122 found = !ib_sa_query_cancelled(query);
1123 if (found)
1124 list_del(&query->list);
1125 break;
1126 }
1127 }
1128
1129 if (!found) {
1130 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1131 goto resp_out;
1132 }
1133
1134 send_buf = query->mad_buf;
1135
1136 if (!ib_nl_is_good_resolve_resp(nlh)) {
1137 /* if the result is a failure, send out the packet via IB */
1138 ib_sa_disable_local_svc(query);
1139 ret = ib_post_send_mad(query->mad_buf, NULL);
1140 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1141 if (ret) {
1142 mad_send_wc.send_buf = send_buf;
1143 mad_send_wc.status = IB_WC_GENERAL_ERR;
1144 send_handler(query->port->agent, &mad_send_wc);
1145 }
1146 } else {
1147 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1148 ib_nl_process_good_resolve_rsp(query, nlh);
1149 }
1150
1151resp_out:
1152 return skb->len;
1153}
1154
1155static void free_sm_ah(struct kref *kref)
1156{
1157 struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
1158
1159 rdma_destroy_ah(sm_ah->ah);
1160 kfree(sm_ah);
1161}
1162
1163void ib_sa_register_client(struct ib_sa_client *client)
1164{
1165 atomic_set(&client->users, 1);
1166 init_completion(&client->comp);
1167}
1168EXPORT_SYMBOL(ib_sa_register_client);
1169
1170void ib_sa_unregister_client(struct ib_sa_client *client)
1171{
1172 ib_sa_client_put(client);
1173 wait_for_completion(&client->comp);
1174}
1175EXPORT_SYMBOL(ib_sa_unregister_client);
1176
1177/**
1178 * ib_sa_cancel_query - try to cancel an SA query
1179 * @id:ID of query to cancel
1180 * @query:query pointer to cancel
1181 *
1182 * Try to cancel an SA query. If the id and query don't match up or
1183 * the query has already completed, nothing is done. Otherwise the
1184 * query is canceled and will complete with a status of -EINTR.
1185 */
1186void ib_sa_cancel_query(int id, struct ib_sa_query *query)
1187{
1188 unsigned long flags;
1189 struct ib_mad_agent *agent;
1190 struct ib_mad_send_buf *mad_buf;
1191
1192 spin_lock_irqsave(&idr_lock, flags);
1193 if (idr_find(&query_idr, id) != query) {
1194 spin_unlock_irqrestore(&idr_lock, flags);
1195 return;
1196 }
1197 agent = query->port->agent;
1198 mad_buf = query->mad_buf;
1199 spin_unlock_irqrestore(&idr_lock, flags);
1200
1201 /*
1202 * If the query is still on the netlink request list, schedule
1203 * it to be cancelled by the timeout routine. Otherwise, it has been
1204 * sent to the MAD layer and has to be cancelled from there.
1205 */
1206 if (!ib_nl_cancel_request(query))
1207 ib_cancel_mad(agent, mad_buf);
1208}
1209EXPORT_SYMBOL(ib_sa_cancel_query);
1210
1211static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
1212{
1213 struct ib_sa_device *sa_dev;
1214 struct ib_sa_port *port;
1215 unsigned long flags;
1216 u8 src_path_mask;
1217
1218 sa_dev = ib_get_client_data(device, &sa_client);
1219 if (!sa_dev)
1220 return 0x7f;
1221
1222 port = &sa_dev->port[port_num - sa_dev->start_port];
1223 spin_lock_irqsave(&port->ah_lock, flags);
1224 src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
1225 spin_unlock_irqrestore(&port->ah_lock, flags);
1226
1227 return src_path_mask;
1228}
1229
1230static int
1231roce_resolve_route_from_path(struct ib_device *device, u8 port_num,
1232 struct sa_path_rec *rec)
1233{
1234 struct net_device *resolved_dev;
1235 struct net_device *ndev;
1236 struct net_device *idev;
1237 struct rdma_dev_addr dev_addr = {
1238 .bound_dev_if = ((sa_path_get_ifindex(rec) >= 0) ?
1239 sa_path_get_ifindex(rec) : 0),
1240 .net = sa_path_get_ndev(rec) ?
1241 sa_path_get_ndev(rec) :
1242 &init_net
1243 };
1244 union {
1245 struct sockaddr _sockaddr;
1246 struct sockaddr_in _sockaddr_in;
1247 struct sockaddr_in6 _sockaddr_in6;
1248 } sgid_addr, dgid_addr;
1249 int ret;
1250
1251 if (rec->roce.route_resolved)
1252 return 0;
1253
1254 if (!device->get_netdev)
1255 return -EOPNOTSUPP;
1256
1257 rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
1258 rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
1259
1260 /* validate the route */
1261 ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
1262 &dgid_addr._sockaddr, &dev_addr);
1263 if (ret)
1264 return ret;
1265
1266 if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
1267 dev_addr.network == RDMA_NETWORK_IPV6) &&
1268 rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
1269 return -EINVAL;
1270
1271 idev = device->get_netdev(device, port_num);
1272 if (!idev)
1273 return -ENODEV;
1274
1275 resolved_dev = dev_get_by_index(dev_addr.net,
1276 dev_addr.bound_dev_if);
1277 if (!resolved_dev) {
1278 ret = -ENODEV;
1279 goto done;
1280 }
1281 ndev = ib_get_ndev_from_path(rec);
1282 rcu_read_lock();
1283 if ((ndev && ndev != resolved_dev) ||
1284 (resolved_dev != idev &&
1285 !rdma_is_upper_dev_rcu(idev, resolved_dev)))
1286 ret = -EHOSTUNREACH;
1287 rcu_read_unlock();
1288 dev_put(resolved_dev);
1289 if (ndev)
1290 dev_put(ndev);
1291done:
1292 dev_put(idev);
1293 if (!ret)
1294 rec->roce.route_resolved = true;
1295 return ret;
1296}
1297
1298static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
1299 struct sa_path_rec *rec,
1300 struct rdma_ah_attr *ah_attr)
1301{
1302 enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);
1303 struct net_device *ndev;
1304 u16 gid_index;
1305 int ret;
1306
1307 ndev = ib_get_ndev_from_path(rec);
1308 ret = ib_find_cached_gid_by_port(device, &rec->sgid, type,
1309 port_num, ndev, &gid_index);
1310 if (ndev)
1311 dev_put(ndev);
1312 if (ret)
1313 return ret;
1314
1315 rdma_ah_set_grh(ah_attr, &rec->dgid,
1316 be32_to_cpu(rec->flow_label),
1317 gid_index, rec->hop_limit,
1318 rec->traffic_class);
1319 return 0;
1320}
1321
1322int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
1323 struct sa_path_rec *rec,
1324 struct rdma_ah_attr *ah_attr)
1325{
1326 int ret = 0;
1327
1328 memset(ah_attr, 0, sizeof(*ah_attr));
1329 ah_attr->type = rdma_ah_find_type(device, port_num);
1330 rdma_ah_set_sl(ah_attr, rec->sl);
1331 rdma_ah_set_port_num(ah_attr, port_num);
1332 rdma_ah_set_static_rate(ah_attr, rec->rate);
1333
1334 if (sa_path_is_roce(rec)) {
1335 ret = roce_resolve_route_from_path(device, port_num, rec);
1336 if (ret)
1337 return ret;
1338
1339 memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
1340 } else {
1341 rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
1342 if (sa_path_is_opa(rec) &&
1343 rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
1344 rdma_ah_set_make_grd(ah_attr, true);
1345
1346 rdma_ah_set_path_bits(ah_attr,
1347 be32_to_cpu(sa_path_get_slid(rec)) &
1348 get_src_path_mask(device, port_num));
1349 }
1350
1351 if (rec->hop_limit > 0 || sa_path_is_roce(rec))
1352 ret = init_ah_attr_grh_fields(device, port_num, rec, ah_attr);
1353 return ret;
1354}
1355EXPORT_SYMBOL(ib_init_ah_attr_from_path);
1356
1357static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
1358{
1359 struct rdma_ah_attr ah_attr;
1360 unsigned long flags;
1361
1362 spin_lock_irqsave(&query->port->ah_lock, flags);
1363 if (!query->port->sm_ah) {
1364 spin_unlock_irqrestore(&query->port->ah_lock, flags);
1365 return -EAGAIN;
1366 }
1367 kref_get(&query->port->sm_ah->ref);
1368 query->sm_ah = query->port->sm_ah;
1369 spin_unlock_irqrestore(&query->port->ah_lock, flags);
1370
1371 /*
1372 * Always check if sm_ah has valid dlid assigned,
1373 * before querying for class port info
1374 */
1375 if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
1376 !rdma_is_valid_unicast_lid(&ah_attr)) {
1377 kref_put(&query->sm_ah->ref, free_sm_ah);
1378 return -EAGAIN;
1379 }
1380 query->mad_buf = ib_create_send_mad(query->port->agent, 1,
1381 query->sm_ah->pkey_index,
1382 0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
1383 gfp_mask,
1384 ((query->flags & IB_SA_QUERY_OPA) ?
1385 OPA_MGMT_BASE_VERSION :
1386 IB_MGMT_BASE_VERSION));
1387 if (IS_ERR(query->mad_buf)) {
1388 kref_put(&query->sm_ah->ref, free_sm_ah);
1389 return -ENOMEM;
1390 }
1391
1392 query->mad_buf->ah = query->sm_ah->ah;
1393
1394 return 0;
1395}
1396
1397static void free_mad(struct ib_sa_query *query)
1398{
1399 ib_free_send_mad(query->mad_buf);
1400 kref_put(&query->sm_ah->ref, free_sm_ah);
1401}
1402
1403static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
1404{
1405 struct ib_sa_mad *mad = query->mad_buf->mad;
1406 unsigned long flags;
1407
1408 memset(mad, 0, sizeof *mad);
1409
1410 if (query->flags & IB_SA_QUERY_OPA) {
1411 mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
1412 mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
1413 } else {
1414 mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
1415 mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
1416 }
1417 mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
1418 spin_lock_irqsave(&tid_lock, flags);
1419 mad->mad_hdr.tid =
1420 cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
1421 spin_unlock_irqrestore(&tid_lock, flags);
1422}
1423
1424static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
1425{
1426 bool preload = gfpflags_allow_blocking(gfp_mask);
1427 unsigned long flags;
1428 int ret, id;
1429
1430 if (preload)
1431 idr_preload(gfp_mask);
1432 spin_lock_irqsave(&idr_lock, flags);
1433
1434 id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
1435
1436 spin_unlock_irqrestore(&idr_lock, flags);
1437 if (preload)
1438 idr_preload_end();
1439 if (id < 0)
1440 return id;
1441
1442 query->mad_buf->timeout_ms = timeout_ms;
1443 query->mad_buf->context[0] = query;
1444 query->id = id;
1445
1446 if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
1447 (!(query->flags & IB_SA_QUERY_OPA))) {
1448 if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
1449 if (!ib_nl_make_request(query, gfp_mask))
1450 return id;
1451 }
1452 ib_sa_disable_local_svc(query);
1453 }
1454
1455 ret = ib_post_send_mad(query->mad_buf, NULL);
1456 if (ret) {
1457 spin_lock_irqsave(&idr_lock, flags);
1458 idr_remove(&query_idr, id);
1459 spin_unlock_irqrestore(&idr_lock, flags);
1460 }
1461
1462 /*
1463 * It's not safe to dereference query any more, because the
1464 * send may already have completed and freed the query in
1465 * another context.
1466 */
1467 return ret ? ret : id;
1468}
1469
1470void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
1471{
1472 ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
1473}
1474EXPORT_SYMBOL(ib_sa_unpack_path);
1475
1476void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
1477{
1478 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
1479}
1480EXPORT_SYMBOL(ib_sa_pack_path);
1481
1482static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
1483 struct ib_device *device,
1484 u8 port_num)
1485{
1486 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1487 struct ib_sa_port *port;
1488 unsigned long flags;
1489 bool ret = false;
1490
1491 if (!sa_dev)
1492 return ret;
1493
1494 port = &sa_dev->port[port_num - sa_dev->start_port];
1495 spin_lock_irqsave(&port->classport_lock, flags);
1496 if (!port->classport_info.valid)
1497 goto ret;
1498
1499 if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
1500 ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
1501 OPA_CLASS_PORT_INFO_PR_SUPPORT;
1502ret:
1503 spin_unlock_irqrestore(&port->classport_lock, flags);
1504 return ret;
1505}
1506
1507enum opa_pr_supported {
1508 PR_NOT_SUPPORTED,
1509 PR_OPA_SUPPORTED,
1510 PR_IB_SUPPORTED
1511};
1512
1513/**
1514 * Check if current PR query can be an OPA query.
1515 * Retuns PR_NOT_SUPPORTED if a path record query is not
1516 * possible, PR_OPA_SUPPORTED if an OPA path record query
1517 * is possible and PR_IB_SUPPORTED if an IB path record
1518 * query is possible.
1519 */
1520static int opa_pr_query_possible(struct ib_sa_client *client,
1521 struct ib_device *device,
1522 u8 port_num,
1523 struct sa_path_rec *rec)
1524{
1525 struct ib_port_attr port_attr;
1526
1527 if (ib_query_port(device, port_num, &port_attr))
1528 return PR_NOT_SUPPORTED;
1529
1530 if (ib_sa_opa_pathrecord_support(client, device, port_num))
1531 return PR_OPA_SUPPORTED;
1532
1533 if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
1534 return PR_NOT_SUPPORTED;
1535 else
1536 return PR_IB_SUPPORTED;
1537}
1538
1539static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
1540 int status,
1541 struct ib_sa_mad *mad)
1542{
1543 struct ib_sa_path_query *query =
1544 container_of(sa_query, struct ib_sa_path_query, sa_query);
1545
1546 if (mad) {
1547 struct sa_path_rec rec;
1548
1549 if (sa_query->flags & IB_SA_QUERY_OPA) {
1550 ib_unpack(opa_path_rec_table,
1551 ARRAY_SIZE(opa_path_rec_table),
1552 mad->data, &rec);
1553 rec.rec_type = SA_PATH_REC_TYPE_OPA;
1554 query->callback(status, &rec, query->context);
1555 } else {
1556 ib_unpack(path_rec_table,
1557 ARRAY_SIZE(path_rec_table),
1558 mad->data, &rec);
1559 rec.rec_type = SA_PATH_REC_TYPE_IB;
1560 sa_path_set_ndev(&rec, NULL);
1561 sa_path_set_ifindex(&rec, 0);
1562 sa_path_set_dmac_zero(&rec);
1563
1564 if (query->conv_pr) {
1565 struct sa_path_rec opa;
1566
1567 memset(&opa, 0, sizeof(struct sa_path_rec));
1568 sa_convert_path_ib_to_opa(&opa, &rec);
1569 query->callback(status, &opa, query->context);
1570 } else {
1571 query->callback(status, &rec, query->context);
1572 }
1573 }
1574 } else
1575 query->callback(status, NULL, query->context);
1576}
1577
1578static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
1579{
1580 struct ib_sa_path_query *query =
1581 container_of(sa_query, struct ib_sa_path_query, sa_query);
1582
1583 kfree(query->conv_pr);
1584 kfree(query);
1585}
1586
/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Path Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	enum opa_pr_supported status;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
	    (rec->rec_type != SA_PATH_REC_TYPE_OPA))
		return -EINVAL;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
		status = opa_pr_query_possible(client, device, port_num, rec);
		if (status == PR_NOT_SUPPORTED) {
			ret = -EINVAL;
			goto err1;
		} else if (status == PR_OPA_SUPPORTED) {
			query->sa_query.flags |= IB_SA_QUERY_OPA;
		} else {
			query->conv_pr =
				kmalloc(sizeof(*query->conv_pr), gfp_mask);
			if (!query->conv_pr) {
				ret = -ENOMEM;
				goto err1;
			}
		}
	}

	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err2;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release = ib_sa_path_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	if (query->sa_query.flags & IB_SA_QUERY_OPA) {
		ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			rec, mad->data);
	} else if (query->conv_pr) {
		sa_convert_path_opa_to_ib(query->conv_pr, rec);
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			query->conv_pr, mad->data);
	} else {
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			rec, mad->data);
	}

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
						query->conv_pr : rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err3;

	return ret;

err3:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err2:
	kfree(query->conv_pr);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
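
/*
 * Example usage (illustrative sketch only, not part of the original
 * driver): a caller that wraps ib_sa_path_rec_get() in a completion so
 * it can wait synchronously for the result. "path_query_ctx",
 * "path_rec_done" and "query_path_sync" are hypothetical names, and the
 * comp_mask shown is just one plausible choice. Guarded with "#if 0" so
 * it is never compiled into this file.
 */
#if 0
struct path_query_ctx {
	struct completion done;
	int status;
	struct sa_path_rec rec;
};

static void path_rec_done(int status, struct sa_path_rec *resp, void *context)
{
	struct path_query_ctx *ctx = context;

	ctx->status = status;
	if (!status)
		ctx->rec = *resp;	/* resp is valid only when status == 0 */
	complete(&ctx->done);
}

static int query_path_sync(struct ib_sa_client *client,
			   struct ib_device *device, u8 port_num,
			   struct sa_path_rec *rec)
{
	struct path_query_ctx ctx;
	struct ib_sa_query *query;
	int id;

	init_completion(&ctx.done);
	id = ib_sa_path_rec_get(client, device, port_num, rec,
				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
				3000, GFP_KERNEL, path_rec_done, &ctx, &query);
	if (id < 0)
		return id;	/* negative return value is an error code */

	/* Instead of waiting, a caller may ib_sa_cancel_query(id, query). */
	wait_for_completion(&ctx.done);
	return ctx.status;
}
#endif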

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client: SA client
 * @device: device to send request on
 * @port_num: port number to send request on
 * @method: SA method - should be get, set, or delete
 * @rec: Service Record to send in request
 * @comp_mask: component mask to send in request
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when request completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code. Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release = ib_sa_service_rec_release;
	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
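
/*
 * Example usage (illustrative sketch only, not part of the original
 * driver): registering a service record with IB_MGMT_METHOD_SET and an
 * asynchronous callback. "svc_reg_done" and "register_service" are
 * hypothetical names, and the component mask is only an assumed example.
 */
#if 0
static void svc_reg_done(int status, struct ib_sa_service_rec *resp,
			 void *context)
{
	if (status)
		pr_warn("service record registration failed: %d\n", status);
}

static int register_service(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num,
			    struct ib_sa_service_rec *rec,
			    struct ib_sa_query **query)
{
	/* The returned request ID (if non-negative) can cancel the request. */
	return ib_sa_service_rec_query(client, device, port_num,
				       IB_MGMT_METHOD_SET, rec,
				       IB_SA_SERVICE_REC_SERVICE_ID |
				       IB_SA_SERVICE_REC_SERVICE_GID,
				       3000, GFP_KERNEL,
				       svc_reg_done, NULL, query);
}
#endif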

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
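
/*
 * Example usage (illustrative sketch only, not part of the original
 * driver): fetching a GUID info record block with IB_MGMT_METHOD_GET.
 * "guid_info_done" and "get_guid_block" are hypothetical names; the
 * component mask values are assumed for illustration.
 */
#if 0
static void guid_info_done(int status, struct ib_sa_guidinfo_rec *resp,
			   void *context)
{
	if (!status)
		pr_info("GUID block %u fetched\n", resp->block_num);
}

static int get_guid_block(struct ib_sa_client *client,
			  struct ib_device *device, u8 port_num,
			  struct ib_sa_guidinfo_rec *rec,
			  struct ib_sa_query **query)
{
	/* rec->lid and rec->block_num select which block to fetch. */
	return ib_sa_guid_info_rec_query(client, device, port_num, rec,
					 IB_SA_GUIDINFO_REC_LID |
					 IB_SA_GUIDINFO_REC_BLOCK_NUM,
					 IB_MGMT_METHOD_GET, 3000, GFP_KERNEL,
					 guid_info_done, NULL, query);
}
#endif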

bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
				    struct ib_device *device,
				    u8 port_num)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	bool ret = false;
	unsigned long flags;

	if (!sa_dev)
		return ret;

	port = &sa_dev->port[port_num - sa_dev->start_port];

	spin_lock_irqsave(&port->classport_lock, flags);
	if ((port->classport_info.valid) &&
	    (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
		ret = ib_get_cpi_capmask2(&port->classport_info.data.ib) &
		      IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);
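
/*
 * Example usage (illustrative sketch only, not part of the original
 * driver): a multicast user could gate a send-only full-member join on
 * this capability. "choose_join_state" is a hypothetical helper, and
 * the join-state bit values are assumptions for illustration.
 */
#if 0
static u8 choose_join_state(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num)
{
	/* Fall back to a regular full-member join when the SA does not
	 * advertise send-only full-member support.
	 */
	if (ib_sa_sendonly_fullmem_support(client, device, port_num))
		return BIT(3);	/* send-only full-member join state */
	return BIT(0);		/* full-member join state */
}
#endif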

struct ib_classport_info_context {
	struct completion done;
	struct ib_sa_query *sa_query;
};

static void ib_classportinfo_cb(void *context)
{
	struct ib_classport_info_context *cb_ctx = context;

	complete(&cb_ctx->done);
}

static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status,
					      struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

	if (mad) {
		if (sa_query->flags & IB_SA_QUERY_OPA) {
			struct opa_class_port_info rec;

			ib_unpack(opa_classport_info_rec_table,
				  ARRAY_SIZE(opa_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.opa, &rec,
				       sizeof(info->data.opa));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);

		} else {
			struct ib_class_port_info rec;

			ib_unpack(ib_classport_info_rec_table,
				  ARRAY_SIZE(ib_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.ib, &rec,
				       sizeof(info->data.ib));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_IB;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);
		}
	}
	query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					  int timeout_ms,
					  void (*callback)(void *context),
					  void *context,
					  struct ib_sa_query **sa_query)
{
	struct ib_mad_agent *agent;
	struct ib_sa_classport_info_query *query;
	struct ib_sa_mad *mad;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err_free;

	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = ib_sa_classport_info_rec_callback;
	query->sa_query.release = ib_sa_classport_info_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err_free_mad;

	return ret;

err_free_mad:
	*sa_query = NULL;
	free_mad(&query->sa_query);

err_free:
	kfree(query);
	return ret;
}

static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is valid, nothing
	 * to do here.
	 */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);

	/* If the classport info is still not valid, the query should have
	 * failed for some reason. Retry issuing the query.
	 */
	if (!port->classport_info.valid) {
		port->classport_info.retry_cnt++;
		if (port->classport_info.retry_cnt <=
		    IB_SA_CPI_MAX_RETRY_CNT) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
		}
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
	return;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(port->agent->device,
					 port->port_num);
	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
	rdma_ah_set_port_num(&ah_attr, port->port_num);
	if (port_attr.grh_required) {
		if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA) {
			rdma_ah_set_make_grd(&ah_attr, true);
		} else {
			rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
			rdma_ah_set_subnet_prefix(&ah_attr,
						  cpu_to_be64(port_attr.subnet_prefix));
			rdma_ah_set_interface_id(&ah_attr,
						 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
		}
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u8 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);
			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof(*sa_dev) +
			 (e - s + 1) * sizeof(struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	ib_register_event_handler(&sa_dev->event_handler);

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref,
					 free_sm_ah);
		}
	}

	kfree(sa_dev);
}

int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof(tid));

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}