/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/if_vlan.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int table_len;
	u16 table[] __counted_by(table_len);
};

struct ib_update_work {
	struct work_struct work;
	struct ib_event event;
	bool enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV = 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT = 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE = 1UL << 3,
};

enum gid_table_entry_state {
	GID_TABLE_ENTRY_INVALID = 1,
	GID_TABLE_ENTRY_VALID = 2,
	/*
	 * Indicates that the entry is pending removal; there may still
	 * be active users of this GID entry. When the last user releases
	 * its reference, the entry is detached from the table.
	 */
	GID_TABLE_ENTRY_PENDING_DEL = 3,
};

struct roce_gid_ndev_storage {
	struct rcu_head rcu_head;
	struct net_device *ndev;
};

struct ib_gid_table_entry {
	struct kref kref;
	struct work_struct del_work;
	struct ib_gid_attr attr;
	void *context;
	/* Store the ndev pointer to release its reference later on in
	 * call_rcu context, because by that time the gid_table_entry
	 * and attr might already be freed. So keep a copy of it.
	 * ndev_storage is freed by the rcu callback.
	 */
	struct roce_gid_ndev_storage *ndev_storage;
	enum gid_table_entry_state state;
};

struct ib_gid_table {
	int sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Check whether this GID already exists.
	 * (b) Find a free space.
	 * (c) Write the new GID.
	 *
	 * Delete requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 */
	/* Any writer to data_vec must hold this lock and the write side of
	 * rwlock. Readers must hold only rwlock. All writers must be in a
	 * sleepable context.
	 */
	struct mutex lock;
	/* rwlock protects data_vec[ix]->state and entry pointer.
	 */
	rwlock_t rwlock;
	struct ib_gid_table_entry **data_vec;
	/* bit field, each bit indicates the index of default GID */
	u32 default_gid_indices;
};

static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
{
	struct ib_event event;

	event.device = ib_dev;
	event.element.port_num = port;
	event.event = IB_EVENT_GID_CHANGE;

	ib_dispatch_event_clients(&event);
}

static const char * const gid_type_str[] = {
	/* IB/RoCE v1 value is set for IB_GID_TYPE_IB and IB_GID_TYPE_ROCE for
	 * user space compatibility reasons.
	 */
	[IB_GID_TYPE_IB] = "IB/RoCE v1",
	[IB_GID_TYPE_ROCE] = "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP] = "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

/** rdma_is_zero_gid - Check if given GID is zero or not.
 * @gid: GID to check
 * Returns true if given GID is zero, returns false otherwise.
 */
bool rdma_is_zero_gid(const union ib_gid *gid)
{
	return !memcmp(gid, &zgid, sizeof(*gid));
}
EXPORT_SYMBOL(rdma_is_zero_gid);

/** is_gid_index_default - Check if a given index belongs to
 * reserved default GIDs or not.
 * @table: GID table pointer
 * @index: Index to check in GID table
 * Returns true if index is one of the reserved default GID indexes,
 * otherwise returns false.
 */
static bool is_gid_index_default(const struct ib_gid_table *table,
				 unsigned int index)
{
	return index < 32 && (BIT(index) & table->default_gid_indices);
}

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
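
/*
 * Example (an illustrative sketch, not part of the upstream file): parsing
 * the strings produced by gid_type_str[] above maps back to the enum value,
 * and a trailing newline from a sysfs write is tolerated.
 *
 *	int type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *	// type == IB_GID_TYPE_ROCE_UDP_ENCAP on success,
 *	// -EINVAL if the string matches no known GID type.
 */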

static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
{
	return device->port_data[port].cache.gid;
}

static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
{
	return !entry;
}

static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
{
	return entry && entry->state == GID_TABLE_ENTRY_VALID;
}

static void schedule_free_gid(struct kref *kref)
{
	struct ib_gid_table_entry *entry =
		container_of(kref, struct ib_gid_table_entry, kref);

	queue_work(ib_wq, &entry->del_work);
}

static void put_gid_ndev(struct rcu_head *head)
{
	struct roce_gid_ndev_storage *storage =
		container_of(head, struct roce_gid_ndev_storage, rcu_head);

	WARN_ON(!storage->ndev);
	/* At this point it's safe to release the netdev reference,
	 * as all callers working on gid_attr->ndev are done
	 * using this netdev.
	 */
	dev_put(storage->ndev);
	kfree(storage);
}

static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
	struct ib_device *device = entry->attr.device;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	dev_dbg(&device->dev, "%s port=%u index=%u gid %pI6\n", __func__,
		port_num, entry->attr.index, entry->attr.gid.raw);

	write_lock_irq(&table->rwlock);

	/*
	 * The only way to avoid overwriting NULL in the table is to
	 * check whether this is still the same entry in the table.
	 * If a new entry was added at this index by the time we free
	 * here, don't overwrite the table entry.
	 */
	if (entry == table->data_vec[entry->attr.index])
		table->data_vec[entry->attr.index] = NULL;
	/* Now this index is ready to be allocated */
	write_unlock_irq(&table->rwlock);

	if (entry->ndev_storage)
		call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev);
	kfree(entry);
}

static void free_gid_entry(struct kref *kref)
{
	struct ib_gid_table_entry *entry =
		container_of(kref, struct ib_gid_table_entry, kref);

	free_gid_entry_locked(entry);
}

/**
 * free_gid_work - Release reference to the GID entry
 * @work: Work structure to refer to GID entry which needs to be
 * deleted.
 *
 * free_gid_work() frees the entry from the HCA's hardware table
 * if provider supports it. It releases reference to netdevice.
 */
static void free_gid_work(struct work_struct *work)
{
	struct ib_gid_table_entry *entry =
		container_of(work, struct ib_gid_table_entry, del_work);
	struct ib_device *device = entry->attr.device;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	mutex_lock(&table->lock);
	free_gid_entry_locked(entry);
	mutex_unlock(&table->lock);
}

static struct ib_gid_table_entry *
alloc_gid_entry(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	struct net_device *ndev;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	ndev = rcu_dereference_protected(attr->ndev, 1);
	if (ndev) {
		entry->ndev_storage = kzalloc(sizeof(*entry->ndev_storage),
					      GFP_KERNEL);
		if (!entry->ndev_storage) {
			kfree(entry);
			return NULL;
		}
		dev_hold(ndev);
		entry->ndev_storage->ndev = ndev;
	}
	kref_init(&entry->kref);
	memcpy(&entry->attr, attr, sizeof(*attr));
	INIT_WORK(&entry->del_work, free_gid_work);
	entry->state = GID_TABLE_ENTRY_INVALID;
	return entry;
}

static void store_gid_entry(struct ib_gid_table *table,
			    struct ib_gid_table_entry *entry)
{
	entry->state = GID_TABLE_ENTRY_VALID;

	dev_dbg(&entry->attr.device->dev, "%s port=%u index=%u gid %pI6\n",
		__func__, entry->attr.port_num, entry->attr.index,
		entry->attr.gid.raw);

	lockdep_assert_held(&table->lock);
	write_lock_irq(&table->rwlock);
	table->data_vec[entry->attr.index] = entry;
	write_unlock_irq(&table->rwlock);
}

static void get_gid_entry(struct ib_gid_table_entry *entry)
{
	kref_get(&entry->kref);
}

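/* Descriptive note (added commentary, not upstream text): the final put here
 * defers freeing to ib_wq via schedule_free_gid(), so this variant appears
 * safe to call while holding table->rwlock or from atomic context.
 */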
static void put_gid_entry(struct ib_gid_table_entry *entry)
{
	kref_put(&entry->kref, schedule_free_gid);
}

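/* Descriptive note (added commentary, not upstream text): this variant is for
 * callers already holding table->lock; the final put frees the entry
 * synchronously via free_gid_entry() instead of deferring to the workqueue.
 */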
static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
{
	kref_put(&entry->kref, free_gid_entry);
}

static int add_roce_gid(struct ib_gid_table_entry *entry)
{
	const struct ib_gid_attr *attr = &entry->attr;
	int ret;

	if (!attr->ndev) {
		dev_err(&attr->device->dev, "%s NULL netdev port=%u index=%u\n",
			__func__, attr->port_num, attr->index);
		return -EINVAL;
	}
	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
		ret = attr->device->ops.add_gid(attr, &entry->context);
		if (ret) {
			dev_err(&attr->device->dev,
				"%s GID add failed port=%u index=%u\n",
				__func__, attr->port_num, attr->index);
			return ret;
		}
	}
	return 0;
}

/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev: IB device whose GID entry to be deleted
 * @port: Port number of the IB device
 * @table: GID table of the IB device for a port
 * @ix: GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u32 port,
		    struct ib_gid_table *table, int ix)
{
	struct roce_gid_ndev_storage *ndev_storage;
	struct ib_gid_table_entry *entry;

	lockdep_assert_held(&table->lock);

	dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
		ix, table->data_vec[ix]->attr.gid.raw);

	write_lock_irq(&table->rwlock);
	entry = table->data_vec[ix];
	entry->state = GID_TABLE_ENTRY_PENDING_DEL;
	/*
	 * For non-RoCE protocols, the GID entry slot is ready for reuse.
	 */
	if (!rdma_protocol_roce(ib_dev, port))
		table->data_vec[ix] = NULL;
	write_unlock_irq(&table->rwlock);

	if (rdma_cap_roce_gid_table(ib_dev, port))
		ib_dev->ops.del_gid(&entry->attr, &entry->context);

	ndev_storage = entry->ndev_storage;
	if (ndev_storage) {
		entry->ndev_storage = NULL;
		rcu_assign_pointer(entry->attr.ndev, NULL);
		call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
	}

	put_gid_entry_locked(entry);
}

/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table: GID table in which GID to be added or modified
 * @attr: Attributes of the GID
 *
 * Returns 0 on success or an appropriate error code. It accepts zero
 * GID addition for non-RoCE ports of HCAs that report them as valid
 * GIDs. However, such zero GIDs are not added to the cache.
 */
static int add_modify_gid(struct ib_gid_table *table,
			  const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	int ret = 0;

	/*
	 * Invalidate any old entry in the table to make it safe to write to
	 * this index.
	 */
	if (is_gid_entry_valid(table->data_vec[attr->index]))
		del_gid(attr->device, attr->port_num, table, attr->index);

	/*
	 * Some HCAs report multiple GID entries with only one valid GID, and
	 * leave other unused entries as the zero GID. Convert zero GIDs to
	 * empty table entries instead of storing them.
	 */
	if (rdma_is_zero_gid(&attr->gid))
		return 0;

	entry = alloc_gid_entry(attr);
	if (!entry)
		return -ENOMEM;

	if (rdma_protocol_roce(attr->device, attr->port_num)) {
		ret = add_roce_gid(entry);
		if (ret)
			goto done;
	}

	store_gid_entry(table, entry);
	return 0;

done:
	put_gid_entry(entry);
	return ret;
}

/* rwlock should be read locked, or lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = table->data_vec[i];
		struct ib_gid_attr *attr;
		int curr_index = i;

		i++;

		/* find_gid() is used during GID addition, where it is
		 * expected to return a free entry slot that is not a
		 * duplicate. A free slot is requested and returned only
		 * when pempty is set, so look for one only in that case.
		 */
		if (pempty && empty < 0) {
			if (is_gid_entry_free(data) &&
			    default_gid ==
				is_gid_index_default(table, curr_index)) {
				/*
				 * Found an invalid (free) entry; allocate it.
				 * If default GID is requested, then our
				 * found slot must be one of the DEFAULT
				 * reserved slots or we fail.
				 * This ensures that only DEFAULT reserved
				 * slots are used for default property GIDs.
				 */
				empty = curr_index;
			}
		}

		/*
		 * Additionally find_gid() is used to find valid entry during
		 * lookup operation; so ignore the entries which are marked as
		 * pending for removal and the entries which are marked as
		 * invalid.
		 */
		if (!is_gid_entry_valid(data))
			continue;

		if (found >= 0)
			continue;

		attr = &data->attr;
		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->attr.gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    is_gid_index_default(table, curr_index) != default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
			      union ib_gid *gid, struct ib_gid_attr *attr,
			      unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int empty;
	int ix;

	/* Do not allow adding zero GIDs, per IB spec version 1.3
	 * section 4.1.1 point (6) and sections 12.7.10 and 12.7.20.
	 */
	if (rdma_is_zero_gid(gid))
		return -EINVAL;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}
	attr->device = ib_dev;
	attr->index = empty;
	attr->port_num = port;
	attr->gid = *gid;
	ret = add_modify_gid(table, attr);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_warn("%s: unable to add gid %pI6 error=%d\n",
			__func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE |
			     GID_ATTR_FIND_MASK_NETDEV;

	return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
}
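
/*
 * Example (a sketch under assumptions, not upstream code): callers such as
 * the RoCE GID management code populate only the lookup-relevant fields of
 * attr; __ib_cache_gid_add() fills in device, port and index itself.
 *
 *	struct ib_gid_attr attr = {
 *		.ndev = ndev,	// assumed netdev backing this GID
 *		.gid_type = IB_GID_TYPE_ROCE_UDP_ENCAP,
 *	};
 *
 *	ib_cache_gid_add(ib_dev, port, &gid, &attr);
 */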

static int
_ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		  union ib_gid *gid, struct ib_gid_attr *attr,
		  unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int ix;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, NULL);
	if (ix < 0) {
		ret = -EINVAL;
		goto out_unlock;
	}

	del_gid(ib_dev, port, table, ix);
	dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_debug("%s: can't delete gid %pI6 error=%d\n",
			 __func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE |
			     GID_ATTR_FIND_MASK_DEFAULT |
			     GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	for (ix = 0; ix < table->sz; ix++) {
		if (is_gid_entry_valid(table->data_vec[ix]) &&
		    table->data_vec[ix]->attr.ndev == ndev) {
			del_gid(ib_dev, port, table, ix);
			deleted = true;
		}
	}

	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

/**
 * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
 * a valid GID entry for given search parameters. It searches for the specified
 * GID value in the local software cache.
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port: The port number of the device where the GID value should be searched.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * Returns the SGID attributes with a held reference if the GID is found,
 * or ERR_PTR on error.
 * The caller must invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *
rdma_find_gid_by_port(struct ib_device *ib_dev,
		      const union ib_gid *gid,
		      enum ib_gid_type gid_type,
		      u32 port, struct net_device *ndev)
{
	int local_index;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	const struct ib_gid_attr *attr;
	unsigned long flags;

	if (!rdma_is_port_valid(ib_dev, port))
		return ERR_PTR(-ENOENT);

	table = rdma_gid_table(ib_dev, port);

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		get_gid_entry(table->data_vec[local_index]);
		attr = &table->data_vec[local_index]->attr;
		read_unlock_irqrestore(&table->rwlock, flags);
		return attr;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid_by_port);
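
/*
 * Example (illustrative only): a per-port lookup that ignores the netdev,
 * followed by the mandatory reference release.
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid_by_port(ib_dev, &gid, IB_GID_TYPE_IB, port, NULL);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	// ... use attr->index, attr->gid_type ...
 *	rdma_put_gid_attr(attr);
 */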

/**
 * rdma_find_gid_by_filter - Returns the GID table attribute where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Private data to pass into the call-back.
 *
 * rdma_find_gid_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 *
 */
const struct ib_gid_attr *rdma_find_gid_by_filter(
	struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
	bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
		       void *),
	void *context)
{
	const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
	struct ib_gid_table *table;
	unsigned long flags;
	unsigned int i;

	if (!rdma_is_port_valid(ib_dev, port))
		return ERR_PTR(-EINVAL);

	table = rdma_gid_table(ib_dev, port);

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_table_entry *entry = table->data_vec[i];

		if (!is_gid_entry_valid(entry))
			continue;

		if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
			continue;

		if (filter(gid, &entry->attr, context)) {
			get_gid_entry(entry);
			res = &entry->attr;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);
	return res;
}

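/*
 * Example (a hypothetical filter, not from this file): select only RoCE v2
 * entries among those matching the GID value. The filter runs under the
 * table's read lock, so it must not sleep.
 *
 *	static bool match_roce_v2(const union ib_gid *gid,
 *				  const struct ib_gid_attr *attr, void *ctx)
 *	{
 *		return attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
 *	}
 *
 *	attr = rdma_find_gid_by_filter(ib_dev, &gid, port, match_roce_v2, NULL);
 *	if (!IS_ERR(attr))
 *		rdma_put_gid_attr(attr);
 */
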
static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);
	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_device *device,
			      struct ib_gid_table *table)
{
	int i;

	if (!table)
		return;

	for (i = 0; i < table->sz; i++) {
		if (is_gid_entry_free(table->data_vec[i]))
			continue;

		WARN_ONCE(true,
			  "GID entry ref leak for dev %s index %d ref=%u\n",
			  dev_name(&device->dev), i,
			  kref_read(&table->data_vec[i]->kref));
	}

	mutex_destroy(&table->lock);
	kfree(table->data_vec);
	kfree(table);
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
				   struct ib_gid_table *table)
{
	int i;

	if (!table)
		return;

	mutex_lock(&table->lock);
	for (i = 0; i < table->sz; ++i) {
		if (is_gid_entry_valid(table->data_vec[i]))
			del_gid(ib_dev, port, table, i);
	}
	mutex_unlock(&table->lock);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid = { };
	struct ib_gid_attr gid_attr;
	unsigned int gid_type;
	unsigned long mask;

	mask = GID_ATTR_FIND_MASK_GID_TYPE |
	       GID_ATTR_FIND_MASK_DEFAULT |
	       GID_ATTR_FIND_MASK_NETDEV;
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			make_default_gid(ndev, &gid);
			__ib_cache_gid_add(ib_dev, port, &gid,
					   &gid_attr, mask, true);
		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
			_ib_cache_gid_del(ib_dev, port, &gid,
					  &gid_attr, mask, true);
		}
	}
}

static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
				      struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	/* Reserve starting indices for default GIDs */
	for (i = 0; i < num_default_gids && i < table->sz; i++)
		table->default_gid_indices |= BIT(i);
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	u32 p;

	rdma_for_each_port (ib_dev, p) {
		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
		ib_dev->port_data[p].cache.gid = NULL;
	}
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u32 rdma_port;

	rdma_for_each_port (ib_dev, rdma_port) {
		table = alloc_gid_table(
			ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
		if (!table)
			goto rollback_table_setup;

		gid_table_reserve_default(ib_dev, rdma_port, table);
		ib_dev->port_data[rdma_port].cache.gid = table;
	}
	return 0;

rollback_table_setup:
	gid_table_release_one(ib_dev);
	return -ENOMEM;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	u32 p;

	rdma_for_each_port (ib_dev, p)
		cleanup_gid_table_port(ib_dev, p,
				       ib_dev->port_data[p].cache.gid);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	rdma_roce_rescan_device(ib_dev);

	return err;
}

/**
 * rdma_query_gid - Read the GID content from the GID software cache
 * @device: Device to query the GID
 * @port_num: Port number of the device
 * @index: Index of the GID table entry to read
 * @gid: Pointer to GID where to store the entry's GID
 *
 * rdma_query_gid() only reads the GID entry content for requested device,
 * port and index. It reads for IB, RoCE and iWarp link layers. It doesn't
 * hold any reference to the GID table entry in the HCA or software cache.
 *
 * Returns 0 on success or appropriate error code.
 *
 */
int rdma_query_gid(struct ib_device *device, u32 port_num,
		   int index, union ib_gid *gid)
{
	struct ib_gid_table *table;
	unsigned long flags;
	int res;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = rdma_gid_table(device, port_num);
	read_lock_irqsave(&table->rwlock, flags);

	if (index < 0 || index >= table->sz) {
		res = -EINVAL;
		goto done;
	}

	if (!is_gid_entry_valid(table->data_vec[index])) {
		res = -ENOENT;
		goto done;
	}

	memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
	res = 0;

done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return res;
}
EXPORT_SYMBOL(rdma_query_gid);
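
/*
 * Example (illustrative only): reading GID 0 of port 1 into a local copy;
 * no reference is taken, so there is nothing to release afterwards.
 *
 *	union ib_gid gid;
 *	int ret;
 *
 *	ret = rdma_query_gid(device, 1, 0, &gid);
 *	if (ret)
 *		return ret;
 *	// gid now holds a snapshot of the cached entry.
 */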

/**
 * rdma_read_gid_hw_context - Read the HW GID context from GID attribute
 * @attr: Pointer to the GID attribute
 *
 * rdma_read_gid_hw_context() reads the driver's GID HW context corresponding
 * to the SGID attr. Callers are required to already be holding the reference
 * to an existing GID entry.
 *
 * Returns the HW GID context
 *
 */
void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr)
{
	return container_of(attr, struct ib_gid_table_entry, attr)->context;
}
EXPORT_SYMBOL(rdma_read_gid_hw_context);

/**
 * rdma_find_gid - Returns SGID attributes if the matching GID is found.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * rdma_find_gid() searches for the specified GID value in the software cache.
 *
 * Returns GID attributes if a valid GID is found or returns ERR_PTR for the
 * error. The caller must invoke rdma_put_gid_attr() to release the reference.
 *
 */
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
					const union ib_gid *gid,
					enum ib_gid_type gid_type,
					struct net_device *ndev)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
	u32 p;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	rdma_for_each_port(device, p) {
		struct ib_gid_table *table;
		unsigned long flags;
		int index;

		table = device->port_data[p].cache.gid;
		read_lock_irqsave(&table->rwlock, flags);
		index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
		if (index >= 0) {
			const struct ib_gid_attr *attr;

			get_gid_entry(table->data_vec[index]);
			attr = &table->data_vec[index]->attr;
			read_unlock_irqrestore(&table->rwlock, flags);
			return attr;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid);
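
/*
 * Example (illustrative only): a device-wide lookup of a RoCE v2 GID. Unlike
 * rdma_query_gid(), a successful lookup holds a reference that the caller
 * must drop with rdma_put_gid_attr().
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid(device, &gid, IB_GID_TYPE_ROCE_UDP_ENCAP, NULL);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	// ... use attr->port_num and attr->index ...
 *	rdma_put_gid_attr(attr);
 */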

int ib_get_cached_pkey(struct ib_device *device,
		       u32 port_num,
		       int index,
		       u16 *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;

	if (!cache || index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

void ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
				 u64 *sn_pfx)
{
	unsigned long flags;

	read_lock_irqsave(&device->cache_lock, flags);
	*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
	read_unlock_irqrestore(&device->cache_lock, flags);
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
			u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;
	if (!cache) {
		ret = -EINVAL;
		goto err;
	}

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else {
				partial_ix = i;
			}
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

err:
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
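
/*
 * Note (added commentary, not upstream text): the low 15 bits of a P_Key are
 * the partition number and the top bit (0x8000) marks full membership, so
 * the loop above compares partitions with a 0x7fff mask and prefers a
 * full-member entry, falling back to the first limited-member match.
 * For example, searching for pkey 0xffff in a table holding {0x7fff, 0xffff}
 * returns the index of 0xffff.
 */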

int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
			      u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;
	if (!cache) {
		ret = -EINVAL;
		goto err;
	}

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

err:
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);
	*lmc = device->port_data[port_num].cache.lmc;
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);
	*port_state = device->port_data[port_num].cache.port_state;
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

/**
 * rdma_get_gid_attr - Returns GID attributes for a port of a device
 * at a requested gid_index, if a valid GID entry exists.
 * @device: The device to query.
 * @port_num: The port number on the device where the GID value
 *   is to be queried.
 * @index: Index of the GID table entry whose attributes are to
 *   be queried.
 *
 * rdma_get_gid_attr() acquires reference count of gid attributes from the
 * cached GID table. Caller must invoke rdma_put_gid_attr() to release
 * reference to gid attribute regardless of link layer.
 *
 * Returns pointer to valid gid attribute or ERR_PTR for the appropriate error
 * code.
 */
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
{
	const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
	struct ib_gid_table *table;
	unsigned long flags;

	if (!rdma_is_port_valid(device, port_num))
		return ERR_PTR(-EINVAL);

	table = rdma_gid_table(device, port_num);
	if (index < 0 || index >= table->sz)
		return ERR_PTR(-EINVAL);

	read_lock_irqsave(&table->rwlock, flags);
	if (!is_gid_entry_valid(table->data_vec[index]))
		goto done;

	get_gid_entry(table->data_vec[index]);
	attr = &table->data_vec[index]->attr;
done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return attr;
}
EXPORT_SYMBOL(rdma_get_gid_attr);
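
/*
 * Example (illustrative only): every successful rdma_get_gid_attr() must be
 * paired with rdma_put_gid_attr(), which keeps the table entry alive even if
 * it is deleted from the table in the meantime (it moves to PENDING_DEL).
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_get_gid_attr(device, port_num, 0);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	// ... inspect attr->gid, attr->gid_type ...
 *	rdma_put_gid_attr(attr);
 */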

/**
 * rdma_query_gid_table - Reads GID table entries of all the ports of a device up to max_entries.
 * @device: The device to query.
 * @entries: Entries where GID entries are returned.
 * @max_entries: Maximum number of entries that can be returned.
 *   Entries array must be allocated to hold max_entries number of entries.
 *
 * Returns number of entries on success or appropriate error code.
 */
ssize_t rdma_query_gid_table(struct ib_device *device,
			     struct ib_uverbs_gid_entry *entries,
			     size_t max_entries)
{
	const struct ib_gid_attr *gid_attr;
	ssize_t num_entries = 0, ret;
	struct ib_gid_table *table;
	u32 port_num, i;
	struct net_device *ndev;
	unsigned long flags;

	rdma_for_each_port(device, port_num) {
		table = rdma_gid_table(device, port_num);
		read_lock_irqsave(&table->rwlock, flags);
		for (i = 0; i < table->sz; i++) {
			if (!is_gid_entry_valid(table->data_vec[i]))
				continue;
			if (num_entries >= max_entries) {
				ret = -EINVAL;
				goto err;
			}

			gid_attr = &table->data_vec[i]->attr;

			memcpy(&entries->gid, &gid_attr->gid,
			       sizeof(gid_attr->gid));
			entries->gid_index = gid_attr->index;
			entries->port_num = gid_attr->port_num;
			entries->gid_type = gid_attr->gid_type;
			ndev = rcu_dereference_protected(
				gid_attr->ndev,
				lockdep_is_held(&table->rwlock));
			if (ndev)
				entries->netdev_ifindex = ndev->ifindex;

			num_entries++;
			entries++;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return num_entries;
err:
	read_unlock_irqrestore(&table->rwlock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_query_gid_table);
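
/*
 * Example (a sketch under assumptions): dumping all valid GID entries of a
 * device into a caller-allocated array, assumed large enough for the device.
 *
 *	struct ib_uverbs_gid_entry entries[64];	// assumed capacity
 *	ssize_t n;
 *
 *	n = rdma_query_gid_table(device, entries, ARRAY_SIZE(entries));
 *	if (n < 0)
 *		return n;	// -EINVAL if the array was too small
 *	// entries[0..n-1] are snapshots; no references are held.
 */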

/**
 * rdma_put_gid_attr - Release reference to the GID attribute
 * @attr: Pointer to the GID attribute whose reference
 *   needs to be released.
 *
 * rdma_put_gid_attr() must be used to release a reference acquired using
 * rdma_get_gid_attr() or any API which returns a pointer to the ib_gid_attr,
 * regardless of IB or RoCE link layer.
 *
 */
void rdma_put_gid_attr(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);

	put_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_put_gid_attr);

/**
 * rdma_hold_gid_attr - Get reference to existing GID attribute
 *
 * @attr: Pointer to the GID attribute whose reference
 *   needs to be taken.
 *
 * Increase the reference count to a GID attribute to keep it from being
 * freed. Callers are required to already be holding a reference to attribute.
 *
 */
void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);

	get_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_hold_gid_attr);

/**
 * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
 * which must be in UP state.
 *
 * @attr: Pointer to the GID attribute
 *
 * Returns pointer to netdevice if the netdevice was attached to GID and
 * netdevice is in UP state. Caller must hold RCU lock as this API
 * reads the netdev flags which can change while netdevice migrates to
 * different net namespace. Returns ERR_PTR with error code otherwise.
 *
 */
struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);
	struct ib_device *device = entry->attr.device;
	struct net_device *ndev = ERR_PTR(-EINVAL);
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table;
	unsigned long flags;
	bool valid;

	table = rdma_gid_table(device, port_num);

	read_lock_irqsave(&table->rwlock, flags);
	valid = is_gid_entry_valid(table->data_vec[attr->index]);
	if (valid) {
		ndev = rcu_dereference(attr->ndev);
		if (!ndev)
			ndev = ERR_PTR(-ENODEV);
	}
	read_unlock_irqrestore(&table->rwlock, flags);
	return ndev;
}
EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);
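
/*
 * Example (illustrative only): the returned netdev is only guaranteed to
 * stay valid inside the RCU read-side critical section.
 *
 *	struct net_device *ndev;
 *
 *	rcu_read_lock();
 *	ndev = rdma_read_gid_attr_ndev_rcu(attr);
 *	if (!IS_ERR(ndev))
 *		netdev_dbg(ndev, "gid index %u\n", attr->index);
 *	rcu_read_unlock();
 */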

static int get_lower_dev_vlan(struct net_device *lower_dev,
			      struct netdev_nested_priv *priv)
{
	u16 *vlan_id = (u16 *)priv->data;

	if (is_vlan_dev(lower_dev))
		*vlan_id = vlan_dev_vlan_id(lower_dev);

	/* We are interested only in the first-level vlan device, so
	 * always return 1 to stop iterating over next-level devices.
	 */
	return 1;
}

/**
 * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address
 * of a GID entry.
 *
 * @attr: GID attribute pointer whose L2 fields to be read
 * @vlan_id: Pointer to vlan id to fill up if the GID entry has
 *   vlan id. It is optional.
 * @smac: Pointer to smac to fill up for a GID entry. It is optional.
 *
 * rdma_read_gid_l2_fields() returns 0 on success, filling in the vlan id
 * (if the gid entry has a vlan) and the source MAC, or returns an error.
 */
int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
			    u16 *vlan_id, u8 *smac)
{
	struct netdev_nested_priv priv = {
		.data = (void *)vlan_id,
	};
	struct net_device *ndev;

	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	if (smac)
		ether_addr_copy(smac, ndev->dev_addr);
	if (vlan_id) {
		*vlan_id = 0xffff;
		if (is_vlan_dev(ndev)) {
			*vlan_id = vlan_dev_vlan_id(ndev);
		} else {
			/* If the netdev is an upper device and its lower
			 * device is a vlan device, use the vlan id of
			 * the lower vlan device for this gid entry.
			 */
			netdev_walk_all_lower_dev_rcu(attr->ndev,
						      get_lower_dev_vlan, &priv);
		}
	}
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rdma_read_gid_l2_fields);
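
/*
 * Example (illustrative only): reading L2 fields for a held GID attribute;
 * a vlan_id of 0xffff after the call means the entry has no vlan.
 *
 *	u8 smac[ETH_ALEN];
 *	u16 vlan_id;
 *
 *	if (!rdma_read_gid_l2_fields(attr, &vlan_id, smac) &&
 *	    vlan_id != 0xffff)
 *		pr_debug("gid on vlan %u\n", vlan_id);
 */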

static int config_non_roce_gid_cache(struct ib_device *device,
				     u32 port, struct ib_port_attr *tprops)
{
	struct ib_gid_attr gid_attr = {};
	struct ib_gid_table *table;
	int ret = 0;
	int i;

	gid_attr.device = device;
	gid_attr.port_num = port;
	table = rdma_gid_table(device, port);

	mutex_lock(&table->lock);
	for (i = 0; i < tprops->gid_tbl_len; ++i) {
		if (!device->ops.query_gid)
			continue;
		ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
		if (ret) {
			dev_warn(&device->dev,
				 "query_gid failed (%d) for index %d\n", ret,
				 i);
			goto err;
		}

		if (rdma_protocol_iwarp(device, port)) {
			struct net_device *ndev;

			ndev = ib_device_get_netdev(device, port);
			if (!ndev)
				continue;
			RCU_INIT_POINTER(gid_attr.ndev, ndev);
			dev_put(ndev);
		}

		gid_attr.index = i;
		tprops->subnet_prefix =
			be64_to_cpu(gid_attr.gid.global.subnet_prefix);
		add_modify_gid(table, &gid_attr);
	}
err:
	mutex_unlock(&table->lock);
	return ret;
}

static int
ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
		bool update_pkeys, bool enforce_security)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL;
	struct ib_pkey_cache *old_pkey_cache = NULL;
	int i;
	int ret;

	if (!rdma_is_port_valid(device, port))
		return -EINVAL;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return -ENOMEM;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
		goto err;
	}

	if (!rdma_protocol_roce(device, port) && update_gids) {
		ret = config_non_roce_gid_cache(device, port,
						tprops);
		if (ret)
			goto err;
	}

	update_pkeys &= !!tprops->pkey_tbl_len;

	if (update_pkeys) {
		pkey_cache = kmalloc(struct_size(pkey_cache, table,
						 tprops->pkey_tbl_len),
				     GFP_KERNEL);
		if (!pkey_cache) {
			ret = -ENOMEM;
			goto err;
		}

		pkey_cache->table_len = tprops->pkey_tbl_len;

		for (i = 0; i < pkey_cache->table_len; ++i) {
			ret = ib_query_pkey(device, port, i,
					    pkey_cache->table + i);
			if (ret) {
				dev_warn(&device->dev,
					 "ib_query_pkey failed (%d) for index %d\n",
					 ret, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache_lock);

	if (update_pkeys) {
		old_pkey_cache = device->port_data[port].cache.pkey;
		device->port_data[port].cache.pkey = pkey_cache;
	}
	device->port_data[port].cache.lmc = tprops->lmc;
	device->port_data[port].cache.port_state = tprops->state;

	device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
	write_unlock_irq(&device->cache_lock);

	if (enforce_security)
		ib_security_cache_change(device,
					 port,
					 tprops->subnet_prefix);

	kfree(old_pkey_cache);
	kfree(tprops);
	return 0;

err:
	kfree(pkey_cache);
	kfree(tprops);
	return ret;
}

static void ib_cache_event_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);
	int ret;

	/* Before distributing the cache update event, first sync
	 * the cache.
	 */
	ret = ib_cache_update(work->event.device, work->event.element.port_num,
			      work->event.event == IB_EVENT_GID_CHANGE,
			      work->event.event == IB_EVENT_PKEY_CHANGE,
			      work->enforce_security);

	/* GID events are already notified for individual GID entries by
	 * dispatch_gid_change_event(). Hence, notify only for the rest of
	 * the events.
	 */
	if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
		ib_dispatch_event_clients(&work->event);

	kfree(work);
}

static void ib_generic_event_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_dispatch_event_clients(&work->event);
	kfree(work);
}

static bool is_cache_update_event(const struct ib_event *event)
{
	return (event->event == IB_EVENT_PORT_ERR ||
		event->event == IB_EVENT_PORT_ACTIVE ||
		event->event == IB_EVENT_LID_CHANGE ||
		event->event == IB_EVENT_PKEY_CHANGE ||
		event->event == IB_EVENT_CLIENT_REREGISTER ||
		event->event == IB_EVENT_GID_CHANGE);
}

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event: Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(const struct ib_event *event)
{
	struct ib_update_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	if (is_cache_update_event(event))
		INIT_WORK(&work->work, ib_cache_event_task);
	else
		INIT_WORK(&work->work, ib_generic_event_task);

	work->event = *event;
	if (event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_GID_CHANGE)
		work->enforce_security = true;

	queue_work(ib_wq, &work->work);
}
EXPORT_SYMBOL(ib_dispatch_event);

int ib_cache_setup_one(struct ib_device *device)
{
	u32 p;
	int err;

	err = gid_table_setup_one(device);
	if (err)
		return err;

	rdma_for_each_port (device, p) {
		err = ib_cache_update(device, p, true, true, true);
		if (err) {
			gid_table_cleanup_one(device);
			return err;
		}
	}

	return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
	u32 p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	rdma_for_each_port (device, p)
		kfree(device->port_data[p].cache.pkey);

	gid_table_release_one(device);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function waits for all in-progress workqueue
	 * elements and cleans up the GID cache. This function should be
	 * called after the device was removed from the devices list and
	 * all clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);

	/*
	 * Flush the wq second time for any pending GID delete work.
	 */
	flush_workqueue(ib_wq);
}
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/module.h>
37#include <linux/errno.h>
38#include <linux/slab.h>
39#include <linux/workqueue.h>
40#include <linux/netdevice.h>
41#include <net/addrconf.h>
42
43#include <rdma/ib_cache.h>
44
45#include "core_priv.h"
46
47struct ib_pkey_cache {
48 int table_len;
49 u16 table[0];
50};
51
52struct ib_update_work {
53 struct work_struct work;
54 struct ib_device *device;
55 u8 port_num;
56};
57
58union ib_gid zgid;
59EXPORT_SYMBOL(zgid);
60
61static const struct ib_gid_attr zattr;
62
63enum gid_attr_find_mask {
64 GID_ATTR_FIND_MASK_GID = 1UL << 0,
65 GID_ATTR_FIND_MASK_NETDEV = 1UL << 1,
66 GID_ATTR_FIND_MASK_DEFAULT = 1UL << 2,
67 GID_ATTR_FIND_MASK_GID_TYPE = 1UL << 3,
68};
69
70enum gid_table_entry_props {
71 GID_TABLE_ENTRY_INVALID = 1UL << 0,
72 GID_TABLE_ENTRY_DEFAULT = 1UL << 1,
73};
74
75enum gid_table_write_action {
76 GID_TABLE_WRITE_ACTION_ADD,
77 GID_TABLE_WRITE_ACTION_DEL,
78 /* MODIFY only updates the GID table. Currently only used by
79 * ib_cache_update.
80 */
81 GID_TABLE_WRITE_ACTION_MODIFY
82};
83
84struct ib_gid_table_entry {
85 unsigned long props;
86 union ib_gid gid;
87 struct ib_gid_attr attr;
88 void *context;
89};
90
91struct ib_gid_table {
92 int sz;
93 /* In RoCE, adding a GID to the table requires:
94 * (a) Find if this GID is already exists.
95 * (b) Find a free space.
96 * (c) Write the new GID
97 *
98 * Delete requires different set of operations:
99 * (a) Find the GID
100 * (b) Delete it.
101 *
102 * Add/delete should be carried out atomically.
103 * This is done by locking this mutex from multiple
104 * writers. We don't need this lock for IB, as the MAD
105 * layer replaces all entries. All data_vec entries
106 * are locked by this lock.
107 **/
108 struct mutex lock;
109 /* This lock protects the table entries from being
110 * read and written simultaneously.
111 */
112 rwlock_t rwlock;
113 struct ib_gid_table_entry *data_vec;
114};
115
116static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
117{
118 if (rdma_cap_roce_gid_table(ib_dev, port)) {
119 struct ib_event event;
120
121 event.device = ib_dev;
122 event.element.port_num = port;
123 event.event = IB_EVENT_GID_CHANGE;
124
125 ib_dispatch_event(&event);
126 }
127}
128
129static const char * const gid_type_str[] = {
130 [IB_GID_TYPE_IB] = "IB/RoCE v1",
131 [IB_GID_TYPE_ROCE_UDP_ENCAP] = "RoCE v2",
132};
133
134const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
135{
136 if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
137 return gid_type_str[gid_type];
138
139 return "Invalid GID type";
140}
141EXPORT_SYMBOL(ib_cache_gid_type_str);
142
143int ib_cache_gid_parse_type_str(const char *buf)
144{
145 unsigned int i;
146 size_t len;
147 int err = -EINVAL;
148
149 len = strlen(buf);
150 if (len == 0)
151 return -EINVAL;
152
153 if (buf[len - 1] == '\n')
154 len--;
155
156 for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
157 if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
158 len == strlen(gid_type_str[i])) {
159 err = i;
160 break;
161 }
162
163 return err;
164}
165EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
166
167/* This function expects that rwlock will be write locked in all
168 * scenarios and that lock will be locked in sleep-able (RoCE)
169 * scenarios.
170 */
171static int write_gid(struct ib_device *ib_dev, u8 port,
172 struct ib_gid_table *table, int ix,
173 const union ib_gid *gid,
174 const struct ib_gid_attr *attr,
175 enum gid_table_write_action action,
176 bool default_gid)
177 __releases(&table->rwlock) __acquires(&table->rwlock)
178{
179 int ret = 0;
180 struct net_device *old_net_dev;
181
182 /* in rdma_cap_roce_gid_table, this funciton should be protected by a
183 * sleep-able lock.
184 */
185
186 if (rdma_cap_roce_gid_table(ib_dev, port)) {
187 table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
188 write_unlock_irq(&table->rwlock);
189 /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
190 * RoCE providers and thus only updates the cache.
191 */
192 if (action == GID_TABLE_WRITE_ACTION_ADD)
193 ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
194 &table->data_vec[ix].context);
195 else if (action == GID_TABLE_WRITE_ACTION_DEL)
196 ret = ib_dev->del_gid(ib_dev, port, ix,
197 &table->data_vec[ix].context);
198 write_lock_irq(&table->rwlock);
199 }
200
201 old_net_dev = table->data_vec[ix].attr.ndev;
202 if (old_net_dev && old_net_dev != attr->ndev)
203 dev_put(old_net_dev);
204 /* if modify_gid failed, just delete the old gid */
205 if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
206 gid = &zgid;
207 attr = &zattr;
208 table->data_vec[ix].context = NULL;
209 }
210 if (default_gid)
211 table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
212 memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
213 memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
214 if (table->data_vec[ix].attr.ndev &&
215 table->data_vec[ix].attr.ndev != old_net_dev)
216 dev_hold(table->data_vec[ix].attr.ndev);
217
218 table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;
219
220 return ret;
221}
222
223static int add_gid(struct ib_device *ib_dev, u8 port,
224 struct ib_gid_table *table, int ix,
225 const union ib_gid *gid,
226 const struct ib_gid_attr *attr,
227 bool default_gid) {
228 return write_gid(ib_dev, port, table, ix, gid, attr,
229 GID_TABLE_WRITE_ACTION_ADD, default_gid);
230}
231
232static int modify_gid(struct ib_device *ib_dev, u8 port,
233 struct ib_gid_table *table, int ix,
234 const union ib_gid *gid,
235 const struct ib_gid_attr *attr,
236 bool default_gid) {
237 return write_gid(ib_dev, port, table, ix, gid, attr,
238 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
239}
240
241static int del_gid(struct ib_device *ib_dev, u8 port,
242 struct ib_gid_table *table, int ix,
243 bool default_gid) {
244 return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
245 GID_TABLE_WRITE_ACTION_DEL, default_gid);
246}
247
248/* rwlock should be read locked */
249static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
250 const struct ib_gid_attr *val, bool default_gid,
251 unsigned long mask, int *pempty)
252{
253 int i = 0;
254 int found = -1;
255 int empty = pempty ? -1 : 0;
256
257 while (i < table->sz && (found < 0 || empty < 0)) {
258 struct ib_gid_table_entry *data = &table->data_vec[i];
259 struct ib_gid_attr *attr = &data->attr;
260 int curr_index = i;
261
262 i++;
263
264 if (data->props & GID_TABLE_ENTRY_INVALID)
265 continue;
266
267 if (empty < 0)
268 if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
269 !memcmp(attr, &zattr, sizeof(*attr)) &&
270 !data->props)
271 empty = curr_index;
272
273 if (found >= 0)
274 continue;
275
276 if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
277 attr->gid_type != val->gid_type)
278 continue;
279
280 if (mask & GID_ATTR_FIND_MASK_GID &&
281 memcmp(gid, &data->gid, sizeof(*gid)))
282 continue;
283
284 if (mask & GID_ATTR_FIND_MASK_NETDEV &&
285 attr->ndev != val->ndev)
286 continue;
287
288 if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
289 !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
290 default_gid)
291 continue;
292
293 found = curr_index;
294 }
295
296 if (pempty)
297 *pempty = empty;
298
299 return found;
300}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}
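
/*
 * Worked example (illustrative): for a net_device with MAC address
 * 00:02:c9:aa:bb:cc, addrconf_ifid_eui48() builds the modified EUI-64
 * interface identifier 02:02:c9:ff:fe:aa:bb:cc (ff:fe inserted in the
 * middle, universal/local bit flipped), so the resulting default GID is
 * the link-local style address fe80::202:c9ff:feaa:bbcc.
 */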

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}
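
/*
 * Usage sketch (illustrative only; the IPv4 source address and variable
 * names are hypothetical): the RoCE GID management code builds a GID from
 * an interface address and feeds it to ib_cache_gid_add().
 *
 *	union ib_gid gid;
 *	struct ib_gid_attr attr = {
 *		.ndev		= ndev,
 *		.gid_type	= IB_GID_TYPE_ROCE,
 *	};
 *
 *	ipv6_addr_set_v4mapped(ipv4_addr, (struct in6_addr *)gid.raw);
 *	ib_cache_gid_add(ib_dev, port, &gid, &attr);
 */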

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix, false))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ports_table[p];
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev))
		return -ENOENT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
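
/*
 * Usage sketch (illustrative only): resolving the table index of a known
 * GID on one specific port; passing a NULL ndev skips the netdev check.
 *
 *	u16 index;
 *
 *	if (!ib_find_cached_gid_by_port(device, &gid, IB_GID_TYPE_IB,
 *					port, NULL, &index))
 *		pr_debug("gid found at index %u\n", index);
 */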

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value should be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Opaque private data passed to the filter function.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!ports_table)
		return -EOPNOTSUPP;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		if (found)
			break;
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     table->data_vec[i].props &
				     GID_TABLE_ENTRY_DEFAULT))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table = ports_table[port - rdma_start_port(ib_dev)];

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if ((1UL << gid_type) & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find the default GID location */
		if (WARN_ON(ix < 0))
			goto release;

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid,
				    &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}
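
/*
 * Worked example (illustrative): if roce_gid_type_mask_support() reports
 * 0b11 (bit 0 = RoCE v1, bit 1 = RoCE v2), hweight_long() yields two
 * default entries, and the loop above marks slots 0 and 1 as
 * GID_TABLE_ENTRY_DEFAULT with gid_type 0 and 1 respectively.
 */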

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table **table;
	int err = 0;

	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
	if (!table) {
		pr_warn("failed to allocate ib gid cache for %s\n",
			ib_dev->name);
		return -ENOMEM;
	}

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table[port] =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table[port]) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table[port]);
		if (err)
			goto rollback_table_setup;
	}

	ib_dev->cache.gid_cache = table;
	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
		release_gid_table(table[port]);
	}

	kfree(table);
	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		release_gid_table(table[port]);

	kfree(table);
	ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);
	if (err)
		return err;

	err = roce_rescan_device(ib_dev);
	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8 port_num,
		      int index,
		      union ib_gid *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	struct ib_gid_table *table;

	/* Validate the port before using it to index the per-port tables */
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	table = ports_table[port_num - rdma_start_port(device)];

	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
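
/*
 * Usage sketch (illustrative only): __ib_cache_gid_get() takes a
 * reference on attr->ndev, so a caller that passes a gid_attr must drop
 * that reference when done.
 *
 *	union ib_gid gid;
 *	struct ib_gid_attr attr;
 *
 *	if (!ib_get_cached_gid(device, port, 0, &gid, &attr)) {
 *		... use gid and attr ...
 *		if (attr.ndev)
 *			dev_put(attr.ndev);
 *	}
 */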

int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8 *port_num,
		       u16 *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);
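
/*
 * Sketch of a filter callback (illustrative; the callback name and the
 * context type are hypothetical): match only GID entries bound to a given
 * net_device.
 *
 *	static bool gid_ndev_filter(const union ib_gid *gid,
 *				    const struct ib_gid_attr *attr,
 *				    void *context)
 *	{
 *		return attr->ndev == (struct net_device *)context;
 *	}
 *
 *	ret = ib_find_gid_by_filter(device, &gid, port,
 *				    gid_ndev_filter, ndev, &index);
 */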

int ib_get_cached_pkey(struct ib_device *device,
		       u8 port_num,
		       int index,
		       u16 *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
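
/*
 * Usage sketch (illustrative only): on IB fabrics slot 0 typically holds
 * the default full-membership P_Key 0xffff.
 *
 *	u16 pkey;
 *
 *	if (!ib_get_cached_pkey(device, port, 0, &pkey))
 *		pr_debug("pkey[0] = 0x%04x\n", pkey);
 */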

int ib_find_cached_pkey(struct ib_device *device,
			u8 port_num,
			u16 pkey,
			u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else {
				partial_ix = i;
			}
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
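
/*
 * Note on the masking above: bit 15 of a P_Key is the full-membership
 * bit; the low 15 bits identify the partition. Searching for 0xffff thus
 * returns a full-member entry 0xffff when present, and only falls back
 * via partial_ix to a limited-member entry 0x7fff (same low 15 bits,
 * bit 15 clear) when no full-member match exists.
 */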

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8 port_num,
			      u16 pkey,
			      u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8 port_num,
		      u8 *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);
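
/*
 * Usage sketch (illustrative only; base_lid, path_bits and dlid are
 * hypothetical): with LMC = n a port answers to 2^n consecutive LIDs, so
 * a caller may derive a destination LID from the base LID:
 *
 *	u8 lmc;
 *
 *	if (!ib_get_cached_lmc(device, port, &lmc))
 *		dlid = base_lid | (path_bits & ((1 << lmc) - 1));
 */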

static void ib_cache_update(struct ib_device *device,
			    u8 port)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int table_len;
		union ib_gid table[];
	} *gid_cache = NULL;
	int i;
	int ret;
	struct ib_gid_table *table;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	bool use_roce_gid_table =
		rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
				    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
					ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->event.device, work->event.element.port_num);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			/* Keep a copy of the event; the pointer is not
			 * valid after this handler returns.
			 */
			work->event = *event;
			queue_work(ib_wq, &work->work);
		}
	}
}

int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1),
			GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		pr_warn("Couldn't allocate cache for %s\n", device->name);
		return -ENOMEM;
	}

	err = gid_table_setup_one(device);
	if (err)
		/* Allocated memory will be cleaned in the release function */
		return err;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
	return err;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	if (device->cache.pkey_cache)
		for (p = 0;
		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
			kfree(device->cache.pkey_cache[p]);

	gid_table_release_one(device);
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}