/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/if_vlan.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[] __counted_by(table_len);
};

struct ib_update_work {
	struct work_struct work;
	struct ib_event event;
	bool enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_state {
	GID_TABLE_ENTRY_INVALID		= 1,
	GID_TABLE_ENTRY_VALID		= 2,
	/*
	 * Indicates that the entry is pending removal; there may still
	 * be active users of this GID entry. When the last user releases
	 * its reference, the entry is detached from the table.
	 */
	GID_TABLE_ENTRY_PENDING_DEL	= 3,
};

struct roce_gid_ndev_storage {
	struct rcu_head rcu_head;
	struct net_device *ndev;
};

struct ib_gid_table_entry {
	struct kref			kref;
	struct work_struct		del_work;
	struct ib_gid_attr		attr;
	void				*context;
	/* Store the ndev pointer so that the reference can be released
	 * later, from call_rcu context, because by that time the
	 * gid_table_entry and attr might already be freed. ndev_storage
	 * is freed by the RCU callback.
	 */
	struct roce_gid_ndev_storage	*ndev_storage;
	enum gid_table_entry_state	state;
};

struct ib_gid_table {
	int				sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find out if this GID already exists.
	 * (b) Find a free space.
	 * (c) Write the new GID.
	 *
	 * Delete requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 **/
	/* Any writer to data_vec must hold this lock and the write side of
	 * rwlock. Readers must hold only rwlock. All writers must be in a
	 * sleepable context.
	 */
	struct mutex			lock;
	/* rwlock protects data_vec[ix]->state and the entry pointer.
	 */
	rwlock_t			rwlock;
	struct ib_gid_table_entry	**data_vec;
	/* bit field, each bit indicates the index of default GID */
	u32				default_gid_indices;
};
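
/*
 * A condensed sketch of the locking protocol described above, for
 * orientation only; the authoritative sequences are in store_gid_entry(),
 * del_gid() and the readers further down:
 *
 *	Writer (sleepable context):
 *		mutex_lock(&table->lock);
 *		write_lock_irq(&table->rwlock);
 *		... update data_vec[ix] and entry->state ...
 *		write_unlock_irq(&table->rwlock);
 *		... driver calls and allocations happen under the mutex
 *		    but outside the rwlock ...
 *		mutex_unlock(&table->lock);
 *
 *	Reader (may be in atomic context):
 *		read_lock_irqsave(&table->rwlock, flags);
 *		... inspect data_vec[ix] and its state ...
 *		read_unlock_irqrestore(&table->rwlock, flags);
 */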

static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
{
	struct ib_event event;

	event.device		= ib_dev;
	event.element.port_num	= port;
	event.event		= IB_EVENT_GID_CHANGE;

	ib_dispatch_event_clients(&event);
}

static const char * const gid_type_str[] = {
	/* IB/RoCE v1 value is set for IB_GID_TYPE_IB and IB_GID_TYPE_ROCE for
	 * user space compatibility reasons.
	 */
	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE]	= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

/**
 * rdma_is_zero_gid - Check if given GID is zero or not.
 * @gid:	GID to check
 * Returns true if given GID is zero, returns false otherwise.
 */
bool rdma_is_zero_gid(const union ib_gid *gid)
{
	return !memcmp(gid, &zgid, sizeof(*gid));
}
EXPORT_SYMBOL(rdma_is_zero_gid);

/**
 * is_gid_index_default - Check if a given index belongs to
 * reserved default GIDs or not.
 * @table:	GID table pointer
 * @index:	Index to check in GID table
 * Returns true if the index is one of the reserved default GID indices,
 * otherwise returns false.
 */
static bool is_gid_index_default(const struct ib_gid_table *table,
				 unsigned int index)
{
	return index < 32 && (BIT(index) & table->default_gid_indices);
}

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
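
/*
 * Example of the parse above, using the strings from gid_type_str[]
 * (illustrative): a trailing newline is stripped, so
 *
 *	int type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *
 * leaves type == IB_GID_TYPE_ROCE_UDP_ENCAP; "IB/RoCE v1" yields the
 * first matching index (IB_GID_TYPE_IB), and an unrecognized string
 * yields -EINVAL.
 */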

static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
{
	return device->port_data[port].cache.gid;
}

static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
{
	return !entry;
}

static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
{
	return entry && entry->state == GID_TABLE_ENTRY_VALID;
}

static void schedule_free_gid(struct kref *kref)
{
	struct ib_gid_table_entry *entry =
			container_of(kref, struct ib_gid_table_entry, kref);

	queue_work(ib_wq, &entry->del_work);
}

static void put_gid_ndev(struct rcu_head *head)
{
	struct roce_gid_ndev_storage *storage =
		container_of(head, struct roce_gid_ndev_storage, rcu_head);

	WARN_ON(!storage->ndev);
	/* At this point it is safe to release the netdev reference, as
	 * all callers working on gid_attr->ndev are done using this
	 * netdev.
	 */
	dev_put(storage->ndev);
	kfree(storage);
}

static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
	struct ib_device *device = entry->attr.device;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	dev_dbg(&device->dev, "%s port=%u index=%u gid %pI6\n", __func__,
		port_num, entry->attr.index, entry->attr.gid.raw);

	write_lock_irq(&table->rwlock);

	/*
	 * The only way to avoid overwriting a NULL in the table is to
	 * check whether this is still the same entry in the table.
	 * If a new entry was stored at this index by the time we free
	 * here, don't overwrite the table entry.
	 */
	if (entry == table->data_vec[entry->attr.index])
		table->data_vec[entry->attr.index] = NULL;
	/* Now this index is ready to be allocated */
	write_unlock_irq(&table->rwlock);

	if (entry->ndev_storage)
		call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev);
	kfree(entry);
}

static void free_gid_entry(struct kref *kref)
{
	struct ib_gid_table_entry *entry =
			container_of(kref, struct ib_gid_table_entry, kref);

	free_gid_entry_locked(entry);
}

/**
 * free_gid_work - Release reference to the GID entry
 * @work: Work structure to refer to GID entry which needs to be
 * deleted.
 *
 * free_gid_work() frees the entry from the HCA's hardware table
 * if the provider supports it. It also releases the reference to the
 * netdevice.
 */
static void free_gid_work(struct work_struct *work)
{
	struct ib_gid_table_entry *entry =
		container_of(work, struct ib_gid_table_entry, del_work);
	struct ib_device *device = entry->attr.device;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	mutex_lock(&table->lock);
	free_gid_entry_locked(entry);
	mutex_unlock(&table->lock);
}

static struct ib_gid_table_entry *
alloc_gid_entry(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	struct net_device *ndev;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	ndev = rcu_dereference_protected(attr->ndev, 1);
	if (ndev) {
		entry->ndev_storage = kzalloc(sizeof(*entry->ndev_storage),
					      GFP_KERNEL);
		if (!entry->ndev_storage) {
			kfree(entry);
			return NULL;
		}
		dev_hold(ndev);
		entry->ndev_storage->ndev = ndev;
	}
	kref_init(&entry->kref);
	memcpy(&entry->attr, attr, sizeof(*attr));
	INIT_WORK(&entry->del_work, free_gid_work);
	entry->state = GID_TABLE_ENTRY_INVALID;
	return entry;
}

static void store_gid_entry(struct ib_gid_table *table,
			    struct ib_gid_table_entry *entry)
{
	entry->state = GID_TABLE_ENTRY_VALID;

	dev_dbg(&entry->attr.device->dev, "%s port=%u index=%u gid %pI6\n",
		__func__, entry->attr.port_num, entry->attr.index,
		entry->attr.gid.raw);

	lockdep_assert_held(&table->lock);
	write_lock_irq(&table->rwlock);
	table->data_vec[entry->attr.index] = entry;
	write_unlock_irq(&table->rwlock);
}

static void get_gid_entry(struct ib_gid_table_entry *entry)
{
	kref_get(&entry->kref);
}

static void put_gid_entry(struct ib_gid_table_entry *entry)
{
	kref_put(&entry->kref, schedule_free_gid);
}

static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
{
	kref_put(&entry->kref, free_gid_entry);
}
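
/*
 * A sketch of the two reference-drop paths above, assuming the kref
 * reaches zero (for orientation only):
 *
 *	put_gid_entry()          - callable from atomic context;
 *	  kref_put() -> schedule_free_gid() -> queue_work(ib_wq)
 *	    -> free_gid_work(): takes table->lock, then calls
 *	       free_gid_entry_locked()
 *
 *	put_gid_entry_locked()   - caller already holds table->lock;
 *	  kref_put() -> free_gid_entry() -> free_gid_entry_locked()
 */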

static int add_roce_gid(struct ib_gid_table_entry *entry)
{
	const struct ib_gid_attr *attr = &entry->attr;
	int ret;

	if (!attr->ndev) {
		dev_err(&attr->device->dev, "%s NULL netdev port=%u index=%u\n",
			__func__, attr->port_num, attr->index);
		return -EINVAL;
	}
	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
		ret = attr->device->ops.add_gid(attr, &entry->context);
		if (ret) {
			dev_err(&attr->device->dev,
				"%s GID add failed port=%u index=%u\n",
				__func__, attr->port_num, attr->index);
			return ret;
		}
	}
	return 0;
}

/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev:	IB device whose GID entry is to be deleted
 * @port:	Port number of the IB device
 * @table:	GID table of the IB device for a port
 * @ix:		GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u32 port,
		    struct ib_gid_table *table, int ix)
{
	struct roce_gid_ndev_storage *ndev_storage;
	struct ib_gid_table_entry *entry;

	lockdep_assert_held(&table->lock);

	dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
		ix, table->data_vec[ix]->attr.gid.raw);

	write_lock_irq(&table->rwlock);
	entry = table->data_vec[ix];
	entry->state = GID_TABLE_ENTRY_PENDING_DEL;
	/*
	 * For non-RoCE protocols, the GID entry slot is immediately
	 * ready for reuse.
	 */
	if (!rdma_protocol_roce(ib_dev, port))
		table->data_vec[ix] = NULL;
	write_unlock_irq(&table->rwlock);

	if (rdma_cap_roce_gid_table(ib_dev, port))
		ib_dev->ops.del_gid(&entry->attr, &entry->context);

	ndev_storage = entry->ndev_storage;
	if (ndev_storage) {
		entry->ndev_storage = NULL;
		rcu_assign_pointer(entry->attr.ndev, NULL);
		call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
	}

	put_gid_entry_locked(entry);
}

/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table:	GID table in which the GID is to be added or modified
 * @attr:	Attributes of the GID
 *
 * Returns 0 on success or an appropriate error code. It accepts zero
 * GID addition for non-RoCE ports from HCAs that report them as valid
 * GIDs. However, such zero GIDs are not added to the cache.
 */
static int add_modify_gid(struct ib_gid_table *table,
			  const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	int ret = 0;

	/*
	 * Invalidate any old entry in the table to make it safe to write to
	 * this index.
	 */
	if (is_gid_entry_valid(table->data_vec[attr->index]))
		del_gid(attr->device, attr->port_num, table, attr->index);

	/*
	 * Some HCAs report multiple GID entries with only one valid GID, and
	 * leave other unused entries as the zero GID. Convert zero GIDs to
	 * empty table entries instead of storing them.
	 */
	if (rdma_is_zero_gid(&attr->gid))
		return 0;

	entry = alloc_gid_entry(attr);
	if (!entry)
		return -ENOMEM;

	if (rdma_protocol_roce(attr->device, attr->port_num)) {
		ret = add_roce_gid(entry);
		if (ret)
			goto done;
	}

	store_gid_entry(table, entry);
	return 0;

done:
	put_gid_entry(entry);
	return ret;
}

/* rwlock should be read locked, or lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = table->data_vec[i];
		struct ib_gid_attr *attr;
		int curr_index = i;

		i++;

		/* find_gid() is used during GID addition, where it is
		 * expected to return a free entry slot which is not a
		 * duplicate. A free entry slot is requested and returned
		 * only if pempty is set, so look up a free slot only when
		 * requested.
		 */
		if (pempty && empty < 0) {
			if (is_gid_entry_free(data) &&
			    default_gid ==
				is_gid_index_default(table, curr_index)) {
				/*
				 * Found an invalid (free) entry; allocate it.
				 * If a default GID is requested, then the
				 * found slot must be one of the DEFAULT
				 * reserved slots or we fail.
				 * This ensures that only DEFAULT reserved
				 * slots are used for default property GIDs.
				 */
				empty = curr_index;
			}
		}

		/*
		 * Additionally, find_gid() is used to find a valid entry
		 * during a lookup operation; so ignore the entries which
		 * are marked as pending removal and the entries which are
		 * marked as invalid.
		 */
		if (!is_gid_entry_valid(data))
			continue;

		if (found >= 0)
			continue;

		attr = &data->attr;
		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->attr.gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    is_gid_index_default(table, curr_index) != default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}
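
/*
 * Worked example for make_default_gid(), assuming a netdev with MAC
 * address 02:11:22:33:44:55: addrconf_ifid_eui48() flips the
 * universal/local bit and inserts ff:fe in the middle of the MAC, so
 * the default GID becomes the link-local style address
 * fe80:0000:0000:0000:0011:22ff:fe33:4455 (fe80::/64 prefix + EUI-64).
 */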

static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
			      union ib_gid *gid, struct ib_gid_attr *attr,
			      unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int empty;
	int ix;

	/* Do not allow adding a zero GID, per IB spec version 1.3
	 * section 4.1.1 point (6), section 12.7.10 and section 12.7.20.
	 */
	if (rdma_is_zero_gid(gid))
		return -EINVAL;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}
	attr->device = ib_dev;
	attr->index = empty;
	attr->port_num = port;
	attr->gid = *gid;
	ret = add_modify_gid(table, attr);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_warn("%s: unable to add gid %pI6 error=%d\n",
			__func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE |
			     GID_ATTR_FIND_MASK_NETDEV;

	return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
}

static int
_ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		  union ib_gid *gid, struct ib_gid_attr *attr,
		  unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int ix;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, NULL);
	if (ix < 0) {
		ret = -EINVAL;
		goto out_unlock;
	}

	del_gid(ib_dev, port, table, ix);
	dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_debug("%s: can't delete gid %pI6 error=%d\n",
			 __func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID	  |
			     GID_ATTR_FIND_MASK_GID_TYPE |
			     GID_ATTR_FIND_MASK_DEFAULT  |
			     GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	for (ix = 0; ix < table->sz; ix++) {
		if (is_gid_entry_valid(table->data_vec[ix]) &&
		    table->data_vec[ix]->attr.ndev == ndev) {
			del_gid(ib_dev, port, table, ix);
			deleted = true;
		}
	}

	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

/**
 * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
 * a valid GID entry for the given search parameters. It searches for the
 * specified GID value in the local software cache.
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port: The port number of the device where the GID value should be searched.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * Returns the SGID attributes with a valid reference held if the GID is
 * found, or returns ERR_PTR on error.
 * The caller must invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *
rdma_find_gid_by_port(struct ib_device *ib_dev,
		      const union ib_gid *gid,
		      enum ib_gid_type gid_type,
		      u32 port, struct net_device *ndev)
{
	int local_index;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	const struct ib_gid_attr *attr;
	unsigned long flags;

	if (!rdma_is_port_valid(ib_dev, port))
		return ERR_PTR(-ENOENT);

	table = rdma_gid_table(ib_dev, port);

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		get_gid_entry(table->data_vec[local_index]);
		attr = &table->data_vec[local_index]->attr;
		read_unlock_irqrestore(&table->rwlock, flags);
		return attr;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid_by_port);
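
/*
 * A minimal usage sketch for rdma_find_gid_by_port(); the chosen GID
 * type and the error handling are illustrative:
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid_by_port(ib_dev, &gid,
 *				     IB_GID_TYPE_ROCE_UDP_ENCAP, port, NULL);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	... use attr->index, attr->ndev, rdma_read_gid_hw_context(attr) ...
 *	rdma_put_gid_attr(attr);
 */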

/**
 * rdma_find_gid_by_filter - Returns the GID table attribute where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Private data to pass into the call-back.
 *
 * rdma_find_gid_by_filter() searches the port's GID table for the
 * specified GID value for which the filter function returns true.
 *
 */
const struct ib_gid_attr *rdma_find_gid_by_filter(
	struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
	bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
		       void *),
	void *context)
{
	const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
	struct ib_gid_table *table;
	unsigned long flags;
	unsigned int i;

	if (!rdma_is_port_valid(ib_dev, port))
		return ERR_PTR(-EINVAL);

	table = rdma_gid_table(ib_dev, port);

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_table_entry *entry = table->data_vec[i];

		if (!is_gid_entry_valid(entry))
			continue;

		if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
			continue;

		if (filter(gid, &entry->attr, context)) {
			get_gid_entry(entry);
			res = &entry->attr;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);
	return res;
}
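
/*
 * A sketch of a filter callback and call; the function name and the
 * RoCE v2 check are illustrative, not part of this file:
 *
 *	static bool match_roce_v2(const union ib_gid *gid,
 *				  const struct ib_gid_attr *attr,
 *				  void *context)
 *	{
 *		return attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
 *	}
 *
 *	attr = rdma_find_gid_by_filter(ib_dev, &gid, port,
 *				       match_roce_v2, NULL);
 *	if (!IS_ERR(attr))
 *		rdma_put_gid_attr(attr);
 *
 * The filter runs under the table rwlock, hence the "atomic context"
 * note in the kernel-doc above: it must not sleep.
 */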

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);
	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_device *device,
			      struct ib_gid_table *table)
{
	bool leak = false;
	int i;

	if (!table)
		return;

	for (i = 0; i < table->sz; i++) {
		if (is_gid_entry_free(table->data_vec[i]))
			continue;
		if (kref_read(&table->data_vec[i]->kref) > 1) {
			dev_err(&device->dev,
				"GID entry ref leak for index %d ref=%u\n", i,
				kref_read(&table->data_vec[i]->kref));
			leak = true;
		}
	}
	if (leak)
		return;

	mutex_destroy(&table->lock);
	kfree(table->data_vec);
	kfree(table);
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
				   struct ib_gid_table *table)
{
	int i;

	if (!table)
		return;

	mutex_lock(&table->lock);
	for (i = 0; i < table->sz; ++i) {
		if (is_gid_entry_valid(table->data_vec[i]))
			del_gid(ib_dev, port, table, i);
	}
	mutex_unlock(&table->lock);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid = { };
	struct ib_gid_attr gid_attr;
	unsigned int gid_type;
	unsigned long mask;

	mask = GID_ATTR_FIND_MASK_GID_TYPE |
	       GID_ATTR_FIND_MASK_DEFAULT |
	       GID_ATTR_FIND_MASK_NETDEV;
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			make_default_gid(ndev, &gid);
			__ib_cache_gid_add(ib_dev, port, &gid,
					   &gid_attr, mask, true);
		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
			_ib_cache_gid_del(ib_dev, port, &gid,
					  &gid_attr, mask, true);
		}
	}
}

static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
				      struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	/* Reserve starting indices for default GIDs */
	for (i = 0; i < num_default_gids && i < table->sz; i++)
		table->default_gid_indices |= BIT(i);
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	u32 p;

	rdma_for_each_port (ib_dev, p) {
		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
		ib_dev->port_data[p].cache.gid = NULL;
	}
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u32 rdma_port;

	rdma_for_each_port (ib_dev, rdma_port) {
		table = alloc_gid_table(
			ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
		if (!table)
			goto rollback_table_setup;

		gid_table_reserve_default(ib_dev, rdma_port, table);
		ib_dev->port_data[rdma_port].cache.gid = table;
	}
	return 0;

rollback_table_setup:
	gid_table_release_one(ib_dev);
	return -ENOMEM;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	u32 p;

	rdma_for_each_port (ib_dev, p)
		cleanup_gid_table_port(ib_dev, p,
				       ib_dev->port_data[p].cache.gid);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	rdma_roce_rescan_device(ib_dev);

	return err;
}

/**
 * rdma_query_gid - Read the GID content from the GID software cache
 * @device:		Device to query the GID
 * @port_num:		Port number of the device
 * @index:		Index of the GID table entry to read
 * @gid:		Pointer to GID where to store the entry's GID
 *
 * rdma_query_gid() only reads the GID entry content for the requested
 * device, port and index. It works for IB, RoCE and iWARP link layers.
 * It doesn't hold any reference to the GID table entry in the HCA or
 * software cache.
 *
 * Returns 0 on success or an appropriate error code.
 *
 */
int rdma_query_gid(struct ib_device *device, u32 port_num,
		   int index, union ib_gid *gid)
{
	struct ib_gid_table *table;
	unsigned long flags;
	int res;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = rdma_gid_table(device, port_num);
	read_lock_irqsave(&table->rwlock, flags);

	if (index < 0 || index >= table->sz) {
		res = -EINVAL;
		goto done;
	}

	if (!is_gid_entry_valid(table->data_vec[index])) {
		res = -ENOENT;
		goto done;
	}

	memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
	res = 0;

done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return res;
}
EXPORT_SYMBOL(rdma_query_gid);
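
/*
 * A minimal usage sketch: reading the GID at index 0 into a local copy.
 * No reference is held afterwards, matching the kernel-doc above.
 *
 *	union ib_gid gid;
 *	int err;
 *
 *	err = rdma_query_gid(device, port_num, 0, &gid);
 *	if (err)
 *		return err;	(-EINVAL or -ENOENT, as documented)
 */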

/**
 * rdma_read_gid_hw_context - Read the HW GID context from GID attribute
 * @attr:		Pointer to the GID attribute
 *
 * rdma_read_gid_hw_context() reads the driver's GID HW context
 * corresponding to the SGID attr. Callers are required to already be
 * holding a reference to an existing GID entry.
 *
 * Returns the HW GID context
 *
 */
void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr)
{
	return container_of(attr, struct ib_gid_table_entry, attr)->context;
}
EXPORT_SYMBOL(rdma_read_gid_hw_context);

/**
 * rdma_find_gid - Returns SGID attributes if the matching GID is found.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * rdma_find_gid() searches for the specified GID value in the software cache.
 *
 * Returns GID attributes if a valid GID is found or returns ERR_PTR for the
 * error. The caller must invoke rdma_put_gid_attr() to release the reference.
 *
 */
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
					const union ib_gid *gid,
					enum ib_gid_type gid_type,
					struct net_device *ndev)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
	u32 p;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	rdma_for_each_port(device, p) {
		struct ib_gid_table *table;
		unsigned long flags;
		int index;

		table = device->port_data[p].cache.gid;
		read_lock_irqsave(&table->rwlock, flags);
		index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
		if (index >= 0) {
			const struct ib_gid_attr *attr;

			get_gid_entry(table->data_vec[index]);
			attr = &table->data_vec[index]->attr;
			read_unlock_irqrestore(&table->rwlock, flags);
			return attr;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid);

int ib_get_cached_pkey(struct ib_device *device,
		       u32               port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;

	if (!cache || index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

void ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
				u64 *sn_pfx)
{
	unsigned long flags;

	read_lock_irqsave(&device->cache_lock, flags);
	*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
	read_unlock_irqrestore(&device->cache_lock, flags);
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
			u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;
	if (!cache) {
		ret = -EINVAL;
		goto err;
	}

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else {
				partial_ix = i;
			}
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

err:
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
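
/*
 * Worked example of the matching above: bit 15 of a P_Key is the
 * full-membership bit, so the low 15 bits identify the partition.
 * Searching for pkey 0x0001 matches a cached 0x8001 (full member) and
 * returns its index immediately; a cached 0x0001 (limited member) is
 * only returned, via partial_ix, when no full-member entry of that
 * partition exists.
 *
 *	u16 index;
 *
 *	if (!ib_find_cached_pkey(device, port_num, 0x0001, &index))
 *		... index prefers a full-member 0x8001 entry ...
 */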

int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
			      u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;
	if (!cache) {
		ret = -EINVAL;
		goto err;
	}

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

err:
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);
	*lmc = device->port_data[port_num].cache.lmc;
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);
	*port_state = device->port_data[port_num].cache.port_state;
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

/**
 * rdma_get_gid_attr - Returns GID attributes for a port of a device
 * at a requested gid_index, if a valid GID entry exists.
 * @device:		The device to query.
 * @port_num:		The port number on the device where the GID value
 *			is to be queried.
 * @index:		Index of the GID table entry whose attributes are to
 *                      be queried.
 *
 * rdma_get_gid_attr() acquires a reference on the GID attribute from the
 * cached GID table. The caller must invoke rdma_put_gid_attr() to release
 * the reference to the GID attribute regardless of link layer.
 *
 * Returns a pointer to a valid GID attribute or ERR_PTR for the appropriate
 * error code.
 */
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
{
	const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
	struct ib_gid_table *table;
	unsigned long flags;

	if (!rdma_is_port_valid(device, port_num))
		return ERR_PTR(-EINVAL);

	table = rdma_gid_table(device, port_num);
	if (index < 0 || index >= table->sz)
		return ERR_PTR(-EINVAL);

	read_lock_irqsave(&table->rwlock, flags);
	if (!is_gid_entry_valid(table->data_vec[index]))
		goto done;

	get_gid_entry(table->data_vec[index]);
	attr = &table->data_vec[index]->attr;
done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return attr;
}
EXPORT_SYMBOL(rdma_get_gid_attr);

/**
 * rdma_query_gid_table - Reads GID table entries of all the ports of a
 * device up to max_entries.
 * @device: The device to query.
 * @entries: Entries where GID entries are returned.
 * @max_entries: Maximum number of entries that can be returned.
 * The entries array must be allocated to hold max_entries entries.
 *
 * Returns the number of entries on success or an appropriate error code.
 */
ssize_t rdma_query_gid_table(struct ib_device *device,
			     struct ib_uverbs_gid_entry *entries,
			     size_t max_entries)
{
	const struct ib_gid_attr *gid_attr;
	ssize_t num_entries = 0, ret;
	struct ib_gid_table *table;
	u32 port_num, i;
	struct net_device *ndev;
	unsigned long flags;

	rdma_for_each_port(device, port_num) {
		table = rdma_gid_table(device, port_num);
		read_lock_irqsave(&table->rwlock, flags);
		for (i = 0; i < table->sz; i++) {
			if (!is_gid_entry_valid(table->data_vec[i]))
				continue;
			if (num_entries >= max_entries) {
				ret = -EINVAL;
				goto err;
			}

			gid_attr = &table->data_vec[i]->attr;

			memcpy(&entries->gid, &gid_attr->gid,
			       sizeof(gid_attr->gid));
			entries->gid_index = gid_attr->index;
			entries->port_num = gid_attr->port_num;
			entries->gid_type = gid_attr->gid_type;
			ndev = rcu_dereference_protected(
				gid_attr->ndev,
				lockdep_is_held(&table->rwlock));
			if (ndev)
				entries->netdev_ifindex = ndev->ifindex;

			num_entries++;
			entries++;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return num_entries;
err:
	read_unlock_irqrestore(&table->rwlock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_query_gid_table);

/**
 * rdma_put_gid_attr - Release reference to the GID attribute
 * @attr:		Pointer to the GID attribute whose reference
 *			needs to be released.
 *
 * rdma_put_gid_attr() must be used to release a reference that was
 * acquired using rdma_get_gid_attr() or any API which returns a pointer
 * to an ib_gid_attr, regardless of whether the link layer is IB or RoCE.
 *
 */
void rdma_put_gid_attr(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);

	put_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_put_gid_attr);

/**
 * rdma_hold_gid_attr - Get reference to existing GID attribute
 *
 * @attr:		Pointer to the GID attribute whose reference
 *			needs to be taken.
 *
 * Increase the reference count of a GID attribute to keep it from being
 * freed. Callers are required to already be holding a reference to the
 * attribute.
 *
 */
void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);

	get_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_hold_gid_attr);

/**
 * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
 * which must be in UP state.
 *
 * @attr:Pointer to the GID attribute
 *
 * Returns a pointer to the netdevice if a netdevice was attached to the
 * GID and the netdevice is in UP state. The caller must hold the RCU lock
 * as this API reads netdev flags which can change while the netdevice
 * migrates to a different net namespace. Returns ERR_PTR with an error
 * code otherwise.
 *
 */
struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
			container_of(attr, struct ib_gid_table_entry, attr);
	struct ib_device *device = entry->attr.device;
	struct net_device *ndev = ERR_PTR(-EINVAL);
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table;
	unsigned long flags;
	bool valid;

	table = rdma_gid_table(device, port_num);

	read_lock_irqsave(&table->rwlock, flags);
	valid = is_gid_entry_valid(table->data_vec[attr->index]);
	if (valid) {
		ndev = rcu_dereference(attr->ndev);
		if (!ndev)
			ndev = ERR_PTR(-ENODEV);
	}
	read_unlock_irqrestore(&table->rwlock, flags);
	return ndev;
}
EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);
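
/*
 * A minimal usage sketch; callers must already be in an RCU read-side
 * critical section and must not use the netdev past rcu_read_unlock()
 * without taking their own reference (dev_hold):
 *
 *	struct net_device *ndev;
 *
 *	rcu_read_lock();
 *	ndev = rdma_read_gid_attr_ndev_rcu(attr);
 *	if (!IS_ERR(ndev))
 *		... use ndev (e.g. read flags or ifindex) ...
 *	rcu_read_unlock();
 */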

static int get_lower_dev_vlan(struct net_device *lower_dev,
			      struct netdev_nested_priv *priv)
{
	u16 *vlan_id = (u16 *)priv->data;

	if (is_vlan_dev(lower_dev))
		*vlan_id = vlan_dev_vlan_id(lower_dev);

	/* We are interested only in the first-level vlan device, so
	 * always return 1 to stop iterating over next-level devices.
	 */
	return 1;
}

/**
 * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address
 *			     of a GID entry.
 *
 * @attr:	GID attribute pointer whose L2 fields to be read
 * @vlan_id:	Pointer to vlan id to fill up if the GID entry has
 *		vlan id. It is optional.
 * @smac:	Pointer to smac to fill up for a GID entry. It is optional.
 *
 * rdma_read_gid_l2_fields() returns 0 on success and fills in the vlan id
 * (if the gid entry has a vlan) and the source MAC, or returns an error.
 */
int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
			    u16 *vlan_id, u8 *smac)
{
	struct netdev_nested_priv priv = {
		.data = (void *)vlan_id,
	};
	struct net_device *ndev;

	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	if (smac)
		ether_addr_copy(smac, ndev->dev_addr);
	if (vlan_id) {
		*vlan_id = 0xffff;
		if (is_vlan_dev(ndev)) {
			*vlan_id = vlan_dev_vlan_id(ndev);
		} else {
			/* If the netdev is an upper device and its lower
			 * device is a vlan device, consider the vlan id of
			 * the lower vlan device for this gid entry.
			 */
			netdev_walk_all_lower_dev_rcu(attr->ndev,
					get_lower_dev_vlan, &priv);
		}
	}
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rdma_read_gid_l2_fields);

static int config_non_roce_gid_cache(struct ib_device *device,
				     u32 port, struct ib_port_attr *tprops)
{
	struct ib_gid_attr gid_attr = {};
	struct ib_gid_table *table;
	int ret = 0;
	int i;

	gid_attr.device = device;
	gid_attr.port_num = port;
	table = rdma_gid_table(device, port);

	mutex_lock(&table->lock);
	for (i = 0; i < tprops->gid_tbl_len; ++i) {
		if (!device->ops.query_gid)
			continue;
		ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
		if (ret) {
			dev_warn(&device->dev,
				 "query_gid failed (%d) for index %d\n", ret,
				 i);
			goto err;
		}

		if (rdma_protocol_iwarp(device, port)) {
			struct net_device *ndev;

			ndev = ib_device_get_netdev(device, port);
			if (!ndev)
				continue;
			RCU_INIT_POINTER(gid_attr.ndev, ndev);
			dev_put(ndev);
		}

		gid_attr.index = i;
		tprops->subnet_prefix =
			be64_to_cpu(gid_attr.gid.global.subnet_prefix);
		add_modify_gid(table, &gid_attr);
	}
err:
	mutex_unlock(&table->lock);
	return ret;
}

static int
ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
		bool update_pkeys, bool enforce_security)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL;
	struct ib_pkey_cache      *old_pkey_cache = NULL;
	int                        i;
	int                        ret;

	if (!rdma_is_port_valid(device, port))
		return -EINVAL;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return -ENOMEM;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
		goto err;
	}

	if (!rdma_protocol_roce(device, port) && update_gids) {
		ret = config_non_roce_gid_cache(device, port,
						tprops);
		if (ret)
			goto err;
	}

	update_pkeys &= !!tprops->pkey_tbl_len;

	if (update_pkeys) {
		pkey_cache = kmalloc(struct_size(pkey_cache, table,
						 tprops->pkey_tbl_len),
				     GFP_KERNEL);
		if (!pkey_cache) {
			ret = -ENOMEM;
			goto err;
		}

		pkey_cache->table_len = tprops->pkey_tbl_len;

		for (i = 0; i < pkey_cache->table_len; ++i) {
			ret = ib_query_pkey(device, port, i,
					    pkey_cache->table + i);
			if (ret) {
				dev_warn(&device->dev,
					 "ib_query_pkey failed (%d) for index %d\n",
					 ret, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache_lock);

	if (update_pkeys) {
		old_pkey_cache = device->port_data[port].cache.pkey;
		device->port_data[port].cache.pkey = pkey_cache;
	}
	device->port_data[port].cache.lmc = tprops->lmc;
	device->port_data[port].cache.port_state = tprops->state;

	device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
	write_unlock_irq(&device->cache_lock);

	if (enforce_security)
		ib_security_cache_change(device,
					 port,
					 tprops->subnet_prefix);

	kfree(old_pkey_cache);
	kfree(tprops);
	return 0;

err:
	kfree(pkey_cache);
	kfree(tprops);
	return ret;
}

static void ib_cache_event_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);
	int ret;

	/* Before distributing the cache update event, first sync
	 * the cache.
	 */
	ret = ib_cache_update(work->event.device, work->event.element.port_num,
			      work->event.event == IB_EVENT_GID_CHANGE,
			      work->event.event == IB_EVENT_PKEY_CHANGE,
			      work->enforce_security);

	/* The GID change event is already notified for individual GID
	 * entries by dispatch_gid_change_event(). Hence, notify only for
	 * the rest of the events.
	 */
	if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
		ib_dispatch_event_clients(&work->event);

	kfree(work);
}

static void ib_generic_event_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_dispatch_event_clients(&work->event);
	kfree(work);
}

static bool is_cache_update_event(const struct ib_event *event)
{
	return (event->event == IB_EVENT_PORT_ERR    ||
		event->event == IB_EVENT_PORT_ACTIVE ||
		event->event == IB_EVENT_LID_CHANGE  ||
		event->event == IB_EVENT_PKEY_CHANGE ||
		event->event == IB_EVENT_CLIENT_REREGISTER ||
		event->event == IB_EVENT_GID_CHANGE);
}

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event: Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(const struct ib_event *event)
{
	struct ib_update_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	if (is_cache_update_event(event))
		INIT_WORK(&work->work, ib_cache_event_task);
	else
		INIT_WORK(&work->work, ib_generic_event_task);

	work->event = *event;
	if (event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_GID_CHANGE)
		work->enforce_security = true;

	queue_work(ib_wq, &work->work);
}
EXPORT_SYMBOL(ib_dispatch_event);
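
/*
 * A minimal sketch of how a provider driver reports an asynchronous
 * event (the port-active event is illustrative); this mirrors what
 * dispatch_gid_change_event() does above:
 *
 *	struct ib_event event = {};
 *
 *	event.device = ib_dev;
 *	event.element.port_num = port;
 *	event.event = IB_EVENT_PORT_ACTIVE;
 *	ib_dispatch_event(&event);
 */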

int ib_cache_setup_one(struct ib_device *device)
{
	u32 p;
	int err;

	err = gid_table_setup_one(device);
	if (err)
		return err;

	rdma_for_each_port (device, p) {
		err = ib_cache_update(device, p, true, true, true);
		if (err)
			return err;
	}

	return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
	u32 p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	rdma_for_each_port (device, p)
		kfree(device->port_data[p].cache.pkey);

	gid_table_release_one(device);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function waits for all in-progress workqueue
	 * elements and cleans up the GID cache. This function should be
	 * called after the device was removed from the devices list and
	 * all clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);

	/*
	 * Flush the wq a second time for any pending GID delete work.
	 */
	flush_workqueue(ib_wq);
}
v4.10.11
   1/*
   2 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
   4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   5 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
   6 *
   7 * This software is available to you under a choice of one of two
   8 * licenses.  You may choose to be licensed under the terms of the GNU
   9 * General Public License (GPL) Version 2, available from the file
  10 * COPYING in the main directory of this source tree, or the
  11 * OpenIB.org BSD license below:
  12 *
  13 *     Redistribution and use in source and binary forms, with or
  14 *     without modification, are permitted provided that the following
  15 *     conditions are met:
  16 *
  17 *      - Redistributions of source code must retain the above
  18 *        copyright notice, this list of conditions and the following
  19 *        disclaimer.
  20 *
  21 *      - Redistributions in binary form must reproduce the above
  22 *        copyright notice, this list of conditions and the following
  23 *        disclaimer in the documentation and/or other materials
  24 *        provided with the distribution.
  25 *
  26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33 * SOFTWARE.
  34 */
  35
  36#include <linux/module.h>
  37#include <linux/errno.h>
  38#include <linux/slab.h>
  39#include <linux/workqueue.h>
  40#include <linux/netdevice.h>
  41#include <net/addrconf.h>
  42
  43#include <rdma/ib_cache.h>
  44
  45#include "core_priv.h"
  46
  47struct ib_pkey_cache {
  48	int             table_len;
  49	u16             table[0];
  50};
  51
  52struct ib_update_work {
  53	struct work_struct work;
  54	struct ib_device  *device;
  55	u8                 port_num;
  56};
  57
  58union ib_gid zgid;
  59EXPORT_SYMBOL(zgid);
  60
  61static const struct ib_gid_attr zattr;
  62
  63enum gid_attr_find_mask {
  64	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
  65	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
  66	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
  67	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
  68};
  69
  70enum gid_table_entry_props {
  71	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
  72	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
 
 
 
 
 
 
 
  73};
  74
  75enum gid_table_write_action {
  76	GID_TABLE_WRITE_ACTION_ADD,
  77	GID_TABLE_WRITE_ACTION_DEL,
  78	/* MODIFY only updates the GID table. Currently only used by
  79	 * ib_cache_update.
  80	 */
  81	GID_TABLE_WRITE_ACTION_MODIFY
  82};
  83
  84struct ib_gid_table_entry {
  85	unsigned long	    props;
  86	union ib_gid        gid;
  87	struct ib_gid_attr  attr;
  88	void		   *context;
 
 
 
 
 
 
 
  89};
  90
  91struct ib_gid_table {
  92	int                  sz;
  93	/* In RoCE, adding a GID to the table requires:
  94	 * (a) Find if this GID is already exists.
  95	 * (b) Find a free space.
  96	 * (c) Write the new GID
  97	 *
  98	 * Delete requires different set of operations:
  99	 * (a) Find the GID
 100	 * (b) Delete it.
 101	 *
 102	 * Add/delete should be carried out atomically.
 103	 * This is done by locking this mutex from multiple
 104	 * writers. We don't need this lock for IB, as the MAD
 105	 * layer replaces all entries. All data_vec entries
 106	 * are locked by this lock.
 107	 **/
 108	struct mutex         lock;
 109	/* This lock protects the table entries from being
 110	 * read and written simultaneously.
 111	 */
 112	rwlock_t	     rwlock;
 113	struct ib_gid_table_entry *data_vec;
 
 
 
 
 
 114};
 115
 116static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
 117{
 118	if (rdma_cap_roce_gid_table(ib_dev, port)) {
 119		struct ib_event event;
 120
 121		event.device		= ib_dev;
 122		event.element.port_num	= port;
 123		event.event		= IB_EVENT_GID_CHANGE;
 124
 125		ib_dispatch_event(&event);
 126	}
 127}
 128
 129static const char * const gid_type_str[] = {
 
 
 
 130	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
 
 131	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
 132};
 133
 134const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
 135{
 136	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
 137		return gid_type_str[gid_type];
 138
 139	return "Invalid GID type";
 140}
 141EXPORT_SYMBOL(ib_cache_gid_type_str);
 142
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 143int ib_cache_gid_parse_type_str(const char *buf)
 144{
 145	unsigned int i;
 146	size_t len;
 147	int err = -EINVAL;
 148
 149	len = strlen(buf);
 150	if (len == 0)
 151		return -EINVAL;
 152
 153	if (buf[len - 1] == '\n')
 154		len--;
 155
 156	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
 157		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
 158		    len == strlen(gid_type_str[i])) {
 159			err = i;
 160			break;
 161		}
 162
 163	return err;
 164}
 165EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
 166
 167/* This function expects that table->rwlock is held for writing in
 168 * all cases, and that table->lock is additionally held in the
 169 * sleepable (RoCE) cases.
 170 */
 171static int write_gid(struct ib_device *ib_dev, u8 port,
 172		     struct ib_gid_table *table, int ix,
 173		     const union ib_gid *gid,
 174		     const struct ib_gid_attr *attr,
 175		     enum gid_table_write_action action,
 176		     bool  default_gid)
 177	__releases(&table->rwlock) __acquires(&table->rwlock)
 178{
 179	int ret = 0;
 180	struct net_device *old_net_dev;
 181	enum ib_gid_type old_gid_type;
 182
 183	/* When the port has a RoCE GID table (rdma_cap_roce_gid_table),
 184	 * this function must be protected by a sleepable lock.
 185	 */
 186
 187	if (rdma_cap_roce_gid_table(ib_dev, port)) {
 188		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
 189		write_unlock_irq(&table->rwlock);
 190		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
 191		 * RoCE providers and thus only updates the cache.
 192		 */
 193		if (action == GID_TABLE_WRITE_ACTION_ADD)
 194			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
 195					      &table->data_vec[ix].context);
 196		else if (action == GID_TABLE_WRITE_ACTION_DEL)
 197			ret = ib_dev->del_gid(ib_dev, port, ix,
 198					      &table->data_vec[ix].context);
 199		write_lock_irq(&table->rwlock);
 200	}
 201
 202	old_net_dev = table->data_vec[ix].attr.ndev;
 203	old_gid_type = table->data_vec[ix].attr.gid_type;
 204	if (old_net_dev && old_net_dev != attr->ndev)
 205		dev_put(old_net_dev);
 206	/* If the provider call failed, or on delete, store the zero GID. */
 207	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
 208		gid = &zgid;
 209		attr = &zattr;
 210		table->data_vec[ix].context = NULL;
 211	}
 212
 213	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
 214	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
 215	if (default_gid) {
 216		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
 217		if (action == GID_TABLE_WRITE_ACTION_DEL)
 218			table->data_vec[ix].attr.gid_type = old_gid_type;
 219	}
 220	if (table->data_vec[ix].attr.ndev &&
 221	    table->data_vec[ix].attr.ndev != old_net_dev)
 222		dev_hold(table->data_vec[ix].attr.ndev);
 223
 224	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;
 225
 226	return ret;
 227}
 228
 229static int add_gid(struct ib_device *ib_dev, u8 port,
 230		   struct ib_gid_table *table, int ix,
 231		   const union ib_gid *gid,
 232		   const struct ib_gid_attr *attr,
 233		   bool  default_gid) {
 234	return write_gid(ib_dev, port, table, ix, gid, attr,
 235			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
 236}
 237
 238static int modify_gid(struct ib_device *ib_dev, u8 port,
 239		      struct ib_gid_table *table, int ix,
 240		      const union ib_gid *gid,
 241		      const struct ib_gid_attr *attr,
 242		      bool  default_gid) {
 243	return write_gid(ib_dev, port, table, ix, gid, attr,
 244			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
 245}
 246
 247static int del_gid(struct ib_device *ib_dev, u8 port,
 248		   struct ib_gid_table *table, int ix,
 249		   bool  default_gid) {
 250	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
 251			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
 252}
 253
 254/* Caller must hold the rwlock for reading. */
 255static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
 256		    const struct ib_gid_attr *val, bool default_gid,
 257		    unsigned long mask, int *pempty)
 258{
 259	int i = 0;
 260	int found = -1;
 261	int empty = pempty ? -1 : 0;
 262
 263	while (i < table->sz && (found < 0 || empty < 0)) {
 264		struct ib_gid_table_entry *data = &table->data_vec[i];
 265		struct ib_gid_attr *attr = &data->attr;
 266		int curr_index = i;
 267
 268		i++;
 269
 270		if (data->props & GID_TABLE_ENTRY_INVALID)
 271			continue;
 272
 273		if (empty < 0)
 274			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
 275			    !memcmp(attr, &zattr, sizeof(*attr)) &&
 276			    !data->props)
 277				empty = curr_index;
 278
 279		if (found >= 0)
 280			continue;
 281
 282		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
 283		    attr->gid_type != val->gid_type)
 284			continue;
 285
 286		if (mask & GID_ATTR_FIND_MASK_GID &&
 287		    memcmp(gid, &data->gid, sizeof(*gid)))
 288			continue;
 289
 290		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
 291		    attr->ndev != val->ndev)
 292			continue;
 293
 294		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
 295		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
 296		    default_gid)
 297			continue;
 298
 299		found = curr_index;
 300	}
 301
 302	if (pempty)
 303		*pempty = empty;
 304
 305	return found;
 306}
 307
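/* Compose the RoCE default GID: the IPv6 link-local prefix (fe80::/64)
 * followed by an EUI-64 interface ID derived from the netdev's MAC
 * address.
 */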
 308static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
 309{
 310	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000ULL);
 311	addrconf_ifid_eui48(&gid->raw[8], dev);
 312}
 313
 314int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 315		     union ib_gid *gid, struct ib_gid_attr *attr)
 316{
 317	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 318	struct ib_gid_table *table;
 319	int ix;
 320	int ret = 0;
 321	struct net_device *idev;
 322	int empty;
 323
 324	table = ports_table[port - rdma_start_port(ib_dev)];
 325
 326	if (!memcmp(gid, &zgid, sizeof(*gid)))
 327		return -EINVAL;
 328
 329	if (ib_dev->get_netdev) {
 330		idev = ib_dev->get_netdev(ib_dev, port);
 331		if (idev && attr->ndev != idev) {
 332			union ib_gid default_gid;
 333
 334			/* Adding default GIDs is not permitted */
 335			make_default_gid(idev, &default_gid);
 336			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
 337				dev_put(idev);
 338				return -EPERM;
 339			}
 340		}
 341		if (idev)
 342			dev_put(idev);
 343	}
 344
 345	mutex_lock(&table->lock);
 346	write_lock_irq(&table->rwlock);
 347
 348	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
 349		      GID_ATTR_FIND_MASK_GID_TYPE |
 350		      GID_ATTR_FIND_MASK_NETDEV, &empty);
 351	if (ix >= 0)
 352		goto out_unlock;
 353
 354	if (empty < 0) {
 355		ret = -ENOSPC;
 356		goto out_unlock;
 357	}
 358
 359	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
 360	if (!ret)
 361		dispatch_gid_change_event(ib_dev, port);
 362
 363out_unlock:
 364	write_unlock_irq(&table->rwlock);
 365	mutex_unlock(&table->lock);
 366	return ret;
 367}
 368
 369int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 370		     union ib_gid *gid, struct ib_gid_attr *attr)
 371{
 372	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 373	struct ib_gid_table *table;
 374	int ix;
 375
 376	table = ports_table[port - rdma_start_port(ib_dev)];
 377
 378	mutex_lock(&table->lock);
 379	write_lock_irq(&table->rwlock);
 380
 381	ix = find_gid(table, gid, attr, false,
 382		      GID_ATTR_FIND_MASK_GID	  |
 383		      GID_ATTR_FIND_MASK_GID_TYPE |
 384		      GID_ATTR_FIND_MASK_NETDEV	  |
 385		      GID_ATTR_FIND_MASK_DEFAULT,
 386		      NULL);
 387	if (ix < 0)
 388		goto out_unlock;
 389
 390	if (!del_gid(ib_dev, port, table, ix, false))
 391		dispatch_gid_change_event(ib_dev, port);
 392
 393out_unlock:
 394	write_unlock_irq(&table->rwlock);
 395	mutex_unlock(&table->lock);
 396	return 0;
 397}
 398
 399int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 400				     struct net_device *ndev)
 401{
 402	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 403	struct ib_gid_table *table;
 404	int ix;
 405	bool deleted = false;
 406
 407	table  = ports_table[port - rdma_start_port(ib_dev)];
 408
 409	mutex_lock(&table->lock);
 410	write_lock_irq(&table->rwlock);
 411
 412	for (ix = 0; ix < table->sz; ix++)
 413		if (table->data_vec[ix].attr.ndev == ndev)
 414			if (!del_gid(ib_dev, port, table, ix,
 415				     !!(table->data_vec[ix].props &
 416					GID_TABLE_ENTRY_DEFAULT)))
 417				deleted = true;
 418
 419	write_unlock_irq(&table->rwlock);
 420	mutex_unlock(&table->lock);
 421
 422	if (deleted)
 423		dispatch_gid_change_event(ib_dev, port);
 424
 425	return 0;
 426}
 427
 428static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
 429			      union ib_gid *gid, struct ib_gid_attr *attr)
 430{
 431	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 432	struct ib_gid_table *table;
 433
 434	table = ports_table[port - rdma_start_port(ib_dev)];
 435
 436	if (index < 0 || index >= table->sz)
 437		return -EINVAL;
 438
 439	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
 440		return -EAGAIN;
 441
 442	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
 443	if (attr) {
 444		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
 445		if (attr->ndev)
 446			dev_hold(attr->ndev);
 447	}
 448
 449	return 0;
 450}
 451
 452static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
 453				    const union ib_gid *gid,
 454				    const struct ib_gid_attr *val,
 455				    unsigned long mask,
 456				    u8 *port, u16 *index)
 457{
 458	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 459	struct ib_gid_table *table;
 460	u8 p;
 461	int local_index;
 462	unsigned long flags;
 463
 464	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
 465		table = ports_table[p];
 466		read_lock_irqsave(&table->rwlock, flags);
 467		local_index = find_gid(table, gid, val, false, mask, NULL);
 468		if (local_index >= 0) {
 469			if (index)
 470				*index = local_index;
 471			if (port)
 472				*port = p + rdma_start_port(ib_dev);
 473			read_unlock_irqrestore(&table->rwlock, flags);
 474			return 0;
 475		}
 476		read_unlock_irqrestore(&table->rwlock, flags);
 477	}
 478
 479	return -ENOENT;
 480}
 481
 482static int ib_cache_gid_find(struct ib_device *ib_dev,
 483			     const union ib_gid *gid,
 484			     enum ib_gid_type gid_type,
 485			     struct net_device *ndev, u8 *port,
 486			     u16 *index)
 487{
 488	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 489			     GID_ATTR_FIND_MASK_GID_TYPE;
 490	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
 491
 492	if (ndev)
 493		mask |= GID_ATTR_FIND_MASK_NETDEV;
 494
 495	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
 496					mask, port, index);
 497}
 498
 499int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
 500			       const union ib_gid *gid,
 501			       enum ib_gid_type gid_type,
 502			       u8 port, struct net_device *ndev,
 503			       u16 *index)
 504{
 505	int local_index;
 506	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 507	struct ib_gid_table *table;
 508	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 509			     GID_ATTR_FIND_MASK_GID_TYPE;
 510	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
 511	unsigned long flags;
 512
 513	if (port < rdma_start_port(ib_dev) ||
 514	    port > rdma_end_port(ib_dev))
 515		return -ENOENT;
 516
 517	table = ports_table[port - rdma_start_port(ib_dev)];
 518
 519	if (ndev)
 520		mask |= GID_ATTR_FIND_MASK_NETDEV;
 521
 522	read_lock_irqsave(&table->rwlock, flags);
 523	local_index = find_gid(table, gid, &val, false, mask, NULL);
 524	if (local_index >= 0) {
 525		if (index)
 526			*index = local_index;
 527		read_unlock_irqrestore(&table->rwlock, flags);
 528		return 0;
 529	}
 530
 531	read_unlock_irqrestore(&table->rwlock, flags);
 532	return -ENOENT;
 533}
 534EXPORT_SYMBOL(ib_find_cached_gid_by_port);
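/*
 * Illustrative sketch (editor's addition, hypothetical caller): resolve
 * a RoCE v2 GID to its table index on a known port using the lookup
 * exported above.
 */
#if 0
static int example_resolve_gid(struct ib_device *ib_dev, u8 port,
			       const union ib_gid *gid,
			       struct net_device *ndev)
{
	u16 index;
	int ret;

	ret = ib_find_cached_gid_by_port(ib_dev, gid,
					 IB_GID_TYPE_ROCE_UDP_ENCAP,
					 port, ndev, &index);
	if (ret)
		return ret;	/* -ENOENT when no entry matches */
	return index;
}
#endif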
 535
 536/**
 537 * ib_cache_gid_find_by_filter - Returns the GID table index where a
 538 * specified GID value occurs
 539 * @ib_dev: The device to query.
 540 * @gid: The GID value to search for.
 541 * @port: The port number of the device where the GID value should be
 542 *   searched.
 543 * @filter: The filter function is executed on any matching GID in the table.
 544 *   If the filter function returns true, the corresponding index is returned;
 545 *   otherwise, the search continues. It is guaranteed that while filter
 546 *   runs, the ndev field is valid and the structure won't change. filter
 547 *   runs in an atomic context and must not be NULL.
 548 * @context: Opaque pointer passed through to the filter function.
 549 * @index: The index into the cached GID table where the GID was found. This
 550 *   parameter may be NULL.
 551 *
 552 * ib_cache_gid_find_by_filter() searches the port's GID table for the
 553 * specified GID value, accepting only entries for which the filter
 554 * function returns true. It is only supported on RoCE ports.
 555 */
 556static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
 557				       const union ib_gid *gid,
 558				       u8 port,
 559				       bool (*filter)(const union ib_gid *,
 560						      const struct ib_gid_attr *,
 561						      void *),
 562				       void *context,
 563				       u16 *index)
 564{
 565	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 566	struct ib_gid_table *table;
 567	unsigned int i;
 568	unsigned long flags;
 569	bool found = false;
 570
 571	if (!ports_table)
 572		return -EOPNOTSUPP;
 573
 574	if (port < rdma_start_port(ib_dev) ||
 575	    port > rdma_end_port(ib_dev) ||
 576	    !rdma_protocol_roce(ib_dev, port))
 577		return -EPROTONOSUPPORT;
 578
 579	table = ports_table[port - rdma_start_port(ib_dev)];
 580
 581	read_lock_irqsave(&table->rwlock, flags);
 582	for (i = 0; i < table->sz; i++) {
 583		struct ib_gid_attr attr;
 584
 585		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
 586			goto next;
 587
 588		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
 589			goto next;
 590
 591		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));
 592
 593		if (filter(gid, &attr, context))
 594			found = true;
 595
 596next:
 597		if (found)
 598			break;
 599	}
 600	read_unlock_irqrestore(&table->rwlock, flags);
 601
 602	if (!found)
 603		return -ENOENT;
 604
 605	if (index)
 606		*index = i;
 607	return 0;
 608}
 609
 610static struct ib_gid_table *alloc_gid_table(int sz)
 611{
 612	struct ib_gid_table *table =
 613		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);
 614
 615	if (!table)
 616		return NULL;
 617
 618	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
 619	if (!table->data_vec)
 620		goto err_free_table;
 621
 622	mutex_init(&table->lock);
 623
 624	table->sz = sz;
 625	rwlock_init(&table->rwlock);
 626
 627	return table;
 628
 629err_free_table:
 630	kfree(table);
 631	return NULL;
 632}
 633
 634static void release_gid_table(struct ib_gid_table *table)
 635{
 636	if (table) {
 637		kfree(table->data_vec);
 638		kfree(table);
 639	}
 640}
 641
 642static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
 643				   struct ib_gid_table *table)
 644{
 645	int i;
 646	bool deleted = false;
 647
 648	if (!table)
 649		return;
 650
 651	write_lock_irq(&table->rwlock);
 652	for (i = 0; i < table->sz; ++i) {
 653		if (memcmp(&table->data_vec[i].gid, &zgid,
 654			   sizeof(table->data_vec[i].gid)))
 655			if (!del_gid(ib_dev, port, table, i,
 656				     !!(table->data_vec[i].props &
 657					GID_TABLE_ENTRY_DEFAULT)))
 658				deleted = true;
 659	}
 660	write_unlock_irq(&table->rwlock);
 661
 662	if (deleted)
 663		dispatch_gid_change_event(ib_dev, port);
 664}
 665
 666void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
 667				  struct net_device *ndev,
 668				  unsigned long gid_type_mask,
 669				  enum ib_cache_gid_default_mode mode)
 670{
 671	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 672	union ib_gid gid;
 673	struct ib_gid_attr gid_attr;
 674	struct ib_gid_attr zattr_type = zattr;
 675	struct ib_gid_table *table;
 676	unsigned int gid_type;
 677
 678	table  = ports_table[port - rdma_start_port(ib_dev)];
 679
 680	make_default_gid(ndev, &gid);
 681	memset(&gid_attr, 0, sizeof(gid_attr));
 682	gid_attr.ndev = ndev;
 683
 684	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
 685		int ix;
 686		union ib_gid current_gid;
 687		struct ib_gid_attr current_gid_attr = {};
 688
 689		if (1UL << gid_type & ~gid_type_mask)
 690			continue;
 691
 692		gid_attr.gid_type = gid_type;
 693
 694		mutex_lock(&table->lock);
 695		write_lock_irq(&table->rwlock);
 696		ix = find_gid(table, NULL, &gid_attr, true,
 697			      GID_ATTR_FIND_MASK_GID_TYPE |
 698			      GID_ATTR_FIND_MASK_DEFAULT,
 699			      NULL);
 700
 701		/* Couldn't find the default GID location */
 702		if (WARN_ON(ix < 0))
 703			goto release;
 704
 705		zattr_type.gid_type = gid_type;
 706
 707		if (!__ib_cache_gid_get(ib_dev, port, ix,
 708					&current_gid, &current_gid_attr) &&
 709		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
 710		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
 711		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
 712			goto release;
 713
 714		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
 715		    memcmp(&current_gid_attr, &zattr_type,
 716			   sizeof(current_gid_attr))) {
 717			if (del_gid(ib_dev, port, table, ix, true)) {
 718				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
 719					ix, gid.raw);
 720				goto release;
 721			} else {
 722				dispatch_gid_change_event(ib_dev, port);
 723			}
 724		}
 725
 726		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
 727			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
 728				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
 729					gid.raw);
 730			else
 731				dispatch_gid_change_event(ib_dev, port);
 732		}
 733
 734release:
 735		if (current_gid_attr.ndev)
 736			dev_put(current_gid_attr.ndev);
 737		write_unlock_irq(&table->rwlock);
 738		mutex_unlock(&table->lock);
 739	}
 740}
 741
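/* Reserve the first hweight(roce_gid_type_mask) entries of the table
 * as default GID slots, assigning each reserved slot one of the RoCE
 * GID types the port supports.
 */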
 742static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
 743				     struct ib_gid_table *table)
 744{
 745	unsigned int i;
 746	unsigned long roce_gid_type_mask;
 747	unsigned int num_default_gids;
 748	unsigned int current_gid = 0;
 749
 750	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
 751	num_default_gids = hweight_long(roce_gid_type_mask);
 752	for (i = 0; i < num_default_gids && i < table->sz; i++) {
 753		struct ib_gid_table_entry *entry =
 754			&table->data_vec[i];
 755
 756		entry->props |= GID_TABLE_ENTRY_DEFAULT;
 757		current_gid = find_next_bit(&roce_gid_type_mask,
 758					    BITS_PER_LONG,
 759					    current_gid);
 760		entry->attr.gid_type = current_gid++;
 761	}
 762
 763	return 0;
 764}
 765
 766static int _gid_table_setup_one(struct ib_device *ib_dev)
 767{
 768	u8 port;
 769	struct ib_gid_table **table;
 770	int err = 0;
 771
 772	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
 773	if (!table)
 774		return -ENOMEM;
 775
 776	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
 777		u8 rdma_port = port + rdma_start_port(ib_dev);
 778
 779		table[port] =
 780			alloc_gid_table(
 781				ib_dev->port_immutable[rdma_port].gid_tbl_len);
 782		if (!table[port]) {
 783			err = -ENOMEM;
 784			goto rollback_table_setup;
 785		}
 786
 787		err = gid_table_reserve_default(ib_dev,
 788						port + rdma_start_port(ib_dev),
 789						table[port]);
 790		if (err)
 791			goto rollback_table_setup;
 792	}
 793
 794	ib_dev->cache.gid_cache = table;
 795	return 0;
 796
 797rollback_table_setup:
 798	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
 799		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
 800				       table[port]);
 801		release_gid_table(table[port]);
 802	}
 803
 804	kfree(table);
 805	return err;
 806}
 807
 808static void gid_table_release_one(struct ib_device *ib_dev)
 809{
 810	struct ib_gid_table **table = ib_dev->cache.gid_cache;
 811	u8 port;
 812
 813	if (!table)
 814		return;
 815
 816	for (port = 0; port < ib_dev->phys_port_cnt; port++)
 817		release_gid_table(table[port]);
 818
 819	kfree(table);
 820	ib_dev->cache.gid_cache = NULL;
 821}
 822
 823static void gid_table_cleanup_one(struct ib_device *ib_dev)
 824{
 825	struct ib_gid_table **table = ib_dev->cache.gid_cache;
 826	u8 port;
 827
 828	if (!table)
 829		return;
 830
 831	for (port = 0; port < ib_dev->phys_port_cnt; port++)
 832		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
 833				       table[port]);
 834}
 835
 836static int gid_table_setup_one(struct ib_device *ib_dev)
 837{
 838	int err;
 839
 840	err = _gid_table_setup_one(ib_dev);
 841
 842	if (err)
 843		return err;
 844
 845	err = roce_rescan_device(ib_dev);
 846
 847	if (err) {
 848		gid_table_cleanup_one(ib_dev);
 849		gid_table_release_one(ib_dev);
 850	}
 851
 852	return err;
 853}
 854
 855int ib_get_cached_gid(struct ib_device *device,
 856		      u8                port_num,
 857		      int               index,
 858		      union ib_gid     *gid,
 859		      struct ib_gid_attr *gid_attr)
 860{
 861	int res;
 862	unsigned long flags;
 863	int res;
 864	unsigned long flags;
 865	struct ib_gid_table **ports_table = device->cache.gid_cache;
 866	struct ib_gid_table *table;
 867
 868	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 869		return -EINVAL;
 870	table = ports_table[port_num - rdma_start_port(device)];
 869	read_lock_irqsave(&table->rwlock, flags);
 870	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
 871	read_unlock_irqrestore(&table->rwlock, flags);
 872
 873	return res;
 874}
 875EXPORT_SYMBOL(ib_get_cached_gid);
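/*
 * Illustrative sketch (editor's addition, hypothetical caller): read
 * GID 0 of a port. When a gid_attr is supplied and holds a netdev, a
 * reference was taken on it, so the caller must release it.
 */
#if 0
static int example_read_gid0(struct ib_device *device, u8 port_num)
{
	union ib_gid gid;
	struct ib_gid_attr attr;
	int ret;

	ret = ib_get_cached_gid(device, port_num, 0, &gid, &attr);
	if (ret)
		return ret;
	if (attr.ndev)
		dev_put(attr.ndev);	/* drop ref taken by the cache */
	return 0;
}
#endif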
 876
 877int ib_find_cached_gid(struct ib_device *device,
 878		       const union ib_gid *gid,
 879		       enum ib_gid_type gid_type,
 880		       struct net_device *ndev,
 881		       u8               *port_num,
 882		       u16              *index)
 883{
 884	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
 885}
 886EXPORT_SYMBOL(ib_find_cached_gid);
 887
 888int ib_find_gid_by_filter(struct ib_device *device,
 889			  const union ib_gid *gid,
 890			  u8 port_num,
 891			  bool (*filter)(const union ib_gid *gid,
 892					 const struct ib_gid_attr *,
 893					 void *),
 894			  void *context, u16 *index)
 895{
 896	/* Only RoCE GID table supports filter function */
 897	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
 898		return -EPROTONOSUPPORT;
 899
 900	return ib_cache_gid_find_by_filter(device, gid,
 901					   port_num, filter,
 902					   context, index);
 903}
 904EXPORT_SYMBOL(ib_find_gid_by_filter);
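/*
 * Illustrative sketch (editor's addition, hypothetical filter): accept
 * only entries whose netdev matches the one passed via @context. Per
 * the contract documented above, the filter runs in atomic context and
 * must not sleep.
 */
#if 0
static bool example_ndev_filter(const union ib_gid *gid,
				const struct ib_gid_attr *attr,
				void *context)
{
	return attr->ndev == context;
}
#endif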
 905
 906int ib_get_cached_pkey(struct ib_device *device,
 907		       u8                port_num,
 908		       int               index,
 909		       u16              *pkey)
 910{
 911	struct ib_pkey_cache *cache;
 912	unsigned long flags;
 913	int ret = 0;
 914
 915	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 916		return -EINVAL;
 917
 918	read_lock_irqsave(&device->cache.lock, flags);
 919
 920	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
 921
 922	if (index < 0 || index >= cache->table_len)
 923		ret = -EINVAL;
 924	else
 925		*pkey = cache->table[index];
 926
 927	read_unlock_irqrestore(&device->cache.lock, flags);
 928
 929	return ret;
 930}
 931EXPORT_SYMBOL(ib_get_cached_pkey);
 932
 933int ib_find_cached_pkey(struct ib_device *device,
 934			u8                port_num,
 935			u16               pkey,
 936			u16              *index)
 937{
 938	struct ib_pkey_cache *cache;
 939	unsigned long flags;
 940	int i;
 941	int ret = -ENOENT;
 942	int partial_ix = -1;
 943
 944	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 945		return -EINVAL;
 946
 947	read_lock_irqsave(&device->cache.lock, flags);
 948
 949	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
 950
 951	*index = -1;
 952
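	/*
	 * Bit 15 of a P_Key is the membership bit: set for full
	 * membership, clear for limited. Prefer a full-member match,
	 * falling back to the first limited (partial) match found.
	 */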
 953	for (i = 0; i < cache->table_len; ++i)
 954		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
 955			if (cache->table[i] & 0x8000) {
 956				*index = i;
 957				ret = 0;
 958				break;
 959			} else
 960				partial_ix = i;
 961		}
 962
 963	if (ret && partial_ix >= 0) {
 964		*index = partial_ix;
 965		ret = 0;
 966	}
 967
 968	read_unlock_irqrestore(&device->cache.lock, flags);
 969
 970	return ret;
 971}
 972EXPORT_SYMBOL(ib_find_cached_pkey);
 973
 974int ib_find_exact_cached_pkey(struct ib_device *device,
 975			      u8                port_num,
 976			      u16               pkey,
 977			      u16              *index)
 978{
 979	struct ib_pkey_cache *cache;
 980	unsigned long flags;
 981	int i;
 982	int ret = -ENOENT;
 983
 984	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 985		return -EINVAL;
 986
 987	read_lock_irqsave(&device->cache.lock, flags);
 988
 989	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
 990
 991	*index = -1;
 992
 993	for (i = 0; i < cache->table_len; ++i)
 994		if (cache->table[i] == pkey) {
 995			*index = i;
 996			ret = 0;
 997			break;
 998		}
 999
1000	read_unlock_irqrestore(&device->cache.lock, flags);
1001
1002	return ret;
1003}
1004EXPORT_SYMBOL(ib_find_exact_cached_pkey);
1005
1006int ib_get_cached_lmc(struct ib_device *device,
1007		      u8                port_num,
1008		      u8                *lmc)
1009{
1010	unsigned long flags;
1011	int ret = 0;
1012
1013	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
1014		return -EINVAL;
1015
1016	read_lock_irqsave(&device->cache.lock, flags);
1017	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
1018	read_unlock_irqrestore(&device->cache.lock, flags);
1019
1020	return ret;
1021}
1022EXPORT_SYMBOL(ib_get_cached_lmc);
1023
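/* Re-read the port attributes and P_Key table from the device, and the
 * GID table as well on ports without a RoCE GID table, then swap the
 * fresh copies into the cache under the write lock.
 */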
1024static void ib_cache_update(struct ib_device *device,
1025			    u8                port)
1026{
1027	struct ib_port_attr       *tprops = NULL;
1028	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
1029	struct ib_gid_cache {
1030		int             table_len;
1031		union ib_gid    table[];
1032	}			  *gid_cache = NULL;
1033	int                        i;
1034	int                        ret;
1035	struct ib_gid_table	  *table;
1036	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
1037	bool			   use_roce_gid_table =
1038					rdma_cap_roce_gid_table(device, port);
1039
1040	if (port < rdma_start_port(device) || port > rdma_end_port(device))
1041		return;
1042
1043	table = ports_table[port - rdma_start_port(device)];
1044
1045	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
1046	if (!tprops)
1047		return;
1048
1049	ret = ib_query_port(device, port, tprops);
1050	if (ret) {
1051		pr_warn("ib_query_port failed (%d) for %s\n",
1052			ret, device->name);
1053		goto err;
1054	}
1055
1056	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
1057			     sizeof *pkey_cache->table, GFP_KERNEL);
1058	if (!pkey_cache)
1059		goto err;
1060
1061	pkey_cache->table_len = tprops->pkey_tbl_len;
1062
1063	if (!use_roce_gid_table) {
1064		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
1065			    sizeof(*gid_cache->table), GFP_KERNEL);
1066		if (!gid_cache)
1067			goto err;
1068
1069		gid_cache->table_len = tprops->gid_tbl_len;
1070	}
1071
1072	for (i = 0; i < pkey_cache->table_len; ++i) {
1073		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
1074		if (ret) {
1075			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
1076				ret, device->name, i);
1077			goto err;
1078		}
1079	}
1080
1081	if (!use_roce_gid_table) {
1082		for (i = 0;  i < gid_cache->table_len; ++i) {
1083			ret = ib_query_gid(device, port, i,
1084					   gid_cache->table + i, NULL);
1085			if (ret) {
1086				pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
1087					ret, device->name, i);
1088				goto err;
1089			}
1090		}
1091	}
1092
1093	write_lock_irq(&device->cache.lock);
1094
1095	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
1096
1097	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
1098	if (!use_roce_gid_table) {
1099		write_lock(&table->rwlock);
1100		for (i = 0; i < gid_cache->table_len; i++) {
1101			modify_gid(device, port, table, i, gid_cache->table + i,
1102				   &zattr, false);
1103		}
1104		write_unlock(&table->rwlock);
1105	}
1106
1107	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
1108
1109	write_unlock_irq(&device->cache.lock);
1110
1111	kfree(gid_cache);
1112	kfree(old_pkey_cache);
1113	kfree(tprops);
1114	return;
1115
1116err:
1117	kfree(pkey_cache);
1118	kfree(gid_cache);
1119	kfree(tprops);
1120}
1121
1122static void ib_cache_task(struct work_struct *_work)
1123{
1124	struct ib_update_work *work =
1125		container_of(_work, struct ib_update_work, work);
1126
1127	ib_cache_update(work->device, work->port_num);
1128	kfree(work);
1129}
1130
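/* Event callbacks may run in atomic context (hence GFP_ATOMIC below),
 * so the cache update itself is deferred to process context on ib_wq.
 */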
1131static void ib_cache_event(struct ib_event_handler *handler,
1132			   struct ib_event *event)
1133{
1134	struct ib_update_work *work;
1135
1136	if (event->event == IB_EVENT_PORT_ERR    ||
1137	    event->event == IB_EVENT_PORT_ACTIVE ||
1138	    event->event == IB_EVENT_LID_CHANGE  ||
1139	    event->event == IB_EVENT_PKEY_CHANGE ||
1140	    event->event == IB_EVENT_SM_CHANGE   ||
1141	    event->event == IB_EVENT_CLIENT_REREGISTER ||
1142	    event->event == IB_EVENT_GID_CHANGE) {
1143		work = kmalloc(sizeof *work, GFP_ATOMIC);
1144		if (work) {
1145			INIT_WORK(&work->work, ib_cache_task);
1146			work->device   = event->device;
1147			work->port_num = event->element.port_num;
1148			queue_work(ib_wq, &work->work);
1149		}
1150	}
1151}
1152
1153int ib_cache_setup_one(struct ib_device *device)
1154{
1155	int p;
1156	int err;
1157
1158	rwlock_init(&device->cache.lock);
1159
1160	device->cache.pkey_cache =
1161		kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
1162			sizeof(*device->cache.pkey_cache), GFP_KERNEL);
1163	device->cache.lmc_cache =
1164		kmalloc_array(rdma_end_port(device) - rdma_start_port(device) + 1,
1165			      sizeof(*device->cache.lmc_cache), GFP_KERNEL);
1167	if (!device->cache.pkey_cache ||
1168	    !device->cache.lmc_cache) {
1169		err = -ENOMEM;
1170		goto free;
1171	}
1172
1173	err = gid_table_setup_one(device);
1174	if (err)
1175		goto free;
1176
1177	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
1178		ib_cache_update(device, p + rdma_start_port(device));
1179
1180	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
1181			      device, ib_cache_event);
1182	err = ib_register_event_handler(&device->cache.event_handler);
1183	if (err)
1184		goto err;
1185
1186	return 0;
1187
1188err:
1189	gid_table_cleanup_one(device);
1190free:
1191	kfree(device->cache.pkey_cache);
1192	kfree(device->cache.lmc_cache);
1193	return err;
1194}
1195
1196void ib_cache_release_one(struct ib_device *device)
1197{
1198	int p;
1199
1200	/*
1201	 * The release function frees all the cache elements.
1202	 * This function should be called as part of freeing
1203	 * all the device's resources when the cache could no
1204	 * longer be accessed.
1205	 */
1206	if (device->cache.pkey_cache)
1207		for (p = 0;
1208		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
1209			kfree(device->cache.pkey_cache[p]);
1210
1211	gid_table_release_one(device);
1212	kfree(device->cache.pkey_cache);
1213	kfree(device->cache.lmc_cache);
1214}
1215
1216void ib_cache_cleanup_one(struct ib_device *device)
1217{
1218	/* The cleanup function unregisters the event handler,
1219	 * waits for all in-progress workqueue elements and cleans
1220	 * up the GID cache. This function should be called after
1221	 * the device was removed from the devices list and all
1222	 * clients were removed, so the cache exists but is
1223	 * non-functional and shouldn't be updated anymore.
1224	 */
1225	ib_unregister_event_handler(&device->cache.event_handler);
1226	flush_workqueue(ib_wq);
1227	gid_table_cleanup_one(device);
1228}
1229
1230void __init ib_cache_setup(void)
1231{
1232	roce_gid_mgmt_init();
1233}
1234
1235void __exit ib_cache_cleanup(void)
1236{
1237	roce_gid_mgmt_cleanup();
1238}