v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright Gavin Shan, IBM Corporation 2016.
   4 */
   5
   6#include <linux/module.h>
   7#include <linux/kernel.h>
   8#include <linux/init.h>
   9#include <linux/netdevice.h>
  10#include <linux/skbuff.h>
  11#include <linux/of.h>
  12#include <linux/platform_device.h>
  13
  14#include <net/ncsi.h>
  15#include <net/net_namespace.h>
  16#include <net/sock.h>
  17#include <net/addrconf.h>
  18#include <net/ipv6.h>
  19#include <net/genetlink.h>
  20
  21#include "internal.h"
  22#include "ncsi-pkt.h"
  23#include "ncsi-netlink.h"
  24
  25LIST_HEAD(ncsi_dev_list);
  26DEFINE_SPINLOCK(ncsi_dev_lock);
  27
  28bool ncsi_channel_has_link(struct ncsi_channel *channel)
  29{
  30	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
  31}
  32
  33bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
  34			  struct ncsi_channel *channel)
  35{
  36	struct ncsi_package *np;
  37	struct ncsi_channel *nc;
  38
  39	NCSI_FOR_EACH_PACKAGE(ndp, np)
  40		NCSI_FOR_EACH_CHANNEL(np, nc) {
  41			if (nc == channel)
  42				continue;
  43			if (nc->state == NCSI_CHANNEL_ACTIVE &&
  44			    ncsi_channel_has_link(nc))
  45				return false;
  46		}
  47
  48	return true;
  49}
  50
  51static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
  52{
  53	struct ncsi_dev *nd = &ndp->ndev;
  54	struct ncsi_package *np;
  55	struct ncsi_channel *nc;
  56	unsigned long flags;
  57
  58	nd->state = ncsi_dev_state_functional;
  59	if (force_down) {
  60		nd->link_up = 0;
  61		goto report;
  62	}
  63
  64	nd->link_up = 0;
  65	NCSI_FOR_EACH_PACKAGE(ndp, np) {
  66		NCSI_FOR_EACH_CHANNEL(np, nc) {
  67			spin_lock_irqsave(&nc->lock, flags);
  68
  69			if (!list_empty(&nc->link) ||
  70			    nc->state != NCSI_CHANNEL_ACTIVE) {
  71				spin_unlock_irqrestore(&nc->lock, flags);
  72				continue;
  73			}
  74
  75			if (ncsi_channel_has_link(nc)) {
  76				spin_unlock_irqrestore(&nc->lock, flags);
  77				nd->link_up = 1;
  78				goto report;
  79			}
  80
  81			spin_unlock_irqrestore(&nc->lock, flags);
  82		}
  83	}
  84
  85report:
  86	nd->handler(nd);
  87}
  88
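/* Explanatory note (added, not part of the upstream file): the channel
 * monitor below is a one-second timer.  While the monitor state is
 * START or RETRY it transmits a Get Link Status (GLS) command and
 * relies on the GLS response handler elsewhere in the driver to rewind
 * the state; if the state counts past NCSI_CHANNEL_MONITOR_WAIT_MAX
 * without that happening, the channel is treated as timed out: link is
 * reported down, the link bit is cleared, NCSI_DEV_RESHUFFLE is set and
 * the channel is queued for reconfiguration.
 */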
  89static void ncsi_channel_monitor(struct timer_list *t)
  90{
  91	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
  92	struct ncsi_package *np = nc->package;
  93	struct ncsi_dev_priv *ndp = np->ndp;
  94	struct ncsi_channel_mode *ncm;
  95	struct ncsi_cmd_arg nca;
  96	bool enabled, chained;
  97	unsigned int monitor_state;
  98	unsigned long flags;
  99	int state, ret;
 100
 101	spin_lock_irqsave(&nc->lock, flags);
 102	state = nc->state;
 103	chained = !list_empty(&nc->link);
 104	enabled = nc->monitor.enabled;
 105	monitor_state = nc->monitor.state;
 106	spin_unlock_irqrestore(&nc->lock, flags);
 107
 108	if (!enabled)
 109		return;		/* expected race disabling timer */
 110	if (WARN_ON_ONCE(chained))
 111		goto bad_state;
 112
 113	if (state != NCSI_CHANNEL_INACTIVE &&
 114	    state != NCSI_CHANNEL_ACTIVE) {
 115bad_state:
 116		netdev_warn(ndp->ndev.dev,
 117			    "Bad NCSI monitor state channel %d 0x%x %s queue\n",
 118			    nc->id, state, chained ? "on" : "off");
 119		spin_lock_irqsave(&nc->lock, flags);
 120		nc->monitor.enabled = false;
 121		spin_unlock_irqrestore(&nc->lock, flags);
 122		return;
 123	}
 124
 125	switch (monitor_state) {
 126	case NCSI_CHANNEL_MONITOR_START:
 127	case NCSI_CHANNEL_MONITOR_RETRY:
 128		nca.ndp = ndp;
 129		nca.package = np->id;
 130		nca.channel = nc->id;
 131		nca.type = NCSI_PKT_CMD_GLS;
 132		nca.req_flags = 0;
 133		ret = ncsi_xmit_cmd(&nca);
 134		if (ret)
 135			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
 136				   ret);
 137		break;
 138	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
 139		break;
 140	default:
 141		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
 142			   nc->id);
 143		ncsi_report_link(ndp, true);
 144		ndp->flags |= NCSI_DEV_RESHUFFLE;
 145
 146		ncm = &nc->modes[NCSI_MODE_LINK];
 147		spin_lock_irqsave(&nc->lock, flags);
 148		nc->monitor.enabled = false;
 149		nc->state = NCSI_CHANNEL_INVISIBLE;
 150		ncm->data[2] &= ~0x1;
 151		spin_unlock_irqrestore(&nc->lock, flags);
 152
 153		spin_lock_irqsave(&ndp->lock, flags);
 154		nc->state = NCSI_CHANNEL_ACTIVE;
 155		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
 156		spin_unlock_irqrestore(&ndp->lock, flags);
 157		ncsi_process_next_channel(ndp);
 158		return;
 159	}
 160
 161	spin_lock_irqsave(&nc->lock, flags);
 162	nc->monitor.state++;
 163	spin_unlock_irqrestore(&nc->lock, flags);
 164	mod_timer(&nc->monitor.timer, jiffies + HZ);
 165}
 166
 167void ncsi_start_channel_monitor(struct ncsi_channel *nc)
 168{
 169	unsigned long flags;
 170
 171	spin_lock_irqsave(&nc->lock, flags);
 172	WARN_ON_ONCE(nc->monitor.enabled);
 173	nc->monitor.enabled = true;
 174	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
 175	spin_unlock_irqrestore(&nc->lock, flags);
 176
 177	mod_timer(&nc->monitor.timer, jiffies + HZ);
 178}
 179
 180void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
 181{
 182	unsigned long flags;
 183
 184	spin_lock_irqsave(&nc->lock, flags);
 185	if (!nc->monitor.enabled) {
 186		spin_unlock_irqrestore(&nc->lock, flags);
 187		return;
 188	}
 189	nc->monitor.enabled = false;
 190	spin_unlock_irqrestore(&nc->lock, flags);
 191
 192	del_timer_sync(&nc->monitor.timer);
 193}
 194
 195struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
 196				       unsigned char id)
 197{
 198	struct ncsi_channel *nc;
 199
 200	NCSI_FOR_EACH_CHANNEL(np, nc) {
 201		if (nc->id == id)
 202			return nc;
 203	}
 204
 205	return NULL;
 206}
 207
 208struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
 209{
 210	struct ncsi_channel *nc, *tmp;
 211	int index;
 212	unsigned long flags;
 213
 214	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
 215	if (!nc)
 216		return NULL;
 217
 218	nc->id = id;
 219	nc->package = np;
 220	nc->state = NCSI_CHANNEL_INACTIVE;
 221	nc->monitor.enabled = false;
 222	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
 223	spin_lock_init(&nc->lock);
 224	INIT_LIST_HEAD(&nc->link);
 225	for (index = 0; index < NCSI_CAP_MAX; index++)
 226		nc->caps[index].index = index;
 227	for (index = 0; index < NCSI_MODE_MAX; index++)
 228		nc->modes[index].index = index;
 229
 230	spin_lock_irqsave(&np->lock, flags);
 231	tmp = ncsi_find_channel(np, id);
 232	if (tmp) {
 233		spin_unlock_irqrestore(&np->lock, flags);
 234		kfree(nc);
 235		return tmp;
 236	}
 237
 238	list_add_tail_rcu(&nc->node, &np->channels);
 239	np->channel_num++;
 240	spin_unlock_irqrestore(&np->lock, flags);
 241
 242	return nc;
 243}
 244
 245static void ncsi_remove_channel(struct ncsi_channel *nc)
 246{
 247	struct ncsi_package *np = nc->package;
 248	unsigned long flags;
 249
 250	spin_lock_irqsave(&nc->lock, flags);
 251
 252	/* Release filters */
 253	kfree(nc->mac_filter.addrs);
 254	kfree(nc->vlan_filter.vids);
 255
 256	nc->state = NCSI_CHANNEL_INACTIVE;
 257	spin_unlock_irqrestore(&nc->lock, flags);
 258	ncsi_stop_channel_monitor(nc);
 259
 260	/* Remove and free channel */
 261	spin_lock_irqsave(&np->lock, flags);
 262	list_del_rcu(&nc->node);
 263	np->channel_num--;
 264	spin_unlock_irqrestore(&np->lock, flags);
 265
 266	kfree(nc);
 267}
 268
 269struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
 270				       unsigned char id)
 271{
 272	struct ncsi_package *np;
 273
 274	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 275		if (np->id == id)
 276			return np;
 277	}
 278
 279	return NULL;
 280}
 281
 282struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
 283				      unsigned char id)
 284{
 285	struct ncsi_package *np, *tmp;
 286	unsigned long flags;
 287
 288	np = kzalloc(sizeof(*np), GFP_ATOMIC);
 289	if (!np)
 290		return NULL;
 291
 292	np->id = id;
 293	np->ndp = ndp;
 294	spin_lock_init(&np->lock);
 295	INIT_LIST_HEAD(&np->channels);
 296	np->channel_whitelist = UINT_MAX;
 297
 298	spin_lock_irqsave(&ndp->lock, flags);
 299	tmp = ncsi_find_package(ndp, id);
 300	if (tmp) {
 301		spin_unlock_irqrestore(&ndp->lock, flags);
 302		kfree(np);
 303		return tmp;
 304	}
 305
 306	list_add_tail_rcu(&np->node, &ndp->packages);
 307	ndp->package_num++;
 308	spin_unlock_irqrestore(&ndp->lock, flags);
 309
 310	return np;
 311}
 312
 313void ncsi_remove_package(struct ncsi_package *np)
 314{
 315	struct ncsi_dev_priv *ndp = np->ndp;
 316	struct ncsi_channel *nc, *tmp;
 317	unsigned long flags;
 318
 319	/* Release all child channels */
 320	list_for_each_entry_safe(nc, tmp, &np->channels, node)
 321		ncsi_remove_channel(nc);
 322
 323	/* Remove and free package */
 324	spin_lock_irqsave(&ndp->lock, flags);
 325	list_del_rcu(&np->node);
 326	ndp->package_num--;
 327	spin_unlock_irqrestore(&ndp->lock, flags);
 328
 329	kfree(np);
 330}
 331
 332void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
 333				   unsigned char id,
 334				   struct ncsi_package **np,
 335				   struct ncsi_channel **nc)
 336{
 337	struct ncsi_package *p;
 338	struct ncsi_channel *c;
 339
 340	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
 341	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
 342
 343	if (np)
 344		*np = p;
 345	if (nc)
 346		*nc = c;
 347}
 348
  349/* For two consecutive NCSI commands, the packet IDs shouldn't
  350 * be the same; otherwise a stale response could be matched to the
  351 * wrong request, so the available IDs are allocated round-robin.
  352 */
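/* Illustrative sketch (added note, not part of the upstream file):
 * allocation walks the request table upwards from ndp->request_id,
 * falls back to NCSI_REQ_START_IDX when the end of the table is
 * reached, and skips slots that are still marked used, e.g.
 *
 *	nr = ncsi_alloc_request(ndp, NCSI_REQ_FLAG_EVENT_DRIVEN);
 *	if (nr)
 *		...fill nr->cmd and transmit...
 *	// a later allocation hands out a different packet ID
 */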
 353struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
 354					unsigned int req_flags)
 355{
 356	struct ncsi_request *nr = NULL;
 357	int i, limit = ARRAY_SIZE(ndp->requests);
 358	unsigned long flags;
 359
 360	/* Check if there is one available request until the ceiling */
 361	spin_lock_irqsave(&ndp->lock, flags);
 362	for (i = ndp->request_id; i < limit; i++) {
 363		if (ndp->requests[i].used)
 364			continue;
 365
 366		nr = &ndp->requests[i];
 367		nr->used = true;
 368		nr->flags = req_flags;
 369		ndp->request_id = i + 1;
 370		goto found;
 371	}
 372
  373	/* Fall back to checking from the starting cursor */
 374	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
 375		if (ndp->requests[i].used)
 376			continue;
 377
 378		nr = &ndp->requests[i];
 379		nr->used = true;
 380		nr->flags = req_flags;
 381		ndp->request_id = i + 1;
 382		goto found;
 383	}
 384
 385found:
 386	spin_unlock_irqrestore(&ndp->lock, flags);
 387	return nr;
 388}
 389
 390void ncsi_free_request(struct ncsi_request *nr)
 391{
 392	struct ncsi_dev_priv *ndp = nr->ndp;
 393	struct sk_buff *cmd, *rsp;
 394	unsigned long flags;
 395	bool driven;
 396
 397	if (nr->enabled) {
 398		nr->enabled = false;
 399		del_timer_sync(&nr->timer);
 400	}
 401
 402	spin_lock_irqsave(&ndp->lock, flags);
 403	cmd = nr->cmd;
 404	rsp = nr->rsp;
 405	nr->cmd = NULL;
 406	nr->rsp = NULL;
 407	nr->used = false;
 408	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
 409	spin_unlock_irqrestore(&ndp->lock, flags);
 410
 411	if (driven && cmd && --ndp->pending_req_num == 0)
 412		schedule_work(&ndp->work);
 413
 414	/* Release command and response */
 415	consume_skb(cmd);
 416	consume_skb(rsp);
 417}
 418
 419struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
 420{
 421	struct ncsi_dev_priv *ndp;
 422
 423	NCSI_FOR_EACH_DEV(ndp) {
 424		if (ndp->ndev.dev == dev)
 425			return &ndp->ndev;
 426	}
 427
 428	return NULL;
 429}
 430
 431static void ncsi_request_timeout(struct timer_list *t)
 432{
 433	struct ncsi_request *nr = from_timer(nr, t, timer);
 434	struct ncsi_dev_priv *ndp = nr->ndp;
 435	struct ncsi_cmd_pkt *cmd;
 436	struct ncsi_package *np;
 437	struct ncsi_channel *nc;
 438	unsigned long flags;
 439
  440	/* If the request already has an associated response,
  441	 * let the response handler release it.
  442	 */
 443	spin_lock_irqsave(&ndp->lock, flags);
 444	nr->enabled = false;
 445	if (nr->rsp || !nr->cmd) {
 446		spin_unlock_irqrestore(&ndp->lock, flags);
 447		return;
 448	}
 449	spin_unlock_irqrestore(&ndp->lock, flags);
 450
 451	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
 452		if (nr->cmd) {
 453			/* Find the package */
 454			cmd = (struct ncsi_cmd_pkt *)
 455			      skb_network_header(nr->cmd);
 456			ncsi_find_package_and_channel(ndp,
 457						      cmd->cmd.common.channel,
 458						      &np, &nc);
 459			ncsi_send_netlink_timeout(nr, np, nc);
 460		}
 461	}
 462
 463	/* Release the request */
 464	ncsi_free_request(nr);
 465}
 466
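/* Explanatory note (added, not part of the upstream file): suspending a
 * channel is driven as a small state machine below, one NCSI command
 * per step: Select Package (SP), optionally Get Link Status (GLS) on
 * every channel when a failover reshuffle is pending, Disable Channel
 * Network Tx (DCNT), Disable Channel (DC), and finally Deselect Package
 * (DP) unless another channel in the package is still active.
 */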
 467static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
 468{
 469	struct ncsi_dev *nd = &ndp->ndev;
 470	struct ncsi_package *np;
 471	struct ncsi_channel *nc, *tmp;
 472	struct ncsi_cmd_arg nca;
 473	unsigned long flags;
 474	int ret;
 475
 476	np = ndp->active_package;
 477	nc = ndp->active_channel;
 478	nca.ndp = ndp;
 479	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
 480	switch (nd->state) {
 481	case ncsi_dev_state_suspend:
 482		nd->state = ncsi_dev_state_suspend_select;
 483		fallthrough;
 484	case ncsi_dev_state_suspend_select:
 485		ndp->pending_req_num = 1;
 486
 487		nca.type = NCSI_PKT_CMD_SP;
 488		nca.package = np->id;
 489		nca.channel = NCSI_RESERVED_CHANNEL;
 490		if (ndp->flags & NCSI_DEV_HWA)
 491			nca.bytes[0] = 0;
 492		else
 493			nca.bytes[0] = 1;
 494
  495		/* Retrieve the latest link states of the channels in the
  496		 * current package when the active channel needs to fail over
  497		 * to another one, since we may select another channel as the
  498		 * next active one. The channels' link states are the most
  499		 * important factor in that selection, so they need to be
  500		 * accurate. Unfortunately, the link states of inactive
  501		 * channels can't be updated by LSC AENs in time.
  502		 */
 503		if (ndp->flags & NCSI_DEV_RESHUFFLE)
 504			nd->state = ncsi_dev_state_suspend_gls;
 505		else
 506			nd->state = ncsi_dev_state_suspend_dcnt;
 507		ret = ncsi_xmit_cmd(&nca);
 508		if (ret)
 509			goto error;
 510
 511		break;
 512	case ncsi_dev_state_suspend_gls:
 513		ndp->pending_req_num = np->channel_num;
 514
 515		nca.type = NCSI_PKT_CMD_GLS;
 516		nca.package = np->id;
 517
 518		nd->state = ncsi_dev_state_suspend_dcnt;
 519		NCSI_FOR_EACH_CHANNEL(np, nc) {
 520			nca.channel = nc->id;
 521			ret = ncsi_xmit_cmd(&nca);
 522			if (ret)
 523				goto error;
 524		}
 525
 526		break;
 527	case ncsi_dev_state_suspend_dcnt:
 528		ndp->pending_req_num = 1;
 529
 530		nca.type = NCSI_PKT_CMD_DCNT;
 531		nca.package = np->id;
 532		nca.channel = nc->id;
 533
 534		nd->state = ncsi_dev_state_suspend_dc;
 535		ret = ncsi_xmit_cmd(&nca);
 536		if (ret)
 537			goto error;
 538
 539		break;
 540	case ncsi_dev_state_suspend_dc:
 541		ndp->pending_req_num = 1;
 542
 543		nca.type = NCSI_PKT_CMD_DC;
 544		nca.package = np->id;
 545		nca.channel = nc->id;
 546		nca.bytes[0] = 1;
 547
 548		nd->state = ncsi_dev_state_suspend_deselect;
 549		ret = ncsi_xmit_cmd(&nca);
 550		if (ret)
 551			goto error;
 552
 553		NCSI_FOR_EACH_CHANNEL(np, tmp) {
 554			/* If there is another channel active on this package
 555			 * do not deselect the package.
 556			 */
 557			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
 558				nd->state = ncsi_dev_state_suspend_done;
 559				break;
 560			}
 561		}
 562		break;
 563	case ncsi_dev_state_suspend_deselect:
 564		ndp->pending_req_num = 1;
 565
 566		nca.type = NCSI_PKT_CMD_DP;
 567		nca.package = np->id;
 568		nca.channel = NCSI_RESERVED_CHANNEL;
 569
 570		nd->state = ncsi_dev_state_suspend_done;
 571		ret = ncsi_xmit_cmd(&nca);
 572		if (ret)
 573			goto error;
 574
 575		break;
 576	case ncsi_dev_state_suspend_done:
 577		spin_lock_irqsave(&nc->lock, flags);
 578		nc->state = NCSI_CHANNEL_INACTIVE;
 579		spin_unlock_irqrestore(&nc->lock, flags);
 580		if (ndp->flags & NCSI_DEV_RESET)
 581			ncsi_reset_dev(nd);
 582		else
 583			ncsi_process_next_channel(ndp);
 584		break;
 585	default:
 586		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
 587			    nd->state);
 588	}
 589
 590	return;
 591error:
 592	nd->state = ncsi_dev_state_functional;
 593}
 594
 595/* Check the VLAN filter bitmap for a set filter, and construct a
 596 * "Set VLAN Filter - Disable" packet if found.
 597 */
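/* Explanatory note (added, not part of the upstream file): the VLAN
 * filter state is kept as a fixed-size vids[] array plus a bitmap of
 * which slots are in use; the "Set VLAN Filter" (SVF) command built
 * here and in set_one_vid() below addresses hardware filter entries,
 * which are numbered from 1, hence the "index + 1" written into
 * nca->bytes[6].
 */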
 598static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 599			 struct ncsi_cmd_arg *nca)
 600{
 601	struct ncsi_channel_vlan_filter *ncf;
 602	unsigned long flags;
 603	void *bitmap;
 604	int index;
 605	u16 vid;
 606
 607	ncf = &nc->vlan_filter;
 608	bitmap = &ncf->bitmap;
 609
 610	spin_lock_irqsave(&nc->lock, flags);
 611	index = find_first_bit(bitmap, ncf->n_vids);
 612	if (index >= ncf->n_vids) {
 613		spin_unlock_irqrestore(&nc->lock, flags);
 614		return -1;
 615	}
 616	vid = ncf->vids[index];
 617
 618	clear_bit(index, bitmap);
 619	ncf->vids[index] = 0;
 620	spin_unlock_irqrestore(&nc->lock, flags);
 621
 622	nca->type = NCSI_PKT_CMD_SVF;
 623	nca->words[1] = vid;
 624	/* HW filter index starts at 1 */
 625	nca->bytes[6] = index + 1;
 626	nca->bytes[7] = 0x00;
 627	return 0;
 628}
 629
 630/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
 631 * packet.
 632 */
 633static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 634		       struct ncsi_cmd_arg *nca)
 635{
 636	struct ncsi_channel_vlan_filter *ncf;
 637	struct vlan_vid *vlan = NULL;
 638	unsigned long flags;
 639	int i, index;
 640	void *bitmap;
 641	u16 vid;
 642
 643	if (list_empty(&ndp->vlan_vids))
 644		return -1;
 645
 646	ncf = &nc->vlan_filter;
 647	bitmap = &ncf->bitmap;
 648
 649	spin_lock_irqsave(&nc->lock, flags);
 650
 651	rcu_read_lock();
 652	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
 653		vid = vlan->vid;
 654		for (i = 0; i < ncf->n_vids; i++)
 655			if (ncf->vids[i] == vid) {
 656				vid = 0;
 657				break;
 658			}
 659		if (vid)
 660			break;
 661	}
 662	rcu_read_unlock();
 663
 664	if (!vid) {
  665		/* No VLAN ID found that is not already set */
 666		spin_unlock_irqrestore(&nc->lock, flags);
 667		return -1;
 668	}
 669
 670	index = find_first_zero_bit(bitmap, ncf->n_vids);
 671	if (index < 0 || index >= ncf->n_vids) {
 672		netdev_err(ndp->ndev.dev,
 673			   "Channel %u already has all VLAN filters set\n",
 674			   nc->id);
 675		spin_unlock_irqrestore(&nc->lock, flags);
 676		return -1;
 677	}
 678
 679	ncf->vids[index] = vid;
 680	set_bit(index, bitmap);
 681	spin_unlock_irqrestore(&nc->lock, flags);
 682
 683	nca->type = NCSI_PKT_CMD_SVF;
 684	nca->words[1] = vid;
 685	/* HW filter index starts at 1 */
 686	nca->bytes[6] = index + 1;
 687	nca->bytes[7] = 0x01;
 688
 689	return 0;
 690}
 691
 692static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
 693{
 694	unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
 695	int ret = 0;
 696
 697	nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
 698
 699	memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
 700	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
 701
 702	data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
 703
 704	/* PHY Link up attribute */
 705	data[6] = 0x1;
 706
 707	nca->data = data;
 708
 709	ret = ncsi_xmit_cmd(nca);
 710	if (ret)
 711		netdev_err(nca->ndp->ndev.dev,
 712			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 713			   nca->type);
 714	return ret;
 715}
 716
 717/* NCSI OEM Command APIs */
 718static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
 719{
 720	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
 721	int ret = 0;
 722
 723	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
 724
 725	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
 726	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
 727	data[5] = NCSI_OEM_BCM_CMD_GMA;
 728
 729	nca->data = data;
 730
 731	ret = ncsi_xmit_cmd(nca);
 732	if (ret)
 733		netdev_err(nca->ndp->ndev.dev,
 734			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 735			   nca->type);
 736	return ret;
 737}
 738
 739static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
 740{
 741	union {
 742		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
 743		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
 744	} u;
 745	int ret = 0;
 746
 747	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
 748
 749	memset(&u, 0, sizeof(u));
 750	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
 751	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
 752	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
 753
 754	nca->data = u.data_u8;
 755
 756	ret = ncsi_xmit_cmd(nca);
 757	if (ret)
 758		netdev_err(nca->ndp->ndev.dev,
 759			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 760			   nca->type);
 761	return ret;
 762}
 763
 764static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
 765{
 766	union {
 767		u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
 768		u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
 769	} u;
 770	int ret = 0;
 771
 772	memset(&u, 0, sizeof(u));
 773	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
 774	u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
 775	u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
 776	memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
 777	       nca->ndp->ndev.dev->dev_addr,	ETH_ALEN);
 778	u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
 779		(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
 780
 781	nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
 782	nca->data = u.data_u8;
 783
 784	ret = ncsi_xmit_cmd(nca);
 785	if (ret)
 786		netdev_err(nca->ndp->ndev.dev,
 787			   "NCSI: Failed to transmit cmd 0x%x during probe\n",
 788			   nca->type);
 789	return ret;
 790}
 791
 792static int ncsi_oem_gma_handler_intel(struct ncsi_cmd_arg *nca)
 793{
 794	unsigned char data[NCSI_OEM_INTEL_CMD_GMA_LEN];
 795	int ret = 0;
 796
 797	nca->payload = NCSI_OEM_INTEL_CMD_GMA_LEN;
 798
 799	memset(data, 0, NCSI_OEM_INTEL_CMD_GMA_LEN);
 800	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
 801	data[4] = NCSI_OEM_INTEL_CMD_GMA;
 802
 803	nca->data = data;
 804
 805	ret = ncsi_xmit_cmd(nca);
 806	if (ret)
 807		netdev_err(nca->ndp->ndev.dev,
 808			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 809			   nca->type);
 810
 811	return ret;
 812}
 813
 814/* OEM Command handlers initialization */
 815static struct ncsi_oem_gma_handler {
 816	unsigned int	mfr_id;
 817	int		(*handler)(struct ncsi_cmd_arg *nca);
 818} ncsi_oem_gma_handlers[] = {
 819	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
 820	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx },
 821	{ NCSI_OEM_MFR_INTEL_ID, ncsi_oem_gma_handler_intel }
 822};
 823
 824static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
 825{
 826	struct ncsi_oem_gma_handler *nch = NULL;
 827	int i;
 828
 829	/* This function should only be called once, return if flag set */
 830	if (nca->ndp->gma_flag == 1)
 831		return -1;
 832
 833	/* Find gma handler for given manufacturer id */
 834	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
 835		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
 836			if (ncsi_oem_gma_handlers[i].handler)
 837				nch = &ncsi_oem_gma_handlers[i];
 838			break;
 839			}
 840	}
 841
 842	if (!nch) {
 843		netdev_err(nca->ndp->ndev.dev,
 844			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
 845			   mf_id);
 846		return -1;
 847	}
 848
  849	/* Get MAC address from NCSI device */
 850	return nch->handler(nca);
 851}
 852
 853/* Determine if a given channel from the channel_queue should be used for Tx */
 854static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
 855			       struct ncsi_channel *nc)
 856{
 857	struct ncsi_channel_mode *ncm;
 858	struct ncsi_channel *channel;
 859	struct ncsi_package *np;
 860
 861	/* Check if any other channel has Tx enabled; a channel may have already
 862	 * been configured and removed from the channel queue.
 863	 */
 864	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 865		if (!ndp->multi_package && np != nc->package)
 866			continue;
 867		NCSI_FOR_EACH_CHANNEL(np, channel) {
 868			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
 869			if (ncm->enable)
 870				return false;
 871		}
 872	}
 873
 874	/* This channel is the preferred channel and has link */
 875	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
 876		np = channel->package;
 877		if (np->preferred_channel &&
 878		    ncsi_channel_has_link(np->preferred_channel)) {
 879			return np->preferred_channel == nc;
 880		}
 881	}
 882
 883	/* This channel has link */
 884	if (ncsi_channel_has_link(nc))
 885		return true;
 886
 887	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
 888		if (ncsi_channel_has_link(channel))
 889			return false;
 890
 891	/* No other channel has link; default to this one */
 892	return true;
 893}
 894
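/* Explanatory note (added, not part of the upstream file): the check
 * above effectively hands Tx out in this order of preference: nothing
 * if some other channel already has Tx enabled, the package's preferred
 * channel when it has link, this channel if it has link, and otherwise
 * this channel only when no queued channel has link at all.
 */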
 895/* Change the active Tx channel in a multi-channel setup */
 896int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
 897			   struct ncsi_package *package,
 898			   struct ncsi_channel *disable,
 899			   struct ncsi_channel *enable)
 900{
 901	struct ncsi_cmd_arg nca;
 902	struct ncsi_channel *nc;
 903	struct ncsi_package *np;
 904	int ret = 0;
 905
 906	if (!package->multi_channel && !ndp->multi_package)
 907		netdev_warn(ndp->ndev.dev,
 908			    "NCSI: Trying to update Tx channel in single-channel mode\n");
 909	nca.ndp = ndp;
 910	nca.req_flags = 0;
 911
 912	/* Find current channel with Tx enabled */
 913	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 914		if (disable)
 915			break;
 916		if (!ndp->multi_package && np != package)
 917			continue;
 918
 919		NCSI_FOR_EACH_CHANNEL(np, nc)
 920			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
 921				disable = nc;
 922				break;
 923			}
 924	}
 925
 926	/* Find a suitable channel for Tx */
 927	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 928		if (enable)
 929			break;
 930		if (!ndp->multi_package && np != package)
 931			continue;
 932		if (!(ndp->package_whitelist & (0x1 << np->id)))
 933			continue;
 934
 935		if (np->preferred_channel &&
 936		    ncsi_channel_has_link(np->preferred_channel)) {
 937			enable = np->preferred_channel;
 938			break;
 939		}
 940
 941		NCSI_FOR_EACH_CHANNEL(np, nc) {
 942			if (!(np->channel_whitelist & 0x1 << nc->id))
 943				continue;
 944			if (nc->state != NCSI_CHANNEL_ACTIVE)
 945				continue;
 946			if (ncsi_channel_has_link(nc)) {
 947				enable = nc;
 948				break;
 949			}
 950		}
 951	}
 952
 953	if (disable == enable)
 954		return -1;
 955
 956	if (!enable)
 957		return -1;
 958
 959	if (disable) {
 960		nca.channel = disable->id;
 961		nca.package = disable->package->id;
 962		nca.type = NCSI_PKT_CMD_DCNT;
 963		ret = ncsi_xmit_cmd(&nca);
 964		if (ret)
 965			netdev_err(ndp->ndev.dev,
 966				   "Error %d sending DCNT\n",
 967				   ret);
 968	}
 969
 970	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
 971
 972	nca.channel = enable->id;
 973	nca.package = enable->package->id;
 974	nca.type = NCSI_PKT_CMD_ECNT;
 975	ret = ncsi_xmit_cmd(&nca);
 976	if (ret)
 977		netdev_err(ndp->ndev.dev,
 978			   "Error %d sending ECNT\n",
 979			   ret);
 980
 981	return ret;
 982}
 983
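/* Explanatory note (added, not part of the upstream file): channel
 * configuration below also runs as a state machine, roughly
 * SP -> CIS -> (optional OEM/GMCMA "get MAC") -> clear VLAN filters ->
 * set VLAN filters -> EV/DV -> SMA -> EBF -> (optional DGMF) ->
 * (optional ECNT) -> EC -> (optional AE) -> GLS -> done, with one
 * command transmitted per work-queue pass.
 */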
 984static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
 985{
 986	struct ncsi_package *np = ndp->active_package;
 987	struct ncsi_channel *nc = ndp->active_channel;
 988	struct ncsi_channel *hot_nc = NULL;
 989	struct ncsi_dev *nd = &ndp->ndev;
 990	struct net_device *dev = nd->dev;
 991	struct ncsi_cmd_arg nca;
 992	unsigned char index;
 993	unsigned long flags;
 994	int ret;
 995
 996	nca.ndp = ndp;
 997	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
 998	switch (nd->state) {
 999	case ncsi_dev_state_config:
1000	case ncsi_dev_state_config_sp:
1001		ndp->pending_req_num = 1;
1002
1003		/* Select the specific package */
1004		nca.type = NCSI_PKT_CMD_SP;
1005		if (ndp->flags & NCSI_DEV_HWA)
1006			nca.bytes[0] = 0;
1007		else
1008			nca.bytes[0] = 1;
1009		nca.package = np->id;
1010		nca.channel = NCSI_RESERVED_CHANNEL;
1011		ret = ncsi_xmit_cmd(&nca);
1012		if (ret) {
1013			netdev_err(ndp->ndev.dev,
1014				   "NCSI: Failed to transmit CMD_SP\n");
1015			goto error;
1016		}
1017
1018		nd->state = ncsi_dev_state_config_cis;
1019		break;
1020	case ncsi_dev_state_config_cis:
1021		ndp->pending_req_num = 1;
1022
1023		/* Clear initial state */
1024		nca.type = NCSI_PKT_CMD_CIS;
1025		nca.package = np->id;
1026		nca.channel = nc->id;
1027		ret = ncsi_xmit_cmd(&nca);
1028		if (ret) {
1029			netdev_err(ndp->ndev.dev,
1030				   "NCSI: Failed to transmit CMD_CIS\n");
1031			goto error;
1032		}
1033
1034		nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
1035			  ? ncsi_dev_state_config_oem_gma
1036			  : ncsi_dev_state_config_clear_vids;
1037		break;
1038	case ncsi_dev_state_config_oem_gma:
1039		nd->state = ncsi_dev_state_config_clear_vids;
1040
1041		nca.package = np->id;
1042		nca.channel = nc->id;
1043		ndp->pending_req_num = 1;
1044		if (nc->version.major >= 1 && nc->version.minor >= 2) {
1045			nca.type = NCSI_PKT_CMD_GMCMA;
1046			ret = ncsi_xmit_cmd(&nca);
1047		} else {
1048			nca.type = NCSI_PKT_CMD_OEM;
1049			ret = ncsi_gma_handler(&nca, nc->version.mf_id);
1050		}
1051		if (ret < 0)
1052			schedule_work(&ndp->work);
1053
1054		break;
1055	case ncsi_dev_state_config_clear_vids:
1056	case ncsi_dev_state_config_svf:
1057	case ncsi_dev_state_config_ev:
1058	case ncsi_dev_state_config_sma:
1059	case ncsi_dev_state_config_ebf:
1060	case ncsi_dev_state_config_dgmf:
1061	case ncsi_dev_state_config_ecnt:
1062	case ncsi_dev_state_config_ec:
1063	case ncsi_dev_state_config_ae:
1064	case ncsi_dev_state_config_gls:
1065		ndp->pending_req_num = 1;
1066
1067		nca.package = np->id;
1068		nca.channel = nc->id;
1069
1070		/* Clear any active filters on the channel before setting */
1071		if (nd->state == ncsi_dev_state_config_clear_vids) {
1072			ret = clear_one_vid(ndp, nc, &nca);
1073			if (ret) {
1074				nd->state = ncsi_dev_state_config_svf;
1075				schedule_work(&ndp->work);
1076				break;
1077			}
1078			/* Repeat */
1079			nd->state = ncsi_dev_state_config_clear_vids;
1080		/* Add known VLAN tags to the filter */
1081		} else if (nd->state == ncsi_dev_state_config_svf) {
1082			ret = set_one_vid(ndp, nc, &nca);
1083			if (ret) {
1084				nd->state = ncsi_dev_state_config_ev;
1085				schedule_work(&ndp->work);
1086				break;
1087			}
1088			/* Repeat */
1089			nd->state = ncsi_dev_state_config_svf;
1090		/* Enable/Disable the VLAN filter */
1091		} else if (nd->state == ncsi_dev_state_config_ev) {
1092			if (list_empty(&ndp->vlan_vids)) {
1093				nca.type = NCSI_PKT_CMD_DV;
1094			} else {
1095				nca.type = NCSI_PKT_CMD_EV;
1096				nca.bytes[3] = NCSI_CAP_VLAN_NO;
1097			}
1098			nd->state = ncsi_dev_state_config_sma;
1099		} else if (nd->state == ncsi_dev_state_config_sma) {
1100		/* Use first entry in unicast filter table. Note that
1101		 * the MAC filter table starts from entry 1 instead of
1102		 * 0.
1103		 */
1104			nca.type = NCSI_PKT_CMD_SMA;
1105			for (index = 0; index < 6; index++)
1106				nca.bytes[index] = dev->dev_addr[index];
1107			nca.bytes[6] = 0x1;
1108			nca.bytes[7] = 0x1;
1109			nd->state = ncsi_dev_state_config_ebf;
1110		} else if (nd->state == ncsi_dev_state_config_ebf) {
1111			nca.type = NCSI_PKT_CMD_EBF;
1112			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
 1113			/* If multicast global filtering is supported then
 1114			 * disable it so that all multicast packets will be
 1115			 * forwarded to the management controller
 1116			 */
1117			if (nc->caps[NCSI_CAP_GENERIC].cap &
1118			    NCSI_CAP_GENERIC_MC)
1119				nd->state = ncsi_dev_state_config_dgmf;
1120			else if (ncsi_channel_is_tx(ndp, nc))
1121				nd->state = ncsi_dev_state_config_ecnt;
1122			else
1123				nd->state = ncsi_dev_state_config_ec;
1124		} else if (nd->state == ncsi_dev_state_config_dgmf) {
1125			nca.type = NCSI_PKT_CMD_DGMF;
1126			if (ncsi_channel_is_tx(ndp, nc))
1127				nd->state = ncsi_dev_state_config_ecnt;
1128			else
1129				nd->state = ncsi_dev_state_config_ec;
1130		} else if (nd->state == ncsi_dev_state_config_ecnt) {
1131			if (np->preferred_channel &&
1132			    nc != np->preferred_channel)
1133				netdev_info(ndp->ndev.dev,
1134					    "NCSI: Tx failed over to channel %u\n",
1135					    nc->id);
1136			nca.type = NCSI_PKT_CMD_ECNT;
1137			nd->state = ncsi_dev_state_config_ec;
1138		} else if (nd->state == ncsi_dev_state_config_ec) {
1139			/* Enable AEN if it's supported */
1140			nca.type = NCSI_PKT_CMD_EC;
1141			nd->state = ncsi_dev_state_config_ae;
1142			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1143				nd->state = ncsi_dev_state_config_gls;
1144		} else if (nd->state == ncsi_dev_state_config_ae) {
1145			nca.type = NCSI_PKT_CMD_AE;
1146			nca.bytes[0] = 0;
1147			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1148			nd->state = ncsi_dev_state_config_gls;
1149		} else if (nd->state == ncsi_dev_state_config_gls) {
1150			nca.type = NCSI_PKT_CMD_GLS;
1151			nd->state = ncsi_dev_state_config_done;
1152		}
1153
1154		ret = ncsi_xmit_cmd(&nca);
1155		if (ret) {
1156			netdev_err(ndp->ndev.dev,
1157				   "NCSI: Failed to transmit CMD %x\n",
1158				   nca.type);
1159			goto error;
1160		}
1161		break;
1162	case ncsi_dev_state_config_done:
1163		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1164			   nc->id);
1165		spin_lock_irqsave(&nc->lock, flags);
1166		nc->state = NCSI_CHANNEL_ACTIVE;
1167
1168		if (ndp->flags & NCSI_DEV_RESET) {
1169			/* A reset event happened during config, start it now */
1170			nc->reconfigure_needed = false;
1171			spin_unlock_irqrestore(&nc->lock, flags);
1172			ncsi_reset_dev(nd);
1173			break;
1174		}
1175
1176		if (nc->reconfigure_needed) {
1177			/* This channel's configuration has been updated
1178			 * part-way during the config state - start the
1179			 * channel configuration over
1180			 */
1181			nc->reconfigure_needed = false;
1182			nc->state = NCSI_CHANNEL_INACTIVE;
1183			spin_unlock_irqrestore(&nc->lock, flags);
1184
1185			spin_lock_irqsave(&ndp->lock, flags);
1186			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1187			spin_unlock_irqrestore(&ndp->lock, flags);
1188
1189			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1190			ncsi_process_next_channel(ndp);
1191			break;
1192		}
1193
1194		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1195			hot_nc = nc;
1196		} else {
1197			hot_nc = NULL;
1198			netdev_dbg(ndp->ndev.dev,
1199				   "NCSI: channel %u link down after config\n",
1200				   nc->id);
1201		}
1202		spin_unlock_irqrestore(&nc->lock, flags);
1203
1204		/* Update the hot channel */
1205		spin_lock_irqsave(&ndp->lock, flags);
1206		ndp->hot_channel = hot_nc;
1207		spin_unlock_irqrestore(&ndp->lock, flags);
1208
1209		ncsi_start_channel_monitor(nc);
1210		ncsi_process_next_channel(ndp);
1211		break;
1212	default:
1213		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1214			     nd->state);
1215	}
1216
1217	return;
1218
1219error:
1220	ncsi_report_link(ndp, true);
1221}
1222
1223static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1224{
1225	struct ncsi_channel *nc, *found, *hot_nc;
1226	struct ncsi_channel_mode *ncm;
1227	unsigned long flags, cflags;
1228	struct ncsi_package *np;
1229	bool with_link;
1230
1231	spin_lock_irqsave(&ndp->lock, flags);
1232	hot_nc = ndp->hot_channel;
1233	spin_unlock_irqrestore(&ndp->lock, flags);
1234
 1235	/* By default the search stops once an inactive channel with link
 1236	 * up is found, unless a preferred channel is set.
 1237	 * If multi_package or multi_channel are configured, all channels in
 1238	 * the whitelist are added to the channel queue.
 1239	 */
1240	found = NULL;
1241	with_link = false;
1242	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1243		if (!(ndp->package_whitelist & (0x1 << np->id)))
1244			continue;
1245		NCSI_FOR_EACH_CHANNEL(np, nc) {
1246			if (!(np->channel_whitelist & (0x1 << nc->id)))
1247				continue;
1248
1249			spin_lock_irqsave(&nc->lock, cflags);
1250
1251			if (!list_empty(&nc->link) ||
1252			    nc->state != NCSI_CHANNEL_INACTIVE) {
1253				spin_unlock_irqrestore(&nc->lock, cflags);
1254				continue;
1255			}
1256
1257			if (!found)
1258				found = nc;
1259
1260			if (nc == hot_nc)
1261				found = nc;
1262
1263			ncm = &nc->modes[NCSI_MODE_LINK];
1264			if (ncm->data[2] & 0x1) {
1265				found = nc;
1266				with_link = true;
1267			}
1268
1269			/* If multi_channel is enabled configure all valid
1270			 * channels whether or not they currently have link
1271			 * so they will have AENs enabled.
1272			 */
1273			if (with_link || np->multi_channel) {
1274				spin_lock_irqsave(&ndp->lock, flags);
1275				list_add_tail_rcu(&nc->link,
1276						  &ndp->channel_queue);
1277				spin_unlock_irqrestore(&ndp->lock, flags);
1278
1279				netdev_dbg(ndp->ndev.dev,
1280					   "NCSI: Channel %u added to queue (link %s)\n",
1281					   nc->id,
1282					   ncm->data[2] & 0x1 ? "up" : "down");
1283			}
1284
1285			spin_unlock_irqrestore(&nc->lock, cflags);
1286
1287			if (with_link && !np->multi_channel)
1288				break;
1289		}
1290		if (with_link && !ndp->multi_package)
1291			break;
1292	}
1293
1294	if (list_empty(&ndp->channel_queue) && found) {
1295		netdev_info(ndp->ndev.dev,
1296			    "NCSI: No channel with link found, configuring channel %u\n",
1297			    found->id);
1298		spin_lock_irqsave(&ndp->lock, flags);
1299		list_add_tail_rcu(&found->link, &ndp->channel_queue);
1300		spin_unlock_irqrestore(&ndp->lock, flags);
1301	} else if (!found) {
1302		netdev_warn(ndp->ndev.dev,
1303			    "NCSI: No channel found to configure!\n");
1304		ncsi_report_link(ndp, true);
1305		return -ENODEV;
1306	}
1307
1308	return ncsi_process_next_channel(ndp);
1309}
1310
1311static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1312{
1313	struct ncsi_package *np;
1314	struct ncsi_channel *nc;
1315	unsigned int cap;
1316	bool has_channel = false;
1317
 1318	/* Hardware arbitration is disabled if any one channel
 1319	 * doesn't explicitly support it.
 1320	 */
1321	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1322		NCSI_FOR_EACH_CHANNEL(np, nc) {
1323			has_channel = true;
1324
1325			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1326			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1327			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1328			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1329				ndp->flags &= ~NCSI_DEV_HWA;
1330				return false;
1331			}
1332		}
1333	}
1334
1335	if (has_channel) {
1336		ndp->flags |= NCSI_DEV_HWA;
1337		return true;
1338	}
1339
1340	ndp->flags &= ~NCSI_DEV_HWA;
1341	return false;
1342}
1343
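/* Explanatory note (added, not part of the upstream file): probing
 * deselects all eight possible packages first, then for each package in
 * turn selects it, clears the initial state of its channels, optionally
 * issues OEM commands (Mellanox GMA/SMAF, Intel "keep PHY"), gathers
 * version, capabilities and link status (GVI, GC, GLS) per channel, and
 * deselects the package again before moving on to the next one.
 */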
1344static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1345{
1346	struct ncsi_dev *nd = &ndp->ndev;
1347	struct ncsi_package *np;
1348	struct ncsi_channel *nc;
1349	struct ncsi_cmd_arg nca;
1350	unsigned char index;
1351	int ret;
1352
1353	nca.ndp = ndp;
1354	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1355	switch (nd->state) {
1356	case ncsi_dev_state_probe:
1357		nd->state = ncsi_dev_state_probe_deselect;
1358		fallthrough;
1359	case ncsi_dev_state_probe_deselect:
1360		ndp->pending_req_num = 8;
1361
1362		/* Deselect all possible packages */
1363		nca.type = NCSI_PKT_CMD_DP;
1364		nca.channel = NCSI_RESERVED_CHANNEL;
1365		for (index = 0; index < 8; index++) {
1366			nca.package = index;
1367			ret = ncsi_xmit_cmd(&nca);
1368			if (ret)
1369				goto error;
1370		}
1371
1372		nd->state = ncsi_dev_state_probe_package;
1373		break;
1374	case ncsi_dev_state_probe_package:
1375		ndp->pending_req_num = 1;
1376
1377		nca.type = NCSI_PKT_CMD_SP;
1378		nca.bytes[0] = 1;
1379		nca.package = ndp->package_probe_id;
1380		nca.channel = NCSI_RESERVED_CHANNEL;
1381		ret = ncsi_xmit_cmd(&nca);
1382		if (ret)
1383			goto error;
1384		nd->state = ncsi_dev_state_probe_channel;
1385		break;
1386	case ncsi_dev_state_probe_channel:
1387		ndp->active_package = ncsi_find_package(ndp,
1388							ndp->package_probe_id);
1389		if (!ndp->active_package) {
1390			/* No response */
1391			nd->state = ncsi_dev_state_probe_dp;
1392			schedule_work(&ndp->work);
1393			break;
1394		}
1395		nd->state = ncsi_dev_state_probe_cis;
1396		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
1397		    ndp->mlx_multi_host)
1398			nd->state = ncsi_dev_state_probe_mlx_gma;
1399
1400		schedule_work(&ndp->work);
1401		break;
1402	case ncsi_dev_state_probe_mlx_gma:
1403		ndp->pending_req_num = 1;
1404
1405		nca.type = NCSI_PKT_CMD_OEM;
1406		nca.package = ndp->active_package->id;
1407		nca.channel = 0;
1408		ret = ncsi_oem_gma_handler_mlx(&nca);
1409		if (ret)
1410			goto error;
1411
1412		nd->state = ncsi_dev_state_probe_mlx_smaf;
1413		break;
1414	case ncsi_dev_state_probe_mlx_smaf:
1415		ndp->pending_req_num = 1;
1416
1417		nca.type = NCSI_PKT_CMD_OEM;
1418		nca.package = ndp->active_package->id;
1419		nca.channel = 0;
1420		ret = ncsi_oem_smaf_mlx(&nca);
1421		if (ret)
1422			goto error;
1423
1424		nd->state = ncsi_dev_state_probe_cis;
1425		break;
1426	case ncsi_dev_state_probe_cis:
1427		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1428
1429		/* Clear initial state */
1430		nca.type = NCSI_PKT_CMD_CIS;
1431		nca.package = ndp->active_package->id;
1432		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1433			nca.channel = index;
1434			ret = ncsi_xmit_cmd(&nca);
1435			if (ret)
1436				goto error;
1437		}
1438
1439		nd->state = ncsi_dev_state_probe_gvi;
1440		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
1441			nd->state = ncsi_dev_state_probe_keep_phy;
1442		break;
1443	case ncsi_dev_state_probe_keep_phy:
1444		ndp->pending_req_num = 1;
1445
1446		nca.type = NCSI_PKT_CMD_OEM;
1447		nca.package = ndp->active_package->id;
1448		nca.channel = 0;
1449		ret = ncsi_oem_keep_phy_intel(&nca);
1450		if (ret)
1451			goto error;
1452
1453		nd->state = ncsi_dev_state_probe_gvi;
1454		break;
1455	case ncsi_dev_state_probe_gvi:
1456	case ncsi_dev_state_probe_gc:
1457	case ncsi_dev_state_probe_gls:
1458		np = ndp->active_package;
1459		ndp->pending_req_num = np->channel_num;
1460
1461		/* Retrieve version, capability or link status */
1462		if (nd->state == ncsi_dev_state_probe_gvi)
1463			nca.type = NCSI_PKT_CMD_GVI;
1464		else if (nd->state == ncsi_dev_state_probe_gc)
1465			nca.type = NCSI_PKT_CMD_GC;
1466		else
1467			nca.type = NCSI_PKT_CMD_GLS;
1468
1469		nca.package = np->id;
1470		NCSI_FOR_EACH_CHANNEL(np, nc) {
1471			nca.channel = nc->id;
1472			ret = ncsi_xmit_cmd(&nca);
1473			if (ret)
1474				goto error;
1475		}
1476
1477		if (nd->state == ncsi_dev_state_probe_gvi)
1478			nd->state = ncsi_dev_state_probe_gc;
1479		else if (nd->state == ncsi_dev_state_probe_gc)
1480			nd->state = ncsi_dev_state_probe_gls;
1481		else
1482			nd->state = ncsi_dev_state_probe_dp;
1483		break;
1484	case ncsi_dev_state_probe_dp:
1485		ndp->pending_req_num = 1;
1486
1487		/* Deselect the current package */
1488		nca.type = NCSI_PKT_CMD_DP;
1489		nca.package = ndp->package_probe_id;
1490		nca.channel = NCSI_RESERVED_CHANNEL;
1491		ret = ncsi_xmit_cmd(&nca);
1492		if (ret)
1493			goto error;
1494
1495		/* Probe next package */
1496		ndp->package_probe_id++;
1497		if (ndp->package_probe_id >= 8) {
1498			/* Probe finished */
1499			ndp->flags |= NCSI_DEV_PROBED;
1500			break;
1501		}
1502		nd->state = ncsi_dev_state_probe_package;
1503		ndp->active_package = NULL;
1504		break;
1505	default:
1506		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1507			    nd->state);
1508	}
1509
1510	if (ndp->flags & NCSI_DEV_PROBED) {
1511		/* Check if all packages have HWA support */
1512		ncsi_check_hwa(ndp);
1513		ncsi_choose_active_channel(ndp);
1514	}
1515
1516	return;
1517error:
1518	netdev_err(ndp->ndev.dev,
1519		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
1520		   nca.type);
1521	ncsi_report_link(ndp, true);
1522}
1523
1524static void ncsi_dev_work(struct work_struct *work)
1525{
1526	struct ncsi_dev_priv *ndp = container_of(work,
1527			struct ncsi_dev_priv, work);
1528	struct ncsi_dev *nd = &ndp->ndev;
1529
1530	switch (nd->state & ncsi_dev_state_major) {
1531	case ncsi_dev_state_probe:
1532		ncsi_probe_channel(ndp);
1533		break;
1534	case ncsi_dev_state_suspend:
1535		ncsi_suspend_channel(ndp);
1536		break;
1537	case ncsi_dev_state_config:
1538		ncsi_configure_channel(ndp);
1539		break;
1540	default:
1541		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1542			    nd->state);
1543	}
1544}
1545
1546int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1547{
1548	struct ncsi_channel *nc;
1549	int old_state;
1550	unsigned long flags;
1551
1552	spin_lock_irqsave(&ndp->lock, flags);
1553	nc = list_first_or_null_rcu(&ndp->channel_queue,
1554				    struct ncsi_channel, link);
1555	if (!nc) {
1556		spin_unlock_irqrestore(&ndp->lock, flags);
1557		goto out;
1558	}
1559
1560	list_del_init(&nc->link);
1561	spin_unlock_irqrestore(&ndp->lock, flags);
1562
1563	spin_lock_irqsave(&nc->lock, flags);
1564	old_state = nc->state;
1565	nc->state = NCSI_CHANNEL_INVISIBLE;
1566	spin_unlock_irqrestore(&nc->lock, flags);
1567
1568	ndp->active_channel = nc;
1569	ndp->active_package = nc->package;
1570
1571	switch (old_state) {
1572	case NCSI_CHANNEL_INACTIVE:
1573		ndp->ndev.state = ncsi_dev_state_config;
1574		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1575	                   nc->id);
1576		ncsi_configure_channel(ndp);
1577		break;
1578	case NCSI_CHANNEL_ACTIVE:
1579		ndp->ndev.state = ncsi_dev_state_suspend;
1580		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1581			   nc->id);
1582		ncsi_suspend_channel(ndp);
1583		break;
1584	default:
1585		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1586			   old_state, nc->package->id, nc->id);
1587		ncsi_report_link(ndp, false);
1588		return -EINVAL;
1589	}
1590
1591	return 0;
1592
1593out:
1594	ndp->active_channel = NULL;
1595	ndp->active_package = NULL;
1596	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1597		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1598		return ncsi_choose_active_channel(ndp);
1599	}
1600
1601	ncsi_report_link(ndp, false);
1602	return -ENODEV;
1603}
1604
1605static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1606{
1607	struct ncsi_dev *nd = &ndp->ndev;
1608	struct ncsi_channel *nc;
1609	struct ncsi_package *np;
1610	unsigned long flags;
1611	unsigned int n = 0;
1612
1613	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1614		NCSI_FOR_EACH_CHANNEL(np, nc) {
1615			spin_lock_irqsave(&nc->lock, flags);
1616
1617			/* Channels may be busy, mark dirty instead of
 1618			 * kicking if:
 1619			 * a) not ACTIVE (configured)
 1620			 * b) in the channel_queue (to be configured)
 1621			 * c) its ndev is in the config state
1622			 */
1623			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1624				if ((ndp->ndev.state & 0xff00) ==
1625						ncsi_dev_state_config ||
1626						!list_empty(&nc->link)) {
1627					netdev_dbg(nd->dev,
1628						   "NCSI: channel %p marked dirty\n",
1629						   nc);
1630					nc->reconfigure_needed = true;
1631				}
1632				spin_unlock_irqrestore(&nc->lock, flags);
1633				continue;
1634			}
1635
1636			spin_unlock_irqrestore(&nc->lock, flags);
1637
1638			ncsi_stop_channel_monitor(nc);
1639			spin_lock_irqsave(&nc->lock, flags);
1640			nc->state = NCSI_CHANNEL_INACTIVE;
1641			spin_unlock_irqrestore(&nc->lock, flags);
1642
1643			spin_lock_irqsave(&ndp->lock, flags);
1644			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1645			spin_unlock_irqrestore(&ndp->lock, flags);
1646
1647			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1648			n++;
1649		}
1650	}
1651
1652	return n;
1653}
1654
1655int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1656{
1657	struct ncsi_dev_priv *ndp;
1658	unsigned int n_vids = 0;
1659	struct vlan_vid *vlan;
1660	struct ncsi_dev *nd;
1661	bool found = false;
1662
1663	if (vid == 0)
1664		return 0;
1665
1666	nd = ncsi_find_dev(dev);
1667	if (!nd) {
1668		netdev_warn(dev, "NCSI: No net_device?\n");
1669		return 0;
1670	}
1671
1672	ndp = TO_NCSI_DEV_PRIV(nd);
1673
1674	/* Add the VLAN id to our internal list */
1675	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1676		n_vids++;
1677		if (vlan->vid == vid) {
1678			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1679				   vid);
1680			return 0;
1681		}
1682	}
1683	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1684		netdev_warn(dev,
1685			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1686			    vid, NCSI_MAX_VLAN_VIDS);
1687		return -ENOSPC;
1688	}
1689
1690	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1691	if (!vlan)
1692		return -ENOMEM;
1693
1694	vlan->proto = proto;
1695	vlan->vid = vid;
1696	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1697
1698	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1699
1700	found = ncsi_kick_channels(ndp) != 0;
1701
1702	return found ? ncsi_process_next_channel(ndp) : 0;
1703}
1704EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1705
1706int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1707{
1708	struct vlan_vid *vlan, *tmp;
1709	struct ncsi_dev_priv *ndp;
1710	struct ncsi_dev *nd;
1711	bool found = false;
1712
1713	if (vid == 0)
1714		return 0;
1715
1716	nd = ncsi_find_dev(dev);
1717	if (!nd) {
1718		netdev_warn(dev, "NCSI: no net_device?\n");
1719		return 0;
1720	}
1721
1722	ndp = TO_NCSI_DEV_PRIV(nd);
1723
1724	/* Remove the VLAN id from our internal list */
1725	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1726		if (vlan->vid == vid) {
1727			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1728			list_del_rcu(&vlan->list);
1729			found = true;
1730			kfree(vlan);
1731		}
1732
1733	if (!found) {
1734		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1735		return -EINVAL;
1736	}
1737
1738	found = ncsi_kick_channels(ndp) != 0;
1739
1740	return found ? ncsi_process_next_channel(ndp) : 0;
1741}
1742EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1743
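/* Illustrative usage sketch (added note, not part of the upstream
 * file): a network driver exposing an NC-SI capable interface would
 * typically wire this API up roughly as follows; the handler name is
 * hypothetical:
 *
 *	static void mydrv_ncsi_handler(struct ncsi_dev *nd)
 *	{
 *		if (nd->state != ncsi_dev_state_functional)
 *			return;
 *		if (nd->link_up)
 *			netif_carrier_on(nd->dev);
 *		else
 *			netif_carrier_off(nd->dev);
 *	}
 *
 *	ndev = ncsi_register_dev(netdev, mydrv_ncsi_handler);	// at probe
 *	ncsi_start_dev(ndev);					// at open
 *	ncsi_stop_dev(ndev);					// at stop
 *	ncsi_unregister_dev(ndev);				// at removal
 */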
1744struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1745				   void (*handler)(struct ncsi_dev *ndev))
1746{
1747	struct ncsi_dev_priv *ndp;
1748	struct ncsi_dev *nd;
1749	struct platform_device *pdev;
1750	struct device_node *np;
1751	unsigned long flags;
1752	int i;
1753
1754	/* Check if the device has been registered or not */
1755	nd = ncsi_find_dev(dev);
1756	if (nd)
1757		return nd;
1758
1759	/* Create NCSI device */
1760	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1761	if (!ndp)
1762		return NULL;
1763
1764	nd = &ndp->ndev;
1765	nd->state = ncsi_dev_state_registered;
1766	nd->dev = dev;
1767	nd->handler = handler;
1768	ndp->pending_req_num = 0;
1769	INIT_LIST_HEAD(&ndp->channel_queue);
1770	INIT_LIST_HEAD(&ndp->vlan_vids);
1771	INIT_WORK(&ndp->work, ncsi_dev_work);
1772	ndp->package_whitelist = UINT_MAX;
1773
1774	/* Initialize private NCSI device */
1775	spin_lock_init(&ndp->lock);
1776	INIT_LIST_HEAD(&ndp->packages);
1777	ndp->request_id = NCSI_REQ_START_IDX;
1778	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1779		ndp->requests[i].id = i;
1780		ndp->requests[i].ndp = ndp;
1781		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1782	}
1783
1784	spin_lock_irqsave(&ncsi_dev_lock, flags);
1785	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1786	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1787
1788	/* Register NCSI packet Rx handler */
1789	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1790	ndp->ptype.func = ncsi_rcv_rsp;
1791	ndp->ptype.dev = dev;
1792	dev_add_pack(&ndp->ptype);
1793
1794	pdev = to_platform_device(dev->dev.parent);
1795	if (pdev) {
1796		np = pdev->dev.of_node;
1797		if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
1798			   of_property_read_bool(np, "mlx,multi-host")))
1799			ndp->mlx_multi_host = true;
1800	}
1801
1802	return nd;
1803}
1804EXPORT_SYMBOL_GPL(ncsi_register_dev);
1805
1806int ncsi_start_dev(struct ncsi_dev *nd)
1807{
1808	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1809
1810	if (nd->state != ncsi_dev_state_registered &&
1811	    nd->state != ncsi_dev_state_functional)
1812		return -ENOTTY;
1813
1814	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1815		ndp->package_probe_id = 0;
1816		nd->state = ncsi_dev_state_probe;
1817		schedule_work(&ndp->work);
1818		return 0;
1819	}
1820
1821	return ncsi_reset_dev(nd);
1822}
1823EXPORT_SYMBOL_GPL(ncsi_start_dev);
1824
1825void ncsi_stop_dev(struct ncsi_dev *nd)
1826{
1827	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1828	struct ncsi_package *np;
1829	struct ncsi_channel *nc;
1830	bool chained;
1831	int old_state;
1832	unsigned long flags;
1833
1834	/* Stop the channel monitor on any active channels. Don't reset the
1835	 * channel state so we know which were active when ncsi_start_dev()
1836	 * is next called.
1837	 */
1838	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1839		NCSI_FOR_EACH_CHANNEL(np, nc) {
1840			ncsi_stop_channel_monitor(nc);
1841
1842			spin_lock_irqsave(&nc->lock, flags);
1843			chained = !list_empty(&nc->link);
1844			old_state = nc->state;
1845			spin_unlock_irqrestore(&nc->lock, flags);
1846
1847			WARN_ON_ONCE(chained ||
1848				     old_state == NCSI_CHANNEL_INVISIBLE);
1849		}
1850	}
1851
1852	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1853	ncsi_report_link(ndp, true);
1854}
1855EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1856
1857int ncsi_reset_dev(struct ncsi_dev *nd)
1858{
1859	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1860	struct ncsi_channel *nc, *active, *tmp;
1861	struct ncsi_package *np;
1862	unsigned long flags;
1863
1864	spin_lock_irqsave(&ndp->lock, flags);
1865
1866	if (!(ndp->flags & NCSI_DEV_RESET)) {
1867		/* Haven't been called yet, check states */
1868		switch (nd->state & ncsi_dev_state_major) {
1869		case ncsi_dev_state_registered:
1870		case ncsi_dev_state_probe:
1871			/* Not even probed yet - do nothing */
1872			spin_unlock_irqrestore(&ndp->lock, flags);
1873			return 0;
1874		case ncsi_dev_state_suspend:
1875		case ncsi_dev_state_config:
1876			/* Wait for the channel to finish its suspend/config
1877			 * operation; once it finishes it will check for
1878			 * NCSI_DEV_RESET and reset the state.
1879			 */
1880			ndp->flags |= NCSI_DEV_RESET;
1881			spin_unlock_irqrestore(&ndp->lock, flags);
1882			return 0;
1883		}
1884	} else {
1885		switch (nd->state) {
1886		case ncsi_dev_state_suspend_done:
1887		case ncsi_dev_state_config_done:
1888		case ncsi_dev_state_functional:
1889			/* Ok */
1890			break;
1891		default:
 1892			/* A reset operation is already in progress */
1893			spin_unlock_irqrestore(&ndp->lock, flags);
1894			return 0;
1895		}
1896	}
1897
1898	if (!list_empty(&ndp->channel_queue)) {
1899		/* Clear any channel queue we may have interrupted */
1900		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1901			list_del_init(&nc->link);
1902	}
1903	spin_unlock_irqrestore(&ndp->lock, flags);
1904
1905	active = NULL;
1906	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1907		NCSI_FOR_EACH_CHANNEL(np, nc) {
1908			spin_lock_irqsave(&nc->lock, flags);
1909
1910			if (nc->state == NCSI_CHANNEL_ACTIVE) {
1911				active = nc;
1912				nc->state = NCSI_CHANNEL_INVISIBLE;
1913				spin_unlock_irqrestore(&nc->lock, flags);
1914				ncsi_stop_channel_monitor(nc);
1915				break;
1916			}
1917
1918			spin_unlock_irqrestore(&nc->lock, flags);
1919		}
1920		if (active)
1921			break;
1922	}
1923
1924	if (!active) {
1925		/* Done */
1926		spin_lock_irqsave(&ndp->lock, flags);
1927		ndp->flags &= ~NCSI_DEV_RESET;
1928		spin_unlock_irqrestore(&ndp->lock, flags);
1929		return ncsi_choose_active_channel(ndp);
1930	}
1931
1932	spin_lock_irqsave(&ndp->lock, flags);
1933	ndp->flags |= NCSI_DEV_RESET;
1934	ndp->active_channel = active;
1935	ndp->active_package = active->package;
1936	spin_unlock_irqrestore(&ndp->lock, flags);
1937
1938	nd->state = ncsi_dev_state_suspend;
1939	schedule_work(&ndp->work);
1940	return 0;
1941}
1942
1943void ncsi_unregister_dev(struct ncsi_dev *nd)
1944{
1945	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1946	struct ncsi_package *np, *tmp;
1947	unsigned long flags;
1948
1949	dev_remove_pack(&ndp->ptype);
1950
1951	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1952		ncsi_remove_package(np);
1953
1954	spin_lock_irqsave(&ncsi_dev_lock, flags);
1955	list_del_rcu(&ndp->node);
1956	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1957
1958	kfree(ndp);
1959}
1960EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
 162	nc->monitor.state++;
 163	spin_unlock_irqrestore(&nc->lock, flags);
 164	mod_timer(&nc->monitor.timer, jiffies + HZ);
 165}
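     /* The monitor timer above fires roughly once a second: in the START and
      * RETRY states it sends a Get Link Status command, then idles through
      * the WAIT window while the response is outstanding; if the counter
      * runs past WAIT_MAX the channel has stopped responding, so link is
      * forced down, NCSI_DEV_RESHUFFLE is set and the channel is queued so
      * ncsi_process_next_channel() can recover it.
      */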
 166
 167void ncsi_start_channel_monitor(struct ncsi_channel *nc)
 168{
 169	unsigned long flags;
 170
 171	spin_lock_irqsave(&nc->lock, flags);
 172	WARN_ON_ONCE(nc->monitor.enabled);
 173	nc->monitor.enabled = true;
 174	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
 175	spin_unlock_irqrestore(&nc->lock, flags);
 176
 177	mod_timer(&nc->monitor.timer, jiffies + HZ);
 178}
 179
 180void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
 181{
 182	unsigned long flags;
 183
 184	spin_lock_irqsave(&nc->lock, flags);
 185	if (!nc->monitor.enabled) {
 186		spin_unlock_irqrestore(&nc->lock, flags);
 187		return;
 188	}
 189	nc->monitor.enabled = false;
 190	spin_unlock_irqrestore(&nc->lock, flags);
 191
 192	del_timer_sync(&nc->monitor.timer);
 193}
 194
 195struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
 196				       unsigned char id)
 197{
 198	struct ncsi_channel *nc;
 199
 200	NCSI_FOR_EACH_CHANNEL(np, nc) {
 201		if (nc->id == id)
 202			return nc;
 203	}
 204
 205	return NULL;
 206}
 207
 208struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
 209{
 210	struct ncsi_channel *nc, *tmp;
 211	int index;
 212	unsigned long flags;
 213
 214	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
 215	if (!nc)
 216		return NULL;
 217
 218	nc->id = id;
 219	nc->package = np;
 220	nc->state = NCSI_CHANNEL_INACTIVE;
 221	nc->monitor.enabled = false;
 222	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
 223	spin_lock_init(&nc->lock);
 224	INIT_LIST_HEAD(&nc->link);
 225	for (index = 0; index < NCSI_CAP_MAX; index++)
 226		nc->caps[index].index = index;
 227	for (index = 0; index < NCSI_MODE_MAX; index++)
 228		nc->modes[index].index = index;
 229
 230	spin_lock_irqsave(&np->lock, flags);
 231	tmp = ncsi_find_channel(np, id);
 232	if (tmp) {
 233		spin_unlock_irqrestore(&np->lock, flags);
 234		kfree(nc);
 235		return tmp;
 236	}
 237
 238	list_add_tail_rcu(&nc->node, &np->channels);
 239	np->channel_num++;
 240	spin_unlock_irqrestore(&np->lock, flags);
 241
 242	return nc;
 243}
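     /* ncsi_add_channel() above and ncsi_add_package() below follow the same
      * pattern: the new node is allocated with GFP_ATOMIC outside the lock,
      * then a racing insertion is re-checked under the lock and the fresh
      * allocation is freed if another caller won the race.
      */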
 244
 245static void ncsi_remove_channel(struct ncsi_channel *nc)
 246{
 247	struct ncsi_package *np = nc->package;
 248	unsigned long flags;
 249
 250	spin_lock_irqsave(&nc->lock, flags);
 251
 252	/* Release filters */
 253	kfree(nc->mac_filter.addrs);
 254	kfree(nc->vlan_filter.vids);
 255
 256	nc->state = NCSI_CHANNEL_INACTIVE;
 257	spin_unlock_irqrestore(&nc->lock, flags);
 258	ncsi_stop_channel_monitor(nc);
 259
 260	/* Remove and free channel */
 261	spin_lock_irqsave(&np->lock, flags);
 262	list_del_rcu(&nc->node);
 263	np->channel_num--;
 264	spin_unlock_irqrestore(&np->lock, flags);
 265
 266	kfree(nc);
 267}
 268
 269struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
 270				       unsigned char id)
 271{
 272	struct ncsi_package *np;
 273
 274	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 275		if (np->id == id)
 276			return np;
 277	}
 278
 279	return NULL;
 280}
 281
 282struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
 283				      unsigned char id)
 284{
 285	struct ncsi_package *np, *tmp;
 286	unsigned long flags;
 287
 288	np = kzalloc(sizeof(*np), GFP_ATOMIC);
 289	if (!np)
 290		return NULL;
 291
 292	np->id = id;
 293	np->ndp = ndp;
 294	spin_lock_init(&np->lock);
 295	INIT_LIST_HEAD(&np->channels);
 296	np->channel_whitelist = UINT_MAX;
 297
 298	spin_lock_irqsave(&ndp->lock, flags);
 299	tmp = ncsi_find_package(ndp, id);
 300	if (tmp) {
 301		spin_unlock_irqrestore(&ndp->lock, flags);
 302		kfree(np);
 303		return tmp;
 304	}
 305
 306	list_add_tail_rcu(&np->node, &ndp->packages);
 307	ndp->package_num++;
 308	spin_unlock_irqrestore(&ndp->lock, flags);
 309
 310	return np;
 311}
 312
 313void ncsi_remove_package(struct ncsi_package *np)
 314{
 315	struct ncsi_dev_priv *ndp = np->ndp;
 316	struct ncsi_channel *nc, *tmp;
 317	unsigned long flags;
 318
 319	/* Release all child channels */
 320	list_for_each_entry_safe(nc, tmp, &np->channels, node)
 321		ncsi_remove_channel(nc);
 322
 323	/* Remove and free package */
 324	spin_lock_irqsave(&ndp->lock, flags);
 325	list_del_rcu(&np->node);
 326	ndp->package_num--;
 327	spin_unlock_irqrestore(&ndp->lock, flags);
 328
 329	kfree(np);
 330}
 331
 332void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
 333				   unsigned char id,
 334				   struct ncsi_package **np,
 335				   struct ncsi_channel **nc)
 336{
 337	struct ncsi_package *p;
 338	struct ncsi_channel *c;
 339
 340	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
 341	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
 342
 343	if (np)
 344		*np = p;
 345	if (nc)
 346		*nc = c;
 347}
 348
  349/* For two consecutive NCSI commands, the packet IDs shouldn't be the
  350 * same; otherwise a stale response could be matched to the wrong
  351 * request. So the available IDs are allocated in round-robin fashion.
  352 */
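     /* The allocator below scans forward from the current cursor to the end
      * of the request table and, failing that, wraps around to retry from
      * NCSI_REQ_START_IDX up to the cursor, so consecutive commands receive
      * distinct packet IDs until every slot is in use.
      */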
 353struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
 354					unsigned int req_flags)
 355{
 356	struct ncsi_request *nr = NULL;
 357	int i, limit = ARRAY_SIZE(ndp->requests);
 358	unsigned long flags;
 359
 360	/* Check if there is one available request until the ceiling */
 361	spin_lock_irqsave(&ndp->lock, flags);
 362	for (i = ndp->request_id; i < limit; i++) {
 363		if (ndp->requests[i].used)
 364			continue;
 365
 366		nr = &ndp->requests[i];
 367		nr->used = true;
 368		nr->flags = req_flags;
 369		ndp->request_id = i + 1;
 370		goto found;
 371	}
 372
  373	/* Fall back to checking from the starting cursor */
 374	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
 375		if (ndp->requests[i].used)
 376			continue;
 377
 378		nr = &ndp->requests[i];
 379		nr->used = true;
 380		nr->flags = req_flags;
 381		ndp->request_id = i + 1;
 382		goto found;
 383	}
 384
 385found:
 386	spin_unlock_irqrestore(&ndp->lock, flags);
 387	return nr;
 388}
 389
 390void ncsi_free_request(struct ncsi_request *nr)
 391{
 392	struct ncsi_dev_priv *ndp = nr->ndp;
 393	struct sk_buff *cmd, *rsp;
 394	unsigned long flags;
 395	bool driven;
 396
 397	if (nr->enabled) {
 398		nr->enabled = false;
 399		del_timer_sync(&nr->timer);
 400	}
 401
 402	spin_lock_irqsave(&ndp->lock, flags);
 403	cmd = nr->cmd;
 404	rsp = nr->rsp;
 405	nr->cmd = NULL;
 406	nr->rsp = NULL;
 407	nr->used = false;
 408	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
 409	spin_unlock_irqrestore(&ndp->lock, flags);
 410
 411	if (driven && cmd && --ndp->pending_req_num == 0)
 412		schedule_work(&ndp->work);
 413
 414	/* Release command and response */
 415	consume_skb(cmd);
 416	consume_skb(rsp);
 417}
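     /* Freeing the last outstanding event-driven request above
      * (pending_req_num dropping to zero) re-schedules the device work so
      * the state machine in ncsi_dev_work() can advance to its next step.
      */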
 418
 419struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
 420{
 421	struct ncsi_dev_priv *ndp;
 422
 423	NCSI_FOR_EACH_DEV(ndp) {
 424		if (ndp->ndev.dev == dev)
 425			return &ndp->ndev;
 426	}
 427
 428	return NULL;
 429}
 430
 431static void ncsi_request_timeout(struct timer_list *t)
 432{
 433	struct ncsi_request *nr = from_timer(nr, t, timer);
 434	struct ncsi_dev_priv *ndp = nr->ndp;
 435	struct ncsi_cmd_pkt *cmd;
 436	struct ncsi_package *np;
 437	struct ncsi_channel *nc;
 438	unsigned long flags;
 439
  440	/* If the request already has an associated response,
  441	 * let the response handler release it.
 442	 */
 443	spin_lock_irqsave(&ndp->lock, flags);
 444	nr->enabled = false;
 445	if (nr->rsp || !nr->cmd) {
 446		spin_unlock_irqrestore(&ndp->lock, flags);
 447		return;
 448	}
 449	spin_unlock_irqrestore(&ndp->lock, flags);
 450
 451	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
 452		if (nr->cmd) {
 453			/* Find the package */
 454			cmd = (struct ncsi_cmd_pkt *)
 455			      skb_network_header(nr->cmd);
 456			ncsi_find_package_and_channel(ndp,
 457						      cmd->cmd.common.channel,
 458						      &np, &nc);
 459			ncsi_send_netlink_timeout(nr, np, nc);
 460		}
 461	}
 462
 463	/* Release the request */
 464	ncsi_free_request(nr);
 465}
 466
 467static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
 468{
 469	struct ncsi_dev *nd = &ndp->ndev;
 470	struct ncsi_package *np;
 471	struct ncsi_channel *nc, *tmp;
 472	struct ncsi_cmd_arg nca;
 473	unsigned long flags;
 474	int ret;
 475
 476	np = ndp->active_package;
 477	nc = ndp->active_channel;
 478	nca.ndp = ndp;
 479	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
 480	switch (nd->state) {
 481	case ncsi_dev_state_suspend:
 482		nd->state = ncsi_dev_state_suspend_select;
 483		fallthrough;
 484	case ncsi_dev_state_suspend_select:
 485		ndp->pending_req_num = 1;
 486
 487		nca.type = NCSI_PKT_CMD_SP;
 488		nca.package = np->id;
 489		nca.channel = NCSI_RESERVED_CHANNEL;
 490		if (ndp->flags & NCSI_DEV_HWA)
 491			nca.bytes[0] = 0;
 492		else
 493			nca.bytes[0] = 1;
 494
  495		/* Retrieve the last link states of the channels in the
  496		 * current package when the active channel needs to fail
  497		 * over to another one, since we may select a different
  498		 * channel as the next active one. The channels' link
  499		 * states are the most important factor in that selection,
  500		 * so they must be accurate. Unfortunately, the link states
  501		 * of inactive channels can't be updated by LSC AENs in time.
  502		 */
 503		if (ndp->flags & NCSI_DEV_RESHUFFLE)
 504			nd->state = ncsi_dev_state_suspend_gls;
 505		else
 506			nd->state = ncsi_dev_state_suspend_dcnt;
 507		ret = ncsi_xmit_cmd(&nca);
 508		if (ret)
 509			goto error;
 510
 511		break;
 512	case ncsi_dev_state_suspend_gls:
 513		ndp->pending_req_num = 1;
 514
 515		nca.type = NCSI_PKT_CMD_GLS;
 516		nca.package = np->id;
 517		nca.channel = ndp->channel_probe_id;
 518		ret = ncsi_xmit_cmd(&nca);
 519		if (ret)
 520			goto error;
 521		ndp->channel_probe_id++;
 522
 523		if (ndp->channel_probe_id == ndp->channel_count) {
 524			ndp->channel_probe_id = 0;
 525			nd->state = ncsi_dev_state_suspend_dcnt;
 526		}
 527
 528		break;
 529	case ncsi_dev_state_suspend_dcnt:
 530		ndp->pending_req_num = 1;
 531
 532		nca.type = NCSI_PKT_CMD_DCNT;
 533		nca.package = np->id;
 534		nca.channel = nc->id;
 535
 536		nd->state = ncsi_dev_state_suspend_dc;
 537		ret = ncsi_xmit_cmd(&nca);
 538		if (ret)
 539			goto error;
 540
 541		break;
 542	case ncsi_dev_state_suspend_dc:
 543		ndp->pending_req_num = 1;
 544
 545		nca.type = NCSI_PKT_CMD_DC;
 546		nca.package = np->id;
 547		nca.channel = nc->id;
 548		nca.bytes[0] = 1;
 549
 550		nd->state = ncsi_dev_state_suspend_deselect;
 551		ret = ncsi_xmit_cmd(&nca);
 552		if (ret)
 553			goto error;
 554
 555		NCSI_FOR_EACH_CHANNEL(np, tmp) {
 556			/* If there is another channel active on this package
 557			 * do not deselect the package.
 558			 */
 559			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
 560				nd->state = ncsi_dev_state_suspend_done;
 561				break;
 562			}
 563		}
 564		break;
 565	case ncsi_dev_state_suspend_deselect:
 566		ndp->pending_req_num = 1;
 567
 568		nca.type = NCSI_PKT_CMD_DP;
 569		nca.package = np->id;
 570		nca.channel = NCSI_RESERVED_CHANNEL;
 571
 572		nd->state = ncsi_dev_state_suspend_done;
 573		ret = ncsi_xmit_cmd(&nca);
 574		if (ret)
 575			goto error;
 576
 577		break;
 578	case ncsi_dev_state_suspend_done:
 579		spin_lock_irqsave(&nc->lock, flags);
 580		nc->state = NCSI_CHANNEL_INACTIVE;
 581		spin_unlock_irqrestore(&nc->lock, flags);
 582		if (ndp->flags & NCSI_DEV_RESET)
 583			ncsi_reset_dev(nd);
 584		else
 585			ncsi_process_next_channel(ndp);
 586		break;
 587	default:
 588		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
 589			    nd->state);
 590	}
 591
 592	return;
 593error:
 594	nd->state = ncsi_dev_state_functional;
 595}
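     /* Suspend sequence driven by ncsi_suspend_channel() above: SP re-selects
      * the package, an optional GLS sweep over the package's channels runs
      * when a reshuffle is pending (to refresh link states before picking a
      * new channel), then DCNT and DC shut the channel down, and DP deselects
      * the package unless another channel on it is still active; the done
      * state marks the channel INACTIVE and either resets the device or
      * processes the next queued channel.
      */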
 596
 597/* Check the VLAN filter bitmap for a set filter, and construct a
 598 * "Set VLAN Filter - Disable" packet if found.
 599 */
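     /* clear_one_vid() and set_one_vid() below build the same SVF payload:
      * the VLAN ID goes in words[1], the 1-based hardware filter index in
      * bytes[6], and bytes[7] selects enable (0x01) or disable (0x00).
      */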
 600static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 601			 struct ncsi_cmd_arg *nca)
 602{
 603	struct ncsi_channel_vlan_filter *ncf;
 604	unsigned long flags;
 605	void *bitmap;
 606	int index;
 607	u16 vid;
 608
 609	ncf = &nc->vlan_filter;
 610	bitmap = &ncf->bitmap;
 611
 612	spin_lock_irqsave(&nc->lock, flags);
 613	index = find_first_bit(bitmap, ncf->n_vids);
 614	if (index >= ncf->n_vids) {
 615		spin_unlock_irqrestore(&nc->lock, flags);
 616		return -1;
 617	}
 618	vid = ncf->vids[index];
 619
 620	clear_bit(index, bitmap);
 621	ncf->vids[index] = 0;
 622	spin_unlock_irqrestore(&nc->lock, flags);
 623
 624	nca->type = NCSI_PKT_CMD_SVF;
 625	nca->words[1] = vid;
 626	/* HW filter index starts at 1 */
 627	nca->bytes[6] = index + 1;
 628	nca->bytes[7] = 0x00;
 629	return 0;
 630}
 631
 632/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
 633 * packet.
 634 */
 635static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 636		       struct ncsi_cmd_arg *nca)
 637{
 638	struct ncsi_channel_vlan_filter *ncf;
 639	struct vlan_vid *vlan = NULL;
 640	unsigned long flags;
 641	int i, index;
 642	void *bitmap;
 643	u16 vid;
 644
 645	if (list_empty(&ndp->vlan_vids))
 646		return -1;
 647
 648	ncf = &nc->vlan_filter;
 649	bitmap = &ncf->bitmap;
 650
 651	spin_lock_irqsave(&nc->lock, flags);
 652
 653	rcu_read_lock();
 654	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
 655		vid = vlan->vid;
 656		for (i = 0; i < ncf->n_vids; i++)
 657			if (ncf->vids[i] == vid) {
 658				vid = 0;
 659				break;
 660			}
 661		if (vid)
 662			break;
 663	}
 664	rcu_read_unlock();
 665
 666	if (!vid) {
  667		/* Every VLAN ID in the list is already set in the filter */
 668		spin_unlock_irqrestore(&nc->lock, flags);
 669		return -1;
 670	}
 671
 672	index = find_first_zero_bit(bitmap, ncf->n_vids);
 673	if (index < 0 || index >= ncf->n_vids) {
 674		netdev_err(ndp->ndev.dev,
 675			   "Channel %u already has all VLAN filters set\n",
 676			   nc->id);
 677		spin_unlock_irqrestore(&nc->lock, flags);
 678		return -1;
 679	}
 680
 681	ncf->vids[index] = vid;
 682	set_bit(index, bitmap);
 683	spin_unlock_irqrestore(&nc->lock, flags);
 684
 685	nca->type = NCSI_PKT_CMD_SVF;
 686	nca->words[1] = vid;
 687	/* HW filter index starts at 1 */
 688	nca->bytes[6] = index + 1;
 689	nca->bytes[7] = 0x01;
 690
 691	return 0;
 692}
 693
 694static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
 695{
 696	unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
 697	int ret = 0;
 698
 699	nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
 700
 701	memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
 702	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
 703
 704	data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
 705
 706	/* PHY Link up attribute */
 707	data[6] = 0x1;
 708
 709	nca->data = data;
 710
 711	ret = ncsi_xmit_cmd(nca);
 712	if (ret)
 713		netdev_err(nca->ndp->ndev.dev,
 714			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 715			   nca->type);
 716	return ret;
 717}
 718
 719/* NCSI OEM Command APIs */
 720static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
 721{
 722	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
 723	int ret = 0;
 724
 725	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
 726
 727	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
 728	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
 729	data[5] = NCSI_OEM_BCM_CMD_GMA;
 730
 731	nca->data = data;
 732
 733	ret = ncsi_xmit_cmd(nca);
 734	if (ret)
 735		netdev_err(nca->ndp->ndev.dev,
 736			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 737			   nca->type);
 738	return ret;
 739}
 740
 741static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
 742{
 743	union {
 744		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
 745		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
 746	} u;
 747	int ret = 0;
 748
 749	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
 750
 751	memset(&u, 0, sizeof(u));
 752	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
 753	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
 754	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
 755
 756	nca->data = u.data_u8;
 757
 758	ret = ncsi_xmit_cmd(nca);
 759	if (ret)
 760		netdev_err(nca->ndp->ndev.dev,
 761			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 762			   nca->type);
 763	return ret;
 764}
 765
 766static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
 767{
 768	union {
 769		u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
 770		u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
 771	} u;
 772	int ret = 0;
 773
 774	memset(&u, 0, sizeof(u));
 775	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
 776	u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
 777	u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
 778	memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
 779	       nca->ndp->ndev.dev->dev_addr,	ETH_ALEN);
 780	u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
 781		(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
 782
 783	nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
 784	nca->data = u.data_u8;
 785
 786	ret = ncsi_xmit_cmd(nca);
 787	if (ret)
 788		netdev_err(nca->ndp->ndev.dev,
 789			   "NCSI: Failed to transmit cmd 0x%x during probe\n",
 790			   nca->type);
 791	return ret;
 792}
 793
 794static int ncsi_oem_gma_handler_intel(struct ncsi_cmd_arg *nca)
 795{
 796	unsigned char data[NCSI_OEM_INTEL_CMD_GMA_LEN];
 797	int ret = 0;
 798
 799	nca->payload = NCSI_OEM_INTEL_CMD_GMA_LEN;
 800
 801	memset(data, 0, NCSI_OEM_INTEL_CMD_GMA_LEN);
 802	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
 803	data[4] = NCSI_OEM_INTEL_CMD_GMA;
 804
 805	nca->data = data;
 806
 807	ret = ncsi_xmit_cmd(nca);
 808	if (ret)
 809		netdev_err(nca->ndp->ndev.dev,
 810			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 811			   nca->type);
 812
 813	return ret;
 814}
 815
 816/* OEM Command handlers initialization */
 817static struct ncsi_oem_gma_handler {
 818	unsigned int	mfr_id;
 819	int		(*handler)(struct ncsi_cmd_arg *nca);
 820} ncsi_oem_gma_handlers[] = {
 821	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
 822	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx },
 823	{ NCSI_OEM_MFR_INTEL_ID, ncsi_oem_gma_handler_intel }
 824};
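     /* ncsi_gma_handler() below picks the handler matching the manufacturer
      * ID reported by the channel and issues the corresponding OEM "Get MAC
      * Address" command; the gma_flag check ensures this is attempted only
      * once per device.
      */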
 825
 826static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
 827{
 828	struct ncsi_oem_gma_handler *nch = NULL;
 829	int i;
 830
 831	/* This function should only be called once, return if flag set */
 832	if (nca->ndp->gma_flag == 1)
 833		return -1;
 834
 835	/* Find gma handler for given manufacturer id */
 836	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
 837		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
 838			if (ncsi_oem_gma_handlers[i].handler)
 839				nch = &ncsi_oem_gma_handlers[i];
 840			break;
  841		}
 842	}
 843
 844	if (!nch) {
 845		netdev_err(nca->ndp->ndev.dev,
 846			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
 847			   mf_id);
 848		return -1;
 849	}
 850
 851	/* Get Mac address from NCSI device */
 852	return nch->handler(nca);
 853}
 854
 855/* Determine if a given channel from the channel_queue should be used for Tx */
 856static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
 857			       struct ncsi_channel *nc)
 858{
 859	struct ncsi_channel_mode *ncm;
 860	struct ncsi_channel *channel;
 861	struct ncsi_package *np;
 862
 863	/* Check if any other channel has Tx enabled; a channel may have already
 864	 * been configured and removed from the channel queue.
 865	 */
 866	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 867		if (!ndp->multi_package && np != nc->package)
 868			continue;
 869		NCSI_FOR_EACH_CHANNEL(np, channel) {
 870			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
 871			if (ncm->enable)
 872				return false;
 873		}
 874	}
 875
 876	/* This channel is the preferred channel and has link */
 877	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
 878		np = channel->package;
 879		if (np->preferred_channel &&
 880		    ncsi_channel_has_link(np->preferred_channel)) {
 881			return np->preferred_channel == nc;
 882		}
 883	}
 884
 885	/* This channel has link */
 886	if (ncsi_channel_has_link(nc))
 887		return true;
 888
 889	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
 890		if (ncsi_channel_has_link(channel))
 891			return false;
 892
 893	/* No other channel has link; default to this one */
 894	return true;
 895}
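     /* Precedence implemented above: a channel that already has Tx enabled
      * anywhere blocks enabling another; otherwise a preferred channel with
      * link decides; then this channel wins if it has link, and if it does
      * not, it only wins when no other queued channel has link either.
      */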
 896
 897/* Change the active Tx channel in a multi-channel setup */
 898int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
 899			   struct ncsi_package *package,
 900			   struct ncsi_channel *disable,
 901			   struct ncsi_channel *enable)
 902{
 903	struct ncsi_cmd_arg nca;
 904	struct ncsi_channel *nc;
 905	struct ncsi_package *np;
 906	int ret = 0;
 907
 908	if (!package->multi_channel && !ndp->multi_package)
 909		netdev_warn(ndp->ndev.dev,
 910			    "NCSI: Trying to update Tx channel in single-channel mode\n");
 911	nca.ndp = ndp;
 912	nca.req_flags = 0;
 913
 914	/* Find current channel with Tx enabled */
 915	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 916		if (disable)
 917			break;
 918		if (!ndp->multi_package && np != package)
 919			continue;
 920
 921		NCSI_FOR_EACH_CHANNEL(np, nc)
 922			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
 923				disable = nc;
 924				break;
 925			}
 926	}
 927
 928	/* Find a suitable channel for Tx */
 929	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 930		if (enable)
 931			break;
 932		if (!ndp->multi_package && np != package)
 933			continue;
 934		if (!(ndp->package_whitelist & (0x1 << np->id)))
 935			continue;
 936
 937		if (np->preferred_channel &&
 938		    ncsi_channel_has_link(np->preferred_channel)) {
 939			enable = np->preferred_channel;
 940			break;
 941		}
 942
 943		NCSI_FOR_EACH_CHANNEL(np, nc) {
 944			if (!(np->channel_whitelist & 0x1 << nc->id))
 945				continue;
 946			if (nc->state != NCSI_CHANNEL_ACTIVE)
 947				continue;
 948			if (ncsi_channel_has_link(nc)) {
 949				enable = nc;
 950				break;
 951			}
 952		}
 953	}
 954
 955	if (disable == enable)
 956		return -1;
 957
 958	if (!enable)
 959		return -1;
 960
 961	if (disable) {
 962		nca.channel = disable->id;
 963		nca.package = disable->package->id;
 964		nca.type = NCSI_PKT_CMD_DCNT;
 965		ret = ncsi_xmit_cmd(&nca);
 966		if (ret)
 967			netdev_err(ndp->ndev.dev,
 968				   "Error %d sending DCNT\n",
 969				   ret);
 970	}
 971
 972	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
 973
 974	nca.channel = enable->id;
 975	nca.package = enable->package->id;
 976	nca.type = NCSI_PKT_CMD_ECNT;
 977	ret = ncsi_xmit_cmd(&nca);
 978	if (ret)
 979		netdev_err(ndp->ndev.dev,
 980			   "Error %d sending ECNT\n",
 981			   ret);
 982
 983	return ret;
 984}
 985
 986static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
 987{
 988	struct ncsi_package *np = ndp->active_package;
 989	struct ncsi_channel *nc = ndp->active_channel;
 990	struct ncsi_channel *hot_nc = NULL;
 991	struct ncsi_dev *nd = &ndp->ndev;
 992	struct net_device *dev = nd->dev;
 993	struct ncsi_cmd_arg nca;
 994	unsigned char index;
 995	unsigned long flags;
 996	int ret;
 997
 998	nca.ndp = ndp;
 999	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1000	switch (nd->state) {
1001	case ncsi_dev_state_config:
1002	case ncsi_dev_state_config_sp:
1003		ndp->pending_req_num = 1;
1004
1005		/* Select the specific package */
1006		nca.type = NCSI_PKT_CMD_SP;
1007		if (ndp->flags & NCSI_DEV_HWA)
1008			nca.bytes[0] = 0;
1009		else
1010			nca.bytes[0] = 1;
1011		nca.package = np->id;
1012		nca.channel = NCSI_RESERVED_CHANNEL;
1013		ret = ncsi_xmit_cmd(&nca);
1014		if (ret) {
1015			netdev_err(ndp->ndev.dev,
1016				   "NCSI: Failed to transmit CMD_SP\n");
1017			goto error;
1018		}
1019
1020		nd->state = ncsi_dev_state_config_cis;
1021		break;
1022	case ncsi_dev_state_config_cis:
1023		ndp->pending_req_num = 1;
1024
1025		/* Clear initial state */
1026		nca.type = NCSI_PKT_CMD_CIS;
1027		nca.package = np->id;
1028		nca.channel = nc->id;
1029		ret = ncsi_xmit_cmd(&nca);
1030		if (ret) {
1031			netdev_err(ndp->ndev.dev,
1032				   "NCSI: Failed to transmit CMD_CIS\n");
1033			goto error;
1034		}
1035
1036		nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
1037			  ? ncsi_dev_state_config_oem_gma
1038			  : ncsi_dev_state_config_clear_vids;
1039		break;
1040	case ncsi_dev_state_config_oem_gma:
1041		nd->state = ncsi_dev_state_config_apply_mac;
1042
1043		nca.package = np->id;
1044		nca.channel = nc->id;
1045		ndp->pending_req_num = 1;
1046		if (nc->version.major >= 1 && nc->version.minor >= 2) {
1047			nca.type = NCSI_PKT_CMD_GMCMA;
1048			ret = ncsi_xmit_cmd(&nca);
1049		} else {
1050			nca.type = NCSI_PKT_CMD_OEM;
1051			ret = ncsi_gma_handler(&nca, nc->version.mf_id);
1052		}
1053		if (ret < 0) {
1054			nd->state = ncsi_dev_state_config_clear_vids;
1055			schedule_work(&ndp->work);
1056		}
1057
1058		break;
1059	case ncsi_dev_state_config_apply_mac:
1060		rtnl_lock();
1061		ret = dev_set_mac_address(dev, &ndp->pending_mac, NULL);
1062		rtnl_unlock();
1063		if (ret < 0)
 1064			netdev_warn(dev, "NCSI: Writing MAC address to device failed\n");
1065
1066		nd->state = ncsi_dev_state_config_clear_vids;
1067
1068		fallthrough;
1069	case ncsi_dev_state_config_clear_vids:
1070	case ncsi_dev_state_config_svf:
1071	case ncsi_dev_state_config_ev:
1072	case ncsi_dev_state_config_sma:
1073	case ncsi_dev_state_config_ebf:
1074	case ncsi_dev_state_config_dgmf:
1075	case ncsi_dev_state_config_ecnt:
1076	case ncsi_dev_state_config_ec:
1077	case ncsi_dev_state_config_ae:
1078	case ncsi_dev_state_config_gls:
1079		ndp->pending_req_num = 1;
1080
1081		nca.package = np->id;
1082		nca.channel = nc->id;
1083
1084		/* Clear any active filters on the channel before setting */
1085		if (nd->state == ncsi_dev_state_config_clear_vids) {
1086			ret = clear_one_vid(ndp, nc, &nca);
1087			if (ret) {
1088				nd->state = ncsi_dev_state_config_svf;
1089				schedule_work(&ndp->work);
1090				break;
1091			}
1092			/* Repeat */
1093			nd->state = ncsi_dev_state_config_clear_vids;
1094		/* Add known VLAN tags to the filter */
1095		} else if (nd->state == ncsi_dev_state_config_svf) {
1096			ret = set_one_vid(ndp, nc, &nca);
1097			if (ret) {
1098				nd->state = ncsi_dev_state_config_ev;
1099				schedule_work(&ndp->work);
1100				break;
1101			}
1102			/* Repeat */
1103			nd->state = ncsi_dev_state_config_svf;
1104		/* Enable/Disable the VLAN filter */
1105		} else if (nd->state == ncsi_dev_state_config_ev) {
1106			if (list_empty(&ndp->vlan_vids)) {
1107				nca.type = NCSI_PKT_CMD_DV;
1108			} else {
1109				nca.type = NCSI_PKT_CMD_EV;
1110				nca.bytes[3] = NCSI_CAP_VLAN_NO;
1111			}
1112			nd->state = ncsi_dev_state_config_sma;
1113		} else if (nd->state == ncsi_dev_state_config_sma) {
1114		/* Use first entry in unicast filter table. Note that
1115		 * the MAC filter table starts from entry 1 instead of
1116		 * 0.
1117		 */
1118			nca.type = NCSI_PKT_CMD_SMA;
1119			for (index = 0; index < 6; index++)
1120				nca.bytes[index] = dev->dev_addr[index];
1121			nca.bytes[6] = 0x1;
1122			nca.bytes[7] = 0x1;
1123			nd->state = ncsi_dev_state_config_ebf;
1124		} else if (nd->state == ncsi_dev_state_config_ebf) {
1125			nca.type = NCSI_PKT_CMD_EBF;
1126			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
1127			/* if multicast global filtering is supported then
 1128			 * disable it so that all multicast packets will be
 1129			 * forwarded to the management controller
1130			 */
1131			if (nc->caps[NCSI_CAP_GENERIC].cap &
1132			    NCSI_CAP_GENERIC_MC)
1133				nd->state = ncsi_dev_state_config_dgmf;
1134			else if (ncsi_channel_is_tx(ndp, nc))
1135				nd->state = ncsi_dev_state_config_ecnt;
1136			else
1137				nd->state = ncsi_dev_state_config_ec;
1138		} else if (nd->state == ncsi_dev_state_config_dgmf) {
1139			nca.type = NCSI_PKT_CMD_DGMF;
1140			if (ncsi_channel_is_tx(ndp, nc))
1141				nd->state = ncsi_dev_state_config_ecnt;
1142			else
1143				nd->state = ncsi_dev_state_config_ec;
1144		} else if (nd->state == ncsi_dev_state_config_ecnt) {
1145			if (np->preferred_channel &&
1146			    nc != np->preferred_channel)
1147				netdev_info(ndp->ndev.dev,
1148					    "NCSI: Tx failed over to channel %u\n",
1149					    nc->id);
1150			nca.type = NCSI_PKT_CMD_ECNT;
1151			nd->state = ncsi_dev_state_config_ec;
1152		} else if (nd->state == ncsi_dev_state_config_ec) {
1153			/* Enable AEN if it's supported */
1154			nca.type = NCSI_PKT_CMD_EC;
1155			nd->state = ncsi_dev_state_config_ae;
1156			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1157				nd->state = ncsi_dev_state_config_gls;
1158		} else if (nd->state == ncsi_dev_state_config_ae) {
1159			nca.type = NCSI_PKT_CMD_AE;
1160			nca.bytes[0] = 0;
1161			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1162			nd->state = ncsi_dev_state_config_gls;
1163		} else if (nd->state == ncsi_dev_state_config_gls) {
1164			nca.type = NCSI_PKT_CMD_GLS;
1165			nd->state = ncsi_dev_state_config_done;
1166		}
1167
1168		ret = ncsi_xmit_cmd(&nca);
1169		if (ret) {
1170			netdev_err(ndp->ndev.dev,
1171				   "NCSI: Failed to transmit CMD %x\n",
1172				   nca.type);
1173			goto error;
1174		}
1175		break;
1176	case ncsi_dev_state_config_done:
1177		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1178			   nc->id);
1179		spin_lock_irqsave(&nc->lock, flags);
1180		nc->state = NCSI_CHANNEL_ACTIVE;
1181
1182		if (ndp->flags & NCSI_DEV_RESET) {
1183			/* A reset event happened during config, start it now */
1184			nc->reconfigure_needed = false;
1185			spin_unlock_irqrestore(&nc->lock, flags);
1186			ncsi_reset_dev(nd);
1187			break;
1188		}
1189
1190		if (nc->reconfigure_needed) {
1191			/* This channel's configuration has been updated
1192			 * part-way during the config state - start the
1193			 * channel configuration over
1194			 */
1195			nc->reconfigure_needed = false;
1196			nc->state = NCSI_CHANNEL_INACTIVE;
1197			spin_unlock_irqrestore(&nc->lock, flags);
1198
1199			spin_lock_irqsave(&ndp->lock, flags);
1200			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1201			spin_unlock_irqrestore(&ndp->lock, flags);
1202
1203			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1204			ncsi_process_next_channel(ndp);
1205			break;
1206		}
1207
1208		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1209			hot_nc = nc;
1210		} else {
1211			hot_nc = NULL;
1212			netdev_dbg(ndp->ndev.dev,
1213				   "NCSI: channel %u link down after config\n",
1214				   nc->id);
1215		}
1216		spin_unlock_irqrestore(&nc->lock, flags);
1217
1218		/* Update the hot channel */
1219		spin_lock_irqsave(&ndp->lock, flags);
1220		ndp->hot_channel = hot_nc;
1221		spin_unlock_irqrestore(&ndp->lock, flags);
1222
1223		ncsi_start_channel_monitor(nc);
1224		ncsi_process_next_channel(ndp);
1225		break;
1226	default:
1227		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1228			     nd->state);
1229	}
1230
1231	return;
1232
1233error:
1234	ncsi_report_link(ndp, true);
1235}
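     /* The configuration state machine above issues one command per pass:
      * SP -> CIS -> optional OEM GMA (or the standard GMCMA command when the
      * reported NC-SI version allows it) and MAC apply -> clear stale VLAN
      * filters -> program VLAN filters -> EV/DV -> SMA -> EBF -> optional
      * DGMF -> ECNT (only on the Tx channel) -> EC -> optional AE -> GLS ->
      * done.
      */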
1236
1237static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1238{
1239	struct ncsi_channel *nc, *found, *hot_nc;
1240	struct ncsi_channel_mode *ncm;
1241	unsigned long flags, cflags;
1242	struct ncsi_package *np;
1243	bool with_link;
1244
1245	spin_lock_irqsave(&ndp->lock, flags);
1246	hot_nc = ndp->hot_channel;
1247	spin_unlock_irqrestore(&ndp->lock, flags);
1248
1249	/* By default the search is done once an inactive channel with up
1250	 * link is found, unless a preferred channel is set.
1251	 * If multi_package or multi_channel are configured all channels in the
1252	 * whitelist are added to the channel queue.
1253	 */
1254	found = NULL;
1255	with_link = false;
1256	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1257		if (!(ndp->package_whitelist & (0x1 << np->id)))
1258			continue;
1259		NCSI_FOR_EACH_CHANNEL(np, nc) {
1260			if (!(np->channel_whitelist & (0x1 << nc->id)))
1261				continue;
1262
1263			spin_lock_irqsave(&nc->lock, cflags);
1264
1265			if (!list_empty(&nc->link) ||
1266			    nc->state != NCSI_CHANNEL_INACTIVE) {
1267				spin_unlock_irqrestore(&nc->lock, cflags);
1268				continue;
1269			}
1270
1271			if (!found)
1272				found = nc;
1273
1274			if (nc == hot_nc)
1275				found = nc;
1276
1277			ncm = &nc->modes[NCSI_MODE_LINK];
1278			if (ncm->data[2] & 0x1) {
1279				found = nc;
1280				with_link = true;
1281			}
1282
1283			/* If multi_channel is enabled configure all valid
1284			 * channels whether or not they currently have link
1285			 * so they will have AENs enabled.
1286			 */
1287			if (with_link || np->multi_channel) {
1288				spin_lock_irqsave(&ndp->lock, flags);
1289				list_add_tail_rcu(&nc->link,
1290						  &ndp->channel_queue);
1291				spin_unlock_irqrestore(&ndp->lock, flags);
1292
1293				netdev_dbg(ndp->ndev.dev,
1294					   "NCSI: Channel %u added to queue (link %s)\n",
1295					   nc->id,
1296					   ncm->data[2] & 0x1 ? "up" : "down");
1297			}
1298
1299			spin_unlock_irqrestore(&nc->lock, cflags);
1300
1301			if (with_link && !np->multi_channel)
1302				break;
1303		}
1304		if (with_link && !ndp->multi_package)
1305			break;
1306	}
1307
1308	if (list_empty(&ndp->channel_queue) && found) {
1309		netdev_info(ndp->ndev.dev,
1310			    "NCSI: No channel with link found, configuring channel %u\n",
1311			    found->id);
1312		spin_lock_irqsave(&ndp->lock, flags);
1313		list_add_tail_rcu(&found->link, &ndp->channel_queue);
1314		spin_unlock_irqrestore(&ndp->lock, flags);
1315	} else if (!found) {
1316		netdev_warn(ndp->ndev.dev,
1317			    "NCSI: No channel found to configure!\n");
1318		ncsi_report_link(ndp, true);
1319		return -ENODEV;
1320	}
1321
1322	return ncsi_process_next_channel(ndp);
1323}
1324
1325static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1326{
1327	struct ncsi_package *np;
1328	struct ncsi_channel *nc;
1329	unsigned int cap;
1330	bool has_channel = false;
1331
1332	/* The hardware arbitration is disabled if any one channel
 1333	 * doesn't explicitly support it.
1334	 */
1335	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1336		NCSI_FOR_EACH_CHANNEL(np, nc) {
1337			has_channel = true;
1338
1339			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1340			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1341			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1342			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1343				ndp->flags &= ~NCSI_DEV_HWA;
1344				return false;
1345			}
1346		}
1347	}
1348
1349	if (has_channel) {
1350		ndp->flags |= NCSI_DEV_HWA;
1351		return true;
1352	}
1353
1354	ndp->flags &= ~NCSI_DEV_HWA;
1355	return false;
1356}
1357
1358static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1359{
1360	struct ncsi_dev *nd = &ndp->ndev;
1361	struct ncsi_package *np;
1362	struct ncsi_cmd_arg nca;
1363	unsigned char index;
1364	int ret;
1365
1366	nca.ndp = ndp;
1367	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1368	switch (nd->state) {
1369	case ncsi_dev_state_probe:
1370		nd->state = ncsi_dev_state_probe_deselect;
1371		fallthrough;
1372	case ncsi_dev_state_probe_deselect:
1373		ndp->pending_req_num = 8;
1374
1375		/* Deselect all possible packages */
1376		nca.type = NCSI_PKT_CMD_DP;
1377		nca.channel = NCSI_RESERVED_CHANNEL;
1378		for (index = 0; index < 8; index++) {
1379			nca.package = index;
1380			ret = ncsi_xmit_cmd(&nca);
1381			if (ret)
1382				goto error;
1383		}
1384
1385		nd->state = ncsi_dev_state_probe_package;
1386		break;
1387	case ncsi_dev_state_probe_package:
1388		if (ndp->package_probe_id >= 8) {
1389			/* Last package probed, finishing */
1390			ndp->flags |= NCSI_DEV_PROBED;
1391			break;
1392		}
1393
1394		ndp->pending_req_num = 1;
1395
1396		nca.type = NCSI_PKT_CMD_SP;
1397		nca.bytes[0] = 1;
1398		nca.package = ndp->package_probe_id;
1399		nca.channel = NCSI_RESERVED_CHANNEL;
1400		ret = ncsi_xmit_cmd(&nca);
1401		if (ret)
1402			goto error;
1403		nd->state = ncsi_dev_state_probe_channel;
1404		break;
1405	case ncsi_dev_state_probe_channel:
1406		ndp->active_package = ncsi_find_package(ndp,
1407							ndp->package_probe_id);
1408		if (!ndp->active_package) {
1409			/* No response */
1410			nd->state = ncsi_dev_state_probe_dp;
1411			schedule_work(&ndp->work);
1412			break;
1413		}
1414		nd->state = ncsi_dev_state_probe_cis;
1415		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
1416		    ndp->mlx_multi_host)
1417			nd->state = ncsi_dev_state_probe_mlx_gma;
1418
1419		schedule_work(&ndp->work);
1420		break;
1421	case ncsi_dev_state_probe_mlx_gma:
1422		ndp->pending_req_num = 1;
1423
1424		nca.type = NCSI_PKT_CMD_OEM;
1425		nca.package = ndp->active_package->id;
1426		nca.channel = 0;
1427		ret = ncsi_oem_gma_handler_mlx(&nca);
1428		if (ret)
1429			goto error;
1430
1431		nd->state = ncsi_dev_state_probe_mlx_smaf;
1432		break;
1433	case ncsi_dev_state_probe_mlx_smaf:
1434		ndp->pending_req_num = 1;
1435
1436		nca.type = NCSI_PKT_CMD_OEM;
1437		nca.package = ndp->active_package->id;
1438		nca.channel = 0;
1439		ret = ncsi_oem_smaf_mlx(&nca);
1440		if (ret)
1441			goto error;
1442
1443		nd->state = ncsi_dev_state_probe_cis;
1444		break;
1445	case ncsi_dev_state_probe_keep_phy:
1446		ndp->pending_req_num = 1;
1447
1448		nca.type = NCSI_PKT_CMD_OEM;
1449		nca.package = ndp->active_package->id;
1450		nca.channel = 0;
1451		ret = ncsi_oem_keep_phy_intel(&nca);
1452		if (ret)
1453			goto error;
1454
1455		nd->state = ncsi_dev_state_probe_gvi;
1456		break;
1457	case ncsi_dev_state_probe_cis:
1458	case ncsi_dev_state_probe_gvi:
1459	case ncsi_dev_state_probe_gc:
1460	case ncsi_dev_state_probe_gls:
1461		np = ndp->active_package;
1462		ndp->pending_req_num = 1;
1463
 1464		/* Clear initial state, then retrieve version, capability or link status */
1465		if (nd->state == ncsi_dev_state_probe_cis)
1466			nca.type = NCSI_PKT_CMD_CIS;
1467		else if (nd->state == ncsi_dev_state_probe_gvi)
1468			nca.type = NCSI_PKT_CMD_GVI;
1469		else if (nd->state == ncsi_dev_state_probe_gc)
1470			nca.type = NCSI_PKT_CMD_GC;
1471		else
1472			nca.type = NCSI_PKT_CMD_GLS;
1473
1474		nca.package = np->id;
1475		nca.channel = ndp->channel_probe_id;
1476
1477		ret = ncsi_xmit_cmd(&nca);
1478		if (ret)
1479			goto error;
1480
1481		if (nd->state == ncsi_dev_state_probe_cis) {
1482			nd->state = ncsi_dev_state_probe_gvi;
1483			if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
1484				nd->state = ncsi_dev_state_probe_keep_phy;
1485		} else if (nd->state == ncsi_dev_state_probe_gvi) {
1486			nd->state = ncsi_dev_state_probe_gc;
1487		} else if (nd->state == ncsi_dev_state_probe_gc) {
1488			nd->state = ncsi_dev_state_probe_gls;
1489		} else {
1490			nd->state = ncsi_dev_state_probe_cis;
1491			ndp->channel_probe_id++;
1492		}
1493
1494		if (ndp->channel_probe_id == ndp->channel_count) {
1495			ndp->channel_probe_id = 0;
1496			nd->state = ncsi_dev_state_probe_dp;
1497		}
1498		break;
1499	case ncsi_dev_state_probe_dp:
1500		ndp->pending_req_num = 1;
1501
1502		/* Deselect the current package */
1503		nca.type = NCSI_PKT_CMD_DP;
1504		nca.package = ndp->package_probe_id;
1505		nca.channel = NCSI_RESERVED_CHANNEL;
1506		ret = ncsi_xmit_cmd(&nca);
1507		if (ret)
1508			goto error;
1509
1510		/* Probe next package after receiving response */
1511		ndp->package_probe_id++;
1512		nd->state = ncsi_dev_state_probe_package;
1513		ndp->active_package = NULL;
1514		break;
1515	default:
1516		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1517			    nd->state);
1518	}
1519
1520	if (ndp->flags & NCSI_DEV_PROBED) {
1521		/* Check if all packages have HWA support */
1522		ncsi_check_hwa(ndp);
1523		ncsi_choose_active_channel(ndp);
1524	}
1525
1526	return;
1527error:
1528	netdev_err(ndp->ndev.dev,
1529		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
1530		   nca.type);
1531	ncsi_report_link(ndp, true);
1532}
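     /* Probe walk, one package at a time: broadcast DP to deselect all
      * packages, SP to select the package under probe, then per channel
      * CIS -> GVI -> GC -> GLS (with an optional Intel keep-PHY step after
      * CIS on channel 0, and a Mellanox GMA/SMAF detour when mlx_multi_host
      * is set), and finally DP before moving on to the next package.
      */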
1533
1534static void ncsi_dev_work(struct work_struct *work)
1535{
1536	struct ncsi_dev_priv *ndp = container_of(work,
1537			struct ncsi_dev_priv, work);
1538	struct ncsi_dev *nd = &ndp->ndev;
1539
1540	switch (nd->state & ncsi_dev_state_major) {
1541	case ncsi_dev_state_probe:
1542		ncsi_probe_channel(ndp);
1543		break;
1544	case ncsi_dev_state_suspend:
1545		ncsi_suspend_channel(ndp);
1546		break;
1547	case ncsi_dev_state_config:
1548		ncsi_configure_channel(ndp);
1549		break;
1550	default:
1551		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1552			    nd->state);
1553	}
1554}
1555
1556int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1557{
1558	struct ncsi_channel *nc;
1559	int old_state;
1560	unsigned long flags;
1561
1562	spin_lock_irqsave(&ndp->lock, flags);
1563	nc = list_first_or_null_rcu(&ndp->channel_queue,
1564				    struct ncsi_channel, link);
1565	if (!nc) {
1566		spin_unlock_irqrestore(&ndp->lock, flags);
1567		goto out;
1568	}
1569
1570	list_del_init(&nc->link);
1571	spin_unlock_irqrestore(&ndp->lock, flags);
1572
1573	spin_lock_irqsave(&nc->lock, flags);
1574	old_state = nc->state;
1575	nc->state = NCSI_CHANNEL_INVISIBLE;
1576	spin_unlock_irqrestore(&nc->lock, flags);
1577
1578	ndp->active_channel = nc;
1579	ndp->active_package = nc->package;
1580
1581	switch (old_state) {
1582	case NCSI_CHANNEL_INACTIVE:
1583		ndp->ndev.state = ncsi_dev_state_config;
1584		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
 1585			   nc->id);
1586		ncsi_configure_channel(ndp);
1587		break;
1588	case NCSI_CHANNEL_ACTIVE:
1589		ndp->ndev.state = ncsi_dev_state_suspend;
1590		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1591			   nc->id);
1592		ncsi_suspend_channel(ndp);
1593		break;
1594	default:
1595		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1596			   old_state, nc->package->id, nc->id);
1597		ncsi_report_link(ndp, false);
1598		return -EINVAL;
1599	}
1600
1601	return 0;
1602
1603out:
1604	ndp->active_channel = NULL;
1605	ndp->active_package = NULL;
1606	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1607		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1608		return ncsi_choose_active_channel(ndp);
1609	}
1610
1611	ncsi_report_link(ndp, false);
1612	return -ENODEV;
1613}
1614
1615static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1616{
1617	struct ncsi_dev *nd = &ndp->ndev;
1618	struct ncsi_channel *nc;
1619	struct ncsi_package *np;
1620	unsigned long flags;
1621	unsigned int n = 0;
1622
1623	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1624		NCSI_FOR_EACH_CHANNEL(np, nc) {
1625			spin_lock_irqsave(&nc->lock, flags);
1626
1627			/* Channels may be busy, mark dirty instead of
 1628			 * kicking if:
 1629			 * a) not ACTIVE (configured)
 1630			 * b) in the channel_queue (to be configured)
 1631			 * c) its ndev is in the config state
1632			 */
1633			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1634				if ((ndp->ndev.state & 0xff00) ==
1635						ncsi_dev_state_config ||
1636						!list_empty(&nc->link)) {
1637					netdev_dbg(nd->dev,
1638						   "NCSI: channel %p marked dirty\n",
1639						   nc);
1640					nc->reconfigure_needed = true;
1641				}
1642				spin_unlock_irqrestore(&nc->lock, flags);
1643				continue;
1644			}
1645
1646			spin_unlock_irqrestore(&nc->lock, flags);
1647
1648			ncsi_stop_channel_monitor(nc);
1649			spin_lock_irqsave(&nc->lock, flags);
1650			nc->state = NCSI_CHANNEL_INACTIVE;
1651			spin_unlock_irqrestore(&nc->lock, flags);
1652
1653			spin_lock_irqsave(&ndp->lock, flags);
1654			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1655			spin_unlock_irqrestore(&ndp->lock, flags);
1656
1657			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1658			n++;
1659		}
1660	}
1661
1662	return n;
1663}
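     /* ncsi_kick_channels() above is used when the VLAN filter list changes
      * (see ncsi_vlan_rx_add_vid()/ncsi_vlan_rx_kill_vid() below): every
      * ACTIVE channel is moved back to INACTIVE and queued for
      * reconfiguration, while channels that are being configured or already
      * queued are only marked reconfigure_needed so they restart once the
      * current pass completes.
      */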
1664
1665int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1666{
1667	struct ncsi_dev_priv *ndp;
1668	unsigned int n_vids = 0;
1669	struct vlan_vid *vlan;
1670	struct ncsi_dev *nd;
1671	bool found = false;
1672
1673	if (vid == 0)
1674		return 0;
1675
1676	nd = ncsi_find_dev(dev);
1677	if (!nd) {
1678		netdev_warn(dev, "NCSI: No net_device?\n");
1679		return 0;
1680	}
1681
1682	ndp = TO_NCSI_DEV_PRIV(nd);
1683
1684	/* Add the VLAN id to our internal list */
1685	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1686		n_vids++;
1687		if (vlan->vid == vid) {
1688			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1689				   vid);
1690			return 0;
1691		}
1692	}
1693	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1694		netdev_warn(dev,
1695			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1696			    vid, NCSI_MAX_VLAN_VIDS);
1697		return -ENOSPC;
1698	}
1699
1700	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1701	if (!vlan)
1702		return -ENOMEM;
1703
1704	vlan->proto = proto;
1705	vlan->vid = vid;
1706	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1707
1708	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1709
1710	found = ncsi_kick_channels(ndp) != 0;
1711
1712	return found ? ncsi_process_next_channel(ndp) : 0;
1713}
1714EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1715
1716int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1717{
1718	struct vlan_vid *vlan, *tmp;
1719	struct ncsi_dev_priv *ndp;
1720	struct ncsi_dev *nd;
1721	bool found = false;
1722
1723	if (vid == 0)
1724		return 0;
1725
1726	nd = ncsi_find_dev(dev);
1727	if (!nd) {
1728		netdev_warn(dev, "NCSI: no net_device?\n");
1729		return 0;
1730	}
1731
1732	ndp = TO_NCSI_DEV_PRIV(nd);
1733
1734	/* Remove the VLAN id from our internal list */
1735	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1736		if (vlan->vid == vid) {
1737			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1738			list_del_rcu(&vlan->list);
1739			found = true;
1740			kfree(vlan);
1741		}
1742
1743	if (!found) {
1744		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1745		return -EINVAL;
1746	}
1747
1748	found = ncsi_kick_channels(ndp) != 0;
1749
1750	return found ? ncsi_process_next_channel(ndp) : 0;
1751}
1752EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1753
1754struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1755				   void (*handler)(struct ncsi_dev *ndev))
1756{
1757	struct ncsi_dev_priv *ndp;
1758	struct ncsi_dev *nd;
1759	struct platform_device *pdev;
1760	struct device_node *np;
1761	unsigned long flags;
1762	int i;
1763
1764	/* Check if the device has been registered or not */
1765	nd = ncsi_find_dev(dev);
1766	if (nd)
1767		return nd;
1768
1769	/* Create NCSI device */
1770	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1771	if (!ndp)
1772		return NULL;
1773
1774	nd = &ndp->ndev;
1775	nd->state = ncsi_dev_state_registered;
1776	nd->dev = dev;
1777	nd->handler = handler;
1778	ndp->pending_req_num = 0;
1779	INIT_LIST_HEAD(&ndp->channel_queue);
1780	INIT_LIST_HEAD(&ndp->vlan_vids);
1781	INIT_WORK(&ndp->work, ncsi_dev_work);
1782	ndp->package_whitelist = UINT_MAX;
1783
1784	/* Initialize private NCSI device */
1785	spin_lock_init(&ndp->lock);
1786	INIT_LIST_HEAD(&ndp->packages);
1787	ndp->request_id = NCSI_REQ_START_IDX;
1788	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1789		ndp->requests[i].id = i;
1790		ndp->requests[i].ndp = ndp;
1791		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1792	}
1793	ndp->channel_count = NCSI_RESERVED_CHANNEL;
1794
1795	spin_lock_irqsave(&ncsi_dev_lock, flags);
1796	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1797	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1798
1799	/* Register NCSI packet Rx handler */
1800	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1801	ndp->ptype.func = ncsi_rcv_rsp;
1802	ndp->ptype.dev = dev;
1803	dev_add_pack(&ndp->ptype);
1804
1805	pdev = to_platform_device(dev->dev.parent);
1806	if (pdev) {
1807		np = pdev->dev.of_node;
1808		if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
1809			   of_property_read_bool(np, "mlx,multi-host")))
1810			ndp->mlx_multi_host = true;
1811	}
1812
1813	return nd;
1814}
1815EXPORT_SYMBOL_GPL(ncsi_register_dev);
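     /* Illustrative usage from a network driver (a sketch, not part of this
      * file; ftgmac100 is one in-tree user of this API). Names such as priv
      * and my_ncsi_handler are placeholders:
      *
      *	priv->ncsi_dev = ncsi_register_dev(netdev, my_ncsi_handler);
      *	ret = ncsi_start_dev(priv->ncsi_dev);   // from ndo_open
      *	ncsi_stop_dev(priv->ncsi_dev);          // from ndo_stop
      *	ncsi_unregister_dev(priv->ncsi_dev);    // on driver removal
      *
      * where my_ncsi_handler() is the callback invoked on NCSI state and
      * link changes.
      */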
1816
1817int ncsi_start_dev(struct ncsi_dev *nd)
1818{
1819	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1820
1821	if (nd->state != ncsi_dev_state_registered &&
1822	    nd->state != ncsi_dev_state_functional)
1823		return -ENOTTY;
1824
1825	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1826		ndp->package_probe_id = 0;
1827		ndp->channel_probe_id = 0;
1828		nd->state = ncsi_dev_state_probe;
1829		schedule_work(&ndp->work);
1830		return 0;
1831	}
1832
1833	return ncsi_reset_dev(nd);
1834}
1835EXPORT_SYMBOL_GPL(ncsi_start_dev);
1836
1837void ncsi_stop_dev(struct ncsi_dev *nd)
1838{
1839	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1840	struct ncsi_package *np;
1841	struct ncsi_channel *nc;
1842	bool chained;
1843	int old_state;
1844	unsigned long flags;
1845
1846	/* Stop the channel monitor on any active channels. Don't reset the
1847	 * channel state so we know which were active when ncsi_start_dev()
1848	 * is next called.
1849	 */
1850	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1851		NCSI_FOR_EACH_CHANNEL(np, nc) {
1852			ncsi_stop_channel_monitor(nc);
1853
1854			spin_lock_irqsave(&nc->lock, flags);
1855			chained = !list_empty(&nc->link);
1856			old_state = nc->state;
1857			spin_unlock_irqrestore(&nc->lock, flags);
1858
1859			WARN_ON_ONCE(chained ||
1860				     old_state == NCSI_CHANNEL_INVISIBLE);
1861		}
1862	}
1863
1864	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1865	ncsi_report_link(ndp, true);
1866}
1867EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1868
1869int ncsi_reset_dev(struct ncsi_dev *nd)
1870{
1871	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1872	struct ncsi_channel *nc, *active, *tmp;
1873	struct ncsi_package *np;
1874	unsigned long flags;
1875
1876	spin_lock_irqsave(&ndp->lock, flags);
1877
1878	if (!(ndp->flags & NCSI_DEV_RESET)) {
1879		/* Haven't been called yet, check states */
1880		switch (nd->state & ncsi_dev_state_major) {
1881		case ncsi_dev_state_registered:
1882		case ncsi_dev_state_probe:
1883			/* Not even probed yet - do nothing */
1884			spin_unlock_irqrestore(&ndp->lock, flags);
1885			return 0;
1886		case ncsi_dev_state_suspend:
1887		case ncsi_dev_state_config:
1888			/* Wait for the channel to finish its suspend/config
1889			 * operation; once it finishes it will check for
1890			 * NCSI_DEV_RESET and reset the state.
1891			 */
1892			ndp->flags |= NCSI_DEV_RESET;
1893			spin_unlock_irqrestore(&ndp->lock, flags);
1894			return 0;
1895		}
1896	} else {
1897		switch (nd->state) {
1898		case ncsi_dev_state_suspend_done:
1899		case ncsi_dev_state_config_done:
1900		case ncsi_dev_state_functional:
1901			/* Ok */
1902			break;
1903		default:
1904			/* Current reset operation happening */
1905			spin_unlock_irqrestore(&ndp->lock, flags);
1906			return 0;
1907		}
1908	}
1909
1910	if (!list_empty(&ndp->channel_queue)) {
1911		/* Clear any channel queue we may have interrupted */
1912		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1913			list_del_init(&nc->link);
1914	}
1915	spin_unlock_irqrestore(&ndp->lock, flags);
1916
1917	active = NULL;
1918	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1919		NCSI_FOR_EACH_CHANNEL(np, nc) {
1920			spin_lock_irqsave(&nc->lock, flags);
1921
1922			if (nc->state == NCSI_CHANNEL_ACTIVE) {
1923				active = nc;
1924				nc->state = NCSI_CHANNEL_INVISIBLE;
1925				spin_unlock_irqrestore(&nc->lock, flags);
1926				ncsi_stop_channel_monitor(nc);
1927				break;
1928			}
1929
1930			spin_unlock_irqrestore(&nc->lock, flags);
1931		}
1932		if (active)
1933			break;
1934	}
1935
1936	if (!active) {
1937		/* Done */
1938		spin_lock_irqsave(&ndp->lock, flags);
1939		ndp->flags &= ~NCSI_DEV_RESET;
1940		spin_unlock_irqrestore(&ndp->lock, flags);
1941		return ncsi_choose_active_channel(ndp);
1942	}
1943
1944	spin_lock_irqsave(&ndp->lock, flags);
1945	ndp->flags |= NCSI_DEV_RESET;
1946	ndp->active_channel = active;
1947	ndp->active_package = active->package;
1948	spin_unlock_irqrestore(&ndp->lock, flags);
1949
1950	nd->state = ncsi_dev_state_suspend;
1951	schedule_work(&ndp->work);
1952	return 0;
1953}
1954
1955void ncsi_unregister_dev(struct ncsi_dev *nd)
1956{
1957	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1958	struct ncsi_package *np, *tmp;
1959	unsigned long flags;
1960
1961	dev_remove_pack(&ndp->ptype);
1962
1963	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1964		ncsi_remove_package(np);
1965
1966	spin_lock_irqsave(&ncsi_dev_lock, flags);
1967	list_del_rcu(&ndp->node);
1968	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1969
1970	disable_work_sync(&ndp->work);
1971
1972	kfree(ndp);
1973}
1974EXPORT_SYMBOL_GPL(ncsi_unregister_dev);