   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright Gavin Shan, IBM Corporation 2016.
   4 */
   5
   6#include <linux/module.h>
   7#include <linux/kernel.h>
   8#include <linux/init.h>
   9#include <linux/netdevice.h>
  10#include <linux/skbuff.h>
  11#include <linux/of.h>
  12#include <linux/platform_device.h>
  13
  14#include <net/ncsi.h>
  15#include <net/net_namespace.h>
  16#include <net/sock.h>
  17#include <net/addrconf.h>
  18#include <net/ipv6.h>
  19#include <net/genetlink.h>
  20
  21#include "internal.h"
  22#include "ncsi-pkt.h"
  23#include "ncsi-netlink.h"
  24
  25LIST_HEAD(ncsi_dev_list);
  26DEFINE_SPINLOCK(ncsi_dev_lock);
  27
  28bool ncsi_channel_has_link(struct ncsi_channel *channel)
  29{
  30	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
  31}
  32
  33bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
  34			  struct ncsi_channel *channel)
  35{
  36	struct ncsi_package *np;
  37	struct ncsi_channel *nc;
  38
  39	NCSI_FOR_EACH_PACKAGE(ndp, np)
  40		NCSI_FOR_EACH_CHANNEL(np, nc) {
  41			if (nc == channel)
  42				continue;
  43			if (nc->state == NCSI_CHANNEL_ACTIVE &&
  44			    ncsi_channel_has_link(nc))
  45				return false;
  46		}
  47
  48	return true;
  49}
  50
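/* Report the aggregate link state through nd->handler(): the link is
 * reported up if any active channel that is not queued for
 * reconfiguration has link, unless the caller forces a link-down report.
 */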
  51static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
  52{
  53	struct ncsi_dev *nd = &ndp->ndev;
  54	struct ncsi_package *np;
  55	struct ncsi_channel *nc;
  56	unsigned long flags;
  57
  58	nd->state = ncsi_dev_state_functional;
  59	if (force_down) {
  60		nd->link_up = 0;
  61		goto report;
  62	}
  63
  64	nd->link_up = 0;
  65	NCSI_FOR_EACH_PACKAGE(ndp, np) {
  66		NCSI_FOR_EACH_CHANNEL(np, nc) {
  67			spin_lock_irqsave(&nc->lock, flags);
  68
  69			if (!list_empty(&nc->link) ||
  70			    nc->state != NCSI_CHANNEL_ACTIVE) {
  71				spin_unlock_irqrestore(&nc->lock, flags);
  72				continue;
  73			}
  74
  75			if (ncsi_channel_has_link(nc)) {
  76				spin_unlock_irqrestore(&nc->lock, flags);
  77				nd->link_up = 1;
  78				goto report;
  79			}
  80
  81			spin_unlock_irqrestore(&nc->lock, flags);
  82		}
  83	}
  84
  85report:
  86	nd->handler(nd);
  87}
  88
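/* Per-channel monitor timer: periodically sends a Get Link Status (GLS)
 * command and, if no response arrives before the wait window expires,
 * reports the link down and requeues the channel for reconfiguration.
 * The timer rearms itself every second while monitoring is enabled.
 */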
  89static void ncsi_channel_monitor(struct timer_list *t)
  90{
  91	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
  92	struct ncsi_package *np = nc->package;
  93	struct ncsi_dev_priv *ndp = np->ndp;
  94	struct ncsi_channel_mode *ncm;
  95	struct ncsi_cmd_arg nca;
  96	bool enabled, chained;
  97	unsigned int monitor_state;
  98	unsigned long flags;
  99	int state, ret;
 100
 101	spin_lock_irqsave(&nc->lock, flags);
 102	state = nc->state;
 103	chained = !list_empty(&nc->link);
 104	enabled = nc->monitor.enabled;
 105	monitor_state = nc->monitor.state;
 106	spin_unlock_irqrestore(&nc->lock, flags);
 107
 108	if (!enabled)
 109		return;		/* expected race disabling timer */
 110	if (WARN_ON_ONCE(chained))
 111		goto bad_state;
 112
 113	if (state != NCSI_CHANNEL_INACTIVE &&
 114	    state != NCSI_CHANNEL_ACTIVE) {
 115bad_state:
 116		netdev_warn(ndp->ndev.dev,
 117			    "Bad NCSI monitor state channel %d 0x%x %s queue\n",
 118			    nc->id, state, chained ? "on" : "off");
 119		spin_lock_irqsave(&nc->lock, flags);
 120		nc->monitor.enabled = false;
 121		spin_unlock_irqrestore(&nc->lock, flags);
 122		return;
 123	}
 124
 125	switch (monitor_state) {
 126	case NCSI_CHANNEL_MONITOR_START:
 127	case NCSI_CHANNEL_MONITOR_RETRY:
 128		nca.ndp = ndp;
 129		nca.package = np->id;
 130		nca.channel = nc->id;
 131		nca.type = NCSI_PKT_CMD_GLS;
 132		nca.req_flags = 0;
 133		ret = ncsi_xmit_cmd(&nca);
 134		if (ret)
 135			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
 136				   ret);
 137		break;
 138	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
 139		break;
 140	default:
 141		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
 142			   nc->id);
 143		ncsi_report_link(ndp, true);
 144		ndp->flags |= NCSI_DEV_RESHUFFLE;
 145
 146		ncm = &nc->modes[NCSI_MODE_LINK];
 147		spin_lock_irqsave(&nc->lock, flags);
 148		nc->monitor.enabled = false;
 149		nc->state = NCSI_CHANNEL_INVISIBLE;
 150		ncm->data[2] &= ~0x1;
 151		spin_unlock_irqrestore(&nc->lock, flags);
 152
 153		spin_lock_irqsave(&ndp->lock, flags);
 154		nc->state = NCSI_CHANNEL_ACTIVE;
 155		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
 156		spin_unlock_irqrestore(&ndp->lock, flags);
 157		ncsi_process_next_channel(ndp);
 158		return;
 159	}
 160
 161	spin_lock_irqsave(&nc->lock, flags);
 162	nc->monitor.state++;
 163	spin_unlock_irqrestore(&nc->lock, flags);
 164	mod_timer(&nc->monitor.timer, jiffies + HZ);
 165}
 166
 167void ncsi_start_channel_monitor(struct ncsi_channel *nc)
 168{
 169	unsigned long flags;
 170
 171	spin_lock_irqsave(&nc->lock, flags);
 172	WARN_ON_ONCE(nc->monitor.enabled);
 173	nc->monitor.enabled = true;
 174	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
 175	spin_unlock_irqrestore(&nc->lock, flags);
 176
 177	mod_timer(&nc->monitor.timer, jiffies + HZ);
 178}
 179
 180void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
 181{
 182	unsigned long flags;
 183
 184	spin_lock_irqsave(&nc->lock, flags);
 185	if (!nc->monitor.enabled) {
 186		spin_unlock_irqrestore(&nc->lock, flags);
 187		return;
 188	}
 189	nc->monitor.enabled = false;
 190	spin_unlock_irqrestore(&nc->lock, flags);
 191
 192	del_timer_sync(&nc->monitor.timer);
 193}
 194
 195struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
 196				       unsigned char id)
 197{
 198	struct ncsi_channel *nc;
 199
 200	NCSI_FOR_EACH_CHANNEL(np, nc) {
 201		if (nc->id == id)
 202			return nc;
 203	}
 204
 205	return NULL;
 206}
 207
 208struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
 209{
 210	struct ncsi_channel *nc, *tmp;
 211	int index;
 212	unsigned long flags;
 213
 214	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
 215	if (!nc)
 216		return NULL;
 217
 218	nc->id = id;
 219	nc->package = np;
 220	nc->state = NCSI_CHANNEL_INACTIVE;
 221	nc->monitor.enabled = false;
 222	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
 223	spin_lock_init(&nc->lock);
 224	INIT_LIST_HEAD(&nc->link);
 225	for (index = 0; index < NCSI_CAP_MAX; index++)
 226		nc->caps[index].index = index;
 227	for (index = 0; index < NCSI_MODE_MAX; index++)
 228		nc->modes[index].index = index;
 229
 230	spin_lock_irqsave(&np->lock, flags);
 231	tmp = ncsi_find_channel(np, id);
 232	if (tmp) {
 233		spin_unlock_irqrestore(&np->lock, flags);
 234		kfree(nc);
 235		return tmp;
 236	}
 237
 238	list_add_tail_rcu(&nc->node, &np->channels);
 239	np->channel_num++;
 240	spin_unlock_irqrestore(&np->lock, flags);
 241
 242	return nc;
 243}
 244
 245static void ncsi_remove_channel(struct ncsi_channel *nc)
 246{
 247	struct ncsi_package *np = nc->package;
 248	unsigned long flags;
 249
 250	spin_lock_irqsave(&nc->lock, flags);
 251
 252	/* Release filters */
 253	kfree(nc->mac_filter.addrs);
 254	kfree(nc->vlan_filter.vids);
 255
 256	nc->state = NCSI_CHANNEL_INACTIVE;
 257	spin_unlock_irqrestore(&nc->lock, flags);
 258	ncsi_stop_channel_monitor(nc);
 259
 260	/* Remove and free channel */
 261	spin_lock_irqsave(&np->lock, flags);
 262	list_del_rcu(&nc->node);
 263	np->channel_num--;
 264	spin_unlock_irqrestore(&np->lock, flags);
 265
 266	kfree(nc);
 267}
 268
 269struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
 270				       unsigned char id)
 271{
 272	struct ncsi_package *np;
 273
 274	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 275		if (np->id == id)
 276			return np;
 277	}
 278
 279	return NULL;
 280}
 281
 282struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
 283				      unsigned char id)
 284{
 285	struct ncsi_package *np, *tmp;
 286	unsigned long flags;
 287
 288	np = kzalloc(sizeof(*np), GFP_ATOMIC);
 289	if (!np)
 290		return NULL;
 291
 292	np->id = id;
 293	np->ndp = ndp;
 294	spin_lock_init(&np->lock);
 295	INIT_LIST_HEAD(&np->channels);
 296	np->channel_whitelist = UINT_MAX;
 297
 298	spin_lock_irqsave(&ndp->lock, flags);
 299	tmp = ncsi_find_package(ndp, id);
 300	if (tmp) {
 301		spin_unlock_irqrestore(&ndp->lock, flags);
 302		kfree(np);
 303		return tmp;
 304	}
 305
 306	list_add_tail_rcu(&np->node, &ndp->packages);
 307	ndp->package_num++;
 308	spin_unlock_irqrestore(&ndp->lock, flags);
 309
 310	return np;
 311}
 312
 313void ncsi_remove_package(struct ncsi_package *np)
 314{
 315	struct ncsi_dev_priv *ndp = np->ndp;
 316	struct ncsi_channel *nc, *tmp;
 317	unsigned long flags;
 318
 319	/* Release all child channels */
 320	list_for_each_entry_safe(nc, tmp, &np->channels, node)
 321		ncsi_remove_channel(nc);
 322
 323	/* Remove and free package */
 324	spin_lock_irqsave(&ndp->lock, flags);
 325	list_del_rcu(&np->node);
 326	ndp->package_num--;
 327	spin_unlock_irqrestore(&ndp->lock, flags);
 328
 329	kfree(np);
 330}
 331
 332void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
 333				   unsigned char id,
 334				   struct ncsi_package **np,
 335				   struct ncsi_channel **nc)
 336{
 337	struct ncsi_package *p;
 338	struct ncsi_channel *c;
 339
 340	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
 341	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
 342
 343	if (np)
 344		*np = p;
 345	if (nc)
 346		*nc = c;
 347}
 348
  349/* For two consecutive NCSI commands, the packet IDs shouldn't be the
  350 * same; otherwise a stale response could be matched to the wrong
  351 * request. So the available IDs are allocated in round-robin fashion.
  352 */
 353struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
 354					unsigned int req_flags)
 355{
 356	struct ncsi_request *nr = NULL;
 357	int i, limit = ARRAY_SIZE(ndp->requests);
 358	unsigned long flags;
 359
  360	/* Check if there is an available request up to the ceiling */
 361	spin_lock_irqsave(&ndp->lock, flags);
 362	for (i = ndp->request_id; i < limit; i++) {
 363		if (ndp->requests[i].used)
 364			continue;
 365
 366		nr = &ndp->requests[i];
 367		nr->used = true;
 368		nr->flags = req_flags;
 369		ndp->request_id = i + 1;
 370		goto found;
 371	}
 372
  373	/* Fall back to checking from the starting cursor */
 374	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
 375		if (ndp->requests[i].used)
 376			continue;
 377
 378		nr = &ndp->requests[i];
 379		nr->used = true;
 380		nr->flags = req_flags;
 381		ndp->request_id = i + 1;
 382		goto found;
 383	}
 384
 385found:
 386	spin_unlock_irqrestore(&ndp->lock, flags);
 387	return nr;
 388}
 389
 390void ncsi_free_request(struct ncsi_request *nr)
 391{
 392	struct ncsi_dev_priv *ndp = nr->ndp;
 393	struct sk_buff *cmd, *rsp;
 394	unsigned long flags;
 395	bool driven;
 396
 397	if (nr->enabled) {
 398		nr->enabled = false;
 399		del_timer_sync(&nr->timer);
 400	}
 401
 402	spin_lock_irqsave(&ndp->lock, flags);
 403	cmd = nr->cmd;
 404	rsp = nr->rsp;
 405	nr->cmd = NULL;
 406	nr->rsp = NULL;
 407	nr->used = false;
 408	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
 409	spin_unlock_irqrestore(&ndp->lock, flags);
 410
 411	if (driven && cmd && --ndp->pending_req_num == 0)
 412		schedule_work(&ndp->work);
 413
 414	/* Release command and response */
 415	consume_skb(cmd);
 416	consume_skb(rsp);
 417}
 418
 419struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
 420{
 421	struct ncsi_dev_priv *ndp;
 422
 423	NCSI_FOR_EACH_DEV(ndp) {
 424		if (ndp->ndev.dev == dev)
 425			return &ndp->ndev;
 426	}
 427
 428	return NULL;
 429}
 430
 431static void ncsi_request_timeout(struct timer_list *t)
 432{
 433	struct ncsi_request *nr = from_timer(nr, t, timer);
 434	struct ncsi_dev_priv *ndp = nr->ndp;
 435	struct ncsi_cmd_pkt *cmd;
 436	struct ncsi_package *np;
 437	struct ncsi_channel *nc;
 438	unsigned long flags;
 439
  440	/* If the request already has an associated response,
  441	 * let the response handler release it.
  442	 */
 443	spin_lock_irqsave(&ndp->lock, flags);
 444	nr->enabled = false;
 445	if (nr->rsp || !nr->cmd) {
 446		spin_unlock_irqrestore(&ndp->lock, flags);
 447		return;
 448	}
 449	spin_unlock_irqrestore(&ndp->lock, flags);
 450
 451	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
 452		if (nr->cmd) {
 453			/* Find the package */
 454			cmd = (struct ncsi_cmd_pkt *)
 455			      skb_network_header(nr->cmd);
 456			ncsi_find_package_and_channel(ndp,
 457						      cmd->cmd.common.channel,
 458						      &np, &nc);
 459			ncsi_send_netlink_timeout(nr, np, nc);
 460		}
 461	}
 462
 463	/* Release the request */
 464	ncsi_free_request(nr);
 465}
 466
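/* Suspend state machine for the active channel: select the package (SP),
 * optionally refresh link states (GLS) when reshuffling, disable channel
 * network Tx (DCNT), disable the channel (DC) and deselect the package
 * (DP) unless another channel on it is still active.
 */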
 467static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
 468{
 469	struct ncsi_dev *nd = &ndp->ndev;
 470	struct ncsi_package *np;
 471	struct ncsi_channel *nc, *tmp;
 472	struct ncsi_cmd_arg nca;
 473	unsigned long flags;
 474	int ret;
 475
 476	np = ndp->active_package;
 477	nc = ndp->active_channel;
 478	nca.ndp = ndp;
 479	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
 480	switch (nd->state) {
 481	case ncsi_dev_state_suspend:
 482		nd->state = ncsi_dev_state_suspend_select;
 483		fallthrough;
 484	case ncsi_dev_state_suspend_select:
 485		ndp->pending_req_num = 1;
 486
 487		nca.type = NCSI_PKT_CMD_SP;
 488		nca.package = np->id;
 489		nca.channel = NCSI_RESERVED_CHANNEL;
 490		if (ndp->flags & NCSI_DEV_HWA)
 491			nca.bytes[0] = 0;
 492		else
 493			nca.bytes[0] = 1;
 494
  495		/* Retrieve the latest link states of the channels in the
  496		 * current package when the active channel needs to fail over
  497		 * to another one. We may select another channel as the next
  498		 * active one, and the channels' link states are the most
  499		 * important factor in that selection, so they must be
  500		 * accurate. Unfortunately, the link states of inactive
  501		 * channels can't be updated by LSC AENs in time.
  502		 */
 503		if (ndp->flags & NCSI_DEV_RESHUFFLE)
 504			nd->state = ncsi_dev_state_suspend_gls;
 505		else
 506			nd->state = ncsi_dev_state_suspend_dcnt;
 507		ret = ncsi_xmit_cmd(&nca);
 508		if (ret)
 509			goto error;
 510
 511		break;
 512	case ncsi_dev_state_suspend_gls:
 513		ndp->pending_req_num = np->channel_num;
 514
 515		nca.type = NCSI_PKT_CMD_GLS;
 516		nca.package = np->id;
 517
 518		nd->state = ncsi_dev_state_suspend_dcnt;
 519		NCSI_FOR_EACH_CHANNEL(np, nc) {
 520			nca.channel = nc->id;
 521			ret = ncsi_xmit_cmd(&nca);
 522			if (ret)
 523				goto error;
 524		}
 525
 526		break;
 527	case ncsi_dev_state_suspend_dcnt:
 528		ndp->pending_req_num = 1;
 529
 530		nca.type = NCSI_PKT_CMD_DCNT;
 531		nca.package = np->id;
 532		nca.channel = nc->id;
 533
 534		nd->state = ncsi_dev_state_suspend_dc;
 535		ret = ncsi_xmit_cmd(&nca);
 536		if (ret)
 537			goto error;
 538
 539		break;
 540	case ncsi_dev_state_suspend_dc:
 541		ndp->pending_req_num = 1;
 542
 543		nca.type = NCSI_PKT_CMD_DC;
 544		nca.package = np->id;
 545		nca.channel = nc->id;
 546		nca.bytes[0] = 1;
 547
 548		nd->state = ncsi_dev_state_suspend_deselect;
 549		ret = ncsi_xmit_cmd(&nca);
 550		if (ret)
 551			goto error;
 552
 553		NCSI_FOR_EACH_CHANNEL(np, tmp) {
 554			/* If there is another channel active on this package
 555			 * do not deselect the package.
 556			 */
 557			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
 558				nd->state = ncsi_dev_state_suspend_done;
 559				break;
 560			}
 561		}
 562		break;
 563	case ncsi_dev_state_suspend_deselect:
 564		ndp->pending_req_num = 1;
 565
 566		nca.type = NCSI_PKT_CMD_DP;
 567		nca.package = np->id;
 568		nca.channel = NCSI_RESERVED_CHANNEL;
 569
 570		nd->state = ncsi_dev_state_suspend_done;
 571		ret = ncsi_xmit_cmd(&nca);
 572		if (ret)
 573			goto error;
 574
 575		break;
 576	case ncsi_dev_state_suspend_done:
 577		spin_lock_irqsave(&nc->lock, flags);
 578		nc->state = NCSI_CHANNEL_INACTIVE;
 579		spin_unlock_irqrestore(&nc->lock, flags);
 580		if (ndp->flags & NCSI_DEV_RESET)
 581			ncsi_reset_dev(nd);
 582		else
 583			ncsi_process_next_channel(ndp);
 584		break;
 585	default:
 586		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
 587			    nd->state);
 588	}
 589
 590	return;
 591error:
 592	nd->state = ncsi_dev_state_functional;
 593}
 594
 595/* Check the VLAN filter bitmap for a set filter, and construct a
 596 * "Set VLAN Filter - Disable" packet if found.
 597 */
 598static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 599			 struct ncsi_cmd_arg *nca)
 600{
 601	struct ncsi_channel_vlan_filter *ncf;
 602	unsigned long flags;
 603	void *bitmap;
 604	int index;
 605	u16 vid;
 606
 607	ncf = &nc->vlan_filter;
 608	bitmap = &ncf->bitmap;
 609
 610	spin_lock_irqsave(&nc->lock, flags);
 611	index = find_first_bit(bitmap, ncf->n_vids);
 612	if (index >= ncf->n_vids) {
 613		spin_unlock_irqrestore(&nc->lock, flags);
 614		return -1;
 615	}
 616	vid = ncf->vids[index];
 617
 618	clear_bit(index, bitmap);
 619	ncf->vids[index] = 0;
 620	spin_unlock_irqrestore(&nc->lock, flags);
 621
 622	nca->type = NCSI_PKT_CMD_SVF;
 623	nca->words[1] = vid;
 624	/* HW filter index starts at 1 */
 625	nca->bytes[6] = index + 1;
 626	nca->bytes[7] = 0x00;
 627	return 0;
 628}
 629
 630/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
 631 * packet.
 632 */
 633static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 634		       struct ncsi_cmd_arg *nca)
 635{
 636	struct ncsi_channel_vlan_filter *ncf;
 637	struct vlan_vid *vlan = NULL;
 638	unsigned long flags;
 639	int i, index;
 640	void *bitmap;
 641	u16 vid;
 642
 643	if (list_empty(&ndp->vlan_vids))
 644		return -1;
 645
 646	ncf = &nc->vlan_filter;
 647	bitmap = &ncf->bitmap;
 648
 649	spin_lock_irqsave(&nc->lock, flags);
 650
 651	rcu_read_lock();
 652	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
 653		vid = vlan->vid;
 654		for (i = 0; i < ncf->n_vids; i++)
 655			if (ncf->vids[i] == vid) {
 656				vid = 0;
 657				break;
 658			}
 659		if (vid)
 660			break;
 661	}
 662	rcu_read_unlock();
 663
 664	if (!vid) {
  665		/* No outstanding VLAN ID to set */
 666		spin_unlock_irqrestore(&nc->lock, flags);
 667		return -1;
 668	}
 669
 670	index = find_first_zero_bit(bitmap, ncf->n_vids);
 671	if (index < 0 || index >= ncf->n_vids) {
 672		netdev_err(ndp->ndev.dev,
 673			   "Channel %u already has all VLAN filters set\n",
 674			   nc->id);
 675		spin_unlock_irqrestore(&nc->lock, flags);
 676		return -1;
 677	}
 678
 679	ncf->vids[index] = vid;
 680	set_bit(index, bitmap);
 681	spin_unlock_irqrestore(&nc->lock, flags);
 682
 683	nca->type = NCSI_PKT_CMD_SVF;
 684	nca->words[1] = vid;
 685	/* HW filter index starts at 1 */
 686	nca->bytes[6] = index + 1;
 687	nca->bytes[7] = 0x01;
 688
 689	return 0;
 690}
 691
 692static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
 693{
 694	unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
 695	int ret = 0;
 696
 697	nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
 698
 699	memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
 700	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
 701
 702	data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
 703
 704	/* PHY Link up attribute */
 705	data[6] = 0x1;
 706
 707	nca->data = data;
 708
 709	ret = ncsi_xmit_cmd(nca);
 710	if (ret)
 711		netdev_err(nca->ndp->ndev.dev,
 712			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 713			   nca->type);
 714	return ret;
 715}
 716
 717/* NCSI OEM Command APIs */
 718static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
 719{
 720	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
 721	int ret = 0;
 722
 723	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
 724
 725	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
 726	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
 727	data[5] = NCSI_OEM_BCM_CMD_GMA;
 728
 729	nca->data = data;
 730
 731	ret = ncsi_xmit_cmd(nca);
 732	if (ret)
 733		netdev_err(nca->ndp->ndev.dev,
 734			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 735			   nca->type);
 736	return ret;
 737}
 738
 739static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
 740{
 741	union {
 742		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
 743		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
 744	} u;
 745	int ret = 0;
 746
 747	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
 748
 749	memset(&u, 0, sizeof(u));
 750	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
 751	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
 752	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
 753
 754	nca->data = u.data_u8;
 755
 756	ret = ncsi_xmit_cmd(nca);
 757	if (ret)
 758		netdev_err(nca->ndp->ndev.dev,
 759			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 760			   nca->type);
 761	return ret;
 762}
 763
 764static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
 765{
 766	union {
 767		u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
 768		u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
 769	} u;
 770	int ret = 0;
 771
 772	memset(&u, 0, sizeof(u));
 773	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
 774	u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
 775	u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
 776	memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
  777	       nca->ndp->ndev.dev->dev_addr, ETH_ALEN);
 778	u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
 779		(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
 780
 781	nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
 782	nca->data = u.data_u8;
 783
 784	ret = ncsi_xmit_cmd(nca);
 785	if (ret)
 786		netdev_err(nca->ndp->ndev.dev,
 787			   "NCSI: Failed to transmit cmd 0x%x during probe\n",
 788			   nca->type);
 789	return ret;
 790}
 791
 792static int ncsi_oem_gma_handler_intel(struct ncsi_cmd_arg *nca)
 793{
 794	unsigned char data[NCSI_OEM_INTEL_CMD_GMA_LEN];
 795	int ret = 0;
 796
 797	nca->payload = NCSI_OEM_INTEL_CMD_GMA_LEN;
 798
 799	memset(data, 0, NCSI_OEM_INTEL_CMD_GMA_LEN);
 800	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
 801	data[4] = NCSI_OEM_INTEL_CMD_GMA;
 802
 803	nca->data = data;
 804
 805	ret = ncsi_xmit_cmd(nca);
 806	if (ret)
 807		netdev_err(nca->ndp->ndev.dev,
 808			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 809			   nca->type);
 810
 811	return ret;
 812}
 813
 814/* OEM Command handlers initialization */
 815static struct ncsi_oem_gma_handler {
 816	unsigned int	mfr_id;
 817	int		(*handler)(struct ncsi_cmd_arg *nca);
 818} ncsi_oem_gma_handlers[] = {
 819	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
 820	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx },
 821	{ NCSI_OEM_MFR_INTEL_ID, ncsi_oem_gma_handler_intel }
 822};
 823
 824static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
 825{
 826	struct ncsi_oem_gma_handler *nch = NULL;
 827	int i;
 828
 829	/* This function should only be called once, return if flag set */
 830	if (nca->ndp->gma_flag == 1)
 831		return -1;
 832
 833	/* Find gma handler for given manufacturer id */
 834	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
 835		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
 836			if (ncsi_oem_gma_handlers[i].handler)
 837				nch = &ncsi_oem_gma_handlers[i];
 838			break;
  839		}
 840	}
 841
 842	if (!nch) {
 843		netdev_err(nca->ndp->ndev.dev,
 844			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
 845			   mf_id);
 846		return -1;
 847	}
 848
 849	/* Get Mac address from NCSI device */
 850	return nch->handler(nca);
 851}
 852
 853/* Determine if a given channel from the channel_queue should be used for Tx */
 854static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
 855			       struct ncsi_channel *nc)
 856{
 857	struct ncsi_channel_mode *ncm;
 858	struct ncsi_channel *channel;
 859	struct ncsi_package *np;
 860
 861	/* Check if any other channel has Tx enabled; a channel may have already
 862	 * been configured and removed from the channel queue.
 863	 */
 864	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 865		if (!ndp->multi_package && np != nc->package)
 866			continue;
 867		NCSI_FOR_EACH_CHANNEL(np, channel) {
 868			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
 869			if (ncm->enable)
 870				return false;
 871		}
 872	}
 873
 874	/* This channel is the preferred channel and has link */
 875	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
 876		np = channel->package;
 877		if (np->preferred_channel &&
 878		    ncsi_channel_has_link(np->preferred_channel)) {
 879			return np->preferred_channel == nc;
 880		}
 881	}
 882
 883	/* This channel has link */
 884	if (ncsi_channel_has_link(nc))
 885		return true;
 886
 887	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
 888		if (ncsi_channel_has_link(channel))
 889			return false;
 890
 891	/* No other channel has link; default to this one */
 892	return true;
 893}
 894
 895/* Change the active Tx channel in a multi-channel setup */
 896int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
 897			   struct ncsi_package *package,
 898			   struct ncsi_channel *disable,
 899			   struct ncsi_channel *enable)
 900{
 901	struct ncsi_cmd_arg nca;
 902	struct ncsi_channel *nc;
 903	struct ncsi_package *np;
 904	int ret = 0;
 905
 906	if (!package->multi_channel && !ndp->multi_package)
 907		netdev_warn(ndp->ndev.dev,
 908			    "NCSI: Trying to update Tx channel in single-channel mode\n");
 909	nca.ndp = ndp;
 910	nca.req_flags = 0;
 911
 912	/* Find current channel with Tx enabled */
 913	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 914		if (disable)
 915			break;
 916		if (!ndp->multi_package && np != package)
 917			continue;
 918
 919		NCSI_FOR_EACH_CHANNEL(np, nc)
 920			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
 921				disable = nc;
 922				break;
 923			}
 924	}
 925
 926	/* Find a suitable channel for Tx */
 927	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 928		if (enable)
 929			break;
 930		if (!ndp->multi_package && np != package)
 931			continue;
 932		if (!(ndp->package_whitelist & (0x1 << np->id)))
 933			continue;
 934
 935		if (np->preferred_channel &&
 936		    ncsi_channel_has_link(np->preferred_channel)) {
 937			enable = np->preferred_channel;
 938			break;
 939		}
 940
 941		NCSI_FOR_EACH_CHANNEL(np, nc) {
 942			if (!(np->channel_whitelist & 0x1 << nc->id))
 943				continue;
 944			if (nc->state != NCSI_CHANNEL_ACTIVE)
 945				continue;
 946			if (ncsi_channel_has_link(nc)) {
 947				enable = nc;
 948				break;
 949			}
 950		}
 951	}
 952
 953	if (disable == enable)
 954		return -1;
 955
 956	if (!enable)
 957		return -1;
 958
 959	if (disable) {
 960		nca.channel = disable->id;
 961		nca.package = disable->package->id;
 962		nca.type = NCSI_PKT_CMD_DCNT;
 963		ret = ncsi_xmit_cmd(&nca);
 964		if (ret)
 965			netdev_err(ndp->ndev.dev,
 966				   "Error %d sending DCNT\n",
 967				   ret);
 968	}
 969
 970	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
 971
 972	nca.channel = enable->id;
 973	nca.package = enable->package->id;
 974	nca.type = NCSI_PKT_CMD_ECNT;
 975	ret = ncsi_xmit_cmd(&nca);
 976	if (ret)
 977		netdev_err(ndp->ndev.dev,
 978			   "Error %d sending ECNT\n",
 979			   ret);
 980
 981	return ret;
 982}
 983
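/* Configuration state machine for the channel taken off the queue:
 * SP, CIS, optional MAC address retrieval (GMCMA or OEM GMA), VLAN
 * filter setup, SMA, broadcast/multicast filters, ECNT/EC, AE and GLS,
 * after which the channel is marked active and its monitor is started.
 */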
 984static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
 985{
 986	struct ncsi_package *np = ndp->active_package;
 987	struct ncsi_channel *nc = ndp->active_channel;
 988	struct ncsi_channel *hot_nc = NULL;
 989	struct ncsi_dev *nd = &ndp->ndev;
 990	struct net_device *dev = nd->dev;
 991	struct ncsi_cmd_arg nca;
 992	unsigned char index;
 993	unsigned long flags;
 994	int ret;
 995
 996	nca.ndp = ndp;
 997	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
 998	switch (nd->state) {
 999	case ncsi_dev_state_config:
1000	case ncsi_dev_state_config_sp:
1001		ndp->pending_req_num = 1;
1002
1003		/* Select the specific package */
1004		nca.type = NCSI_PKT_CMD_SP;
1005		if (ndp->flags & NCSI_DEV_HWA)
1006			nca.bytes[0] = 0;
1007		else
1008			nca.bytes[0] = 1;
1009		nca.package = np->id;
1010		nca.channel = NCSI_RESERVED_CHANNEL;
1011		ret = ncsi_xmit_cmd(&nca);
1012		if (ret) {
1013			netdev_err(ndp->ndev.dev,
1014				   "NCSI: Failed to transmit CMD_SP\n");
1015			goto error;
1016		}
1017
1018		nd->state = ncsi_dev_state_config_cis;
1019		break;
1020	case ncsi_dev_state_config_cis:
1021		ndp->pending_req_num = 1;
1022
1023		/* Clear initial state */
1024		nca.type = NCSI_PKT_CMD_CIS;
1025		nca.package = np->id;
1026		nca.channel = nc->id;
1027		ret = ncsi_xmit_cmd(&nca);
1028		if (ret) {
1029			netdev_err(ndp->ndev.dev,
1030				   "NCSI: Failed to transmit CMD_CIS\n");
1031			goto error;
1032		}
1033
1034		nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
1035			  ? ncsi_dev_state_config_oem_gma
1036			  : ncsi_dev_state_config_clear_vids;
1037		break;
1038	case ncsi_dev_state_config_oem_gma:
1039		nd->state = ncsi_dev_state_config_clear_vids;
1040
1041		nca.package = np->id;
1042		nca.channel = nc->id;
1043		ndp->pending_req_num = 1;
1044		if (nc->version.major >= 1 && nc->version.minor >= 2) {
1045			nca.type = NCSI_PKT_CMD_GMCMA;
1046			ret = ncsi_xmit_cmd(&nca);
1047		} else {
1048			nca.type = NCSI_PKT_CMD_OEM;
1049			ret = ncsi_gma_handler(&nca, nc->version.mf_id);
1050		}
1051		if (ret < 0)
1052			schedule_work(&ndp->work);
1053
1054		break;
1055	case ncsi_dev_state_config_clear_vids:
1056	case ncsi_dev_state_config_svf:
1057	case ncsi_dev_state_config_ev:
1058	case ncsi_dev_state_config_sma:
1059	case ncsi_dev_state_config_ebf:
1060	case ncsi_dev_state_config_dgmf:
1061	case ncsi_dev_state_config_ecnt:
1062	case ncsi_dev_state_config_ec:
1063	case ncsi_dev_state_config_ae:
1064	case ncsi_dev_state_config_gls:
1065		ndp->pending_req_num = 1;
1066
1067		nca.package = np->id;
1068		nca.channel = nc->id;
1069
1070		/* Clear any active filters on the channel before setting */
1071		if (nd->state == ncsi_dev_state_config_clear_vids) {
1072			ret = clear_one_vid(ndp, nc, &nca);
1073			if (ret) {
1074				nd->state = ncsi_dev_state_config_svf;
1075				schedule_work(&ndp->work);
1076				break;
1077			}
1078			/* Repeat */
1079			nd->state = ncsi_dev_state_config_clear_vids;
1080		/* Add known VLAN tags to the filter */
1081		} else if (nd->state == ncsi_dev_state_config_svf) {
1082			ret = set_one_vid(ndp, nc, &nca);
1083			if (ret) {
1084				nd->state = ncsi_dev_state_config_ev;
1085				schedule_work(&ndp->work);
1086				break;
1087			}
1088			/* Repeat */
1089			nd->state = ncsi_dev_state_config_svf;
1090		/* Enable/Disable the VLAN filter */
1091		} else if (nd->state == ncsi_dev_state_config_ev) {
1092			if (list_empty(&ndp->vlan_vids)) {
1093				nca.type = NCSI_PKT_CMD_DV;
1094			} else {
1095				nca.type = NCSI_PKT_CMD_EV;
1096				nca.bytes[3] = NCSI_CAP_VLAN_NO;
1097			}
1098			nd->state = ncsi_dev_state_config_sma;
1099		} else if (nd->state == ncsi_dev_state_config_sma) {
 1100			/* Use first entry in unicast filter table. Note that
 1101			 * the MAC filter table starts from entry 1 instead of
 1102			 * 0.
 1103			 */
1104			nca.type = NCSI_PKT_CMD_SMA;
1105			for (index = 0; index < 6; index++)
1106				nca.bytes[index] = dev->dev_addr[index];
1107			nca.bytes[6] = 0x1;
1108			nca.bytes[7] = 0x1;
1109			nd->state = ncsi_dev_state_config_ebf;
1110		} else if (nd->state == ncsi_dev_state_config_ebf) {
1111			nca.type = NCSI_PKT_CMD_EBF;
1112			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
 1113			/* If multicast global filtering is supported then
 1114			 * disable it so that all multicast packets will be
 1115			 * forwarded to the management controller.
 1116			 */
1117			if (nc->caps[NCSI_CAP_GENERIC].cap &
1118			    NCSI_CAP_GENERIC_MC)
1119				nd->state = ncsi_dev_state_config_dgmf;
1120			else if (ncsi_channel_is_tx(ndp, nc))
1121				nd->state = ncsi_dev_state_config_ecnt;
1122			else
1123				nd->state = ncsi_dev_state_config_ec;
1124		} else if (nd->state == ncsi_dev_state_config_dgmf) {
1125			nca.type = NCSI_PKT_CMD_DGMF;
1126			if (ncsi_channel_is_tx(ndp, nc))
1127				nd->state = ncsi_dev_state_config_ecnt;
1128			else
1129				nd->state = ncsi_dev_state_config_ec;
1130		} else if (nd->state == ncsi_dev_state_config_ecnt) {
1131			if (np->preferred_channel &&
1132			    nc != np->preferred_channel)
1133				netdev_info(ndp->ndev.dev,
1134					    "NCSI: Tx failed over to channel %u\n",
1135					    nc->id);
1136			nca.type = NCSI_PKT_CMD_ECNT;
1137			nd->state = ncsi_dev_state_config_ec;
1138		} else if (nd->state == ncsi_dev_state_config_ec) {
1139			/* Enable AEN if it's supported */
1140			nca.type = NCSI_PKT_CMD_EC;
1141			nd->state = ncsi_dev_state_config_ae;
1142			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1143				nd->state = ncsi_dev_state_config_gls;
1144		} else if (nd->state == ncsi_dev_state_config_ae) {
1145			nca.type = NCSI_PKT_CMD_AE;
1146			nca.bytes[0] = 0;
1147			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1148			nd->state = ncsi_dev_state_config_gls;
1149		} else if (nd->state == ncsi_dev_state_config_gls) {
1150			nca.type = NCSI_PKT_CMD_GLS;
1151			nd->state = ncsi_dev_state_config_done;
1152		}
1153
1154		ret = ncsi_xmit_cmd(&nca);
1155		if (ret) {
1156			netdev_err(ndp->ndev.dev,
1157				   "NCSI: Failed to transmit CMD %x\n",
1158				   nca.type);
1159			goto error;
1160		}
1161		break;
1162	case ncsi_dev_state_config_done:
1163		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1164			   nc->id);
1165		spin_lock_irqsave(&nc->lock, flags);
1166		nc->state = NCSI_CHANNEL_ACTIVE;
1167
1168		if (ndp->flags & NCSI_DEV_RESET) {
1169			/* A reset event happened during config, start it now */
1170			nc->reconfigure_needed = false;
1171			spin_unlock_irqrestore(&nc->lock, flags);
1172			ncsi_reset_dev(nd);
1173			break;
1174		}
1175
1176		if (nc->reconfigure_needed) {
1177			/* This channel's configuration has been updated
1178			 * part-way during the config state - start the
1179			 * channel configuration over
1180			 */
1181			nc->reconfigure_needed = false;
1182			nc->state = NCSI_CHANNEL_INACTIVE;
1183			spin_unlock_irqrestore(&nc->lock, flags);
1184
1185			spin_lock_irqsave(&ndp->lock, flags);
1186			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1187			spin_unlock_irqrestore(&ndp->lock, flags);
1188
1189			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1190			ncsi_process_next_channel(ndp);
1191			break;
1192		}
1193
1194		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1195			hot_nc = nc;
1196		} else {
1197			hot_nc = NULL;
1198			netdev_dbg(ndp->ndev.dev,
1199				   "NCSI: channel %u link down after config\n",
1200				   nc->id);
1201		}
1202		spin_unlock_irqrestore(&nc->lock, flags);
1203
1204		/* Update the hot channel */
1205		spin_lock_irqsave(&ndp->lock, flags);
1206		ndp->hot_channel = hot_nc;
1207		spin_unlock_irqrestore(&ndp->lock, flags);
1208
1209		ncsi_start_channel_monitor(nc);
1210		ncsi_process_next_channel(ndp);
1211		break;
1212	default:
1213		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1214			     nd->state);
1215	}
1216
1217	return;
1218
1219error:
1220	ncsi_report_link(ndp, true);
1221}
1222
1223static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1224{
1225	struct ncsi_channel *nc, *found, *hot_nc;
1226	struct ncsi_channel_mode *ncm;
1227	unsigned long flags, cflags;
1228	struct ncsi_package *np;
1229	bool with_link;
1230
1231	spin_lock_irqsave(&ndp->lock, flags);
1232	hot_nc = ndp->hot_channel;
1233	spin_unlock_irqrestore(&ndp->lock, flags);
1234
 1235	/* By default the search stops once an inactive channel with link
 1236	 * up is found, unless a preferred channel is set.
 1237	 * If multi_package or multi_channel are configured, all channels
 1238	 * in the whitelist are added to the channel queue.
 1239	 */
1240	found = NULL;
1241	with_link = false;
1242	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1243		if (!(ndp->package_whitelist & (0x1 << np->id)))
1244			continue;
1245		NCSI_FOR_EACH_CHANNEL(np, nc) {
1246			if (!(np->channel_whitelist & (0x1 << nc->id)))
1247				continue;
1248
1249			spin_lock_irqsave(&nc->lock, cflags);
1250
1251			if (!list_empty(&nc->link) ||
1252			    nc->state != NCSI_CHANNEL_INACTIVE) {
1253				spin_unlock_irqrestore(&nc->lock, cflags);
1254				continue;
1255			}
1256
1257			if (!found)
1258				found = nc;
1259
1260			if (nc == hot_nc)
1261				found = nc;
1262
1263			ncm = &nc->modes[NCSI_MODE_LINK];
1264			if (ncm->data[2] & 0x1) {
1265				found = nc;
1266				with_link = true;
1267			}
1268
1269			/* If multi_channel is enabled configure all valid
1270			 * channels whether or not they currently have link
1271			 * so they will have AENs enabled.
1272			 */
1273			if (with_link || np->multi_channel) {
1274				spin_lock_irqsave(&ndp->lock, flags);
1275				list_add_tail_rcu(&nc->link,
1276						  &ndp->channel_queue);
1277				spin_unlock_irqrestore(&ndp->lock, flags);
1278
1279				netdev_dbg(ndp->ndev.dev,
1280					   "NCSI: Channel %u added to queue (link %s)\n",
1281					   nc->id,
1282					   ncm->data[2] & 0x1 ? "up" : "down");
1283			}
1284
1285			spin_unlock_irqrestore(&nc->lock, cflags);
1286
1287			if (with_link && !np->multi_channel)
1288				break;
1289		}
1290		if (with_link && !ndp->multi_package)
1291			break;
1292	}
1293
1294	if (list_empty(&ndp->channel_queue) && found) {
1295		netdev_info(ndp->ndev.dev,
1296			    "NCSI: No channel with link found, configuring channel %u\n",
1297			    found->id);
1298		spin_lock_irqsave(&ndp->lock, flags);
1299		list_add_tail_rcu(&found->link, &ndp->channel_queue);
1300		spin_unlock_irqrestore(&ndp->lock, flags);
1301	} else if (!found) {
1302		netdev_warn(ndp->ndev.dev,
1303			    "NCSI: No channel found to configure!\n");
1304		ncsi_report_link(ndp, true);
1305		return -ENODEV;
1306	}
1307
1308	return ncsi_process_next_channel(ndp);
1309}
1310
1311static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1312{
1313	struct ncsi_package *np;
1314	struct ncsi_channel *nc;
1315	unsigned int cap;
1316	bool has_channel = false;
1317
 1318	/* Hardware arbitration is disabled if any one channel doesn't
 1319	 * explicitly support it.
 1320	 */
1321	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1322		NCSI_FOR_EACH_CHANNEL(np, nc) {
1323			has_channel = true;
1324
1325			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1326			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1327			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1328			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1329				ndp->flags &= ~NCSI_DEV_HWA;
1330				return false;
1331			}
1332		}
1333	}
1334
1335	if (has_channel) {
1336		ndp->flags |= NCSI_DEV_HWA;
1337		return true;
1338	}
1339
1340	ndp->flags &= ~NCSI_DEV_HWA;
1341	return false;
1342}
1343
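/* Probe state machine: deselect all eight possible packages, then select
 * each package in turn, clear initial state on its channels and retrieve
 * their version, capabilities and link status before deselecting it and
 * moving on. Once every package ID has been probed, hardware arbitration
 * support is checked and an active channel is chosen.
 */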
1344static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1345{
1346	struct ncsi_dev *nd = &ndp->ndev;
1347	struct ncsi_package *np;
1348	struct ncsi_channel *nc;
1349	struct ncsi_cmd_arg nca;
1350	unsigned char index;
1351	int ret;
1352
1353	nca.ndp = ndp;
1354	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1355	switch (nd->state) {
1356	case ncsi_dev_state_probe:
1357		nd->state = ncsi_dev_state_probe_deselect;
1358		fallthrough;
1359	case ncsi_dev_state_probe_deselect:
1360		ndp->pending_req_num = 8;
1361
1362		/* Deselect all possible packages */
1363		nca.type = NCSI_PKT_CMD_DP;
1364		nca.channel = NCSI_RESERVED_CHANNEL;
1365		for (index = 0; index < 8; index++) {
1366			nca.package = index;
1367			ret = ncsi_xmit_cmd(&nca);
1368			if (ret)
1369				goto error;
1370		}
1371
1372		nd->state = ncsi_dev_state_probe_package;
1373		break;
1374	case ncsi_dev_state_probe_package:
1375		ndp->pending_req_num = 1;
1376
1377		nca.type = NCSI_PKT_CMD_SP;
1378		nca.bytes[0] = 1;
1379		nca.package = ndp->package_probe_id;
1380		nca.channel = NCSI_RESERVED_CHANNEL;
1381		ret = ncsi_xmit_cmd(&nca);
1382		if (ret)
1383			goto error;
1384		nd->state = ncsi_dev_state_probe_channel;
1385		break;
1386	case ncsi_dev_state_probe_channel:
1387		ndp->active_package = ncsi_find_package(ndp,
1388							ndp->package_probe_id);
1389		if (!ndp->active_package) {
1390			/* No response */
1391			nd->state = ncsi_dev_state_probe_dp;
1392			schedule_work(&ndp->work);
1393			break;
1394		}
1395		nd->state = ncsi_dev_state_probe_cis;
1396		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
1397		    ndp->mlx_multi_host)
1398			nd->state = ncsi_dev_state_probe_mlx_gma;
1399
1400		schedule_work(&ndp->work);
1401		break;
1402	case ncsi_dev_state_probe_mlx_gma:
1403		ndp->pending_req_num = 1;
1404
1405		nca.type = NCSI_PKT_CMD_OEM;
1406		nca.package = ndp->active_package->id;
1407		nca.channel = 0;
1408		ret = ncsi_oem_gma_handler_mlx(&nca);
1409		if (ret)
1410			goto error;
1411
1412		nd->state = ncsi_dev_state_probe_mlx_smaf;
1413		break;
1414	case ncsi_dev_state_probe_mlx_smaf:
1415		ndp->pending_req_num = 1;
1416
1417		nca.type = NCSI_PKT_CMD_OEM;
1418		nca.package = ndp->active_package->id;
1419		nca.channel = 0;
1420		ret = ncsi_oem_smaf_mlx(&nca);
1421		if (ret)
1422			goto error;
1423
1424		nd->state = ncsi_dev_state_probe_cis;
1425		break;
1426	case ncsi_dev_state_probe_cis:
1427		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1428
1429		/* Clear initial state */
1430		nca.type = NCSI_PKT_CMD_CIS;
1431		nca.package = ndp->active_package->id;
1432		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1433			nca.channel = index;
1434			ret = ncsi_xmit_cmd(&nca);
1435			if (ret)
1436				goto error;
1437		}
1438
1439		nd->state = ncsi_dev_state_probe_gvi;
1440		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
1441			nd->state = ncsi_dev_state_probe_keep_phy;
1442		break;
1443	case ncsi_dev_state_probe_keep_phy:
1444		ndp->pending_req_num = 1;
1445
1446		nca.type = NCSI_PKT_CMD_OEM;
1447		nca.package = ndp->active_package->id;
1448		nca.channel = 0;
1449		ret = ncsi_oem_keep_phy_intel(&nca);
1450		if (ret)
1451			goto error;
1452
1453		nd->state = ncsi_dev_state_probe_gvi;
1454		break;
1455	case ncsi_dev_state_probe_gvi:
1456	case ncsi_dev_state_probe_gc:
1457	case ncsi_dev_state_probe_gls:
1458		np = ndp->active_package;
1459		ndp->pending_req_num = np->channel_num;
1460
1461		/* Retrieve version, capability or link status */
1462		if (nd->state == ncsi_dev_state_probe_gvi)
1463			nca.type = NCSI_PKT_CMD_GVI;
1464		else if (nd->state == ncsi_dev_state_probe_gc)
1465			nca.type = NCSI_PKT_CMD_GC;
1466		else
1467			nca.type = NCSI_PKT_CMD_GLS;
1468
1469		nca.package = np->id;
1470		NCSI_FOR_EACH_CHANNEL(np, nc) {
1471			nca.channel = nc->id;
1472			ret = ncsi_xmit_cmd(&nca);
1473			if (ret)
1474				goto error;
1475		}
1476
1477		if (nd->state == ncsi_dev_state_probe_gvi)
1478			nd->state = ncsi_dev_state_probe_gc;
1479		else if (nd->state == ncsi_dev_state_probe_gc)
1480			nd->state = ncsi_dev_state_probe_gls;
1481		else
1482			nd->state = ncsi_dev_state_probe_dp;
1483		break;
1484	case ncsi_dev_state_probe_dp:
1485		ndp->pending_req_num = 1;
1486
1487		/* Deselect the current package */
1488		nca.type = NCSI_PKT_CMD_DP;
1489		nca.package = ndp->package_probe_id;
1490		nca.channel = NCSI_RESERVED_CHANNEL;
1491		ret = ncsi_xmit_cmd(&nca);
1492		if (ret)
1493			goto error;
1494
1495		/* Probe next package */
1496		ndp->package_probe_id++;
1497		if (ndp->package_probe_id >= 8) {
1498			/* Probe finished */
1499			ndp->flags |= NCSI_DEV_PROBED;
1500			break;
1501		}
1502		nd->state = ncsi_dev_state_probe_package;
1503		ndp->active_package = NULL;
1504		break;
1505	default:
1506		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1507			    nd->state);
1508	}
1509
1510	if (ndp->flags & NCSI_DEV_PROBED) {
1511		/* Check if all packages have HWA support */
1512		ncsi_check_hwa(ndp);
1513		ncsi_choose_active_channel(ndp);
1514	}
1515
1516	return;
1517error:
1518	netdev_err(ndp->ndev.dev,
1519		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
1520		   nca.type);
1521	ncsi_report_link(ndp, true);
1522}
1523
1524static void ncsi_dev_work(struct work_struct *work)
1525{
1526	struct ncsi_dev_priv *ndp = container_of(work,
1527			struct ncsi_dev_priv, work);
1528	struct ncsi_dev *nd = &ndp->ndev;
1529
1530	switch (nd->state & ncsi_dev_state_major) {
1531	case ncsi_dev_state_probe:
1532		ncsi_probe_channel(ndp);
1533		break;
1534	case ncsi_dev_state_suspend:
1535		ncsi_suspend_channel(ndp);
1536		break;
1537	case ncsi_dev_state_config:
1538		ncsi_configure_channel(ndp);
1539		break;
1540	default:
1541		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1542			    nd->state);
1543	}
1544}
1545
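/* Take the next channel off the channel queue and either configure it
 * (previously inactive) or suspend it (previously active). With an empty
 * queue, reshuffle the channel selection if requested or report the
 * final link state.
 */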
1546int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1547{
1548	struct ncsi_channel *nc;
1549	int old_state;
1550	unsigned long flags;
1551
1552	spin_lock_irqsave(&ndp->lock, flags);
1553	nc = list_first_or_null_rcu(&ndp->channel_queue,
1554				    struct ncsi_channel, link);
1555	if (!nc) {
1556		spin_unlock_irqrestore(&ndp->lock, flags);
1557		goto out;
1558	}
1559
1560	list_del_init(&nc->link);
1561	spin_unlock_irqrestore(&ndp->lock, flags);
1562
1563	spin_lock_irqsave(&nc->lock, flags);
1564	old_state = nc->state;
1565	nc->state = NCSI_CHANNEL_INVISIBLE;
1566	spin_unlock_irqrestore(&nc->lock, flags);
1567
1568	ndp->active_channel = nc;
1569	ndp->active_package = nc->package;
1570
1571	switch (old_state) {
1572	case NCSI_CHANNEL_INACTIVE:
1573		ndp->ndev.state = ncsi_dev_state_config;
1574		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
 1575			   nc->id);
1576		ncsi_configure_channel(ndp);
1577		break;
1578	case NCSI_CHANNEL_ACTIVE:
1579		ndp->ndev.state = ncsi_dev_state_suspend;
1580		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1581			   nc->id);
1582		ncsi_suspend_channel(ndp);
1583		break;
1584	default:
1585		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1586			   old_state, nc->package->id, nc->id);
1587		ncsi_report_link(ndp, false);
1588		return -EINVAL;
1589	}
1590
1591	return 0;
1592
1593out:
1594	ndp->active_channel = NULL;
1595	ndp->active_package = NULL;
1596	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1597		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1598		return ncsi_choose_active_channel(ndp);
1599	}
1600
1601	ncsi_report_link(ndp, false);
1602	return -ENODEV;
1603}
1604
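/* Requeue every active channel for reconfiguration, marking busy channels
 * dirty instead so they are reconfigured once their current transition
 * completes. Returns the number of channels actually kicked.
 */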
1605static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1606{
1607	struct ncsi_dev *nd = &ndp->ndev;
1608	struct ncsi_channel *nc;
1609	struct ncsi_package *np;
1610	unsigned long flags;
1611	unsigned int n = 0;
1612
1613	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1614		NCSI_FOR_EACH_CHANNEL(np, nc) {
1615			spin_lock_irqsave(&nc->lock, flags);
1616
 1617			/* Channels may be busy, mark dirty instead of
 1618			 * kicking if:
 1619			 * a) not ACTIVE (configured)
 1620			 * b) in the channel_queue (to be configured)
 1621			 * c) its ndev is in the config state
 1622			 */
1623			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1624				if ((ndp->ndev.state & 0xff00) ==
1625						ncsi_dev_state_config ||
1626						!list_empty(&nc->link)) {
1627					netdev_dbg(nd->dev,
1628						   "NCSI: channel %p marked dirty\n",
1629						   nc);
1630					nc->reconfigure_needed = true;
1631				}
1632				spin_unlock_irqrestore(&nc->lock, flags);
1633				continue;
1634			}
1635
1636			spin_unlock_irqrestore(&nc->lock, flags);
1637
1638			ncsi_stop_channel_monitor(nc);
1639			spin_lock_irqsave(&nc->lock, flags);
1640			nc->state = NCSI_CHANNEL_INACTIVE;
1641			spin_unlock_irqrestore(&nc->lock, flags);
1642
1643			spin_lock_irqsave(&ndp->lock, flags);
1644			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1645			spin_unlock_irqrestore(&ndp->lock, flags);
1646
1647			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1648			n++;
1649		}
1650	}
1651
1652	return n;
1653}
1654
1655int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1656{
1657	struct ncsi_dev_priv *ndp;
1658	unsigned int n_vids = 0;
1659	struct vlan_vid *vlan;
1660	struct ncsi_dev *nd;
1661	bool found = false;
1662
1663	if (vid == 0)
1664		return 0;
1665
1666	nd = ncsi_find_dev(dev);
1667	if (!nd) {
1668		netdev_warn(dev, "NCSI: No net_device?\n");
1669		return 0;
1670	}
1671
1672	ndp = TO_NCSI_DEV_PRIV(nd);
1673
1674	/* Add the VLAN id to our internal list */
1675	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1676		n_vids++;
1677		if (vlan->vid == vid) {
1678			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1679				   vid);
1680			return 0;
1681		}
1682	}
1683	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1684		netdev_warn(dev,
1685			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1686			    vid, NCSI_MAX_VLAN_VIDS);
1687		return -ENOSPC;
1688	}
1689
1690	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1691	if (!vlan)
1692		return -ENOMEM;
1693
1694	vlan->proto = proto;
1695	vlan->vid = vid;
1696	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1697
1698	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1699
1700	found = ncsi_kick_channels(ndp) != 0;
1701
1702	return found ? ncsi_process_next_channel(ndp) : 0;
1703}
1704EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1705
1706int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1707{
1708	struct vlan_vid *vlan, *tmp;
1709	struct ncsi_dev_priv *ndp;
1710	struct ncsi_dev *nd;
1711	bool found = false;
1712
1713	if (vid == 0)
1714		return 0;
1715
1716	nd = ncsi_find_dev(dev);
1717	if (!nd) {
1718		netdev_warn(dev, "NCSI: no net_device?\n");
1719		return 0;
1720	}
1721
1722	ndp = TO_NCSI_DEV_PRIV(nd);
1723
1724	/* Remove the VLAN id from our internal list */
1725	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1726		if (vlan->vid == vid) {
1727			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1728			list_del_rcu(&vlan->list);
1729			found = true;
1730			kfree(vlan);
1731		}
1732
1733	if (!found) {
1734		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1735		return -EINVAL;
1736	}
1737
1738	found = ncsi_kick_channels(ndp) != 0;
1739
1740	return found ? ncsi_process_next_channel(ndp) : 0;
1741}
1742EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1743
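/* Register a net_device for NCSI management: allocate the private state,
 * install the NCSI packet type handler and add the device to the global
 * list. Returns the existing ncsi_dev if the device is already registered.
 */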
1744struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1745				   void (*handler)(struct ncsi_dev *ndev))
1746{
1747	struct ncsi_dev_priv *ndp;
1748	struct ncsi_dev *nd;
1749	struct platform_device *pdev;
1750	struct device_node *np;
1751	unsigned long flags;
1752	int i;
1753
1754	/* Check if the device has been registered or not */
1755	nd = ncsi_find_dev(dev);
1756	if (nd)
1757		return nd;
1758
1759	/* Create NCSI device */
1760	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1761	if (!ndp)
1762		return NULL;
1763
1764	nd = &ndp->ndev;
1765	nd->state = ncsi_dev_state_registered;
1766	nd->dev = dev;
1767	nd->handler = handler;
1768	ndp->pending_req_num = 0;
1769	INIT_LIST_HEAD(&ndp->channel_queue);
1770	INIT_LIST_HEAD(&ndp->vlan_vids);
1771	INIT_WORK(&ndp->work, ncsi_dev_work);
1772	ndp->package_whitelist = UINT_MAX;
1773
1774	/* Initialize private NCSI device */
1775	spin_lock_init(&ndp->lock);
1776	INIT_LIST_HEAD(&ndp->packages);
1777	ndp->request_id = NCSI_REQ_START_IDX;
1778	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1779		ndp->requests[i].id = i;
1780		ndp->requests[i].ndp = ndp;
1781		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1782	}
1783
1784	spin_lock_irqsave(&ncsi_dev_lock, flags);
1785	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1786	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1787
1788	/* Register NCSI packet Rx handler */
1789	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1790	ndp->ptype.func = ncsi_rcv_rsp;
1791	ndp->ptype.dev = dev;
1792	dev_add_pack(&ndp->ptype);
1793
1794	pdev = to_platform_device(dev->dev.parent);
1795	if (pdev) {
1796		np = pdev->dev.of_node;
1797		if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
1798			   of_property_read_bool(np, "mlx,multi-host")))
1799			ndp->mlx_multi_host = true;
1800	}
1801
1802	return nd;
1803}
1804EXPORT_SYMBOL_GPL(ncsi_register_dev);
1805
1806int ncsi_start_dev(struct ncsi_dev *nd)
1807{
1808	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1809
1810	if (nd->state != ncsi_dev_state_registered &&
1811	    nd->state != ncsi_dev_state_functional)
1812		return -ENOTTY;
1813
1814	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1815		ndp->package_probe_id = 0;
1816		nd->state = ncsi_dev_state_probe;
1817		schedule_work(&ndp->work);
1818		return 0;
1819	}
1820
1821	return ncsi_reset_dev(nd);
1822}
1823EXPORT_SYMBOL_GPL(ncsi_start_dev);
1824
1825void ncsi_stop_dev(struct ncsi_dev *nd)
1826{
1827	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1828	struct ncsi_package *np;
1829	struct ncsi_channel *nc;
1830	bool chained;
1831	int old_state;
1832	unsigned long flags;
1833
1834	/* Stop the channel monitor on any active channels. Don't reset the
1835	 * channel state so we know which were active when ncsi_start_dev()
1836	 * is next called.
1837	 */
1838	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1839		NCSI_FOR_EACH_CHANNEL(np, nc) {
1840			ncsi_stop_channel_monitor(nc);
1841
1842			spin_lock_irqsave(&nc->lock, flags);
1843			chained = !list_empty(&nc->link);
1844			old_state = nc->state;
1845			spin_unlock_irqrestore(&nc->lock, flags);
1846
1847			WARN_ON_ONCE(chained ||
1848				     old_state == NCSI_CHANNEL_INVISIBLE);
1849		}
1850	}
1851
1852	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1853	ncsi_report_link(ndp, true);
1854}
1855EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1856
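/* Reset the NCSI device: defer if a suspend/config operation is still in
 * progress, otherwise clear the channel queue, suspend the currently
 * active channel (if any) and rerun channel selection.
 */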
1857int ncsi_reset_dev(struct ncsi_dev *nd)
1858{
1859	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1860	struct ncsi_channel *nc, *active, *tmp;
1861	struct ncsi_package *np;
1862	unsigned long flags;
1863
1864	spin_lock_irqsave(&ndp->lock, flags);
1865
1866	if (!(ndp->flags & NCSI_DEV_RESET)) {
1867		/* Haven't been called yet, check states */
1868		switch (nd->state & ncsi_dev_state_major) {
1869		case ncsi_dev_state_registered:
1870		case ncsi_dev_state_probe:
1871			/* Not even probed yet - do nothing */
1872			spin_unlock_irqrestore(&ndp->lock, flags);
1873			return 0;
1874		case ncsi_dev_state_suspend:
1875		case ncsi_dev_state_config:
1876			/* Wait for the channel to finish its suspend/config
1877			 * operation; once it finishes it will check for
1878			 * NCSI_DEV_RESET and reset the state.
1879			 */
1880			ndp->flags |= NCSI_DEV_RESET;
1881			spin_unlock_irqrestore(&ndp->lock, flags);
1882			return 0;
1883		}
1884	} else {
1885		switch (nd->state) {
1886		case ncsi_dev_state_suspend_done:
1887		case ncsi_dev_state_config_done:
1888		case ncsi_dev_state_functional:
1889			/* Ok */
1890			break;
1891		default:
1892			/* Current reset operation happening */
1893			spin_unlock_irqrestore(&ndp->lock, flags);
1894			return 0;
1895		}
1896	}
1897
1898	if (!list_empty(&ndp->channel_queue)) {
1899		/* Clear any channel queue we may have interrupted */
1900		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1901			list_del_init(&nc->link);
1902	}
1903	spin_unlock_irqrestore(&ndp->lock, flags);
1904
1905	active = NULL;
1906	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1907		NCSI_FOR_EACH_CHANNEL(np, nc) {
1908			spin_lock_irqsave(&nc->lock, flags);
1909
1910			if (nc->state == NCSI_CHANNEL_ACTIVE) {
1911				active = nc;
1912				nc->state = NCSI_CHANNEL_INVISIBLE;
1913				spin_unlock_irqrestore(&nc->lock, flags);
1914				ncsi_stop_channel_monitor(nc);
1915				break;
1916			}
1917
1918			spin_unlock_irqrestore(&nc->lock, flags);
1919		}
1920		if (active)
1921			break;
1922	}
1923
1924	if (!active) {
1925		/* Done */
1926		spin_lock_irqsave(&ndp->lock, flags);
1927		ndp->flags &= ~NCSI_DEV_RESET;
1928		spin_unlock_irqrestore(&ndp->lock, flags);
1929		return ncsi_choose_active_channel(ndp);
1930	}
1931
1932	spin_lock_irqsave(&ndp->lock, flags);
1933	ndp->flags |= NCSI_DEV_RESET;
1934	ndp->active_channel = active;
1935	ndp->active_package = active->package;
1936	spin_unlock_irqrestore(&ndp->lock, flags);
1937
1938	nd->state = ncsi_dev_state_suspend;
1939	schedule_work(&ndp->work);
1940	return 0;
1941}
1942
1943void ncsi_unregister_dev(struct ncsi_dev *nd)
1944{
1945	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1946	struct ncsi_package *np, *tmp;
1947	unsigned long flags;
1948
1949	dev_remove_pack(&ndp->ptype);
1950
1951	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1952		ncsi_remove_package(np);
1953
1954	spin_lock_irqsave(&ncsi_dev_lock, flags);
1955	list_del_rcu(&ndp->node);
1956	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1957
1958	kfree(ndp);
1959}
1960EXPORT_SYMBOL_GPL(ncsi_unregister_dev);