   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright Gavin Shan, IBM Corporation 2016.
   4 */
   5
   6#include <linux/module.h>
   7#include <linux/kernel.h>
   8#include <linux/init.h>
   9#include <linux/netdevice.h>
  10#include <linux/skbuff.h>
  11#include <linux/of.h>
  12#include <linux/platform_device.h>
  13
  14#include <net/ncsi.h>
  15#include <net/net_namespace.h>
  16#include <net/sock.h>
  17#include <net/addrconf.h>
  18#include <net/ipv6.h>
  19#include <net/genetlink.h>
  20
  21#include "internal.h"
  22#include "ncsi-pkt.h"
  23#include "ncsi-netlink.h"
  24
  25LIST_HEAD(ncsi_dev_list);
  26DEFINE_SPINLOCK(ncsi_dev_lock);
  27
  28bool ncsi_channel_has_link(struct ncsi_channel *channel)
  29{
  30	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
  31}
  32
  33bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
  34			  struct ncsi_channel *channel)
  35{
  36	struct ncsi_package *np;
  37	struct ncsi_channel *nc;
  38
  39	NCSI_FOR_EACH_PACKAGE(ndp, np)
  40		NCSI_FOR_EACH_CHANNEL(np, nc) {
  41			if (nc == channel)
  42				continue;
  43			if (nc->state == NCSI_CHANNEL_ACTIVE &&
  44			    ncsi_channel_has_link(nc))
  45				return false;
  46		}
  47
  48	return true;
  49}
  50
  51static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
  52{
  53	struct ncsi_dev *nd = &ndp->ndev;
  54	struct ncsi_package *np;
  55	struct ncsi_channel *nc;
  56	unsigned long flags;
  57
  58	nd->state = ncsi_dev_state_functional;
  59	if (force_down) {
  60		nd->link_up = 0;
  61		goto report;
  62	}
  63
  64	nd->link_up = 0;
  65	NCSI_FOR_EACH_PACKAGE(ndp, np) {
  66		NCSI_FOR_EACH_CHANNEL(np, nc) {
  67			spin_lock_irqsave(&nc->lock, flags);
  68
  69			if (!list_empty(&nc->link) ||
  70			    nc->state != NCSI_CHANNEL_ACTIVE) {
  71				spin_unlock_irqrestore(&nc->lock, flags);
  72				continue;
  73			}
  74
  75			if (ncsi_channel_has_link(nc)) {
  76				spin_unlock_irqrestore(&nc->lock, flags);
  77				nd->link_up = 1;
  78				goto report;
  79			}
  80
  81			spin_unlock_irqrestore(&nc->lock, flags);
  82		}
  83	}
  84
  85report:
  86	nd->handler(nd);
  87}
  88
  89static void ncsi_channel_monitor(struct timer_list *t)
  90{
  91	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
  92	struct ncsi_package *np = nc->package;
  93	struct ncsi_dev_priv *ndp = np->ndp;
  94	struct ncsi_channel_mode *ncm;
  95	struct ncsi_cmd_arg nca;
  96	bool enabled, chained;
  97	unsigned int monitor_state;
  98	unsigned long flags;
  99	int state, ret;
 100
 101	spin_lock_irqsave(&nc->lock, flags);
 102	state = nc->state;
 103	chained = !list_empty(&nc->link);
 104	enabled = nc->monitor.enabled;
 105	monitor_state = nc->monitor.state;
 106	spin_unlock_irqrestore(&nc->lock, flags);
 107
 108	if (!enabled)
 109		return;		/* expected race disabling timer */
 110	if (WARN_ON_ONCE(chained))
 111		goto bad_state;
 112
 113	if (state != NCSI_CHANNEL_INACTIVE &&
 114	    state != NCSI_CHANNEL_ACTIVE) {
 115bad_state:
 116		netdev_warn(ndp->ndev.dev,
 117			    "Bad NCSI monitor state channel %d 0x%x %s queue\n",
 118			    nc->id, state, chained ? "on" : "off");
 119		spin_lock_irqsave(&nc->lock, flags);
 120		nc->monitor.enabled = false;
 121		spin_unlock_irqrestore(&nc->lock, flags);
 122		return;
 123	}
 124
 125	switch (monitor_state) {
 126	case NCSI_CHANNEL_MONITOR_START:
 127	case NCSI_CHANNEL_MONITOR_RETRY:
 128		nca.ndp = ndp;
 129		nca.package = np->id;
 130		nca.channel = nc->id;
 131		nca.type = NCSI_PKT_CMD_GLS;
 132		nca.req_flags = 0;
 133		ret = ncsi_xmit_cmd(&nca);
 134		if (ret)
 135			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
 136				   ret);
 137		break;
 138	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
 139		break;
 140	default:
 141		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
 142			   nc->id);
 143		ncsi_report_link(ndp, true);
 144		ndp->flags |= NCSI_DEV_RESHUFFLE;
  145
 146		ncm = &nc->modes[NCSI_MODE_LINK];
 147		spin_lock_irqsave(&nc->lock, flags);
 148		nc->monitor.enabled = false;
 149		nc->state = NCSI_CHANNEL_INVISIBLE;
 150		ncm->data[2] &= ~0x1;
 151		spin_unlock_irqrestore(&nc->lock, flags);
 152
 153		spin_lock_irqsave(&ndp->lock, flags);
 154		nc->state = NCSI_CHANNEL_ACTIVE;
 155		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
 156		spin_unlock_irqrestore(&ndp->lock, flags);
 157		ncsi_process_next_channel(ndp);
 158		return;
 159	}
 160
 161	spin_lock_irqsave(&nc->lock, flags);
 162	nc->monitor.state++;
 163	spin_unlock_irqrestore(&nc->lock, flags);
 164	mod_timer(&nc->monitor.timer, jiffies + HZ);
 165}
 166
 167void ncsi_start_channel_monitor(struct ncsi_channel *nc)
 168{
 169	unsigned long flags;
 170
 171	spin_lock_irqsave(&nc->lock, flags);
 172	WARN_ON_ONCE(nc->monitor.enabled);
 173	nc->monitor.enabled = true;
 174	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
 175	spin_unlock_irqrestore(&nc->lock, flags);
 176
 177	mod_timer(&nc->monitor.timer, jiffies + HZ);
 178}
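
/* Timing sketch (inferred from the code above): the monitor timer re-arms
 * roughly once per second (jiffies + HZ). In the START/RETRY states a Get
 * Link Status (GLS) command is sent; the GLS response handler is expected
 * to wind monitor.state back, so if responses stop arriving the counter
 * walks past the WAIT window and the default case in ncsi_channel_monitor()
 * declares a timeout and requeues the channel.
 */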
 179
 180void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
 181{
 182	unsigned long flags;
 183
 184	spin_lock_irqsave(&nc->lock, flags);
 185	if (!nc->monitor.enabled) {
 186		spin_unlock_irqrestore(&nc->lock, flags);
 187		return;
 188	}
 189	nc->monitor.enabled = false;
 190	spin_unlock_irqrestore(&nc->lock, flags);
 191
 192	del_timer_sync(&nc->monitor.timer);
 193}
 194
 195struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
 196				       unsigned char id)
 197{
 198	struct ncsi_channel *nc;
 199
 200	NCSI_FOR_EACH_CHANNEL(np, nc) {
 201		if (nc->id == id)
 202			return nc;
 203	}
 204
 205	return NULL;
 206}
 207
 208struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
 209{
 210	struct ncsi_channel *nc, *tmp;
 211	int index;
 212	unsigned long flags;
 213
 214	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
 215	if (!nc)
 216		return NULL;
 217
 218	nc->id = id;
 219	nc->package = np;
 220	nc->state = NCSI_CHANNEL_INACTIVE;
 221	nc->monitor.enabled = false;
 222	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
 223	spin_lock_init(&nc->lock);
 224	INIT_LIST_HEAD(&nc->link);
 225	for (index = 0; index < NCSI_CAP_MAX; index++)
 226		nc->caps[index].index = index;
 227	for (index = 0; index < NCSI_MODE_MAX; index++)
 228		nc->modes[index].index = index;
 229
 230	spin_lock_irqsave(&np->lock, flags);
 231	tmp = ncsi_find_channel(np, id);
 232	if (tmp) {
 233		spin_unlock_irqrestore(&np->lock, flags);
 234		kfree(nc);
 235		return tmp;
 236	}
 237
 238	list_add_tail_rcu(&nc->node, &np->channels);
 239	np->channel_num++;
 240	spin_unlock_irqrestore(&np->lock, flags);
 241
 242	return nc;
 243}
 244
 245static void ncsi_remove_channel(struct ncsi_channel *nc)
 246{
 247	struct ncsi_package *np = nc->package;
 248	unsigned long flags;
 249
 250	spin_lock_irqsave(&nc->lock, flags);
 251
 252	/* Release filters */
 253	kfree(nc->mac_filter.addrs);
 254	kfree(nc->vlan_filter.vids);
 255
 256	nc->state = NCSI_CHANNEL_INACTIVE;
 257	spin_unlock_irqrestore(&nc->lock, flags);
 258	ncsi_stop_channel_monitor(nc);
 259
 260	/* Remove and free channel */
 261	spin_lock_irqsave(&np->lock, flags);
 262	list_del_rcu(&nc->node);
 263	np->channel_num--;
 264	spin_unlock_irqrestore(&np->lock, flags);
 265
 266	kfree(nc);
 267}
 268
 269struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
 270				       unsigned char id)
 271{
 272	struct ncsi_package *np;
 273
 274	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 275		if (np->id == id)
 276			return np;
 277	}
 278
 279	return NULL;
 280}
 281
 282struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
 283				      unsigned char id)
 284{
 285	struct ncsi_package *np, *tmp;
 286	unsigned long flags;
 287
 288	np = kzalloc(sizeof(*np), GFP_ATOMIC);
 289	if (!np)
 290		return NULL;
 291
 292	np->id = id;
 293	np->ndp = ndp;
 294	spin_lock_init(&np->lock);
 295	INIT_LIST_HEAD(&np->channels);
 296	np->channel_whitelist = UINT_MAX;
 297
 298	spin_lock_irqsave(&ndp->lock, flags);
 299	tmp = ncsi_find_package(ndp, id);
 300	if (tmp) {
 301		spin_unlock_irqrestore(&ndp->lock, flags);
 302		kfree(np);
 303		return tmp;
 304	}
 305
 306	list_add_tail_rcu(&np->node, &ndp->packages);
 307	ndp->package_num++;
 308	spin_unlock_irqrestore(&ndp->lock, flags);
 309
 310	return np;
 311}
 312
 313void ncsi_remove_package(struct ncsi_package *np)
 314{
 315	struct ncsi_dev_priv *ndp = np->ndp;
 316	struct ncsi_channel *nc, *tmp;
 317	unsigned long flags;
 318
 319	/* Release all child channels */
 320	list_for_each_entry_safe(nc, tmp, &np->channels, node)
 321		ncsi_remove_channel(nc);
 322
 323	/* Remove and free package */
 324	spin_lock_irqsave(&ndp->lock, flags);
 325	list_del_rcu(&np->node);
 326	ndp->package_num--;
 327	spin_unlock_irqrestore(&ndp->lock, flags);
 328
 329	kfree(np);
 330}
 331
 332void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
 333				   unsigned char id,
 334				   struct ncsi_package **np,
 335				   struct ncsi_channel **nc)
 336{
 337	struct ncsi_package *p;
 338	struct ncsi_channel *c;
 339
 340	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
 341	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
 342
 343	if (np)
 344		*np = p;
 345	if (nc)
 346		*nc = c;
 347}
 348
  349/* Two consecutive NCSI commands shouldn't use the same packet ID,
  350 * otherwise a stale response could be matched against the wrong
  351 * command. So the available IDs are allocated in a round-robin fashion.
  352 */
 353struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
 354					unsigned int req_flags)
 355{
 356	struct ncsi_request *nr = NULL;
 357	int i, limit = ARRAY_SIZE(ndp->requests);
 358	unsigned long flags;
 359
  360	/* Look for an available request from the cursor up to the ceiling */
 361	spin_lock_irqsave(&ndp->lock, flags);
 362	for (i = ndp->request_id; i < limit; i++) {
 363		if (ndp->requests[i].used)
 364			continue;
 365
 366		nr = &ndp->requests[i];
 367		nr->used = true;
 368		nr->flags = req_flags;
 369		ndp->request_id = i + 1;
 370		goto found;
 371	}
 372
  373	/* Fall back to searching from the starting cursor */
 374	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
 375		if (ndp->requests[i].used)
 376			continue;
 377
 378		nr = &ndp->requests[i];
 379		nr->used = true;
 380		nr->flags = req_flags;
 381		ndp->request_id = i + 1;
 382		goto found;
 383	}
 384
 385found:
 386	spin_unlock_irqrestore(&ndp->lock, flags);
 387	return nr;
 388}
 389
 390void ncsi_free_request(struct ncsi_request *nr)
 391{
 392	struct ncsi_dev_priv *ndp = nr->ndp;
 393	struct sk_buff *cmd, *rsp;
 394	unsigned long flags;
 395	bool driven;
 396
 397	if (nr->enabled) {
 398		nr->enabled = false;
 399		del_timer_sync(&nr->timer);
 400	}
 401
 402	spin_lock_irqsave(&ndp->lock, flags);
 403	cmd = nr->cmd;
 404	rsp = nr->rsp;
 405	nr->cmd = NULL;
 406	nr->rsp = NULL;
 407	nr->used = false;
 408	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
 409	spin_unlock_irqrestore(&ndp->lock, flags);
 410
 411	if (driven && cmd && --ndp->pending_req_num == 0)
 412		schedule_work(&ndp->work);
 413
 414	/* Release command and response */
 415	consume_skb(cmd);
 416	consume_skb(rsp);
 417}
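
/* A minimal sketch of the request life cycle; the real caller lives in
 * ncsi-cmd.c, and the skb handling shown here is only indicative:
 *
 *	nr = ncsi_alloc_request(ndp, NCSI_REQ_FLAG_EVENT_DRIVEN);
 *	if (!nr)
 *		return -ENOMEM;			(no free request slot)
 *	nr->cmd = skb;				(attach the command skb)
 *	... transmit the command and arm nr->timer ...
 *	ncsi_free_request(nr);			(on error, on timeout, or once
 *						 the response has been handled)
 */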
 418
 419struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
 420{
 421	struct ncsi_dev_priv *ndp;
 422
 423	NCSI_FOR_EACH_DEV(ndp) {
 424		if (ndp->ndev.dev == dev)
 425			return &ndp->ndev;
 426	}
 427
 428	return NULL;
 429}
 430
 431static void ncsi_request_timeout(struct timer_list *t)
 432{
 433	struct ncsi_request *nr = from_timer(nr, t, timer);
 434	struct ncsi_dev_priv *ndp = nr->ndp;
 435	struct ncsi_cmd_pkt *cmd;
 436	struct ncsi_package *np;
 437	struct ncsi_channel *nc;
 438	unsigned long flags;
 439
  440	/* If the request already has an associated response,
  441	 * let the response handler release it.
  442	 */
 443	spin_lock_irqsave(&ndp->lock, flags);
 444	nr->enabled = false;
 445	if (nr->rsp || !nr->cmd) {
 446		spin_unlock_irqrestore(&ndp->lock, flags);
 447		return;
 448	}
 449	spin_unlock_irqrestore(&ndp->lock, flags);
 450
 451	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
 452		if (nr->cmd) {
 453			/* Find the package */
 454			cmd = (struct ncsi_cmd_pkt *)
 455			      skb_network_header(nr->cmd);
 456			ncsi_find_package_and_channel(ndp,
 457						      cmd->cmd.common.channel,
 458						      &np, &nc);
 459			ncsi_send_netlink_timeout(nr, np, nc);
 460		}
 461	}
 462
 463	/* Release the request */
 464	ncsi_free_request(nr);
 465}
 466
 467static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
 468{
 469	struct ncsi_dev *nd = &ndp->ndev;
 470	struct ncsi_package *np;
 471	struct ncsi_channel *nc, *tmp;
 472	struct ncsi_cmd_arg nca;
 473	unsigned long flags;
 474	int ret;
 475
 476	np = ndp->active_package;
 477	nc = ndp->active_channel;
 478	nca.ndp = ndp;
 479	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
 480	switch (nd->state) {
 481	case ncsi_dev_state_suspend:
 482		nd->state = ncsi_dev_state_suspend_select;
 483		fallthrough;
 484	case ncsi_dev_state_suspend_select:
 485		ndp->pending_req_num = 1;
 486
 487		nca.type = NCSI_PKT_CMD_SP;
 488		nca.package = np->id;
 489		nca.channel = NCSI_RESERVED_CHANNEL;
 490		if (ndp->flags & NCSI_DEV_HWA)
 491			nca.bytes[0] = 0;
 492		else
 493			nca.bytes[0] = 1;
 494
  495		/* Retrieve the latest link states of the channels in the
  496		 * current package when the active channel needs to fail over
  497		 * to another one. We will then possibly select another channel
  498		 * as the next active one, and the channels' link states are
  499		 * the most important factor in that selection, so they must
  500		 * be accurate. Unfortunately, the link states of inactive
  501		 * channels can't be updated via LSC AEN in time.
  502		 */
 503		if (ndp->flags & NCSI_DEV_RESHUFFLE)
 504			nd->state = ncsi_dev_state_suspend_gls;
 505		else
 506			nd->state = ncsi_dev_state_suspend_dcnt;
 507		ret = ncsi_xmit_cmd(&nca);
 508		if (ret)
 509			goto error;
 510
 511		break;
 512	case ncsi_dev_state_suspend_gls:
 513		ndp->pending_req_num = 1;
 514
 515		nca.type = NCSI_PKT_CMD_GLS;
 516		nca.package = np->id;
 517		nca.channel = ndp->channel_probe_id;
 518		ret = ncsi_xmit_cmd(&nca);
 519		if (ret)
 520			goto error;
 521		ndp->channel_probe_id++;
 522
 523		if (ndp->channel_probe_id == ndp->channel_count) {
 524			ndp->channel_probe_id = 0;
 525			nd->state = ncsi_dev_state_suspend_dcnt;
 526		}
 527
 528		break;
 529	case ncsi_dev_state_suspend_dcnt:
 530		ndp->pending_req_num = 1;
 531
 532		nca.type = NCSI_PKT_CMD_DCNT;
 533		nca.package = np->id;
 534		nca.channel = nc->id;
 535
 536		nd->state = ncsi_dev_state_suspend_dc;
 537		ret = ncsi_xmit_cmd(&nca);
 538		if (ret)
 539			goto error;
 540
 541		break;
 542	case ncsi_dev_state_suspend_dc:
 543		ndp->pending_req_num = 1;
 544
 545		nca.type = NCSI_PKT_CMD_DC;
 546		nca.package = np->id;
 547		nca.channel = nc->id;
 548		nca.bytes[0] = 1;
 549
 550		nd->state = ncsi_dev_state_suspend_deselect;
 551		ret = ncsi_xmit_cmd(&nca);
 552		if (ret)
 553			goto error;
 554
 555		NCSI_FOR_EACH_CHANNEL(np, tmp) {
 556			/* If there is another channel active on this package
 557			 * do not deselect the package.
 558			 */
 559			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
 560				nd->state = ncsi_dev_state_suspend_done;
 561				break;
 562			}
 563		}
 564		break;
 565	case ncsi_dev_state_suspend_deselect:
 566		ndp->pending_req_num = 1;
 567
 568		nca.type = NCSI_PKT_CMD_DP;
 569		nca.package = np->id;
 570		nca.channel = NCSI_RESERVED_CHANNEL;
 571
 572		nd->state = ncsi_dev_state_suspend_done;
 573		ret = ncsi_xmit_cmd(&nca);
 574		if (ret)
 575			goto error;
 576
 577		break;
 578	case ncsi_dev_state_suspend_done:
 579		spin_lock_irqsave(&nc->lock, flags);
 580		nc->state = NCSI_CHANNEL_INACTIVE;
 581		spin_unlock_irqrestore(&nc->lock, flags);
 582		if (ndp->flags & NCSI_DEV_RESET)
 583			ncsi_reset_dev(nd);
 584		else
 585			ncsi_process_next_channel(ndp);
 586		break;
 587	default:
 588		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
 589			    nd->state);
 590	}
 591
 592	return;
 593error:
 594	nd->state = ncsi_dev_state_functional;
 595}
 596
 597/* Check the VLAN filter bitmap for a set filter, and construct a
 598 * "Set VLAN Filter - Disable" packet if found.
 599 */
 600static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 601			 struct ncsi_cmd_arg *nca)
 602{
 603	struct ncsi_channel_vlan_filter *ncf;
 604	unsigned long flags;
 605	void *bitmap;
 606	int index;
 607	u16 vid;
 608
 609	ncf = &nc->vlan_filter;
 610	bitmap = &ncf->bitmap;
 611
 612	spin_lock_irqsave(&nc->lock, flags);
 613	index = find_first_bit(bitmap, ncf->n_vids);
 614	if (index >= ncf->n_vids) {
 615		spin_unlock_irqrestore(&nc->lock, flags);
 616		return -1;
 617	}
 618	vid = ncf->vids[index];
 619
 620	clear_bit(index, bitmap);
 621	ncf->vids[index] = 0;
 622	spin_unlock_irqrestore(&nc->lock, flags);
 623
 624	nca->type = NCSI_PKT_CMD_SVF;
 625	nca->words[1] = vid;
 626	/* HW filter index starts at 1 */
 627	nca->bytes[6] = index + 1;
 628	nca->bytes[7] = 0x00;
 629	return 0;
 630}
 631
 632/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
 633 * packet.
 634 */
 635static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 636		       struct ncsi_cmd_arg *nca)
 637{
 638	struct ncsi_channel_vlan_filter *ncf;
 639	struct vlan_vid *vlan = NULL;
 640	unsigned long flags;
 641	int i, index;
 642	void *bitmap;
 643	u16 vid;
 644
 645	if (list_empty(&ndp->vlan_vids))
 646		return -1;
 647
 648	ncf = &nc->vlan_filter;
 649	bitmap = &ncf->bitmap;
 650
 651	spin_lock_irqsave(&nc->lock, flags);
 652
 653	rcu_read_lock();
 654	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
 655		vid = vlan->vid;
 656		for (i = 0; i < ncf->n_vids; i++)
 657			if (ncf->vids[i] == vid) {
 658				vid = 0;
 659				break;
 660			}
 661		if (vid)
 662			break;
 663	}
 664	rcu_read_unlock();
 665
 666	if (!vid) {
  667		/* No outstanding VLAN ID left to set */
 668		spin_unlock_irqrestore(&nc->lock, flags);
 669		return -1;
 670	}
 671
 672	index = find_first_zero_bit(bitmap, ncf->n_vids);
 673	if (index < 0 || index >= ncf->n_vids) {
 674		netdev_err(ndp->ndev.dev,
 675			   "Channel %u already has all VLAN filters set\n",
 676			   nc->id);
 677		spin_unlock_irqrestore(&nc->lock, flags);
 678		return -1;
 679	}
 680
 681	ncf->vids[index] = vid;
 682	set_bit(index, bitmap);
 683	spin_unlock_irqrestore(&nc->lock, flags);
 684
 685	nca->type = NCSI_PKT_CMD_SVF;
 686	nca->words[1] = vid;
 687	/* HW filter index starts at 1 */
 688	nca->bytes[6] = index + 1;
 689	nca->bytes[7] = 0x01;
 690
 691	return 0;
 692}
 693
 694static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
 695{
 696	unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
 697	int ret = 0;
 698
 699	nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
 700
 701	memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
 702	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
 703
 704	data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
 705
 706	/* PHY Link up attribute */
 707	data[6] = 0x1;
 708
 709	nca->data = data;
 710
 711	ret = ncsi_xmit_cmd(nca);
 712	if (ret)
 713		netdev_err(nca->ndp->ndev.dev,
 714			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 715			   nca->type);
 716	return ret;
 717}
 718
 719/* NCSI OEM Command APIs */
 720static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
 721{
 722	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
 723	int ret = 0;
 724
 725	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
 726
 727	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
 728	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
 729	data[5] = NCSI_OEM_BCM_CMD_GMA;
 730
 731	nca->data = data;
 732
 733	ret = ncsi_xmit_cmd(nca);
 734	if (ret)
 735		netdev_err(nca->ndp->ndev.dev,
 736			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 737			   nca->type);
 738	return ret;
 739}
 740
 741static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
 742{
 743	union {
 744		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
 745		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
 746	} u;
 747	int ret = 0;
 748
 749	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
 750
 751	memset(&u, 0, sizeof(u));
 752	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
 753	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
 754	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
 755
 756	nca->data = u.data_u8;
 757
 758	ret = ncsi_xmit_cmd(nca);
 759	if (ret)
 760		netdev_err(nca->ndp->ndev.dev,
 761			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 762			   nca->type);
 763	return ret;
 764}
 765
 766static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
 767{
 768	union {
 769		u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
 770		u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
 771	} u;
 772	int ret = 0;
 773
 774	memset(&u, 0, sizeof(u));
 775	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
 776	u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
 777	u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
 778	memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
 779	       nca->ndp->ndev.dev->dev_addr,	ETH_ALEN);
 780	u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
 781		(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
 782
 783	nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
 784	nca->data = u.data_u8;
 785
 786	ret = ncsi_xmit_cmd(nca);
 787	if (ret)
 788		netdev_err(nca->ndp->ndev.dev,
 789			   "NCSI: Failed to transmit cmd 0x%x during probe\n",
 790			   nca->type);
 791	return ret;
 792}
 793
 794static int ncsi_oem_gma_handler_intel(struct ncsi_cmd_arg *nca)
 795{
 796	unsigned char data[NCSI_OEM_INTEL_CMD_GMA_LEN];
 797	int ret = 0;
 798
 799	nca->payload = NCSI_OEM_INTEL_CMD_GMA_LEN;
 800
 801	memset(data, 0, NCSI_OEM_INTEL_CMD_GMA_LEN);
 802	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
 803	data[4] = NCSI_OEM_INTEL_CMD_GMA;
 804
 805	nca->data = data;
 806
 807	ret = ncsi_xmit_cmd(nca);
 808	if (ret)
 809		netdev_err(nca->ndp->ndev.dev,
 810			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 811			   nca->type);
 812
 813	return ret;
 814}
 815
 816/* OEM Command handlers initialization */
 817static struct ncsi_oem_gma_handler {
 818	unsigned int	mfr_id;
 819	int		(*handler)(struct ncsi_cmd_arg *nca);
 820} ncsi_oem_gma_handlers[] = {
 821	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
 822	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx },
 823	{ NCSI_OEM_MFR_INTEL_ID, ncsi_oem_gma_handler_intel }
 824};
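
/* Hooking up a GMA handler for another manufacturer only needs a new row in
 * the table above; the ID and function below are hypothetical:
 *
 *	{ NCSI_OEM_MFR_FOO_ID, ncsi_oem_gma_handler_foo },
 *
 * ncsi_gma_handler() below dispatches on the manufacturer ID reported in the
 * channel's Get Version ID response (nc->version.mf_id).
 */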
 825
 826static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
 827{
 828	struct ncsi_oem_gma_handler *nch = NULL;
 829	int i;
 830
 831	/* This function should only be called once, return if flag set */
 832	if (nca->ndp->gma_flag == 1)
 833		return -1;
 834
 835	/* Find gma handler for given manufacturer id */
 836	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
 837		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
 838			if (ncsi_oem_gma_handlers[i].handler)
 839				nch = &ncsi_oem_gma_handlers[i];
 840			break;
 841			}
 842	}
 843
 844	if (!nch) {
 845		netdev_err(nca->ndp->ndev.dev,
 846			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
 847			   mf_id);
 848		return -1;
 849	}
 850
  851	/* Get MAC address from the NCSI device */
 852	return nch->handler(nca);
 853}
  854
 855/* Determine if a given channel from the channel_queue should be used for Tx */
 856static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
 857			       struct ncsi_channel *nc)
 858{
 859	struct ncsi_channel_mode *ncm;
 860	struct ncsi_channel *channel;
 861	struct ncsi_package *np;
 862
 863	/* Check if any other channel has Tx enabled; a channel may have already
 864	 * been configured and removed from the channel queue.
 865	 */
 866	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 867		if (!ndp->multi_package && np != nc->package)
 868			continue;
 869		NCSI_FOR_EACH_CHANNEL(np, channel) {
 870			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
 871			if (ncm->enable)
 872				return false;
 873		}
 874	}
 875
 876	/* This channel is the preferred channel and has link */
 877	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
 878		np = channel->package;
 879		if (np->preferred_channel &&
 880		    ncsi_channel_has_link(np->preferred_channel)) {
 881			return np->preferred_channel == nc;
 882		}
 883	}
 884
 885	/* This channel has link */
 886	if (ncsi_channel_has_link(nc))
 887		return true;
 888
 889	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
 890		if (ncsi_channel_has_link(channel))
 891			return false;
 892
 893	/* No other channel has link; default to this one */
 894	return true;
 895}
 896
 897/* Change the active Tx channel in a multi-channel setup */
 898int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
 899			   struct ncsi_package *package,
 900			   struct ncsi_channel *disable,
 901			   struct ncsi_channel *enable)
 902{
 903	struct ncsi_cmd_arg nca;
 904	struct ncsi_channel *nc;
 905	struct ncsi_package *np;
 906	int ret = 0;
 907
 908	if (!package->multi_channel && !ndp->multi_package)
 909		netdev_warn(ndp->ndev.dev,
 910			    "NCSI: Trying to update Tx channel in single-channel mode\n");
 911	nca.ndp = ndp;
 912	nca.req_flags = 0;
 913
 914	/* Find current channel with Tx enabled */
 915	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 916		if (disable)
 917			break;
 918		if (!ndp->multi_package && np != package)
 919			continue;
 920
 921		NCSI_FOR_EACH_CHANNEL(np, nc)
 922			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
 923				disable = nc;
 924				break;
 925			}
 926	}
 927
 928	/* Find a suitable channel for Tx */
 929	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 930		if (enable)
 931			break;
 932		if (!ndp->multi_package && np != package)
 933			continue;
 934		if (!(ndp->package_whitelist & (0x1 << np->id)))
 935			continue;
 936
 937		if (np->preferred_channel &&
 938		    ncsi_channel_has_link(np->preferred_channel)) {
 939			enable = np->preferred_channel;
 940			break;
 941		}
 942
 943		NCSI_FOR_EACH_CHANNEL(np, nc) {
 944			if (!(np->channel_whitelist & 0x1 << nc->id))
 945				continue;
 946			if (nc->state != NCSI_CHANNEL_ACTIVE)
 947				continue;
 948			if (ncsi_channel_has_link(nc)) {
 949				enable = nc;
 950				break;
 951			}
 952		}
 953	}
 954
 955	if (disable == enable)
 956		return -1;
 957
 958	if (!enable)
 959		return -1;
 960
 961	if (disable) {
 962		nca.channel = disable->id;
 963		nca.package = disable->package->id;
 964		nca.type = NCSI_PKT_CMD_DCNT;
 965		ret = ncsi_xmit_cmd(&nca);
 966		if (ret)
 967			netdev_err(ndp->ndev.dev,
 968				   "Error %d sending DCNT\n",
 969				   ret);
 970	}
 971
 972	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
 973
 974	nca.channel = enable->id;
 975	nca.package = enable->package->id;
 976	nca.type = NCSI_PKT_CMD_ECNT;
 977	ret = ncsi_xmit_cmd(&nca);
 978	if (ret)
 979		netdev_err(ndp->ndev.dev,
 980			   "Error %d sending ECNT\n",
 981			   ret);
 982
 983	return ret;
 984}
 985
 986static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
 987{
 988	struct ncsi_package *np = ndp->active_package;
 989	struct ncsi_channel *nc = ndp->active_channel;
 990	struct ncsi_channel *hot_nc = NULL;
 991	struct ncsi_dev *nd = &ndp->ndev;
 992	struct net_device *dev = nd->dev;
 993	struct ncsi_cmd_arg nca;
 994	unsigned char index;
 995	unsigned long flags;
 996	int ret;
 997
 998	nca.ndp = ndp;
 999	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1000	switch (nd->state) {
1001	case ncsi_dev_state_config:
1002	case ncsi_dev_state_config_sp:
1003		ndp->pending_req_num = 1;
1004
1005		/* Select the specific package */
1006		nca.type = NCSI_PKT_CMD_SP;
1007		if (ndp->flags & NCSI_DEV_HWA)
1008			nca.bytes[0] = 0;
1009		else
1010			nca.bytes[0] = 1;
1011		nca.package = np->id;
1012		nca.channel = NCSI_RESERVED_CHANNEL;
1013		ret = ncsi_xmit_cmd(&nca);
1014		if (ret) {
1015			netdev_err(ndp->ndev.dev,
1016				   "NCSI: Failed to transmit CMD_SP\n");
1017			goto error;
1018		}
1019
1020		nd->state = ncsi_dev_state_config_cis;
1021		break;
1022	case ncsi_dev_state_config_cis:
1023		ndp->pending_req_num = 1;
1024
1025		/* Clear initial state */
1026		nca.type = NCSI_PKT_CMD_CIS;
1027		nca.package = np->id;
1028		nca.channel = nc->id;
1029		ret = ncsi_xmit_cmd(&nca);
1030		if (ret) {
1031			netdev_err(ndp->ndev.dev,
1032				   "NCSI: Failed to transmit CMD_CIS\n");
1033			goto error;
1034		}
1035
1036		nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
1037			  ? ncsi_dev_state_config_oem_gma
1038			  : ncsi_dev_state_config_clear_vids;
1039		break;
1040	case ncsi_dev_state_config_oem_gma:
1041		nd->state = ncsi_dev_state_config_apply_mac;
 1042
1043		nca.package = np->id;
1044		nca.channel = nc->id;
1045		ndp->pending_req_num = 1;
1046		if (nc->version.major >= 1 && nc->version.minor >= 2) {
1047			nca.type = NCSI_PKT_CMD_GMCMA;
1048			ret = ncsi_xmit_cmd(&nca);
1049		} else {
1050			nca.type = NCSI_PKT_CMD_OEM;
1051			ret = ncsi_gma_handler(&nca, nc->version.mf_id);
1052		}
1053		if (ret < 0) {
1054			nd->state = ncsi_dev_state_config_clear_vids;
1055			schedule_work(&ndp->work);
1056		}
1057
1058		break;
1059	case ncsi_dev_state_config_apply_mac:
1060		rtnl_lock();
1061		ret = dev_set_mac_address(dev, &ndp->pending_mac, NULL);
1062		rtnl_unlock();
1063		if (ret < 0)
 1064			netdev_warn(dev, "NCSI: Writing MAC address to device failed\n");
1065
1066		nd->state = ncsi_dev_state_config_clear_vids;
1067
1068		fallthrough;
1069	case ncsi_dev_state_config_clear_vids:
1070	case ncsi_dev_state_config_svf:
1071	case ncsi_dev_state_config_ev:
1072	case ncsi_dev_state_config_sma:
1073	case ncsi_dev_state_config_ebf:
1074	case ncsi_dev_state_config_dgmf:
1075	case ncsi_dev_state_config_ecnt:
1076	case ncsi_dev_state_config_ec:
1077	case ncsi_dev_state_config_ae:
1078	case ncsi_dev_state_config_gls:
1079		ndp->pending_req_num = 1;
1080
1081		nca.package = np->id;
1082		nca.channel = nc->id;
1083
1084		/* Clear any active filters on the channel before setting */
1085		if (nd->state == ncsi_dev_state_config_clear_vids) {
1086			ret = clear_one_vid(ndp, nc, &nca);
1087			if (ret) {
1088				nd->state = ncsi_dev_state_config_svf;
1089				schedule_work(&ndp->work);
1090				break;
1091			}
1092			/* Repeat */
1093			nd->state = ncsi_dev_state_config_clear_vids;
1094		/* Add known VLAN tags to the filter */
1095		} else if (nd->state == ncsi_dev_state_config_svf) {
1096			ret = set_one_vid(ndp, nc, &nca);
1097			if (ret) {
1098				nd->state = ncsi_dev_state_config_ev;
1099				schedule_work(&ndp->work);
1100				break;
1101			}
1102			/* Repeat */
1103			nd->state = ncsi_dev_state_config_svf;
1104		/* Enable/Disable the VLAN filter */
1105		} else if (nd->state == ncsi_dev_state_config_ev) {
1106			if (list_empty(&ndp->vlan_vids)) {
1107				nca.type = NCSI_PKT_CMD_DV;
1108			} else {
1109				nca.type = NCSI_PKT_CMD_EV;
1110				nca.bytes[3] = NCSI_CAP_VLAN_NO;
1111			}
1112			nd->state = ncsi_dev_state_config_sma;
1113		} else if (nd->state == ncsi_dev_state_config_sma) {
1114		/* Use first entry in unicast filter table. Note that
1115		 * the MAC filter table starts from entry 1 instead of
1116		 * 0.
1117		 */
1118			nca.type = NCSI_PKT_CMD_SMA;
1119			for (index = 0; index < 6; index++)
1120				nca.bytes[index] = dev->dev_addr[index];
1121			nca.bytes[6] = 0x1;
1122			nca.bytes[7] = 0x1;
1123			nd->state = ncsi_dev_state_config_ebf;
1124		} else if (nd->state == ncsi_dev_state_config_ebf) {
1125			nca.type = NCSI_PKT_CMD_EBF;
1126			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
 1127			/* If multicast global filtering is supported then
 1128			 * disable it so that all multicast packets will be
 1129			 * forwarded to the management controller.
 1130			 */
1131			if (nc->caps[NCSI_CAP_GENERIC].cap &
1132			    NCSI_CAP_GENERIC_MC)
1133				nd->state = ncsi_dev_state_config_dgmf;
1134			else if (ncsi_channel_is_tx(ndp, nc))
1135				nd->state = ncsi_dev_state_config_ecnt;
1136			else
1137				nd->state = ncsi_dev_state_config_ec;
1138		} else if (nd->state == ncsi_dev_state_config_dgmf) {
1139			nca.type = NCSI_PKT_CMD_DGMF;
1140			if (ncsi_channel_is_tx(ndp, nc))
1141				nd->state = ncsi_dev_state_config_ecnt;
1142			else
1143				nd->state = ncsi_dev_state_config_ec;
1144		} else if (nd->state == ncsi_dev_state_config_ecnt) {
1145			if (np->preferred_channel &&
1146			    nc != np->preferred_channel)
1147				netdev_info(ndp->ndev.dev,
1148					    "NCSI: Tx failed over to channel %u\n",
1149					    nc->id);
1150			nca.type = NCSI_PKT_CMD_ECNT;
1151			nd->state = ncsi_dev_state_config_ec;
1152		} else if (nd->state == ncsi_dev_state_config_ec) {
1153			/* Enable AEN if it's supported */
1154			nca.type = NCSI_PKT_CMD_EC;
1155			nd->state = ncsi_dev_state_config_ae;
1156			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1157				nd->state = ncsi_dev_state_config_gls;
1158		} else if (nd->state == ncsi_dev_state_config_ae) {
1159			nca.type = NCSI_PKT_CMD_AE;
1160			nca.bytes[0] = 0;
1161			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1162			nd->state = ncsi_dev_state_config_gls;
1163		} else if (nd->state == ncsi_dev_state_config_gls) {
1164			nca.type = NCSI_PKT_CMD_GLS;
1165			nd->state = ncsi_dev_state_config_done;
1166		}
1167
1168		ret = ncsi_xmit_cmd(&nca);
1169		if (ret) {
1170			netdev_err(ndp->ndev.dev,
1171				   "NCSI: Failed to transmit CMD %x\n",
1172				   nca.type);
1173			goto error;
1174		}
1175		break;
1176	case ncsi_dev_state_config_done:
1177		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1178			   nc->id);
1179		spin_lock_irqsave(&nc->lock, flags);
1180		nc->state = NCSI_CHANNEL_ACTIVE;
1181
1182		if (ndp->flags & NCSI_DEV_RESET) {
1183			/* A reset event happened during config, start it now */
1184			nc->reconfigure_needed = false;
1185			spin_unlock_irqrestore(&nc->lock, flags);
1186			ncsi_reset_dev(nd);
1187			break;
1188		}
1189
1190		if (nc->reconfigure_needed) {
1191			/* This channel's configuration has been updated
1192			 * part-way during the config state - start the
1193			 * channel configuration over
1194			 */
1195			nc->reconfigure_needed = false;
1196			nc->state = NCSI_CHANNEL_INACTIVE;
1197			spin_unlock_irqrestore(&nc->lock, flags);
1198
1199			spin_lock_irqsave(&ndp->lock, flags);
1200			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1201			spin_unlock_irqrestore(&ndp->lock, flags);
1202
1203			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1204			ncsi_process_next_channel(ndp);
1205			break;
1206		}
1207
1208		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1209			hot_nc = nc;
1210		} else {
1211			hot_nc = NULL;
1212			netdev_dbg(ndp->ndev.dev,
1213				   "NCSI: channel %u link down after config\n",
1214				   nc->id);
1215		}
1216		spin_unlock_irqrestore(&nc->lock, flags);
1217
1218		/* Update the hot channel */
1219		spin_lock_irqsave(&ndp->lock, flags);
1220		ndp->hot_channel = hot_nc;
1221		spin_unlock_irqrestore(&ndp->lock, flags);
1222
1223		ncsi_start_channel_monitor(nc);
1224		ncsi_process_next_channel(ndp);
1225		break;
1226	default:
1227		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1228			     nd->state);
1229	}
1230
1231	return;
1232
1233error:
1234	ncsi_report_link(ndp, true);
1235}
1236
1237static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1238{
1239	struct ncsi_channel *nc, *found, *hot_nc;
1240	struct ncsi_channel_mode *ncm;
1241	unsigned long flags, cflags;
1242	struct ncsi_package *np;
1243	bool with_link;
1244
1245	spin_lock_irqsave(&ndp->lock, flags);
1246	hot_nc = ndp->hot_channel;
1247	spin_unlock_irqrestore(&ndp->lock, flags);
1248
 1249	/* By default the search stops once an inactive channel with its link
 1250	 * up is found, unless a preferred channel is set.
 1251	 * If multi_package or multi_channel are configured, all channels in
 1252	 * the whitelist are added to the channel queue.
 1253	 */
1254	found = NULL;
1255	with_link = false;
1256	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1257		if (!(ndp->package_whitelist & (0x1 << np->id)))
1258			continue;
1259		NCSI_FOR_EACH_CHANNEL(np, nc) {
1260			if (!(np->channel_whitelist & (0x1 << nc->id)))
1261				continue;
1262
1263			spin_lock_irqsave(&nc->lock, cflags);
1264
1265			if (!list_empty(&nc->link) ||
1266			    nc->state != NCSI_CHANNEL_INACTIVE) {
1267				spin_unlock_irqrestore(&nc->lock, cflags);
1268				continue;
1269			}
1270
1271			if (!found)
1272				found = nc;
1273
1274			if (nc == hot_nc)
1275				found = nc;
1276
1277			ncm = &nc->modes[NCSI_MODE_LINK];
1278			if (ncm->data[2] & 0x1) {
1279				found = nc;
1280				with_link = true;
1281			}
1282
1283			/* If multi_channel is enabled configure all valid
1284			 * channels whether or not they currently have link
1285			 * so they will have AENs enabled.
1286			 */
1287			if (with_link || np->multi_channel) {
1288				spin_lock_irqsave(&ndp->lock, flags);
1289				list_add_tail_rcu(&nc->link,
1290						  &ndp->channel_queue);
1291				spin_unlock_irqrestore(&ndp->lock, flags);
1292
1293				netdev_dbg(ndp->ndev.dev,
1294					   "NCSI: Channel %u added to queue (link %s)\n",
1295					   nc->id,
1296					   ncm->data[2] & 0x1 ? "up" : "down");
1297			}
1298
1299			spin_unlock_irqrestore(&nc->lock, cflags);
1300
1301			if (with_link && !np->multi_channel)
1302				break;
1303		}
1304		if (with_link && !ndp->multi_package)
1305			break;
1306	}
1307
1308	if (list_empty(&ndp->channel_queue) && found) {
1309		netdev_info(ndp->ndev.dev,
1310			    "NCSI: No channel with link found, configuring channel %u\n",
1311			    found->id);
1312		spin_lock_irqsave(&ndp->lock, flags);
1313		list_add_tail_rcu(&found->link, &ndp->channel_queue);
1314		spin_unlock_irqrestore(&ndp->lock, flags);
1315	} else if (!found) {
1316		netdev_warn(ndp->ndev.dev,
1317			    "NCSI: No channel found to configure!\n");
1318		ncsi_report_link(ndp, true);
1319		return -ENODEV;
1320	}
1321
1322	return ncsi_process_next_channel(ndp);
1323}
1324
1325static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1326{
1327	struct ncsi_package *np;
1328	struct ncsi_channel *nc;
1329	unsigned int cap;
1330	bool has_channel = false;
1331
 1332	/* Hardware arbitration is disabled if any single channel
 1333	 * doesn't explicitly advertise support for it.
 1334	 */
1335	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1336		NCSI_FOR_EACH_CHANNEL(np, nc) {
1337			has_channel = true;
1338
1339			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1340			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1341			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1342			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1343				ndp->flags &= ~NCSI_DEV_HWA;
1344				return false;
1345			}
1346		}
1347	}
1348
1349	if (has_channel) {
1350		ndp->flags |= NCSI_DEV_HWA;
1351		return true;
1352	}
1353
1354	ndp->flags &= ~NCSI_DEV_HWA;
1355	return false;
1356}
1357
1358static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1359{
1360	struct ncsi_dev *nd = &ndp->ndev;
1361	struct ncsi_package *np;
1362	struct ncsi_cmd_arg nca;
1363	unsigned char index;
1364	int ret;
1365
1366	nca.ndp = ndp;
1367	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1368	switch (nd->state) {
1369	case ncsi_dev_state_probe:
1370		nd->state = ncsi_dev_state_probe_deselect;
1371		fallthrough;
1372	case ncsi_dev_state_probe_deselect:
1373		ndp->pending_req_num = 8;
1374
1375		/* Deselect all possible packages */
1376		nca.type = NCSI_PKT_CMD_DP;
1377		nca.channel = NCSI_RESERVED_CHANNEL;
1378		for (index = 0; index < 8; index++) {
1379			nca.package = index;
1380			ret = ncsi_xmit_cmd(&nca);
1381			if (ret)
1382				goto error;
1383		}
1384
1385		nd->state = ncsi_dev_state_probe_package;
1386		break;
1387	case ncsi_dev_state_probe_package:
1388		if (ndp->package_probe_id >= 8) {
1389			/* Last package probed, finishing */
1390			ndp->flags |= NCSI_DEV_PROBED;
1391			break;
1392		}
1393
1394		ndp->pending_req_num = 1;
1395
1396		nca.type = NCSI_PKT_CMD_SP;
1397		nca.bytes[0] = 1;
1398		nca.package = ndp->package_probe_id;
1399		nca.channel = NCSI_RESERVED_CHANNEL;
1400		ret = ncsi_xmit_cmd(&nca);
1401		if (ret)
1402			goto error;
1403		nd->state = ncsi_dev_state_probe_channel;
1404		break;
1405	case ncsi_dev_state_probe_channel:
1406		ndp->active_package = ncsi_find_package(ndp,
1407							ndp->package_probe_id);
1408		if (!ndp->active_package) {
1409			/* No response */
1410			nd->state = ncsi_dev_state_probe_dp;
1411			schedule_work(&ndp->work);
1412			break;
1413		}
1414		nd->state = ncsi_dev_state_probe_cis;
1415		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
1416		    ndp->mlx_multi_host)
1417			nd->state = ncsi_dev_state_probe_mlx_gma;
1418
1419		schedule_work(&ndp->work);
1420		break;
1421	case ncsi_dev_state_probe_mlx_gma:
1422		ndp->pending_req_num = 1;
1423
1424		nca.type = NCSI_PKT_CMD_OEM;
1425		nca.package = ndp->active_package->id;
1426		nca.channel = 0;
1427		ret = ncsi_oem_gma_handler_mlx(&nca);
1428		if (ret)
1429			goto error;
1430
1431		nd->state = ncsi_dev_state_probe_mlx_smaf;
1432		break;
1433	case ncsi_dev_state_probe_mlx_smaf:
1434		ndp->pending_req_num = 1;
1435
1436		nca.type = NCSI_PKT_CMD_OEM;
1437		nca.package = ndp->active_package->id;
1438		nca.channel = 0;
1439		ret = ncsi_oem_smaf_mlx(&nca);
1440		if (ret)
1441			goto error;
1442
1443		nd->state = ncsi_dev_state_probe_cis;
1444		break;
1445	case ncsi_dev_state_probe_keep_phy:
1446		ndp->pending_req_num = 1;
1447
1448		nca.type = NCSI_PKT_CMD_OEM;
1449		nca.package = ndp->active_package->id;
1450		nca.channel = 0;
1451		ret = ncsi_oem_keep_phy_intel(&nca);
1452		if (ret)
1453			goto error;
1454
1455		nd->state = ncsi_dev_state_probe_gvi;
1456		break;
1457	case ncsi_dev_state_probe_cis:
1458	case ncsi_dev_state_probe_gvi:
1459	case ncsi_dev_state_probe_gc:
1460	case ncsi_dev_state_probe_gls:
1461		np = ndp->active_package;
1462		ndp->pending_req_num = 1;
1463
 1464		/* Clear initial state, or retrieve version, capabilities or link status */
1465		if (nd->state == ncsi_dev_state_probe_cis)
1466			nca.type = NCSI_PKT_CMD_CIS;
1467		else if (nd->state == ncsi_dev_state_probe_gvi)
1468			nca.type = NCSI_PKT_CMD_GVI;
1469		else if (nd->state == ncsi_dev_state_probe_gc)
1470			nca.type = NCSI_PKT_CMD_GC;
1471		else
1472			nca.type = NCSI_PKT_CMD_GLS;
1473
1474		nca.package = np->id;
1475		nca.channel = ndp->channel_probe_id;
1476
1477		ret = ncsi_xmit_cmd(&nca);
1478		if (ret)
1479			goto error;
1480
1481		if (nd->state == ncsi_dev_state_probe_cis) {
1482			nd->state = ncsi_dev_state_probe_gvi;
1483			if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
1484				nd->state = ncsi_dev_state_probe_keep_phy;
1485		} else if (nd->state == ncsi_dev_state_probe_gvi) {
1486			nd->state = ncsi_dev_state_probe_gc;
1487		} else if (nd->state == ncsi_dev_state_probe_gc) {
1488			nd->state = ncsi_dev_state_probe_gls;
1489		} else {
1490			nd->state = ncsi_dev_state_probe_cis;
1491			ndp->channel_probe_id++;
1492		}
1493
1494		if (ndp->channel_probe_id == ndp->channel_count) {
1495			ndp->channel_probe_id = 0;
1496			nd->state = ncsi_dev_state_probe_dp;
1497		}
1498		break;
1499	case ncsi_dev_state_probe_dp:
1500		ndp->pending_req_num = 1;
1501
1502		/* Deselect the current package */
1503		nca.type = NCSI_PKT_CMD_DP;
1504		nca.package = ndp->package_probe_id;
1505		nca.channel = NCSI_RESERVED_CHANNEL;
1506		ret = ncsi_xmit_cmd(&nca);
1507		if (ret)
1508			goto error;
1509
1510		/* Probe next package after receiving response */
1511		ndp->package_probe_id++;
1512		nd->state = ncsi_dev_state_probe_package;
1513		ndp->active_package = NULL;
1514		break;
1515	default:
1516		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1517			    nd->state);
1518	}
1519
1520	if (ndp->flags & NCSI_DEV_PROBED) {
1521		/* Check if all packages have HWA support */
1522		ncsi_check_hwa(ndp);
1523		ncsi_choose_active_channel(ndp);
1524	}
1525
1526	return;
1527error:
1528	netdev_err(ndp->ndev.dev,
1529		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
1530		   nca.type);
1531	ncsi_report_link(ndp, true);
1532}
1533
1534static void ncsi_dev_work(struct work_struct *work)
1535{
1536	struct ncsi_dev_priv *ndp = container_of(work,
1537			struct ncsi_dev_priv, work);
1538	struct ncsi_dev *nd = &ndp->ndev;
1539
1540	switch (nd->state & ncsi_dev_state_major) {
1541	case ncsi_dev_state_probe:
1542		ncsi_probe_channel(ndp);
1543		break;
1544	case ncsi_dev_state_suspend:
1545		ncsi_suspend_channel(ndp);
1546		break;
1547	case ncsi_dev_state_config:
1548		ncsi_configure_channel(ndp);
1549		break;
1550	default:
1551		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1552			    nd->state);
1553	}
1554}
1555
1556int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1557{
1558	struct ncsi_channel *nc;
1559	int old_state;
1560	unsigned long flags;
1561
1562	spin_lock_irqsave(&ndp->lock, flags);
1563	nc = list_first_or_null_rcu(&ndp->channel_queue,
1564				    struct ncsi_channel, link);
1565	if (!nc) {
1566		spin_unlock_irqrestore(&ndp->lock, flags);
1567		goto out;
1568	}
1569
1570	list_del_init(&nc->link);
1571	spin_unlock_irqrestore(&ndp->lock, flags);
1572
1573	spin_lock_irqsave(&nc->lock, flags);
1574	old_state = nc->state;
1575	nc->state = NCSI_CHANNEL_INVISIBLE;
1576	spin_unlock_irqrestore(&nc->lock, flags);
1577
1578	ndp->active_channel = nc;
1579	ndp->active_package = nc->package;
1580
1581	switch (old_state) {
1582	case NCSI_CHANNEL_INACTIVE:
1583		ndp->ndev.state = ncsi_dev_state_config;
1584		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1585	                   nc->id);
1586		ncsi_configure_channel(ndp);
1587		break;
1588	case NCSI_CHANNEL_ACTIVE:
1589		ndp->ndev.state = ncsi_dev_state_suspend;
1590		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1591			   nc->id);
1592		ncsi_suspend_channel(ndp);
1593		break;
1594	default:
1595		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1596			   old_state, nc->package->id, nc->id);
1597		ncsi_report_link(ndp, false);
1598		return -EINVAL;
1599	}
1600
1601	return 0;
1602
1603out:
1604	ndp->active_channel = NULL;
1605	ndp->active_package = NULL;
1606	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1607		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1608		return ncsi_choose_active_channel(ndp);
1609	}
1610
1611	ncsi_report_link(ndp, false);
1612	return -ENODEV;
1613}
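
/* Rough channel life cycle, as driven by the queue handling above:
 *
 *	INACTIVE --(dequeue)--> INVISIBLE --(ncsi_configure_channel)--> ACTIVE
 *	ACTIVE   --(dequeue)--> INVISIBLE --(ncsi_suspend_channel)---> INACTIVE
 *
 * INVISIBLE marks a channel that is in the middle of being configured or
 * suspended, so it is skipped by link reporting and channel selection.
 */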
1614
1615static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1616{
1617	struct ncsi_dev *nd = &ndp->ndev;
1618	struct ncsi_channel *nc;
1619	struct ncsi_package *np;
1620	unsigned long flags;
1621	unsigned int n = 0;
1622
1623	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1624		NCSI_FOR_EACH_CHANNEL(np, nc) {
1625			spin_lock_irqsave(&nc->lock, flags);
1626
 1627			/* Channels may be busy, so mark them dirty instead of
 1628			 * kicking them if:
 1629			 * a) not ACTIVE (configured)
 1630			 * b) in the channel_queue (to be configured)
 1631			 * c) their ndev is in the config state
 1632			 */
1633			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1634				if ((ndp->ndev.state & 0xff00) ==
1635						ncsi_dev_state_config ||
1636						!list_empty(&nc->link)) {
1637					netdev_dbg(nd->dev,
1638						   "NCSI: channel %p marked dirty\n",
1639						   nc);
1640					nc->reconfigure_needed = true;
1641				}
1642				spin_unlock_irqrestore(&nc->lock, flags);
1643				continue;
1644			}
1645
1646			spin_unlock_irqrestore(&nc->lock, flags);
1647
1648			ncsi_stop_channel_monitor(nc);
1649			spin_lock_irqsave(&nc->lock, flags);
1650			nc->state = NCSI_CHANNEL_INACTIVE;
1651			spin_unlock_irqrestore(&nc->lock, flags);
1652
1653			spin_lock_irqsave(&ndp->lock, flags);
1654			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1655			spin_unlock_irqrestore(&ndp->lock, flags);
1656
1657			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1658			n++;
1659		}
1660	}
1661
1662	return n;
1663}
1664
1665int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1666{
1667	struct ncsi_dev_priv *ndp;
1668	unsigned int n_vids = 0;
1669	struct vlan_vid *vlan;
1670	struct ncsi_dev *nd;
1671	bool found = false;
1672
1673	if (vid == 0)
1674		return 0;
1675
1676	nd = ncsi_find_dev(dev);
1677	if (!nd) {
1678		netdev_warn(dev, "NCSI: No net_device?\n");
1679		return 0;
1680	}
1681
1682	ndp = TO_NCSI_DEV_PRIV(nd);
1683
1684	/* Add the VLAN id to our internal list */
1685	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1686		n_vids++;
1687		if (vlan->vid == vid) {
1688			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1689				   vid);
1690			return 0;
1691		}
1692	}
1693	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1694		netdev_warn(dev,
1695			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1696			    vid, NCSI_MAX_VLAN_VIDS);
1697		return -ENOSPC;
1698	}
1699
1700	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1701	if (!vlan)
1702		return -ENOMEM;
1703
1704	vlan->proto = proto;
1705	vlan->vid = vid;
1706	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1707
1708	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1709
1710	found = ncsi_kick_channels(ndp) != 0;
1711
1712	return found ? ncsi_process_next_channel(ndp) : 0;
1713}
1714EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1715
1716int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1717{
1718	struct vlan_vid *vlan, *tmp;
1719	struct ncsi_dev_priv *ndp;
1720	struct ncsi_dev *nd;
1721	bool found = false;
1722
1723	if (vid == 0)
1724		return 0;
1725
1726	nd = ncsi_find_dev(dev);
1727	if (!nd) {
1728		netdev_warn(dev, "NCSI: no net_device?\n");
1729		return 0;
1730	}
1731
1732	ndp = TO_NCSI_DEV_PRIV(nd);
1733
1734	/* Remove the VLAN id from our internal list */
1735	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1736		if (vlan->vid == vid) {
1737			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1738			list_del_rcu(&vlan->list);
1739			found = true;
1740			kfree(vlan);
1741		}
1742
1743	if (!found) {
1744		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1745		return -EINVAL;
1746	}
1747
1748	found = ncsi_kick_channels(ndp) != 0;
1749
1750	return found ? ncsi_process_next_channel(ndp) : 0;
1751}
1752EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
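
/* A sketch of how an NC-SI capable driver wires these helpers into its
 * netdev callbacks (the ops structure name is hypothetical, the field names
 * are the standard net_device_ops members):
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		...
 *		.ndo_vlan_rx_add_vid	= ncsi_vlan_rx_add_vid,
 *		.ndo_vlan_rx_kill_vid	= ncsi_vlan_rx_kill_vid,
 *	};
 */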
1753
1754struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1755				   void (*handler)(struct ncsi_dev *ndev))
1756{
1757	struct ncsi_dev_priv *ndp;
1758	struct ncsi_dev *nd;
1759	struct platform_device *pdev;
1760	struct device_node *np;
1761	unsigned long flags;
1762	int i;
1763
1764	/* Check if the device has been registered or not */
1765	nd = ncsi_find_dev(dev);
1766	if (nd)
1767		return nd;
1768
1769	/* Create NCSI device */
1770	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1771	if (!ndp)
1772		return NULL;
1773
1774	nd = &ndp->ndev;
1775	nd->state = ncsi_dev_state_registered;
1776	nd->dev = dev;
1777	nd->handler = handler;
1778	ndp->pending_req_num = 0;
1779	INIT_LIST_HEAD(&ndp->channel_queue);
1780	INIT_LIST_HEAD(&ndp->vlan_vids);
1781	INIT_WORK(&ndp->work, ncsi_dev_work);
1782	ndp->package_whitelist = UINT_MAX;
1783
1784	/* Initialize private NCSI device */
1785	spin_lock_init(&ndp->lock);
1786	INIT_LIST_HEAD(&ndp->packages);
1787	ndp->request_id = NCSI_REQ_START_IDX;
1788	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1789		ndp->requests[i].id = i;
1790		ndp->requests[i].ndp = ndp;
1791		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1792	}
1793	ndp->channel_count = NCSI_RESERVED_CHANNEL;
1794
1795	spin_lock_irqsave(&ncsi_dev_lock, flags);
1796	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1797	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1798
1799	/* Register NCSI packet Rx handler */
1800	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1801	ndp->ptype.func = ncsi_rcv_rsp;
1802	ndp->ptype.dev = dev;
1803	dev_add_pack(&ndp->ptype);
1804
1805	pdev = to_platform_device(dev->dev.parent);
1806	if (pdev) {
1807		np = pdev->dev.of_node;
1808		if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
1809			   of_property_read_bool(np, "mlx,multi-host")))
1810			ndp->mlx_multi_host = true;
1811	}
1812
1813	return nd;
1814}
1815EXPORT_SYMBOL_GPL(ncsi_register_dev);
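
/* A minimal usage sketch; the foo_* names are hypothetical, only the ncsi_*
 * calls and the handler signature come from this file:
 *
 *	static void foo_ncsi_handler(struct ncsi_dev *nd)
 *	{
 *		if (nd->state != ncsi_dev_state_functional)
 *			return;
 *		... react to nd->link_up, e.g. update the carrier ...
 *	}
 *
 *	priv->ndev = ncsi_register_dev(netdev, foo_ncsi_handler);   (probe)
 *	ncsi_start_dev(priv->ndev);                                  (open)
 *	ncsi_stop_dev(priv->ndev);                                   (stop)
 *	ncsi_unregister_dev(priv->ndev);                             (remove)
 */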
1816
1817int ncsi_start_dev(struct ncsi_dev *nd)
1818{
1819	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1820
1821	if (nd->state != ncsi_dev_state_registered &&
1822	    nd->state != ncsi_dev_state_functional)
1823		return -ENOTTY;
1824
1825	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1826		ndp->package_probe_id = 0;
1827		ndp->channel_probe_id = 0;
1828		nd->state = ncsi_dev_state_probe;
1829		schedule_work(&ndp->work);
1830		return 0;
1831	}
1832
1833	return ncsi_reset_dev(nd);
1834}
1835EXPORT_SYMBOL_GPL(ncsi_start_dev);
1836
1837void ncsi_stop_dev(struct ncsi_dev *nd)
1838{
1839	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1840	struct ncsi_package *np;
1841	struct ncsi_channel *nc;
1842	bool chained;
1843	int old_state;
1844	unsigned long flags;
1845
1846	/* Stop the channel monitor on any active channels. Don't reset the
1847	 * channel state so we know which were active when ncsi_start_dev()
1848	 * is next called.
1849	 */
1850	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1851		NCSI_FOR_EACH_CHANNEL(np, nc) {
1852			ncsi_stop_channel_monitor(nc);
1853
1854			spin_lock_irqsave(&nc->lock, flags);
1855			chained = !list_empty(&nc->link);
1856			old_state = nc->state;
1857			spin_unlock_irqrestore(&nc->lock, flags);
1858
1859			WARN_ON_ONCE(chained ||
1860				     old_state == NCSI_CHANNEL_INVISIBLE);
1861		}
1862	}
1863
1864	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1865	ncsi_report_link(ndp, true);
1866}
1867EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1868
1869int ncsi_reset_dev(struct ncsi_dev *nd)
1870{
1871	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1872	struct ncsi_channel *nc, *active, *tmp;
1873	struct ncsi_package *np;
1874	unsigned long flags;
1875
1876	spin_lock_irqsave(&ndp->lock, flags);
1877
1878	if (!(ndp->flags & NCSI_DEV_RESET)) {
1879		/* Haven't been called yet, check states */
1880		switch (nd->state & ncsi_dev_state_major) {
1881		case ncsi_dev_state_registered:
1882		case ncsi_dev_state_probe:
1883			/* Not even probed yet - do nothing */
1884			spin_unlock_irqrestore(&ndp->lock, flags);
1885			return 0;
1886		case ncsi_dev_state_suspend:
1887		case ncsi_dev_state_config:
1888			/* Wait for the channel to finish its suspend/config
1889			 * operation; once it finishes it will check for
1890			 * NCSI_DEV_RESET and reset the state.
1891			 */
1892			ndp->flags |= NCSI_DEV_RESET;
1893			spin_unlock_irqrestore(&ndp->lock, flags);
1894			return 0;
1895		}
1896	} else {
1897		switch (nd->state) {
1898		case ncsi_dev_state_suspend_done:
1899		case ncsi_dev_state_config_done:
1900		case ncsi_dev_state_functional:
1901			/* Ok */
1902			break;
1903		default:
 1904			/* A reset operation is already in progress */
1905			spin_unlock_irqrestore(&ndp->lock, flags);
1906			return 0;
1907		}
1908	}
1909
1910	if (!list_empty(&ndp->channel_queue)) {
1911		/* Clear any channel queue we may have interrupted */
1912		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1913			list_del_init(&nc->link);
1914	}
1915	spin_unlock_irqrestore(&ndp->lock, flags);
1916
1917	active = NULL;
1918	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1919		NCSI_FOR_EACH_CHANNEL(np, nc) {
1920			spin_lock_irqsave(&nc->lock, flags);
1921
1922			if (nc->state == NCSI_CHANNEL_ACTIVE) {
1923				active = nc;
1924				nc->state = NCSI_CHANNEL_INVISIBLE;
1925				spin_unlock_irqrestore(&nc->lock, flags);
1926				ncsi_stop_channel_monitor(nc);
1927				break;
1928			}
1929
1930			spin_unlock_irqrestore(&nc->lock, flags);
1931		}
1932		if (active)
1933			break;
1934	}
1935
1936	if (!active) {
1937		/* Done */
1938		spin_lock_irqsave(&ndp->lock, flags);
1939		ndp->flags &= ~NCSI_DEV_RESET;
1940		spin_unlock_irqrestore(&ndp->lock, flags);
1941		return ncsi_choose_active_channel(ndp);
1942	}
1943
1944	spin_lock_irqsave(&ndp->lock, flags);
1945	ndp->flags |= NCSI_DEV_RESET;
1946	ndp->active_channel = active;
1947	ndp->active_package = active->package;
1948	spin_unlock_irqrestore(&ndp->lock, flags);
1949
1950	nd->state = ncsi_dev_state_suspend;
1951	schedule_work(&ndp->work);
1952	return 0;
1953}
1954
1955void ncsi_unregister_dev(struct ncsi_dev *nd)
1956{
1957	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1958	struct ncsi_package *np, *tmp;
1959	unsigned long flags;
1960
1961	dev_remove_pack(&ndp->ptype);
1962
1963	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1964		ncsi_remove_package(np);
1965
1966	spin_lock_irqsave(&ncsi_dev_lock, flags);
1967	list_del_rcu(&ndp->node);
1968	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1969
1970	disable_work_sync(&ndp->work);
1971
1972	kfree(ndp);
1973}
1974EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright Gavin Shan, IBM Corporation 2016.
   4 */
   5
   6#include <linux/module.h>
   7#include <linux/kernel.h>
   8#include <linux/init.h>
   9#include <linux/netdevice.h>
  10#include <linux/skbuff.h>
 
 
  11
  12#include <net/ncsi.h>
  13#include <net/net_namespace.h>
  14#include <net/sock.h>
  15#include <net/addrconf.h>
  16#include <net/ipv6.h>
  17#include <net/genetlink.h>
  18
  19#include "internal.h"
  20#include "ncsi-pkt.h"
  21#include "ncsi-netlink.h"
  22
  23LIST_HEAD(ncsi_dev_list);
  24DEFINE_SPINLOCK(ncsi_dev_lock);
  25
  26bool ncsi_channel_has_link(struct ncsi_channel *channel)
  27{
  28	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
  29}
  30
  31bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
  32			  struct ncsi_channel *channel)
  33{
  34	struct ncsi_package *np;
  35	struct ncsi_channel *nc;
  36
  37	NCSI_FOR_EACH_PACKAGE(ndp, np)
  38		NCSI_FOR_EACH_CHANNEL(np, nc) {
  39			if (nc == channel)
  40				continue;
  41			if (nc->state == NCSI_CHANNEL_ACTIVE &&
  42			    ncsi_channel_has_link(nc))
  43				return false;
  44		}
  45
  46	return true;
  47}
  48
  49static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
  50{
  51	struct ncsi_dev *nd = &ndp->ndev;
  52	struct ncsi_package *np;
  53	struct ncsi_channel *nc;
  54	unsigned long flags;
  55
  56	nd->state = ncsi_dev_state_functional;
  57	if (force_down) {
  58		nd->link_up = 0;
  59		goto report;
  60	}
  61
  62	nd->link_up = 0;
  63	NCSI_FOR_EACH_PACKAGE(ndp, np) {
  64		NCSI_FOR_EACH_CHANNEL(np, nc) {
  65			spin_lock_irqsave(&nc->lock, flags);
  66
  67			if (!list_empty(&nc->link) ||
  68			    nc->state != NCSI_CHANNEL_ACTIVE) {
  69				spin_unlock_irqrestore(&nc->lock, flags);
  70				continue;
  71			}
  72
  73			if (ncsi_channel_has_link(nc)) {
  74				spin_unlock_irqrestore(&nc->lock, flags);
  75				nd->link_up = 1;
  76				goto report;
  77			}
  78
  79			spin_unlock_irqrestore(&nc->lock, flags);
  80		}
  81	}
  82
  83report:
  84	nd->handler(nd);
  85}
  86
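/* Per-channel watchdog, re-armed every second while enabled: in the
 * START/RETRY states it issues a Get Link Status (GLS) command, then
 * counts through the WAIT window; if the counter runs past
 * NCSI_CHANNEL_MONITOR_WAIT_MAX the link is reported down and the channel
 * is re-queued so the state machine can recover it.
 */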
  87static void ncsi_channel_monitor(struct timer_list *t)
  88{
  89	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
  90	struct ncsi_package *np = nc->package;
  91	struct ncsi_dev_priv *ndp = np->ndp;
  92	struct ncsi_channel_mode *ncm;
  93	struct ncsi_cmd_arg nca;
  94	bool enabled, chained;
  95	unsigned int monitor_state;
  96	unsigned long flags;
  97	int state, ret;
  98
  99	spin_lock_irqsave(&nc->lock, flags);
 100	state = nc->state;
 101	chained = !list_empty(&nc->link);
 102	enabled = nc->monitor.enabled;
 103	monitor_state = nc->monitor.state;
 104	spin_unlock_irqrestore(&nc->lock, flags);
 105
 106	if (!enabled || chained) {
 107		ncsi_stop_channel_monitor(nc);
 108		return;
 109	}
 110	if (state != NCSI_CHANNEL_INACTIVE &&
 111	    state != NCSI_CHANNEL_ACTIVE) {
 112		ncsi_stop_channel_monitor(nc);
 113		return;
 114	}
 115
 116	switch (monitor_state) {
 117	case NCSI_CHANNEL_MONITOR_START:
 118	case NCSI_CHANNEL_MONITOR_RETRY:
 119		nca.ndp = ndp;
 120		nca.package = np->id;
 121		nca.channel = nc->id;
 122		nca.type = NCSI_PKT_CMD_GLS;
 123		nca.req_flags = 0;
 124		ret = ncsi_xmit_cmd(&nca);
 125		if (ret)
 126			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
 127				   ret);
 128		break;
 129	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
 130		break;
 131	default:
 132		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
 133			   nc->id);
 134		ncsi_report_link(ndp, true);
 135		ndp->flags |= NCSI_DEV_RESHUFFLE;
 136
 137		ncsi_stop_channel_monitor(nc);
 138
 139		ncm = &nc->modes[NCSI_MODE_LINK];
 140		spin_lock_irqsave(&nc->lock, flags);
 141		nc->state = NCSI_CHANNEL_INVISIBLE;
 142		ncm->data[2] &= ~0x1;
 143		spin_unlock_irqrestore(&nc->lock, flags);
 144
 145		spin_lock_irqsave(&ndp->lock, flags);
 146		nc->state = NCSI_CHANNEL_ACTIVE;
 147		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
 148		spin_unlock_irqrestore(&ndp->lock, flags);
 149		ncsi_process_next_channel(ndp);
 150		return;
 151	}
 152
 153	spin_lock_irqsave(&nc->lock, flags);
 154	nc->monitor.state++;
 155	spin_unlock_irqrestore(&nc->lock, flags);
 156	mod_timer(&nc->monitor.timer, jiffies + HZ);
 157}
 158
 159void ncsi_start_channel_monitor(struct ncsi_channel *nc)
 160{
 161	unsigned long flags;
 162
 163	spin_lock_irqsave(&nc->lock, flags);
 164	WARN_ON_ONCE(nc->monitor.enabled);
 165	nc->monitor.enabled = true;
 166	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
 167	spin_unlock_irqrestore(&nc->lock, flags);
 168
 169	mod_timer(&nc->monitor.timer, jiffies + HZ);
 170}
 171
 172void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
 173{
 174	unsigned long flags;
 175
 176	spin_lock_irqsave(&nc->lock, flags);
 177	if (!nc->monitor.enabled) {
 178		spin_unlock_irqrestore(&nc->lock, flags);
 179		return;
 180	}
 181	nc->monitor.enabled = false;
 182	spin_unlock_irqrestore(&nc->lock, flags);
 183
 184	del_timer_sync(&nc->monitor.timer);
 185}
 186
 187struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
 188				       unsigned char id)
 189{
 190	struct ncsi_channel *nc;
 191
 192	NCSI_FOR_EACH_CHANNEL(np, nc) {
 193		if (nc->id == id)
 194			return nc;
 195	}
 196
 197	return NULL;
 198}
 199
 200struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
 201{
 202	struct ncsi_channel *nc, *tmp;
 203	int index;
 204	unsigned long flags;
 205
 206	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
 207	if (!nc)
 208		return NULL;
 209
 210	nc->id = id;
 211	nc->package = np;
 212	nc->state = NCSI_CHANNEL_INACTIVE;
 213	nc->monitor.enabled = false;
 214	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
 215	spin_lock_init(&nc->lock);
 216	INIT_LIST_HEAD(&nc->link);
 217	for (index = 0; index < NCSI_CAP_MAX; index++)
 218		nc->caps[index].index = index;
 219	for (index = 0; index < NCSI_MODE_MAX; index++)
 220		nc->modes[index].index = index;
 221
 222	spin_lock_irqsave(&np->lock, flags);
 223	tmp = ncsi_find_channel(np, id);
 224	if (tmp) {
 225		spin_unlock_irqrestore(&np->lock, flags);
 226		kfree(nc);
 227		return tmp;
 228	}
 229
 230	list_add_tail_rcu(&nc->node, &np->channels);
 231	np->channel_num++;
 232	spin_unlock_irqrestore(&np->lock, flags);
 233
 234	return nc;
 235}
 236
 237static void ncsi_remove_channel(struct ncsi_channel *nc)
 238{
 239	struct ncsi_package *np = nc->package;
 240	unsigned long flags;
 241
 242	spin_lock_irqsave(&nc->lock, flags);
 243
 244	/* Release filters */
 245	kfree(nc->mac_filter.addrs);
 246	kfree(nc->vlan_filter.vids);
 247
 248	nc->state = NCSI_CHANNEL_INACTIVE;
 249	spin_unlock_irqrestore(&nc->lock, flags);
 250	ncsi_stop_channel_monitor(nc);
 251
 252	/* Remove and free channel */
 253	spin_lock_irqsave(&np->lock, flags);
 254	list_del_rcu(&nc->node);
 255	np->channel_num--;
 256	spin_unlock_irqrestore(&np->lock, flags);
 257
 258	kfree(nc);
 259}
 260
 261struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
 262				       unsigned char id)
 263{
 264	struct ncsi_package *np;
 265
 266	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 267		if (np->id == id)
 268			return np;
 269	}
 270
 271	return NULL;
 272}
 273
 274struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
 275				      unsigned char id)
 276{
 277	struct ncsi_package *np, *tmp;
 278	unsigned long flags;
 279
 280	np = kzalloc(sizeof(*np), GFP_ATOMIC);
 281	if (!np)
 282		return NULL;
 283
 284	np->id = id;
 285	np->ndp = ndp;
 286	spin_lock_init(&np->lock);
 287	INIT_LIST_HEAD(&np->channels);
 288	np->channel_whitelist = UINT_MAX;
 289
 290	spin_lock_irqsave(&ndp->lock, flags);
 291	tmp = ncsi_find_package(ndp, id);
 292	if (tmp) {
 293		spin_unlock_irqrestore(&ndp->lock, flags);
 294		kfree(np);
 295		return tmp;
 296	}
 297
 298	list_add_tail_rcu(&np->node, &ndp->packages);
 299	ndp->package_num++;
 300	spin_unlock_irqrestore(&ndp->lock, flags);
 301
 302	return np;
 303}
 304
 305void ncsi_remove_package(struct ncsi_package *np)
 306{
 307	struct ncsi_dev_priv *ndp = np->ndp;
 308	struct ncsi_channel *nc, *tmp;
 309	unsigned long flags;
 310
 311	/* Release all child channels */
 312	list_for_each_entry_safe(nc, tmp, &np->channels, node)
 313		ncsi_remove_channel(nc);
 314
 315	/* Remove and free package */
 316	spin_lock_irqsave(&ndp->lock, flags);
 317	list_del_rcu(&np->node);
 318	ndp->package_num--;
 319	spin_unlock_irqrestore(&ndp->lock, flags);
 320
 321	kfree(np);
 322}
 323
 324void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
 325				   unsigned char id,
 326				   struct ncsi_package **np,
 327				   struct ncsi_channel **nc)
 328{
 329	struct ncsi_package *p;
 330	struct ncsi_channel *c;
 331
 332	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
 333	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
 334
 335	if (np)
 336		*np = p;
 337	if (nc)
 338		*nc = c;
 339}
 340
  341	/* Two consecutive NCSI commands shouldn't use the same packet ID;
  342	 * otherwise a stale response could be matched to the wrong command.
  343	 * So the available IDs are allocated in round-robin fashion.
  344	 */
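/* Illustrative allocation order (a sketch, assuming NCSI_REQ_START_IDX is
 * the first usable slot): the scan below starts at ndp->request_id, runs
 * to the end of ndp->requests[], then wraps back to NCSI_REQ_START_IDX,
 * so free IDs are reused as late as possible.
 */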
 345struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
 346					unsigned int req_flags)
 347{
 348	struct ncsi_request *nr = NULL;
 349	int i, limit = ARRAY_SIZE(ndp->requests);
 350	unsigned long flags;
 351
  352	/* Look for an available request slot from the cursor up to the ceiling */
 353	spin_lock_irqsave(&ndp->lock, flags);
 354	for (i = ndp->request_id; i < limit; i++) {
 355		if (ndp->requests[i].used)
 356			continue;
 357
 358		nr = &ndp->requests[i];
 359		nr->used = true;
 360		nr->flags = req_flags;
 361		ndp->request_id = i + 1;
 362		goto found;
 363	}
 364
  365	/* Fall back to searching from the starting cursor */
 366	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
 367		if (ndp->requests[i].used)
 368			continue;
 369
 370		nr = &ndp->requests[i];
 371		nr->used = true;
 372		nr->flags = req_flags;
 373		ndp->request_id = i + 1;
 374		goto found;
 375	}
 376
 377found:
 378	spin_unlock_irqrestore(&ndp->lock, flags);
 379	return nr;
 380}
 381
 382void ncsi_free_request(struct ncsi_request *nr)
 383{
 384	struct ncsi_dev_priv *ndp = nr->ndp;
 385	struct sk_buff *cmd, *rsp;
 386	unsigned long flags;
 387	bool driven;
 388
 389	if (nr->enabled) {
 390		nr->enabled = false;
 391		del_timer_sync(&nr->timer);
 392	}
 393
 394	spin_lock_irqsave(&ndp->lock, flags);
 395	cmd = nr->cmd;
 396	rsp = nr->rsp;
 397	nr->cmd = NULL;
 398	nr->rsp = NULL;
 399	nr->used = false;
 400	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
 401	spin_unlock_irqrestore(&ndp->lock, flags);
 402
 403	if (driven && cmd && --ndp->pending_req_num == 0)
 404		schedule_work(&ndp->work);
 405
 406	/* Release command and response */
 407	consume_skb(cmd);
 408	consume_skb(rsp);
 409}
 410
 411struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
 412{
 413	struct ncsi_dev_priv *ndp;
 414
 415	NCSI_FOR_EACH_DEV(ndp) {
 416		if (ndp->ndev.dev == dev)
 417			return &ndp->ndev;
 418	}
 419
 420	return NULL;
 421}
 422
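/* Request expiry handler: if no response has been recorded for the command
 * by the time the timer fires, notify any netlink requester of the timeout
 * and release the request.
 */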
 423static void ncsi_request_timeout(struct timer_list *t)
 424{
 425	struct ncsi_request *nr = from_timer(nr, t, timer);
 426	struct ncsi_dev_priv *ndp = nr->ndp;
 427	struct ncsi_cmd_pkt *cmd;
 428	struct ncsi_package *np;
 429	struct ncsi_channel *nc;
 430	unsigned long flags;
 431
  432	/* If the request already has an associated response,
  433	 * let the response handler release it.
  434	 */
 435	spin_lock_irqsave(&ndp->lock, flags);
 436	nr->enabled = false;
 437	if (nr->rsp || !nr->cmd) {
 438		spin_unlock_irqrestore(&ndp->lock, flags);
 439		return;
 440	}
 441	spin_unlock_irqrestore(&ndp->lock, flags);
 442
 443	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
 444		if (nr->cmd) {
 445			/* Find the package */
 446			cmd = (struct ncsi_cmd_pkt *)
 447			      skb_network_header(nr->cmd);
 448			ncsi_find_package_and_channel(ndp,
 449						      cmd->cmd.common.channel,
 450						      &np, &nc);
 451			ncsi_send_netlink_timeout(nr, np, nc);
 452		}
 453	}
 454
 455	/* Release the request */
 456	ncsi_free_request(nr);
 457}
 458
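/* Suspend state machine for the active channel: select its package (SP),
 * optionally refresh the link state of every channel (GLS) when a
 * reshuffle is pending, then disable channel network Tx (DCNT), disable
 * the channel (DC) and finally deselect the package (DP) unless another
 * channel on that package is still active.
 */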
 459static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
 460{
 461	struct ncsi_dev *nd = &ndp->ndev;
 462	struct ncsi_package *np;
 463	struct ncsi_channel *nc, *tmp;
 464	struct ncsi_cmd_arg nca;
 465	unsigned long flags;
 466	int ret;
 467
 468	np = ndp->active_package;
 469	nc = ndp->active_channel;
 470	nca.ndp = ndp;
 471	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
 472	switch (nd->state) {
 473	case ncsi_dev_state_suspend:
 474		nd->state = ncsi_dev_state_suspend_select;
 475		/* Fall through */
 476	case ncsi_dev_state_suspend_select:
 477		ndp->pending_req_num = 1;
 478
 479		nca.type = NCSI_PKT_CMD_SP;
 480		nca.package = np->id;
 481		nca.channel = NCSI_RESERVED_CHANNEL;
 482		if (ndp->flags & NCSI_DEV_HWA)
 483			nca.bytes[0] = 0;
 484		else
 485			nca.bytes[0] = 1;
 486
  487		/* Retrieve the last link states of the channels in the current
  488		 * package when the active channel needs to fail over to another
  489		 * one, since we will possibly select another channel as the next
  490		 * active one. The channels' link states are the most important
  491		 * factor in that selection, so they need to be accurate.
  492		 * Unfortunately, the link states of inactive channels can't be
  493		 * updated with LSC AENs in time.
  494		 */
 495		if (ndp->flags & NCSI_DEV_RESHUFFLE)
 496			nd->state = ncsi_dev_state_suspend_gls;
 497		else
 498			nd->state = ncsi_dev_state_suspend_dcnt;
 499		ret = ncsi_xmit_cmd(&nca);
 500		if (ret)
 501			goto error;
 502
 503		break;
 504	case ncsi_dev_state_suspend_gls:
 505		ndp->pending_req_num = np->channel_num;
 506
 507		nca.type = NCSI_PKT_CMD_GLS;
 508		nca.package = np->id;
 509
 510		nd->state = ncsi_dev_state_suspend_dcnt;
 511		NCSI_FOR_EACH_CHANNEL(np, nc) {
 512			nca.channel = nc->id;
 513			ret = ncsi_xmit_cmd(&nca);
 514			if (ret)
 515				goto error;
 516		}
 517
 518		break;
 519	case ncsi_dev_state_suspend_dcnt:
 520		ndp->pending_req_num = 1;
 521
 522		nca.type = NCSI_PKT_CMD_DCNT;
 523		nca.package = np->id;
 524		nca.channel = nc->id;
 525
 526		nd->state = ncsi_dev_state_suspend_dc;
 527		ret = ncsi_xmit_cmd(&nca);
 528		if (ret)
 529			goto error;
 530
 531		break;
 532	case ncsi_dev_state_suspend_dc:
 533		ndp->pending_req_num = 1;
 534
 535		nca.type = NCSI_PKT_CMD_DC;
 536		nca.package = np->id;
 537		nca.channel = nc->id;
 538		nca.bytes[0] = 1;
 539
 540		nd->state = ncsi_dev_state_suspend_deselect;
 541		ret = ncsi_xmit_cmd(&nca);
 542		if (ret)
 543			goto error;
 544
 545		NCSI_FOR_EACH_CHANNEL(np, tmp) {
 546			/* If there is another channel active on this package
 547			 * do not deselect the package.
 548			 */
 549			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
 550				nd->state = ncsi_dev_state_suspend_done;
 551				break;
 552			}
 553		}
 554		break;
 555	case ncsi_dev_state_suspend_deselect:
 556		ndp->pending_req_num = 1;
 557
 558		nca.type = NCSI_PKT_CMD_DP;
 559		nca.package = np->id;
 560		nca.channel = NCSI_RESERVED_CHANNEL;
 561
 562		nd->state = ncsi_dev_state_suspend_done;
 563		ret = ncsi_xmit_cmd(&nca);
 564		if (ret)
 565			goto error;
 566
 567		break;
 568	case ncsi_dev_state_suspend_done:
 569		spin_lock_irqsave(&nc->lock, flags);
 570		nc->state = NCSI_CHANNEL_INACTIVE;
 571		spin_unlock_irqrestore(&nc->lock, flags);
 572		if (ndp->flags & NCSI_DEV_RESET)
 573			ncsi_reset_dev(nd);
 574		else
 575			ncsi_process_next_channel(ndp);
 576		break;
 577	default:
 578		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
 579			    nd->state);
 580	}
 581
 582	return;
 583error:
 584	nd->state = ncsi_dev_state_functional;
 585}
 586
 587/* Check the VLAN filter bitmap for a set filter, and construct a
 588 * "Set VLAN Filter - Disable" packet if found.
 589 */
 590static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 591			 struct ncsi_cmd_arg *nca)
 592{
 593	struct ncsi_channel_vlan_filter *ncf;
 594	unsigned long flags;
 595	void *bitmap;
 596	int index;
 597	u16 vid;
 598
 599	ncf = &nc->vlan_filter;
 600	bitmap = &ncf->bitmap;
 601
 602	spin_lock_irqsave(&nc->lock, flags);
 603	index = find_next_bit(bitmap, ncf->n_vids, 0);
 604	if (index >= ncf->n_vids) {
 605		spin_unlock_irqrestore(&nc->lock, flags);
 606		return -1;
 607	}
 608	vid = ncf->vids[index];
 609
 610	clear_bit(index, bitmap);
 611	ncf->vids[index] = 0;
 612	spin_unlock_irqrestore(&nc->lock, flags);
 613
 614	nca->type = NCSI_PKT_CMD_SVF;
 615	nca->words[1] = vid;
 616	/* HW filter index starts at 1 */
 617	nca->bytes[6] = index + 1;
 618	nca->bytes[7] = 0x00;
 619	return 0;
 620}
 621
  622	/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
 623 * packet.
 624 */
 625static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 626		       struct ncsi_cmd_arg *nca)
 627{
 628	struct ncsi_channel_vlan_filter *ncf;
 629	struct vlan_vid *vlan = NULL;
 630	unsigned long flags;
 631	int i, index;
 632	void *bitmap;
 633	u16 vid;
 634
 635	if (list_empty(&ndp->vlan_vids))
 636		return -1;
 637
 638	ncf = &nc->vlan_filter;
 639	bitmap = &ncf->bitmap;
 640
 641	spin_lock_irqsave(&nc->lock, flags);
 642
 643	rcu_read_lock();
 644	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
 645		vid = vlan->vid;
 646		for (i = 0; i < ncf->n_vids; i++)
 647			if (ncf->vids[i] == vid) {
 648				vid = 0;
 649				break;
 650			}
 651		if (vid)
 652			break;
 653	}
 654	rcu_read_unlock();
 655
 656	if (!vid) {
  657		/* No outstanding VLAN ID left to set */
 658		spin_unlock_irqrestore(&nc->lock, flags);
 659		return -1;
 660	}
 661
 662	index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
 663	if (index < 0 || index >= ncf->n_vids) {
 664		netdev_err(ndp->ndev.dev,
 665			   "Channel %u already has all VLAN filters set\n",
 666			   nc->id);
 667		spin_unlock_irqrestore(&nc->lock, flags);
 668		return -1;
 669	}
 670
 671	ncf->vids[index] = vid;
 672	set_bit(index, bitmap);
 673	spin_unlock_irqrestore(&nc->lock, flags);
 674
 675	nca->type = NCSI_PKT_CMD_SVF;
 676	nca->words[1] = vid;
 677	/* HW filter index starts at 1 */
 678	nca->bytes[6] = index + 1;
 679	nca->bytes[7] = 0x01;
 680
 681	return 0;
 682}
 683
 684#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
 685
 686/* NCSI OEM Command APIs */
 687static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
 688{
 689	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
 690	int ret = 0;
 691
 692	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
 693
 694	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
 695	*(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
 696	data[5] = NCSI_OEM_BCM_CMD_GMA;
 697
 698	nca->data = data;
 699
 700	ret = ncsi_xmit_cmd(nca);
 701	if (ret)
 702		netdev_err(nca->ndp->ndev.dev,
 703			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 704			   nca->type);
 705	return ret;
 706}
 707
 708static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
 709{
 710	union {
 711		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
 712		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
 713	} u;
 714	int ret = 0;
 715
 716	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
 717
 718	memset(&u, 0, sizeof(u));
 719	u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
 720	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
 721	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
 722
 723	nca->data = u.data_u8;
 724
 725	ret = ncsi_xmit_cmd(nca);
 726	if (ret)
 727		netdev_err(nca->ndp->ndev.dev,
 728			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 729			   nca->type);
 730	return ret;
 731}
 732
 733/* OEM Command handlers initialization */
 734static struct ncsi_oem_gma_handler {
 735	unsigned int	mfr_id;
 736	int		(*handler)(struct ncsi_cmd_arg *nca);
 737} ncsi_oem_gma_handlers[] = {
 738	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
 739	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx }
 740};
 741
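/* Look up and invoke the OEM Get MAC Address (GMA) handler that matches
 * the given manufacturer ID; the command is only issued once per device,
 * guarded by gma_flag.
 */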
 742static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
 743{
 744	struct ncsi_oem_gma_handler *nch = NULL;
 745	int i;
 746
 747	/* This function should only be called once, return if flag set */
 748	if (nca->ndp->gma_flag == 1)
 749		return -1;
 750
 751	/* Find gma handler for given manufacturer id */
 752	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
 753		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
 754			if (ncsi_oem_gma_handlers[i].handler)
 755				nch = &ncsi_oem_gma_handlers[i];
 756			break;
  757		}
 758	}
 759
 760	if (!nch) {
 761		netdev_err(nca->ndp->ndev.dev,
 762			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
 763			   mf_id);
 764		return -1;
 765	}
 766
 767	/* Set the flag for GMA command which should only be called once */
 768	nca->ndp->gma_flag = 1;
 769
 770	/* Get Mac address from NCSI device */
 771	return nch->handler(nca);
 772}
 773
 774#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
 775
 776/* Determine if a given channel from the channel_queue should be used for Tx */
 777static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
 778			       struct ncsi_channel *nc)
 779{
 780	struct ncsi_channel_mode *ncm;
 781	struct ncsi_channel *channel;
 782	struct ncsi_package *np;
 783
 784	/* Check if any other channel has Tx enabled; a channel may have already
 785	 * been configured and removed from the channel queue.
 786	 */
 787	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 788		if (!ndp->multi_package && np != nc->package)
 789			continue;
 790		NCSI_FOR_EACH_CHANNEL(np, channel) {
 791			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
 792			if (ncm->enable)
 793				return false;
 794		}
 795	}
 796
 797	/* This channel is the preferred channel and has link */
 798	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
 799		np = channel->package;
 800		if (np->preferred_channel &&
 801		    ncsi_channel_has_link(np->preferred_channel)) {
 802			return np->preferred_channel == nc;
 803		}
 804	}
 805
 806	/* This channel has link */
 807	if (ncsi_channel_has_link(nc))
 808		return true;
 809
 810	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
 811		if (ncsi_channel_has_link(channel))
 812			return false;
 813
 814	/* No other channel has link; default to this one */
 815	return true;
 816}
 817
 818/* Change the active Tx channel in a multi-channel setup */
 819int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
 820			   struct ncsi_package *package,
 821			   struct ncsi_channel *disable,
 822			   struct ncsi_channel *enable)
 823{
 824	struct ncsi_cmd_arg nca;
 825	struct ncsi_channel *nc;
 826	struct ncsi_package *np;
 827	int ret = 0;
 828
 829	if (!package->multi_channel && !ndp->multi_package)
 830		netdev_warn(ndp->ndev.dev,
 831			    "NCSI: Trying to update Tx channel in single-channel mode\n");
 832	nca.ndp = ndp;
 833	nca.req_flags = 0;
 834
 835	/* Find current channel with Tx enabled */
 836	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 837		if (disable)
 838			break;
 839		if (!ndp->multi_package && np != package)
 840			continue;
 841
 842		NCSI_FOR_EACH_CHANNEL(np, nc)
 843			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
 844				disable = nc;
 845				break;
 846			}
 847	}
 848
 849	/* Find a suitable channel for Tx */
 850	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 851		if (enable)
 852			break;
 853		if (!ndp->multi_package && np != package)
 854			continue;
 855		if (!(ndp->package_whitelist & (0x1 << np->id)))
 856			continue;
 857
 858		if (np->preferred_channel &&
 859		    ncsi_channel_has_link(np->preferred_channel)) {
 860			enable = np->preferred_channel;
 861			break;
 862		}
 863
 864		NCSI_FOR_EACH_CHANNEL(np, nc) {
 865			if (!(np->channel_whitelist & 0x1 << nc->id))
 866				continue;
 867			if (nc->state != NCSI_CHANNEL_ACTIVE)
 868				continue;
 869			if (ncsi_channel_has_link(nc)) {
 870				enable = nc;
 871				break;
 872			}
 873		}
 874	}
 875
 876	if (disable == enable)
 877		return -1;
 878
 879	if (!enable)
 880		return -1;
 881
 882	if (disable) {
 883		nca.channel = disable->id;
 884		nca.package = disable->package->id;
 885		nca.type = NCSI_PKT_CMD_DCNT;
 886		ret = ncsi_xmit_cmd(&nca);
 887		if (ret)
 888			netdev_err(ndp->ndev.dev,
 889				   "Error %d sending DCNT\n",
 890				   ret);
 891	}
 892
 893	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
 894
 895	nca.channel = enable->id;
 896	nca.package = enable->package->id;
 897	nca.type = NCSI_PKT_CMD_ECNT;
 898	ret = ncsi_xmit_cmd(&nca);
 899	if (ret)
 900		netdev_err(ndp->ndev.dev,
 901			   "Error %d sending ECNT\n",
 902			   ret);
 903
 904	return ret;
 905}
 906
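/* Configuration state machine for the active channel: select the package
 * (SP), clear the channel's initial state (CIS), optionally fetch the MAC
 * address via an OEM GMA command, program the VLAN and MAC filters, enable
 * broadcast forwarding and the channel itself, set up AENs and (for the Tx
 * channel) network Tx, and finish with a Get Link Status query before
 * marking the channel ACTIVE.
 */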
 907static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
 908{
 909	struct ncsi_package *np = ndp->active_package;
 910	struct ncsi_channel *nc = ndp->active_channel;
 911	struct ncsi_channel *hot_nc = NULL;
 912	struct ncsi_dev *nd = &ndp->ndev;
 913	struct net_device *dev = nd->dev;
 914	struct ncsi_cmd_arg nca;
 915	unsigned char index;
 916	unsigned long flags;
 917	int ret;
 918
 919	nca.ndp = ndp;
 920	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
 921	switch (nd->state) {
 922	case ncsi_dev_state_config:
 923	case ncsi_dev_state_config_sp:
 924		ndp->pending_req_num = 1;
 925
 926		/* Select the specific package */
 927		nca.type = NCSI_PKT_CMD_SP;
 928		if (ndp->flags & NCSI_DEV_HWA)
 929			nca.bytes[0] = 0;
 930		else
 931			nca.bytes[0] = 1;
 932		nca.package = np->id;
 933		nca.channel = NCSI_RESERVED_CHANNEL;
 934		ret = ncsi_xmit_cmd(&nca);
 935		if (ret) {
 936			netdev_err(ndp->ndev.dev,
 937				   "NCSI: Failed to transmit CMD_SP\n");
 938			goto error;
 939		}
 940
 941		nd->state = ncsi_dev_state_config_cis;
 942		break;
 943	case ncsi_dev_state_config_cis:
 944		ndp->pending_req_num = 1;
 945
 946		/* Clear initial state */
 947		nca.type = NCSI_PKT_CMD_CIS;
 948		nca.package = np->id;
 949		nca.channel = nc->id;
 950		ret = ncsi_xmit_cmd(&nca);
 951		if (ret) {
 952			netdev_err(ndp->ndev.dev,
 953				   "NCSI: Failed to transmit CMD_CIS\n");
 954			goto error;
 955		}
 956
 957		nd->state = ncsi_dev_state_config_oem_gma;
 958		break;
 959	case ncsi_dev_state_config_oem_gma:
 960		nd->state = ncsi_dev_state_config_clear_vids;
 961		ret = -1;
 962
 963#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
 964		nca.type = NCSI_PKT_CMD_OEM;
 965		nca.package = np->id;
 966		nca.channel = nc->id;
 967		ndp->pending_req_num = 1;
 968		ret = ncsi_gma_handler(&nca, nc->version.mf_id);
 969#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
 970
 971		if (ret < 0)
 972			schedule_work(&ndp->work);
 973
 974		break;
 975	case ncsi_dev_state_config_clear_vids:
 976	case ncsi_dev_state_config_svf:
 977	case ncsi_dev_state_config_ev:
 978	case ncsi_dev_state_config_sma:
 979	case ncsi_dev_state_config_ebf:
 980	case ncsi_dev_state_config_dgmf:
 981	case ncsi_dev_state_config_ecnt:
 982	case ncsi_dev_state_config_ec:
 983	case ncsi_dev_state_config_ae:
 984	case ncsi_dev_state_config_gls:
 985		ndp->pending_req_num = 1;
 986
 987		nca.package = np->id;
 988		nca.channel = nc->id;
 989
 990		/* Clear any active filters on the channel before setting */
 991		if (nd->state == ncsi_dev_state_config_clear_vids) {
 992			ret = clear_one_vid(ndp, nc, &nca);
 993			if (ret) {
 994				nd->state = ncsi_dev_state_config_svf;
 995				schedule_work(&ndp->work);
 996				break;
 997			}
 998			/* Repeat */
 999			nd->state = ncsi_dev_state_config_clear_vids;
1000		/* Add known VLAN tags to the filter */
1001		} else if (nd->state == ncsi_dev_state_config_svf) {
1002			ret = set_one_vid(ndp, nc, &nca);
1003			if (ret) {
1004				nd->state = ncsi_dev_state_config_ev;
1005				schedule_work(&ndp->work);
1006				break;
1007			}
1008			/* Repeat */
1009			nd->state = ncsi_dev_state_config_svf;
1010		/* Enable/Disable the VLAN filter */
1011		} else if (nd->state == ncsi_dev_state_config_ev) {
1012			if (list_empty(&ndp->vlan_vids)) {
1013				nca.type = NCSI_PKT_CMD_DV;
1014			} else {
1015				nca.type = NCSI_PKT_CMD_EV;
1016				nca.bytes[3] = NCSI_CAP_VLAN_NO;
1017			}
1018			nd->state = ncsi_dev_state_config_sma;
1019		} else if (nd->state == ncsi_dev_state_config_sma) {
1020		/* Use first entry in unicast filter table. Note that
1021		 * the MAC filter table starts from entry 1 instead of
1022		 * 0.
1023		 */
1024			nca.type = NCSI_PKT_CMD_SMA;
1025			for (index = 0; index < 6; index++)
1026				nca.bytes[index] = dev->dev_addr[index];
1027			nca.bytes[6] = 0x1;
1028			nca.bytes[7] = 0x1;
1029			nd->state = ncsi_dev_state_config_ebf;
1030		} else if (nd->state == ncsi_dev_state_config_ebf) {
1031			nca.type = NCSI_PKT_CMD_EBF;
1032			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
 1033			/* If global multicast filtering is supported, disable
 1034			 * it so that all multicast packets will be forwarded
 1035			 * to the management controller.
 1036			 */
1037			if (nc->caps[NCSI_CAP_GENERIC].cap &
1038			    NCSI_CAP_GENERIC_MC)
1039				nd->state = ncsi_dev_state_config_dgmf;
1040			else if (ncsi_channel_is_tx(ndp, nc))
1041				nd->state = ncsi_dev_state_config_ecnt;
1042			else
1043				nd->state = ncsi_dev_state_config_ec;
1044		} else if (nd->state == ncsi_dev_state_config_dgmf) {
1045			nca.type = NCSI_PKT_CMD_DGMF;
1046			if (ncsi_channel_is_tx(ndp, nc))
1047				nd->state = ncsi_dev_state_config_ecnt;
1048			else
1049				nd->state = ncsi_dev_state_config_ec;
1050		} else if (nd->state == ncsi_dev_state_config_ecnt) {
1051			if (np->preferred_channel &&
1052			    nc != np->preferred_channel)
1053				netdev_info(ndp->ndev.dev,
1054					    "NCSI: Tx failed over to channel %u\n",
1055					    nc->id);
1056			nca.type = NCSI_PKT_CMD_ECNT;
1057			nd->state = ncsi_dev_state_config_ec;
1058		} else if (nd->state == ncsi_dev_state_config_ec) {
1059			/* Enable AEN if it's supported */
1060			nca.type = NCSI_PKT_CMD_EC;
1061			nd->state = ncsi_dev_state_config_ae;
1062			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1063				nd->state = ncsi_dev_state_config_gls;
1064		} else if (nd->state == ncsi_dev_state_config_ae) {
1065			nca.type = NCSI_PKT_CMD_AE;
1066			nca.bytes[0] = 0;
1067			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1068			nd->state = ncsi_dev_state_config_gls;
1069		} else if (nd->state == ncsi_dev_state_config_gls) {
1070			nca.type = NCSI_PKT_CMD_GLS;
1071			nd->state = ncsi_dev_state_config_done;
1072		}
1073
1074		ret = ncsi_xmit_cmd(&nca);
1075		if (ret) {
1076			netdev_err(ndp->ndev.dev,
1077				   "NCSI: Failed to transmit CMD %x\n",
1078				   nca.type);
1079			goto error;
1080		}
1081		break;
1082	case ncsi_dev_state_config_done:
1083		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1084			   nc->id);
1085		spin_lock_irqsave(&nc->lock, flags);
1086		nc->state = NCSI_CHANNEL_ACTIVE;
1087
1088		if (ndp->flags & NCSI_DEV_RESET) {
1089			/* A reset event happened during config, start it now */
1090			nc->reconfigure_needed = false;
1091			spin_unlock_irqrestore(&nc->lock, flags);
1092			ncsi_reset_dev(nd);
1093			break;
1094		}
1095
1096		if (nc->reconfigure_needed) {
1097			/* This channel's configuration has been updated
1098			 * part-way during the config state - start the
1099			 * channel configuration over
1100			 */
1101			nc->reconfigure_needed = false;
1102			nc->state = NCSI_CHANNEL_INACTIVE;
1103			spin_unlock_irqrestore(&nc->lock, flags);
1104
1105			spin_lock_irqsave(&ndp->lock, flags);
1106			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1107			spin_unlock_irqrestore(&ndp->lock, flags);
1108
1109			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1110			ncsi_process_next_channel(ndp);
1111			break;
1112		}
1113
1114		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1115			hot_nc = nc;
1116		} else {
1117			hot_nc = NULL;
1118			netdev_dbg(ndp->ndev.dev,
1119				   "NCSI: channel %u link down after config\n",
1120				   nc->id);
1121		}
1122		spin_unlock_irqrestore(&nc->lock, flags);
1123
1124		/* Update the hot channel */
1125		spin_lock_irqsave(&ndp->lock, flags);
1126		ndp->hot_channel = hot_nc;
1127		spin_unlock_irqrestore(&ndp->lock, flags);
1128
1129		ncsi_start_channel_monitor(nc);
1130		ncsi_process_next_channel(ndp);
1131		break;
1132	default:
1133		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1134			     nd->state);
1135	}
1136
1137	return;
1138
1139error:
1140	ncsi_report_link(ndp, true);
1141}
1142
1143static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1144{
1145	struct ncsi_channel *nc, *found, *hot_nc;
1146	struct ncsi_channel_mode *ncm;
1147	unsigned long flags, cflags;
1148	struct ncsi_package *np;
1149	bool with_link;
1150
1151	spin_lock_irqsave(&ndp->lock, flags);
1152	hot_nc = ndp->hot_channel;
1153	spin_unlock_irqrestore(&ndp->lock, flags);
1154
 1155	/* By default the search stops once an inactive channel with link up
 1156	 * is found, unless a preferred channel is set.
 1157	 * If multi_package or multi_channel are configured, all channels in
 1158	 * the whitelist are added to the channel queue.
 1159	 */
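	/* Illustrative example: with ndp->package_whitelist == 0x1 and
	 * np->channel_whitelist == 0x3, only channels 0 and 1 of package 0
	 * pass the bitmask checks below; every other channel is skipped.
	 */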
1160	found = NULL;
1161	with_link = false;
1162	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1163		if (!(ndp->package_whitelist & (0x1 << np->id)))
1164			continue;
1165		NCSI_FOR_EACH_CHANNEL(np, nc) {
1166			if (!(np->channel_whitelist & (0x1 << nc->id)))
1167				continue;
1168
1169			spin_lock_irqsave(&nc->lock, cflags);
1170
1171			if (!list_empty(&nc->link) ||
1172			    nc->state != NCSI_CHANNEL_INACTIVE) {
1173				spin_unlock_irqrestore(&nc->lock, cflags);
1174				continue;
1175			}
1176
1177			if (!found)
1178				found = nc;
1179
1180			if (nc == hot_nc)
1181				found = nc;
1182
1183			ncm = &nc->modes[NCSI_MODE_LINK];
1184			if (ncm->data[2] & 0x1) {
1185				found = nc;
1186				with_link = true;
1187			}
1188
1189			/* If multi_channel is enabled configure all valid
1190			 * channels whether or not they currently have link
1191			 * so they will have AENs enabled.
1192			 */
1193			if (with_link || np->multi_channel) {
1194				spin_lock_irqsave(&ndp->lock, flags);
1195				list_add_tail_rcu(&nc->link,
1196						  &ndp->channel_queue);
1197				spin_unlock_irqrestore(&ndp->lock, flags);
1198
1199				netdev_dbg(ndp->ndev.dev,
1200					   "NCSI: Channel %u added to queue (link %s)\n",
1201					   nc->id,
1202					   ncm->data[2] & 0x1 ? "up" : "down");
1203			}
1204
1205			spin_unlock_irqrestore(&nc->lock, cflags);
1206
1207			if (with_link && !np->multi_channel)
1208				break;
1209		}
1210		if (with_link && !ndp->multi_package)
1211			break;
1212	}
1213
1214	if (list_empty(&ndp->channel_queue) && found) {
1215		netdev_info(ndp->ndev.dev,
1216			    "NCSI: No channel with link found, configuring channel %u\n",
1217			    found->id);
1218		spin_lock_irqsave(&ndp->lock, flags);
1219		list_add_tail_rcu(&found->link, &ndp->channel_queue);
1220		spin_unlock_irqrestore(&ndp->lock, flags);
1221	} else if (!found) {
1222		netdev_warn(ndp->ndev.dev,
1223			    "NCSI: No channel found to configure!\n");
1224		ncsi_report_link(ndp, true);
1225		return -ENODEV;
1226	}
1227
1228	return ncsi_process_next_channel(ndp);
1229}
1230
1231static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1232{
1233	struct ncsi_package *np;
1234	struct ncsi_channel *nc;
1235	unsigned int cap;
1236	bool has_channel = false;
1237
 1238	/* Hardware arbitration is disabled if any one channel doesn't
 1239	 * explicitly advertise support for it.
 1240	 */
1241	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1242		NCSI_FOR_EACH_CHANNEL(np, nc) {
1243			has_channel = true;
1244
1245			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1246			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1247			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1248			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1249				ndp->flags &= ~NCSI_DEV_HWA;
1250				return false;
1251			}
1252		}
1253	}
1254
1255	if (has_channel) {
1256		ndp->flags |= NCSI_DEV_HWA;
1257		return true;
1258	}
1259
1260	ndp->flags &= ~NCSI_DEV_HWA;
1261	return false;
1262}
1263
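/* Probe state machine: deselect all eight possible packages, then select
 * and enumerate one package at a time - clear initial state (CIS) and
 * query version (GVI), capabilities (GC) and link status (GLS) on each
 * channel - before deselecting it and moving on; once every package slot
 * has been probed, HWA support is checked and an active channel is chosen.
 */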
1264static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1265{
1266	struct ncsi_dev *nd = &ndp->ndev;
1267	struct ncsi_package *np;
1268	struct ncsi_channel *nc;
1269	struct ncsi_cmd_arg nca;
1270	unsigned char index;
1271	int ret;
1272
1273	nca.ndp = ndp;
1274	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1275	switch (nd->state) {
1276	case ncsi_dev_state_probe:
1277		nd->state = ncsi_dev_state_probe_deselect;
1278		/* Fall through */
1279	case ncsi_dev_state_probe_deselect:
1280		ndp->pending_req_num = 8;
1281
1282		/* Deselect all possible packages */
1283		nca.type = NCSI_PKT_CMD_DP;
1284		nca.channel = NCSI_RESERVED_CHANNEL;
1285		for (index = 0; index < 8; index++) {
1286			nca.package = index;
1287			ret = ncsi_xmit_cmd(&nca);
1288			if (ret)
1289				goto error;
1290		}
1291
1292		nd->state = ncsi_dev_state_probe_package;
1293		break;
1294	case ncsi_dev_state_probe_package:
1295		ndp->pending_req_num = 1;
1296
1297		nca.type = NCSI_PKT_CMD_SP;
1298		nca.bytes[0] = 1;
1299		nca.package = ndp->package_probe_id;
1300		nca.channel = NCSI_RESERVED_CHANNEL;
1301		ret = ncsi_xmit_cmd(&nca);
1302		if (ret)
1303			goto error;
1304		nd->state = ncsi_dev_state_probe_channel;
1305		break;
1306	case ncsi_dev_state_probe_channel:
1307		ndp->active_package = ncsi_find_package(ndp,
1308							ndp->package_probe_id);
1309		if (!ndp->active_package) {
1310			/* No response */
1311			nd->state = ncsi_dev_state_probe_dp;
1312			schedule_work(&ndp->work);
1313			break;
1314		}
1315		nd->state = ncsi_dev_state_probe_cis;
1316		schedule_work(&ndp->work);
1317		break;
1318	case ncsi_dev_state_probe_cis:
1319		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1320
1321		/* Clear initial state */
1322		nca.type = NCSI_PKT_CMD_CIS;
1323		nca.package = ndp->active_package->id;
1324		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1325			nca.channel = index;
1326			ret = ncsi_xmit_cmd(&nca);
1327			if (ret)
1328				goto error;
1329		}
1330
1331		nd->state = ncsi_dev_state_probe_gvi;
1332		break;
1333	case ncsi_dev_state_probe_gvi:
1334	case ncsi_dev_state_probe_gc:
1335	case ncsi_dev_state_probe_gls:
1336		np = ndp->active_package;
1337		ndp->pending_req_num = np->channel_num;
1338
1339		/* Retrieve version, capability or link status */
1340		if (nd->state == ncsi_dev_state_probe_gvi)
1341			nca.type = NCSI_PKT_CMD_GVI;
1342		else if (nd->state == ncsi_dev_state_probe_gc)
1343			nca.type = NCSI_PKT_CMD_GC;
1344		else
1345			nca.type = NCSI_PKT_CMD_GLS;
1346
1347		nca.package = np->id;
1348		NCSI_FOR_EACH_CHANNEL(np, nc) {
1349			nca.channel = nc->id;
1350			ret = ncsi_xmit_cmd(&nca);
1351			if (ret)
1352				goto error;
1353		}
1354
1355		if (nd->state == ncsi_dev_state_probe_gvi)
1356			nd->state = ncsi_dev_state_probe_gc;
1357		else if (nd->state == ncsi_dev_state_probe_gc)
1358			nd->state = ncsi_dev_state_probe_gls;
1359		else
1360			nd->state = ncsi_dev_state_probe_dp;
1361		break;
1362	case ncsi_dev_state_probe_dp:
1363		ndp->pending_req_num = 1;
1364
1365		/* Deselect the current package */
1366		nca.type = NCSI_PKT_CMD_DP;
1367		nca.package = ndp->package_probe_id;
1368		nca.channel = NCSI_RESERVED_CHANNEL;
1369		ret = ncsi_xmit_cmd(&nca);
1370		if (ret)
1371			goto error;
1372
1373		/* Probe next package */
1374		ndp->package_probe_id++;
1375		if (ndp->package_probe_id >= 8) {
1376			/* Probe finished */
1377			ndp->flags |= NCSI_DEV_PROBED;
1378			break;
1379		}
1380		nd->state = ncsi_dev_state_probe_package;
1381		ndp->active_package = NULL;
1382		break;
1383	default:
1384		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1385			    nd->state);
1386	}
1387
1388	if (ndp->flags & NCSI_DEV_PROBED) {
1389		/* Check if all packages have HWA support */
1390		ncsi_check_hwa(ndp);
1391		ncsi_choose_active_channel(ndp);
1392	}
1393
1394	return;
1395error:
1396	netdev_err(ndp->ndev.dev,
1397		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
1398		   nca.type);
1399	ncsi_report_link(ndp, true);
1400}
1401
1402static void ncsi_dev_work(struct work_struct *work)
1403{
1404	struct ncsi_dev_priv *ndp = container_of(work,
1405			struct ncsi_dev_priv, work);
1406	struct ncsi_dev *nd = &ndp->ndev;
1407
1408	switch (nd->state & ncsi_dev_state_major) {
1409	case ncsi_dev_state_probe:
1410		ncsi_probe_channel(ndp);
1411		break;
1412	case ncsi_dev_state_suspend:
1413		ncsi_suspend_channel(ndp);
1414		break;
1415	case ncsi_dev_state_config:
1416		ncsi_configure_channel(ndp);
1417		break;
1418	default:
1419		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1420			    nd->state);
1421	}
1422}
1423
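/* Take the next channel off the channel queue and run the appropriate
 * state machine on it: configuration if it was INACTIVE, suspension if it
 * was ACTIVE. With an empty queue, either re-run channel selection (when a
 * reshuffle is pending) or report the final link state.
 */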
1424int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1425{
1426	struct ncsi_channel *nc;
1427	int old_state;
1428	unsigned long flags;
1429
1430	spin_lock_irqsave(&ndp->lock, flags);
1431	nc = list_first_or_null_rcu(&ndp->channel_queue,
1432				    struct ncsi_channel, link);
1433	if (!nc) {
1434		spin_unlock_irqrestore(&ndp->lock, flags);
1435		goto out;
1436	}
1437
1438	list_del_init(&nc->link);
1439	spin_unlock_irqrestore(&ndp->lock, flags);
1440
1441	spin_lock_irqsave(&nc->lock, flags);
1442	old_state = nc->state;
1443	nc->state = NCSI_CHANNEL_INVISIBLE;
1444	spin_unlock_irqrestore(&nc->lock, flags);
1445
1446	ndp->active_channel = nc;
1447	ndp->active_package = nc->package;
1448
1449	switch (old_state) {
1450	case NCSI_CHANNEL_INACTIVE:
1451		ndp->ndev.state = ncsi_dev_state_config;
1452		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
 1453			   nc->id);
1454		ncsi_configure_channel(ndp);
1455		break;
1456	case NCSI_CHANNEL_ACTIVE:
1457		ndp->ndev.state = ncsi_dev_state_suspend;
1458		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1459			   nc->id);
1460		ncsi_suspend_channel(ndp);
1461		break;
1462	default:
1463		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1464			   old_state, nc->package->id, nc->id);
1465		ncsi_report_link(ndp, false);
1466		return -EINVAL;
1467	}
1468
1469	return 0;
1470
1471out:
1472	ndp->active_channel = NULL;
1473	ndp->active_package = NULL;
1474	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1475		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1476		return ncsi_choose_active_channel(ndp);
1477	}
1478
1479	ncsi_report_link(ndp, false);
1480	return -ENODEV;
1481}
1482
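/* Re-queue every ACTIVE channel for reconfiguration (used when the VLAN
 * filter list changes); channels that are mid-configuration are only
 * marked dirty via reconfigure_needed. Returns the number of channels
 * queued.
 */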
1483static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1484{
1485	struct ncsi_dev *nd = &ndp->ndev;
1486	struct ncsi_channel *nc;
1487	struct ncsi_package *np;
1488	unsigned long flags;
1489	unsigned int n = 0;
1490
1491	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1492		NCSI_FOR_EACH_CHANNEL(np, nc) {
1493			spin_lock_irqsave(&nc->lock, flags);
1494
 1495			/* Channels may be busy - mark them dirty instead of
 1496			 * kicking them if:
 1497			 * a) not ACTIVE (configured)
 1498			 * b) in the channel_queue (to be configured)
 1499			 * c) their ndev is in the config state
 1500			 */
1501			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1502				if ((ndp->ndev.state & 0xff00) ==
1503						ncsi_dev_state_config ||
1504						!list_empty(&nc->link)) {
1505					netdev_dbg(nd->dev,
1506						   "NCSI: channel %p marked dirty\n",
1507						   nc);
1508					nc->reconfigure_needed = true;
1509				}
1510				spin_unlock_irqrestore(&nc->lock, flags);
1511				continue;
1512			}
1513
1514			spin_unlock_irqrestore(&nc->lock, flags);
1515
1516			ncsi_stop_channel_monitor(nc);
1517			spin_lock_irqsave(&nc->lock, flags);
1518			nc->state = NCSI_CHANNEL_INACTIVE;
1519			spin_unlock_irqrestore(&nc->lock, flags);
1520
1521			spin_lock_irqsave(&ndp->lock, flags);
1522			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1523			spin_unlock_irqrestore(&ndp->lock, flags);
1524
1525			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1526			n++;
1527		}
1528	}
1529
1530	return n;
1531}
1532
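/* Track a VLAN ID for the NCSI filter and kick the channels so the new
 * filter gets programmed; intended to back a driver's
 * .ndo_vlan_rx_add_vid() callback.
 */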
1533int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1534{
1535	struct ncsi_dev_priv *ndp;
1536	unsigned int n_vids = 0;
1537	struct vlan_vid *vlan;
1538	struct ncsi_dev *nd;
1539	bool found = false;
1540
1541	if (vid == 0)
1542		return 0;
1543
1544	nd = ncsi_find_dev(dev);
1545	if (!nd) {
1546		netdev_warn(dev, "NCSI: No net_device?\n");
1547		return 0;
1548	}
1549
1550	ndp = TO_NCSI_DEV_PRIV(nd);
1551
1552	/* Add the VLAN id to our internal list */
1553	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1554		n_vids++;
1555		if (vlan->vid == vid) {
1556			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1557				   vid);
1558			return 0;
1559		}
1560	}
1561	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1562		netdev_warn(dev,
1563			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1564			    vid, NCSI_MAX_VLAN_VIDS);
1565		return -ENOSPC;
1566	}
1567
1568	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1569	if (!vlan)
1570		return -ENOMEM;
1571
1572	vlan->proto = proto;
1573	vlan->vid = vid;
1574	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1575
1576	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1577
1578	found = ncsi_kick_channels(ndp) != 0;
1579
1580	return found ? ncsi_process_next_channel(ndp) : 0;
1581}
1582EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1583
1584int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1585{
1586	struct vlan_vid *vlan, *tmp;
1587	struct ncsi_dev_priv *ndp;
1588	struct ncsi_dev *nd;
1589	bool found = false;
1590
1591	if (vid == 0)
1592		return 0;
1593
1594	nd = ncsi_find_dev(dev);
1595	if (!nd) {
1596		netdev_warn(dev, "NCSI: no net_device?\n");
1597		return 0;
1598	}
1599
1600	ndp = TO_NCSI_DEV_PRIV(nd);
1601
1602	/* Remove the VLAN id from our internal list */
1603	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1604		if (vlan->vid == vid) {
1605			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1606			list_del_rcu(&vlan->list);
1607			found = true;
1608			kfree(vlan);
1609		}
1610
1611	if (!found) {
1612		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1613		return -EINVAL;
1614	}
1615
1616	found = ncsi_kick_channels(ndp) != 0;
1617
1618	return found ? ncsi_process_next_channel(ndp) : 0;
1619}
1620EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1621
1622struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1623				   void (*handler)(struct ncsi_dev *ndev))
1624{
1625	struct ncsi_dev_priv *ndp;
1626	struct ncsi_dev *nd;
1627	unsigned long flags;
1628	int i;
1629
1630	/* Check if the device has been registered or not */
1631	nd = ncsi_find_dev(dev);
1632	if (nd)
1633		return nd;
1634
1635	/* Create NCSI device */
1636	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1637	if (!ndp)
1638		return NULL;
1639
1640	nd = &ndp->ndev;
1641	nd->state = ncsi_dev_state_registered;
1642	nd->dev = dev;
1643	nd->handler = handler;
1644	ndp->pending_req_num = 0;
1645	INIT_LIST_HEAD(&ndp->channel_queue);
1646	INIT_LIST_HEAD(&ndp->vlan_vids);
1647	INIT_WORK(&ndp->work, ncsi_dev_work);
1648	ndp->package_whitelist = UINT_MAX;
1649
1650	/* Initialize private NCSI device */
1651	spin_lock_init(&ndp->lock);
1652	INIT_LIST_HEAD(&ndp->packages);
1653	ndp->request_id = NCSI_REQ_START_IDX;
1654	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1655		ndp->requests[i].id = i;
1656		ndp->requests[i].ndp = ndp;
1657		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1658	}
1659
1660	spin_lock_irqsave(&ncsi_dev_lock, flags);
1661	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1662	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1663
1664	/* Register NCSI packet Rx handler */
1665	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1666	ndp->ptype.func = ncsi_rcv_rsp;
1667	ndp->ptype.dev = dev;
1668	dev_add_pack(&ndp->ptype);
1669
1670	/* Set up generic netlink interface */
1671	ncsi_init_netlink(dev);
1672
1673	return nd;
1674}
1675EXPORT_SYMBOL_GPL(ncsi_register_dev);
1676
1677int ncsi_start_dev(struct ncsi_dev *nd)
1678{
1679	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1680
1681	if (nd->state != ncsi_dev_state_registered &&
1682	    nd->state != ncsi_dev_state_functional)
1683		return -ENOTTY;
1684
1685	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1686		ndp->package_probe_id = 0;
1687		nd->state = ncsi_dev_state_probe;
1688		schedule_work(&ndp->work);
1689		return 0;
1690	}
1691
1692	return ncsi_reset_dev(nd);
1693}
1694EXPORT_SYMBOL_GPL(ncsi_start_dev);
1695
1696void ncsi_stop_dev(struct ncsi_dev *nd)
1697{
1698	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1699	struct ncsi_package *np;
1700	struct ncsi_channel *nc;
1701	bool chained;
1702	int old_state;
1703	unsigned long flags;
1704
1705	/* Stop the channel monitor on any active channels. Don't reset the
1706	 * channel state so we know which were active when ncsi_start_dev()
1707	 * is next called.
1708	 */
1709	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1710		NCSI_FOR_EACH_CHANNEL(np, nc) {
1711			ncsi_stop_channel_monitor(nc);
1712
1713			spin_lock_irqsave(&nc->lock, flags);
1714			chained = !list_empty(&nc->link);
1715			old_state = nc->state;
1716			spin_unlock_irqrestore(&nc->lock, flags);
1717
1718			WARN_ON_ONCE(chained ||
1719				     old_state == NCSI_CHANNEL_INVISIBLE);
1720		}
1721	}
1722
1723	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1724	ncsi_report_link(ndp, true);
1725}
1726EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1727
1728int ncsi_reset_dev(struct ncsi_dev *nd)
1729{
1730	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1731	struct ncsi_channel *nc, *active, *tmp;
1732	struct ncsi_package *np;
1733	unsigned long flags;
1734
1735	spin_lock_irqsave(&ndp->lock, flags);
1736
1737	if (!(ndp->flags & NCSI_DEV_RESET)) {
 1738		/* A reset hasn't been requested yet - check the current state */
1739		switch (nd->state & ncsi_dev_state_major) {
1740		case ncsi_dev_state_registered:
1741		case ncsi_dev_state_probe:
1742			/* Not even probed yet - do nothing */
1743			spin_unlock_irqrestore(&ndp->lock, flags);
1744			return 0;
1745		case ncsi_dev_state_suspend:
1746		case ncsi_dev_state_config:
1747			/* Wait for the channel to finish its suspend/config
1748			 * operation; once it finishes it will check for
1749			 * NCSI_DEV_RESET and reset the state.
1750			 */
1751			ndp->flags |= NCSI_DEV_RESET;
1752			spin_unlock_irqrestore(&ndp->lock, flags);
1753			return 0;
1754		}
1755	} else {
1756		switch (nd->state) {
1757		case ncsi_dev_state_suspend_done:
1758		case ncsi_dev_state_config_done:
1759		case ncsi_dev_state_functional:
1760			/* Ok */
1761			break;
1762		default:
 1763			/* A reset operation is already in progress */
1764			spin_unlock_irqrestore(&ndp->lock, flags);
1765			return 0;
1766		}
1767	}
1768
1769	if (!list_empty(&ndp->channel_queue)) {
1770		/* Clear any channel queue we may have interrupted */
1771		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1772			list_del_init(&nc->link);
1773	}
1774	spin_unlock_irqrestore(&ndp->lock, flags);
1775
1776	active = NULL;
1777	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1778		NCSI_FOR_EACH_CHANNEL(np, nc) {
1779			spin_lock_irqsave(&nc->lock, flags);
1780
1781			if (nc->state == NCSI_CHANNEL_ACTIVE) {
1782				active = nc;
1783				nc->state = NCSI_CHANNEL_INVISIBLE;
1784				spin_unlock_irqrestore(&nc->lock, flags);
1785				ncsi_stop_channel_monitor(nc);
1786				break;
1787			}
1788
1789			spin_unlock_irqrestore(&nc->lock, flags);
1790		}
1791		if (active)
1792			break;
1793	}
1794
1795	if (!active) {
1796		/* Done */
1797		spin_lock_irqsave(&ndp->lock, flags);
1798		ndp->flags &= ~NCSI_DEV_RESET;
1799		spin_unlock_irqrestore(&ndp->lock, flags);
1800		return ncsi_choose_active_channel(ndp);
1801	}
1802
1803	spin_lock_irqsave(&ndp->lock, flags);
1804	ndp->flags |= NCSI_DEV_RESET;
1805	ndp->active_channel = active;
1806	ndp->active_package = active->package;
1807	spin_unlock_irqrestore(&ndp->lock, flags);
1808
1809	nd->state = ncsi_dev_state_suspend;
1810	schedule_work(&ndp->work);
1811	return 0;
1812}
1813
1814void ncsi_unregister_dev(struct ncsi_dev *nd)
1815{
1816	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1817	struct ncsi_package *np, *tmp;
1818	unsigned long flags;
1819
1820	dev_remove_pack(&ndp->ptype);
1821
1822	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1823		ncsi_remove_package(np);
1824
1825	spin_lock_irqsave(&ncsi_dev_lock, flags);
1826	list_del_rcu(&ndp->node);
1827	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1828
1829	ncsi_unregister_netlink(nd->dev);
1830
1831	kfree(ndp);
1832}
1833EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
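/* Driver-facing usage, as a minimal illustrative sketch (the handler and
 * variable names below are invented for the example): ncsi_register_dev()
 * binds an NCSI state machine to a net_device and returns the handle used
 * by the other exported helpers; the handler is invoked from
 * ncsi_report_link() whenever the aggregated link state changes.
 *
 *	static void example_ncsi_handler(struct ncsi_dev *nd)
 *	{
 *		if (nd->state == ncsi_dev_state_functional)
 *			netdev_info(nd->dev, "NCSI link is %s\n",
 *				    nd->link_up ? "up" : "down");
 *	}
 *
 *	nd = ncsi_register_dev(netdev, example_ncsi_handler);
 *	if (nd) {
 *		ncsi_start_dev(nd);	// typically from ndo_open()
 *		...
 *		ncsi_stop_dev(nd);	// typically from ndo_stop()
 *		ncsi_unregister_dev(nd);
 *	}
 */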