Linux Audio

Check our new training course

Loading...
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright Gavin Shan, IBM Corporation 2016.
 
 
 
 
 
   4 */
   5
   6#include <linux/module.h>
   7#include <linux/kernel.h>
   8#include <linux/init.h>
   9#include <linux/netdevice.h>
  10#include <linux/skbuff.h>
  11#include <linux/of.h>
  12#include <linux/platform_device.h>
  13
  14#include <net/ncsi.h>
  15#include <net/net_namespace.h>
  16#include <net/sock.h>
  17#include <net/addrconf.h>
  18#include <net/ipv6.h>
  19#include <net/genetlink.h>
  20
  21#include "internal.h"
  22#include "ncsi-pkt.h"
  23#include "ncsi-netlink.h"
  24
  25LIST_HEAD(ncsi_dev_list);
  26DEFINE_SPINLOCK(ncsi_dev_lock);
  27
  28bool ncsi_channel_has_link(struct ncsi_channel *channel)
  29{
  30	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
 
 
 
 
 
 
  31}
  32
  33bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
  34			  struct ncsi_channel *channel)
  35{
  36	struct ncsi_package *np;
  37	struct ncsi_channel *nc;
 
 
  38
  39	NCSI_FOR_EACH_PACKAGE(ndp, np)
  40		NCSI_FOR_EACH_CHANNEL(np, nc) {
  41			if (nc == channel)
  42				continue;
  43			if (nc->state == NCSI_CHANNEL_ACTIVE &&
  44			    ncsi_channel_has_link(nc))
  45				return false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  46		}
 
  47
  48	return true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  49}
  50
/* Report the device's link state to the registered handler.
 *
 * With @force_down set, link is reported down unconditionally.
 * Otherwise link is up if at least one channel is ACTIVE, not queued
 * for (re)configuration (empty nc->link), and reports link itself.
 */
static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	nd->state = ncsi_dev_state_functional;
	if (force_down) {
		nd->link_up = 0;
		goto report;
	}

	nd->link_up = 0;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Channels queued on ndp->channel_queue or not in
			 * ACTIVE state don't contribute to link state.
			 */
			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_ACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			/* One linked channel is enough to report link up */
			if (ncsi_channel_has_link(nc)) {
				spin_unlock_irqrestore(&nc->lock, flags);
				nd->link_up = 1;
				goto report;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

report:
	nd->handler(nd);
}
  88
/* Per-channel monitor timer callback.
 *
 * While enabled it periodically (every HZ) sends a Get Link Status
 * (GLS) command and advances nc->monitor.state.  If the state counter
 * runs past NCSI_CHANNEL_MONITOR_WAIT_MAX without being reset
 * (presumably by the GLS response handler — confirm in ncsi-rsp.c),
 * the channel is considered timed out: link is forced down, the
 * channel is queued for reconfiguration and the monitor stops.
 */
static void ncsi_channel_monitor(struct timer_list *t)
{
	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
	struct ncsi_package *np = nc->package;
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel_mode *ncm;
	struct ncsi_cmd_arg nca;
	bool enabled, chained;
	unsigned int monitor_state;
	unsigned long flags;
	int state, ret;

	/* Snapshot all monitor-relevant state under the channel lock */
	spin_lock_irqsave(&nc->lock, flags);
	state = nc->state;
	chained = !list_empty(&nc->link);
	enabled = nc->monitor.enabled;
	monitor_state = nc->monitor.state;
	spin_unlock_irqrestore(&nc->lock, flags);

	if (!enabled)
		return;		/* expected race disabling timer */
	if (WARN_ON_ONCE(chained))
		goto bad_state;

	if (state != NCSI_CHANNEL_INACTIVE &&
	    state != NCSI_CHANNEL_ACTIVE) {
bad_state:
		netdev_warn(ndp->ndev.dev,
			    "Bad NCSI monitor state channel %d 0x%x %s queue\n",
			    nc->id, state, chained ? "on" : "off");
		/* Disable monitoring; do not rearm the timer */
		spin_lock_irqsave(&nc->lock, flags);
		nc->monitor.enabled = false;
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}

	switch (monitor_state) {
	case NCSI_CHANNEL_MONITOR_START:
	case NCSI_CHANNEL_MONITOR_RETRY:
		/* Poll the channel's link status */
		nca.ndp = ndp;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.type = NCSI_PKT_CMD_GLS;
		nca.req_flags = 0;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
				   ret);
		break;
	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
		/* Still within the grace period for a GLS response */
		break;
	default:
		/* Timed out: declare link down and requeue the channel
		 * for reconfiguration.
		 */
		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
			   nc->id);
		ncsi_report_link(ndp, true);
		ndp->flags |= NCSI_DEV_RESHUFFLE;

		/* Clear the cached link-up bit under the channel lock */
		ncm = &nc->modes[NCSI_MODE_LINK];
		spin_lock_irqsave(&nc->lock, flags);
		nc->monitor.enabled = false;
		nc->state = NCSI_CHANNEL_INVISIBLE;
		ncm->data[2] &= ~0x1;
		spin_unlock_irqrestore(&nc->lock, flags);

		/* NOTE(review): nc->state is rewritten here under ndp->lock
		 * rather than nc->lock — presumably intentional to pair with
		 * the channel_queue insertion; confirm against the locking
		 * rules in internal.h.
		 */
		spin_lock_irqsave(&ndp->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;
		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
		ncsi_process_next_channel(ndp);
		return;
	}

	/* Advance the monitor state and rearm for another second */
	spin_lock_irqsave(&nc->lock, flags);
	nc->monitor.state++;
	spin_unlock_irqrestore(&nc->lock, flags);
	mod_timer(&nc->monitor.timer, jiffies + HZ);
}
 166
 167void ncsi_start_channel_monitor(struct ncsi_channel *nc)
 168{
 169	unsigned long flags;
 170
 171	spin_lock_irqsave(&nc->lock, flags);
 172	WARN_ON_ONCE(nc->monitor.enabled);
 173	nc->monitor.enabled = true;
 174	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
 175	spin_unlock_irqrestore(&nc->lock, flags);
 176
 177	mod_timer(&nc->monitor.timer, jiffies + HZ);
 178}
 179
 180void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
 181{
 182	unsigned long flags;
 183
 184	spin_lock_irqsave(&nc->lock, flags);
 185	if (!nc->monitor.enabled) {
 186		spin_unlock_irqrestore(&nc->lock, flags);
 187		return;
 188	}
 189	nc->monitor.enabled = false;
 190	spin_unlock_irqrestore(&nc->lock, flags);
 191
 192	del_timer_sync(&nc->monitor.timer);
 193}
 194
 195struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
 196				       unsigned char id)
 197{
 198	struct ncsi_channel *nc;
 199
 200	NCSI_FOR_EACH_CHANNEL(np, nc) {
 201		if (nc->id == id)
 202			return nc;
 203	}
 204
 205	return NULL;
 206}
 207
 208struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
 209{
 210	struct ncsi_channel *nc, *tmp;
 211	int index;
 212	unsigned long flags;
 213
 214	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
 215	if (!nc)
 216		return NULL;
 217
 218	nc->id = id;
 219	nc->package = np;
 220	nc->state = NCSI_CHANNEL_INACTIVE;
 221	nc->monitor.enabled = false;
 222	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
 
 223	spin_lock_init(&nc->lock);
 224	INIT_LIST_HEAD(&nc->link);
 225	for (index = 0; index < NCSI_CAP_MAX; index++)
 226		nc->caps[index].index = index;
 227	for (index = 0; index < NCSI_MODE_MAX; index++)
 228		nc->modes[index].index = index;
 229
 230	spin_lock_irqsave(&np->lock, flags);
 231	tmp = ncsi_find_channel(np, id);
 232	if (tmp) {
 233		spin_unlock_irqrestore(&np->lock, flags);
 234		kfree(nc);
 235		return tmp;
 236	}
 237
 238	list_add_tail_rcu(&nc->node, &np->channels);
 239	np->channel_num++;
 240	spin_unlock_irqrestore(&np->lock, flags);
 241
 242	return nc;
 243}
 244
/* Unregister and free a channel: release its filter tables, stop
 * its monitor timer, unlink it from the owning package and free it.
 */
static void ncsi_remove_channel(struct ncsi_channel *nc)
{
	struct ncsi_package *np = nc->package;
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);

	/* Release filters */
	kfree(nc->mac_filter.addrs);
	kfree(nc->vlan_filter.vids);

	nc->state = NCSI_CHANNEL_INACTIVE;
	spin_unlock_irqrestore(&nc->lock, flags);
	/* Must not hold nc->lock here: the monitor callback takes it */
	ncsi_stop_channel_monitor(nc);

	/* Remove and free channel */
	spin_lock_irqsave(&np->lock, flags);
	list_del_rcu(&nc->node);
	np->channel_num--;
	spin_unlock_irqrestore(&np->lock, flags);

	kfree(nc);
}
 268
 269struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
 270				       unsigned char id)
 271{
 272	struct ncsi_package *np;
 273
 274	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 275		if (np->id == id)
 276			return np;
 277	}
 278
 279	return NULL;
 280}
 281
 282struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
 283				      unsigned char id)
 284{
 285	struct ncsi_package *np, *tmp;
 286	unsigned long flags;
 287
 288	np = kzalloc(sizeof(*np), GFP_ATOMIC);
 289	if (!np)
 290		return NULL;
 291
 292	np->id = id;
 293	np->ndp = ndp;
 294	spin_lock_init(&np->lock);
 295	INIT_LIST_HEAD(&np->channels);
 296	np->channel_whitelist = UINT_MAX;
 297
 298	spin_lock_irqsave(&ndp->lock, flags);
 299	tmp = ncsi_find_package(ndp, id);
 300	if (tmp) {
 301		spin_unlock_irqrestore(&ndp->lock, flags);
 302		kfree(np);
 303		return tmp;
 304	}
 305
 306	list_add_tail_rcu(&np->node, &ndp->packages);
 307	ndp->package_num++;
 308	spin_unlock_irqrestore(&ndp->lock, flags);
 309
 310	return np;
 311}
 312
 313void ncsi_remove_package(struct ncsi_package *np)
 314{
 315	struct ncsi_dev_priv *ndp = np->ndp;
 316	struct ncsi_channel *nc, *tmp;
 317	unsigned long flags;
 318
 319	/* Release all child channels */
 320	list_for_each_entry_safe(nc, tmp, &np->channels, node)
 321		ncsi_remove_channel(nc);
 322
 323	/* Remove and free package */
 324	spin_lock_irqsave(&ndp->lock, flags);
 325	list_del_rcu(&np->node);
 326	ndp->package_num--;
 327	spin_unlock_irqrestore(&ndp->lock, flags);
 328
 329	kfree(np);
 330}
 331
 332void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
 333				   unsigned char id,
 334				   struct ncsi_package **np,
 335				   struct ncsi_channel **nc)
 336{
 337	struct ncsi_package *p;
 338	struct ncsi_channel *c;
 339
 340	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
 341	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
 342
 343	if (np)
 344		*np = p;
 345	if (nc)
 346		*nc = c;
 347}
 348
 349/* For two consecutive NCSI commands, the packet IDs shouldn't
 350 * be same. Otherwise, the bogus response might be replied. So
 351 * the available IDs are allocated in round-robin fashion.
 352 */
 353struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
 354					unsigned int req_flags)
 355{
 356	struct ncsi_request *nr = NULL;
 357	int i, limit = ARRAY_SIZE(ndp->requests);
 358	unsigned long flags;
 359
 360	/* Check if there is one available request until the ceiling */
 361	spin_lock_irqsave(&ndp->lock, flags);
 362	for (i = ndp->request_id; i < limit; i++) {
 363		if (ndp->requests[i].used)
 364			continue;
 365
 366		nr = &ndp->requests[i];
 367		nr->used = true;
 368		nr->flags = req_flags;
 369		ndp->request_id = i + 1;
 370		goto found;
 371	}
 372
 373	/* Fail back to check from the starting cursor */
 374	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
 375		if (ndp->requests[i].used)
 376			continue;
 377
 378		nr = &ndp->requests[i];
 379		nr->used = true;
 380		nr->flags = req_flags;
 381		ndp->request_id = i + 1;
 382		goto found;
 383	}
 384
 385found:
 386	spin_unlock_irqrestore(&ndp->lock, flags);
 387	return nr;
 388}
 389
/* Return a request slot taken via ncsi_alloc_request(): cancel its
 * timeout timer, detach and consume its command/response skbs, and
 * mark the slot free.  For event-driven requests, kick the workqueue
 * when the last pending request completes.
 */
void ncsi_free_request(struct ncsi_request *nr)
{
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct sk_buff *cmd, *rsp;
	unsigned long flags;
	bool driven;

	/* Stop the per-request timeout timer if it's armed */
	if (nr->enabled) {
		nr->enabled = false;
		del_timer_sync(&nr->timer);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	cmd = nr->cmd;
	rsp = nr->rsp;
	nr->cmd = NULL;
	nr->rsp = NULL;
	nr->used = false;
	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* NOTE(review): pending_req_num is decremented outside ndp->lock —
	 * presumably safe because state-machine requests complete one at a
	 * time; confirm against the callers before changing.
	 */
	if (driven && cmd && --ndp->pending_req_num == 0)
		schedule_work(&ndp->work);

	/* Release command and response */
	consume_skb(cmd);
	consume_skb(rsp);
}
 418
 419struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
 420{
 421	struct ncsi_dev_priv *ndp;
 422
 423	NCSI_FOR_EACH_DEV(ndp) {
 424		if (ndp->ndev.dev == dev)
 425			return &ndp->ndev;
 426	}
 427
 428	return NULL;
 429}
 430
/* Per-request timeout timer callback: releases a request whose
 * response never arrived, notifying netlink users if the request
 * originated there.
 */
static void ncsi_request_timeout(struct timer_list *t)
{
	struct ncsi_request *nr = from_timer(nr, t, timer);
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct ncsi_cmd_pkt *cmd;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	/* If the request already had associated response,
	 * let the response handler to release it.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	nr->enabled = false;
	if (nr->rsp || !nr->cmd) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* NOTE(review): flags is compared with '==' rather than tested with
	 * '&' — fine only while NETLINK_DRIVEN is never combined with other
	 * flag bits; confirm against the flag definitions.  The inner
	 * nr->cmd check is redundant (guaranteed non-NULL by the guard
	 * above), though nr->cmd is re-read outside the lock here.
	 */
	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
		if (nr->cmd) {
			/* Find the package */
			cmd = (struct ncsi_cmd_pkt *)
			      skb_network_header(nr->cmd);
			ncsi_find_package_and_channel(ndp,
						      cmd->cmd.common.channel,
						      &np, &nc);
			ncsi_send_netlink_timeout(nr, np, nc);
		}
	}

	/* Release the request */
	ncsi_free_request(nr);
}
 466
/* Drive the channel-suspend state machine one step per invocation.
 *
 * Sequence: select package (SP) -> optionally poll link states (GLS,
 * when a reshuffle is pending) -> disable channel network Tx (DCNT)
 * -> disable channel (DC) -> deselect package (DP, skipped when a
 * sibling channel is still active) -> done.  Each transmitted command
 * sets pending_req_num; the response handler presumably re-invokes
 * this function to advance to the next state.
 */
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc, *tmp;
	struct ncsi_cmd_arg nca;
	unsigned long flags;
	int ret;

	np = ndp->active_package;
	nc = ndp->active_channel;
	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_suspend:
		nd->state = ncsi_dev_state_suspend_select;
		fallthrough;
	case ncsi_dev_state_suspend_select:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		/* Byte 0 of SP: hardware arbitration disable flag */
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;

		/* To retrieve the last link states of channels in current
		 * package when current active channel needs fail over to
		 * another one. It means we will possibly select another
		 * channel as next active one. The link states of channels
		 * are most important factor of the selection. So we need
		 * accurate link states. Unfortunately, the link states on
		 * inactive channels can't be updated with LSC AEN in time.
		 */
		if (ndp->flags & NCSI_DEV_RESHUFFLE)
			nd->state = ncsi_dev_state_suspend_gls;
		else
			nd->state = ncsi_dev_state_suspend_dcnt;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_gls:
		/* One GLS per call, walking channel_probe_id through all
		 * channels; stay in this state until every channel in the
		 * package has been polled.
		 */
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_GLS;
		nca.package = np->id;
		nca.channel = ndp->channel_probe_id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		ndp->channel_probe_id++;

		if (ndp->channel_probe_id == ndp->channel_count) {
			ndp->channel_probe_id = 0;
			nd->state = ncsi_dev_state_suspend_dcnt;
		}

		break;
	case ncsi_dev_state_suspend_dcnt:
		/* Disable network Tx on the active channel */
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DCNT;
		nca.package = np->id;
		nca.channel = nc->id;

		nd->state = ncsi_dev_state_suspend_dc;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_dc:
		/* Disable the channel itself (byte 0 = 1: allow link down) */
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DC;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.bytes[0] = 1;

		nd->state = ncsi_dev_state_suspend_deselect;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		NCSI_FOR_EACH_CHANNEL(np, tmp) {
			/* If there is another channel active on this package
			 * do not deselect the package.
			 */
			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
				nd->state = ncsi_dev_state_suspend_done;
				break;
			}
		}
		break;
	case ncsi_dev_state_suspend_deselect:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;

		nd->state = ncsi_dev_state_suspend_done;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_done:
		/* Mark the channel inactive and either reset the device or
		 * move on to the next queued channel.
		 */
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INACTIVE;
		spin_unlock_irqrestore(&nc->lock, flags);
		if (ndp->flags & NCSI_DEV_RESET)
			ncsi_reset_dev(nd);
		else
			ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
			    nd->state);
	}

	return;
error:
	nd->state = ncsi_dev_state_functional;
}
 596
 597/* Check the VLAN filter bitmap for a set filter, and construct a
 598 * "Set VLAN Filter - Disable" packet if found.
 599 */
 600static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 601			 struct ncsi_cmd_arg *nca)
 602{
 603	struct ncsi_channel_vlan_filter *ncf;
 604	unsigned long flags;
 605	void *bitmap;
 606	int index;
 607	u16 vid;
 608
 609	ncf = &nc->vlan_filter;
 610	bitmap = &ncf->bitmap;
 611
 612	spin_lock_irqsave(&nc->lock, flags);
 613	index = find_first_bit(bitmap, ncf->n_vids);
 614	if (index >= ncf->n_vids) {
 615		spin_unlock_irqrestore(&nc->lock, flags);
 616		return -1;
 617	}
 618	vid = ncf->vids[index];
 619
 620	clear_bit(index, bitmap);
 621	ncf->vids[index] = 0;
 622	spin_unlock_irqrestore(&nc->lock, flags);
 623
 624	nca->type = NCSI_PKT_CMD_SVF;
 625	nca->words[1] = vid;
 626	/* HW filter index starts at 1 */
 627	nca->bytes[6] = index + 1;
 628	nca->bytes[7] = 0x00;
 629	return 0;
 630}
 631
/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
 * packet.  Returns 0 with @nca populated, or -1 when there is nothing
 * to add or the filter table is full.
 */
static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
		       struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	struct vlan_vid *vlan = NULL;
	unsigned long flags;
	int i, index;
	void *bitmap;
	u16 vid;

	if (list_empty(&ndp->vlan_vids))
		return -1;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);

	/* Pick the first requested VID not already programmed into the
	 * channel's filter table.  vid == 0 after the walk means every
	 * requested VID is already present.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		vid = vlan->vid;
		for (i = 0; i < ncf->n_vids; i++)
			if (ncf->vids[i] == vid) {
				vid = 0;
				break;
			}
		if (vid)
			break;
	}
	rcu_read_unlock();

	if (!vid) {
		/* Every requested VLAN ID is already set */
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	/* Claim a free slot in the filter table */
	index = find_first_zero_bit(bitmap, ncf->n_vids);
	if (index < 0 || index >= ncf->n_vids) {
		netdev_err(ndp->ndev.dev,
			   "Channel %u already has all VLAN filters set\n",
			   nc->id);
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	ncf->vids[index] = vid;
	set_bit(index, bitmap);
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x01;

	return 0;
}
 693
 694static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
 695{
 696	unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
 697	int ret = 0;
 698
 699	nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
 700
 701	memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
 702	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
 703
 704	data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
 705
 706	/* PHY Link up attribute */
 707	data[6] = 0x1;
 708
 709	nca->data = data;
 710
 711	ret = ncsi_xmit_cmd(nca);
 712	if (ret)
 713		netdev_err(nca->ndp->ndev.dev,
 714			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 715			   nca->type);
 716	return ret;
 717}
 718
 719/* NCSI OEM Command APIs */
 720static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
 721{
 722	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
 723	int ret = 0;
 724
 725	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
 726
 727	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
 728	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
 729	data[5] = NCSI_OEM_BCM_CMD_GMA;
 730
 731	nca->data = data;
 732
 733	ret = ncsi_xmit_cmd(nca);
 734	if (ret)
 735		netdev_err(nca->ndp->ndev.dev,
 736			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 737			   nca->type);
 738	return ret;
 739}
 740
 741static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
 742{
 743	union {
 744		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
 745		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
 746	} u;
 747	int ret = 0;
 748
 749	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
 750
 751	memset(&u, 0, sizeof(u));
 752	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
 753	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
 754	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
 755
 756	nca->data = u.data_u8;
 757
 758	ret = ncsi_xmit_cmd(nca);
 759	if (ret)
 760		netdev_err(nca->ndp->ndev.dev,
 761			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 762			   nca->type);
 763	return ret;
 764}
 765
 766static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
 767{
 768	union {
 769		u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
 770		u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
 771	} u;
 772	int ret = 0;
 773
 774	memset(&u, 0, sizeof(u));
 775	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
 776	u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
 777	u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
 778	memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
 779	       nca->ndp->ndev.dev->dev_addr,	ETH_ALEN);
 780	u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
 781		(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
 782
 783	nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
 784	nca->data = u.data_u8;
 785
 786	ret = ncsi_xmit_cmd(nca);
 787	if (ret)
 788		netdev_err(nca->ndp->ndev.dev,
 789			   "NCSI: Failed to transmit cmd 0x%x during probe\n",
 790			   nca->type);
 791	return ret;
 792}
 793
 794static int ncsi_oem_gma_handler_intel(struct ncsi_cmd_arg *nca)
 795{
 796	unsigned char data[NCSI_OEM_INTEL_CMD_GMA_LEN];
 797	int ret = 0;
 798
 799	nca->payload = NCSI_OEM_INTEL_CMD_GMA_LEN;
 800
 801	memset(data, 0, NCSI_OEM_INTEL_CMD_GMA_LEN);
 802	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
 803	data[4] = NCSI_OEM_INTEL_CMD_GMA;
 804
 805	nca->data = data;
 806
 807	ret = ncsi_xmit_cmd(nca);
 808	if (ret)
 809		netdev_err(nca->ndp->ndev.dev,
 810			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
 811			   nca->type);
 812
 813	return ret;
 814}
 815
/* OEM Command handlers initialization: maps a manufacturer (MFR) ID
 * to its Get MAC Address handler; consulted by ncsi_gma_handler().
 */
static struct ncsi_oem_gma_handler {
	unsigned int	mfr_id;		/* OEM manufacturer ID */
	int		(*handler)(struct ncsi_cmd_arg *nca);
} ncsi_oem_gma_handlers[] = {
	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx },
	{ NCSI_OEM_MFR_INTEL_ID, ncsi_oem_gma_handler_intel }
};
 825
 826static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
 827{
 828	struct ncsi_oem_gma_handler *nch = NULL;
 829	int i;
 830
 831	/* This function should only be called once, return if flag set */
 832	if (nca->ndp->gma_flag == 1)
 833		return -1;
 834
 835	/* Find gma handler for given manufacturer id */
 836	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
 837		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
 838			if (ncsi_oem_gma_handlers[i].handler)
 839				nch = &ncsi_oem_gma_handlers[i];
 840			break;
 841			}
 842	}
 843
 844	if (!nch) {
 845		netdev_err(nca->ndp->ndev.dev,
 846			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
 847			   mf_id);
 848		return -1;
 849	}
 850
 851	/* Get Mac address from NCSI device */
 852	return nch->handler(nca);
 853}
 854
/* Determine if a given channel from the channel_queue should be used for Tx */
static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
			       struct ncsi_channel *nc)
{
	struct ncsi_channel_mode *ncm;
	struct ncsi_channel *channel;
	struct ncsi_package *np;

	/* Check if any other channel has Tx enabled; a channel may have already
	 * been configured and removed from the channel queue.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		/* In single-package mode only nc's own package matters */
		if (!ndp->multi_package && np != nc->package)
			continue;
		NCSI_FOR_EACH_CHANNEL(np, channel) {
			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
			if (ncm->enable)
				return false;
		}
	}

	/* This channel is the preferred channel and has link */
	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
		np = channel->package;
		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			/* Tx goes to the preferred channel — only true if
			 * that preferred channel is nc itself.
			 */
			return np->preferred_channel == nc;
		}
	}

	/* This channel has link */
	if (ncsi_channel_has_link(nc))
		return true;

	/* Defer to any queued channel that does have link */
	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
		if (ncsi_channel_has_link(channel))
			return false;

	/* No other channel has link; default to this one */
	return true;
}
 896
/* Change the active Tx channel in a multi-channel setup.
 *
 * @disable: channel to take Tx away from, or NULL to auto-detect the
 *           channel that currently has Tx enabled.
 * @enable:  channel to give Tx to, or NULL to auto-select a suitable
 *           one (preferred channel with link first, then any
 *           whitelisted active channel with link).
 *
 * Sends DCNT to the old channel and ECNT to the new one.  Returns the
 * ECNT transmit result, or -1 when no switch is needed or possible.
 */
int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
			   struct ncsi_package *package,
			   struct ncsi_channel *disable,
			   struct ncsi_channel *enable)
{
	struct ncsi_cmd_arg nca;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	int ret = 0;

	if (!package->multi_channel && !ndp->multi_package)
		netdev_warn(ndp->ndev.dev,
			    "NCSI: Trying to update Tx channel in single-channel mode\n");
	nca.ndp = ndp;
	nca.req_flags = 0;

	/* Find current channel with Tx enabled */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (disable)
			break;
		if (!ndp->multi_package && np != package)
			continue;

		NCSI_FOR_EACH_CHANNEL(np, nc)
			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
				disable = nc;
				break;
			}
	}

	/* Find a suitable channel for Tx */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (enable)
			break;
		if (!ndp->multi_package && np != package)
			continue;
		/* Skip packages excluded by the package whitelist */
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;

		/* Preferred channel wins when it has link */
		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			enable = np->preferred_channel;
			break;
		}

		/* Otherwise take any whitelisted, active channel with link */
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & 0x1 << nc->id))
				continue;
			if (nc->state != NCSI_CHANNEL_ACTIVE)
				continue;
			if (ncsi_channel_has_link(nc)) {
				enable = nc;
				break;
			}
		}
	}

	/* Nothing to do if Tx would stay on the same channel */
	if (disable == enable)
		return -1;

	if (!enable)
		return -1;

	/* Tx off on the old channel first (failure is only logged) */
	if (disable) {
		nca.channel = disable->id;
		nca.package = disable->package->id;
		nca.type = NCSI_PKT_CMD_DCNT;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev,
				   "Error %d sending DCNT\n",
				   ret);
	}

	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);

	nca.channel = enable->id;
	nca.package = enable->package->id;
	nca.type = NCSI_PKT_CMD_ECNT;
	ret = ncsi_xmit_cmd(&nca);
	if (ret)
		netdev_err(ndp->ndev.dev,
			   "Error %d sending ECNT\n",
			   ret);

	return ret;
}
 985
 986static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
 987{
 
 
 988	struct ncsi_package *np = ndp->active_package;
 989	struct ncsi_channel *nc = ndp->active_channel;
 990	struct ncsi_channel *hot_nc = NULL;
 991	struct ncsi_dev *nd = &ndp->ndev;
 992	struct net_device *dev = nd->dev;
 993	struct ncsi_cmd_arg nca;
 994	unsigned char index;
 995	unsigned long flags;
 996	int ret;
 997
 998	nca.ndp = ndp;
 999	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1000	switch (nd->state) {
1001	case ncsi_dev_state_config:
1002	case ncsi_dev_state_config_sp:
1003		ndp->pending_req_num = 1;
1004
1005		/* Select the specific package */
1006		nca.type = NCSI_PKT_CMD_SP;
1007		if (ndp->flags & NCSI_DEV_HWA)
1008			nca.bytes[0] = 0;
1009		else
1010			nca.bytes[0] = 1;
1011		nca.package = np->id;
1012		nca.channel = NCSI_RESERVED_CHANNEL;
1013		ret = ncsi_xmit_cmd(&nca);
1014		if (ret) {
1015			netdev_err(ndp->ndev.dev,
1016				   "NCSI: Failed to transmit CMD_SP\n");
1017			goto error;
1018		}
1019
1020		nd->state = ncsi_dev_state_config_cis;
1021		break;
1022	case ncsi_dev_state_config_cis:
1023		ndp->pending_req_num = 1;
1024
1025		/* Clear initial state */
1026		nca.type = NCSI_PKT_CMD_CIS;
1027		nca.package = np->id;
1028		nca.channel = nc->id;
1029		ret = ncsi_xmit_cmd(&nca);
1030		if (ret) {
1031			netdev_err(ndp->ndev.dev,
1032				   "NCSI: Failed to transmit CMD_CIS\n");
1033			goto error;
1034		}
1035
1036		nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
1037			  ? ncsi_dev_state_config_oem_gma
1038			  : ncsi_dev_state_config_clear_vids;
1039		break;
1040	case ncsi_dev_state_config_oem_gma:
1041		nd->state = ncsi_dev_state_config_apply_mac;
1042
1043		nca.package = np->id;
1044		nca.channel = nc->id;
1045		ndp->pending_req_num = 1;
1046		if (nc->version.major >= 1 && nc->version.minor >= 2) {
1047			nca.type = NCSI_PKT_CMD_GMCMA;
1048			ret = ncsi_xmit_cmd(&nca);
1049		} else {
1050			nca.type = NCSI_PKT_CMD_OEM;
1051			ret = ncsi_gma_handler(&nca, nc->version.mf_id);
1052		}
1053		if (ret < 0) {
1054			nd->state = ncsi_dev_state_config_clear_vids;
1055			schedule_work(&ndp->work);
1056		}
1057
1058		break;
1059	case ncsi_dev_state_config_apply_mac:
1060		rtnl_lock();
1061		ret = dev_set_mac_address(dev, &ndp->pending_mac, NULL);
1062		rtnl_unlock();
1063		if (ret < 0)
1064			netdev_warn(dev, "NCSI: 'Writing MAC address to device failed\n");
1065
1066		nd->state = ncsi_dev_state_config_clear_vids;
1067
1068		fallthrough;
1069	case ncsi_dev_state_config_clear_vids:
1070	case ncsi_dev_state_config_svf:
1071	case ncsi_dev_state_config_ev:
1072	case ncsi_dev_state_config_sma:
1073	case ncsi_dev_state_config_ebf:
1074	case ncsi_dev_state_config_dgmf:
 
 
1075	case ncsi_dev_state_config_ecnt:
1076	case ncsi_dev_state_config_ec:
1077	case ncsi_dev_state_config_ae:
1078	case ncsi_dev_state_config_gls:
1079		ndp->pending_req_num = 1;
1080
1081		nca.package = np->id;
1082		nca.channel = nc->id;
1083
1084		/* Clear any active filters on the channel before setting */
1085		if (nd->state == ncsi_dev_state_config_clear_vids) {
1086			ret = clear_one_vid(ndp, nc, &nca);
1087			if (ret) {
1088				nd->state = ncsi_dev_state_config_svf;
1089				schedule_work(&ndp->work);
1090				break;
1091			}
1092			/* Repeat */
1093			nd->state = ncsi_dev_state_config_clear_vids;
1094		/* Add known VLAN tags to the filter */
1095		} else if (nd->state == ncsi_dev_state_config_svf) {
1096			ret = set_one_vid(ndp, nc, &nca);
1097			if (ret) {
1098				nd->state = ncsi_dev_state_config_ev;
1099				schedule_work(&ndp->work);
1100				break;
1101			}
1102			/* Repeat */
1103			nd->state = ncsi_dev_state_config_svf;
1104		/* Enable/Disable the VLAN filter */
1105		} else if (nd->state == ncsi_dev_state_config_ev) {
1106			if (list_empty(&ndp->vlan_vids)) {
1107				nca.type = NCSI_PKT_CMD_DV;
1108			} else {
1109				nca.type = NCSI_PKT_CMD_EV;
1110				nca.bytes[3] = NCSI_CAP_VLAN_NO;
1111			}
1112			nd->state = ncsi_dev_state_config_sma;
1113		} else if (nd->state == ncsi_dev_state_config_sma) {
1114		/* Use first entry in unicast filter table. Note that
1115		 * the MAC filter table starts from entry 1 instead of
1116		 * 0.
1117		 */
 
1118			nca.type = NCSI_PKT_CMD_SMA;
1119			for (index = 0; index < 6; index++)
1120				nca.bytes[index] = dev->dev_addr[index];
1121			nca.bytes[6] = 0x1;
1122			nca.bytes[7] = 0x1;
1123			nd->state = ncsi_dev_state_config_ebf;
1124		} else if (nd->state == ncsi_dev_state_config_ebf) {
1125			nca.type = NCSI_PKT_CMD_EBF;
1126			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
1127			/* if multicast global filtering is supported then
1128			 * disable it so that all multicast packet will be
1129			 * forwarded to management controller
1130			 */
1131			if (nc->caps[NCSI_CAP_GENERIC].cap &
1132			    NCSI_CAP_GENERIC_MC)
1133				nd->state = ncsi_dev_state_config_dgmf;
1134			else if (ncsi_channel_is_tx(ndp, nc))
1135				nd->state = ncsi_dev_state_config_ecnt;
1136			else
1137				nd->state = ncsi_dev_state_config_ec;
1138		} else if (nd->state == ncsi_dev_state_config_dgmf) {
1139			nca.type = NCSI_PKT_CMD_DGMF;
1140			if (ncsi_channel_is_tx(ndp, nc))
1141				nd->state = ncsi_dev_state_config_ecnt;
1142			else
1143				nd->state = ncsi_dev_state_config_ec;
 
 
 
1144		} else if (nd->state == ncsi_dev_state_config_ecnt) {
1145			if (np->preferred_channel &&
1146			    nc != np->preferred_channel)
1147				netdev_info(ndp->ndev.dev,
1148					    "NCSI: Tx failed over to channel %u\n",
1149					    nc->id);
1150			nca.type = NCSI_PKT_CMD_ECNT;
1151			nd->state = ncsi_dev_state_config_ec;
1152		} else if (nd->state == ncsi_dev_state_config_ec) {
1153			/* Enable AEN if it's supported */
1154			nca.type = NCSI_PKT_CMD_EC;
1155			nd->state = ncsi_dev_state_config_ae;
1156			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1157				nd->state = ncsi_dev_state_config_gls;
1158		} else if (nd->state == ncsi_dev_state_config_ae) {
1159			nca.type = NCSI_PKT_CMD_AE;
1160			nca.bytes[0] = 0;
1161			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1162			nd->state = ncsi_dev_state_config_gls;
1163		} else if (nd->state == ncsi_dev_state_config_gls) {
1164			nca.type = NCSI_PKT_CMD_GLS;
1165			nd->state = ncsi_dev_state_config_done;
1166		}
1167
1168		ret = ncsi_xmit_cmd(&nca);
1169		if (ret) {
1170			netdev_err(ndp->ndev.dev,
1171				   "NCSI: Failed to transmit CMD %x\n",
1172				   nca.type);
1173			goto error;
1174		}
1175		break;
1176	case ncsi_dev_state_config_done:
1177		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1178			   nc->id);
1179		spin_lock_irqsave(&nc->lock, flags);
1180		nc->state = NCSI_CHANNEL_ACTIVE;
1181
1182		if (ndp->flags & NCSI_DEV_RESET) {
1183			/* A reset event happened during config, start it now */
1184			nc->reconfigure_needed = false;
1185			spin_unlock_irqrestore(&nc->lock, flags);
1186			ncsi_reset_dev(nd);
1187			break;
1188		}
1189
1190		if (nc->reconfigure_needed) {
1191			/* This channel's configuration has been updated
1192			 * part-way during the config state - start the
1193			 * channel configuration over
1194			 */
1195			nc->reconfigure_needed = false;
1196			nc->state = NCSI_CHANNEL_INACTIVE;
1197			spin_unlock_irqrestore(&nc->lock, flags);
1198
1199			spin_lock_irqsave(&ndp->lock, flags);
1200			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1201			spin_unlock_irqrestore(&ndp->lock, flags);
1202
1203			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1204			ncsi_process_next_channel(ndp);
1205			break;
1206		}
1207
1208		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1209			hot_nc = nc;
 
1210		} else {
1211			hot_nc = NULL;
1212			netdev_dbg(ndp->ndev.dev,
1213				   "NCSI: channel %u link down after config\n",
1214				   nc->id);
1215		}
1216		spin_unlock_irqrestore(&nc->lock, flags);
1217
1218		/* Update the hot channel */
1219		spin_lock_irqsave(&ndp->lock, flags);
1220		ndp->hot_channel = hot_nc;
1221		spin_unlock_irqrestore(&ndp->lock, flags);
1222
1223		ncsi_start_channel_monitor(nc);
1224		ncsi_process_next_channel(ndp);
1225		break;
1226	default:
1227		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1228			     nd->state);
1229	}
1230
1231	return;
1232
1233error:
1234	ncsi_report_link(ndp, true);
1235}
1236
/* Pick the channel(s) to configure next and queue them.
 *
 * By default the first inactive whitelisted channel found is chosen; the
 * previously "hot" channel takes priority over that, and a channel whose
 * GLS mode data reports link up wins outright.  With multi_package /
 * multi_channel set, every whitelisted channel is queued so all of them
 * get configured (and AENs enabled).
 *
 * Returns the result of ncsi_process_next_channel(), or -ENODEV when no
 * candidate channel exists at all.
 */
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc, *found, *hot_nc;
	struct ncsi_channel_mode *ncm;
	unsigned long flags, cflags;
	struct ncsi_package *np;
	bool with_link;

	/* Snapshot the last channel that had link ("hot" channel) */
	spin_lock_irqsave(&ndp->lock, flags);
	hot_nc = ndp->hot_channel;
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* By default the search is done once an inactive channel with up
	 * link is found, unless a preferred channel is set.
	 * If multi_package or multi_channel are configured all channels in the
	 * whitelist are added to the channel queue.
	 */
	found = NULL;
	with_link = false;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & (0x1 << nc->id)))
				continue;

			spin_lock_irqsave(&nc->lock, cflags);

			/* Skip channels already queued or not idle */
			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_INACTIVE) {
				spin_unlock_irqrestore(&nc->lock, cflags);
				continue;
			}

			if (!found)
				found = nc;

			/* Prefer the previous hot channel over the first hit */
			if (nc == hot_nc)
				found = nc;

			/* A channel with link up beats both of the above */
			ncm = &nc->modes[NCSI_MODE_LINK];
			if (ncm->data[2] & 0x1) {
				found = nc;
				with_link = true;
			}

			/* If multi_channel is enabled configure all valid
			 * channels whether or not they currently have link
			 * so they will have AENs enabled.
			 */
			if (with_link || np->multi_channel) {
				spin_lock_irqsave(&ndp->lock, flags);
				list_add_tail_rcu(&nc->link,
						  &ndp->channel_queue);
				spin_unlock_irqrestore(&ndp->lock, flags);

				netdev_dbg(ndp->ndev.dev,
					   "NCSI: Channel %u added to queue (link %s)\n",
					   nc->id,
					   ncm->data[2] & 0x1 ? "up" : "down");
			}

			spin_unlock_irqrestore(&nc->lock, cflags);

			if (with_link && !np->multi_channel)
				break;
		}
		if (with_link && !ndp->multi_package)
			break;
	}

	/* Nothing had link: fall back to the best candidate found */
	if (list_empty(&ndp->channel_queue) && found) {
		netdev_info(ndp->ndev.dev,
			    "NCSI: No channel with link found, configuring channel %u\n",
			    found->id);
		spin_lock_irqsave(&ndp->lock, flags);
		list_add_tail_rcu(&found->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
	} else if (!found) {
		netdev_warn(ndp->ndev.dev,
			    "NCSI: No channel found to configure!\n");
		ncsi_report_link(ndp, true);
		return -ENODEV;
	}

	return ncsi_process_next_channel(ndp);
}
1324
1325static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1326{
1327	struct ncsi_package *np;
1328	struct ncsi_channel *nc;
1329	unsigned int cap;
1330	bool has_channel = false;
1331
1332	/* The hardware arbitration is disabled if any one channel
1333	 * doesn't support explicitly.
1334	 */
1335	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1336		NCSI_FOR_EACH_CHANNEL(np, nc) {
1337			has_channel = true;
1338
1339			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1340			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1341			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1342			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1343				ndp->flags &= ~NCSI_DEV_HWA;
1344				return false;
1345			}
1346		}
1347	}
1348
1349	if (has_channel) {
1350		ndp->flags |= NCSI_DEV_HWA;
1351		return true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1352	}
 
1353
1354	ndp->flags &= ~NCSI_DEV_HWA;
1355	return false;
 
 
 
 
 
1356}
1357
/* Drive one step of the package/channel enumeration state machine.
 *
 * Called from the workqueue each time the previous command's responses
 * have arrived.  The sequence is: deselect all 8 possible packages,
 * then for each package id: SP (select), then per channel CIS/GVI/GC/GLS
 * (with optional OEM detours for Mellanox GMA/SMAF and Intel keep-PHY),
 * then DP (deselect) and on to the next package.  Once all packages are
 * probed, NCSI_DEV_PROBED is set and channel selection starts.
 */
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_probe:
		nd->state = ncsi_dev_state_probe_deselect;
		fallthrough;
	case ncsi_dev_state_probe_deselect:
		/* One DP per possible package id */
		ndp->pending_req_num = 8;

		/* Deselect all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_package;
		break;
	case ncsi_dev_state_probe_package:
		if (ndp->package_probe_id >= 8) {
			/* Last package probed, finishing */
			ndp->flags |= NCSI_DEV_PROBED;
			break;
		}

		ndp->pending_req_num = 1;

		/* Select the package currently being probed */
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		nd->state = ncsi_dev_state_probe_channel;
		break;
	case ncsi_dev_state_probe_channel:
		/* The package only exists if the SP got a response */
		ndp->active_package = ncsi_find_package(ndp,
							ndp->package_probe_id);
		if (!ndp->active_package) {
			/* No response */
			nd->state = ncsi_dev_state_probe_dp;
			schedule_work(&ndp->work);
			break;
		}
		nd->state = ncsi_dev_state_probe_cis;
		/* Mellanox multi-host parts need their OEM MAC flow first */
		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
		    ndp->mlx_multi_host)
			nd->state = ncsi_dev_state_probe_mlx_gma;

		schedule_work(&ndp->work);
		break;
	case ncsi_dev_state_probe_mlx_gma:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_gma_handler_mlx(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_mlx_smaf;
		break;
	case ncsi_dev_state_probe_mlx_smaf:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_smaf_mlx(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_cis;
		break;
	case ncsi_dev_state_probe_keep_phy:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_keep_phy_intel(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_cis:
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = 1;

		/* Clear initial state Retrieve version, capability or link status */
		if (nd->state == ncsi_dev_state_probe_cis)
			nca.type = NCSI_PKT_CMD_CIS;
		else if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		nca.channel = ndp->channel_probe_id;

		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Advance CIS -> GVI -> GC -> GLS, then next channel */
		if (nd->state == ncsi_dev_state_probe_cis) {
			nd->state = ncsi_dev_state_probe_gvi;
			if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
				nd->state = ncsi_dev_state_probe_keep_phy;
		} else if (nd->state == ncsi_dev_state_probe_gvi) {
			nd->state = ncsi_dev_state_probe_gc;
		} else if (nd->state == ncsi_dev_state_probe_gc) {
			nd->state = ncsi_dev_state_probe_gls;
		} else {
			nd->state = ncsi_dev_state_probe_cis;
			ndp->channel_probe_id++;
		}

		/* All channels of this package done: deselect it */
		if (ndp->channel_probe_id == ndp->channel_count) {
			ndp->channel_probe_id = 0;
			nd->state = ncsi_dev_state_probe_dp;
		}
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the current package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Probe next package after receiving response */
		ndp->package_probe_id++;
		nd->state = ncsi_dev_state_probe_package;
		ndp->active_package = NULL;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
			    nd->state);
	}

	if (ndp->flags & NCSI_DEV_PROBED) {
		/* Check if all packages have HWA support */
		ncsi_check_hwa(ndp);
		ncsi_choose_active_channel(ndp);
	}

	return;
error:
	netdev_err(ndp->ndev.dev,
		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
		   nca.type);
	ncsi_report_link(ndp, true);
}
1533
1534static void ncsi_dev_work(struct work_struct *work)
1535{
1536	struct ncsi_dev_priv *ndp = container_of(work,
1537			struct ncsi_dev_priv, work);
1538	struct ncsi_dev *nd = &ndp->ndev;
1539
1540	switch (nd->state & ncsi_dev_state_major) {
1541	case ncsi_dev_state_probe:
1542		ncsi_probe_channel(ndp);
1543		break;
1544	case ncsi_dev_state_suspend:
1545		ncsi_suspend_channel(ndp);
1546		break;
1547	case ncsi_dev_state_config:
1548		ncsi_configure_channel(ndp);
1549		break;
1550	default:
1551		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1552			    nd->state);
1553	}
1554}
1555
/* Pop the next channel off ndp->channel_queue and start the matching
 * state machine: an INACTIVE channel gets configured, an ACTIVE one
 * gets suspended.  The channel is marked INVISIBLE while in transition
 * so no other path touches it.
 *
 * With an empty queue, either a pending reshuffle re-runs channel
 * selection, or the link state is reported and -ENODEV returned.
 */
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc;
	int old_state;
	unsigned long flags;

	/* Detach the queue head under the device lock */
	spin_lock_irqsave(&ndp->lock, flags);
	nc = list_first_or_null_rcu(&ndp->channel_queue,
				    struct ncsi_channel, link);
	if (!nc) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		goto out;
	}

	list_del_init(&nc->link);
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Hide the channel while it is being worked on */
	spin_lock_irqsave(&nc->lock, flags);
	old_state = nc->state;
	nc->state = NCSI_CHANNEL_INVISIBLE;
	spin_unlock_irqrestore(&nc->lock, flags);

	ndp->active_channel = nc;
	ndp->active_package = nc->package;

	switch (old_state) {
	case NCSI_CHANNEL_INACTIVE:
		ndp->ndev.state = ncsi_dev_state_config;
		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
			   nc->id);
		ncsi_configure_channel(ndp);
		break;
	case NCSI_CHANNEL_ACTIVE:
		ndp->ndev.state = ncsi_dev_state_suspend;
		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
			   nc->id);
		ncsi_suspend_channel(ndp);
		break;
	default:
		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
			   old_state, nc->package->id, nc->id);
		ncsi_report_link(ndp, false);
		return -EINVAL;
	}

	return 0;

out:
	ndp->active_channel = NULL;
	ndp->active_package = NULL;
	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
		return ncsi_choose_active_channel(ndp);
	}

	ncsi_report_link(ndp, false);
	return -ENODEV;
}
1614
/* Queue every ACTIVE channel for reconfiguration (e.g. after the VLAN
 * filter list changed).  Channels that are busy are only marked
 * reconfigure_needed so they pick up the change when their current
 * operation finishes.
 *
 * Returns the number of channels actually queued.
 */
static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	unsigned long flags;
	unsigned int n = 0;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Channels may be busy, mark dirty instead of
			 * kicking if;
			 * a) not ACTIVE (configured)
			 * b) in the channel_queue (to be configured)
			 * c) it's ndev is in the config state
			 */
			if (nc->state != NCSI_CHANNEL_ACTIVE) {
				if ((ndp->ndev.state & 0xff00) ==
						ncsi_dev_state_config ||
						!list_empty(&nc->link)) {
					netdev_dbg(nd->dev,
						   "NCSI: channel %p marked dirty\n",
						   nc);
					nc->reconfigure_needed = true;
				}
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			spin_unlock_irqrestore(&nc->lock, flags);

			/* Stop the monitor before deactivating the channel;
			 * the monitor must not run while the lock is held.
			 */
			ncsi_stop_channel_monitor(nc);
			spin_lock_irqsave(&nc->lock, flags);
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
			n++;
		}
	}

	return n;
}
1664
1665int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1666{
1667	struct ncsi_dev_priv *ndp;
1668	unsigned int n_vids = 0;
1669	struct vlan_vid *vlan;
1670	struct ncsi_dev *nd;
1671	bool found = false;
1672
1673	if (vid == 0)
1674		return 0;
1675
1676	nd = ncsi_find_dev(dev);
1677	if (!nd) {
1678		netdev_warn(dev, "NCSI: No net_device?\n");
1679		return 0;
1680	}
1681
1682	ndp = TO_NCSI_DEV_PRIV(nd);
 
 
1683
1684	/* Add the VLAN id to our internal list */
1685	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1686		n_vids++;
1687		if (vlan->vid == vid) {
1688			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1689				   vid);
1690			return 0;
1691		}
1692	}
1693	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1694		netdev_warn(dev,
1695			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1696			    vid, NCSI_MAX_VLAN_VIDS);
1697		return -ENOSPC;
1698	}
1699
1700	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1701	if (!vlan)
1702		return -ENOMEM;
1703
1704	vlan->proto = proto;
1705	vlan->vid = vid;
1706	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1707
1708	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1709
1710	found = ncsi_kick_channels(ndp) != 0;
1711
1712	return found ? ncsi_process_next_channel(ndp) : 0;
1713}
1714EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1715
/* ndo_vlan_rx_kill_vid handler: drop @vid from the NCSI VLAN list and
 * kick the channels so the device filters are reprogrammed.
 *
 * Returns 0 on success (or when the device isn't NCSI-managed),
 * -EINVAL if the vid was never registered, or the result of
 * ncsi_process_next_channel() when channels were kicked.
 */
int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_vid *vlan, *tmp;
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	bool found = false;

	/* vid 0 is never programmed into the filter */
	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: no net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Remove the VLAN id from our internal list */
	/* NOTE(review): the entry is freed immediately after list_del_rcu()
	 * with no RCU grace period - presumably writers and readers are
	 * serialized by an outer lock; confirm before relying on lockless
	 * traversal of vlan_vids.
	 */
	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
		if (vlan->vid == vid) {
			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
			list_del_rcu(&vlan->list);
			found = true;
			kfree(vlan);
		}

	if (!found) {
		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
		return -EINVAL;
	}

	/* Reconfigure active channels so the filter change takes effect */
	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
1752EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1753
/* Look up or create the NCSI state for @dev and hook up the NCSI
 * packet type so responses are received.  @handler is called whenever
 * the aggregate link state is reported (see ncsi_report_link()).
 *
 * Returns the existing or newly created ncsi_dev, or NULL when the
 * private state cannot be allocated.
 */
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	struct platform_device *pdev;
	struct device_node *np;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_LIST_HEAD(&ndp->vlan_vids);
	INIT_WORK(&ndp->work, ncsi_dev_work);
	/* All packages are eligible until configured otherwise */
	ndp->package_whitelist = UINT_MAX;

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = NCSI_REQ_START_IDX;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
	}
	/* Real channel count is discovered during probe */
	ndp->channel_count = NCSI_RESERVED_CHANNEL;

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	/* Mellanox multi-host needs an OEM MAC flow during probe */
	pdev = to_platform_device(dev->dev.parent);
	if (pdev) {
		np = pdev->dev.of_node;
		if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
			   of_property_read_bool(np, "mlx,multi-host")))
			ndp->mlx_multi_host = true;
	}

	return nd;
}
1815EXPORT_SYMBOL_GPL(ncsi_register_dev);
1816
1817int ncsi_start_dev(struct ncsi_dev *nd)
1818{
1819	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
 
1820
1821	if (nd->state != ncsi_dev_state_registered &&
1822	    nd->state != ncsi_dev_state_functional)
1823		return -ENOTTY;
1824
1825	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1826		ndp->package_probe_id = 0;
1827		ndp->channel_probe_id = 0;
1828		nd->state = ncsi_dev_state_probe;
1829		schedule_work(&ndp->work);
1830		return 0;
1831	}
1832
1833	return ncsi_reset_dev(nd);
 
 
 
 
 
1834}
1835EXPORT_SYMBOL_GPL(ncsi_start_dev);
1836
/* Stop the NCSI device: silence the channel monitors and force a
 * link-down report.  Channel states are deliberately left intact so
 * the next ncsi_start_dev() knows which channels were active.
 */
void ncsi_stop_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	bool chained;
	int old_state;
	unsigned long flags;

	/* Stop the channel monitor on any active channels. Don't reset the
	 * channel state so we know which were active when ncsi_start_dev()
	 * is next called.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			ncsi_stop_channel_monitor(nc);

			spin_lock_irqsave(&nc->lock, flags);
			chained = !list_empty(&nc->link);
			old_state = nc->state;
			spin_unlock_irqrestore(&nc->lock, flags);

			/* Nothing should be queued or mid-transition here */
			WARN_ON_ONCE(chained ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
	ncsi_report_link(ndp, true);
}
1867EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1868
/* Reset the device back to a single freshly-configured channel.
 *
 * Phase 1 (NCSI_DEV_RESET clear): if a suspend/config is in flight,
 * just set the flag and let that state machine call back in here when
 * it finishes.  Phase 2 (flag set): proceed only from a settled state.
 * Any queued channels are dropped, the first ACTIVE channel is
 * suspended; once nothing is active, channel selection runs again.
 *
 * Returns 0 (reset deferred or suspend started) or the result of
 * ncsi_choose_active_channel().
 */
int ncsi_reset_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_channel *nc, *active, *tmp;
	struct ncsi_package *np;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);

	if (!(ndp->flags & NCSI_DEV_RESET)) {
		/* Haven't been called yet, check states */
		switch (nd->state & ncsi_dev_state_major) {
		case ncsi_dev_state_registered:
		case ncsi_dev_state_probe:
			/* Not even probed yet - do nothing */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		case ncsi_dev_state_suspend:
		case ncsi_dev_state_config:
			/* Wait for the channel to finish its suspend/config
			 * operation; once it finishes it will check for
			 * NCSI_DEV_RESET and reset the state.
			 */
			ndp->flags |= NCSI_DEV_RESET;
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	} else {
		switch (nd->state) {
		case ncsi_dev_state_suspend_done:
		case ncsi_dev_state_config_done:
		case ncsi_dev_state_functional:
			/* Ok */
			break;
		default:
			/* Current reset operation happening */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	}

	if (!list_empty(&ndp->channel_queue)) {
		/* Clear any channel queue we may have interrupted */
		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
			list_del_init(&nc->link);
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Find the first ACTIVE channel and take it down */
	active = NULL;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (nc->state == NCSI_CHANNEL_ACTIVE) {
				active = nc;
				nc->state = NCSI_CHANNEL_INVISIBLE;
				spin_unlock_irqrestore(&nc->lock, flags);
				ncsi_stop_channel_monitor(nc);
				break;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
		if (active)
			break;
	}

	if (!active) {
		/* Done */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->flags &= ~NCSI_DEV_RESET;
		spin_unlock_irqrestore(&ndp->lock, flags);
		return ncsi_choose_active_channel(ndp);
	}

	/* Suspend the active channel; its completion re-enters here */
	spin_lock_irqsave(&ndp->lock, flags);
	ndp->flags |= NCSI_DEV_RESET;
	ndp->active_channel = active;
	ndp->active_package = active->package;
	spin_unlock_irqrestore(&ndp->lock, flags);

	nd->state = ncsi_dev_state_suspend;
	schedule_work(&ndp->work);
	return 0;
}
1954
/* Tear down the NCSI device: unhook the Rx packet handler, free all
 * packages (and their channels), unlink from the global device list,
 * then free the private state.  The work item is disabled before
 * kfree() so no handler can run against freed memory.
 */
void ncsi_unregister_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	dev_remove_pack(&ndp->ptype);

	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
		ncsi_remove_package(np);

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_del_rcu(&ndp->node);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	disable_work_sync(&ndp->work);

	kfree(ndp);
}
1974EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
v4.10.11
 
   1/*
   2 * Copyright Gavin Shan, IBM Corporation 2016.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 */
   9
  10#include <linux/module.h>
  11#include <linux/kernel.h>
  12#include <linux/init.h>
  13#include <linux/netdevice.h>
  14#include <linux/skbuff.h>
  15#include <linux/netlink.h>
 
  16
  17#include <net/ncsi.h>
  18#include <net/net_namespace.h>
  19#include <net/sock.h>
  20#include <net/addrconf.h>
  21#include <net/ipv6.h>
  22#include <net/if_inet6.h>
  23
  24#include "internal.h"
  25#include "ncsi-pkt.h"
 
  26
  27LIST_HEAD(ncsi_dev_list);
  28DEFINE_SPINLOCK(ncsi_dev_lock);
  29
  30static inline int ncsi_filter_size(int table)
  31{
  32	int sizes[] = { 2, 6, 6, 6 };
  33
  34	BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
  35	if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
  36		return -EINVAL;
  37
  38	return sizes[table];
  39}
  40
  41int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
 
  42{
  43	struct ncsi_channel_filter *ncf;
  44	void *bitmap;
  45	int index, size;
  46	unsigned long flags;
  47
  48	ncf = nc->filters[table];
  49	if (!ncf)
  50		return -ENXIO;
  51
  52	size = ncsi_filter_size(table);
  53	if (size < 0)
  54		return size;
  55
  56	spin_lock_irqsave(&nc->lock, flags);
  57	bitmap = (void *)&ncf->bitmap;
  58	index = -1;
  59	while ((index = find_next_bit(bitmap, ncf->total, index + 1))
  60	       < ncf->total) {
  61		if (!memcmp(ncf->data + size * index, data, size)) {
  62			spin_unlock_irqrestore(&nc->lock, flags);
  63			return index;
  64		}
  65	}
  66	spin_unlock_irqrestore(&nc->lock, flags);
  67
  68	return -ENOENT;
  69}
  70
/* Add @data as an entry in channel filter table @table.
 *
 * Returns the index of the (existing or newly claimed) entry, or a
 * negative errno: table bounds error, -ENODEV when the table doesn't
 * exist, -ENOSPC when it is full.
 */
int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
{
	struct ncsi_channel_filter *ncf;
	int index, size;
	void *bitmap;
	unsigned long flags;

	size = ncsi_filter_size(table);
	if (size < 0)
		return size;

	/* Reuse an existing entry when the data is already present */
	index = ncsi_find_filter(nc, table, data);
	if (index >= 0)
		return index;

	ncf = nc->filters[table];
	if (!ncf)
		return -ENODEV;

	/* Claim the first free slot; test_and_set_bit() re-checks the
	 * bit, so a lost race just retries the search.
	 */
	spin_lock_irqsave(&nc->lock, flags);
	bitmap = (void *)&ncf->bitmap;
	do {
		index = find_next_zero_bit(bitmap, ncf->total, 0);
		if (index >= ncf->total) {
			spin_unlock_irqrestore(&nc->lock, flags);
			return -ENOSPC;
		}
	} while (test_and_set_bit(index, bitmap));

	memcpy(ncf->data + size * index, data, size);
	spin_unlock_irqrestore(&nc->lock, flags);

	return index;
}
 105
/* Invalidate entry @index in channel filter table @table and zero its
 * data.  Clearing an already-clear entry is a no-op.  Returns 0, a
 * table bounds errno, or -ENODEV when the table/index doesn't exist.
 */
int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
{
	struct ncsi_channel_filter *ncf;
	int size;
	void *bitmap;
	unsigned long flags;

	size = ncsi_filter_size(table);
	if (size < 0)
		return size;

	ncf = nc->filters[table];
	if (!ncf || index >= ncf->total)
		return -ENODEV;

	spin_lock_irqsave(&nc->lock, flags);
	bitmap = (void *)&ncf->bitmap;
	if (test_and_clear_bit(index, bitmap))
		memset(ncf->data + size * index, 0, size);
	spin_unlock_irqrestore(&nc->lock, flags);

	return 0;
}
 129
/* Recompute the aggregate link state and report it through the
 * device's handler.  Link is considered up when any ACTIVE, unqueued
 * channel has the link-up bit set in its GLS mode data; @force_down
 * forces a link-down report regardless.
 */
static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	nd->state = ncsi_dev_state_functional;
	if (force_down) {
		nd->link_up = 0;
		goto report;
	}

	nd->link_up = 0;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Skip channels that are queued or not configured */
			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_ACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			/* One channel with link up is enough */
			if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
				spin_unlock_irqrestore(&nc->lock, flags);
				nd->link_up = 1;
				goto report;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

report:
	nd->handler(nd);
}
 167
/* Per-channel monitor timer callback.
 *
 * monitor.state counts timer ticks: on START/RETRY a GLS command is
 * (re)sent, the WAIT range just waits for the response handler to
 * rewind the counter, and anything beyond WAIT_MAX means the channel
 * stopped responding - it is then failed over (link-down report plus
 * RESHUFFLE unless hardware arbitration is on) and requeued.
 */
static void ncsi_channel_monitor(unsigned long data)
{
	struct ncsi_channel *nc = (struct ncsi_channel *)data;
	struct ncsi_package *np = nc->package;
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_cmd_arg nca;
	bool enabled, chained;
	unsigned int monitor_state;
	unsigned long flags;
	int state, ret;

	/* Snapshot channel state under its lock */
	spin_lock_irqsave(&nc->lock, flags);
	state = nc->state;
	chained = !list_empty(&nc->link);
	enabled = nc->monitor.enabled;
	monitor_state = nc->monitor.state;
	spin_unlock_irqrestore(&nc->lock, flags);

	/* Nothing to do for disabled, queued or transitioning channels */
	if (!enabled || chained)
		return;
	if (state != NCSI_CHANNEL_INACTIVE &&
	    state != NCSI_CHANNEL_ACTIVE)
		return;

	switch (monitor_state) {
	case NCSI_CHANNEL_MONITOR_START:
	case NCSI_CHANNEL_MONITOR_RETRY:
		/* Probe the channel with a Get Link Status command */
		nca.ndp = ndp;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.type = NCSI_PKT_CMD_GLS;
		nca.req_flags = 0;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
				   ret);
			return;
		}

		break;
	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
		break;
	default:
		/* Timed out: fail the channel over */
		if (!(ndp->flags & NCSI_DEV_HWA) &&
		    state == NCSI_CHANNEL_ACTIVE) {
			ncsi_report_link(ndp, true);
			ndp->flags |= NCSI_DEV_RESHUFFLE;
		}

		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INVISIBLE;
		spin_unlock_irqrestore(&nc->lock, flags);

		spin_lock_irqsave(&ndp->lock, flags);
		/* NOTE(review): nc->state is written here under ndp->lock
		 * rather than nc->lock, unlike everywhere else in this file -
		 * confirm which lock is meant to protect channel state.
		 */
		nc->state = NCSI_CHANNEL_INACTIVE;
		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
		ncsi_process_next_channel(ndp);
		return;
	}

	/* Tick and re-arm for one second from now */
	spin_lock_irqsave(&nc->lock, flags);
	nc->monitor.state++;
	spin_unlock_irqrestore(&nc->lock, flags);
	mod_timer(&nc->monitor.timer, jiffies + HZ);
}
 234
/* Arm the per-channel monitor timer (first tick one second out).  The
 * monitor must not already be running; WARN_ON_ONCE catches double
 * starts.
 */
void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	WARN_ON_ONCE(nc->monitor.enabled);
	nc->monitor.enabled = true;
	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
	spin_unlock_irqrestore(&nc->lock, flags);

	mod_timer(&nc->monitor.timer, jiffies + HZ);
}
 247
/* Disarm the channel monitor and wait for a running timer callback to
 * finish.  Calling this on an already-stopped monitor is a no-op.
 */
void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	if (!nc->monitor.enabled) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}
	nc->monitor.enabled = false;
	spin_unlock_irqrestore(&nc->lock, flags);

	/* Synchronous: returns only after any in-flight callback is done */
	del_timer_sync(&nc->monitor.timer);
}
 262
 263struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
 264				       unsigned char id)
 265{
 266	struct ncsi_channel *nc;
 267
 268	NCSI_FOR_EACH_CHANNEL(np, nc) {
 269		if (nc->id == id)
 270			return nc;
 271	}
 272
 273	return NULL;
 274}
 275
 276struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
 277{
 278	struct ncsi_channel *nc, *tmp;
 279	int index;
 280	unsigned long flags;
 281
 282	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
 283	if (!nc)
 284		return NULL;
 285
 286	nc->id = id;
 287	nc->package = np;
 288	nc->state = NCSI_CHANNEL_INACTIVE;
 289	nc->monitor.enabled = false;
 290	setup_timer(&nc->monitor.timer,
 291		    ncsi_channel_monitor, (unsigned long)nc);
 292	spin_lock_init(&nc->lock);
 293	INIT_LIST_HEAD(&nc->link);
 294	for (index = 0; index < NCSI_CAP_MAX; index++)
 295		nc->caps[index].index = index;
 296	for (index = 0; index < NCSI_MODE_MAX; index++)
 297		nc->modes[index].index = index;
 298
 299	spin_lock_irqsave(&np->lock, flags);
 300	tmp = ncsi_find_channel(np, id);
 301	if (tmp) {
 302		spin_unlock_irqrestore(&np->lock, flags);
 303		kfree(nc);
 304		return tmp;
 305	}
 306
 307	list_add_tail_rcu(&nc->node, &np->channels);
 308	np->channel_num++;
 309	spin_unlock_irqrestore(&np->lock, flags);
 310
 311	return nc;
 312}
 313
/* Tear down a channel: drop its filters, stop its monitor, then unlink
 * it from the owning package and free it.
 */
static void ncsi_remove_channel(struct ncsi_channel *nc)
{
	struct ncsi_package *np = nc->package;
	struct ncsi_channel_filter *ncf;
	unsigned long flags;
	int i;

	/* Release filters */
	spin_lock_irqsave(&nc->lock, flags);
	for (i = 0; i < NCSI_FILTER_MAX; i++) {
		ncf = nc->filters[i];
		if (!ncf)
			continue;

		nc->filters[i] = NULL;
		kfree(ncf);
	}

	nc->state = NCSI_CHANNEL_INACTIVE;
	spin_unlock_irqrestore(&nc->lock, flags);
	/* Stop the monitor outside nc->lock: it synchronously waits for
	 * a running timer handler, and that handler takes nc->lock.
	 */
	ncsi_stop_channel_monitor(nc);

	/* Remove and free channel */
	spin_lock_irqsave(&np->lock, flags);
	list_del_rcu(&nc->node);
	np->channel_num--;
	spin_unlock_irqrestore(&np->lock, flags);

	kfree(nc);
}
 344
 345struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
 346				       unsigned char id)
 347{
 348	struct ncsi_package *np;
 349
 350	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 351		if (np->id == id)
 352			return np;
 353	}
 354
 355	return NULL;
 356}
 357
 358struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
 359				      unsigned char id)
 360{
 361	struct ncsi_package *np, *tmp;
 362	unsigned long flags;
 363
 364	np = kzalloc(sizeof(*np), GFP_ATOMIC);
 365	if (!np)
 366		return NULL;
 367
 368	np->id = id;
 369	np->ndp = ndp;
 370	spin_lock_init(&np->lock);
 371	INIT_LIST_HEAD(&np->channels);
 
 372
 373	spin_lock_irqsave(&ndp->lock, flags);
 374	tmp = ncsi_find_package(ndp, id);
 375	if (tmp) {
 376		spin_unlock_irqrestore(&ndp->lock, flags);
 377		kfree(np);
 378		return tmp;
 379	}
 380
 381	list_add_tail_rcu(&np->node, &ndp->packages);
 382	ndp->package_num++;
 383	spin_unlock_irqrestore(&ndp->lock, flags);
 384
 385	return np;
 386}
 387
/* Destroy a package: remove all of its channels first, then unlink it
 * from the device's package list and free it.
 */
void ncsi_remove_package(struct ncsi_package *np)
{
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel *nc, *tmp;
	unsigned long flags;

	/* Release all child channels */
	list_for_each_entry_safe(nc, tmp, &np->channels, node)
		ncsi_remove_channel(nc);

	/* Remove and free package */
	spin_lock_irqsave(&ndp->lock, flags);
	list_del_rcu(&np->node);
	ndp->package_num--;
	spin_unlock_irqrestore(&ndp->lock, flags);

	kfree(np);
}
 406
 407void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
 408				   unsigned char id,
 409				   struct ncsi_package **np,
 410				   struct ncsi_channel **nc)
 411{
 412	struct ncsi_package *p;
 413	struct ncsi_channel *c;
 414
 415	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
 416	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
 417
 418	if (np)
 419		*np = p;
 420	if (nc)
 421		*nc = c;
 422}
 423
 424/* For two consecutive NCSI commands, the packet IDs shouldn't
 425 * be same. Otherwise, the bogus response might be replied. So
 426 * the available IDs are allocated in round-robin fashion.
 427 */
 428struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
 429					unsigned int req_flags)
 430{
 431	struct ncsi_request *nr = NULL;
 432	int i, limit = ARRAY_SIZE(ndp->requests);
 433	unsigned long flags;
 434
 435	/* Check if there is one available request until the ceiling */
 436	spin_lock_irqsave(&ndp->lock, flags);
 437	for (i = ndp->request_id; i < limit; i++) {
 438		if (ndp->requests[i].used)
 439			continue;
 440
 441		nr = &ndp->requests[i];
 442		nr->used = true;
 443		nr->flags = req_flags;
 444		ndp->request_id = i + 1;
 445		goto found;
 446	}
 447
 448	/* Fail back to check from the starting cursor */
 449	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
 450		if (ndp->requests[i].used)
 451			continue;
 452
 453		nr = &ndp->requests[i];
 454		nr->used = true;
 455		nr->flags = req_flags;
 456		ndp->request_id = i + 1;
 457		goto found;
 458	}
 459
 460found:
 461	spin_unlock_irqrestore(&ndp->lock, flags);
 462	return nr;
 463}
 464
/* Return a request slot to the pool and release its packet buffers.
 *
 * The timeout timer is deactivated first so a late expiry cannot race
 * with the release.  For event-driven requests, completing the last
 * outstanding command kicks the state machine worker.
 */
void ncsi_free_request(struct ncsi_request *nr)
{
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct sk_buff *cmd, *rsp;
	unsigned long flags;
	bool driven;

	/* del_timer_sync() also waits for a handler already running on
	 * another CPU.
	 */
	if (nr->enabled) {
		nr->enabled = false;
		del_timer_sync(&nr->timer);
	}

	/* Detach the buffers and mark the slot free under the lock */
	spin_lock_irqsave(&ndp->lock, flags);
	cmd = nr->cmd;
	rsp = nr->rsp;
	nr->cmd = NULL;
	nr->rsp = NULL;
	nr->used = false;
	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* NOTE(review): pending_req_num is decremented here outside
	 * ndp->lock while the state machines assign it directly —
	 * confirm those updates are serialized by the workqueue.
	 */
	if (driven && cmd && --ndp->pending_req_num == 0)
		schedule_work(&ndp->work);

	/* Release command and response */
	consume_skb(cmd);
	consume_skb(rsp);
}
 493
 494struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
 495{
 496	struct ncsi_dev_priv *ndp;
 497
 498	NCSI_FOR_EACH_DEV(ndp) {
 499		if (ndp->ndev.dev == dev)
 500			return &ndp->ndev;
 501	}
 502
 503	return NULL;
 504}
 505
/* Timer handler for an NCSI request that got no response in time.
 *
 * If a response has already been attached — or the command was never
 * sent — the response path owns the release and we back off; otherwise
 * the request is freed here.
 */
static void ncsi_request_timeout(unsigned long data)
{
	struct ncsi_request *nr = (struct ncsi_request *)data;
	struct ncsi_dev_priv *ndp = nr->ndp;
	unsigned long flags;

	/* If the request already had associated response,
	 * let the response handler to release it.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	nr->enabled = false;
	if (nr->rsp || !nr->cmd) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Release the request */
	ncsi_free_request(nr);
}
 526
/* State machine deconfiguring (suspending) the active channel.  Each
 * invocation handles one ncsi_dev_state_suspend_* sub-state: it sends
 * the command(s) for that step and advances nd->state; the response
 * handlers re-schedule the worker, which re-enters here for the next
 * step.  On transmit failure the device drops back to the functional
 * state.
 */
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_cmd_arg nca;
	unsigned long flags;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_suspend:
		nd->state = ncsi_dev_state_suspend_select;
		/* Fall through */
	case ncsi_dev_state_suspend_select:
		/* Select the package that owns the channel being
		 * suspended; byte 0 disables hardware arbitration
		 * unless HWA mode is active.
		 */
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;

		/* To retrieve the last link states of channels in current
		 * package when current active channel needs fail over to
		 * another one. It means we will possibly select another
		 * channel as next active one. The link states of channels
		 * are most important factor of the selection. So we need
		 * accurate link states. Unfortunately, the link states on
		 * inactive channels can't be updated with LSC AEN in time.
		 */
		if (ndp->flags & NCSI_DEV_RESHUFFLE)
			nd->state = ncsi_dev_state_suspend_gls;
		else
			nd->state = ncsi_dev_state_suspend_dcnt;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_gls:
		/* Refresh the link state of every channel in the package */
		ndp->pending_req_num = np->channel_num;

		nca.type = NCSI_PKT_CMD_GLS;
		nca.package = np->id;

		nd->state = ncsi_dev_state_suspend_dcnt;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		break;
	case ncsi_dev_state_suspend_dcnt:
		/* Disable network TX on the channel (DCNT) */
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DCNT;
		nca.package = np->id;
		nca.channel = nc->id;

		nd->state = ncsi_dev_state_suspend_dc;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_dc:
		/* Disable the channel itself.  bytes[0] = 1 here is
		 * presumably the allow-link-down flag — confirm against
		 * the NC-SI specification.
		 */
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DC;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.bytes[0] = 1;

		nd->state = ncsi_dev_state_suspend_deselect;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_deselect:
		/* Deselect the package */
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;

		nd->state = ncsi_dev_state_suspend_done;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_done:
		/* Mark the channel inactive and move on to whatever is
		 * queued next.
		 */
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INACTIVE;
		spin_unlock_irqrestore(&nc->lock, flags);
		ncsi_process_next_channel(ndp);

		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
			    nd->state);
	}

	return;
error:
	nd->state = ncsi_dev_state_functional;
}
 641
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* State machine bringing the active channel into service: select the
 * package, clear the channel's initial state, program the MAC and
 * broadcast/multicast filters, enable the channel and (if supported)
 * AENs, then query link status.  Each invocation handles one
 * ncsi_dev_state_config_* sub-state; response handlers re-schedule the
 * worker for the next step.  On transmit failure, link-down is
 * reported.
 */
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct net_device *dev = nd->dev;
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_channel *hot_nc = NULL;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	unsigned long flags;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_config:
	case ncsi_dev_state_config_sp:
		ndp->pending_req_num = 1;

		/* Select the specific package */
		nca.type = NCSI_PKT_CMD_SP;
		/* Byte 0 disables hardware arbitration unless HWA is on */
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_config_cis;
		break;
	case ncsi_dev_state_config_cis:
		ndp->pending_req_num = 1;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = np->id;
		nca.channel = nc->id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_config_sma;
		break;
	/* The following sub-states all send exactly one command to the
	 * active channel; the if/else chain below picks the command and
	 * the successor state.
	 */
	case ncsi_dev_state_config_sma:
	case ncsi_dev_state_config_ebf:
#if IS_ENABLED(CONFIG_IPV6)
	case ncsi_dev_state_config_egmf:
#endif
	case ncsi_dev_state_config_ecnt:
	case ncsi_dev_state_config_ec:
	case ncsi_dev_state_config_ae:
	case ncsi_dev_state_config_gls:
		ndp->pending_req_num = 1;

		nca.package = np->id;
		nca.channel = nc->id;

		/* Use first entry in unicast filter table. Note that
		 * the MAC filter table starts from entry 1 instead of
		 * 0.
		 */
		if (nd->state == ncsi_dev_state_config_sma) {
			nca.type = NCSI_PKT_CMD_SMA;
			for (index = 0; index < 6; index++)
				nca.bytes[index] = dev->dev_addr[index];
			nca.bytes[6] = 0x1;
			nca.bytes[7] = 0x1;
			nd->state = ncsi_dev_state_config_ebf;
		} else if (nd->state == ncsi_dev_state_config_ebf) {
			nca.type = NCSI_PKT_CMD_EBF;
			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
			nd->state = ncsi_dev_state_config_ecnt;
#if IS_ENABLED(CONFIG_IPV6)
			/* Only program the global multicast filter when
			 * IPv6 addresses exist and the channel supports it.
			 */
			if (ndp->inet6_addr_num > 0 &&
			    (nc->caps[NCSI_CAP_GENERIC].cap &
			     NCSI_CAP_GENERIC_MC))
				nd->state = ncsi_dev_state_config_egmf;
			else
				nd->state = ncsi_dev_state_config_ecnt;
		} else if (nd->state == ncsi_dev_state_config_egmf) {
			nca.type = NCSI_PKT_CMD_EGMF;
			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
			nd->state = ncsi_dev_state_config_ecnt;
#endif /* CONFIG_IPV6 */
		} else if (nd->state == ncsi_dev_state_config_ecnt) {
			nca.type = NCSI_PKT_CMD_ECNT;
			nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ec) {
			/* Enable AEN if it's supported */
			nca.type = NCSI_PKT_CMD_EC;
			nd->state = ncsi_dev_state_config_ae;
			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
				nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_ae) {
			nca.type = NCSI_PKT_CMD_AE;
			nca.bytes[0] = 0;
			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
			nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_gls) {
			nca.type = NCSI_PKT_CMD_GLS;
			nd->state = ncsi_dev_state_config_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		break;
	case ncsi_dev_state_config_done:
		/* Activate the channel only when its link is up; the
		 * low bit of the GLS response carries the link state.
		 */
		spin_lock_irqsave(&nc->lock, flags);
		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
			hot_nc = nc;
			nc->state = NCSI_CHANNEL_ACTIVE;
		} else {
			hot_nc = NULL;
			nc->state = NCSI_CHANNEL_INACTIVE;
		}
		spin_unlock_irqrestore(&nc->lock, flags);

		/* Update the hot channel */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->hot_channel = hot_nc;
		spin_unlock_irqrestore(&ndp->lock, flags);

		ncsi_start_channel_monitor(nc);
		ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_warn(dev, "Wrong NCSI state 0x%x in config\n",
			    nd->state);
	}

	return;

error:
	ncsi_report_link(ndp, true);
}
 781
/* Pick the channel to activate next.  Preference order: an inactive
 * channel with link up (stops the search immediately), then the
 * previous "hot" channel, then the first inactive channel found.  The
 * winner is queued for configuration via ncsi_process_next_channel().
 *
 * Return: 0/err from channel processing, or -ENODEV when no inactive
 * channel exists at all.
 */
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc, *found, *hot_nc;
	struct ncsi_channel_mode *ncm;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	hot_nc = ndp->hot_channel;
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* The search is done once an inactive channel with up
	 * link is found.
	 */
	found = NULL;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Skip channels already queued or not inactive */
			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_INACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			if (!found)
				found = nc;

			/* The previous hot channel beats an arbitrary one */
			if (nc == hot_nc)
				found = nc;

			/* Link up (bit 0 of the link mode) wins outright */
			ncm = &nc->modes[NCSI_MODE_LINK];
			if (ncm->data[2] & 0x1) {
				spin_unlock_irqrestore(&nc->lock, flags);
				found = nc;
				goto out;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

	if (!found) {
		ncsi_report_link(ndp, true);
		return -ENODEV;
	}

out:
	spin_lock_irqsave(&ndp->lock, flags);
	list_add_tail_rcu(&found->link, &ndp->channel_queue);
	spin_unlock_irqrestore(&ndp->lock, flags);

	return ncsi_process_next_channel(ndp);
}
 836
 837static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
 838{
 839	struct ncsi_package *np;
 840	struct ncsi_channel *nc;
 841	unsigned int cap;
 
 842
 843	/* The hardware arbitration is disabled if any one channel
 844	 * doesn't support explicitly.
 845	 */
 846	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 847		NCSI_FOR_EACH_CHANNEL(np, nc) {
 
 
 848			cap = nc->caps[NCSI_CAP_GENERIC].cap;
 849			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
 850			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
 851			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
 852				ndp->flags &= ~NCSI_DEV_HWA;
 853				return false;
 854			}
 855		}
 856	}
 857
 858	ndp->flags |= NCSI_DEV_HWA;
 859	return true;
 860}
 861
 862static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
 863{
 864	struct ncsi_package *np;
 865	struct ncsi_channel *nc;
 866	unsigned long flags;
 867
 868	/* Move all available channels to processing queue */
 869	spin_lock_irqsave(&ndp->lock, flags);
 870	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 871		NCSI_FOR_EACH_CHANNEL(np, nc) {
 872			WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
 873				     !list_empty(&nc->link));
 874			ncsi_stop_channel_monitor(nc);
 875			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
 876		}
 877	}
 878	spin_unlock_irqrestore(&ndp->lock, flags);
 879
 880	/* We can have no channels in extremely case */
 881	if (list_empty(&ndp->channel_queue)) {
 882		ncsi_report_link(ndp, false);
 883		return -ENOENT;
 884	}
 885
 886	return ncsi_process_next_channel(ndp);
 887}
 888
/* Enumeration state machine, run once at first start.  It deselects
 * every possible package (0-7), probes which ones respond, then for
 * each discovered package clears channel state and collects version,
 * capabilities and link status per channel before deselecting it.
 * When no package is left, probing completes and a channel is chosen
 * for configuration (HWA mode when all channels support it).
 */
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_probe:
		nd->state = ncsi_dev_state_probe_deselect;
		/* Fall through */
	case ncsi_dev_state_probe_deselect:
		ndp->pending_req_num = 8;

		/* Deselect all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_package;
		break;
	case ncsi_dev_state_probe_package:
		/* 8 SP + 8 DP commands below */
		ndp->pending_req_num = 16;

		/* Select all possible packages */
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		/* Disable all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_channel;
		break;
	case ncsi_dev_state_probe_channel:
		/* Advance to the next discovered package, or NULL when
		 * the whole list has been visited.
		 */
		if (!ndp->active_package)
			ndp->active_package = list_first_or_null_rcu(
				&ndp->packages, struct ncsi_package, node);
		else if (list_is_last(&ndp->active_package->node,
				      &ndp->packages))
			ndp->active_package = NULL;
		else
			ndp->active_package = list_next_entry(
				ndp->active_package, node);

		/* All available packages and channels are enumerated. The
		 * enumeration happens for once when the NCSI interface is
		 * started. So we need continue to start the interface after
		 * the enumeration.
		 *
		 * We have to choose an active channel before configuring it.
		 * Note that we possibly don't have active channel in extreme
		 * situation.
		 */
		if (!ndp->active_package) {
			ndp->flags |= NCSI_DEV_PROBED;
			if (ncsi_check_hwa(ndp))
				ncsi_enable_hwa(ndp);
			else
				ncsi_choose_active_channel(ndp);
			return;
		}

		/* Select the active package */
		ndp->pending_req_num = 1;
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->active_package->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_cis;
		break;
	case ncsi_dev_state_probe_cis:
		/* One CIS per possible channel index in the package */
		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = ndp->active_package->id;
		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
			nca.channel = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = np->channel_num;

		/* Retrieve version, capability or link status */
		if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		if (nd->state == ncsi_dev_state_probe_gvi)
			nd->state = ncsi_dev_state_probe_gc;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nd->state = ncsi_dev_state_probe_gls;
		else
			nd->state = ncsi_dev_state_probe_dp;
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the active package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->active_package->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Scan channels in next package */
		nd->state = ncsi_dev_state_probe_channel;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
			    nd->state);
	}

	return;
error:
	ncsi_report_link(ndp, true);
}
1052
1053static void ncsi_dev_work(struct work_struct *work)
1054{
1055	struct ncsi_dev_priv *ndp = container_of(work,
1056			struct ncsi_dev_priv, work);
1057	struct ncsi_dev *nd = &ndp->ndev;
1058
1059	switch (nd->state & ncsi_dev_state_major) {
1060	case ncsi_dev_state_probe:
1061		ncsi_probe_channel(ndp);
1062		break;
1063	case ncsi_dev_state_suspend:
1064		ncsi_suspend_channel(ndp);
1065		break;
1066	case ncsi_dev_state_config:
1067		ncsi_configure_channel(ndp);
1068		break;
1069	default:
1070		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1071			    nd->state);
1072	}
1073}
1074
/* Dequeue the next channel and drive it into configuration (when it
 * was inactive) or suspension (when it was active).  With an empty
 * queue, either re-select a channel if a reshuffle was requested or
 * report the final link state.
 *
 * Return: 0 on success or a negative errno.
 */
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc;
	int old_state;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	nc = list_first_or_null_rcu(&ndp->channel_queue,
				    struct ncsi_channel, link);
	if (!nc) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		goto out;
	}

	list_del_init(&nc->link);
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Hide the channel from other paths while it is in flight */
	spin_lock_irqsave(&nc->lock, flags);
	old_state = nc->state;
	nc->state = NCSI_CHANNEL_INVISIBLE;
	spin_unlock_irqrestore(&nc->lock, flags);

	ndp->active_channel = nc;
	ndp->active_package = nc->package;

	switch (old_state) {
	case NCSI_CHANNEL_INACTIVE:
		ndp->ndev.state = ncsi_dev_state_config;
		ncsi_configure_channel(ndp);
		break;
	case NCSI_CHANNEL_ACTIVE:
		ndp->ndev.state = ncsi_dev_state_suspend;
		ncsi_suspend_channel(ndp);
		break;
	default:
		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
			   old_state, nc->package->id, nc->id);
		ncsi_report_link(ndp, false);
		return -EINVAL;
	}

	return 0;

out:
	ndp->active_channel = NULL;
	ndp->active_package = NULL;
	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
		return ncsi_choose_active_channel(ndp);
	}

	ncsi_report_link(ndp, false);
	return -ENODEV;
}
1129
1130#if IS_ENABLED(CONFIG_IPV6)
/* inet6addr notifier: keeps the NCSI global multicast filter in sync
 * with IPv6 address usage on the underlying interface.  The filter is
 * only toggled on the first address added or the last one removed.
 */
static int ncsi_inet6addr_event(struct notifier_block *this,
				unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *dev = ifa->idev->dev;
	struct ncsi_dev *nd = ncsi_find_dev(dev);
	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	bool action;
	int ret;

	/* Ignore devices we don't manage, and link-local/loopback
	 * addresses.
	 */
	if (!ndp || (ipv6_addr_type(&ifa->addr) &
	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_UP:
		/* Enable the filter when the first address appears */
		action = (++ndp->inet6_addr_num) == 1;
		nca.type = NCSI_PKT_CMD_EGMF;
		break;
	case NETDEV_DOWN:
		/* Disable it when the last address goes away */
		action = (--ndp->inet6_addr_num == 0);
		nca.type = NCSI_PKT_CMD_DGMF;
		break;
	default:
		return NOTIFY_OK;
	}

	/* We might not have active channel or packages. The IPv6
	 * required multicast will be enabled when active channel
	 * or packages are chosen.
	 */
	np = ndp->active_package;
	nc = ndp->active_channel;
	if (!action || !np || !nc)
		return NOTIFY_OK;

	/* We needn't enable or disable it if the function isn't supported */
	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
		return NOTIFY_OK;

	nca.ndp = ndp;
	nca.req_flags = 0;
	nca.package = np->id;
	nca.channel = nc->id;
	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
	ret = ncsi_xmit_cmd(&nca);
	if (ret) {
		netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
			    (event == NETDEV_UP) ? "enable" : "disable", ret);
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1188
/* Shared notifier, registered once while any NCSI device exists */
static struct notifier_block ncsi_inet6addr_notifier = {
	.notifier_call = ncsi_inet6addr_event,
};
1192#endif /* CONFIG_IPV6 */
 
1193
/* Register a network device with the NCSI stack (idempotent: a second
 * call for the same device returns the existing NCSI device).  Sets up
 * the state-machine worker, the request slot table with its timeout
 * timers, the IPv6 notifier (first device only) and the NCSI packet
 * receive handler.
 *
 * Return: the NCSI device, or NULL on allocation failure.
 */
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_WORK(&ndp->work, ncsi_dev_work);

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = NCSI_REQ_START_IDX;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		setup_timer(&ndp->requests[i].timer,
			    ncsi_request_timeout,
			    (unsigned long)&ndp->requests[i]);
	}

	spin_lock_irqsave(&ncsi_dev_lock, flags);
#if IS_ENABLED(CONFIG_IPV6)
	ndp->inet6_addr_num = 0;
	/* The shared notifier is registered with the first device */
	if (list_empty(&ncsi_dev_list))
		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	return nd;
}
1249EXPORT_SYMBOL_GPL(ncsi_register_dev);
1250
1251int ncsi_start_dev(struct ncsi_dev *nd)
1252{
1253	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1254	int ret;
1255
1256	if (nd->state != ncsi_dev_state_registered &&
1257	    nd->state != ncsi_dev_state_functional)
1258		return -ENOTTY;
1259
1260	if (!(ndp->flags & NCSI_DEV_PROBED)) {
 
 
1261		nd->state = ncsi_dev_state_probe;
1262		schedule_work(&ndp->work);
1263		return 0;
1264	}
1265
1266	if (ndp->flags & NCSI_DEV_HWA)
1267		ret = ncsi_enable_hwa(ndp);
1268	else
1269		ret = ncsi_choose_active_channel(ndp);
1270
1271	return ret;
1272}
1273EXPORT_SYMBOL_GPL(ncsi_start_dev);
1274
/* Stop the NCSI device: halt every channel monitor, force all channels
 * back to inactive and report link down to the stack.
 */
void ncsi_stop_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	bool chained;
	int old_state;
	unsigned long flags;

	/* Stop the channel monitor and reset channel's state */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			ncsi_stop_channel_monitor(nc);

			spin_lock_irqsave(&nc->lock, flags);
			chained = !list_empty(&nc->link);
			old_state = nc->state;
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			/* A still-queued or in-flight channel at this
			 * point indicates a state-machine bug.
			 */
			WARN_ON_ONCE(chained ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	ncsi_report_link(ndp, true);
}
1302EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1303
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Tear down a registered NCSI device: stop receiving NCSI packets,
 * destroy all packages/channels and drop it from the global list.
 */
void ncsi_unregister_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	/* No more NCSI response packets are delivered after this */
	dev_remove_pack(&ndp->ptype);

	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
		ncsi_remove_package(np);

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_del_rcu(&ndp->node);
#if IS_ENABLED(CONFIG_IPV6)
	/* The shared notifier is only needed while devices exist */
	if (list_empty(&ncsi_dev_list))
		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	kfree(ndp);
}
1325EXPORT_SYMBOL_GPL(ncsi_unregister_dev);