/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Murali Karicheri <m-karicheri2@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"

#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT	64
#define NETCP_TX_TIMEOUT	(5 * HZ)
#define NETCP_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR	16

#define NETCP_EFUSE_REG_INDEX	0

#define NETCP_MOD_PROBE_SKIPPED	1
#define NETCP_MOD_PROBE_FAILED	2

#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		|	\
		    NETIF_MSG_DRV	| NETIF_MSG_LINK	|	\
		    NETIF_MSG_IFUP	| NETIF_MSG_INTR	|	\
		    NETIF_MSG_PROBE	| NETIF_MSG_TIMER	|	\
		    NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	|	\
		    NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	|	\
		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
		    NETIF_MSG_RX_STATUS)

#define NETCP_EFUSE_ADDR_SWAP	2

#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)

#define for_each_netcp_module(module)			\
	list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
	list_for_each_entry(inst_modpriv, \
		&((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)			\
	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)

/* Module management structures */
struct netcp_device {
	struct list_head	device_list;
	struct list_head	interface_head;
	struct list_head	modpriv_head;
	struct device		*device;
};

struct netcp_inst_modpriv {
	struct netcp_device	*netcp_device;
	struct netcp_module	*netcp_module;
	struct list_head	inst_list;
	void			*module_priv;
};

struct netcp_intf_modpriv {
	struct netcp_intf	*netcp_priv;
	struct netcp_module	*netcp_module;
	struct list_head	intf_list;
	void			*module_priv;
};

struct netcp_tx_cb {
	void	*ts_context;
	void	(*txtstamp)(void *context, struct sk_buff *skb);
};

static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");

/* Helper functions - Get/Set */
static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = le32_to_cpu(desc->buff_len);
	*buff = le32_to_cpu(desc->buff);
	*ndesc = le32_to_cpu(desc->next_desc);
}

static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	return desc->sw_data[index];
}

/* use these macros to get sw data */
#define GET_SW_DATA0(desc) get_sw_data(0, desc)
#define GET_SW_DATA1(desc) get_sw_data(1, desc)
#define GET_SW_DATA2(desc) get_sw_data(2, desc)
#define GET_SW_DATA3(desc) get_sw_data(3, desc)

static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = le32_to_cpu(desc->orig_buff);
	*buff_len = le32_to_cpu(desc->orig_len);
}

static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = le32_to_cpu(desc[i]);
}

static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = cpu_to_le32(buff_len);
	desc->buff = cpu_to_le32(buff);
	desc->next_desc = cpu_to_le32(ndesc);
}

static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = cpu_to_le32(desc_info);
	desc->packet_info = cpu_to_le32(pkt_info);
}

static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	desc->sw_data[index] = data;
}

/* use these macros to set sw data */
#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)

static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = cpu_to_le32(buff);
	desc->orig_len = cpu_to_le32(buff_len);
}

static void set_words(u32 *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = cpu_to_le32(words[i]);
}

/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	switch (swap) {
	case NETCP_EFUSE_ADDR_SWAP:
		addr0 = addr1;
		addr1 = readl(efuse_mac);
		break;
	default:
		break;
	}

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}

static const char *netcp_node_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}

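/* Probe a registered module against one NetCP device instance: match the
 * module name against the children of the "netcp-devices" DT node, run the
 * module's probe, and, once a primary module is present, attach the module
 * to each interface and register any not-yet-registered net devices.
 */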
static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node = dev->of_node;
	struct device_node *child;
	struct netcp_inst_modpriv *inst_modpriv;
	struct netcp_intf *netcp_intf;
	struct netcp_module *tmp;
	bool primary_module_registered = false;
	int ret;

	/* Find this module in the sub-tree for this device */
	devices = of_get_child_by_name(node, "netcp-devices");
	if (!devices) {
		dev_err(dev, "could not find netcp-devices node\n");
		return NETCP_MOD_PROBE_SKIPPED;
	}

	for_each_available_child_of_node(devices, child) {
		const char *name = netcp_node_name(child);

		if (!strcasecmp(module->name, name))
			break;
	}

	of_node_put(devices);
	/* If module not used for this device, skip it */
	if (!child) {
		dev_warn(dev, "module(%s) not used for device\n", module->name);
		return NETCP_MOD_PROBE_SKIPPED;
	}

	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
	if (!inst_modpriv) {
		of_node_put(child);
		return -ENOMEM;
	}

	inst_modpriv->netcp_device = netcp_device;
	inst_modpriv->netcp_module = module;
	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

	ret = module->probe(netcp_device, dev, child,
			    &inst_modpriv->module_priv);
	of_node_put(child);
	if (ret) {
		dev_err(dev, "Probe of module(%s) failed with %d\n",
			module->name, ret);
		list_del(&inst_modpriv->inst_list);
		devm_kfree(dev, inst_modpriv);
		return NETCP_MOD_PROBE_FAILED;
	}

	/* Attach modules only if the primary module is probed */
	for_each_netcp_module(tmp) {
		if (tmp->primary)
			primary_module_registered = true;
	}

	if (!primary_module_registered)
		return 0;

	/* Attach module to interfaces */
	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
			    interface_list) {
		struct netcp_intf_modpriv *intf_modpriv;

		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
					    GFP_KERNEL);
		if (!intf_modpriv)
			return -ENOMEM;

		interface = of_parse_phandle(netcp_intf->node_interface,
					     module->name, 0);

		if (!interface) {
			devm_kfree(dev, intf_modpriv);
			continue;
		}

		intf_modpriv->netcp_priv = netcp_intf;
		intf_modpriv->netcp_module = module;
		list_add_tail(&intf_modpriv->intf_list,
			      &netcp_intf->module_head);

		ret = module->attach(inst_modpriv->module_priv,
				     netcp_intf->ndev, interface,
				     &intf_modpriv->module_priv);
		of_node_put(interface);
		if (ret) {
			dev_dbg(dev, "Attach of module %s declined with %d\n",
				module->name, ret);
			list_del(&intf_modpriv->intf_list);
			devm_kfree(dev, intf_modpriv);
			continue;
		}
	}

	/* Now register the interface with netdev */
	list_for_each_entry(netcp_intf,
			    &netcp_device->interface_head,
			    interface_list) {
		/* If interface not registered then register now */
		if (!netcp_intf->netdev_registered) {
			ret = netcp_register_interface(netcp_intf);
			if (ret)
				return -ENODEV;
		}
	}
	return 0;
}

int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);

static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}

void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);

void *netcp_module_get_intf_data(struct netcp_module *module,
				 struct netcp_intf *intf)
{
	struct netcp_intf_modpriv *intf_modpriv;

	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
		if (intf_modpriv->netcp_module == module)
			return intf_modpriv->module_priv;
	return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);

/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head	 list;
	netcp_hook_rtn		*hook_rtn;
	void			*hook_data;
	int			 order;
};

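/* Hook lists are kept sorted by ascending 'order'; registration inserts the
 * new entry before the first existing entry with a higher order, so modules
 * see each packet in a well-defined sequence on both the TX and RX paths.
 */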
int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);

int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order     == order) &&
		    (next->hook_rtn  == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);

int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_rxhook);

int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order     == order) &&
		    (next->hook_rtn  == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);

static void netcp_frag_free(bool is_frag, void *ptr)
{
	if (is_frag)
		skb_free_frag(ptr);
	else
		kfree(ptr);
}

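/* Walk the linked Rx descriptor chain: unmap and free the page attached to
 * each fragment descriptor, then free the primary buffer and return every
 * descriptor to the Rx pool.
 */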
static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc)
{
	struct knav_dma_desc *ndesc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len, dma_sz = sizeof(*ndesc);
	void *buf_ptr;
	u32 tmp;

	get_words(&dma_desc, 1, &desc->next_desc);

	while (dma_desc) {
		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			break;
		}
		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(ndesc);
		buf_len = (int)GET_SW_DATA1(ndesc);
		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buf_ptr);
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	buf_ptr = (void *)GET_SW_DATA0(desc);
	buf_len = (int)GET_SW_DATA1(desc);

	if (buf_ptr)
		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
	knav_pool_desc_put(netcp->rx_pool, desc);
}

static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc;
	unsigned int dma_sz;
	dma_addr_t dma;

	for (; ;) {
		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
		if (!dma)
			break;

		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
				__func__);
			netcp->ndev->stats.rx_errors++;
			continue;
		}
		netcp_free_rx_desc_chain(netcp, desc);
		netcp->ndev->stats.rx_dropped++;
	}
}

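/* Pop one completed packet from the Rx queue: build an skb around the
 * primary buffer, attach any page fragments, strip the FCS, run the RX
 * hook chain, and hand the skb to the network stack.
 */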
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	org_buf_ptr = (void *)GET_SW_DATA0(desc);
	org_buf_len = (int)GET_SW_DATA1(desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_desc;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		page = (struct page *)GET_SW_DATA0(ndesc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
				&dma_buff, buf_len, page);
			goto free_desc;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Remove ethernet FCS from the packet */
	__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks */
	p_info.skb = skb;
	skb->dev = netcp->ndev;
	p_info.rxtstamp_complete = false;
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			netcp->ndev->stats.rx_errors++;
			dev_kfree_skb(skb);
			return 0;
		}
	}

	netcp->ndev->stats.rx_packets++;
	netcp->ndev->stats.rx_bytes += skb->len;

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_desc:
	netcp_free_rx_desc_chain(netcp, desc);
	netcp->ndev->stats.rx_errors++;
	return 0;
}

static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}

/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;

	/* Allocate descriptor */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}

static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}

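/* Allocate and map one Rx buffer for the given FDQ: FDQ 0 receives primary
 * buffers sized for a full frame (allocated with netdev_alloc_frag()), all
 * other FDQs receive whole pages used as fragment buffers.
 */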
static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 sw_data[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return -ENOMEM;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		bufptr = netdev_alloc_frag(primary_buf_len);
		sw_data[1] = primary_buf_len;

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev,
					     "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(netcp->dev, dma)))
			goto fail;

		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)bufptr;
	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)page;
		sw_data[1] = 0;
	}

	desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	SET_SW_DATA0(sw_data[0], hwdesc);
	SET_SW_DATA1(sw_data[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	/* Push to FDQs */
	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return 0;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
	return -ENOMEM;
}

/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i, ret = 0;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		while (fdq_deficit[i]-- && !ret)
			ret = netcp_allocate_rx_buf(netcp, i);
	} /* end for fdqs */
}

/* NAPI poll */
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						rx_napi);
	unsigned int packets;

	packets = netcp_process_rx_packets(netcp, budget);

	netcp_rxpool_refill(netcp);
	if (packets < budget) {
		napi_complete(&netcp->rx_napi);
		knav_queue_enable_notify(netcp->rx_queue);
	}

	return packets;
}

static void netcp_rx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->rx_queue);
	napi_schedule(&netcp->rx_napi);
}

static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
				 &dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}

static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct knav_dma_desc *desc;
	struct netcp_tx_cb *tx_cb;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		skb = (struct sk_buff *)GET_SW_DATA0(desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		tx_cb = (struct netcp_tx_cb *)skb->cb;
		if (tx_cb->txtstamp)
			tx_cb->txtstamp(tx_cb->ts_context, skb);

		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		    netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		netcp->ndev->stats.tx_packets++;
		netcp->ndev->stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}

static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
	int packets;
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						tx_napi);

	packets = netcp_process_tx_compl_packets(netcp, budget);
	if (packets < budget) {
		napi_complete(&netcp->tx_napi);
		knav_queue_enable_notify(netcp->tx_compl_q);
	}

	return packets;
}

static void netcp_tx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_schedule(&netcp->tx_napi);
}

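/* Map an skb into a chain of hardware descriptors: one descriptor for the
 * linear part and one per page fragment, linked through next_desc. Returns
 * the head descriptor, or NULL on mapping or allocation failure.
 */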
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma_addr))) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (IS_ERR_OR_NULL(desc)) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = frag->page_offset;
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 desc_dma_32;
		u32 pkt_info;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (IS_ERR_OR_NULL(ndesc)) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
		pkt_info =
			(netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
				KNAV_DMA_DESC_RETQ_SHIFT;
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		desc_dma_32 = (u32)desc_dma;
		set_words(&desc_dma_32, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}

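/* Run the TX hook chain so a module can claim the packet and select a
 * tx_pipe, fill in the psdata/epib words and the completion queue id, then
 * push the descriptor chain onto the pipe's hardware transmit queue.
 */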
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	struct netcp_tx_cb *tx_cb;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	tx_cb = (struct netcp_tx_cb *)skb->cb;
	tx_cb->ts_context = p_info.ts_context;
	tx_cb->txtstamp = p_info.txtstamp;

	/* update descriptor */
	if (p_info.psdata_len) {
		/* psdata points to both native-endian and device-endian data */
		__le32 *psdata = (void __force *)p_info.psdata;

		memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
			p_info.psdata_len);
		set_words(p_info.psdata, p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	/* warning!!!! We are saving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	SET_SW_DATA0((u32)skb, desc);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words(&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}

/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			ndev->stats.tx_dropped++;
			return ret;
		}
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	netif_trans_update(ndev);

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	ndev->stats.tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}

int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
	if (tx_pipe->dma_channel) {
		knav_dma_close_channel(tx_pipe->dma_channel);
		tx_pipe->dma_channel = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);

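/* Open a transmit pipe: acquire the named MEM-to-DEV DMA channel and the
 * shared hardware queue that feeds it; on failure the channel (if it was
 * opened) is closed again and an error code is returned.
 */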
int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
	struct device *dev = tx_pipe->netcp_device->device;
	struct knav_dma_cfg config;
	int ret = 0;
	u8 name[16];

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	tx_pipe->dma_channel = knav_dma_open_channel(dev,
				tx_pipe->dma_chan_name, &config);
	if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) {
		dev_err(dev, "failed opening tx chan(%s)\n",
			tx_pipe->dma_chan_name);
		ret = -ENODEV;
		goto err;
	}

	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
					     KNAV_QUEUE_SHARED);
	if (IS_ERR(tx_pipe->dma_queue)) {
		ret = PTR_ERR(tx_pipe->dma_queue);
		dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
			name, ret);
		goto err;
	}

	dev_dbg(dev, "opened tx pipe %s\n", name);
	return 0;

err:
	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
		knav_dma_close_channel(tx_pipe->dma_channel);
	tx_pipe->dma_channel = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);

int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
		      struct netcp_device *netcp_device,
		      const char *dma_chan_name, unsigned int dma_queue_id)
{
	memset(tx_pipe, 0, sizeof(*tx_pipe));
	tx_pipe->netcp_device = netcp_device;
	tx_pipe->dma_chan_name = dma_chan_name;
	tx_pipe->dma_queue_id = dma_queue_id;
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);

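/* Interface address bookkeeping: netcp keeps a private list of addresses
 * with mark-and-sweep semantics. netcp_set_rx_mode() marks entries that are
 * still wanted (ADDR_VALID) or newly seen (ADDR_NEW), then the sweep
 * helpers call out to the modules to delete stale and add new addresses.
 */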
static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
					  const u8 *addr,
					  enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node) {
		if (naddr->type != type)
			continue;
		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
			continue;
		return naddr;
	}

	return NULL;
}

static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
					 const u8 *addr,
					 enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
	if (!naddr)
		return NULL;

	naddr->type = type;
	naddr->flags = 0;
	naddr->netcp = netcp;
	if (addr)
		ether_addr_copy(naddr->addr, addr);
	else
		eth_zero_addr(naddr->addr);
	list_add_tail(&naddr->node, &netcp->addr_list);

	return naddr;
}

static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}

static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node)
		naddr->flags = 0;
}

static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
				enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = netcp_addr_find(netcp, addr, type);
	if (naddr) {
		naddr->flags |= ADDR_VALID;
		return;
	}

	naddr = netcp_addr_add(netcp, addr, type);
	if (!WARN_ON(!naddr))
		naddr->flags |= ADDR_NEW;
}

static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		netcp_addr_del(netcp, naddr);
	}
}

static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);

		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
	}
}

static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	spin_lock(&netcp->lock);
	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
	spin_unlock(&netcp->lock);
}

static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}

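/* Allocate all Navigator resources an interface needs before it can pass
 * traffic: Rx/Tx descriptor pools, the Tx and Rx completion queues (with
 * NAPI notifiers installed but disabled), the Rx free descriptor queues,
 * and finally the Rx DMA channel itself.
 */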
static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	u8 name[16];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
						netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		ret = PTR_ERR(netcp->rx_pool);
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
						netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool);
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q);
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue);
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
	     ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]);
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction		= DMA_DEV_TO_MEM;
	config.u.rx.einfo_present	= true;
	config.u.rx.psinfo_present	= true;
	config.u.rx.err_mode		= DMA_DROP;
	config.u.rx.desc_type		= DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop	= false;
	config.u.rx.sop_offset		= NETCP_SOP_OFFSET;
	config.u.rx.dst_q		= netcp->rx_queue_id;
	config.u.rx.thresh		= DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
					netcp->dma_chan_name, &config);
	if (IS_ERR_OR_NULL(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = -ENODEV;
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
1663	return 0;
1664
1665fail:
1666	netcp_free_navigator_resources(netcp);
1667	return ret;
1668}
1669
1670/* Open the device */
1671static int netcp_ndo_open(struct net_device *ndev)
1672{
1673	struct netcp_intf *netcp = netdev_priv(ndev);
1674	struct netcp_intf_modpriv *intf_modpriv;
1675	struct netcp_module *module;
1676	int ret;
1677
1678	netif_carrier_off(ndev);
1679	ret = netcp_setup_navigator_resources(ndev);
1680	if (ret) {
1681		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
1682		goto fail;
1683	}
1684
1685	for_each_module(netcp, intf_modpriv) {
1686		module = intf_modpriv->netcp_module;
1687		if (module->open) {
1688			ret = module->open(intf_modpriv->module_priv, ndev);
1689			if (ret != 0) {
1690				dev_err(netcp->ndev_dev, "module open failed\n");
1691				goto fail_open;
1692			}
1693		}
1694	}
1695
1696	napi_enable(&netcp->rx_napi);
1697	napi_enable(&netcp->tx_napi);
1698	knav_queue_enable_notify(netcp->tx_compl_q);
1699	knav_queue_enable_notify(netcp->rx_queue);
1700	netcp_rxpool_refill(netcp);
1701	netif_tx_wake_all_queues(ndev);
1702	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
1703	return 0;
1704
1705fail_open:
1706	for_each_module(netcp, intf_modpriv) {
1707		module = intf_modpriv->netcp_module;
1708		if (module->close)
1709			module->close(intf_modpriv->module_priv, ndev);
1710	}
1711
1712fail:
1713	netcp_free_navigator_resources(netcp);
1714	return ret;
1715}
1716
1717/* Close the device */
1718static int netcp_ndo_stop(struct net_device *ndev)
1719{
1720	struct netcp_intf *netcp = netdev_priv(ndev);
1721	struct netcp_intf_modpriv *intf_modpriv;
1722	struct netcp_module *module;
1723	int err = 0;
1724
1725	netif_tx_stop_all_queues(ndev);
1726	netif_carrier_off(ndev);
1727	netcp_addr_clear_mark(netcp);
1728	netcp_addr_sweep_del(netcp);
1729	knav_queue_disable_notify(netcp->rx_queue);
1730	knav_queue_disable_notify(netcp->tx_compl_q);
1731	napi_disable(&netcp->rx_napi);
1732	napi_disable(&netcp->tx_napi);
1733
1734	for_each_module(netcp, intf_modpriv) {
1735		module = intf_modpriv->netcp_module;
1736		if (module->close) {
1737			err = module->close(intf_modpriv->module_priv, ndev);
1738			if (err != 0)
1739				dev_err(netcp->ndev_dev, "Close failed\n");
1740		}
1741	}
1742
1743	/* Recycle Rx descriptors from completion queue */
1744	netcp_empty_rx_queue(netcp);
1745
1746	/* Recycle Tx descriptors from completion queue */
1747	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
1748
1749	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
1750		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
1751			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));
1752
1753	netcp_free_navigator_resources(netcp);
1754	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
1755	return 0;
1756}
1757
1758static int netcp_ndo_ioctl(struct net_device *ndev,
1759			   struct ifreq *req, int cmd)
1760{
1761	struct netcp_intf *netcp = netdev_priv(ndev);
1762	struct netcp_intf_modpriv *intf_modpriv;
1763	struct netcp_module *module;
1764	int ret = -1, err = -EOPNOTSUPP;
1765
1766	if (!netif_running(ndev))
1767		return -EINVAL;
1768
1769	for_each_module(netcp, intf_modpriv) {
1770		module = intf_modpriv->netcp_module;
1771		if (!module->ioctl)
1772			continue;
1773
1774		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
1775		if ((err < 0) && (err != -EOPNOTSUPP)) {
1776			ret = err;
1777			goto out;
1778		}
1779		if (err == 0)
1780			ret = err;
1781	}
1782
1783out:
1784	return (ret == 0) ? 0 : err;
1785}
1786
1787static void netcp_ndo_tx_timeout(struct net_device *ndev)
1788{
1789	struct netcp_intf *netcp = netdev_priv(ndev);
1790	unsigned int descs = knav_pool_count(netcp->tx_pool);
1791
1792	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
1793	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
1794	netif_trans_update(ndev);
1795	netif_tx_wake_all_queues(ndev);
1796}
1797
1798static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
1799{
1800	struct netcp_intf *netcp = netdev_priv(ndev);
1801	struct netcp_intf_modpriv *intf_modpriv;
1802	struct netcp_module *module;
1803	unsigned long flags;
1804	int err = 0;
1805
1806	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
1807
1808	spin_lock_irqsave(&netcp->lock, flags);
1809	for_each_module(netcp, intf_modpriv) {
1810		module = intf_modpriv->netcp_module;
1811		if ((module->add_vid) && (vid != 0)) {
1812			err = module->add_vid(intf_modpriv->module_priv, vid);
1813			if (err != 0) {
1814				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
1815					vid);
1816				break;
1817			}
1818		}
1819	}
1820	spin_unlock_irqrestore(&netcp->lock, flags);
1821
1822	return err;
1823}
1824
1825static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
1826{
1827	struct netcp_intf *netcp = netdev_priv(ndev);
1828	struct netcp_intf_modpriv *intf_modpriv;
1829	struct netcp_module *module;
1830	unsigned long flags;
1831	int err = 0;
1832
1833	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
1834
1835	spin_lock_irqsave(&netcp->lock, flags);
1836	for_each_module(netcp, intf_modpriv) {
1837		module = intf_modpriv->netcp_module;
1838		if (module->del_vid) {
1839			err = module->del_vid(intf_modpriv->module_priv, vid);
1840			if (err != 0) {
1841				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
1842					vid);
1843				break;
1844			}
1845		}
1846	}
1847	spin_unlock_irqrestore(&netcp->lock, flags);
1848	return err;
1849}
1850
static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv,
			      select_queue_fallback_t fallback)
{
	return 0;
}

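/* Only mqprio offload is accepted, and only when the requested number of
 * traffic classes fits within the real Tx queue count.  An illustrative
 * (hypothetical) userspace invocation on a multi-queue instance:
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 4 \
 *	map 0 1 2 3 queues 1@0 1@1 1@2 1@3 hw 1
 *
 * Each class is then mapped to exactly one hardware queue below.
 */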
static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			  struct tc_to_netdev *tc)
{
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < tc->tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (tc->tc) {
		netdev_set_num_tc(dev, tc->tc);
		for (i = 0; i < tc->tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}

static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_do_ioctl		= netcp_ndo_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= netcp_select_queue,
	.ndo_setup_tc		= netcp_setup_tc,
};

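/* netcp_create_interface() pulls its configuration from one child of the
 * "netcp-interfaces" device-tree node.  A minimal sketch of such a node,
 * with purely illustrative values:
 *
 *	netcp-interfaces {
 *		interface-0 {
 *			rx-channel = "netrx0";
 *			rx-pool = <1024 12>;		// size, region-id
 *			tx-pool = <1024 12>;		// size, region-id
 *			rx-queue-depth = <128 128 0 0>;
 *			rx-queue = <8704>;
 *			tx-completion-queue = <8706>;
 *			efuse-mac = <1>;		// else mac-address
 *		};
 *	};
 *
 * "rx-channel", "rx-pool" and "tx-pool" are mandatory; the queue numbers
 * fall back to KNAV_QUEUE_QPEND when absent.
 */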
static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	const void *mac_addr;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	/* MTU range: 68 - 9486 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap_nocache(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			random_ether_addr(ndev->dev_addr);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		mac_addr = of_get_mac_address(node_interface);
		if (mac_addr)
			ether_addr_copy(ndev->dev_addr, mac_addr);
		else
			random_ether_addr(ndev->dev_addr);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least %ld\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll,
			  NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id		= 0;
	ndev->watchdog_timeo	= NETCP_TX_TIMEOUT;
	ndev->netdev_ops	= &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}

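/* Tear-down order matters here: modules are released and unhooked while
 * the netdev is still alive, then the netdev itself is unregistered and
 * freed.
 */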
static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	netif_napi_del(&netcp->rx_napi);
	free_netdev(ndev);
}

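/* Probe flow: power up the NETCP domain via runtime PM, create one
 * net_device per available child of the "netcp-interfaces" node, then
 * publish the instance on the global netcp_devices list so that modules
 * registered later can attach to it.
 */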
static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	int ret;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* Create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}

	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%s)\n",
				child->name);
			/* drop the reference held by the iterator on break */
			of_node_put(child);
			goto probe_quit_interface;
		}
	}

	of_node_put(interfaces);

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);

	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	of_node_put(interfaces);

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SoCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");