   1/**********************************************************************
   2 * Author: Cavium, Inc.
   3 *
   4 * Contact: support@cavium.com
   5 *          Please include "LiquidIO" in the subject.
   6 *
   7 * Copyright (c) 2003-2016 Cavium, Inc.
   8 *
   9 * This file is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License, Version 2, as
  11 * published by the Free Software Foundation.
  12 *
  13 * This file is distributed in the hope that it will be useful, but
  14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16 * NONINFRINGEMENT.  See the GNU General Public License for more details.
  17 ***********************************************************************/
  18#include <linux/module.h>
  19#include <linux/interrupt.h>
  20#include <linux/pci.h>
  21#include <net/vxlan.h>
  22#include "liquidio_common.h"
  23#include "octeon_droq.h"
  24#include "octeon_iq.h"
  25#include "response_manager.h"
  26#include "octeon_device.h"
  27#include "octeon_nic.h"
  28#include "octeon_main.h"
  29#include "octeon_network.h"
  30#include "cn23xx_vf_device.h"
  31
  32MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
  33MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
  34MODULE_LICENSE("GPL");
  35
  36static int debug = -1;
  37module_param(debug, int, 0644);
  38MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
  39
  40#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
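/* Standard NETIF_MSG bitmap semantics: debug = -1 keeps the driver's
 * DEFAULT_MSG_ENABLE defaults (presumably resolved through
 * netif_msg_init() when each lio interface is set up), while an
 * explicit bitmap selects levels directly, e.g.:
 *
 *	modprobe liquidio_vf debug=0x7	# NETIF_MSG_DRV|PROBE|LINK
 */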
  41
  42struct oct_timestamp_resp {
  43	u64 rh;
  44	u64 timestamp;
  45	u64 status;
  46};
  47
  48union tx_info {
  49	u64 u64;
  50	struct {
  51#ifdef __BIG_ENDIAN_BITFIELD
  52		u16 gso_size;
  53		u16 gso_segs;
  54		u32 reserved;
  55#else
  56		u32 reserved;
  57		u16 gso_segs;
  58		u16 gso_size;
  59#endif
  60	} s;
  61};
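/* tx_info overlays the first 8-byte ossp word of the Tx command (see
 * the cast in liquidio_xmit()).  The #ifdef mirrors the field order so
 * gso_size/gso_segs occupy the same byte lanes on either host
 * endianness, presumably because the Octeon firmware interprets the
 * word as big-endian.
 */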
  62
  63#define OCTNIC_GSO_MAX_HEADER_SIZE 128
  64#define OCTNIC_GSO_MAX_SIZE \
  65		(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
  66
  67static int
  68liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  69static void liquidio_vf_remove(struct pci_dev *pdev);
  70static int octeon_device_init(struct octeon_device *oct);
  71static int liquidio_stop(struct net_device *netdev);
  72
  73static int lio_wait_for_oq_pkts(struct octeon_device *oct)
  74{
  75	struct octeon_device_priv *oct_priv = oct->priv;
  76	int retry = MAX_IO_PENDING_PKT_COUNT;
  77	int pkt_cnt = 0, pending_pkts;
  78	int i;
  79
  80	do {
  81		pending_pkts = 0;
  82
  83		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
  84			if (!(oct->io_qmask.oq & BIT_ULL(i)))
  85				continue;
  86			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
  87		}
  88		if (pkt_cnt > 0) {
  89			pending_pkts += pkt_cnt;
  90			tasklet_schedule(&oct_priv->droq_tasklet);
  91		}
  92		pkt_cnt = 0;
  93		schedule_timeout_uninterruptible(1);
  94
  95	} while (retry-- && pending_pkts);
  96
  97	return pending_pkts; /* pkt_cnt was zeroed on each pass */
  98}
  99
 100/**
 101 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
 102 * @oct: Pointer to Octeon device
 103 */
 104static void pcierror_quiesce_device(struct octeon_device *oct)
 105{
 106	int i;
 107
 108	/* Disable the input and output queues now. No more packets will
 109	 * arrive from Octeon, but we should wait for all packet processing
 110	 * to finish.
 111	 */
 112
 113	/* To allow for in-flight requests */
 114	schedule_timeout_uninterruptible(100);
 115
 116	if (wait_for_pending_requests(oct))
 117		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
 118
 119	/* Force all requests waiting to be fetched by OCTEON to complete. */
 120	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
 121		struct octeon_instr_queue *iq;
 122
 123		if (!(oct->io_qmask.iq & BIT_ULL(i)))
 124			continue;
 125		iq = oct->instr_queue[i];
 126
 127		if (atomic_read(&iq->instr_pending)) {
 128			spin_lock_bh(&iq->lock);
 129			iq->fill_cnt = 0;
 130			iq->octeon_read_index = iq->host_write_index;
 131			iq->stats.instr_processed +=
 132			    atomic_read(&iq->instr_pending);
 133			lio_process_iq_request_list(oct, iq, 0);
 134			spin_unlock_bh(&iq->lock);
 135		}
 136	}
 137
 138	/* Force all pending ordered list requests to time out. */
 139	lio_process_ordered_list(oct, 1);
 140
 141	/* We do not need to wait for output queue packets to be processed. */
 142}
 143
 144/**
 145 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
 146 * @dev: Pointer to PCI device
 147 */
 148static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 149{
 150	u32 status, mask;
 151	int pos = 0x100;
 152
 153	pr_info("%s :\n", __func__);
 154
 155	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
 156	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
 157	if (dev->error_state == pci_channel_io_normal)
 158		status &= ~mask; /* Clear corresponding nonfatal bits */
 159	else
 160		status &= mask; /* Clear corresponding fatal bits */
 161	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
 162}
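/* The hardcoded pos = 0x100 above assumes the AER block is the first
 * extended capability, which evidently holds for this device.  A
 * device-agnostic variant (illustrative sketch only; nothing in this
 * driver calls it) would look the offset up instead:
 */
static inline u16 lio_aer_cap_offset(struct pci_dev *dev)
{
	/* returns 0 if the device exposes no AER capability */
	return pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
}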
 163
 164/**
 165 * stop_pci_io - Stop all PCI IO to a given device
 166 * @oct: Pointer to Octeon device
 167 */
 168static void stop_pci_io(struct octeon_device *oct)
 169{
 170	struct msix_entry *msix_entries;
 171	int i;
 172
 173	/* No more instructions will be forwarded. */
 174	atomic_set(&oct->status, OCT_DEV_IN_RESET);
 175
 176	for (i = 0; i < oct->ifcount; i++)
 177		netif_device_detach(oct->props[i].netdev);
 178
 179	/* Disable interrupts  */
 180	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
 181
 182	pcierror_quiesce_device(oct);
 183	if (oct->msix_on) {
 184		msix_entries = (struct msix_entry *)oct->msix_entries;
 185		for (i = 0; i < oct->num_msix_irqs; i++) {
 186			/* clear the affinity_cpumask */
 187			irq_set_affinity_hint(msix_entries[i].vector,
 188					      NULL);
 189			free_irq(msix_entries[i].vector,
 190				 &oct->ioq_vector[i]);
 191		}
 192		pci_disable_msix(oct->pci_dev);
 193		kfree(oct->msix_entries);
 194		oct->msix_entries = NULL;
 195		octeon_free_ioq_vector(oct);
 196	}
 197	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
 198		lio_get_state_string(&oct->status));
 199
 200	/* making it a common function for all OCTEON models */
 201	cleanup_aer_uncorrect_error_status(oct->pci_dev);
 202
 203	pci_disable_device(oct->pci_dev);
 204}
 205
 206/**
 207 * liquidio_pcie_error_detected - called when PCI error is detected
 208 * @pdev: Pointer to PCI device
 209 * @state: The current pci connection state
 210 *
 211 * This function is called after a PCI bus error affecting
 212 * this device has been detected.
 213 */
 214static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
 215						     pci_channel_state_t state)
 216{
 217	struct octeon_device *oct = pci_get_drvdata(pdev);
 218
 219	/* Non-correctable Non-fatal errors */
 220	if (state == pci_channel_io_normal) {
 221		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
 222		cleanup_aer_uncorrect_error_status(oct->pci_dev);
 223		return PCI_ERS_RESULT_CAN_RECOVER;
 224	}
 225
 226	/* Non-correctable Fatal errors */
 227	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
 228	stop_pci_io(oct);
 229
 230	return PCI_ERS_RESULT_DISCONNECT;
 231}
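/* PCI_ERS_RESULT_CAN_RECOVER lets the AER core continue its recovery
 * sequence (mmio_enabled/resume callbacks, neither of which this
 * driver implements), while PCI_ERS_RESULT_DISCONNECT declares the
 * device lost after stop_pci_io() has torn down IRQs and disabled it.
 */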
 232
 233/* For PCI-E Advanced Error Recovery (AER) Interface */
 234static const struct pci_error_handlers liquidio_vf_err_handler = {
 235	.error_detected = liquidio_pcie_error_detected,
 236};
 237
 238static const struct pci_device_id liquidio_vf_pci_tbl[] = {
 239	{
 240		PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
 241		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
 242	},
 243	{
 244		0, 0, 0, 0, 0, 0, 0
 245	}
 246};
 247MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
 248
 249static struct pci_driver liquidio_vf_pci_driver = {
 250	.name		= "LiquidIO_VF",
 251	.id_table	= liquidio_vf_pci_tbl,
 252	.probe		= liquidio_vf_probe,
 253	.remove		= liquidio_vf_remove,
 254	.err_handler	= &liquidio_vf_err_handler,    /* For AER */
 255};
 256
 257/**
 258 * print_link_info - Print link information
 259 * @netdev: network device
 260 */
 261static void print_link_info(struct net_device *netdev)
 262{
 263	struct lio *lio = GET_LIO(netdev);
 264
 265	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
 266	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
 267		struct oct_link_info *linfo = &lio->linfo;
 268
 269		if (linfo->link.s.link_up) {
 270			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
 271				   linfo->link.s.speed,
 272				   (linfo->link.s.duplex) ? "Full" : "Half");
 273		} else {
 274			netif_info(lio, link, lio->netdev, "Link Down\n");
 275		}
 276	}
 277}
 278
 279/**
 280 * octnet_link_status_change - Routine to notify MTU change
 281 * @work: work_struct data structure
 282 */
 283static void octnet_link_status_change(struct work_struct *work)
 284{
 285	struct cavium_wk *wk = (struct cavium_wk *)work;
 286	struct lio *lio = (struct lio *)wk->ctxptr;
 287
 288	/* lio->linfo.link.s.mtu always contains the max MTU of the lio
 289	 * interface. This work function runs only when the new max MTU is
 290	 * smaller than the current MTU.
 291	 */
 292	rtnl_lock();
 293	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
 294	rtnl_unlock();
 295}
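/* dev_set_mtu() must be called under the RTNL, which cannot be taken
 * from the context that detects the MTU change; the change is
 * therefore applied from this work item.
 */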
 296
 297/**
 298 * setup_link_status_change_wq - Sets up the mtu status change work
 299 * @netdev: network device
 300 */
 301static int setup_link_status_change_wq(struct net_device *netdev)
 302{
 303	struct lio *lio = GET_LIO(netdev);
 304	struct octeon_device *oct = lio->oct_dev;
 305
 306	lio->link_status_wq.wq = alloc_workqueue("link-status",
 307						 WQ_MEM_RECLAIM, 0);
 308	if (!lio->link_status_wq.wq) {
 309		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
 310		return -1;
 311	}
 312	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
 313			  octnet_link_status_change);
 314	lio->link_status_wq.wk.ctxptr = lio;
 315
 316	return 0;
 317}
 318
 319static void cleanup_link_status_change_wq(struct net_device *netdev)
 320{
 321	struct lio *lio = GET_LIO(netdev);
 322
 323	if (lio->link_status_wq.wq) {
 324		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
 325		destroy_workqueue(lio->link_status_wq.wq);
 326	}
 327}
 328
 329/**
 330 * update_link_status - Update link status
 331 * @netdev: network device
 332 * @ls: link status structure
 333 *
 334 * Called on receipt of a link status response from the core application to
 335 * update each interface's link status.
 336 */
 337static void update_link_status(struct net_device *netdev,
 338			       union oct_link_status *ls)
 339{
 340	struct lio *lio = GET_LIO(netdev);
 341	int current_max_mtu = lio->linfo.link.s.mtu;
 342	struct octeon_device *oct = lio->oct_dev;
 343
 344	if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
 345		lio->linfo.link.u64 = ls->u64;
 346
 347		print_link_info(netdev);
 348		lio->link_changes++;
 349
 350		if (lio->linfo.link.s.link_up) {
 351			netif_carrier_on(netdev);
 352			wake_txqs(netdev);
 353		} else {
 354			netif_carrier_off(netdev);
 355			stop_txqs(netdev);
 356		}
 357
 358		if (lio->linfo.link.s.mtu != current_max_mtu) {
 359			dev_info(&oct->pci_dev->dev,
 360				 "Max MTU Changed from %d to %d\n",
 361				 current_max_mtu, lio->linfo.link.s.mtu);
 362			netdev->max_mtu = lio->linfo.link.s.mtu;
 363		}
 364
 365		if (lio->linfo.link.s.mtu < netdev->mtu) {
 366			dev_warn(&oct->pci_dev->dev,
 367				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
 368				 netdev->mtu, lio->linfo.link.s.mtu);
 369			queue_delayed_work(lio->link_status_wq.wq,
 370					   &lio->link_status_wq.wk.work, 0);
 371		}
 372	}
 373}
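/* The dev_set_mtu() call itself is deferred to link_status_wq (see
 * octnet_link_status_change() above): update_link_status() is reached
 * from response processing, where blocking on the RTNL is not an
 * option.
 */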
 374
 375/**
 376 * liquidio_vf_probe - PCI probe handler
 377 * @pdev: PCI device structure
 378 * @ent: unused
 379 */
 380static int
 381liquidio_vf_probe(struct pci_dev *pdev,
 382		  const struct pci_device_id __maybe_unused *ent)
 383{
 384	struct octeon_device *oct_dev = NULL;
 385
 386	oct_dev = octeon_allocate_device(pdev->device,
 387					 sizeof(struct octeon_device_priv));
 388
 389	if (!oct_dev) {
 390		dev_err(&pdev->dev, "Unable to allocate device\n");
 391		return -ENOMEM;
 392	}
 393	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
 394
 395	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
 396		 (u32)pdev->vendor, (u32)pdev->device);
 397
 398	/* Assign octeon_device for this device to the private data area. */
 399	pci_set_drvdata(pdev, oct_dev);
 400
 401	/* set linux specific device pointer */
 402	oct_dev->pci_dev = pdev;
 403
 404	oct_dev->subsystem_id = pdev->subsystem_vendor |
 405		(pdev->subsystem_device << 16);
 406
 407	if (octeon_device_init(oct_dev)) {
 408		liquidio_vf_remove(pdev);
 409		return -ENOMEM;
 410	}
 411
 412	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
 413
 414	return 0;
 415}
 416
 417/**
 418 * octeon_pci_flr - PCI FLR for each Octeon device.
 419 * @oct: octeon device
 420 */
 421static void octeon_pci_flr(struct octeon_device *oct)
 422{
 423	pci_save_state(oct->pci_dev);
 424
 425	pci_cfg_access_lock(oct->pci_dev);
 426
 427	/* Quiesce the device completely */
 428	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
 429			      PCI_COMMAND_INTX_DISABLE);
 430
 431	pcie_flr(oct->pci_dev);
 432
 433	pci_cfg_access_unlock(oct->pci_dev);
 434
 435	pci_restore_state(oct->pci_dev);
 436}
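/* Classic function-level-reset bracket: save config space, lock out
 * concurrent config accesses, mask INTx so no stale interrupt fires
 * across the reset, issue the FLR, then restore the saved state.
 */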
 437
 438/**
 439 * octeon_destroy_resources - Destroy resources associated with octeon device
 440 * @oct: octeon device
 441 */
 442static void octeon_destroy_resources(struct octeon_device *oct)
 443{
 444	struct octeon_device_priv *oct_priv = oct->priv;
 445	struct msix_entry *msix_entries;
 446	int i;
 447
 448	switch (atomic_read(&oct->status)) {
 449	case OCT_DEV_RUNNING:
 450	case OCT_DEV_CORE_OK:
 451		/* No more instructions will be forwarded. */
 452		atomic_set(&oct->status, OCT_DEV_IN_RESET);
 453
 454		oct->app_mode = CVM_DRV_INVALID_APP;
 455		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
 456			lio_get_state_string(&oct->status));
 457
 458		schedule_timeout_uninterruptible(HZ / 10);
 459
 460		fallthrough;
 461	case OCT_DEV_HOST_OK:
 462	case OCT_DEV_IO_QUEUES_DONE:
 463		if (lio_wait_for_instr_fetch(oct))
 464			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
 465
 466		if (wait_for_pending_requests(oct))
 467			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
 468
 469		/* Disable the input and output queues now. No more packets will
 470		 * arrive from Octeon, but we should wait for all packet
 471		 * processing to finish.
 472		 */
 473		oct->fn_list.disable_io_queues(oct);
 474
 475		if (lio_wait_for_oq_pkts(oct))
 476			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
 477
 478		/* Force all requests waiting to be fetched by OCTEON to
 479		 * complete.
 480		 */
 481		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
 482			struct octeon_instr_queue *iq;
 483
 484			if (!(oct->io_qmask.iq & BIT_ULL(i)))
 485				continue;
 486			iq = oct->instr_queue[i];
 487
 488			if (atomic_read(&iq->instr_pending)) {
 489				spin_lock_bh(&iq->lock);
 490				iq->fill_cnt = 0;
 491				iq->octeon_read_index = iq->host_write_index;
 492				iq->stats.instr_processed +=
 493					atomic_read(&iq->instr_pending);
 494				lio_process_iq_request_list(oct, iq, 0);
 495				spin_unlock_bh(&iq->lock);
 496			}
 497		}
 498
 499		lio_process_ordered_list(oct, 1);
 500		octeon_free_sc_done_list(oct);
 501		octeon_free_sc_zombie_list(oct);
 502
 503		fallthrough;
 504	case OCT_DEV_INTR_SET_DONE:
 505		/* Disable interrupts  */
 506		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
 507
 508		if (oct->msix_on) {
 509			msix_entries = (struct msix_entry *)oct->msix_entries;
 510			for (i = 0; i < oct->num_msix_irqs; i++) {
 511				if (oct->ioq_vector[i].vector) {
 512					irq_set_affinity_hint(
 513							msix_entries[i].vector,
 514							NULL);
 515					free_irq(msix_entries[i].vector,
 516						 &oct->ioq_vector[i]);
 517					oct->ioq_vector[i].vector = 0;
 518				}
 519			}
 520			pci_disable_msix(oct->pci_dev);
 521			kfree(oct->msix_entries);
 522			oct->msix_entries = NULL;
 523			kfree(oct->irq_name_storage);
 524			oct->irq_name_storage = NULL;
 525		}
 526		/* Soft reset the octeon device before exiting */
 527		if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE))
 528			octeon_pci_flr(oct);
 529		else
 530			cn23xx_vf_ask_pf_to_do_flr(oct);
 531
 532		fallthrough;
 533	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
 534		octeon_free_ioq_vector(oct);
 535
 536		fallthrough;
 537	case OCT_DEV_MBOX_SETUP_DONE:
 538		oct->fn_list.free_mbox(oct);
 539
 540		fallthrough;
 541	case OCT_DEV_IN_RESET:
 542	case OCT_DEV_DROQ_INIT_DONE:
 543		mdelay(100);
 544		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
 545			if (!(oct->io_qmask.oq & BIT_ULL(i)))
 546				continue;
 547			octeon_delete_droq(oct, i);
 548		}
 549
 550		fallthrough;
 551	case OCT_DEV_RESP_LIST_INIT_DONE:
 552		octeon_delete_response_list(oct);
 553
 554		fallthrough;
 555	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
 556		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
 557			if (!(oct->io_qmask.iq & BIT_ULL(i)))
 558				continue;
 559			octeon_delete_instr_queue(oct, i);
 560		}
 561
 562		fallthrough;
 563	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
 564		octeon_free_sc_buffer_pool(oct);
 565
 566		fallthrough;
 567	case OCT_DEV_DISPATCH_INIT_DONE:
 568		octeon_delete_dispatch_list(oct);
 569		cancel_delayed_work_sync(&oct->nic_poll_work.work);
 570
 571		fallthrough;
 572	case OCT_DEV_PCI_MAP_DONE:
 573		octeon_unmap_pci_barx(oct, 0);
 574		octeon_unmap_pci_barx(oct, 1);
 575
 576		fallthrough;
 577	case OCT_DEV_PCI_ENABLE_DONE:
 578		/* Disable the device, releasing the PCI INT */
 579		pci_disable_device(oct->pci_dev);
 580
 581		fallthrough;
 582	case OCT_DEV_BEGIN_STATE:
 583		/* Nothing to be done here either */
 584		break;
 585	}
 586
 587	tasklet_kill(&oct_priv->droq_tasklet);
 588}
 589
 590/**
 591 * send_rx_ctrl_cmd - Send Rx control command
 592 * @lio: per-network private data
 593 * @start_stop: whether to start or stop
 594 */
 595static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
 596{
 597	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
 598	struct octeon_soft_command *sc;
 599	union octnet_cmd *ncmd;
 600	int retval;
 601
 602	if (oct->props[lio->ifidx].rx_on == start_stop)
 603		return 0;
 604
 605	sc = (struct octeon_soft_command *)
 606		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
 607					  16, 0);
 608	if (!sc) {
 609		netif_info(lio, rx_err, lio->netdev,
 610			   "Failed to allocate octeon_soft_command struct\n");
 611		return -ENOMEM;
 612	}
 613
 614	ncmd = (union octnet_cmd *)sc->virtdptr;
 615
 616	ncmd->u64 = 0;
 617	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
 618	ncmd->s.param1 = start_stop;
 619
 620	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
 621
 622	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
 623
 624	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
 625				    OPCODE_NIC_CMD, 0, 0, 0);
 626
 627	init_completion(&sc->complete);
 628	sc->sc_status = OCTEON_REQUEST_PENDING;
 629
 630	retval = octeon_send_soft_command(oct, sc);
 631	if (retval == IQ_SEND_FAILED) {
 632		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
 633		octeon_free_soft_command(oct, sc);
 634	} else {
 635		/* Sleep on a wait queue till the cond flag indicates that the
 636		 * response arrived or timed-out.
 637		 */
 638		retval = wait_for_sc_completion_timeout(oct, sc, 0);
 639		if (retval)
 640			return retval;
 641
 642		oct->props[lio->ifidx].rx_on = start_stop;
 643		WRITE_ONCE(sc->caller_is_done, true);
 644	}
 645
 646	return retval;
 647}
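/* Ownership of sc is asymmetric above: on a send failure the caller
 * frees it immediately; after a successful completion the caller sets
 * sc->caller_is_done so the response machinery can reclaim it; and on
 * a wait timeout the soft command is presumably left for the zombie
 * list to clean up.
 */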
 648
 649/**
 650 * liquidio_destroy_nic_device - Destroy NIC device interface
 651 * @oct: octeon device
 652 * @ifidx: which interface to destroy
 653 *
 654 * Cleanup associated with each interface for an Octeon device when NIC
 655 * module is being unloaded or if initialization fails during load.
 656 */
 657static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 658{
 659	struct net_device *netdev = oct->props[ifidx].netdev;
 660	struct octeon_device_priv *oct_priv = oct->priv;
 661	struct napi_struct *napi, *n;
 662	struct lio *lio;
 663
 664	if (!netdev) {
 665		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
 666			__func__, ifidx);
 667		return;
 668	}
 669
 670	lio = GET_LIO(netdev);
 671
 672	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
 673
 674	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
 675		liquidio_stop(netdev);
 676
 677	if (oct->props[lio->ifidx].napi_enabled == 1) {
 678		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 679			napi_disable(napi);
 680
 681		oct->props[lio->ifidx].napi_enabled = 0;
 682
 683		oct->droq[0]->ops.poll_mode = 0;
 684	}
 685
 686	/* Delete NAPI */
 687	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 688		netif_napi_del(napi);
 689
 690	tasklet_enable(&oct_priv->droq_tasklet);
 691
 692	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
 693		unregister_netdev(netdev);
 694
 695	cleanup_rx_oom_poll_fn(netdev);
 696
 697	cleanup_link_status_change_wq(netdev);
 698
 699	lio_delete_glists(lio);
 700
 701	free_netdev(netdev);
 702
 703	oct->props[ifidx].gmxport = -1;
 704
 705	oct->props[ifidx].netdev = NULL;
 706}
 707
 708/**
 709 * liquidio_stop_nic_module - Stop complete NIC functionality
 710 * @oct: octeon device
 711 */
 712static int liquidio_stop_nic_module(struct octeon_device *oct)
 713{
 714	struct lio *lio;
 715	int i, j;
 716
 717	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
 718	if (!oct->ifcount) {
 719		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
 720		return 1;
 721	}
 722
 723	spin_lock_bh(&oct->cmd_resp_wqlock);
 724	oct->cmd_resp_state = OCT_DRV_OFFLINE;
 725	spin_unlock_bh(&oct->cmd_resp_wqlock);
 726
 727	for (i = 0; i < oct->ifcount; i++) {
 728		lio = GET_LIO(oct->props[i].netdev);
 729		for (j = 0; j < oct->num_oqs; j++)
 730			octeon_unregister_droq_ops(oct,
 731						   lio->linfo.rxpciq[j].s.q_no);
 732	}
 733
 734	for (i = 0; i < oct->ifcount; i++)
 735		liquidio_destroy_nic_device(oct, i);
 736
 737	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
 738	return 0;
 739}
 740
 741/**
 742 * liquidio_vf_remove - Cleans up resources at unload time
 743 * @pdev: PCI device structure
 744 */
 745static void liquidio_vf_remove(struct pci_dev *pdev)
 746{
 747	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
 748
 749	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
 750
 751	if (oct_dev->app_mode == CVM_DRV_NIC_APP)
 752		liquidio_stop_nic_module(oct_dev);
 753
 754	/* Reset the octeon device and cleanup all memory allocated for
 755	 * the octeon device by driver.
 756	 */
 757	octeon_destroy_resources(oct_dev);
 758
 759	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
 760
 761	/* This octeon device has been removed. Update the global
 762	 * data structure to reflect this. Free the device structure.
 763	 */
 764	octeon_free_device_mem(oct_dev);
 765}
 766
 767/**
 768 * octeon_pci_os_setup - PCI initialization for each Octeon device.
 769 * @oct: octeon device
 770 */
 771static int octeon_pci_os_setup(struct octeon_device *oct)
 772{
 773#ifdef CONFIG_PCI_IOV
 774	/* setup PCI stuff first */
 775	if (!oct->pci_dev->physfn)
 776		octeon_pci_flr(oct);
 777#endif
 778
 779	if (pci_enable_device(oct->pci_dev)) {
 780		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
 781		return 1;
 782	}
 783
 784	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
 785		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
 786		pci_disable_device(oct->pci_dev);
 787		return 1;
 788	}
 789
 790	/* Enable PCI DMA Master. */
 791	pci_set_master(oct->pci_dev);
 792
 793	return 0;
 794}
 795
 796/**
 797 * free_netbuf - Unmap and free network buffer
 798 * @buf: buffer
 799 */
 800static void free_netbuf(void *buf)
 801{
 802	struct octnet_buf_free_info *finfo;
 803	struct sk_buff *skb;
 804	struct lio *lio;
 805
 806	finfo = (struct octnet_buf_free_info *)buf;
 807	skb = finfo->skb;
 808	lio = finfo->lio;
 809
 810	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
 811			 DMA_TO_DEVICE);
 812
 813	tx_buffer_free(skb);
 814}
 815
 816/**
 817 * free_netsgbuf - Unmap and free gather buffer
 818 * @buf: buffer
 819 */
 820static void free_netsgbuf(void *buf)
 821{
 822	struct octnet_buf_free_info *finfo;
 823	struct octnic_gather *g;
 824	struct sk_buff *skb;
 825	int i, frags, iq;
 826	struct lio *lio;
 827
 828	finfo = (struct octnet_buf_free_info *)buf;
 829	skb = finfo->skb;
 830	lio = finfo->lio;
 831	g = finfo->g;
 832	frags = skb_shinfo(skb)->nr_frags;
 833
 834	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
 835			 g->sg[0].ptr[0], (skb->len - skb->data_len),
 836			 DMA_TO_DEVICE);
 837
 838	i = 1;
 839	while (frags--) {
 840		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 841
 842		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
 843			       g->sg[(i >> 2)].ptr[(i & 3)],
 844			       skb_frag_size(frag), DMA_TO_DEVICE);
 845		i++;
 846	}
 847
 848	iq = skb_iq(lio->oct_dev, skb);
 849
 850	spin_lock(&lio->glist_lock[iq]);
 851	list_add_tail(&g->list, &lio->glist[iq]);
 852	spin_unlock(&lio->glist_lock[iq]);
 853
 854	tx_buffer_free(skb);
 855}
 856
 857/**
 858 * free_netsgbuf_with_resp - Unmap and free gather buffer with response
 859 * @buf: buffer
 860 */
 861static void free_netsgbuf_with_resp(void *buf)
 862{
 863	struct octnet_buf_free_info *finfo;
 864	struct octeon_soft_command *sc;
 865	struct octnic_gather *g;
 866	struct sk_buff *skb;
 867	int i, frags, iq;
 868	struct lio *lio;
 869
 870	sc = (struct octeon_soft_command *)buf;
 871	skb = (struct sk_buff *)sc->callback_arg;
 872	finfo = (struct octnet_buf_free_info *)&skb->cb;
 873
 874	lio = finfo->lio;
 875	g = finfo->g;
 876	frags = skb_shinfo(skb)->nr_frags;
 877
 878	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
 879			 g->sg[0].ptr[0], (skb->len - skb->data_len),
 880			 DMA_TO_DEVICE);
 881
 882	i = 1;
 883	while (frags--) {
 884		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 885
 886		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
 887			       g->sg[(i >> 2)].ptr[(i & 3)],
 888			       skb_frag_size(frag), DMA_TO_DEVICE);
 889		i++;
 890	}
 891
 892	iq = skb_iq(lio->oct_dev, skb);
 893
 894	spin_lock(&lio->glist_lock[iq]);
 895	list_add_tail(&g->list, &lio->glist[iq]);
 896	spin_unlock(&lio->glist_lock[iq]);
 897
 898	/* Don't free the skb yet */
 899}
 900
 901/**
 902 * liquidio_open - Net device open for LiquidIO
 903 * @netdev: network device
 904 */
 905static int liquidio_open(struct net_device *netdev)
 906{
 907	struct lio *lio = GET_LIO(netdev);
 908	struct octeon_device *oct = lio->oct_dev;
 909	struct octeon_device_priv *oct_priv = oct->priv;
 910	struct napi_struct *napi, *n;
 911	int ret = 0;
 912
 913	if (!oct->props[lio->ifidx].napi_enabled) {
 914		tasklet_disable(&oct_priv->droq_tasklet);
 915
 916		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 917			napi_enable(napi);
 918
 919		oct->props[lio->ifidx].napi_enabled = 1;
 920
 921		oct->droq[0]->ops.poll_mode = 1;
 922	}
 923
 924	ifstate_set(lio, LIO_IFSTATE_RUNNING);
 925
 926	/* Ready for link status updates */
 927	lio->intf_open = 1;
 928
 929	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
 930	start_txqs(netdev);
 931
 932	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
 933	lio->stats_wk.ctxptr = lio;
 934	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
 935					(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
 936
 937	/* tell Octeon to start forwarding packets to host */
 938	ret = send_rx_ctrl_cmd(lio, 1);
 939	if (ret)
 940		return ret;
 941
 942	dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
 943
 944	return ret;
 945}
 946
 947/**
  948 * liquidio_stop - Net device stop for LiquidIO
 949 * @netdev: network device
 950 */
 951static int liquidio_stop(struct net_device *netdev)
 952{
 953	struct lio *lio = GET_LIO(netdev);
 954	struct octeon_device *oct = lio->oct_dev;
 955	struct octeon_device_priv *oct_priv = oct->priv;
 956	struct napi_struct *napi, *n;
 957	int ret = 0;
 958
 959	/* tell Octeon to stop forwarding packets to host */
 960	ret = send_rx_ctrl_cmd(lio, 0);
 961	if (ret)
 962		return ret;
 963
 964	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
 965	/* Inform that netif carrier is down */
 966	lio->intf_open = 0;
 967	lio->linfo.link.s.link_up = 0;
 968
 969	netif_carrier_off(netdev);
 970	lio->link_changes++;
 971
 972	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
 973
 974	stop_txqs(netdev);
 975
 976	/* Wait for any pending Rx descriptors */
 977	if (lio_wait_for_clean_oq(oct))
 978		netif_info(lio, rx_err, lio->netdev,
 979			   "Proceeding with stop interface after partial RX desc processing\n");
 980
 981	if (oct->props[lio->ifidx].napi_enabled == 1) {
 982		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 983			napi_disable(napi);
 984
 985		oct->props[lio->ifidx].napi_enabled = 0;
 986
 987		oct->droq[0]->ops.poll_mode = 0;
 988
 989		tasklet_enable(&oct_priv->droq_tasklet);
 990	}
 991
 992	cancel_delayed_work_sync(&lio->stats_wk.work);
 993
 994	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
 995
 996	return ret;
 997}
 998
 999/**
 1000 * get_new_flags - Convert net device flags to an octnet_ifflags mask
 1001 * @netdev: network device
 1002 *
 1003 * This routine generates an octnet_ifflags mask from the net device flags
 1004 * received from the OS.
1005 */
1006static enum octnet_ifflags get_new_flags(struct net_device *netdev)
1007{
1008	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1009
1010	if (netdev->flags & IFF_PROMISC)
1011		f |= OCTNET_IFFLAG_PROMISC;
1012
1013	if (netdev->flags & IFF_ALLMULTI)
1014		f |= OCTNET_IFFLAG_ALLMULTI;
1015
1016	if (netdev->flags & IFF_MULTICAST) {
1017		f |= OCTNET_IFFLAG_MULTICAST;
1018
1019		/* Accept all multicast addresses if there are more than we
1020		 * can handle
1021		 */
1022		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1023			f |= OCTNET_IFFLAG_ALLMULTI;
1024	}
1025
1026	if (netdev->flags & IFF_BROADCAST)
1027		f |= OCTNET_IFFLAG_BROADCAST;
1028
1029	return f;
1030}
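/* Worked example: an interface with IFF_BROADCAST and IFF_MULTICAST
 * set and promiscuous mode enabled ("ip link set eth0 promisc on")
 * maps to OCTNET_IFFLAG_UNICAST | OCTNET_IFFLAG_PROMISC |
 * OCTNET_IFFLAG_MULTICAST | OCTNET_IFFLAG_BROADCAST.
 */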
1031
1032static void liquidio_set_uc_list(struct net_device *netdev)
1033{
1034	struct lio *lio = GET_LIO(netdev);
1035	struct octeon_device *oct = lio->oct_dev;
1036	struct octnic_ctrl_pkt nctrl;
1037	struct netdev_hw_addr *ha;
1038	u64 *mac;
1039
1040	if (lio->netdev_uc_count == netdev_uc_count(netdev))
1041		return;
1042
1043	if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
1044		dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
1045		return;
1046	}
1047
1048	lio->netdev_uc_count = netdev_uc_count(netdev);
1049
1050	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1051	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
1052	nctrl.ncmd.s.more = lio->netdev_uc_count;
1053	nctrl.ncmd.s.param1 = oct->vf_num;
1054	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1055	nctrl.netpndev = (u64)netdev;
1056	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1057
1058	/* copy all the addresses into the udd */
1059	mac = &nctrl.udd[0];
1060	netdev_for_each_uc_addr(ha, netdev) {
1061		ether_addr_copy(((u8 *)mac) + 2, ha->addr);
1062		mac++;
1063	}
1064
1065	octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1066}
1067
1068/**
1069 * liquidio_set_mcast_list - Net device set_multicast_list
1070 * @netdev: network device
1071 */
1072static void liquidio_set_mcast_list(struct net_device *netdev)
1073{
1074	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1075	struct lio *lio = GET_LIO(netdev);
1076	struct octeon_device *oct = lio->oct_dev;
1077	struct octnic_ctrl_pkt nctrl;
1078	struct netdev_hw_addr *ha;
1079	u64 *mc;
1080	int ret;
1081
1082	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1083
1084	/* Create a ctrl pkt command to be sent to core app. */
1085	nctrl.ncmd.u64 = 0;
1086	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1087	nctrl.ncmd.s.param1 = get_new_flags(netdev);
1088	nctrl.ncmd.s.param2 = mc_count;
1089	nctrl.ncmd.s.more = mc_count;
1090	nctrl.netpndev = (u64)netdev;
1091	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1092
1093	/* copy all the addresses into the udd */
1094	mc = &nctrl.udd[0];
1095	netdev_for_each_mc_addr(ha, netdev) {
1096		*mc = 0;
1097		ether_addr_copy(((u8 *)mc) + 2, ha->addr);
1098		/* no need to swap bytes */
1099		if (++mc > &nctrl.udd[mc_count])
1100			break;
1101	}
1102
1103	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1104
1105	/* Apparently, any activity in this call from the kernel has to
1106	 * be atomic. So we won't wait for response.
1107	 */
1108
1109	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1110	if (ret) {
1111		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1112			ret);
1113	}
1114
1115	liquidio_set_uc_list(netdev);
1116}
1117
1118/**
1119 * liquidio_set_mac - Net device set_mac_address
1120 * @netdev: network device
1121 * @p: opaque pointer to sockaddr
1122 */
1123static int liquidio_set_mac(struct net_device *netdev, void *p)
1124{
1125	struct sockaddr *addr = (struct sockaddr *)p;
1126	struct lio *lio = GET_LIO(netdev);
1127	struct octeon_device *oct = lio->oct_dev;
1128	struct octnic_ctrl_pkt nctrl;
1129	int ret = 0;
1130
1131	if (!is_valid_ether_addr(addr->sa_data))
1132		return -EADDRNOTAVAIL;
1133
1134	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
1135		return 0;
1136
1137	if (lio->linfo.macaddr_is_admin_asgnd)
1138		return -EPERM;
1139
1140	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1141
1142	nctrl.ncmd.u64 = 0;
1143	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
1144	nctrl.ncmd.s.param1 = 0;
1145	nctrl.ncmd.s.more = 1;
1146	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1147	nctrl.netpndev = (u64)netdev;
1148
1149	nctrl.udd[0] = 0;
1150	/* The MAC Address is presented in network byte order. */
1151	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);
1152
1153	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1154	if (ret < 0) {
1155		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
1156		return -ENOMEM;
1157	}
1158
1159	if (nctrl.sc_status ==
1160	    FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) {
1161		dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n");
1162		return -EPERM;
1163	}
1164
1165	eth_hw_addr_set(netdev, addr->sa_data);
1166	ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
1167
1168	return 0;
1169}
1170
1171static void
1172liquidio_get_stats64(struct net_device *netdev,
1173		     struct rtnl_link_stats64 *lstats)
1174{
1175	struct lio *lio = GET_LIO(netdev);
1176	struct octeon_device *oct;
1177	u64 pkts = 0, drop = 0, bytes = 0;
1178	struct oct_droq_stats *oq_stats;
1179	struct oct_iq_stats *iq_stats;
1180	int i, iq_no, oq_no;
1181
1182	oct = lio->oct_dev;
1183
1184	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1185		return;
1186
1187	for (i = 0; i < oct->num_iqs; i++) {
1188		iq_no = lio->linfo.txpciq[i].s.q_no;
1189		iq_stats = &oct->instr_queue[iq_no]->stats;
1190		pkts += iq_stats->tx_done;
1191		drop += iq_stats->tx_dropped;
1192		bytes += iq_stats->tx_tot_bytes;
1193	}
1194
1195	lstats->tx_packets = pkts;
1196	lstats->tx_bytes = bytes;
1197	lstats->tx_dropped = drop;
1198
1199	pkts = 0;
1200	drop = 0;
1201	bytes = 0;
1202
1203	for (i = 0; i < oct->num_oqs; i++) {
1204		oq_no = lio->linfo.rxpciq[i].s.q_no;
1205		oq_stats = &oct->droq[oq_no]->stats;
1206		pkts += oq_stats->rx_pkts_received;
1207		drop += (oq_stats->rx_dropped +
1208			 oq_stats->dropped_nodispatch +
1209			 oq_stats->dropped_toomany +
1210			 oq_stats->dropped_nomem);
1211		bytes += oq_stats->rx_bytes_received;
1212	}
1213
1214	lstats->rx_bytes = bytes;
1215	lstats->rx_packets = pkts;
1216	lstats->rx_dropped = drop;
1217
1218	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
1219
1220	/* detailed rx_errors: */
1221	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
1222	/* recved pkt with crc error */
1223	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
1224	/* recv'd frame alignment error */
1225	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
1226
1227	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
1228			    lstats->rx_frame_errors;
1229
1230	/* detailed tx_errors */
1231	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
1232	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
1233
1234	lstats->tx_errors = lstats->tx_aborted_errors +
1235		lstats->tx_carrier_errors;
1236}
1237
1238/**
1239 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
1240 * @netdev: network device
1241 * @ifr: interface request
1242 */
1243static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
1244{
1245	struct lio *lio = GET_LIO(netdev);
1246	struct hwtstamp_config conf;
1247
1248	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
1249		return -EFAULT;
1250
1251	switch (conf.tx_type) {
1252	case HWTSTAMP_TX_ON:
1253	case HWTSTAMP_TX_OFF:
1254		break;
1255	default:
1256		return -ERANGE;
1257	}
1258
1259	switch (conf.rx_filter) {
1260	case HWTSTAMP_FILTER_NONE:
1261		break;
1262	case HWTSTAMP_FILTER_ALL:
1263	case HWTSTAMP_FILTER_SOME:
1264	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1265	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1266	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1267	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1268	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1269	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1270	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1271	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1272	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1273	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1274	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1275	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1276	case HWTSTAMP_FILTER_NTP_ALL:
1277		conf.rx_filter = HWTSTAMP_FILTER_ALL;
1278		break;
1279	default:
1280		return -ERANGE;
1281	}
1282
1283	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
1284		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1285
1286	else
1287		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1288
1289	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
1290}
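/* hwtstamp_ioctl() is reached from userspace via SIOCSHWTSTAMP on a
 * socket.  A minimal userspace sketch (the interface name "eth0" is
 * only an example):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	snprintf(ifr.ifr_name, IFNAMSIZ, "eth0");
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter holds the filter actually applied; this
 * driver coarsens every accepted PTP filter to HWTSTAMP_FILTER_ALL.
 */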
1291
1292/**
1293 * liquidio_ioctl - ioctl handler
1294 * @netdev: network device
1295 * @ifr: interface request
1296 * @cmd: command
1297 */
1298static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1299{
1300	switch (cmd) {
1301	case SIOCSHWTSTAMP:
1302		return hwtstamp_ioctl(netdev, ifr);
1303	default:
1304		return -EOPNOTSUPP;
1305	}
1306}
1307
1308static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
1309{
1310	struct sk_buff *skb = (struct sk_buff *)buf;
1311	struct octnet_buf_free_info *finfo;
1312	struct oct_timestamp_resp *resp;
1313	struct octeon_soft_command *sc;
1314	struct lio *lio;
1315
1316	finfo = (struct octnet_buf_free_info *)skb->cb;
1317	lio = finfo->lio;
1318	sc = finfo->sc;
1319	oct = lio->oct_dev;
1320	resp = (struct oct_timestamp_resp *)sc->virtrptr;
1321
1322	if (status != OCTEON_REQUEST_DONE) {
1323		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
1324			CVM_CAST64(status));
1325		resp->timestamp = 0;
1326	}
1327
1328	octeon_swap_8B_data(&resp->timestamp, 1);
1329
1330	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
1331		struct skb_shared_hwtstamps ts;
1332		u64 ns = resp->timestamp;
1333
1334		netif_info(lio, tx_done, lio->netdev,
1335			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
1336			   skb, (unsigned long long)ns);
1337		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
1338		skb_tstamp_tx(skb, &ts);
1339	}
1340
1341	octeon_free_soft_command(oct, sc);
1342	tx_buffer_free(skb);
1343}
1344
1345/* send_nic_timestamp_pkt - Send a data packet that will be timestamped
1346 * @oct: octeon device
1347 * @ndata: pointer to network data
1348 * @finfo: pointer to private network data
1349 */
1350static int send_nic_timestamp_pkt(struct octeon_device *oct,
1351				  struct octnic_data_pkt *ndata,
1352				  struct octnet_buf_free_info *finfo,
1353				  int xmit_more)
1354{
1355	struct octeon_soft_command *sc;
1356	int ring_doorbell;
1357	struct lio *lio;
1358	int retval;
1359	u32 len;
1360
1361	lio = finfo->lio;
1362
1363	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
1364					    sizeof(struct oct_timestamp_resp));
1365	finfo->sc = sc;
1366
1367	if (!sc) {
1368		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
1369		return IQ_SEND_FAILED;
1370	}
1371
1372	if (ndata->reqtype == REQTYPE_NORESP_NET)
1373		ndata->reqtype = REQTYPE_RESP_NET;
1374	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
1375		ndata->reqtype = REQTYPE_RESP_NET_SG;
1376
1377	sc->callback = handle_timestamp;
1378	sc->callback_arg = finfo->skb;
1379	sc->iq_no = ndata->q_no;
1380
1381	len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;
1382
1383	ring_doorbell = !xmit_more;
1384
1385	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
1386				     sc, len, ndata->reqtype);
1387
1388	if (retval == IQ_SEND_FAILED) {
1389		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
1390			retval);
1391		octeon_free_soft_command(oct, sc);
1392	} else {
1393		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
1394	}
1395
1396	return retval;
1397}
1398
1399/**
 1400 * liquidio_xmit - Transmit network packets to the Octeon interface
1401 * @skb: skbuff struct to be passed to network layer.
1402 * @netdev: pointer to network device
1403 * @returns whether the packet was transmitted to the device okay or not
1404 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
1405 */
1406static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
1407{
1408	struct octnet_buf_free_info *finfo;
1409	union octnic_cmd_setup cmdsetup;
1410	struct octnic_data_pkt ndata;
1411	struct octeon_instr_irh *irh;
1412	struct oct_iq_stats *stats;
1413	struct octeon_device *oct;
1414	int q_idx = 0, iq_no = 0;
1415	union tx_info *tx_info;
1416	int xmit_more = 0;
1417	struct lio *lio;
1418	int status = 0;
1419	u64 dptr = 0;
1420	u32 tag = 0;
1421	int j;
1422
1423	lio = GET_LIO(netdev);
1424	oct = lio->oct_dev;
1425
1426	q_idx = skb_iq(lio->oct_dev, skb);
1427	tag = q_idx;
1428	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
1429
1430	stats = &oct->instr_queue[iq_no]->stats;
1431
1432	/* Check for all conditions in which the current packet cannot be
1433	 * transmitted.
1434	 */
1435	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
1436	    (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
1437		netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
1438			   lio->linfo.link.s.link_up);
1439		goto lio_xmit_failed;
1440	}
1441
1442	/* Use space in skb->cb to store info used to unmap and
1443	 * free the buffers.
1444	 */
1445	finfo = (struct octnet_buf_free_info *)skb->cb;
1446	finfo->lio = lio;
1447	finfo->skb = skb;
1448	finfo->sc = NULL;
1449
1450	/* Prepare the attributes for the data to be passed to OSI. */
1451	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
1452
1453	ndata.buf = finfo;
1454
1455	ndata.q_no = iq_no;
1456
1457	if (octnet_iq_is_full(oct, ndata.q_no)) {
1458		/* defer sending if queue is full */
1459		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
1460			   ndata.q_no);
1461		stats->tx_iq_busy++;
1462		return NETDEV_TX_BUSY;
1463	}
1464
1465	ndata.datasize = skb->len;
1466
1467	cmdsetup.u64 = 0;
1468	cmdsetup.s.iq_no = iq_no;
1469
1470	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1471		if (skb->encapsulation) {
1472			cmdsetup.s.tnl_csum = 1;
1473			stats->tx_vxlan++;
1474		} else {
1475			cmdsetup.s.transport_csum = 1;
1476		}
1477	}
1478	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1479		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1480		cmdsetup.s.timestamp = 1;
1481	}
1482
1483	if (!skb_shinfo(skb)->nr_frags) {
1484		cmdsetup.s.u.datasize = skb->len;
1485		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1486		/* Offload checksum calculation for TCP/UDP packets */
1487		dptr = dma_map_single(&oct->pci_dev->dev,
1488				      skb->data,
1489				      skb->len,
1490				      DMA_TO_DEVICE);
1491		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
1492			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
1493				__func__);
1494			return NETDEV_TX_BUSY;
1495		}
1496
1497		ndata.cmd.cmd3.dptr = dptr;
1498		finfo->dptr = dptr;
1499		ndata.reqtype = REQTYPE_NORESP_NET;
1500
1501	} else {
1502		skb_frag_t *frag;
1503		struct octnic_gather *g;
1504		int i, frags;
1505
1506		spin_lock(&lio->glist_lock[q_idx]);
1507		g = (struct octnic_gather *)
1508			lio_list_delete_head(&lio->glist[q_idx]);
1509		spin_unlock(&lio->glist_lock[q_idx]);
1510
1511		if (!g) {
1512			netif_info(lio, tx_err, lio->netdev,
1513				   "Transmit scatter gather: glist null!\n");
1514			goto lio_xmit_failed;
1515		}
1516
1517		cmdsetup.s.gather = 1;
1518		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
1519		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1520
1521		memset(g->sg, 0, g->sg_size);
1522
1523		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
1524						 skb->data,
1525						 (skb->len - skb->data_len),
1526						 DMA_TO_DEVICE);
1527		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
1528			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
1529				__func__);
1530			return NETDEV_TX_BUSY;
1531		}
1532		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
1533
1534		frags = skb_shinfo(skb)->nr_frags;
1535		i = 1;
1536		while (frags--) {
1537			frag = &skb_shinfo(skb)->frags[i - 1];
1538
1539			g->sg[(i >> 2)].ptr[(i & 3)] =
1540				skb_frag_dma_map(&oct->pci_dev->dev,
1541						 frag, 0, skb_frag_size(frag),
1542						 DMA_TO_DEVICE);
1543			if (dma_mapping_error(&oct->pci_dev->dev,
1544					      g->sg[i >> 2].ptr[i & 3])) {
1545				dma_unmap_single(&oct->pci_dev->dev,
1546						 g->sg[0].ptr[0],
1547						 skb->len - skb->data_len,
1548						 DMA_TO_DEVICE);
1549				for (j = 1; j < i; j++) {
1550					frag = &skb_shinfo(skb)->frags[j - 1];
1551					dma_unmap_page(&oct->pci_dev->dev,
1552						       g->sg[j >> 2].ptr[j & 3],
1553						       skb_frag_size(frag),
1554						       DMA_TO_DEVICE);
1555				}
1556				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
1557					__func__);
1558				return NETDEV_TX_BUSY;
1559			}
1560
1561			add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
1562				    (i & 3));
1563			i++;
1564		}
1565
1566		dptr = g->sg_dma_ptr;
1567
1568		ndata.cmd.cmd3.dptr = dptr;
1569		finfo->dptr = dptr;
1570		finfo->g = g;
1571
1572		ndata.reqtype = REQTYPE_NORESP_NET_SG;
1573	}
1574
1575	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
1576	tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
1577
1578	if (skb_shinfo(skb)->gso_size) {
1579		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
1580		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
1581	}
1582
1583	/* HW insert VLAN tag */
1584	if (skb_vlan_tag_present(skb)) {
1585		irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
1586		irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
1587	}
1588
1589	xmit_more = netdev_xmit_more();
1590
1591	if (unlikely(cmdsetup.s.timestamp))
1592		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
1593	else
1594		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
1595	if (status == IQ_SEND_FAILED)
1596		goto lio_xmit_failed;
1597
1598	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
1599
1600	if (status == IQ_SEND_STOP) {
1601		dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
1602			iq_no);
1603		netif_stop_subqueue(netdev, q_idx);
1604	}
1605
1606	netif_trans_update(netdev);
1607
1608	if (tx_info->s.gso_segs)
1609		stats->tx_done += tx_info->s.gso_segs;
1610	else
1611		stats->tx_done++;
1612	stats->tx_tot_bytes += ndata.datasize;
1613
1614	return NETDEV_TX_OK;
1615
1616lio_xmit_failed:
1617	stats->tx_dropped++;
1618	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
1619		   iq_no, stats->tx_dropped);
1620	if (dptr)
1621		dma_unmap_single(&oct->pci_dev->dev, dptr,
1622				 ndata.datasize, DMA_TO_DEVICE);
1623
1624	octeon_ring_doorbell_locked(oct, iq_no);
1625
1626	tx_buffer_free(skb);
1627	return NETDEV_TX_OK;
1628}
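/* Note the two failure modes above: a full IQ or a DMA mapping error
 * returns NETDEV_TX_BUSY with the skb untouched so the stack can
 * requeue it, whereas lio_xmit_failed consumes the skb, counts it as
 * dropped and still returns NETDEV_TX_OK, since returning BUSY after
 * freeing the buffer would lead to a use-after-free.
 */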
1629
1630/**
1631 * liquidio_tx_timeout - Network device Tx timeout
1632 * @netdev: pointer to network device
1633 * @txqueue: index of the hung transmit queue
1634 */
1635static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1636{
1637	struct lio *lio;
1638
1639	lio = GET_LIO(netdev);
1640
1641	netif_info(lio, tx_err, lio->netdev,
1642		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
1643		   netdev->stats.tx_dropped);
1644	netif_trans_update(netdev);
1645	wake_txqs(netdev);
1646}
1647
1648static int
1649liquidio_vlan_rx_add_vid(struct net_device *netdev,
1650			 __be16 proto __attribute__((unused)), u16 vid)
1651{
1652	struct lio *lio = GET_LIO(netdev);
1653	struct octeon_device *oct = lio->oct_dev;
1654	struct octnic_ctrl_pkt nctrl;
1655	int ret = 0;
1656
1657	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1658
1659	nctrl.ncmd.u64 = 0;
1660	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
1661	nctrl.ncmd.s.param1 = vid;
1662	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1663	nctrl.netpndev = (u64)netdev;
1664	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1665
1666	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1667	if (ret) {
1668		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
1669			ret);
1670		return -EPERM;
1671	}
1672
1673	return 0;
1674}
1675
1676static int
1677liquidio_vlan_rx_kill_vid(struct net_device *netdev,
1678			  __be16 proto __attribute__((unused)), u16 vid)
1679{
1680	struct lio *lio = GET_LIO(netdev);
1681	struct octeon_device *oct = lio->oct_dev;
1682	struct octnic_ctrl_pkt nctrl;
1683	int ret = 0;
1684
1685	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1686
1687	nctrl.ncmd.u64 = 0;
1688	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
1689	nctrl.ncmd.s.param1 = vid;
1690	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1691	nctrl.netpndev = (u64)netdev;
1692	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1693
1694	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1695	if (ret) {
1696		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
1697			ret);
1698		if (ret > 0)
1699			ret = -EIO;
1700	}
1701	return ret;
1702}
1703
1704/** Sending command to enable/disable RX checksum offload
1705 * @param netdev                pointer to network device
1706 * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
1707 * @param rx_cmd_bit            OCTNET_CMD_RXCSUM_ENABLE/
1708 *                              OCTNET_CMD_RXCSUM_DISABLE
1709 * @returns                     SUCCESS or FAILURE
1710 */
1711static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
1712				       u8 rx_cmd)
1713{
1714	struct lio *lio = GET_LIO(netdev);
1715	struct octeon_device *oct = lio->oct_dev;
1716	struct octnic_ctrl_pkt nctrl;
1717	int ret = 0;
1718
1719	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1720
1721	nctrl.ncmd.u64 = 0;
1722	nctrl.ncmd.s.cmd = command;
1723	nctrl.ncmd.s.param1 = rx_cmd;
1724	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1725	nctrl.netpndev = (u64)netdev;
1726	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1727
1728	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1729	if (ret) {
1730		dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
1731			ret);
1732		if (ret > 0)
1733			ret = -EIO;
1734	}
1735	return ret;
1736}
1737
1738/** Sending command to add/delete VxLAN UDP port to firmware
1739 * @param netdev                pointer to network device
1740 * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
1741 * @param vxlan_port            VxLAN port to be added or deleted
1742 * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
1743 *                              OCTNET_CMD_VXLAN_PORT_DEL
1744 * @returns                     SUCCESS or FAILURE
1745 */
1746static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
1747				       u16 vxlan_port, u8 vxlan_cmd_bit)
1748{
1749	struct lio *lio = GET_LIO(netdev);
1750	struct octeon_device *oct = lio->oct_dev;
1751	struct octnic_ctrl_pkt nctrl;
1752	int ret = 0;
1753
1754	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1755
1756	nctrl.ncmd.u64 = 0;
1757	nctrl.ncmd.s.cmd = command;
1758	nctrl.ncmd.s.more = vxlan_cmd_bit;
1759	nctrl.ncmd.s.param1 = vxlan_port;
1760	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1761	nctrl.netpndev = (u64)netdev;
1762	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1763
1764	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1765	if (ret) {
1766		dev_err(&oct->pci_dev->dev,
1767			"DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
1768			ret);
1769		if (ret > 0)
1770			ret = -EIO;
1771	}
1772	return ret;
1773}
1774
1775static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
1776					unsigned int table, unsigned int entry,
1777					struct udp_tunnel_info *ti)
1778{
1779	return liquidio_vxlan_port_command(netdev,
1780					   OCTNET_CMD_VXLAN_PORT_CONFIG,
1781					   htons(ti->port),
1782					   OCTNET_CMD_VXLAN_PORT_ADD);
1783}
1784
1785static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
1786					  unsigned int table,
1787					  unsigned int entry,
1788					  struct udp_tunnel_info *ti)
1789{
1790	return liquidio_vxlan_port_command(netdev,
1791					   OCTNET_CMD_VXLAN_PORT_CONFIG,
1792					   htons(ti->port),
1793					   OCTNET_CMD_VXLAN_PORT_DEL);
1794}
1795
1796static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
1797	.set_port	= liquidio_udp_tunnel_set_port,
1798	.unset_port	= liquidio_udp_tunnel_unset_port,
1799	.tables		= {
1800		{ .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
1801	},
1802};
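/* With a udp_tunnel_nic_info table registered, the core tracks VxLAN
 * port usage and calls set_port/unset_port as offloaded ports come and
 * go, replacing the older ndo_udp_tunnel_add/del callbacks.
 */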
1803
1804/** \brief Net device fix features
1805 * @param netdev  pointer to network device
1806 * @param request features requested
1807 * @returns updated features list
1808 */
1809static netdev_features_t liquidio_fix_features(struct net_device *netdev,
1810					       netdev_features_t request)
1811{
1812	struct lio *lio = netdev_priv(netdev);
1813
1814	if ((request & NETIF_F_RXCSUM) &&
1815	    !(lio->dev_capability & NETIF_F_RXCSUM))
1816		request &= ~NETIF_F_RXCSUM;
1817
1818	if ((request & NETIF_F_HW_CSUM) &&
1819	    !(lio->dev_capability & NETIF_F_HW_CSUM))
1820		request &= ~NETIF_F_HW_CSUM;
1821
1822	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
1823		request &= ~NETIF_F_TSO;
1824
1825	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
1826		request &= ~NETIF_F_TSO6;
1827
1828	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
1829		request &= ~NETIF_F_LRO;
1830
1831	/* Disable LRO if RXCSUM is off */
1832	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
1833	    (lio->dev_capability & NETIF_F_LRO))
1834		request &= ~NETIF_F_LRO;
1835
1836	return request;
1837}
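/* ndo_fix_features runs under the RTNL before a feature change is
 * committed; it must only sanitize the requested set (here, clamping
 * to lio->dev_capability) and never touch hardware.  The hardware side
 * is handled afterwards in liquidio_set_features().
 */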
1838
1839/** \brief Net device set features
1840 * @param netdev  pointer to network device
1841 * @param features features to enable/disable
1842 */
1843static int liquidio_set_features(struct net_device *netdev,
1844				 netdev_features_t features)
1845{
1846	struct lio *lio = netdev_priv(netdev);
1847
1848	if (!((netdev->features ^ features) & NETIF_F_LRO))
1849		return 0;
1850
1851	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
1852		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
1853				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1854	else if (!(features & NETIF_F_LRO) &&
1855		 (lio->dev_capability & NETIF_F_LRO))
1856		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
1857				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1858	if (!(netdev->features & NETIF_F_RXCSUM) &&
1859	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1860	    (features & NETIF_F_RXCSUM))
1861		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1862					    OCTNET_CMD_RXCSUM_ENABLE);
1863	else if ((netdev->features & NETIF_F_RXCSUM) &&
1864		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1865		 !(features & NETIF_F_RXCSUM))
1866		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1867					    OCTNET_CMD_RXCSUM_DISABLE);
1868
1869	return 0;
1870}
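
/*
 * A minimal sketch of the changed-bits idiom used above: XOR-ing the
 * current and requested feature masks leaves a bit set only for
 * features whose state would change, so no-op requests are cheap to
 * detect. lio_feature_changed() is a hypothetical helper, not part of
 * this driver:
 */
static inline bool lio_feature_changed(netdev_features_t cur,
				       netdev_features_t req,
				       netdev_features_t bit)
{
	return (cur ^ req) & bit;	/* true iff 'bit' flips state */
}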
1871
1872static const struct net_device_ops lionetdevops = {
1873	.ndo_open		= liquidio_open,
1874	.ndo_stop		= liquidio_stop,
1875	.ndo_start_xmit		= liquidio_xmit,
1876	.ndo_get_stats64	= liquidio_get_stats64,
1877	.ndo_set_mac_address	= liquidio_set_mac,
1878	.ndo_set_rx_mode	= liquidio_set_mcast_list,
1879	.ndo_tx_timeout		= liquidio_tx_timeout,
1880	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
1881	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
1882	.ndo_change_mtu		= liquidio_change_mtu,
1883	.ndo_eth_ioctl		= liquidio_ioctl,
1884	.ndo_fix_features	= liquidio_fix_features,
1885	.ndo_set_features	= liquidio_set_features,
1886};
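
/*
 * An assumed mapping from common user-space actions to the hooks above,
 * based on core networking behavior rather than code in this file:
 * 'ip link set <if> up' -> .ndo_open (liquidio_open); 'ip link set
 * <if> mtu N' -> .ndo_change_mtu; 'ethtool -K <if> ...' ->
 * .ndo_fix_features followed by .ndo_set_features.
 */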
1887
1888static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
1889{
1890	struct octeon_device *oct = (struct octeon_device *)buf;
1891	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
1892	union oct_link_status *ls;
1893	int gmxport = 0;
1894	int i;
1895
1896	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
1897		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1898			recv_pkt->buffer_size[0],
1899			recv_pkt->rh.r_nic_info.gmxport);
1900		goto nic_info_err;
1901	}
1902
1903	gmxport = recv_pkt->rh.r_nic_info.gmxport;
1904	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
1905		OCT_DROQ_INFO_SIZE);
1906
1907	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
1908
1909	for (i = 0; i < oct->ifcount; i++) {
1910		if (oct->props[i].gmxport == gmxport) {
1911			update_link_status(oct->props[i].netdev, ls);
1912			break;
1913		}
1914	}
1915
1916nic_info_err:
1917	for (i = 0; i < recv_pkt->buffer_count; i++)
1918		recv_buffer_free(recv_pkt->buffer_ptr[i]);
1919	octeon_free_recv_info(recv_info);
1920	return 0;
1921}
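
/*
 * What octeon_swap_8B_data() is assumed to do (a sketch under that
 * assumption, not the driver's own implementation): convert each
 * 64-bit word in place so fields the Octeon firmware writes big-endian
 * read correctly on little-endian hosts:
 */
static inline void lio_swap_8B_sketch(u64 *data, u32 words)
{
	while (words--) {
		cpu_to_be64s(data);	/* swaps on LE, no-op on BE */
		data++;
	}
}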
1922
1923/**
1924 * setup_nic_devices - Setup network interfaces
1925 * @octeon_dev:  octeon device
1926 *
1927 * Called during init time for each device. It assumes the NIC
1928 * is already up and running.  The link information for each
1929 * interface is passed in link_info.
1930 */
1931static int setup_nic_devices(struct octeon_device *octeon_dev)
1932{
1933	int retval, num_iqueues, num_oqueues;
1934	u32 resp_size, data_size;
1935	struct liquidio_if_cfg_resp *resp;
1936	struct octeon_soft_command *sc;
1937	union oct_nic_if_cfg if_cfg;
1938	struct octdev_props *props;
1939	struct net_device *netdev;
1940	struct lio_version *vdata;
1941	struct lio *lio = NULL;
1942	u8 mac[ETH_ALEN], i, j;
1943	u32 ifidx_or_pfnum;
1944
1945	ifidx_or_pfnum = octeon_dev->pf_num;
1946
1947	/* This is to handle link status changes */
1948	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
1949				    lio_nic_info, octeon_dev);
1950
1951	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
1952	 * They are handled directly.
1953	 */
1954	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
1955					free_netbuf);
1956
1957	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
1958					free_netsgbuf);
1959
1960	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
1961					free_netsgbuf_with_resp);
1962
1963	for (i = 0; i < octeon_dev->ifcount; i++) {
1964		resp_size = sizeof(struct liquidio_if_cfg_resp);
1965		data_size = sizeof(struct lio_version);
1966		sc = (struct octeon_soft_command *)
1967			octeon_alloc_soft_command(octeon_dev, data_size,
1968						  resp_size, 0);
		if (!sc) {
			dev_err(&octeon_dev->pci_dev->dev,
				"soft command allocation failed\n");
			goto setup_nic_dev_done;
		}

1969		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1970		vdata = (struct lio_version *)sc->virtdptr;
1971
1972		*((u64 *)vdata) = 0;
1973		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
1974		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
1975		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
1976
1977		if_cfg.u64 = 0;
1978
1979		if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
1980		if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
1981		if_cfg.s.base_queue = 0;
1982
1983		sc->iq_no = 0;
1984
1985		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
1986					    OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
1987					    0);
1988
1989		init_completion(&sc->complete);
1990		sc->sc_status = OCTEON_REQUEST_PENDING;
1991
1992		retval = octeon_send_soft_command(octeon_dev, sc);
1993		if (retval == IQ_SEND_FAILED) {
1994			dev_err(&octeon_dev->pci_dev->dev,
1995				"iq/oq config failed status: %x\n", retval);
1996			/* Soft instr is freed by driver in case of failure. */
1997			octeon_free_soft_command(octeon_dev, sc);
1998			return -EIO;
1999		}
2000
2001		/* Sleep on a wait queue until the cond flag indicates that the
2002		 * response arrived or timed out.
2003		 */
2004		retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
2005		if (retval)
2006			return retval;
2007
2008		retval = resp->status;
2009		if (retval) {
2010			dev_err(&octeon_dev->pci_dev->dev,
2011				"iq/oq config failed, retval = %d\n", retval);
2012			WRITE_ONCE(sc->caller_is_done, true);
2013			return -EIO;
2014		}
2015
2016		snprintf(octeon_dev->fw_info.liquidio_firmware_version,
2017			 32, "%s",
2018			 resp->cfg_info.liquidio_firmware_version);
2019
2020		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
2021				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
2022
2023		num_iqueues = hweight64(resp->cfg_info.iqmask);
2024		num_oqueues = hweight64(resp->cfg_info.oqmask);
2025
2026		if (!(num_iqueues) || !(num_oqueues)) {
2027			dev_err(&octeon_dev->pci_dev->dev,
2028				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
2029				resp->cfg_info.iqmask, resp->cfg_info.oqmask);
2030			WRITE_ONCE(sc->caller_is_done, true);
2031			goto setup_nic_dev_done;
2032		}
2033		dev_dbg(&octeon_dev->pci_dev->dev,
2034			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
2035			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
2036			num_iqueues, num_oqueues);
2037
2038		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
2039
2040		if (!netdev) {
2041			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
2042			WRITE_ONCE(sc->caller_is_done, true);
2043			goto setup_nic_dev_done;
2044		}
2045
2046		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
2047
2048		/* Associate the routines that will handle different
2049		 * netdev tasks.
2050		 */
2051		netdev->netdev_ops = &lionetdevops;
2052
2053		lio = GET_LIO(netdev);
2054
2055		memset(lio, 0, sizeof(struct lio));
2056
2057		lio->ifidx = ifidx_or_pfnum;
2058
2059		props = &octeon_dev->props[i];
2060		props->gmxport = resp->cfg_info.linfo.gmxport;
2061		props->netdev = netdev;
2062
2063		lio->linfo.num_rxpciq = num_oqueues;
2064		lio->linfo.num_txpciq = num_iqueues;
2065
2066		for (j = 0; j < num_oqueues; j++) {
2067			lio->linfo.rxpciq[j].u64 =
2068			    resp->cfg_info.linfo.rxpciq[j].u64;
2069		}
2070		for (j = 0; j < num_iqueues; j++) {
2071			lio->linfo.txpciq[j].u64 =
2072			    resp->cfg_info.linfo.txpciq[j].u64;
2073		}
2074
2075		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
2076		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
2077		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
2078		lio->linfo.macaddr_is_admin_asgnd =
2079			resp->cfg_info.linfo.macaddr_is_admin_asgnd;
2080		lio->linfo.macaddr_spoofchk =
2081			resp->cfg_info.linfo.macaddr_spoofchk;
2082
2083		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2084
2085		lio->dev_capability = NETIF_F_HIGHDMA
2086				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2087				      | NETIF_F_SG | NETIF_F_RXCSUM
2088				      | NETIF_F_TSO | NETIF_F_TSO6
2089				      | NETIF_F_GRO
2090				      | NETIF_F_LRO;
2091		netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
2092
2093		/* Copy of transmit encapsulation capabilities:
2094		 * TSO, TSO6, Checksums for this device
2095		 */
2096		lio->enc_dev_capability = NETIF_F_IP_CSUM
2097					  | NETIF_F_IPV6_CSUM
2098					  | NETIF_F_GSO_UDP_TUNNEL
2099					  | NETIF_F_HW_CSUM | NETIF_F_SG
2100					  | NETIF_F_RXCSUM
2101					  | NETIF_F_TSO | NETIF_F_TSO6
2102					  | NETIF_F_LRO;
2103
2104		netdev->hw_enc_features =
2105		    (lio->enc_dev_capability & ~NETIF_F_LRO);
2106		netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
2107
2108		netdev->vlan_features = lio->dev_capability;
2109		/* Add any unchangeable hw features */
2110		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
2111				       NETIF_F_HW_VLAN_CTAG_RX |
2112				       NETIF_F_HW_VLAN_CTAG_TX;
2113
2114		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2115
2116		netdev->hw_features = lio->dev_capability;
2117		netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2118
2119		/* MTU range: 68 - 16000 */
2120		netdev->min_mtu = LIO_MIN_MTU_SIZE;
2121		netdev->max_mtu = LIO_MAX_MTU_SIZE;
2122
2123		WRITE_ONCE(sc->caller_is_done, true);
2124
2125		/* Point to the properties for the octeon device to which this
2126		 * interface belongs.
2127		 */
2128		lio->oct_dev = octeon_dev;
2129		lio->octprops = props;
2130		lio->netdev = netdev;
2131
2132		dev_dbg(&octeon_dev->pci_dev->dev,
2133			"if%d gmx: %d hw_addr: 0x%llx\n", i,
2134			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2135
2136		/* 64-bit swap required on LE machines */
2137		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
2138		for (j = 0; j < ETH_ALEN; j++)
2139			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2140
2141		/* Copy MAC Address to OS network device structure */
2142		eth_hw_addr_set(netdev, mac);
2143
2144		if (liquidio_setup_io_queues(octeon_dev, i,
2145					     lio->linfo.num_txpciq,
2146					     lio->linfo.num_rxpciq)) {
2147			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
2148			goto setup_nic_dev_free;
2149		}
2150
2151		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2152
2153		/* For VFs, enable Octeon device interrupts here,
2154		 * as this is contingent upon IO queue setup
2155		 */
2156		octeon_dev->fn_list.enable_interrupt(octeon_dev,
2157						     OCTEON_ALL_INTR);
2158
2159		/* By default all interfaces on a single Octeon use the same
2160		 * tx and rx queues
2161		 */
2162		lio->txq = lio->linfo.txpciq[0].s.q_no;
2163		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2164
2165		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2166		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2167
2168		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
2169			dev_err(&octeon_dev->pci_dev->dev,
2170				"Gather list allocation failed\n");
2171			goto setup_nic_dev_free;
2172		}
2173
2174		/* Register ethtool support */
2175		liquidio_set_ethtool_ops(netdev);
2176		if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2177			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
2178		else
2179			octeon_dev->priv_flags = 0x0;
2180
2181		if (netdev->features & NETIF_F_LRO)
2182			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2183					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2184
2185		if (setup_link_status_change_wq(netdev))
2186			goto setup_nic_dev_free;
2187
2188		if (setup_rx_oom_poll_fn(netdev))
2189			goto setup_nic_dev_free;
2190
2191		/* Register the network device with the OS */
2192		if (register_netdev(netdev)) {
2193			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
2194			goto setup_nic_dev_free;
2195		}
2196
2197		dev_dbg(&octeon_dev->pci_dev->dev,
2198			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
2199			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2200		netif_carrier_off(netdev);
2201		lio->link_changes++;
2202
2203		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
2204
2205		/* Send a command to the firmware to enable Rx checksum offload
2206		 * by default at the time the LiquidIO driver is set up for
2207		 * this device.
2208		 */
2209		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2210					    OCTNET_CMD_RXCSUM_ENABLE);
2211		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
2212				     OCTNET_CMD_TXCSUM_ENABLE);
2213
2214		dev_dbg(&octeon_dev->pci_dev->dev,
2215			"NIC ifidx:%d Setup successful\n", i);
2216
2217		octeon_dev->no_speed_setting = 1;
2218	}
2219
2220	return 0;
2221
2222setup_nic_dev_free:
2223
2224	while (i--) {
2225		dev_err(&octeon_dev->pci_dev->dev,
2226			"NIC ifidx:%d Setup failed\n", i);
2227		liquidio_destroy_nic_device(octeon_dev, i);
2228	}
2229
2230setup_nic_dev_done:
2231
2232	return -ENODEV;
2233}
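
/*
 * A condensed restatement (disabled, for reading only) of the
 * soft-command round trip that setup_nic_devices() performs per
 * interface, with error paths trimmed:
 */
#if 0
	sc = octeon_alloc_soft_command(octeon_dev, data_size, resp_size, 0);
	octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0);
	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;
	if (octeon_send_soft_command(octeon_dev, sc) == IQ_SEND_FAILED)
		octeon_free_soft_command(octeon_dev, sc); /* sender frees */
	else if (!wait_for_sc_completion_timeout(octeon_dev, sc, 0))
		WRITE_ONCE(sc->caller_is_done, true); /* resp mgr frees */
#endif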
2234
2235/**
2236 * liquidio_init_nic_module - initialize the NIC
2237 * @oct: octeon device
2238 *
2239 * This initialization routine is called once the Octeon device application is
2240 * up and running
2241 */
2242static int liquidio_init_nic_module(struct octeon_device *oct)
2243{
2244	int num_nic_ports = 1;
2245	int i, retval = 0;
2246
2247	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
2248
2249	/* Only the default iq and oq were initialized; initialize the
2250	 * rest as well, and run the port_config command for each port.
2251	 */
2252	oct->ifcount = num_nic_ports;
2253	memset(oct->props, 0,
2254	       sizeof(struct octdev_props) * num_nic_ports);
2255
2256	for (i = 0; i < MAX_OCTEON_LINKS; i++)
2257		oct->props[i].gmxport = -1;
2258
2259	retval = setup_nic_devices(oct);
2260	if (retval) {
2261		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
2262		goto octnet_init_failure;
2263	}
2264
2265	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
2266
2267	return retval;
2268
2269octnet_init_failure:
2270
2271	oct->ifcount = 0;
2272
2273	return retval;
2274}
2275
2276/**
2277 * octeon_device_init - Device initialization for each Octeon device that is probed
2278 * @oct:  octeon device
2279 */
2280static int octeon_device_init(struct octeon_device *oct)
2281{
2282	u32 rev_id;
2283	int j;
2284
2285	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
2286
2287	/* Enable access to the octeon device and make its DMA capability
2288	 * known to the OS.
2289	 */
2290	if (octeon_pci_os_setup(oct))
2291		return 1;
2292	atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
2293
2294	oct->chip_id = OCTEON_CN23XX_VF_VID;
2295	pci_read_config_dword(oct->pci_dev, 8, &rev_id); /* PCI_CLASS_REVISION */
2296	oct->rev_id = rev_id & 0xff;
2297
2298	if (cn23xx_setup_octeon_vf_device(oct))
2299		return 1;
2300
2301	atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
2302
2303	oct->app_mode = CVM_DRV_NIC_APP;
2304
2305	/* Initialize the dispatch mechanism used to push packets arriving on
2306	 * Octeon Output queues.
2307	 */
2308	if (octeon_init_dispatch_list(oct))
2309		return 1;
2310
2311	atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
2312
2313	if (octeon_set_io_queues_off(oct)) {
2314		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
2315		return 1;
2316	}
2317
2318	if (oct->fn_list.setup_device_regs(oct)) {
2319		dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
2320		return 1;
2321	}
2322
2323	/* Initialize soft command buffer pool */
2324	if (octeon_setup_sc_buffer_pool(oct)) {
2325		dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
2326		return 1;
2327	}
2328	atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
2329
2330	/* Setup the data structures that manage this Octeon's Input queues. */
2331	if (octeon_setup_instr_queues(oct)) {
2332		dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
2333		return 1;
2334	}
2335	atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
2336
2337	/* Initialize lists to manage the requests of different types that
2338	 * arrive from user & kernel applications for this octeon device.
2339	 */
2340	if (octeon_setup_response_list(oct)) {
2341		dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
2342		return 1;
2343	}
2344	atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
2345
2346	if (octeon_setup_output_queues(oct)) {
2347		dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
2348		return 1;
2349	}
2350	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
2351
2352	if (oct->fn_list.setup_mbox(oct)) {
2353		dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
2354		return 1;
2355	}
2356	atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
2357
2358	if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
2359		dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
2360		return 1;
2361	}
2362	atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
2363
2364	dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
2365		 oct->sriov_info.rings_per_vf);
2366
2367	/* Setup the interrupt handler and record the INT SUM register address */
2368	if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
2369		return 1;
2370
2371	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
2372
2373	/* ***************************************************************
2374	 * The interrupts need to be enabled for the PF<-->VF handshake.
2375	 * They are [re]-enabled after the PF<-->VF handshake so that the
2376	 * correct OQ tick value is used (i.e. the value retrieved from
2377	 * the PF as part of the handshake).
2378	 */
2379
2380	/* Enable Octeon device interrupts */
2381	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2382
2383	if (cn23xx_octeon_pfvf_handshake(oct))
2384		return 1;
2385
2386	/* Here we [re]-enable the interrupts so that the correct OQ tick value
2387	 * is used (i.e. the value that was retrieved during the handshake)
2388	 */
2389
2390	/* Enable Octeon device interrupts */
2391	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2392	/* *************************************************************** */
2393
2394	/* Enable the input and output queues for this Octeon device */
2395	if (oct->fn_list.enable_io_queues(oct)) {
2396		dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
2397		return 1;
2398	}
2399
2400	atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
2401
2402	atomic_set(&oct->status, OCT_DEV_HOST_OK);
2403
2404	/* Send Credit for Octeon Output queues. Credits are always sent after
2405	 * the output queue is enabled.
2406	 */
2407	for (j = 0; j < oct->num_oqs; j++)
2408		writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
2409
2410	/* Packets can start arriving on the output queues from this point. */
2411
2412	atomic_set(&oct->status, OCT_DEV_CORE_OK);
2413
2414	atomic_set(&oct->status, OCT_DEV_RUNNING);
2415
2416	if (liquidio_init_nic_module(oct))
2417		return 1;
2418
2419	return 0;
2420}
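
/*
 * The OCT_DEV_* states set above form an init ladder; teardown is
 * assumed to mirror it (octeon_destroy_resources() earlier in this
 * file switches on the last state reached and falls through), so a
 * failed probe unwinds only what was actually set up. In outline:
 */
#if 0
	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:			/* quiesce traffic */
		fallthrough;
	case OCT_DEV_MBOX_SETUP_DONE:
		oct->fn_list.free_mbox(oct);
		fallthrough;
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_disable_device(oct->pci_dev);
		fallthrough;
	case OCT_DEV_BEGIN_STATE:
		break;
	}
#endif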
2421
2422static int __init liquidio_vf_init(void)
2423{
2424	octeon_init_device_list(0);
2425	return pci_register_driver(&liquidio_vf_pci_driver);
2426}
2427
2428static void __exit liquidio_vf_exit(void)
2429{
2430	pci_unregister_driver(&liquidio_vf_pci_driver);
2431
2432	pr_info("LiquidIO_VF network module is now unloaded\n");
2433}
2434
2435module_init(liquidio_vf_init);
2436module_exit(liquidio_vf_exit);
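
/*
 * Assumed usage, based on standard SR-IOV tooling rather than anything
 * in this file: VFs appear once the PF driver creates them, after
 * which this module binds to each VF:
 *
 *	# echo 4 > /sys/bus/pci/devices/<pf-bdf>/sriov_numvfs
 *	# modprobe liquidio_vf debug=0x7	# NETIF_MSG bits, see 'debug' above
 */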
v5.9
   1/**********************************************************************
   2 * Author: Cavium, Inc.
   3 *
   4 * Contact: support@cavium.com
   5 *          Please include "LiquidIO" in the subject.
   6 *
   7 * Copyright (c) 2003-2016 Cavium, Inc.
   8 *
   9 * This file is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License, Version 2, as
  11 * published by the Free Software Foundation.
  12 *
  13 * This file is distributed in the hope that it will be useful, but
  14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16 * NONINFRINGEMENT.  See the GNU General Public License for more details.
  17 ***********************************************************************/
  18#include <linux/module.h>
  19#include <linux/interrupt.h>
  20#include <linux/pci.h>
  21#include <net/vxlan.h>
  22#include "liquidio_common.h"
  23#include "octeon_droq.h"
  24#include "octeon_iq.h"
  25#include "response_manager.h"
  26#include "octeon_device.h"
  27#include "octeon_nic.h"
  28#include "octeon_main.h"
  29#include "octeon_network.h"
  30#include "cn23xx_vf_device.h"
  31
  32MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
  33MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
  34MODULE_LICENSE("GPL");
  35
  36static int debug = -1;
  37module_param(debug, int, 0644);
  38MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
  39
  40#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
  41
  42struct oct_timestamp_resp {
  43	u64 rh;
  44	u64 timestamp;
  45	u64 status;
  46};
  47
  48union tx_info {
  49	u64 u64;
  50	struct {
  51#ifdef __BIG_ENDIAN_BITFIELD
  52		u16 gso_size;
  53		u16 gso_segs;
  54		u32 reserved;
  55#else
  56		u32 reserved;
  57		u16 gso_segs;
  58		u16 gso_size;
  59#endif
  60	} s;
  61};
  62
  63#define OCTNIC_GSO_MAX_HEADER_SIZE 128
  64#define OCTNIC_GSO_MAX_SIZE \
  65		(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
  66
  67static int
  68liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  69static void liquidio_vf_remove(struct pci_dev *pdev);
  70static int octeon_device_init(struct octeon_device *oct);
  71static int liquidio_stop(struct net_device *netdev);
  72
  73static int lio_wait_for_oq_pkts(struct octeon_device *oct)
  74{
  75	struct octeon_device_priv *oct_priv =
  76	    (struct octeon_device_priv *)oct->priv;
  77	int retry = MAX_IO_PENDING_PKT_COUNT;
  78	int pkt_cnt = 0, pending_pkts;
  79	int i;
  80
  81	do {
  82		pending_pkts = 0;
  83
  84		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
  85			if (!(oct->io_qmask.oq & BIT_ULL(i)))
  86				continue;
  87			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
  88		}
  89		if (pkt_cnt > 0) {
  90			pending_pkts += pkt_cnt;
  91			tasklet_schedule(&oct_priv->droq_tasklet);
  92		}
  93		pkt_cnt = 0;
  94		schedule_timeout_uninterruptible(1);
  95
  96	} while (retry-- && pending_pkts);
  97
  98	return pkt_cnt;
  99}
 100
 101/**
 102 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 103 * @param oct Pointer to Octeon device
 104 */
 105static void pcierror_quiesce_device(struct octeon_device *oct)
 106{
 107	int i;
 108
 109	/* Disable the input and output queues now. No more packets will
 110	 * arrive from Octeon, but we should wait for all packet processing
 111	 * to finish.
 112	 */
 113
 114	/* To allow for in-flight requests */
 115	schedule_timeout_uninterruptible(100);
 116
 117	if (wait_for_pending_requests(oct))
 118		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
 119
 120	/* Force all requests waiting to be fetched by OCTEON to complete. */
 121	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
 122		struct octeon_instr_queue *iq;
 123
 124		if (!(oct->io_qmask.iq & BIT_ULL(i)))
 125			continue;
 126		iq = oct->instr_queue[i];
 127
 128		if (atomic_read(&iq->instr_pending)) {
 129			spin_lock_bh(&iq->lock);
 130			iq->fill_cnt = 0;
 131			iq->octeon_read_index = iq->host_write_index;
 132			iq->stats.instr_processed +=
 133			    atomic_read(&iq->instr_pending);
 134			lio_process_iq_request_list(oct, iq, 0);
 135			spin_unlock_bh(&iq->lock);
 136		}
 137	}
 138
 139	/* Force all pending ordered list requests to time out. */
 140	lio_process_ordered_list(oct, 1);
 141
 142	/* We do not need to wait for output queue packets to be processed. */
 143}
 144
 145/**
 146 * \brief Cleanup PCI AER uncorrectable error status
 147 * @param dev Pointer to PCI device
 148 */
 149static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 150{
 151	u32 status, mask;
 152	int pos = 0x100;
 153
 154	pr_info("%s :\n", __func__);
 155
 156	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
 157	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
 158	if (dev->error_state == pci_channel_io_normal)
 159		status &= ~mask; /* Clear corresponding nonfatal bits */
 160	else
 161		status &= mask; /* Clear corresponding fatal bits */
 162	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
 163}
 164
 165/**
 166 * \brief Stop all PCI IO to a given device
 167 * @param dev Pointer to Octeon device
 168 */
 169static void stop_pci_io(struct octeon_device *oct)
 170{
 171	struct msix_entry *msix_entries;
 172	int i;
 173
 174	/* No more instructions will be forwarded. */
 175	atomic_set(&oct->status, OCT_DEV_IN_RESET);
 176
 177	for (i = 0; i < oct->ifcount; i++)
 178		netif_device_detach(oct->props[i].netdev);
 179
 180	/* Disable interrupts  */
 181	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
 182
 183	pcierror_quiesce_device(oct);
 184	if (oct->msix_on) {
 185		msix_entries = (struct msix_entry *)oct->msix_entries;
 186		for (i = 0; i < oct->num_msix_irqs; i++) {
 187			/* clear the affinity_cpumask */
 188			irq_set_affinity_hint(msix_entries[i].vector,
 189					      NULL);
 190			free_irq(msix_entries[i].vector,
 191				 &oct->ioq_vector[i]);
 192		}
 193		pci_disable_msix(oct->pci_dev);
 194		kfree(oct->msix_entries);
 195		oct->msix_entries = NULL;
 196		octeon_free_ioq_vector(oct);
 197	}
 198	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
 199		lio_get_state_string(&oct->status));
 200
 201	/* making it a common function for all OCTEON models */
 202	cleanup_aer_uncorrect_error_status(oct->pci_dev);
 203
 204	pci_disable_device(oct->pci_dev);
 205}
 206
 207/**
 208 * \brief called when PCI error is detected
 209 * @param pdev Pointer to PCI device
 210 * @param state The current pci connection state
 211 *
 212 * This function is called after a PCI bus error affecting
 213 * this device has been detected.
 214 */
 215static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
 216						     pci_channel_state_t state)
 217{
 218	struct octeon_device *oct = pci_get_drvdata(pdev);
 219
 220	/* Non-correctable Non-fatal errors */
 221	if (state == pci_channel_io_normal) {
 222		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
 223		cleanup_aer_uncorrect_error_status(oct->pci_dev);
 224		return PCI_ERS_RESULT_CAN_RECOVER;
 225	}
 226
 227	/* Non-correctable Fatal errors */
 228	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
 229	stop_pci_io(oct);
 230
 231	return PCI_ERS_RESULT_DISCONNECT;
 232}
 233
 234/* For PCI-E Advanced Error Recovery (AER) Interface */
 235static const struct pci_error_handlers liquidio_vf_err_handler = {
 236	.error_detected = liquidio_pcie_error_detected,
 237};
 238
 239static const struct pci_device_id liquidio_vf_pci_tbl[] = {
 240	{
 241		PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
 242		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
 243	},
 244	{
 245		0, 0, 0, 0, 0, 0, 0
 246	}
 247};
 248MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
 249
 250static struct pci_driver liquidio_vf_pci_driver = {
 251	.name		= "LiquidIO_VF",
 252	.id_table	= liquidio_vf_pci_tbl,
 253	.probe		= liquidio_vf_probe,
 254	.remove		= liquidio_vf_remove,
 255	.err_handler	= &liquidio_vf_err_handler,    /* For AER */
 256};
 257
 258/**
 259 * \brief Print link information
 260 * @param netdev network device
 261 */
 262static void print_link_info(struct net_device *netdev)
 263{
 264	struct lio *lio = GET_LIO(netdev);
 265
 266	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
 267	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
 268		struct oct_link_info *linfo = &lio->linfo;
 269
 270		if (linfo->link.s.link_up) {
 271			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
 272				   linfo->link.s.speed,
 273				   (linfo->link.s.duplex) ? "Full" : "Half");
 274		} else {
 275			netif_info(lio, link, lio->netdev, "Link Down\n");
 276		}
 277	}
 278}
 279
 280/**
 281 * \brief Routine to notify MTU change
 282 * @param work work_struct data structure
 283 */
 284static void octnet_link_status_change(struct work_struct *work)
 285{
 286	struct cavium_wk *wk = (struct cavium_wk *)work;
 287	struct lio *lio = (struct lio *)wk->ctxptr;
 288
 289	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
 290	 * this API is invoked only when new max-MTU of the interface is
 291	 * less than current MTU.
 292	 */
 293	rtnl_lock();
 294	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
 295	rtnl_unlock();
 296}
 297
 298/**
 299 * \brief Sets up the mtu status change work
 300 * @param netdev network device
 301 */
 302static int setup_link_status_change_wq(struct net_device *netdev)
 303{
 304	struct lio *lio = GET_LIO(netdev);
 305	struct octeon_device *oct = lio->oct_dev;
 306
 307	lio->link_status_wq.wq = alloc_workqueue("link-status",
 308						 WQ_MEM_RECLAIM, 0);
 309	if (!lio->link_status_wq.wq) {
 310		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
 311		return -1;
 312	}
 313	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
 314			  octnet_link_status_change);
 315	lio->link_status_wq.wk.ctxptr = lio;
 316
 317	return 0;
 318}
 319
 320static void cleanup_link_status_change_wq(struct net_device *netdev)
 321{
 322	struct lio *lio = GET_LIO(netdev);
 323
 324	if (lio->link_status_wq.wq) {
 325		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
 326		destroy_workqueue(lio->link_status_wq.wq);
 327	}
 328}
 329
 330/**
 331 * \brief Update link status
 332 * @param netdev network device
 333 * @param ls link status structure
 334 *
 335 * Called on receipt of a link status response from the core application to
 336 * update each interface's link status.
 337 */
 338static void update_link_status(struct net_device *netdev,
 339			       union oct_link_status *ls)
 340{
 341	struct lio *lio = GET_LIO(netdev);
 342	int current_max_mtu = lio->linfo.link.s.mtu;
 343	struct octeon_device *oct = lio->oct_dev;
 344
 345	if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
 346		lio->linfo.link.u64 = ls->u64;
 347
 348		print_link_info(netdev);
 349		lio->link_changes++;
 350
 351		if (lio->linfo.link.s.link_up) {
 352			netif_carrier_on(netdev);
 353			wake_txqs(netdev);
 354		} else {
 355			netif_carrier_off(netdev);
 356			stop_txqs(netdev);
 357		}
 358
 359		if (lio->linfo.link.s.mtu != current_max_mtu) {
 360			dev_info(&oct->pci_dev->dev,
 361				 "Max MTU Changed from %d to %d\n",
 362				 current_max_mtu, lio->linfo.link.s.mtu);
 363			netdev->max_mtu = lio->linfo.link.s.mtu;
 364		}
 365
 366		if (lio->linfo.link.s.mtu < netdev->mtu) {
 367			dev_warn(&oct->pci_dev->dev,
 368				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
 369				 netdev->mtu, lio->linfo.link.s.mtu);
 370			queue_delayed_work(lio->link_status_wq.wq,
 371					   &lio->link_status_wq.wk.work, 0);
 372		}
 373	}
 374}
 375
 376/**
 377 * \brief PCI probe handler
 378 * @param pdev PCI device structure
 379 * @param ent unused
 380 */
 381static int
 382liquidio_vf_probe(struct pci_dev *pdev,
 383		  const struct pci_device_id *ent __attribute__((unused)))
 384{
 385	struct octeon_device *oct_dev = NULL;
 386
 387	oct_dev = octeon_allocate_device(pdev->device,
 388					 sizeof(struct octeon_device_priv));
 389
 390	if (!oct_dev) {
 391		dev_err(&pdev->dev, "Unable to allocate device\n");
 392		return -ENOMEM;
 393	}
 394	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
 395
 396	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
 397		 (u32)pdev->vendor, (u32)pdev->device);
 398
 399	/* Assign octeon_device for this device to the private data area. */
 400	pci_set_drvdata(pdev, oct_dev);
 401
 402	/* set linux specific device pointer */
 403	oct_dev->pci_dev = pdev;
 404
 405	oct_dev->subsystem_id = pdev->subsystem_vendor |
 406		(pdev->subsystem_device << 16);
 407
 408	if (octeon_device_init(oct_dev)) {
 409		liquidio_vf_remove(pdev);
 410		return -ENOMEM;
 411	}
 412
 413	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
 414
 415	return 0;
 416}
 417
 418/**
 419 * \brief PCI FLR for each Octeon device.
 420 * @param oct octeon device
 421 */
 422static void octeon_pci_flr(struct octeon_device *oct)
 423{
 424	pci_save_state(oct->pci_dev);
 425
 426	pci_cfg_access_lock(oct->pci_dev);
 427
 428	/* Quiesce the device completely */
 429	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
 430			      PCI_COMMAND_INTX_DISABLE);
 431
 432	pcie_flr(oct->pci_dev);
 433
 434	pci_cfg_access_unlock(oct->pci_dev);
 435
 436	pci_restore_state(oct->pci_dev);
 437}
 438
 439/**
 440 *\brief Destroy resources associated with octeon device
 441 * @param pdev PCI device structure
 442 * @param ent unused
 443 */
 444static void octeon_destroy_resources(struct octeon_device *oct)
 445{
 446	struct octeon_device_priv *oct_priv =
 447		(struct octeon_device_priv *)oct->priv;
 448	struct msix_entry *msix_entries;
 449	int i;
 450
 451	switch (atomic_read(&oct->status)) {
 452	case OCT_DEV_RUNNING:
 453	case OCT_DEV_CORE_OK:
 454		/* No more instructions will be forwarded. */
 455		atomic_set(&oct->status, OCT_DEV_IN_RESET);
 456
 457		oct->app_mode = CVM_DRV_INVALID_APP;
 458		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
 459			lio_get_state_string(&oct->status));
 460
 461		schedule_timeout_uninterruptible(HZ / 10);
 462
 463		fallthrough;
 464	case OCT_DEV_HOST_OK:
 465	case OCT_DEV_IO_QUEUES_DONE:
 466		if (lio_wait_for_instr_fetch(oct))
 467			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
 468
 469		if (wait_for_pending_requests(oct))
 470			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
 471
 472		/* Disable the input and output queues now. No more packets will
 473		 * arrive from Octeon, but we should wait for all packet
 474		 * processing to finish.
 475		 */
 476		oct->fn_list.disable_io_queues(oct);
 477
 478		if (lio_wait_for_oq_pkts(oct))
 479			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
 480
 481		/* Force all requests waiting to be fetched by OCTEON to
 482		 * complete.
 483		 */
 484		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
 485			struct octeon_instr_queue *iq;
 486
 487			if (!(oct->io_qmask.iq & BIT_ULL(i)))
 488				continue;
 489			iq = oct->instr_queue[i];
 490
 491			if (atomic_read(&iq->instr_pending)) {
 492				spin_lock_bh(&iq->lock);
 493				iq->fill_cnt = 0;
 494				iq->octeon_read_index = iq->host_write_index;
 495				iq->stats.instr_processed +=
 496					atomic_read(&iq->instr_pending);
 497				lio_process_iq_request_list(oct, iq, 0);
 498				spin_unlock_bh(&iq->lock);
 499			}
 500		}
 501
 502		lio_process_ordered_list(oct, 1);
 503		octeon_free_sc_done_list(oct);
 504		octeon_free_sc_zombie_list(oct);
 505
 506		fallthrough;
 507	case OCT_DEV_INTR_SET_DONE:
 508		/* Disable interrupts  */
 509		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
 510
 511		if (oct->msix_on) {
 512			msix_entries = (struct msix_entry *)oct->msix_entries;
 513			for (i = 0; i < oct->num_msix_irqs; i++) {
 514				if (oct->ioq_vector[i].vector) {
 515					irq_set_affinity_hint(
 516							msix_entries[i].vector,
 517							NULL);
 518					free_irq(msix_entries[i].vector,
 519						 &oct->ioq_vector[i]);
 520					oct->ioq_vector[i].vector = 0;
 521				}
 522			}
 523			pci_disable_msix(oct->pci_dev);
 524			kfree(oct->msix_entries);
 525			oct->msix_entries = NULL;
 526			kfree(oct->irq_name_storage);
 527			oct->irq_name_storage = NULL;
 528		}
 529		/* Soft reset the octeon device before exiting */
 530		if (oct->pci_dev->reset_fn)
 531			octeon_pci_flr(oct);
 532		else
 533			cn23xx_vf_ask_pf_to_do_flr(oct);
 534
 535		fallthrough;
 536	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
 537		octeon_free_ioq_vector(oct);
 538
 539		fallthrough;
 540	case OCT_DEV_MBOX_SETUP_DONE:
 541		oct->fn_list.free_mbox(oct);
 542
 543		fallthrough;
 544	case OCT_DEV_IN_RESET:
 545	case OCT_DEV_DROQ_INIT_DONE:
 546		mdelay(100);
 547		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
 548			if (!(oct->io_qmask.oq & BIT_ULL(i)))
 549				continue;
 550			octeon_delete_droq(oct, i);
 551		}
 552
 553		fallthrough;
 554	case OCT_DEV_RESP_LIST_INIT_DONE:
 555		octeon_delete_response_list(oct);
 556
 557		fallthrough;
 558	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
 559		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
 560			if (!(oct->io_qmask.iq & BIT_ULL(i)))
 561				continue;
 562			octeon_delete_instr_queue(oct, i);
 563		}
 564
 565		fallthrough;
 566	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
 567		octeon_free_sc_buffer_pool(oct);
 568
 569		fallthrough;
 570	case OCT_DEV_DISPATCH_INIT_DONE:
 571		octeon_delete_dispatch_list(oct);
 572		cancel_delayed_work_sync(&oct->nic_poll_work.work);
 573
 574		fallthrough;
 575	case OCT_DEV_PCI_MAP_DONE:
 576		octeon_unmap_pci_barx(oct, 0);
 577		octeon_unmap_pci_barx(oct, 1);
 578
 579		fallthrough;
 580	case OCT_DEV_PCI_ENABLE_DONE:
 581		pci_clear_master(oct->pci_dev);
 582		/* Disable the device, releasing the PCI INT */
 583		pci_disable_device(oct->pci_dev);
 584
 585		fallthrough;
 586	case OCT_DEV_BEGIN_STATE:
 587		/* Nothing to be done here either */
 588		break;
 589	}
 590
 591	tasklet_kill(&oct_priv->droq_tasklet);
 592}
 593
 594/**
 595 * \brief Send Rx control command
 596 * @param lio per-network private data
 597 * @param start_stop whether to start or stop
 598 */
 599static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
 600{
 601	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
 602	struct octeon_soft_command *sc;
 603	union octnet_cmd *ncmd;
 604	int retval;
 605
 606	if (oct->props[lio->ifidx].rx_on == start_stop)
 607		return;
 608
 609	sc = (struct octeon_soft_command *)
 610		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
 611					  16, 0);
 
 
 
 
 
 612
 613	ncmd = (union octnet_cmd *)sc->virtdptr;
 614
 615	ncmd->u64 = 0;
 616	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
 617	ncmd->s.param1 = start_stop;
 618
 619	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
 620
 621	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
 622
 623	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
 624				    OPCODE_NIC_CMD, 0, 0, 0);
 625
 626	init_completion(&sc->complete);
 627	sc->sc_status = OCTEON_REQUEST_PENDING;
 628
 629	retval = octeon_send_soft_command(oct, sc);
 630	if (retval == IQ_SEND_FAILED) {
 631		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
 632		octeon_free_soft_command(oct, sc);
 633	} else {
 634		/* Sleep on a wait queue till the cond flag indicates that the
 635		 * response arrived or timed-out.
 636		 */
 637		retval = wait_for_sc_completion_timeout(oct, sc, 0);
 638		if (retval)
 639			return;
 640
 641		oct->props[lio->ifidx].rx_on = start_stop;
 642		WRITE_ONCE(sc->caller_is_done, true);
 643	}
 
 
 644}
 645
 646/**
 647 * \brief Destroy NIC device interface
 648 * @param oct octeon device
 649 * @param ifidx which interface to destroy
 650 *
 651 * Cleanup associated with each interface for an Octeon device  when NIC
 652 * module is being unloaded or if initialization fails during load.
 653 */
 654static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 655{
 656	struct net_device *netdev = oct->props[ifidx].netdev;
 657	struct octeon_device_priv *oct_priv =
 658		(struct octeon_device_priv *)oct->priv;
 659	struct napi_struct *napi, *n;
 660	struct lio *lio;
 661
 662	if (!netdev) {
 663		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
 664			__func__, ifidx);
 665		return;
 666	}
 667
 668	lio = GET_LIO(netdev);
 669
 670	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
 671
 672	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
 673		liquidio_stop(netdev);
 674
 675	if (oct->props[lio->ifidx].napi_enabled == 1) {
 676		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 677			napi_disable(napi);
 678
 679		oct->props[lio->ifidx].napi_enabled = 0;
 680
 681		oct->droq[0]->ops.poll_mode = 0;
 682	}
 683
 684	/* Delete NAPI */
 685	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 686		netif_napi_del(napi);
 687
 688	tasklet_enable(&oct_priv->droq_tasklet);
 689
 690	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
 691		unregister_netdev(netdev);
 692
 693	cleanup_rx_oom_poll_fn(netdev);
 694
 695	cleanup_link_status_change_wq(netdev);
 696
 697	lio_delete_glists(lio);
 698
 699	free_netdev(netdev);
 700
 701	oct->props[ifidx].gmxport = -1;
 702
 703	oct->props[ifidx].netdev = NULL;
 704}
 705
 706/**
 707 * \brief Stop complete NIC functionality
 708 * @param oct octeon device
 709 */
 710static int liquidio_stop_nic_module(struct octeon_device *oct)
 711{
 712	struct lio *lio;
 713	int i, j;
 714
 715	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
 716	if (!oct->ifcount) {
 717		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
 718		return 1;
 719	}
 720
 721	spin_lock_bh(&oct->cmd_resp_wqlock);
 722	oct->cmd_resp_state = OCT_DRV_OFFLINE;
 723	spin_unlock_bh(&oct->cmd_resp_wqlock);
 724
 725	for (i = 0; i < oct->ifcount; i++) {
 726		lio = GET_LIO(oct->props[i].netdev);
 727		for (j = 0; j < oct->num_oqs; j++)
 728			octeon_unregister_droq_ops(oct,
 729						   lio->linfo.rxpciq[j].s.q_no);
 730	}
 731
 732	for (i = 0; i < oct->ifcount; i++)
 733		liquidio_destroy_nic_device(oct, i);
 734
 735	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
 736	return 0;
 737}
 738
 739/**
 740 * \brief Cleans up resources at unload time
 741 * @param pdev PCI device structure
 742 */
 743static void liquidio_vf_remove(struct pci_dev *pdev)
 744{
 745	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
 746
 747	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
 748
 749	if (oct_dev->app_mode == CVM_DRV_NIC_APP)
 750		liquidio_stop_nic_module(oct_dev);
 751
 752	/* Reset the octeon device and cleanup all memory allocated for
 753	 * the octeon device by driver.
 754	 */
 755	octeon_destroy_resources(oct_dev);
 756
 757	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
 758
 759	/* This octeon device has been removed. Update the global
 760	 * data structure to reflect this. Free the device structure.
 761	 */
 762	octeon_free_device_mem(oct_dev);
 763}
 764
 765/**
 766 * \brief PCI initialization for each Octeon device.
 767 * @param oct octeon device
 768 */
 769static int octeon_pci_os_setup(struct octeon_device *oct)
 770{
 771#ifdef CONFIG_PCI_IOV
 772	/* setup PCI stuff first */
 773	if (!oct->pci_dev->physfn)
 774		octeon_pci_flr(oct);
 775#endif
 776
 777	if (pci_enable_device(oct->pci_dev)) {
 778		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
 779		return 1;
 780	}
 781
 782	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
 783		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
 784		pci_disable_device(oct->pci_dev);
 785		return 1;
 786	}
 787
 788	/* Enable PCI DMA Master. */
 789	pci_set_master(oct->pci_dev);
 790
 791	return 0;
 792}
 793
 794/**
 795 * \brief Unmap and free network buffer
 796 * @param buf buffer
 797 */
 798static void free_netbuf(void *buf)
 799{
 800	struct octnet_buf_free_info *finfo;
 801	struct sk_buff *skb;
 802	struct lio *lio;
 803
 804	finfo = (struct octnet_buf_free_info *)buf;
 805	skb = finfo->skb;
 806	lio = finfo->lio;
 807
 808	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
 809			 DMA_TO_DEVICE);
 810
 811	tx_buffer_free(skb);
 812}
 813
 814/**
 815 * \brief Unmap and free gather buffer
 816 * @param buf buffer
 817 */
 818static void free_netsgbuf(void *buf)
 819{
 820	struct octnet_buf_free_info *finfo;
 821	struct octnic_gather *g;
 822	struct sk_buff *skb;
 823	int i, frags, iq;
 824	struct lio *lio;
 825
 826	finfo = (struct octnet_buf_free_info *)buf;
 827	skb = finfo->skb;
 828	lio = finfo->lio;
 829	g = finfo->g;
 830	frags = skb_shinfo(skb)->nr_frags;
 831
 832	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
 833			 g->sg[0].ptr[0], (skb->len - skb->data_len),
 834			 DMA_TO_DEVICE);
 835
 836	i = 1;
 837	while (frags--) {
 838		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 839
 840		pci_unmap_page((lio->oct_dev)->pci_dev,
 841			       g->sg[(i >> 2)].ptr[(i & 3)],
 842			       skb_frag_size(frag), DMA_TO_DEVICE);
 843		i++;
 844	}
 845
 846	iq = skb_iq(lio->oct_dev, skb);
 847
 848	spin_lock(&lio->glist_lock[iq]);
 849	list_add_tail(&g->list, &lio->glist[iq]);
 850	spin_unlock(&lio->glist_lock[iq]);
 851
 852	tx_buffer_free(skb);
 853}
 854
 855/**
 856 * \brief Unmap and free gather buffer with response
 857 * @param buf buffer
 858 */
 859static void free_netsgbuf_with_resp(void *buf)
 860{
 861	struct octnet_buf_free_info *finfo;
 862	struct octeon_soft_command *sc;
 863	struct octnic_gather *g;
 864	struct sk_buff *skb;
 865	int i, frags, iq;
 866	struct lio *lio;
 867
 868	sc = (struct octeon_soft_command *)buf;
 869	skb = (struct sk_buff *)sc->callback_arg;
 870	finfo = (struct octnet_buf_free_info *)&skb->cb;
 871
 872	lio = finfo->lio;
 873	g = finfo->g;
 874	frags = skb_shinfo(skb)->nr_frags;
 875
 876	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
 877			 g->sg[0].ptr[0], (skb->len - skb->data_len),
 878			 DMA_TO_DEVICE);
 879
 880	i = 1;
 881	while (frags--) {
 882		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 883
 884		pci_unmap_page((lio->oct_dev)->pci_dev,
 885			       g->sg[(i >> 2)].ptr[(i & 3)],
 886			       skb_frag_size(frag), DMA_TO_DEVICE);
 887		i++;
 888	}
 889
 890	iq = skb_iq(lio->oct_dev, skb);
 891
 892	spin_lock(&lio->glist_lock[iq]);
 893	list_add_tail(&g->list, &lio->glist[iq]);
 894	spin_unlock(&lio->glist_lock[iq]);
 895
 896	/* Don't free the skb yet */
 897}
 898
 899/**
 900 * \brief Net device open for LiquidIO
 901 * @param netdev network device
 902 */
 903static int liquidio_open(struct net_device *netdev)
 904{
 905	struct lio *lio = GET_LIO(netdev);
 906	struct octeon_device *oct = lio->oct_dev;
 907	struct octeon_device_priv *oct_priv =
 908		(struct octeon_device_priv *)oct->priv;
 909	struct napi_struct *napi, *n;
 
 910
 911	if (!oct->props[lio->ifidx].napi_enabled) {
 912		tasklet_disable(&oct_priv->droq_tasklet);
 913
 914		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 915			napi_enable(napi);
 916
 917		oct->props[lio->ifidx].napi_enabled = 1;
 918
 919		oct->droq[0]->ops.poll_mode = 1;
 920	}
 921
 922	ifstate_set(lio, LIO_IFSTATE_RUNNING);
 923
 924	/* Ready for link status updates */
 925	lio->intf_open = 1;
 926
 927	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
 928	start_txqs(netdev);
 929
 930	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
 931	lio->stats_wk.ctxptr = lio;
 932	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
 933					(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
 934
 935	/* tell Octeon to start forwarding packets to host */
 936	send_rx_ctrl_cmd(lio, 1);
 
 
 937
 938	dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
 939
 940	return 0;
 941}
 942
 943/**
 944 * \brief Net device stop for LiquidIO
 945 * @param netdev network device
 946 */
 947static int liquidio_stop(struct net_device *netdev)
 948{
 949	struct lio *lio = GET_LIO(netdev);
 950	struct octeon_device *oct = lio->oct_dev;
 951	struct octeon_device_priv *oct_priv =
 952		(struct octeon_device_priv *)oct->priv;
 953	struct napi_struct *napi, *n;
 
 954
 955	/* tell Octeon to stop forwarding packets to host */
 956	send_rx_ctrl_cmd(lio, 0);
 
 
 957
 958	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
 959	/* Inform that netif carrier is down */
 960	lio->intf_open = 0;
 961	lio->linfo.link.s.link_up = 0;
 962
 963	netif_carrier_off(netdev);
 964	lio->link_changes++;
 965
 966	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
 967
 968	stop_txqs(netdev);
 969
 970	/* Wait for any pending Rx descriptors */
 971	if (lio_wait_for_clean_oq(oct))
 972		netif_info(lio, rx_err, lio->netdev,
 973			   "Proceeding with stop interface after partial RX desc processing\n");
 974
 975	if (oct->props[lio->ifidx].napi_enabled == 1) {
 976		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 977			napi_disable(napi);
 978
 979		oct->props[lio->ifidx].napi_enabled = 0;
 980
 981		oct->droq[0]->ops.poll_mode = 0;
 982
 983		tasklet_enable(&oct_priv->droq_tasklet);
 984	}
 985
 986	cancel_delayed_work_sync(&lio->stats_wk.work);
 987
 988	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
 989
 990	return 0;
 991}
 992
 993/**
 994 * \brief Converts a mask based on net device flags
 995 * @param netdev network device
 996 *
 997 * This routine generates a octnet_ifflags mask from the net device flags
 998 * received from the OS.
 999 */
1000static enum octnet_ifflags get_new_flags(struct net_device *netdev)
1001{
1002	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1003
1004	if (netdev->flags & IFF_PROMISC)
1005		f |= OCTNET_IFFLAG_PROMISC;
1006
1007	if (netdev->flags & IFF_ALLMULTI)
1008		f |= OCTNET_IFFLAG_ALLMULTI;
1009
1010	if (netdev->flags & IFF_MULTICAST) {
1011		f |= OCTNET_IFFLAG_MULTICAST;
1012
1013		/* Accept all multicast addresses if there are more than we
1014		 * can handle
1015		 */
1016		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1017			f |= OCTNET_IFFLAG_ALLMULTI;
1018	}
1019
1020	if (netdev->flags & IFF_BROADCAST)
1021		f |= OCTNET_IFFLAG_BROADCAST;
1022
1023	return f;
1024}
1025
1026static void liquidio_set_uc_list(struct net_device *netdev)
1027{
1028	struct lio *lio = GET_LIO(netdev);
1029	struct octeon_device *oct = lio->oct_dev;
1030	struct octnic_ctrl_pkt nctrl;
1031	struct netdev_hw_addr *ha;
1032	u64 *mac;
1033
1034	if (lio->netdev_uc_count == netdev_uc_count(netdev))
1035		return;
1036
1037	if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
1038		dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
1039		return;
1040	}
1041
1042	lio->netdev_uc_count = netdev_uc_count(netdev);
1043
1044	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1045	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
1046	nctrl.ncmd.s.more = lio->netdev_uc_count;
1047	nctrl.ncmd.s.param1 = oct->vf_num;
1048	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1049	nctrl.netpndev = (u64)netdev;
1050	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1051
1052	/* copy all the addresses into the udd */
1053	mac = &nctrl.udd[0];
1054	netdev_for_each_uc_addr(ha, netdev) {
1055		ether_addr_copy(((u8 *)mac) + 2, ha->addr);
1056		mac++;
1057	}
1058
1059	octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1060}
1061
1062/**
1063 * \brief Net device set_multicast_list
1064 * @param netdev network device
1065 */
1066static void liquidio_set_mcast_list(struct net_device *netdev)
1067{
1068	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1069	struct lio *lio = GET_LIO(netdev);
1070	struct octeon_device *oct = lio->oct_dev;
1071	struct octnic_ctrl_pkt nctrl;
1072	struct netdev_hw_addr *ha;
1073	u64 *mc;
1074	int ret;
1075
1076	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1077
1078	/* Create a ctrl pkt command to be sent to core app. */
1079	nctrl.ncmd.u64 = 0;
1080	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1081	nctrl.ncmd.s.param1 = get_new_flags(netdev);
1082	nctrl.ncmd.s.param2 = mc_count;
1083	nctrl.ncmd.s.more = mc_count;
1084	nctrl.netpndev = (u64)netdev;
1085	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1086
1087	/* copy all the addresses into the udd */
1088	mc = &nctrl.udd[0];
1089	netdev_for_each_mc_addr(ha, netdev) {
1090		*mc = 0;
1091		ether_addr_copy(((u8 *)mc) + 2, ha->addr);
1092		/* no need to swap bytes */
1093		if (++mc > &nctrl.udd[mc_count])
1094			break;
1095	}
1096
1097	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1098
1099	/* Apparently, any activity in this call from the kernel has to
1100	 * be atomic. So we won't wait for response.
1101	 */
1102
1103	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1104	if (ret) {
1105		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1106			ret);
1107	}
1108
1109	liquidio_set_uc_list(netdev);
1110}
1111
1112/**
1113 * \brief Net device set_mac_address
1114 * @param netdev network device
 
1115 */
1116static int liquidio_set_mac(struct net_device *netdev, void *p)
1117{
1118	struct sockaddr *addr = (struct sockaddr *)p;
1119	struct lio *lio = GET_LIO(netdev);
1120	struct octeon_device *oct = lio->oct_dev;
1121	struct octnic_ctrl_pkt nctrl;
1122	int ret = 0;
1123
1124	if (!is_valid_ether_addr(addr->sa_data))
1125		return -EADDRNOTAVAIL;
1126
1127	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
1128		return 0;
1129
1130	if (lio->linfo.macaddr_is_admin_asgnd)
1131		return -EPERM;
1132
1133	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1134
1135	nctrl.ncmd.u64 = 0;
1136	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
1137	nctrl.ncmd.s.param1 = 0;
1138	nctrl.ncmd.s.more = 1;
1139	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1140	nctrl.netpndev = (u64)netdev;
1141
1142	nctrl.udd[0] = 0;
1143	/* The MAC Address is presented in network byte order. */
1144	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);
1145
1146	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1147	if (ret < 0) {
1148		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
1149		return -ENOMEM;
1150	}
1151
1152	if (nctrl.sc_status ==
1153	    FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) {
1154		dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n");
1155		return -EPERM;
1156	}
1157
1158	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1159	ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
1160
1161	return 0;
1162}
1163
1164static void
1165liquidio_get_stats64(struct net_device *netdev,
1166		     struct rtnl_link_stats64 *lstats)
1167{
1168	struct lio *lio = GET_LIO(netdev);
1169	struct octeon_device *oct;
1170	u64 pkts = 0, drop = 0, bytes = 0;
1171	struct oct_droq_stats *oq_stats;
1172	struct oct_iq_stats *iq_stats;
1173	int i, iq_no, oq_no;
1174
1175	oct = lio->oct_dev;
1176
1177	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1178		return;
1179
1180	for (i = 0; i < oct->num_iqs; i++) {
1181		iq_no = lio->linfo.txpciq[i].s.q_no;
1182		iq_stats = &oct->instr_queue[iq_no]->stats;
1183		pkts += iq_stats->tx_done;
1184		drop += iq_stats->tx_dropped;
1185		bytes += iq_stats->tx_tot_bytes;
1186	}
1187
1188	lstats->tx_packets = pkts;
1189	lstats->tx_bytes = bytes;
1190	lstats->tx_dropped = drop;
1191
1192	pkts = 0;
1193	drop = 0;
1194	bytes = 0;
1195
1196	for (i = 0; i < oct->num_oqs; i++) {
1197		oq_no = lio->linfo.rxpciq[i].s.q_no;
1198		oq_stats = &oct->droq[oq_no]->stats;
1199		pkts += oq_stats->rx_pkts_received;
1200		drop += (oq_stats->rx_dropped +
1201			 oq_stats->dropped_nodispatch +
1202			 oq_stats->dropped_toomany +
1203			 oq_stats->dropped_nomem);
1204		bytes += oq_stats->rx_bytes_received;
1205	}
1206
1207	lstats->rx_bytes = bytes;
1208	lstats->rx_packets = pkts;
1209	lstats->rx_dropped = drop;
1210
1211	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
1212
1213	/* detailed rx_errors: */
1214	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
1215	/* recved pkt with crc error */
1216	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
1217	/* recv'd frame alignment error */
1218	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
1219
1220	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
1221			    lstats->rx_frame_errors;
1222
1223	/* detailed tx_errors */
1224	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
1225	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
1226
1227	lstats->tx_errors = lstats->tx_aborted_errors +
1228		lstats->tx_carrier_errors;
1229}
1230
1231/**
1232 * \brief Handler for SIOCSHWTSTAMP ioctl
1233 * @param netdev network device
1234 * @param ifr interface request
1235 * @param cmd command
1236 */
1237static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
1238{
1239	struct lio *lio = GET_LIO(netdev);
1240	struct hwtstamp_config conf;
1241
1242	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
1243		return -EFAULT;
1244
1245	if (conf.flags)
1246		return -EINVAL;
1247
1248	switch (conf.tx_type) {
1249	case HWTSTAMP_TX_ON:
1250	case HWTSTAMP_TX_OFF:
1251		break;
1252	default:
1253		return -ERANGE;
1254	}
1255
1256	switch (conf.rx_filter) {
1257	case HWTSTAMP_FILTER_NONE:
1258		break;
1259	case HWTSTAMP_FILTER_ALL:
1260	case HWTSTAMP_FILTER_SOME:
1261	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1262	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1263	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1264	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1265	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1266	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1267	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1268	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1269	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1270	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1271	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1272	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1273	case HWTSTAMP_FILTER_NTP_ALL:
1274		conf.rx_filter = HWTSTAMP_FILTER_ALL;
1275		break;
1276	default:
1277		return -ERANGE;
1278	}
1279
	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1285
1286	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
1287}
1288
/**
 * liquidio_ioctl - ioctl handler
 * @netdev: network device
 * @ifr: interface request
 * @cmd: ioctl command
 */
1295static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1296{
1297	switch (cmd) {
1298	case SIOCSHWTSTAMP:
1299		return hwtstamp_ioctl(netdev, ifr);
1300	default:
1301		return -EOPNOTSUPP;
1302	}
1303}
1304
1305static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
1306{
1307	struct sk_buff *skb = (struct sk_buff *)buf;
1308	struct octnet_buf_free_info *finfo;
1309	struct oct_timestamp_resp *resp;
1310	struct octeon_soft_command *sc;
1311	struct lio *lio;
1312
1313	finfo = (struct octnet_buf_free_info *)skb->cb;
1314	lio = finfo->lio;
1315	sc = finfo->sc;
1316	oct = lio->oct_dev;
1317	resp = (struct oct_timestamp_resp *)sc->virtrptr;
1318
1319	if (status != OCTEON_REQUEST_DONE) {
1320		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
1321			CVM_CAST64(status));
1322		resp->timestamp = 0;
1323	}
1324
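	/* Response data from the firmware is in Octeon (big-endian) byte
	 * order; swap the 64-bit timestamp to host order.
	 */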
1325	octeon_swap_8B_data(&resp->timestamp, 1);
1326
1327	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
1328		struct skb_shared_hwtstamps ts;
1329		u64 ns = resp->timestamp;
1330
1331		netif_info(lio, tx_done, lio->netdev,
1332			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
1333			   skb, (unsigned long long)ns);
1334		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
1335		skb_tstamp_tx(skb, &ts);
1336	}
1337
1338	octeon_free_soft_command(oct, sc);
1339	tx_buffer_free(skb);
1340}
1341
/**
 * send_nic_timestamp_pkt - Send a data packet that will be timestamped
 * @oct: octeon device
 * @ndata: pointer to network data
 * @finfo: pointer to private network data
 * @xmit_more: more packets are pending from the stack
 */
1347static int send_nic_timestamp_pkt(struct octeon_device *oct,
1348				  struct octnic_data_pkt *ndata,
1349				  struct octnet_buf_free_info *finfo,
1350				  int xmit_more)
1351{
1352	struct octeon_soft_command *sc;
1353	int ring_doorbell;
1354	struct lio *lio;
1355	int retval;
1356	u32 len;
1357
1358	lio = finfo->lio;
1359
1360	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
1361					    sizeof(struct oct_timestamp_resp));
1362	finfo->sc = sc;
1363
1364	if (!sc) {
1365		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
1366		return IQ_SEND_FAILED;
1367	}
1368
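	/* Promote the request to its response-expecting counterpart so the
	 * firmware returns the TX timestamp in the response buffer.
	 */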
1369	if (ndata->reqtype == REQTYPE_NORESP_NET)
1370		ndata->reqtype = REQTYPE_RESP_NET;
1371	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
1372		ndata->reqtype = REQTYPE_RESP_NET_SG;
1373
1374	sc->callback = handle_timestamp;
1375	sc->callback_arg = finfo->skb;
1376	sc->iq_no = ndata->q_no;
1377
1378	len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;
1379
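	/* Ring the doorbell only when the stack has no further packets
	 * pending (xmit_more clear), batching the MMIO write.
	 */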
1380	ring_doorbell = !xmit_more;
1381
1382	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
1383				     sc, len, ndata->reqtype);
1384
1385	if (retval == IQ_SEND_FAILED) {
1386		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
1387			retval);
1388		octeon_free_soft_command(oct, sc);
1389	} else {
1390		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
1391	}
1392
1393	return retval;
1394}
1395
/**
 * liquidio_xmit - Transmit network packets to the Octeon interface
 * @skb: skbuff struct to be passed to the network layer
 * @netdev: pointer to network device
 *
 * Return: whether the packet was transmitted to the device okay or not
 *         (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
1402static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
1403{
1404	struct octnet_buf_free_info *finfo;
1405	union octnic_cmd_setup cmdsetup;
1406	struct octnic_data_pkt ndata;
1407	struct octeon_instr_irh *irh;
1408	struct oct_iq_stats *stats;
1409	struct octeon_device *oct;
1410	int q_idx = 0, iq_no = 0;
1411	union tx_info *tx_info;
1412	int xmit_more = 0;
1413	struct lio *lio;
1414	int status = 0;
1415	u64 dptr = 0;
1416	u32 tag = 0;
1417	int j;
1418
1419	lio = GET_LIO(netdev);
1420	oct = lio->oct_dev;
1421
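	/* Pick the transmit queue for this skb; the queue index also
	 * serves as the tag in the command prepared below.
	 */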
1422	q_idx = skb_iq(lio->oct_dev, skb);
1423	tag = q_idx;
1424	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
1425
1426	stats = &oct->instr_queue[iq_no]->stats;
1427
1428	/* Check for all conditions in which the current packet cannot be
1429	 * transmitted.
1430	 */
1431	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
1432	    (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
1433		netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
1434			   lio->linfo.link.s.link_up);
1435		goto lio_xmit_failed;
1436	}
1437
1438	/* Use space in skb->cb to store info used to unmap and
1439	 * free the buffers.
1440	 */
1441	finfo = (struct octnet_buf_free_info *)skb->cb;
1442	finfo->lio = lio;
1443	finfo->skb = skb;
1444	finfo->sc = NULL;
1445
1446	/* Prepare the attributes for the data to be passed to OSI. */
1447	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
1448
1449	ndata.buf = finfo;
1450
1451	ndata.q_no = iq_no;
1452
1453	if (octnet_iq_is_full(oct, ndata.q_no)) {
1454		/* defer sending if queue is full */
1455		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
1456			   ndata.q_no);
1457		stats->tx_iq_busy++;
1458		return NETDEV_TX_BUSY;
1459	}
1460
1461	ndata.datasize = skb->len;
1462
1463	cmdsetup.u64 = 0;
1464	cmdsetup.s.iq_no = iq_no;
1465
1466	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1467		if (skb->encapsulation) {
1468			cmdsetup.s.tnl_csum = 1;
1469			stats->tx_vxlan++;
1470		} else {
1471			cmdsetup.s.transport_csum = 1;
1472		}
1473	}
1474	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1475		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1476		cmdsetup.s.timestamp = 1;
1477	}
1478
1479	if (!skb_shinfo(skb)->nr_frags) {
1480		cmdsetup.s.u.datasize = skb->len;
1481		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
		/* Map the linear skb data for DMA to the device */
1483		dptr = dma_map_single(&oct->pci_dev->dev,
1484				      skb->data,
1485				      skb->len,
1486				      DMA_TO_DEVICE);
1487		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
1488			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
1489				__func__);
1490			return NETDEV_TX_BUSY;
1491		}
1492
1493		ndata.cmd.cmd3.dptr = dptr;
1494		finfo->dptr = dptr;
1495		ndata.reqtype = REQTYPE_NORESP_NET;
1496
1497	} else {
1498		skb_frag_t *frag;
1499		struct octnic_gather *g;
1500		int i, frags;
1501
1502		spin_lock(&lio->glist_lock[q_idx]);
1503		g = (struct octnic_gather *)
1504			lio_list_delete_head(&lio->glist[q_idx]);
1505		spin_unlock(&lio->glist_lock[q_idx]);
1506
1507		if (!g) {
1508			netif_info(lio, tx_err, lio->netdev,
1509				   "Transmit scatter gather: glist null!\n");
1510			goto lio_xmit_failed;
1511		}
1512
1513		cmdsetup.s.gather = 1;
1514		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
1515		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1516
1517		memset(g->sg, 0, g->sg_size);
1518
1519		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
1520						 skb->data,
1521						 (skb->len - skb->data_len),
1522						 DMA_TO_DEVICE);
1523		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
1524			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
1525				__func__);
1526			return NETDEV_TX_BUSY;
1527		}
1528		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
1529
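		/* Each gather-list entry packs four pointer/size slots, so
		 * i >> 2 selects the entry and i & 3 the slot within it.
		 */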
1530		frags = skb_shinfo(skb)->nr_frags;
1531		i = 1;
1532		while (frags--) {
1533			frag = &skb_shinfo(skb)->frags[i - 1];
1534
1535			g->sg[(i >> 2)].ptr[(i & 3)] =
1536				skb_frag_dma_map(&oct->pci_dev->dev,
1537						 frag, 0, skb_frag_size(frag),
1538						 DMA_TO_DEVICE);
1539			if (dma_mapping_error(&oct->pci_dev->dev,
1540					      g->sg[i >> 2].ptr[i & 3])) {
1541				dma_unmap_single(&oct->pci_dev->dev,
1542						 g->sg[0].ptr[0],
1543						 skb->len - skb->data_len,
1544						 DMA_TO_DEVICE);
1545				for (j = 1; j < i; j++) {
1546					frag = &skb_shinfo(skb)->frags[j - 1];
1547					dma_unmap_page(&oct->pci_dev->dev,
1548						       g->sg[j >> 2].ptr[j & 3],
1549						       skb_frag_size(frag),
1550						       DMA_TO_DEVICE);
1551				}
1552				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
1553					__func__);
1554				return NETDEV_TX_BUSY;
1555			}
1556
1557			add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
1558				    (i & 3));
1559			i++;
1560		}
1561
1562		dptr = g->sg_dma_ptr;
1563
1564		ndata.cmd.cmd3.dptr = dptr;
1565		finfo->dptr = dptr;
1566		finfo->g = g;
1567
1568		ndata.reqtype = REQTYPE_NORESP_NET_SG;
1569	}
1570
1571	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
1572	tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
1573
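	/* tx_info overlays the opcode-specific word (ossp[0]) of the
	 * command; for TSO the GSO size and segment count are passed to
	 * the firmware there.
	 */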
1574	if (skb_shinfo(skb)->gso_size) {
1575		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
1576		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
1577	}
1578
1579	/* HW insert VLAN tag */
1580	if (skb_vlan_tag_present(skb)) {
1581		irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
1582		irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
1583	}
1584
1585	xmit_more = netdev_xmit_more();
1586
1587	if (unlikely(cmdsetup.s.timestamp))
1588		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
1589	else
1590		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
1591	if (status == IQ_SEND_FAILED)
1592		goto lio_xmit_failed;
1593
1594	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
1595
1596	if (status == IQ_SEND_STOP) {
1597		dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
1598			iq_no);
1599		netif_stop_subqueue(netdev, q_idx);
1600	}
1601
1602	netif_trans_update(netdev);
1603
1604	if (tx_info->s.gso_segs)
1605		stats->tx_done += tx_info->s.gso_segs;
1606	else
1607		stats->tx_done++;
1608	stats->tx_tot_bytes += ndata.datasize;
1609
1610	return NETDEV_TX_OK;
1611
1612lio_xmit_failed:
1613	stats->tx_dropped++;
1614	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
1615		   iq_no, stats->tx_dropped);
1616	if (dptr)
1617		dma_unmap_single(&oct->pci_dev->dev, dptr,
1618				 ndata.datasize, DMA_TO_DEVICE);
1619
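	/* The doorbell may have been skipped for earlier xmit_more packets;
	 * ring it so commands already queued on this IQ still go out.
	 */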
1620	octeon_ring_doorbell_locked(oct, iq_no);
1621
1622	tx_buffer_free(skb);
1623	return NETDEV_TX_OK;
1624}
1625
/**
 * liquidio_tx_timeout - Network device Tx timeout
 * @netdev: pointer to network device
 * @txqueue: index of the hung transmit queue
 */
1629static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1630{
1631	struct lio *lio;
1632
1633	lio = GET_LIO(netdev);
1634
1635	netif_info(lio, tx_err, lio->netdev,
1636		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
1637		   netdev->stats.tx_dropped);
1638	netif_trans_update(netdev);
1639	wake_txqs(netdev);
1640}
1641
1642static int
1643liquidio_vlan_rx_add_vid(struct net_device *netdev,
1644			 __be16 proto __attribute__((unused)), u16 vid)
1645{
1646	struct lio *lio = GET_LIO(netdev);
1647	struct octeon_device *oct = lio->oct_dev;
1648	struct octnic_ctrl_pkt nctrl;
1649	int ret = 0;
1650
1651	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1652
1653	nctrl.ncmd.u64 = 0;
1654	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
1655	nctrl.ncmd.s.param1 = vid;
1656	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1657	nctrl.netpndev = (u64)netdev;
1658	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1659
1660	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1661	if (ret) {
1662		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
1663			ret);
1664		return -EPERM;
1665	}
1666
1667	return 0;
1668}
1669
1670static int
1671liquidio_vlan_rx_kill_vid(struct net_device *netdev,
1672			  __be16 proto __attribute__((unused)), u16 vid)
1673{
1674	struct lio *lio = GET_LIO(netdev);
1675	struct octeon_device *oct = lio->oct_dev;
1676	struct octnic_ctrl_pkt nctrl;
1677	int ret = 0;
1678
1679	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1680
1681	nctrl.ncmd.u64 = 0;
1682	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
1683	nctrl.ncmd.s.param1 = vid;
1684	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1685	nctrl.netpndev = (u64)netdev;
1686	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1687
1688	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1689	if (ret) {
1690		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
1691			ret);
1692		if (ret > 0)
1693			ret = -EIO;
1694	}
1695	return ret;
1696}
1697
/**
 * liquidio_set_rxcsum_command - Send command to enable/disable RX checksum offload
 * @netdev: pointer to network device
 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL
 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE or OCTNET_CMD_RXCSUM_DISABLE
 *
 * Return: 0 on success, negative errno on failure.
 */
1705static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
1706				       u8 rx_cmd)
1707{
1708	struct lio *lio = GET_LIO(netdev);
1709	struct octeon_device *oct = lio->oct_dev;
1710	struct octnic_ctrl_pkt nctrl;
1711	int ret = 0;
1712
1713	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1714
1715	nctrl.ncmd.u64 = 0;
1716	nctrl.ncmd.s.cmd = command;
1717	nctrl.ncmd.s.param1 = rx_cmd;
1718	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1719	nctrl.netpndev = (u64)netdev;
1720	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1721
1722	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1723	if (ret) {
1724		dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
1725			ret);
1726		if (ret > 0)
1727			ret = -EIO;
1728	}
1729	return ret;
1730}
1731
/**
 * liquidio_vxlan_port_command - Send command to add/delete VxLAN UDP port to firmware
 * @netdev: pointer to network device
 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG
 * @vxlan_port: VxLAN port to be added or deleted
 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD or OCTNET_CMD_VXLAN_PORT_DEL
 *
 * Return: 0 on success, negative errno on failure.
 */
1740static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
1741				       u16 vxlan_port, u8 vxlan_cmd_bit)
1742{
1743	struct lio *lio = GET_LIO(netdev);
1744	struct octeon_device *oct = lio->oct_dev;
1745	struct octnic_ctrl_pkt nctrl;
1746	int ret = 0;
1747
1748	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1749
1750	nctrl.ncmd.u64 = 0;
1751	nctrl.ncmd.s.cmd = command;
1752	nctrl.ncmd.s.more = vxlan_cmd_bit;
1753	nctrl.ncmd.s.param1 = vxlan_port;
1754	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1755	nctrl.netpndev = (u64)netdev;
1756	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1757
1758	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1759	if (ret) {
1760		dev_err(&oct->pci_dev->dev,
1761			"DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
1762			ret);
1763		if (ret > 0)
1764			ret = -EIO;
1765	}
1766	return ret;
1767}
1768
1769static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
1770					unsigned int table, unsigned int entry,
1771					struct udp_tunnel_info *ti)
1772{
1773	return liquidio_vxlan_port_command(netdev,
1774					   OCTNET_CMD_VXLAN_PORT_CONFIG,
1775					   htons(ti->port),
1776					   OCTNET_CMD_VXLAN_PORT_ADD);
1777}
1778
1779static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
1780					  unsigned int table,
1781					  unsigned int entry,
1782					  struct udp_tunnel_info *ti)
1783{
1784	return liquidio_vxlan_port_command(netdev,
1785					   OCTNET_CMD_VXLAN_PORT_CONFIG,
1786					   htons(ti->port),
1787					   OCTNET_CMD_VXLAN_PORT_DEL);
1788}
1789
1790static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
1791	.set_port	= liquidio_udp_tunnel_set_port,
1792	.unset_port	= liquidio_udp_tunnel_unset_port,
1793	.tables		= {
1794		{ .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
1795	},
1796};
1797
/**
 * liquidio_fix_features - Net device fix features
 * @netdev: pointer to network device
 * @request: features requested
 *
 * Return: updated features list
 */
1803static netdev_features_t liquidio_fix_features(struct net_device *netdev,
1804					       netdev_features_t request)
1805{
1806	struct lio *lio = netdev_priv(netdev);
1807
1808	if ((request & NETIF_F_RXCSUM) &&
1809	    !(lio->dev_capability & NETIF_F_RXCSUM))
1810		request &= ~NETIF_F_RXCSUM;
1811
1812	if ((request & NETIF_F_HW_CSUM) &&
1813	    !(lio->dev_capability & NETIF_F_HW_CSUM))
1814		request &= ~NETIF_F_HW_CSUM;
1815
1816	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
1817		request &= ~NETIF_F_TSO;
1818
1819	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
1820		request &= ~NETIF_F_TSO6;
1821
1822	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
1823		request &= ~NETIF_F_LRO;
1824
1825	/* Disable LRO if RXCSUM is off */
1826	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
1827	    (lio->dev_capability & NETIF_F_LRO))
1828		request &= ~NETIF_F_LRO;
1829
1830	return request;
1831}
1832
/**
 * liquidio_set_features - Net device set features
 * @netdev: pointer to network device
 * @features: features to enable/disable
 */
1837static int liquidio_set_features(struct net_device *netdev,
1838				 netdev_features_t features)
1839{
1840	struct lio *lio = netdev_priv(netdev);
1841
1842	if (!((netdev->features ^ features) & NETIF_F_LRO))
1843		return 0;
1844
1845	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
1846		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
1847				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1848	else if (!(features & NETIF_F_LRO) &&
1849		 (lio->dev_capability & NETIF_F_LRO))
1850		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
1851				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1852	if (!(netdev->features & NETIF_F_RXCSUM) &&
1853	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1854	    (features & NETIF_F_RXCSUM))
1855		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1856					    OCTNET_CMD_RXCSUM_ENABLE);
1857	else if ((netdev->features & NETIF_F_RXCSUM) &&
1858		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1859		 !(features & NETIF_F_RXCSUM))
1860		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1861					    OCTNET_CMD_RXCSUM_DISABLE);
1862
1863	return 0;
1864}
1865
1866static const struct net_device_ops lionetdevops = {
1867	.ndo_open		= liquidio_open,
1868	.ndo_stop		= liquidio_stop,
1869	.ndo_start_xmit		= liquidio_xmit,
1870	.ndo_get_stats64	= liquidio_get_stats64,
1871	.ndo_set_mac_address	= liquidio_set_mac,
1872	.ndo_set_rx_mode	= liquidio_set_mcast_list,
1873	.ndo_tx_timeout		= liquidio_tx_timeout,
1874	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
1875	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
1876	.ndo_change_mtu		= liquidio_change_mtu,
	.ndo_eth_ioctl		= liquidio_ioctl,
1878	.ndo_fix_features	= liquidio_fix_features,
1879	.ndo_set_features	= liquidio_set_features,
1882};
1883
1884static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
1885{
1886	struct octeon_device *oct = (struct octeon_device *)buf;
1887	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
1888	union oct_link_status *ls;
1889	int gmxport = 0;
1890	int i;
1891
1892	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
1893		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1894			recv_pkt->buffer_size[0],
1895			recv_pkt->rh.r_nic_info.gmxport);
1896		goto nic_info_err;
1897	}
1898
1899	gmxport = recv_pkt->rh.r_nic_info.gmxport;
1900	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
1901		OCT_DROQ_INFO_SIZE);
1902
1903	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
1904
1905	for (i = 0; i < oct->ifcount; i++) {
1906		if (oct->props[i].gmxport == gmxport) {
1907			update_link_status(oct->props[i].netdev, ls);
1908			break;
1909		}
1910	}
1911
1912nic_info_err:
1913	for (i = 0; i < recv_pkt->buffer_count; i++)
1914		recv_buffer_free(recv_pkt->buffer_ptr[i]);
1915	octeon_free_recv_info(recv_info);
1916	return 0;
1917}
1918
/**
 * setup_nic_devices - Setup network interfaces
 * @octeon_dev: octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
1927static int setup_nic_devices(struct octeon_device *octeon_dev)
1928{
1929	int retval, num_iqueues, num_oqueues;
1930	u32 resp_size, data_size;
1931	struct liquidio_if_cfg_resp *resp;
1932	struct octeon_soft_command *sc;
1933	union oct_nic_if_cfg if_cfg;
1934	struct octdev_props *props;
1935	struct net_device *netdev;
1936	struct lio_version *vdata;
1937	struct lio *lio = NULL;
1938	u8 mac[ETH_ALEN], i, j;
1939	u32 ifidx_or_pfnum;
1940
1941	ifidx_or_pfnum = octeon_dev->pf_num;
1942
1943	/* This is to handle link status changes */
1944	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
1945				    lio_nic_info, octeon_dev);
1946
1947	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
1948	 * They are handled directly.
1949	 */
1950	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
1951					free_netbuf);
1952
1953	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
1954					free_netsgbuf);
1955
1956	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
1957					free_netsgbuf_with_resp);
1958
1959	for (i = 0; i < octeon_dev->ifcount; i++) {
1960		resp_size = sizeof(struct liquidio_if_cfg_resp);
1961		data_size = sizeof(struct lio_version);
1962		sc = (struct octeon_soft_command *)
1963			octeon_alloc_soft_command(octeon_dev, data_size,
1964						  resp_size, 0);
1965		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1966		vdata = (struct lio_version *)sc->virtdptr;
1967
1968		*((u64 *)vdata) = 0;
1969		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
1970		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
1971		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
1972
1973		if_cfg.u64 = 0;
1974
1975		if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
1976		if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
1977		if_cfg.s.base_queue = 0;
1978
1979		sc->iq_no = 0;
1980
1981		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
1982					    OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
1983					    0);
1984
1985		init_completion(&sc->complete);
1986		sc->sc_status = OCTEON_REQUEST_PENDING;
1987
1988		retval = octeon_send_soft_command(octeon_dev, sc);
1989		if (retval == IQ_SEND_FAILED) {
1990			dev_err(&octeon_dev->pci_dev->dev,
1991				"iq/oq config failed status: %x\n", retval);
1992			/* Soft instr is freed by driver in case of failure. */
1993			octeon_free_soft_command(octeon_dev, sc);
			return -EIO;
1995		}
1996
1997		/* Sleep on a wait queue till the cond flag indicates that the
1998		 * response arrived or timed-out.
1999		 */
2000		retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
2001		if (retval)
2002			return retval;
2003
2004		retval = resp->status;
2005		if (retval) {
2006			dev_err(&octeon_dev->pci_dev->dev,
2007				"iq/oq config failed, retval = %d\n", retval);
2008			WRITE_ONCE(sc->caller_is_done, true);
2009			return -EIO;
2010		}
2011
2012		snprintf(octeon_dev->fw_info.liquidio_firmware_version,
2013			 32, "%s",
2014			 resp->cfg_info.liquidio_firmware_version);
2015
2016		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
2017				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
2018
2019		num_iqueues = hweight64(resp->cfg_info.iqmask);
2020		num_oqueues = hweight64(resp->cfg_info.oqmask);
2021
2022		if (!(num_iqueues) || !(num_oqueues)) {
2023			dev_err(&octeon_dev->pci_dev->dev,
2024				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
2025				resp->cfg_info.iqmask, resp->cfg_info.oqmask);
2026			WRITE_ONCE(sc->caller_is_done, true);
2027			goto setup_nic_dev_done;
2028		}
2029		dev_dbg(&octeon_dev->pci_dev->dev,
2030			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
2031			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
2032			num_iqueues, num_oqueues);
2033
2034		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
2035
2036		if (!netdev) {
2037			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
2038			WRITE_ONCE(sc->caller_is_done, true);
2039			goto setup_nic_dev_done;
2040		}
2041
2042		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
2043
2044		/* Associate the routines that will handle different
2045		 * netdev tasks.
2046		 */
2047		netdev->netdev_ops = &lionetdevops;
2048
2049		lio = GET_LIO(netdev);
2050
2051		memset(lio, 0, sizeof(struct lio));
2052
2053		lio->ifidx = ifidx_or_pfnum;
2054
2055		props = &octeon_dev->props[i];
2056		props->gmxport = resp->cfg_info.linfo.gmxport;
2057		props->netdev = netdev;
2058
2059		lio->linfo.num_rxpciq = num_oqueues;
2060		lio->linfo.num_txpciq = num_iqueues;
2061
2062		for (j = 0; j < num_oqueues; j++) {
2063			lio->linfo.rxpciq[j].u64 =
2064			    resp->cfg_info.linfo.rxpciq[j].u64;
2065		}
2066		for (j = 0; j < num_iqueues; j++) {
2067			lio->linfo.txpciq[j].u64 =
2068			    resp->cfg_info.linfo.txpciq[j].u64;
2069		}
2070
2071		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
2072		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
2073		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
2074		lio->linfo.macaddr_is_admin_asgnd =
2075			resp->cfg_info.linfo.macaddr_is_admin_asgnd;
2076		lio->linfo.macaddr_spoofchk =
2077			resp->cfg_info.linfo.macaddr_spoofchk;
2078
2079		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2080
2081		lio->dev_capability = NETIF_F_HIGHDMA
2082				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2083				      | NETIF_F_SG | NETIF_F_RXCSUM
2084				      | NETIF_F_TSO | NETIF_F_TSO6
2085				      | NETIF_F_GRO
2086				      | NETIF_F_LRO;
		netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
2088
2089		/* Copy of transmit encapsulation capabilities:
2090		 * TSO, TSO6, Checksums for this device
2091		 */
2092		lio->enc_dev_capability = NETIF_F_IP_CSUM
2093					  | NETIF_F_IPV6_CSUM
2094					  | NETIF_F_GSO_UDP_TUNNEL
2095					  | NETIF_F_HW_CSUM | NETIF_F_SG
2096					  | NETIF_F_RXCSUM
2097					  | NETIF_F_TSO | NETIF_F_TSO6
2098					  | NETIF_F_LRO;
2099
2100		netdev->hw_enc_features =
2101		    (lio->enc_dev_capability & ~NETIF_F_LRO);
2102		netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
2103
2104		netdev->vlan_features = lio->dev_capability;
2105		/* Add any unchangeable hw features */
2106		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
2107				       NETIF_F_HW_VLAN_CTAG_RX |
2108				       NETIF_F_HW_VLAN_CTAG_TX;
2109
2110		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2111
2112		netdev->hw_features = lio->dev_capability;
2113		netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2114
2115		/* MTU range: 68 - 16000 */
2116		netdev->min_mtu = LIO_MIN_MTU_SIZE;
2117		netdev->max_mtu = LIO_MAX_MTU_SIZE;
2118
2119		WRITE_ONCE(sc->caller_is_done, true);
2120
		/* Point to the properties of the octeon device to which this
		 * interface belongs.
		 */
2124		lio->oct_dev = octeon_dev;
2125		lio->octprops = props;
2126		lio->netdev = netdev;
2127
2128		dev_dbg(&octeon_dev->pci_dev->dev,
2129			"if%d gmx: %d hw_addr: 0x%llx\n", i,
2130			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2131
2132		/* 64-bit swap required on LE machines */
2133		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
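		/* The MAC occupies the low six bytes of the 8-byte hw_addr
		 * field, hence the 2-byte offset below.
		 */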
2134		for (j = 0; j < ETH_ALEN; j++)
2135			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2136
		/* Copy MAC Address to OS network device structure */
		eth_hw_addr_set(netdev, mac);
2139
2140		if (liquidio_setup_io_queues(octeon_dev, i,
2141					     lio->linfo.num_txpciq,
2142					     lio->linfo.num_rxpciq)) {
2143			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
2144			goto setup_nic_dev_free;
2145		}
2146
2147		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2148
2149		/* For VFs, enable Octeon device interrupts here,
2150		 * as this is contingent upon IO queue setup
2151		 */
2152		octeon_dev->fn_list.enable_interrupt(octeon_dev,
2153						     OCTEON_ALL_INTR);
2154
		/* By default all interfaces on a single Octeon use the same
		 * tx and rx queues.
		 */
2158		lio->txq = lio->linfo.txpciq[0].s.q_no;
2159		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2160
2161		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2162		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2163
2164		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
2165			dev_err(&octeon_dev->pci_dev->dev,
2166				"Gather list allocation failed\n");
2167			goto setup_nic_dev_free;
2168		}
2169
2170		/* Register ethtool support */
2171		liquidio_set_ethtool_ops(netdev);
2172		if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2173			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
2174		else
2175			octeon_dev->priv_flags = 0x0;
2176
2177		if (netdev->features & NETIF_F_LRO)
2178			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2179					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2180
2181		if (setup_link_status_change_wq(netdev))
2182			goto setup_nic_dev_free;
2183
2184		if (setup_rx_oom_poll_fn(netdev))
2185			goto setup_nic_dev_free;
2186
2187		/* Register the network device with the OS */
2188		if (register_netdev(netdev)) {
2189			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
2190			goto setup_nic_dev_free;
2191		}
2192
2193		dev_dbg(&octeon_dev->pci_dev->dev,
2194			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
2195			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2196		netif_carrier_off(netdev);
2197		lio->link_changes++;
2198
2199		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
2200
2201		/* Sending command to firmware to enable Rx checksum offload
2202		 * by default at the time of setup of Liquidio driver for
2203		 * this device
2204		 */
2205		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2206					    OCTNET_CMD_RXCSUM_ENABLE);
2207		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
2208				     OCTNET_CMD_TXCSUM_ENABLE);
2209
2210		dev_dbg(&octeon_dev->pci_dev->dev,
2211			"NIC ifidx:%d Setup successful\n", i);
2212
2213		octeon_dev->no_speed_setting = 1;
2214	}
2215
2216	return 0;
2217
2218setup_nic_dev_free:
2219
2220	while (i--) {
2221		dev_err(&octeon_dev->pci_dev->dev,
2222			"NIC ifidx:%d Setup failed\n", i);
2223		liquidio_destroy_nic_device(octeon_dev, i);
2224	}
2225
2226setup_nic_dev_done:
2227
2228	return -ENODEV;
2229}
2230
/**
 * liquidio_init_nic_module - Initialize the NIC
 * @oct: octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running.
 */
2238static int liquidio_init_nic_module(struct octeon_device *oct)
2239{
2240	int num_nic_ports = 1;
2241	int i, retval = 0;
2242
2243	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
2244
	/* Only the default IQ and OQ were initialized; initialize the rest
	 * as well, and run the port_config command for each port.
	 */
2248	oct->ifcount = num_nic_ports;
2249	memset(oct->props, 0,
2250	       sizeof(struct octdev_props) * num_nic_ports);
2251
2252	for (i = 0; i < MAX_OCTEON_LINKS; i++)
2253		oct->props[i].gmxport = -1;
2254
2255	retval = setup_nic_devices(oct);
2256	if (retval) {
2257		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
2258		goto octnet_init_failure;
2259	}
2260
2261	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
2262
2263	return retval;
2264
2265octnet_init_failure:
2266
2267	oct->ifcount = 0;
2268
2269	return retval;
2270}
2271
/**
 * octeon_device_init - Device initialization for each Octeon device that is probed
 * @oct: octeon device
 */
2276static int octeon_device_init(struct octeon_device *oct)
2277{
2278	u32 rev_id;
2279	int j;
2280
2281	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
2282
2283	/* Enable access to the octeon device and make its DMA capability
2284	 * known to the OS.
2285	 */
2286	if (octeon_pci_os_setup(oct))
2287		return 1;
2288	atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
2289
2290	oct->chip_id = OCTEON_CN23XX_VF_VID;
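	/* The PCI config dword at offset 8 carries the class code with the
	 * chip revision ID in its low byte.
	 */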
2291	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
2292	oct->rev_id = rev_id & 0xff;
2293
2294	if (cn23xx_setup_octeon_vf_device(oct))
2295		return 1;
2296
2297	atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
2298
2299	oct->app_mode = CVM_DRV_NIC_APP;
2300
2301	/* Initialize the dispatch mechanism used to push packets arriving on
2302	 * Octeon Output queues.
2303	 */
2304	if (octeon_init_dispatch_list(oct))
2305		return 1;
2306
2307	atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
2308
2309	if (octeon_set_io_queues_off(oct)) {
2310		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
2311		return 1;
2312	}
2313
2314	if (oct->fn_list.setup_device_regs(oct)) {
2315		dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
2316		return 1;
2317	}
2318
2319	/* Initialize soft command buffer pool */
2320	if (octeon_setup_sc_buffer_pool(oct)) {
2321		dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
2322		return 1;
2323	}
2324	atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
2325
2326	/* Setup the data structures that manage this Octeon's Input queues. */
2327	if (octeon_setup_instr_queues(oct)) {
2328		dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
2329		return 1;
2330	}
2331	atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
2332
2333	/* Initialize lists to manage the requests of different types that
2334	 * arrive from user & kernel applications for this octeon device.
2335	 */
2336	if (octeon_setup_response_list(oct)) {
2337		dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
2338		return 1;
2339	}
2340	atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
2341
2342	if (octeon_setup_output_queues(oct)) {
2343		dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
2344		return 1;
2345	}
2346	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
2347
2348	if (oct->fn_list.setup_mbox(oct)) {
2349		dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
2350		return 1;
2351	}
2352	atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
2353
2354	if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
2355		dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
2356		return 1;
2357	}
2358	atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
2359
2360	dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
2361		 oct->sriov_info.rings_per_vf);
2362
	/* Setup the interrupt handler and record the INT SUM register address */
2364	if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
2365		return 1;
2366
2367	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
2368
2369	/* ***************************************************************
2370	 * The interrupts need to be enabled for the PF<-->VF handshake.
2371	 * They are [re]-enabled after the PF<-->VF handshake so that the
2372	 * correct OQ tick value is used (i.e. the value retrieved from
2373	 * the PF as part of the handshake).
2374	 */
2375
2376	/* Enable Octeon device interrupts */
2377	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2378
2379	if (cn23xx_octeon_pfvf_handshake(oct))
2380		return 1;
2381
2382	/* Here we [re]-enable the interrupts so that the correct OQ tick value
2383	 * is used (i.e. the value that was retrieved during the handshake)
2384	 */
2385
2386	/* Enable Octeon device interrupts */
2387	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2388	/* *************************************************************** */
2389
2390	/* Enable the input and output queues for this Octeon device */
2391	if (oct->fn_list.enable_io_queues(oct)) {
2392		dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
2393		return 1;
2394	}
2395
2396	atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
2397
2398	atomic_set(&oct->status, OCT_DEV_HOST_OK);
2399
2400	/* Send Credit for Octeon Output queues. Credits are always sent after
2401	 * the output queue is enabled.
2402	 */
2403	for (j = 0; j < oct->num_oqs; j++)
2404		writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
2405
2406	/* Packets can start arriving on the output queues from this point. */
2407
2408	atomic_set(&oct->status, OCT_DEV_CORE_OK);
2409
2410	atomic_set(&oct->status, OCT_DEV_RUNNING);
2411
2412	if (liquidio_init_nic_module(oct))
2413		return 1;
2414
2415	return 0;
2416}
2417
2418static int __init liquidio_vf_init(void)
2419{
2420	octeon_init_device_list(0);
2421	return pci_register_driver(&liquidio_vf_pci_driver);
2422}
2423
2424static void __exit liquidio_vf_exit(void)
2425{
2426	pci_unregister_driver(&liquidio_vf_pci_driver);
2427
2428	pr_info("LiquidIO_VF network module is now unloaded\n");
2429}
2430
2431module_init(liquidio_vf_init);
2432module_exit(liquidio_vf_exit);