   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/**************************************************************************/
   3/*                                                                        */
   4/*  IBM System i and System p Virtual NIC Device Driver                   */
   5/*  Copyright (C) 2014 IBM Corp.                                          */
   6/*  Santiago Leon (santi_leon@yahoo.com)                                  */
   7/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
   8/*  John Allen (jallen@linux.vnet.ibm.com)                                */
   9/*                                                                        */
  10/*                                                                        */
  11/* This module contains the implementation of a virtual ethernet device   */
  12/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
  13/* option of the RS/6000 Platform Architecture to interface with virtual  */
  14/* ethernet NICs that are presented to the partition by the hypervisor.   */
   15/*                                                                        */
  16/* Messages are passed between the VNIC driver and the VNIC server using  */
  17/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
  18/* issue and receive commands that initiate communication with the server */
  19/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
  20/* are used by the driver to notify the server that a packet is           */
  21/* ready for transmission or that a buffer has been added to receive a    */
  22/* packet. Subsequently, sCRQs are used by the server to notify the       */
  23/* driver that a packet transmission has been completed or that a packet  */
  24/* has been received and placed in a waiting buffer.                      */
  25/*                                                                        */
  26/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
  27/* which skbs are DMA mapped and immediately unmapped when the transmit   */
  28/* or receive has been completed, the VNIC driver is required to use      */
  29/* "long term mapping". This entails that large, continuous DMA mapped    */
  30/* buffers are allocated on driver initialization and these buffers are   */
  31/* then continuously reused to pass skbs to and from the VNIC server.     */
  32/*                                                                        */
  33/**************************************************************************/
  34
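/* Illustrative sketch only, not part of the driver: the "long term
 * mapping" lifecycle described above amounts to mapping once, reusing the
 * buffer for every packet, and unmapping once at teardown. With
 * hypothetical helper names:
 *
 *	buf = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
 *	register_map_with_server(dma_addr, size, map_id);   (once, at init)
 *	while (running)
 *		reuse buf for each tx/rx skb copy
 *	unregister_map_with_server(map_id);                 (at teardown)
 *	dma_free_coherent(dev, size, buf, dma_addr);
 *
 * Under a conventional on-the-fly scheme, dma_map_single() and
 * dma_unmap_single() would instead bracket every individual transmit or
 * receive.
 */
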
  35#include <linux/module.h>
  36#include <linux/moduleparam.h>
  37#include <linux/types.h>
  38#include <linux/errno.h>
  39#include <linux/completion.h>
  40#include <linux/ioport.h>
  41#include <linux/dma-mapping.h>
  42#include <linux/kernel.h>
  43#include <linux/netdevice.h>
  44#include <linux/etherdevice.h>
  45#include <linux/skbuff.h>
  46#include <linux/init.h>
  47#include <linux/delay.h>
  48#include <linux/mm.h>
  49#include <linux/ethtool.h>
  50#include <linux/proc_fs.h>
  51#include <linux/if_arp.h>
  52#include <linux/in.h>
  53#include <linux/ip.h>
  54#include <linux/ipv6.h>
  55#include <linux/irq.h>
  56#include <linux/kthread.h>
  57#include <linux/seq_file.h>
  58#include <linux/interrupt.h>
  59#include <net/net_namespace.h>
  60#include <asm/hvcall.h>
  61#include <linux/atomic.h>
  62#include <asm/vio.h>
  63#include <asm/iommu.h>
  64#include <linux/uaccess.h>
  65#include <asm/firmware.h>
  66#include <linux/workqueue.h>
  67#include <linux/if_vlan.h>
  68#include <linux/utsname.h>
  69
  70#include "ibmvnic.h"
  71
  72static const char ibmvnic_driver_name[] = "ibmvnic";
  73static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
  74
  75MODULE_AUTHOR("Santiago Leon");
  76MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
  77MODULE_LICENSE("GPL");
  78MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
  79
  80static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
  81static void release_sub_crqs(struct ibmvnic_adapter *, bool);
  82static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
  83static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
  84static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
  85static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
  86static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
  87static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
  88static int enable_scrq_irq(struct ibmvnic_adapter *,
  89			   struct ibmvnic_sub_crq_queue *);
  90static int disable_scrq_irq(struct ibmvnic_adapter *,
  91			    struct ibmvnic_sub_crq_queue *);
  92static int pending_scrq(struct ibmvnic_adapter *,
  93			struct ibmvnic_sub_crq_queue *);
  94static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
  95					struct ibmvnic_sub_crq_queue *);
  96static int ibmvnic_poll(struct napi_struct *napi, int data);
  97static void send_query_map(struct ibmvnic_adapter *adapter);
  98static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
  99static int send_request_unmap(struct ibmvnic_adapter *, u8);
 100static int send_login(struct ibmvnic_adapter *adapter);
 101static void send_query_cap(struct ibmvnic_adapter *adapter);
 102static int init_sub_crqs(struct ibmvnic_adapter *);
 103static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
 104static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
 105static void release_crq_queue(struct ibmvnic_adapter *);
 106static int __ibmvnic_set_mac(struct net_device *, u8 *);
 107static int init_crq_queue(struct ibmvnic_adapter *adapter);
 108static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
 109static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
 110					 struct ibmvnic_sub_crq_queue *tx_scrq);
 111
 112struct ibmvnic_stat {
 113	char name[ETH_GSTRING_LEN];
 114	int offset;
 115};
 116
 117#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
 118			     offsetof(struct ibmvnic_statistics, stat))
 119#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
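
/* Sketch of intended use (hypothetical caller, not the full ethtool
 * handler): the table below can be walked generically, reading each
 * counter at its saved byte offset:
 *
 *	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
 *		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
 *
 * The counters themselves are filled in by firmware and may need a
 * be64_to_cpu() conversion before being reported.
 */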
 120
 121static const struct ibmvnic_stat ibmvnic_stats[] = {
 122	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
 123	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
 124	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
 125	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
 126	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
 127	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
 128	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
 129	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
 130	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
 131	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
 132	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
 133	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
 134	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
 135	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
 136	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
 137	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
 138	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
 139	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
 140	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
 141	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
 142	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
 143	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
 144};
 145
 146static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
 147{
 148	union ibmvnic_crq crq;
 149
 150	memset(&crq, 0, sizeof(crq));
 151	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
 152	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
 153
 154	return ibmvnic_send_crq(adapter, &crq);
 155}
 156
 157static int send_version_xchg(struct ibmvnic_adapter *adapter)
 158{
 159	union ibmvnic_crq crq;
 160
 161	memset(&crq, 0, sizeof(crq));
 162	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
 163	crq.version_exchange.cmd = VERSION_EXCHANGE;
 164	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
 165
 166	return ibmvnic_send_crq(adapter, &crq);
 167}
 168
 169static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
 170			  unsigned long length, unsigned long *number,
 171			  unsigned long *irq)
 172{
 173	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 174	long rc;
 175
 176	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
 177	*number = retbuf[0];
 178	*irq = retbuf[1];
 179
 180	return rc;
 181}
 182
 183/**
 184 * ibmvnic_wait_for_completion - Check device state and wait for completion
 185 * @adapter: private device data
 186 * @comp_done: completion structure to wait for
 187 * @timeout: time to wait in milliseconds
 188 *
 189 * Wait for a completion signal or until the timeout limit is reached
 190 * while checking that the device is still active.
 191 */
 192static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
 193				       struct completion *comp_done,
 194				       unsigned long timeout)
 195{
 196	struct net_device *netdev;
 197	unsigned long div_timeout;
 198	u8 retry;
 199
 200	netdev = adapter->netdev;
 201	retry = 5;
 202	div_timeout = msecs_to_jiffies(timeout / retry);
 203	while (true) {
 204		if (!adapter->crq.active) {
 205			netdev_err(netdev, "Device down!\n");
 206			return -ENODEV;
 207		}
 208		if (!retry--)
 209			break;
 210		if (wait_for_completion_timeout(comp_done, div_timeout))
 211			return 0;
 212	}
 213	netdev_err(netdev, "Operation timed out.\n");
 214	return -ETIMEDOUT;
 215}
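
/* Typical calling pattern (sketch, mirroring alloc_long_term_buff() and
 * friends below): callers serialize on fw_lock, rearm the completion,
 * issue the CRQ, then bound the wait:
 *
 *	mutex_lock(&adapter->fw_lock);
 *	adapter->fw_done_rc = 0;
 *	reinit_completion(&adapter->fw_done);
 *	rc = send_request_map(adapter, addr, len, map_id);
 *	if (!rc)
 *		rc = ibmvnic_wait_for_completion(adapter,
 *						 &adapter->fw_done, 10000);
 *	mutex_unlock(&adapter->fw_lock);
 */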
 216
 217static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 218				struct ibmvnic_long_term_buff *ltb, int size)
 219{
 220	struct device *dev = &adapter->vdev->dev;
 221	int rc;
 222
 223	ltb->size = size;
 224	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
 225				       GFP_KERNEL);
 226
 227	if (!ltb->buff) {
 228		dev_err(dev, "Couldn't alloc long term buffer\n");
 229		return -ENOMEM;
 230	}
 231	ltb->map_id = adapter->map_id;
 232	adapter->map_id++;
 233
 234	mutex_lock(&adapter->fw_lock);
 235	adapter->fw_done_rc = 0;
 236	reinit_completion(&adapter->fw_done);
 237
 238	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 239	if (rc) {
 240		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
 241		goto out;
 242	}
 243
 244	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
 245	if (rc) {
 246		dev_err(dev,
  247			"Long term map request aborted or timed out, rc = %d\n",
 248			rc);
 249		goto out;
 250	}
 251
 252	if (adapter->fw_done_rc) {
  253		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
 254			adapter->fw_done_rc);
 255		rc = -1;
 256		goto out;
 257	}
 258	rc = 0;
 259out:
 260	if (rc) {
 261		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 262		ltb->buff = NULL;
 263	}
 264	mutex_unlock(&adapter->fw_lock);
 265	return rc;
 266}
 267
 268static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 269				struct ibmvnic_long_term_buff *ltb)
 270{
 271	struct device *dev = &adapter->vdev->dev;
 272
 273	if (!ltb->buff)
 274		return;
 275
 276	/* VIOS automatically unmaps the long term buffer at remote
 277	 * end for the following resets:
 278	 * FAILOVER, MOBILITY, TIMEOUT.
 279	 */
 280	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
 281	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
 282	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
 283		send_request_unmap(adapter, ltb->map_id);
 284	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 285	ltb->buff = NULL;
 286	ltb->map_id = 0;
 287}
 288
 289static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
 290				struct ibmvnic_long_term_buff *ltb)
 291{
 292	struct device *dev = &adapter->vdev->dev;
 293	int rc;
 294
 295	memset(ltb->buff, 0, ltb->size);
 296
 297	mutex_lock(&adapter->fw_lock);
 298	adapter->fw_done_rc = 0;
 299
 300	reinit_completion(&adapter->fw_done);
 301	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 302	if (rc) {
 303		mutex_unlock(&adapter->fw_lock);
 304		return rc;
 305	}
 306
 307	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
 308	if (rc) {
 309		dev_info(dev,
 310			 "Reset failed, long term map request timed out or aborted\n");
 311		mutex_unlock(&adapter->fw_lock);
 312		return rc;
 313	}
 314
 315	if (adapter->fw_done_rc) {
 316		dev_info(dev,
 317			 "Reset failed, attempting to free and reallocate buffer\n");
 318		free_long_term_buff(adapter, ltb);
 319		mutex_unlock(&adapter->fw_lock);
 320		return alloc_long_term_buff(adapter, ltb, ltb->size);
 321	}
 322	mutex_unlock(&adapter->fw_lock);
 323	return 0;
 324}
 325
 326static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
 327{
 328	int i;
 329
 330	for (i = 0; i < adapter->num_active_rx_pools; i++)
 331		adapter->rx_pool[i].active = 0;
 332}
 333
 334static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 335			      struct ibmvnic_rx_pool *pool)
 336{
 337	int count = pool->size - atomic_read(&pool->available);
 338	u64 handle = adapter->rx_scrq[pool->index]->handle;
 339	struct device *dev = &adapter->vdev->dev;
 340	struct ibmvnic_ind_xmit_queue *ind_bufp;
 341	struct ibmvnic_sub_crq_queue *rx_scrq;
 342	union sub_crq *sub_crq;
 343	int buffers_added = 0;
 344	unsigned long lpar_rc;
 345	struct sk_buff *skb;
 346	unsigned int offset;
 347	dma_addr_t dma_addr;
 348	unsigned char *dst;
 349	int shift = 0;
 350	int index;
 351	int i;
 352
 353	if (!pool->active)
 354		return;
 355
 356	rx_scrq = adapter->rx_scrq[pool->index];
 357	ind_bufp = &rx_scrq->ind_buf;
 358
  359	/* netdev_alloc_skb() could have failed after we saved a few skbs
 360	 * in the indir_buf and we would not have sent them to VIOS yet.
 361	 * To account for them, start the loop at ind_bufp->index rather
 362	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
 363	 * be 0.
 364	 */
 365	for (i = ind_bufp->index; i < count; ++i) {
 366		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
 367		if (!skb) {
 368			dev_err(dev, "Couldn't replenish rx buff\n");
 369			adapter->replenish_no_mem++;
 370			break;
 371		}
 372
 373		index = pool->free_map[pool->next_free];
 374
 375		if (pool->rx_buff[index].skb)
 376			dev_err(dev, "Inconsistent free_map!\n");
 377
 378		/* Copy the skb to the long term mapped DMA buffer */
 379		offset = index * pool->buff_size;
 380		dst = pool->long_term_buff.buff + offset;
 381		memset(dst, 0, pool->buff_size);
 382		dma_addr = pool->long_term_buff.addr + offset;
 383		pool->rx_buff[index].data = dst;
 384
 385		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
 386		pool->rx_buff[index].dma = dma_addr;
 387		pool->rx_buff[index].skb = skb;
 388		pool->rx_buff[index].pool_index = pool->index;
 389		pool->rx_buff[index].size = pool->buff_size;
 390
 391		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
 392		memset(sub_crq, 0, sizeof(*sub_crq));
 393		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
 394		sub_crq->rx_add.correlator =
 395		    cpu_to_be64((u64)&pool->rx_buff[index]);
 396		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
 397		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
 398
  399		/* The length field of the sCRQ is defined to be 24 bits so the
  400		 * buffer size needs to be left shifted by a byte before it is
  401		 * converted to big endian to prevent the last byte from being
  402		 * truncated.
  403		 */
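		/* Worked example, assuming a little-endian host: a buff_size
		 * of 0x2400 shifted left by 8 gives 0x00240000, which
		 * cpu_to_be32() stores as bytes 00 24 00 00, so the 24-bit
		 * length occupies the first three bytes of the field.
		 */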
 404#ifdef __LITTLE_ENDIAN__
 405		shift = 8;
 406#endif
 407		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
 408		pool->next_free = (pool->next_free + 1) % pool->size;
 409		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
 410		    i == count - 1) {
 411			lpar_rc =
 412				send_subcrq_indirect(adapter, handle,
 413						     (u64)ind_bufp->indir_dma,
 414						     (u64)ind_bufp->index);
 415			if (lpar_rc != H_SUCCESS)
 416				goto failure;
 417			buffers_added += ind_bufp->index;
 418			adapter->replenish_add_buff_success += ind_bufp->index;
 419			ind_bufp->index = 0;
 420		}
 421	}
 422	atomic_add(buffers_added, &pool->available);
 423	return;
 424
 425failure:
 426	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
 427		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
 428	for (i = ind_bufp->index - 1; i >= 0; --i) {
 429		struct ibmvnic_rx_buff *rx_buff;
 430
 431		pool->next_free = pool->next_free == 0 ?
 432				  pool->size - 1 : pool->next_free - 1;
 433		sub_crq = &ind_bufp->indir_arr[i];
 434		rx_buff = (struct ibmvnic_rx_buff *)
 435				be64_to_cpu(sub_crq->rx_add.correlator);
 436		index = (int)(rx_buff - pool->rx_buff);
 437		pool->free_map[pool->next_free] = index;
 438		dev_kfree_skb_any(pool->rx_buff[index].skb);
 439		pool->rx_buff[index].skb = NULL;
 440	}
 441	adapter->replenish_add_buff_failure += ind_bufp->index;
 442	atomic_add(buffers_added, &pool->available);
 443	ind_bufp->index = 0;
 444	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
 445		/* Disable buffer pool replenishment and report carrier off if
 446		 * queue is closed or pending failover.
 447		 * Firmware guarantees that a signal will be sent to the
 448		 * driver, triggering a reset.
 449		 */
 450		deactivate_rx_pools(adapter);
 451		netif_carrier_off(adapter->netdev);
 452	}
 453}
 454
 455static void replenish_pools(struct ibmvnic_adapter *adapter)
 456{
 457	int i;
 458
 459	adapter->replenish_task_cycles++;
 460	for (i = 0; i < adapter->num_active_rx_pools; i++) {
 461		if (adapter->rx_pool[i].active)
 462			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
 463	}
 464
 465	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
 466}
 467
 468static void release_stats_buffers(struct ibmvnic_adapter *adapter)
 469{
 470	kfree(adapter->tx_stats_buffers);
 471	kfree(adapter->rx_stats_buffers);
 472	adapter->tx_stats_buffers = NULL;
 473	adapter->rx_stats_buffers = NULL;
 474}
 475
 476static int init_stats_buffers(struct ibmvnic_adapter *adapter)
 477{
 478	adapter->tx_stats_buffers =
 479				kcalloc(IBMVNIC_MAX_QUEUES,
 480					sizeof(struct ibmvnic_tx_queue_stats),
 481					GFP_KERNEL);
 482	if (!adapter->tx_stats_buffers)
 483		return -ENOMEM;
 484
 485	adapter->rx_stats_buffers =
 486				kcalloc(IBMVNIC_MAX_QUEUES,
 487					sizeof(struct ibmvnic_rx_queue_stats),
 488					GFP_KERNEL);
 489	if (!adapter->rx_stats_buffers)
 490		return -ENOMEM;
 491
 492	return 0;
 493}
 494
 495static void release_stats_token(struct ibmvnic_adapter *adapter)
 496{
 497	struct device *dev = &adapter->vdev->dev;
 498
 499	if (!adapter->stats_token)
 500		return;
 501
 502	dma_unmap_single(dev, adapter->stats_token,
 503			 sizeof(struct ibmvnic_statistics),
 504			 DMA_FROM_DEVICE);
 505	adapter->stats_token = 0;
 506}
 507
 508static int init_stats_token(struct ibmvnic_adapter *adapter)
 509{
 510	struct device *dev = &adapter->vdev->dev;
 511	dma_addr_t stok;
 512
 513	stok = dma_map_single(dev, &adapter->stats,
 514			      sizeof(struct ibmvnic_statistics),
 515			      DMA_FROM_DEVICE);
 516	if (dma_mapping_error(dev, stok)) {
 517		dev_err(dev, "Couldn't map stats buffer\n");
 518		return -1;
 519	}
 520
 521	adapter->stats_token = stok;
 522	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
 523	return 0;
 524}
 525
 526static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 527{
 528	struct ibmvnic_rx_pool *rx_pool;
 529	u64 buff_size;
 530	int rx_scrqs;
 531	int i, j, rc;
 532
 533	if (!adapter->rx_pool)
 534		return -1;
 535
 536	buff_size = adapter->cur_rx_buf_sz;
 537	rx_scrqs = adapter->num_active_rx_pools;
 538	for (i = 0; i < rx_scrqs; i++) {
 539		rx_pool = &adapter->rx_pool[i];
 540
 541		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
 542
 543		if (rx_pool->buff_size != buff_size) {
 544			free_long_term_buff(adapter, &rx_pool->long_term_buff);
 545			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
 546			rc = alloc_long_term_buff(adapter,
 547						  &rx_pool->long_term_buff,
 548						  rx_pool->size *
 549						  rx_pool->buff_size);
 550		} else {
 551			rc = reset_long_term_buff(adapter,
 552						  &rx_pool->long_term_buff);
 553		}
 554
 555		if (rc)
 556			return rc;
 557
 558		for (j = 0; j < rx_pool->size; j++)
 559			rx_pool->free_map[j] = j;
 560
 561		memset(rx_pool->rx_buff, 0,
 562		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));
 563
 564		atomic_set(&rx_pool->available, 0);
 565		rx_pool->next_alloc = 0;
 566		rx_pool->next_free = 0;
 567		rx_pool->active = 1;
 568	}
 569
 570	return 0;
 571}
 572
 573static void release_rx_pools(struct ibmvnic_adapter *adapter)
 574{
 575	struct ibmvnic_rx_pool *rx_pool;
 576	int i, j;
 577
 578	if (!adapter->rx_pool)
 579		return;
 580
 581	for (i = 0; i < adapter->num_active_rx_pools; i++) {
 582		rx_pool = &adapter->rx_pool[i];
 583
 584		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
 585
 586		kfree(rx_pool->free_map);
 587		free_long_term_buff(adapter, &rx_pool->long_term_buff);
 588
 589		if (!rx_pool->rx_buff)
 590			continue;
 591
 592		for (j = 0; j < rx_pool->size; j++) {
 593			if (rx_pool->rx_buff[j].skb) {
 594				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
 595				rx_pool->rx_buff[j].skb = NULL;
 596			}
 597		}
 598
 599		kfree(rx_pool->rx_buff);
 600	}
 601
 602	kfree(adapter->rx_pool);
 603	adapter->rx_pool = NULL;
 604	adapter->num_active_rx_pools = 0;
 605}
 606
 607static int init_rx_pools(struct net_device *netdev)
 608{
 609	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 610	struct device *dev = &adapter->vdev->dev;
 611	struct ibmvnic_rx_pool *rx_pool;
 612	int rxadd_subcrqs;
 613	u64 buff_size;
 614	int i, j;
 615
 616	rxadd_subcrqs = adapter->num_active_rx_scrqs;
 617	buff_size = adapter->cur_rx_buf_sz;
 618
 619	adapter->rx_pool = kcalloc(rxadd_subcrqs,
 620				   sizeof(struct ibmvnic_rx_pool),
 621				   GFP_KERNEL);
 622	if (!adapter->rx_pool) {
 623		dev_err(dev, "Failed to allocate rx pools\n");
 624		return -1;
 625	}
 626
 627	adapter->num_active_rx_pools = rxadd_subcrqs;
 628
 629	for (i = 0; i < rxadd_subcrqs; i++) {
 630		rx_pool = &adapter->rx_pool[i];
 631
 632		netdev_dbg(adapter->netdev,
 633			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
 634			   i, adapter->req_rx_add_entries_per_subcrq,
 635			   buff_size);
 636
 637		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
 638		rx_pool->index = i;
 639		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
 640		rx_pool->active = 1;
 641
 642		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
 643					    GFP_KERNEL);
 644		if (!rx_pool->free_map) {
 645			release_rx_pools(adapter);
 646			return -1;
 647		}
 648
 649		rx_pool->rx_buff = kcalloc(rx_pool->size,
 650					   sizeof(struct ibmvnic_rx_buff),
 651					   GFP_KERNEL);
 652		if (!rx_pool->rx_buff) {
 653			dev_err(dev, "Couldn't alloc rx buffers\n");
 654			release_rx_pools(adapter);
 655			return -1;
 656		}
 657
 658		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
 659					 rx_pool->size * rx_pool->buff_size)) {
 660			release_rx_pools(adapter);
 661			return -1;
 662		}
 663
 664		for (j = 0; j < rx_pool->size; ++j)
 665			rx_pool->free_map[j] = j;
 666
 667		atomic_set(&rx_pool->available, 0);
 668		rx_pool->next_alloc = 0;
 669		rx_pool->next_free = 0;
 670	}
 671
 672	return 0;
 673}
 674
 675static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
 676			     struct ibmvnic_tx_pool *tx_pool)
 677{
 678	int rc, i;
 679
 680	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
 681	if (rc)
 682		return rc;
 683
 684	memset(tx_pool->tx_buff, 0,
 685	       tx_pool->num_buffers *
 686	       sizeof(struct ibmvnic_tx_buff));
 687
 688	for (i = 0; i < tx_pool->num_buffers; i++)
 689		tx_pool->free_map[i] = i;
 690
 691	tx_pool->consumer_index = 0;
 692	tx_pool->producer_index = 0;
 693
 694	return 0;
 695}
 696
 697static int reset_tx_pools(struct ibmvnic_adapter *adapter)
 698{
 699	int tx_scrqs;
 700	int i, rc;
 701
 702	if (!adapter->tx_pool)
 703		return -1;
 704
 705	tx_scrqs = adapter->num_active_tx_pools;
 706	for (i = 0; i < tx_scrqs; i++) {
 707		ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
 708		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
 709		if (rc)
 710			return rc;
 711		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
 712		if (rc)
 713			return rc;
 714	}
 715
 716	return 0;
 717}
 718
 719static void release_vpd_data(struct ibmvnic_adapter *adapter)
 720{
 721	if (!adapter->vpd)
 722		return;
 723
 724	kfree(adapter->vpd->buff);
 725	kfree(adapter->vpd);
 726
 727	adapter->vpd = NULL;
 728}
 729
 730static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
 731				struct ibmvnic_tx_pool *tx_pool)
 732{
 733	kfree(tx_pool->tx_buff);
 734	kfree(tx_pool->free_map);
 735	free_long_term_buff(adapter, &tx_pool->long_term_buff);
 736}
 737
 738static void release_tx_pools(struct ibmvnic_adapter *adapter)
 739{
 740	int i;
 741
 742	if (!adapter->tx_pool)
 743		return;
 744
 745	for (i = 0; i < adapter->num_active_tx_pools; i++) {
 746		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
 747		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
 748	}
 749
 750	kfree(adapter->tx_pool);
 751	adapter->tx_pool = NULL;
 752	kfree(adapter->tso_pool);
 753	adapter->tso_pool = NULL;
 754	adapter->num_active_tx_pools = 0;
 755}
 756
 757static int init_one_tx_pool(struct net_device *netdev,
 758			    struct ibmvnic_tx_pool *tx_pool,
 759			    int num_entries, int buf_size)
 760{
 761	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 762	int i;
 763
 764	tx_pool->tx_buff = kcalloc(num_entries,
 765				   sizeof(struct ibmvnic_tx_buff),
 766				   GFP_KERNEL);
 767	if (!tx_pool->tx_buff)
 768		return -1;
 769
 770	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
 771				 num_entries * buf_size))
 772		return -1;
 773
 774	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
 775	if (!tx_pool->free_map)
 776		return -1;
 777
 778	for (i = 0; i < num_entries; i++)
 779		tx_pool->free_map[i] = i;
 780
 781	tx_pool->consumer_index = 0;
 782	tx_pool->producer_index = 0;
 783	tx_pool->num_buffers = num_entries;
 784	tx_pool->buf_size = buf_size;
 785
 786	return 0;
 787}
 788
 789static int init_tx_pools(struct net_device *netdev)
 790{
 791	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 792	int tx_subcrqs;
 793	u64 buff_size;
 794	int i, rc;
 795
 796	tx_subcrqs = adapter->num_active_tx_scrqs;
 797	adapter->tx_pool = kcalloc(tx_subcrqs,
 798				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
 799	if (!adapter->tx_pool)
 800		return -1;
 801
 802	adapter->tso_pool = kcalloc(tx_subcrqs,
 803				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
 804	if (!adapter->tso_pool) {
 805		kfree(adapter->tx_pool);
 806		adapter->tx_pool = NULL;
 807		return -1;
 808	}
 809
 810	adapter->num_active_tx_pools = tx_subcrqs;
 811
 812	for (i = 0; i < tx_subcrqs; i++) {
 813		buff_size = adapter->req_mtu + VLAN_HLEN;
 814		buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
 815		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
 816				      adapter->req_tx_entries_per_subcrq,
 817				      buff_size);
 818		if (rc) {
 819			release_tx_pools(adapter);
 820			return rc;
 821		}
 822
 823		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
 824				      IBMVNIC_TSO_BUFS,
 825				      IBMVNIC_TSO_BUF_SZ);
 826		if (rc) {
 827			release_tx_pools(adapter);
 828			return rc;
 829		}
 830	}
 831
 832	return 0;
 833}
 834
 835static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
 836{
 837	int i;
 838
 839	if (adapter->napi_enabled)
 840		return;
 841
 842	for (i = 0; i < adapter->req_rx_queues; i++)
 843		napi_enable(&adapter->napi[i]);
 844
 845	adapter->napi_enabled = true;
 846}
 847
 848static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
 849{
 850	int i;
 851
 852	if (!adapter->napi_enabled)
 853		return;
 854
 855	for (i = 0; i < adapter->req_rx_queues; i++) {
 856		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
 857		napi_disable(&adapter->napi[i]);
 858	}
 859
 860	adapter->napi_enabled = false;
 861}
 862
 863static int init_napi(struct ibmvnic_adapter *adapter)
 864{
 865	int i;
 866
 867	adapter->napi = kcalloc(adapter->req_rx_queues,
 868				sizeof(struct napi_struct), GFP_KERNEL);
 869	if (!adapter->napi)
 870		return -ENOMEM;
 871
 872	for (i = 0; i < adapter->req_rx_queues; i++) {
 873		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
 874		netif_napi_add(adapter->netdev, &adapter->napi[i],
 875			       ibmvnic_poll, NAPI_POLL_WEIGHT);
 876	}
 877
 878	adapter->num_active_rx_napi = adapter->req_rx_queues;
 879	return 0;
 880}
 881
 882static void release_napi(struct ibmvnic_adapter *adapter)
 883{
 884	int i;
 885
 886	if (!adapter->napi)
 887		return;
 888
 889	for (i = 0; i < adapter->num_active_rx_napi; i++) {
 890		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
 891		netif_napi_del(&adapter->napi[i]);
 892	}
 893
 894	kfree(adapter->napi);
 895	adapter->napi = NULL;
 896	adapter->num_active_rx_napi = 0;
 897	adapter->napi_enabled = false;
 898}
 899
 900static const char *adapter_state_to_string(enum vnic_state state)
 901{
 902	switch (state) {
 903	case VNIC_PROBING:
 904		return "PROBING";
 905	case VNIC_PROBED:
 906		return "PROBED";
 907	case VNIC_OPENING:
 908		return "OPENING";
 909	case VNIC_OPEN:
 910		return "OPEN";
 911	case VNIC_CLOSING:
 912		return "CLOSING";
 913	case VNIC_CLOSED:
 914		return "CLOSED";
 915	case VNIC_REMOVING:
 916		return "REMOVING";
 917	case VNIC_REMOVED:
 918		return "REMOVED";
 919	case VNIC_DOWN:
 920		return "DOWN";
 921	}
 922	return "UNKNOWN";
 923}
 924
 925static int ibmvnic_login(struct net_device *netdev)
 926{
 927	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 928	unsigned long timeout = msecs_to_jiffies(20000);
 929	int retry_count = 0;
 930	int retries = 10;
 931	bool retry;
 932	int rc;
 933
 934	do {
 935		retry = false;
 936		if (retry_count > retries) {
 937			netdev_warn(netdev, "Login attempts exceeded\n");
 938			return -1;
 939		}
 940
 941		adapter->init_done_rc = 0;
 942		reinit_completion(&adapter->init_done);
 943		rc = send_login(adapter);
 944		if (rc)
 945			return rc;
 946
 947		if (!wait_for_completion_timeout(&adapter->init_done,
 948						 timeout)) {
 949			netdev_warn(netdev, "Login timed out, retrying...\n");
 950			retry = true;
 951			adapter->init_done_rc = 0;
 952			retry_count++;
 953			continue;
 954		}
 955
 956		if (adapter->init_done_rc == ABORTED) {
 957			netdev_warn(netdev, "Login aborted, retrying...\n");
 958			retry = true;
 959			adapter->init_done_rc = 0;
 960			retry_count++;
 961			/* FW or device may be busy, so
 962			 * wait a bit before retrying login
 963			 */
 964			msleep(500);
 965		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
 966			retry_count++;
 967			release_sub_crqs(adapter, 1);
 968
 969			retry = true;
 970			netdev_dbg(netdev,
 971				   "Received partial success, retrying...\n");
 972			adapter->init_done_rc = 0;
 973			reinit_completion(&adapter->init_done);
 974			send_query_cap(adapter);
 975			if (!wait_for_completion_timeout(&adapter->init_done,
 976							 timeout)) {
 977				netdev_warn(netdev,
 978					    "Capabilities query timed out\n");
 979				return -1;
 980			}
 981
 982			rc = init_sub_crqs(adapter);
 983			if (rc) {
 984				netdev_warn(netdev,
 985					    "SCRQ initialization failed\n");
 986				return -1;
 987			}
 988
 989			rc = init_sub_crq_irqs(adapter);
 990			if (rc) {
 991				netdev_warn(netdev,
 992					    "SCRQ irq initialization failed\n");
 993				return -1;
 994			}
 995		} else if (adapter->init_done_rc) {
 996			netdev_warn(netdev, "Adapter login failed\n");
 997			return -1;
 998		}
 999	} while (retry);
1000
1001	__ibmvnic_set_mac(netdev, adapter->mac_addr);
1002
1003	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
1004	return 0;
1005}
1006
1007static void release_login_buffer(struct ibmvnic_adapter *adapter)
1008{
1009	kfree(adapter->login_buf);
1010	adapter->login_buf = NULL;
1011}
1012
1013static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
1014{
1015	kfree(adapter->login_rsp_buf);
1016	adapter->login_rsp_buf = NULL;
1017}
1018
1019static void release_resources(struct ibmvnic_adapter *adapter)
1020{
1021	release_vpd_data(adapter);
1022
1023	release_tx_pools(adapter);
1024	release_rx_pools(adapter);
1025
1026	release_napi(adapter);
1027	release_login_buffer(adapter);
1028	release_login_rsp_buffer(adapter);
1029}
1030
1031static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
1032{
1033	struct net_device *netdev = adapter->netdev;
1034	unsigned long timeout = msecs_to_jiffies(20000);
1035	union ibmvnic_crq crq;
1036	bool resend;
1037	int rc;
1038
1039	netdev_dbg(netdev, "setting link state %d\n", link_state);
1040
1041	memset(&crq, 0, sizeof(crq));
1042	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
1043	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
1044	crq.logical_link_state.link_state = link_state;
1045
1046	do {
1047		resend = false;
1048
1049		reinit_completion(&adapter->init_done);
1050		rc = ibmvnic_send_crq(adapter, &crq);
1051		if (rc) {
1052			netdev_err(netdev, "Failed to set link state\n");
1053			return rc;
1054		}
1055
1056		if (!wait_for_completion_timeout(&adapter->init_done,
1057						 timeout)) {
1058			netdev_err(netdev, "timeout setting link state\n");
1059			return -1;
1060		}
1061
1062		if (adapter->init_done_rc == PARTIALSUCCESS) {
 1063			/* Partial success, delay and re-send */
1064			mdelay(1000);
1065			resend = true;
1066		} else if (adapter->init_done_rc) {
1067			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
1068				    adapter->init_done_rc);
1069			return adapter->init_done_rc;
1070		}
1071	} while (resend);
1072
1073	return 0;
1074}
1075
1076static int set_real_num_queues(struct net_device *netdev)
1077{
1078	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1079	int rc;
1080
1081	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1082		   adapter->req_tx_queues, adapter->req_rx_queues);
1083
1084	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1085	if (rc) {
1086		netdev_err(netdev, "failed to set the number of tx queues\n");
1087		return rc;
1088	}
1089
1090	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1091	if (rc)
1092		netdev_err(netdev, "failed to set the number of rx queues\n");
1093
1094	return rc;
1095}
1096
1097static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1098{
1099	struct device *dev = &adapter->vdev->dev;
1100	union ibmvnic_crq crq;
1101	int len = 0;
1102	int rc;
1103
1104	if (adapter->vpd->buff)
1105		len = adapter->vpd->len;
1106
1107	mutex_lock(&adapter->fw_lock);
1108	adapter->fw_done_rc = 0;
1109	reinit_completion(&adapter->fw_done);
1110
1111	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1112	crq.get_vpd_size.cmd = GET_VPD_SIZE;
1113	rc = ibmvnic_send_crq(adapter, &crq);
1114	if (rc) {
1115		mutex_unlock(&adapter->fw_lock);
1116		return rc;
1117	}
1118
1119	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1120	if (rc) {
1121		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
1122		mutex_unlock(&adapter->fw_lock);
1123		return rc;
1124	}
1125	mutex_unlock(&adapter->fw_lock);
1126
1127	if (!adapter->vpd->len)
1128		return -ENODATA;
1129
1130	if (!adapter->vpd->buff)
1131		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1132	else if (adapter->vpd->len != len)
1133		adapter->vpd->buff =
1134			krealloc(adapter->vpd->buff,
1135				 adapter->vpd->len, GFP_KERNEL);
1136
1137	if (!adapter->vpd->buff) {
 1138		dev_err(dev, "Could not allocate VPD buffer\n");
1139		return -ENOMEM;
1140	}
1141
1142	adapter->vpd->dma_addr =
1143		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1144			       DMA_FROM_DEVICE);
1145	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1146		dev_err(dev, "Could not map VPD buffer\n");
1147		kfree(adapter->vpd->buff);
1148		adapter->vpd->buff = NULL;
1149		return -ENOMEM;
1150	}
1151
1152	mutex_lock(&adapter->fw_lock);
1153	adapter->fw_done_rc = 0;
1154	reinit_completion(&adapter->fw_done);
1155
1156	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1157	crq.get_vpd.cmd = GET_VPD;
1158	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1159	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1160	rc = ibmvnic_send_crq(adapter, &crq);
1161	if (rc) {
1162		kfree(adapter->vpd->buff);
1163		adapter->vpd->buff = NULL;
1164		mutex_unlock(&adapter->fw_lock);
1165		return rc;
1166	}
1167
1168	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1169	if (rc) {
1170		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1171		kfree(adapter->vpd->buff);
1172		adapter->vpd->buff = NULL;
1173		mutex_unlock(&adapter->fw_lock);
1174		return rc;
1175	}
1176
1177	mutex_unlock(&adapter->fw_lock);
1178	return 0;
1179}
1180
1181static int init_resources(struct ibmvnic_adapter *adapter)
1182{
1183	struct net_device *netdev = adapter->netdev;
1184	int rc;
1185
1186	rc = set_real_num_queues(netdev);
1187	if (rc)
1188		return rc;
1189
1190	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1191	if (!adapter->vpd)
1192		return -ENOMEM;
1193
1194	/* Vital Product Data (VPD) */
1195	rc = ibmvnic_get_vpd(adapter);
1196	if (rc) {
1197		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1198		return rc;
1199	}
1200
1201	adapter->map_id = 1;
1202
1203	rc = init_napi(adapter);
1204	if (rc)
1205		return rc;
1206
1207	send_query_map(adapter);
1208
1209	rc = init_rx_pools(netdev);
1210	if (rc)
1211		return rc;
1212
1213	rc = init_tx_pools(netdev);
1214	return rc;
1215}
1216
1217static int __ibmvnic_open(struct net_device *netdev)
1218{
1219	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1220	enum vnic_state prev_state = adapter->state;
1221	int i, rc;
1222
1223	adapter->state = VNIC_OPENING;
1224	replenish_pools(adapter);
1225	ibmvnic_napi_enable(adapter);
1226
1227	/* We're ready to receive frames, enable the sub-crq interrupts and
1228	 * set the logical link state to up
1229	 */
1230	for (i = 0; i < adapter->req_rx_queues; i++) {
1231		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1232		if (prev_state == VNIC_CLOSED)
1233			enable_irq(adapter->rx_scrq[i]->irq);
1234		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1235	}
1236
1237	for (i = 0; i < adapter->req_tx_queues; i++) {
1238		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1239		if (prev_state == VNIC_CLOSED)
1240			enable_irq(adapter->tx_scrq[i]->irq);
1241		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1242		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
1243	}
1244
1245	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1246	if (rc) {
1247		ibmvnic_napi_disable(adapter);
1248		release_resources(adapter);
1249		return rc;
1250	}
1251
1252	netif_tx_start_all_queues(netdev);
1253
1254	if (prev_state == VNIC_CLOSED) {
1255		for (i = 0; i < adapter->req_rx_queues; i++)
1256			napi_schedule(&adapter->napi[i]);
1257	}
1258
1259	adapter->state = VNIC_OPEN;
1260	return rc;
1261}
1262
1263static int ibmvnic_open(struct net_device *netdev)
1264{
1265	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1266	int rc;
1267
1268	ASSERT_RTNL();
1269
1270	/* If device failover is pending or we are about to reset, just set
1271	 * device state and return. Device operation will be handled by reset
1272	 * routine.
1273	 *
1274	 * It should be safe to overwrite the adapter->state here. Since
1275	 * we hold the rtnl, either the reset has not actually started or
1276	 * the rtnl got dropped during the set_link_state() in do_reset().
1277	 * In the former case, no one else is changing the state (again we
1278	 * have the rtnl) and in the latter case, do_reset() will detect and
1279	 * honor our setting below.
1280	 */
1281	if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
1282		netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
1283			   adapter_state_to_string(adapter->state),
1284			   adapter->failover_pending);
1285		adapter->state = VNIC_OPEN;
1286		rc = 0;
1287		goto out;
1288	}
1289
1290	if (adapter->state != VNIC_CLOSED) {
1291		rc = ibmvnic_login(netdev);
1292		if (rc)
1293			goto out;
1294
1295		rc = init_resources(adapter);
1296		if (rc) {
1297			netdev_err(netdev, "failed to initialize resources\n");
1298			release_resources(adapter);
1299			goto out;
1300		}
1301	}
1302
1303	rc = __ibmvnic_open(netdev);
1304
1305out:
1306	/* If open failed and there is a pending failover or in-progress reset,
1307	 * set device state and return. Device operation will be handled by
1308	 * reset routine. See also comments above regarding rtnl.
1309	 */
1310	if (rc &&
1311	    (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
1312		adapter->state = VNIC_OPEN;
1313		rc = 0;
1314	}
1315	return rc;
1316}
1317
1318static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1319{
1320	struct ibmvnic_rx_pool *rx_pool;
1321	struct ibmvnic_rx_buff *rx_buff;
1322	u64 rx_entries;
1323	int rx_scrqs;
1324	int i, j;
1325
1326	if (!adapter->rx_pool)
1327		return;
1328
1329	rx_scrqs = adapter->num_active_rx_pools;
1330	rx_entries = adapter->req_rx_add_entries_per_subcrq;
1331
1332	/* Free any remaining skbs in the rx buffer pools */
1333	for (i = 0; i < rx_scrqs; i++) {
1334		rx_pool = &adapter->rx_pool[i];
1335		if (!rx_pool || !rx_pool->rx_buff)
1336			continue;
1337
1338		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1339		for (j = 0; j < rx_entries; j++) {
1340			rx_buff = &rx_pool->rx_buff[j];
1341			if (rx_buff && rx_buff->skb) {
1342				dev_kfree_skb_any(rx_buff->skb);
1343				rx_buff->skb = NULL;
1344			}
1345		}
1346	}
1347}
1348
1349static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1350			      struct ibmvnic_tx_pool *tx_pool)
1351{
1352	struct ibmvnic_tx_buff *tx_buff;
1353	u64 tx_entries;
1354	int i;
1355
1356	if (!tx_pool || !tx_pool->tx_buff)
1357		return;
1358
1359	tx_entries = tx_pool->num_buffers;
1360
1361	for (i = 0; i < tx_entries; i++) {
1362		tx_buff = &tx_pool->tx_buff[i];
1363		if (tx_buff && tx_buff->skb) {
1364			dev_kfree_skb_any(tx_buff->skb);
1365			tx_buff->skb = NULL;
1366		}
1367	}
1368}
1369
1370static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1371{
1372	int tx_scrqs;
1373	int i;
1374
1375	if (!adapter->tx_pool || !adapter->tso_pool)
1376		return;
1377
1378	tx_scrqs = adapter->num_active_tx_pools;
1379
1380	/* Free any remaining skbs in the tx buffer pools */
1381	for (i = 0; i < tx_scrqs; i++) {
1382		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1383		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1384		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1385	}
1386}
1387
1388static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1389{
1390	struct net_device *netdev = adapter->netdev;
1391	int i;
1392
1393	if (adapter->tx_scrq) {
1394		for (i = 0; i < adapter->req_tx_queues; i++)
1395			if (adapter->tx_scrq[i]->irq) {
1396				netdev_dbg(netdev,
1397					   "Disabling tx_scrq[%d] irq\n", i);
1398				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1399				disable_irq(adapter->tx_scrq[i]->irq);
1400			}
1401	}
1402
1403	if (adapter->rx_scrq) {
1404		for (i = 0; i < adapter->req_rx_queues; i++) {
1405			if (adapter->rx_scrq[i]->irq) {
1406				netdev_dbg(netdev,
1407					   "Disabling rx_scrq[%d] irq\n", i);
1408				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1409				disable_irq(adapter->rx_scrq[i]->irq);
1410			}
1411		}
1412	}
1413}
1414
1415static void ibmvnic_cleanup(struct net_device *netdev)
1416{
1417	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1418
1419	/* ensure that transmissions are stopped if called by do_reset */
1420	if (test_bit(0, &adapter->resetting))
1421		netif_tx_disable(netdev);
1422	else
1423		netif_tx_stop_all_queues(netdev);
1424
1425	ibmvnic_napi_disable(adapter);
1426	ibmvnic_disable_irqs(adapter);
1427
1428	clean_rx_pools(adapter);
1429	clean_tx_pools(adapter);
1430}
1431
1432static int __ibmvnic_close(struct net_device *netdev)
1433{
1434	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1435	int rc = 0;
1436
1437	adapter->state = VNIC_CLOSING;
1438	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1439	adapter->state = VNIC_CLOSED;
1440	return rc;
1441}
1442
1443static int ibmvnic_close(struct net_device *netdev)
1444{
1445	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1446	int rc;
1447
1448	netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
1449		   adapter_state_to_string(adapter->state),
1450		   adapter->failover_pending,
1451		   adapter->force_reset_recovery);
1452
1453	/* If device failover is pending, just set device state and return.
1454	 * Device operation will be handled by reset routine.
1455	 */
1456	if (adapter->failover_pending) {
1457		adapter->state = VNIC_CLOSED;
1458		return 0;
1459	}
1460
1461	rc = __ibmvnic_close(netdev);
1462	ibmvnic_cleanup(netdev);
1463
1464	return rc;
1465}
1466
1467/**
1468 * build_hdr_data - creates L2/L3/L4 header data buffer
 1469 * @hdr_field: bitfield determining needed headers (bits 6/5/4 = L2/L3/L4)
1470 * @skb: socket buffer
1471 * @hdr_len: array of header lengths
1472 * @hdr_data: buffer to write the header to
1473 *
1474 * Reads hdr_field to determine which headers are needed by firmware.
1475 * Builds a buffer containing these headers.  Saves individual header
1476 * lengths and total buffer length to be used to build descriptors.
1477 */
1478static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1479			  int *hdr_len, u8 *hdr_data)
1480{
1481	int len = 0;
1482	u8 *hdr;
1483
1484	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1485		hdr_len[0] = sizeof(struct vlan_ethhdr);
1486	else
1487		hdr_len[0] = sizeof(struct ethhdr);
1488
1489	if (skb->protocol == htons(ETH_P_IP)) {
1490		hdr_len[1] = ip_hdr(skb)->ihl * 4;
1491		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1492			hdr_len[2] = tcp_hdrlen(skb);
1493		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1494			hdr_len[2] = sizeof(struct udphdr);
1495	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1496		hdr_len[1] = sizeof(struct ipv6hdr);
1497		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1498			hdr_len[2] = tcp_hdrlen(skb);
1499		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1500			hdr_len[2] = sizeof(struct udphdr);
1501	} else if (skb->protocol == htons(ETH_P_ARP)) {
1502		hdr_len[1] = arp_hdr_len(skb->dev);
1503		hdr_len[2] = 0;
1504	}
1505
1506	memset(hdr_data, 0, 120);
1507	if ((hdr_field >> 6) & 1) {
1508		hdr = skb_mac_header(skb);
1509		memcpy(hdr_data, hdr, hdr_len[0]);
1510		len += hdr_len[0];
1511	}
1512
1513	if ((hdr_field >> 5) & 1) {
1514		hdr = skb_network_header(skb);
1515		memcpy(hdr_data + len, hdr, hdr_len[1]);
1516		len += hdr_len[1];
1517	}
1518
1519	if ((hdr_field >> 4) & 1) {
1520		hdr = skb_transport_header(skb);
1521		memcpy(hdr_data + len, hdr, hdr_len[2]);
1522		len += hdr_len[2];
1523	}
1524	return len;
1525}
1526
1527/**
1528 * create_hdr_descs - create header and header extension descriptors
1529 * @hdr_field: bitfield determining needed headers
1530 * @hdr_data: buffer containing header data
1531 * @len: length of data buffer
1532 * @hdr_len: array of individual header lengths
1533 * @scrq_arr: descriptor array
1534 *
1535 * Creates header and, if needed, header extension descriptors and
1536 * places them in a descriptor array, scrq_arr
1537 */
1538
1539static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1540			    union sub_crq *scrq_arr)
1541{
1542	union sub_crq hdr_desc;
1543	int tmp_len = len;
1544	int num_descs = 0;
1545	u8 *data, *cur;
1546	int tmp;
1547
1548	while (tmp_len > 0) {
1549		cur = hdr_data + len - tmp_len;
1550
1551		memset(&hdr_desc, 0, sizeof(hdr_desc));
1552		if (cur != hdr_data) {
1553			data = hdr_desc.hdr_ext.data;
1554			tmp = tmp_len > 29 ? 29 : tmp_len;
1555			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1556			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1557			hdr_desc.hdr_ext.len = tmp;
1558		} else {
1559			data = hdr_desc.hdr.data;
1560			tmp = tmp_len > 24 ? 24 : tmp_len;
1561			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1562			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1563			hdr_desc.hdr.len = tmp;
1564			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1565			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1566			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1567			hdr_desc.hdr.flag = hdr_field << 1;
1568		}
1569		memcpy(data, cur, tmp);
1570		tmp_len -= tmp;
1571		*scrq_arr = hdr_desc;
1572		scrq_arr++;
1573		num_descs++;
1574	}
1575
1576	return num_descs;
1577}
1578
1579/**
1580 * build_hdr_descs_arr - build a header descriptor array
1581 * @skb: tx socket buffer
1582 * @indir_arr: indirect array
1583 * @num_entries: number of descriptors to be sent
1584 * @hdr_field: bit field determining which headers will be sent
1585 *
1586 * This function will build a TX descriptor array with applicable
1587 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1588 */
1589
1590static void build_hdr_descs_arr(struct sk_buff *skb,
1591				union sub_crq *indir_arr,
1592				int *num_entries, u8 hdr_field)
1593{
1594	int hdr_len[3] = {0, 0, 0};
1595	u8 hdr_data[140] = {0};
1596	int tot_len;
1597
1598	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
1599				 hdr_data);
1600	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1601					 indir_arr + 1);
1602}
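
/* Sketch of how the transmit path composes this (see ibmvnic_xmit()
 * below): slot 0 of the indirect array carries the main TX descriptor and
 * the header descriptors built here start at slot 1, so num_entries ends
 * up counting the whole chain:
 *
 *	if ((*hdrs >> 7) & 1)
 *		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
 *	indir_arr[0] = tx_crq;
 */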
1603
1604static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1605				    struct net_device *netdev)
1606{
1607	/* For some backing devices, mishandling of small packets
1608	 * can result in a loss of connection or TX stall. Device
1609	 * architects recommend that no packet should be smaller
1610	 * than the minimum MTU value provided to the driver, so
1611	 * pad any packets to that length
1612	 */
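	/* Note: skb_put_padto() consumes (frees) the skb on failure, so a
	 * non-zero return from this helper is treated as a drop in
	 * ibmvnic_xmit() without a further kfree.
	 */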
1613	if (skb->len < netdev->min_mtu)
1614		return skb_put_padto(skb, netdev->min_mtu);
1615
1616	return 0;
1617}
1618
1619static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
1620					 struct ibmvnic_sub_crq_queue *tx_scrq)
1621{
1622	struct ibmvnic_ind_xmit_queue *ind_bufp;
1623	struct ibmvnic_tx_buff *tx_buff;
1624	struct ibmvnic_tx_pool *tx_pool;
1625	union sub_crq tx_scrq_entry;
1626	int queue_num;
1627	int entries;
1628	int index;
1629	int i;
1630
1631	ind_bufp = &tx_scrq->ind_buf;
1632	entries = (u64)ind_bufp->index;
1633	queue_num = tx_scrq->pool_index;
1634
1635	for (i = entries - 1; i >= 0; --i) {
1636		tx_scrq_entry = ind_bufp->indir_arr[i];
1637		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
1638			continue;
1639		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
1640		if (index & IBMVNIC_TSO_POOL_MASK) {
1641			tx_pool = &adapter->tso_pool[queue_num];
1642			index &= ~IBMVNIC_TSO_POOL_MASK;
1643		} else {
1644			tx_pool = &adapter->tx_pool[queue_num];
1645		}
1646		tx_pool->free_map[tx_pool->consumer_index] = index;
1647		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1648					  tx_pool->num_buffers - 1 :
1649					  tx_pool->consumer_index - 1;
1650		tx_buff = &tx_pool->tx_buff[index];
1651		adapter->netdev->stats.tx_packets--;
1652		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
1653		adapter->tx_stats_buffers[queue_num].packets--;
1654		adapter->tx_stats_buffers[queue_num].bytes -=
1655						tx_buff->skb->len;
1656		dev_kfree_skb_any(tx_buff->skb);
1657		tx_buff->skb = NULL;
1658		adapter->netdev->stats.tx_dropped++;
1659	}
1660	ind_bufp->index = 0;
1661	if (atomic_sub_return(entries, &tx_scrq->used) <=
1662	    (adapter->req_tx_entries_per_subcrq / 2) &&
1663	    __netif_subqueue_stopped(adapter->netdev, queue_num) &&
1664	    !test_bit(0, &adapter->resetting)) {
1665		netif_wake_subqueue(adapter->netdev, queue_num);
1666		netdev_dbg(adapter->netdev, "Started queue %d\n",
1667			   queue_num);
1668	}
1669}
1670
1671static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
1672				 struct ibmvnic_sub_crq_queue *tx_scrq)
1673{
1674	struct ibmvnic_ind_xmit_queue *ind_bufp;
1675	u64 dma_addr;
1676	u64 entries;
1677	u64 handle;
1678	int rc;
1679
1680	ind_bufp = &tx_scrq->ind_buf;
1681	dma_addr = (u64)ind_bufp->indir_dma;
1682	entries = (u64)ind_bufp->index;
1683	handle = tx_scrq->handle;
1684
1685	if (!entries)
1686		return 0;
1687	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
1688	if (rc)
1689		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
1690	else
1691		ind_bufp->index = 0;
1692	return 0;
1693}
1694
1695static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1696{
1697	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1698	int queue_num = skb_get_queue_mapping(skb);
1699	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1700	struct device *dev = &adapter->vdev->dev;
1701	struct ibmvnic_ind_xmit_queue *ind_bufp;
1702	struct ibmvnic_tx_buff *tx_buff = NULL;
1703	struct ibmvnic_sub_crq_queue *tx_scrq;
1704	struct ibmvnic_tx_pool *tx_pool;
1705	unsigned int tx_send_failed = 0;
1706	netdev_tx_t ret = NETDEV_TX_OK;
1707	unsigned int tx_map_failed = 0;
1708	union sub_crq indir_arr[16];
1709	unsigned int tx_dropped = 0;
1710	unsigned int tx_packets = 0;
1711	unsigned int tx_bytes = 0;
1712	dma_addr_t data_dma_addr;
1713	struct netdev_queue *txq;
1714	unsigned long lpar_rc;
1715	union sub_crq tx_crq;
1716	unsigned int offset;
1717	int num_entries = 1;
1718	unsigned char *dst;
1719	int index = 0;
1720	u8 proto = 0;
1721
1722	tx_scrq = adapter->tx_scrq[queue_num];
1723	txq = netdev_get_tx_queue(netdev, queue_num);
1724	ind_bufp = &tx_scrq->ind_buf;
1725
1726	if (test_bit(0, &adapter->resetting)) {
1727		if (!netif_subqueue_stopped(netdev, skb))
1728			netif_stop_subqueue(netdev, queue_num);
1729		dev_kfree_skb_any(skb);
1730
1731		tx_send_failed++;
1732		tx_dropped++;
1733		ret = NETDEV_TX_OK;
1734		goto out;
1735	}
1736
1737	if (ibmvnic_xmit_workarounds(skb, netdev)) {
1738		tx_dropped++;
1739		tx_send_failed++;
1740		ret = NETDEV_TX_OK;
1741		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1742		goto out;
1743	}
1744	if (skb_is_gso(skb))
1745		tx_pool = &adapter->tso_pool[queue_num];
1746	else
1747		tx_pool = &adapter->tx_pool[queue_num];
1748
1749	index = tx_pool->free_map[tx_pool->consumer_index];
1750
1751	if (index == IBMVNIC_INVALID_MAP) {
1752		dev_kfree_skb_any(skb);
1753		tx_send_failed++;
1754		tx_dropped++;
1755		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1756		ret = NETDEV_TX_OK;
1757		goto out;
1758	}
1759
1760	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1761
1762	offset = index * tx_pool->buf_size;
1763	dst = tx_pool->long_term_buff.buff + offset;
1764	memset(dst, 0, tx_pool->buf_size);
1765	data_dma_addr = tx_pool->long_term_buff.addr + offset;
1766
1767	if (skb_shinfo(skb)->nr_frags) {
1768		int cur, i;
1769
1770		/* Copy the head */
1771		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1772		cur = skb_headlen(skb);
1773
1774		/* Copy the frags */
1775		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1776			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1777
1778			memcpy(dst + cur, skb_frag_address(frag),
1779			       skb_frag_size(frag));
1780			cur += skb_frag_size(frag);
1781		}
1782	} else {
1783		skb_copy_from_linear_data(skb, dst, skb->len);
1784	}
1785
1786	/* post changes to long_term_buff *dst before VIOS accessing it */
1787	dma_wmb();
1788
1789	tx_pool->consumer_index =
1790	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
1791
1792	tx_buff = &tx_pool->tx_buff[index];
1793	tx_buff->skb = skb;
1794	tx_buff->index = index;
1795	tx_buff->pool_index = queue_num;
1796
1797	memset(&tx_crq, 0, sizeof(tx_crq));
1798	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1799	tx_crq.v1.type = IBMVNIC_TX_DESC;
1800	tx_crq.v1.n_crq_elem = 1;
1801	tx_crq.v1.n_sge = 1;
1802	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1803
1804	if (skb_is_gso(skb))
1805		tx_crq.v1.correlator =
1806			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
1807	else
1808		tx_crq.v1.correlator = cpu_to_be32(index);
1809	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1810	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1811	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1812
1813	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
1814		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1815		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1816	}
1817
1818	if (skb->protocol == htons(ETH_P_IP)) {
1819		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1820		proto = ip_hdr(skb)->protocol;
1821	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1822		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1823		proto = ipv6_hdr(skb)->nexthdr;
1824	}
1825
1826	if (proto == IPPROTO_TCP)
1827		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1828	else if (proto == IPPROTO_UDP)
1829		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1830
1831	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1832		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1833		hdrs += 2;
1834	}
1835	if (skb_is_gso(skb)) {
1836		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1837		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1838		hdrs += 2;
1839	}
1840
1841	if ((*hdrs >> 7) & 1)
1842		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
1843
1844	tx_crq.v1.n_crq_elem = num_entries;
1845	tx_buff->num_entries = num_entries;
	/* flush the indirect buffer first if the current entry cannot fit */
1847	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
1848		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1849		if (lpar_rc != H_SUCCESS)
1850			goto tx_flush_err;
1851	}
1852
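	/* Queue this descriptor (plus any header descriptors built above)
	 * in the sub-CRQ's indirect buffer; the batch is pushed to the
	 * hypervisor by ibmvnic_tx_scrq_flush() once the buffer fills or
	 * xmit_more indicates no further packets are coming.
	 */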
1853	indir_arr[0] = tx_crq;
1854	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
1855	       num_entries * sizeof(struct ibmvnic_generic_scrq));
1856	ind_bufp->index += num_entries;
1857	if (__netdev_tx_sent_queue(txq, skb->len,
1858				   netdev_xmit_more() &&
1859				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
1860		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1861		if (lpar_rc != H_SUCCESS)
1862			goto tx_err;
1863	}
1864
1865	if (atomic_add_return(num_entries, &tx_scrq->used)
1866					>= adapter->req_tx_entries_per_subcrq) {
1867		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1868		netif_stop_subqueue(netdev, queue_num);
1869	}
1870
1871	tx_packets++;
1872	tx_bytes += skb->len;
1873	txq->trans_start = jiffies;
1874	ret = NETDEV_TX_OK;
1875	goto out;
1876
1877tx_flush_err:
1878	dev_kfree_skb_any(skb);
1879	tx_buff->skb = NULL;
1880	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1881				  tx_pool->num_buffers - 1 :
1882				  tx_pool->consumer_index - 1;
1883	tx_dropped++;
1884tx_err:
1885	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1886		dev_err_ratelimited(dev, "tx: send failed\n");
1887
1888	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1889		/* Disable TX and report carrier off if queue is closed
1890		 * or pending failover.
1891		 * Firmware guarantees that a signal will be sent to the
1892		 * driver, triggering a reset or some other action.
1893		 */
1894		netif_tx_stop_all_queues(netdev);
1895		netif_carrier_off(netdev);
1896	}
1897out:
1898	netdev->stats.tx_dropped += tx_dropped;
1899	netdev->stats.tx_bytes += tx_bytes;
1900	netdev->stats.tx_packets += tx_packets;
1901	adapter->tx_send_failed += tx_send_failed;
1902	adapter->tx_map_failed += tx_map_failed;
1903	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1904	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1905	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1906
1907	return ret;
1908}
1909
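/* Program the receive filter. Multicast state is driven by one
 * MULTICAST_CTRL CRQ per request: enable-all for IFF_ALLMULTI,
 * disable-all when the multicast list is empty, or one enable-MC CRQ
 * per address otherwise. Promiscuous mode is not programmed here; it
 * is requested from the server separately (see send_request_cap()).
 */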
1910static void ibmvnic_set_multi(struct net_device *netdev)
1911{
1912	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1913	struct netdev_hw_addr *ha;
1914	union ibmvnic_crq crq;
1915
1916	memset(&crq, 0, sizeof(crq));
1917	crq.request_capability.first = IBMVNIC_CRQ_CMD;
1918	crq.request_capability.cmd = REQUEST_CAPABILITY;
1919
1920	if (netdev->flags & IFF_PROMISC) {
1921		if (!adapter->promisc_supported)
1922			return;
1923	} else {
1924		if (netdev->flags & IFF_ALLMULTI) {
1925			/* Accept all multicast */
1926			memset(&crq, 0, sizeof(crq));
1927			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1928			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1929			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1930			ibmvnic_send_crq(adapter, &crq);
1931		} else if (netdev_mc_empty(netdev)) {
1932			/* Reject all multicast */
1933			memset(&crq, 0, sizeof(crq));
1934			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1935			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1936			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1937			ibmvnic_send_crq(adapter, &crq);
1938		} else {
1939			/* Accept one or more multicast(s) */
1940			netdev_for_each_mc_addr(ha, netdev) {
1941				memset(&crq, 0, sizeof(crq));
1942				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1943				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1944				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1945				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1946						ha->addr);
1947				ibmvnic_send_crq(adapter, &crq);
1948			}
1949		}
1950	}
1951}
1952
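/* Send a CHANGE_MAC_ADDR CRQ and wait up to 10 seconds for the server
 * to respond; on success the response handler (handle_change_mac_rsp)
 * updates netdev->dev_addr. fw_lock serializes users of the shared
 * fw_done completion. On failure the cached mac_addr is restored from
 * netdev->dev_addr.
 */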
1953static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
1954{
1955	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1956	union ibmvnic_crq crq;
1957	int rc;
1958
1959	if (!is_valid_ether_addr(dev_addr)) {
1960		rc = -EADDRNOTAVAIL;
1961		goto err;
1962	}
1963
1964	memset(&crq, 0, sizeof(crq));
1965	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1966	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1967	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
1968
1969	mutex_lock(&adapter->fw_lock);
1970	adapter->fw_done_rc = 0;
1971	reinit_completion(&adapter->fw_done);
1972
1973	rc = ibmvnic_send_crq(adapter, &crq);
1974	if (rc) {
1975		rc = -EIO;
1976		mutex_unlock(&adapter->fw_lock);
1977		goto err;
1978	}
1979
1980	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1981	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
1982	if (rc || adapter->fw_done_rc) {
1983		rc = -EIO;
1984		mutex_unlock(&adapter->fw_lock);
1985		goto err;
1986	}
1987	mutex_unlock(&adapter->fw_lock);
1988	return 0;
1989err:
1990	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1991	return rc;
1992}
1993
1994static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1995{
1996	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1997	struct sockaddr *addr = p;
1998	int rc;
1999
2000	rc = 0;
2001	if (!is_valid_ether_addr(addr->sa_data))
2002		return -EADDRNOTAVAIL;
2003
2004	ether_addr_copy(adapter->mac_addr, addr->sa_data);
2005	if (adapter->state != VNIC_PROBED)
2006		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
2007
2008	return rc;
2009}
2010
2011static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2012{
2013	switch (reason) {
2014	case VNIC_RESET_FAILOVER:
2015		return "FAILOVER";
2016	case VNIC_RESET_MOBILITY:
2017		return "MOBILITY";
2018	case VNIC_RESET_FATAL:
2019		return "FATAL";
2020	case VNIC_RESET_NON_FATAL:
2021		return "NON_FATAL";
2022	case VNIC_RESET_TIMEOUT:
2023		return "TIMEOUT";
2024	case VNIC_RESET_CHANGE_PARAM:
2025		return "CHANGE_PARAM";
2026	case VNIC_RESET_PASSIVE_INIT:
2027		return "PASSIVE_INIT";
2028	}
2029	return "UNKNOWN";
2030}
2031
2032/*
2033 * do_reset returns zero if we are able to keep processing reset events, or
2034 * non-zero if we hit a fatal error and must halt.
2035 */
2036static int do_reset(struct ibmvnic_adapter *adapter,
2037		    struct ibmvnic_rwi *rwi, u32 reset_state)
2038{
2039	u64 old_num_rx_queues, old_num_tx_queues;
2040	u64 old_num_rx_slots, old_num_tx_slots;
2041	struct net_device *netdev = adapter->netdev;
2042	int rc;
2043
2044	netdev_dbg(adapter->netdev,
2045		   "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2046		   adapter_state_to_string(adapter->state),
2047		   adapter->failover_pending,
2048		   reset_reason_to_string(rwi->reset_reason),
2049		   adapter_state_to_string(reset_state));
2050
2051	adapter->reset_reason = rwi->reset_reason;
2052	/* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
2053	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2054		rtnl_lock();
2055
2056	/* Now that we have the rtnl lock, clear any pending failover.
2057	 * This will ensure ibmvnic_open() has either completed or will
2058	 * block until failover is complete.
2059	 */
2060	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2061		adapter->failover_pending = false;
2062
2063	/* read the state and check (again) after getting rtnl */
2064	reset_state = adapter->state;
2065
2066	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2067		rc = -EBUSY;
2068		goto out;
2069	}
2070
2071	netif_carrier_off(netdev);
2072
2073	old_num_rx_queues = adapter->req_rx_queues;
2074	old_num_tx_queues = adapter->req_tx_queues;
2075	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2076	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
2077
2078	ibmvnic_cleanup(netdev);
2079
2080	if (reset_state == VNIC_OPEN &&
2081	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
2082	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
2083		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2084			rc = __ibmvnic_close(netdev);
2085			if (rc)
2086				goto out;
2087		} else {
2088			adapter->state = VNIC_CLOSING;
2089
2090			/* Release the RTNL lock before link state change and
2091			 * re-acquire after the link state change to allow
2092			 * linkwatch_event to grab the RTNL lock and run during
2093			 * a reset.
2094			 */
2095			rtnl_unlock();
2096			rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2097			rtnl_lock();
2098			if (rc)
2099				goto out;
2100
2101			if (adapter->state == VNIC_OPEN) {
2102				/* When we dropped rtnl, ibmvnic_open() got
2103				 * it and noticed that we are resetting and
2104				 * set the adapter state to OPEN. Update our
2105				 * new "target" state, and resume the reset
2106				 * from VNIC_CLOSING state.
2107				 */
2108				netdev_dbg(netdev,
2109					   "Open changed state from %s, updating.\n",
2110					   adapter_state_to_string(reset_state));
2111				reset_state = VNIC_OPEN;
2112				adapter->state = VNIC_CLOSING;
2113			}
2114
2115			if (adapter->state != VNIC_CLOSING) {
2116				/* If someone else changed the adapter state
2117				 * when we dropped the rtnl, fail the reset
2118				 */
2119				rc = -1;
2120				goto out;
2121			}
2122			adapter->state = VNIC_CLOSED;
2123		}
2124	}
2125
2126	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2127		release_resources(adapter);
2128		release_sub_crqs(adapter, 1);
2129		release_crq_queue(adapter);
2130	}
2131
2132	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so that when we call open it appears
2134		 * we are coming from the probed state.
2135		 */
2136		adapter->state = VNIC_PROBED;
2137
2138		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2139			rc = init_crq_queue(adapter);
2140		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2141			rc = ibmvnic_reenable_crq_queue(adapter);
2142			release_sub_crqs(adapter, 1);
2143		} else {
2144			rc = ibmvnic_reset_crq(adapter);
2145			if (rc == H_CLOSED || rc == H_SUCCESS) {
2146				rc = vio_enable_interrupts(adapter->vdev);
2147				if (rc)
2148					netdev_err(adapter->netdev,
2149						   "Reset failed to enable interrupts. rc=%d\n",
2150						   rc);
2151			}
2152		}
2153
2154		if (rc) {
2155			netdev_err(adapter->netdev,
2156				   "Reset couldn't initialize crq. rc=%d\n", rc);
2157			goto out;
2158		}
2159
2160		rc = ibmvnic_reset_init(adapter, true);
2161		if (rc) {
2162			rc = IBMVNIC_INIT_FAILED;
2163			goto out;
2164		}
2165
2166		/* If the adapter was in PROBE or DOWN state prior to the reset,
2167		 * exit here.
2168		 */
2169		if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
2170			rc = 0;
2171			goto out;
2172		}
2173
2174		rc = ibmvnic_login(netdev);
2175		if (rc)
2176			goto out;
2177
2178		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2179			rc = init_resources(adapter);
2180			if (rc)
2181				goto out;
2182		} else if (adapter->req_rx_queues != old_num_rx_queues ||
2183		    adapter->req_tx_queues != old_num_tx_queues ||
2184		    adapter->req_rx_add_entries_per_subcrq !=
2185		    old_num_rx_slots ||
2186		    adapter->req_tx_entries_per_subcrq !=
2187		    old_num_tx_slots ||
2188		    !adapter->rx_pool ||
2189		    !adapter->tso_pool ||
2190		    !adapter->tx_pool) {
2191			release_rx_pools(adapter);
2192			release_tx_pools(adapter);
2193			release_napi(adapter);
2194			release_vpd_data(adapter);
2195
2196			rc = init_resources(adapter);
2197			if (rc)
2198				goto out;
2199
2200		} else {
2201			rc = reset_tx_pools(adapter);
2202			if (rc) {
2203				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
2204					   rc);
2205				goto out;
2206			}
2207
2208			rc = reset_rx_pools(adapter);
2209			if (rc) {
2210				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
2211					   rc);
2212				goto out;
2213			}
2214		}
2215		ibmvnic_disable_irqs(adapter);
2216	}
2217	adapter->state = VNIC_CLOSED;
2218
2219	if (reset_state == VNIC_CLOSED) {
2220		rc = 0;
2221		goto out;
2222	}
2223
2224	rc = __ibmvnic_open(netdev);
2225	if (rc) {
2226		rc = IBMVNIC_OPEN_FAILED;
2227		goto out;
2228	}
2229
2230	/* refresh device's multicast list */
2231	ibmvnic_set_multi(netdev);
2232
2233	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2234	    adapter->reset_reason == VNIC_RESET_MOBILITY)
2235		__netdev_notify_peers(netdev);
2236
2237	rc = 0;
2238
2239out:
2240	/* restore the adapter state if reset failed */
2241	if (rc)
2242		adapter->state = reset_state;
2243	/* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
2244	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2245		rtnl_unlock();
2246
2247	netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
2248		   adapter_state_to_string(adapter->state),
2249		   adapter->failover_pending, rc);
2250	return rc;
2251}
2252
2253static int do_hard_reset(struct ibmvnic_adapter *adapter,
2254			 struct ibmvnic_rwi *rwi, u32 reset_state)
2255{
2256	struct net_device *netdev = adapter->netdev;
2257	int rc;
2258
2259	netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2260		   reset_reason_to_string(rwi->reset_reason));
2261
2262	/* read the state and check (again) after getting rtnl */
2263	reset_state = adapter->state;
2264
2265	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2266		rc = -EBUSY;
2267		goto out;
2268	}
2269
2270	netif_carrier_off(netdev);
2271	adapter->reset_reason = rwi->reset_reason;
2272
2273	ibmvnic_cleanup(netdev);
2274	release_resources(adapter);
2275	release_sub_crqs(adapter, 0);
2276	release_crq_queue(adapter);
2277
	/* remove the closed state so that when we call open it appears
2279	 * we are coming from the probed state.
2280	 */
2281	adapter->state = VNIC_PROBED;
2282
2283	reinit_completion(&adapter->init_done);
2284	rc = init_crq_queue(adapter);
2285	if (rc) {
2286		netdev_err(adapter->netdev,
2287			   "Couldn't initialize crq. rc=%d\n", rc);
2288		goto out;
2289	}
2290
2291	rc = ibmvnic_reset_init(adapter, false);
2292	if (rc)
2293		goto out;
2294
2295	/* If the adapter was in PROBE or DOWN state prior to the reset,
2296	 * exit here.
2297	 */
2298	if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
2299		goto out;
2300
2301	rc = ibmvnic_login(netdev);
2302	if (rc)
2303		goto out;
2304
2305	rc = init_resources(adapter);
2306	if (rc)
2307		goto out;
2308
2309	ibmvnic_disable_irqs(adapter);
2310	adapter->state = VNIC_CLOSED;
2311
2312	if (reset_state == VNIC_CLOSED)
2313		goto out;
2314
2315	rc = __ibmvnic_open(netdev);
2316	if (rc) {
2317		rc = IBMVNIC_OPEN_FAILED;
2318		goto out;
2319	}
2320
2321	__netdev_notify_peers(netdev);
2322out:
2323	/* restore adapter state if reset failed */
2324	if (rc)
2325		adapter->state = reset_state;
2326	netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
2327		   adapter_state_to_string(adapter->state),
2328		   adapter->failover_pending, rc);
2329	return rc;
2330}
2331
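/* Dequeue the next reset work item (rwi), if any, under rwi_lock.
 * Items are queued by ibmvnic_reset() and consumed by the reset
 * worker; the caller is responsible for freeing the returned entry.
 */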
2332static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2333{
2334	struct ibmvnic_rwi *rwi;
2335	unsigned long flags;
2336
2337	spin_lock_irqsave(&adapter->rwi_lock, flags);
2338
2339	if (!list_empty(&adapter->rwi_list)) {
2340		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2341				       list);
2342		list_del(&rwi->list);
2343	} else {
2344		rwi = NULL;
2345	}
2346
2347	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2348	return rwi;
2349}
2350
2351/**
2352 * do_passive_init - complete probing when partner device is detected.
2353 * @adapter: ibmvnic_adapter struct
2354 *
2355 * If the ibmvnic device does not have a partner device to communicate with at boot
2356 * and that partner device comes online at a later time, this function is called
 * to complete the initialization process of the ibmvnic device.
2358 * Caller is expected to hold rtnl_lock().
2359 *
 * Returns non-zero if sub-CRQs are not initialized properly, leaving the
 * device in the down state.
2362 * Returns 0 upon success and the device is in PROBED state.
 */
2365static int do_passive_init(struct ibmvnic_adapter *adapter)
2366{
2367	unsigned long timeout = msecs_to_jiffies(30000);
2368	struct net_device *netdev = adapter->netdev;
2369	struct device *dev = &adapter->vdev->dev;
2370	int rc;
2371
2372	netdev_dbg(netdev, "Partner device found, probing.\n");
2373
2374	adapter->state = VNIC_PROBING;
2375	reinit_completion(&adapter->init_done);
2376	adapter->init_done_rc = 0;
2377	adapter->crq.active = true;
2378
2379	rc = send_crq_init_complete(adapter);
2380	if (rc)
2381		goto out;
2382
2383	rc = send_version_xchg(adapter);
2384	if (rc)
2385		netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
2386
2387	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2388		dev_err(dev, "Initialization sequence timed out\n");
2389		rc = -ETIMEDOUT;
2390		goto out;
2391	}
2392
2393	rc = init_sub_crqs(adapter);
2394	if (rc) {
2395		dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
2396		goto out;
2397	}
2398
2399	rc = init_sub_crq_irqs(adapter);
2400	if (rc) {
2401		dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc);
2402		goto init_failed;
2403	}
2404
2405	netdev->mtu = adapter->req_mtu - ETH_HLEN;
2406	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2407	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2408
2409	adapter->state = VNIC_PROBED;
2410	netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
2411
2412	return 0;
2413
2414init_failed:
2415	release_sub_crqs(adapter, 1);
2416out:
2417	adapter->state = VNIC_DOWN;
2418	return rc;
2419}
2420
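/* Reset worker: drain the rwi queue one entry at a time. Only one
 * instance runs at a time (serialized by the 'resetting' bit); if the
 * bit is already held, processing is retried via the delayed work.
 * Transport events escalate to do_hard_reset(), and a reset that fails
 * with an empty queue is retried as a hard reset with the same rwi.
 */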
2421static void __ibmvnic_reset(struct work_struct *work)
2422{
2423	struct ibmvnic_adapter *adapter;
2424	bool saved_state = false;
2425	struct ibmvnic_rwi *tmprwi;
2426	struct ibmvnic_rwi *rwi;
2427	unsigned long flags;
2428	u32 reset_state;
2429	int rc = 0;
2430
2431	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2432
2433	if (test_and_set_bit_lock(0, &adapter->resetting)) {
2434		queue_delayed_work(system_long_wq,
2435				   &adapter->ibmvnic_delayed_reset,
2436				   IBMVNIC_RESET_DELAY);
2437		return;
2438	}
2439
2440	rwi = get_next_rwi(adapter);
2441	while (rwi) {
2442		spin_lock_irqsave(&adapter->state_lock, flags);
2443
2444		if (adapter->state == VNIC_REMOVING ||
2445		    adapter->state == VNIC_REMOVED) {
2446			spin_unlock_irqrestore(&adapter->state_lock, flags);
2447			kfree(rwi);
2448			rc = EBUSY;
2449			break;
2450		}
2451
2452		if (!saved_state) {
2453			reset_state = adapter->state;
2454			saved_state = true;
2455		}
2456		spin_unlock_irqrestore(&adapter->state_lock, flags);
2457
2458		if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
2459			rtnl_lock();
2460			rc = do_passive_init(adapter);
2461			rtnl_unlock();
2462			if (!rc)
2463				netif_carrier_on(adapter->netdev);
2464		} else if (adapter->force_reset_recovery) {
2465			/* Since we are doing a hard reset now, clear the
2466			 * failover_pending flag so we don't ignore any
2467			 * future MOBILITY or other resets.
2468			 */
2469			adapter->failover_pending = false;
2470
2471			/* Transport event occurred during previous reset */
2472			if (adapter->wait_for_reset) {
2473				/* Previous was CHANGE_PARAM; caller locked */
2474				adapter->force_reset_recovery = false;
2475				rc = do_hard_reset(adapter, rwi, reset_state);
2476			} else {
2477				rtnl_lock();
2478				adapter->force_reset_recovery = false;
2479				rc = do_hard_reset(adapter, rwi, reset_state);
2480				rtnl_unlock();
2481			}
2482			if (rc) {
2483				/* give backing device time to settle down */
2484				netdev_dbg(adapter->netdev,
2485					   "[S:%s] Hard reset failed, waiting 60 secs\n",
2486					   adapter_state_to_string(adapter->state));
2487				set_current_state(TASK_UNINTERRUPTIBLE);
2488				schedule_timeout(60 * HZ);
2489			}
2490		} else {
2491			rc = do_reset(adapter, rwi, reset_state);
2492		}
2493		tmprwi = rwi;
2494		adapter->last_reset_time = jiffies;
2495
2496		if (rc)
2497			netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
2498
2499		rwi = get_next_rwi(adapter);
2500
2501		/*
2502		 * If there is another reset queued, free the previous rwi
2503		 * and process the new reset even if previous reset failed
2504		 * (the previous reset could have failed because of a fail
2505		 * over for instance, so process the fail over).
2506		 *
2507		 * If there are no resets queued and the previous reset failed,
2508		 * the adapter would be in an undefined state. So retry the
2509		 * previous reset as a hard reset.
2510		 */
2511		if (rwi)
2512			kfree(tmprwi);
2513		else if (rc)
2514			rwi = tmprwi;
2515
2516		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2517			    rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
2518			adapter->force_reset_recovery = true;
2519	}
2520
2521	if (adapter->wait_for_reset) {
2522		adapter->reset_done_rc = rc;
2523		complete(&adapter->reset_done);
2524	}
2525
2526	clear_bit_unlock(0, &adapter->resetting);
2527
2528	netdev_dbg(adapter->netdev,
2529		   "[S:%s FRR:%d WFR:%d] Done processing resets\n",
2530		   adapter_state_to_string(adapter->state),
2531		   adapter->force_reset_recovery,
2532		   adapter->wait_for_reset);
2533}
2534
2535static void __ibmvnic_delayed_reset(struct work_struct *work)
2536{
2537	struct ibmvnic_adapter *adapter;
2538
2539	adapter = container_of(work, struct ibmvnic_adapter,
2540			       ibmvnic_delayed_reset.work);
2541	__ibmvnic_reset(&adapter->ibmvnic_reset);
2542}
2543
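/* Queue a reset work item for the given reason and kick the reset
 * worker on system_long_wq. Duplicate reasons already queued are
 * dropped, and a pending transport event flushes previously queued
 * resets. Returns 0 or a negative errno.
 */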
2544static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2545			 enum ibmvnic_reset_reason reason)
2546{
2547	struct list_head *entry, *tmp_entry;
2548	struct ibmvnic_rwi *rwi, *tmp;
2549	struct net_device *netdev = adapter->netdev;
2550	unsigned long flags;
2551	int ret;
2552
2553	spin_lock_irqsave(&adapter->rwi_lock, flags);
2554
2555	/* If failover is pending don't schedule any other reset.
	 * Instead let the failover complete. If there is already a
	 * failover reset scheduled, we will detect and drop the
2558	 * duplicate reset when walking the ->rwi_list below.
2559	 */
2560	if (adapter->state == VNIC_REMOVING ||
2561	    adapter->state == VNIC_REMOVED ||
2562	    (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2563		ret = EBUSY;
2564		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2565		goto err;
2566	}
2567
2568	if (adapter->state == VNIC_PROBING) {
2569		netdev_warn(netdev, "Adapter reset during probe\n");
2570		adapter->init_done_rc = EAGAIN;
2571		ret = EAGAIN;
2572		goto err;
2573	}
2574
2575	list_for_each_entry(tmp, &adapter->rwi_list, list) {
2576		if (tmp->reset_reason == reason) {
2577			netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
2578				   reset_reason_to_string(reason));
2579			ret = EBUSY;
2580			goto err;
2581		}
2582	}
2583
2584	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2585	if (!rwi) {
2586		ret = ENOMEM;
2587		goto err;
2588	}
	/* if we just received a transport event,
	 * flush the reset queue and process this reset
2591	 */
2592	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2593		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2594			list_del(entry);
2595	}
2596	rwi->reset_reason = reason;
2597	list_add_tail(&rwi->list, &adapter->rwi_list);
2598	netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
2599		   reset_reason_to_string(reason));
2600	queue_work(system_long_wq, &adapter->ibmvnic_reset);
2601
2602	ret = 0;
2603err:
2604	/* ibmvnic_close() below can block, so drop the lock first */
2605	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2606
2607	if (ret == ENOMEM)
2608		ibmvnic_close(netdev);
2609
2610	return -ret;
2611}
2612
2613static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2614{
2615	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2616
2617	if (test_bit(0, &adapter->resetting)) {
2618		netdev_err(adapter->netdev,
2619			   "Adapter is resetting, skip timeout reset\n");
2620		return;
2621	}
	/* Don't queue up a reset until at least 5 seconds (the default
	 * watchdog value) have passed since the last reset
2624	 */
2625	if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2626		netdev_dbg(dev, "Not yet time to tx timeout.\n");
2627		return;
2628	}
2629	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2630}
2631
2632static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2633				  struct ibmvnic_rx_buff *rx_buff)
2634{
2635	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2636
2637	rx_buff->skb = NULL;
2638
2639	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2640	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2641
2642	atomic_dec(&pool->available);
2643}
2644
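/* NAPI poll handler for one rx sub-CRQ. Each completion carries the
 * originating rx_buff in its correlator; the frame is copied out of
 * the long-term-mapped DMA buffer into the skb, the buffer is returned
 * to its pool, and the pool is replenished once it falls below half of
 * the requested entries (or whenever the budget was not exhausted).
 */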
2645static int ibmvnic_poll(struct napi_struct *napi, int budget)
2646{
2647	struct ibmvnic_sub_crq_queue *rx_scrq;
2648	struct ibmvnic_adapter *adapter;
2649	struct net_device *netdev;
2650	int frames_processed;
2651	int scrq_num;
2652
2653	netdev = napi->dev;
2654	adapter = netdev_priv(netdev);
2655	scrq_num = (int)(napi - adapter->napi);
2656	frames_processed = 0;
2657	rx_scrq = adapter->rx_scrq[scrq_num];
2658
2659restart_poll:
2660	while (frames_processed < budget) {
2661		struct sk_buff *skb;
2662		struct ibmvnic_rx_buff *rx_buff;
2663		union sub_crq *next;
2664		u32 length;
2665		u16 offset;
2666		u8 flags = 0;
2667
2668		if (unlikely(test_bit(0, &adapter->resetting) &&
2669			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2670			enable_scrq_irq(adapter, rx_scrq);
2671			napi_complete_done(napi, frames_processed);
2672			return frames_processed;
2673		}
2674
2675		if (!pending_scrq(adapter, rx_scrq))
2676			break;
2677		next = ibmvnic_next_scrq(adapter, rx_scrq);
2678		rx_buff = (struct ibmvnic_rx_buff *)
2679			  be64_to_cpu(next->rx_comp.correlator);
2680		/* do error checking */
2681		if (next->rx_comp.rc) {
2682			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2683				   be16_to_cpu(next->rx_comp.rc));
2684			/* free the entry */
2685			next->rx_comp.first = 0;
2686			dev_kfree_skb_any(rx_buff->skb);
2687			remove_buff_from_pool(adapter, rx_buff);
2688			continue;
2689		} else if (!rx_buff->skb) {
2690			/* free the entry */
2691			next->rx_comp.first = 0;
2692			remove_buff_from_pool(adapter, rx_buff);
2693			continue;
2694		}
2695
2696		length = be32_to_cpu(next->rx_comp.len);
2697		offset = be16_to_cpu(next->rx_comp.off_frame_data);
2698		flags = next->rx_comp.flags;
2699		skb = rx_buff->skb;
2700		/* load long_term_buff before copying to skb */
2701		dma_rmb();
2702		skb_copy_to_linear_data(skb, rx_buff->data + offset,
2703					length);
2704
		/* VLAN header has been stripped by the system firmware and
2706		 * needs to be inserted by the driver
2707		 */
2708		if (adapter->rx_vlan_header_insertion &&
2709		    (flags & IBMVNIC_VLAN_STRIPPED))
2710			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2711					       ntohs(next->rx_comp.vlan_tci));
2712
2713		/* free the entry */
2714		next->rx_comp.first = 0;
2715		remove_buff_from_pool(adapter, rx_buff);
2716
2717		skb_put(skb, length);
2718		skb->protocol = eth_type_trans(skb, netdev);
2719		skb_record_rx_queue(skb, scrq_num);
2720
2721		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2722		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2723			skb->ip_summed = CHECKSUM_UNNECESSARY;
2724		}
2725
2726		length = skb->len;
2727		napi_gro_receive(napi, skb); /* send it up */
2728		netdev->stats.rx_packets++;
2729		netdev->stats.rx_bytes += length;
2730		adapter->rx_stats_buffers[scrq_num].packets++;
2731		adapter->rx_stats_buffers[scrq_num].bytes += length;
2732		frames_processed++;
2733	}
2734
2735	if (adapter->state != VNIC_CLOSING &&
2736	    ((atomic_read(&adapter->rx_pool[scrq_num].available) <
2737	      adapter->req_rx_add_entries_per_subcrq / 2) ||
2738	      frames_processed < budget))
2739		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2740	if (frames_processed < budget) {
2741		if (napi_complete_done(napi, frames_processed)) {
2742			enable_scrq_irq(adapter, rx_scrq);
2743			if (pending_scrq(adapter, rx_scrq)) {
2744				if (napi_reschedule(napi)) {
2745					disable_scrq_irq(adapter, rx_scrq);
2746					goto restart_poll;
2747				}
2748			}
2749		}
2750	}
2751	return frames_processed;
2752}
2753
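/* Issue a CHANGE_PARAM reset and block (up to 60 seconds) for its
 * completion. The current settings are saved as a fallback first; if
 * the reset fails, a second CHANGE_PARAM reset is issued to restore
 * them.
 */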
2754static int wait_for_reset(struct ibmvnic_adapter *adapter)
2755{
2756	int rc, ret;
2757
2758	adapter->fallback.mtu = adapter->req_mtu;
2759	adapter->fallback.rx_queues = adapter->req_rx_queues;
2760	adapter->fallback.tx_queues = adapter->req_tx_queues;
2761	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2762	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2763
2764	reinit_completion(&adapter->reset_done);
2765	adapter->wait_for_reset = true;
2766	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2767
2768	if (rc) {
2769		ret = rc;
2770		goto out;
2771	}
2772	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2773	if (rc) {
2774		ret = -ENODEV;
2775		goto out;
2776	}
2777
2778	ret = 0;
2779	if (adapter->reset_done_rc) {
2780		ret = -EIO;
2781		adapter->desired.mtu = adapter->fallback.mtu;
2782		adapter->desired.rx_queues = adapter->fallback.rx_queues;
2783		adapter->desired.tx_queues = adapter->fallback.tx_queues;
2784		adapter->desired.rx_entries = adapter->fallback.rx_entries;
2785		adapter->desired.tx_entries = adapter->fallback.tx_entries;
2786
2787		reinit_completion(&adapter->reset_done);
2788		adapter->wait_for_reset = true;
2789		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2790		if (rc) {
2791			ret = rc;
2792			goto out;
2793		}
2794		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2795						 60000);
2796		if (rc) {
2797			ret = -ENODEV;
2798			goto out;
2799		}
2800	}
2801out:
2802	adapter->wait_for_reset = false;
2803
2804	return ret;
2805}
2806
2807static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2808{
2809	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2810
2811	adapter->desired.mtu = new_mtu + ETH_HLEN;
2812
2813	return wait_for_reset(adapter);
2814}
2815
2816static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2817						struct net_device *dev,
2818						netdev_features_t features)
2819{
	/* Some backing hardware adapters cannot
	 * handle packets with an MSS less than 224
	 * or with only one segment.
2823	 */
2824	if (skb_is_gso(skb)) {
2825		if (skb_shinfo(skb)->gso_size < 224 ||
2826		    skb_shinfo(skb)->gso_segs == 1)
2827			features &= ~NETIF_F_GSO_MASK;
2828	}
2829
2830	return features;
2831}
2832
2833static const struct net_device_ops ibmvnic_netdev_ops = {
2834	.ndo_open		= ibmvnic_open,
2835	.ndo_stop		= ibmvnic_close,
2836	.ndo_start_xmit		= ibmvnic_xmit,
2837	.ndo_set_rx_mode	= ibmvnic_set_multi,
2838	.ndo_set_mac_address	= ibmvnic_set_mac,
2839	.ndo_validate_addr	= eth_validate_addr,
2840	.ndo_tx_timeout		= ibmvnic_tx_timeout,
2841	.ndo_change_mtu		= ibmvnic_change_mtu,
2842	.ndo_features_check     = ibmvnic_features_check,
2843};
2844
2845/* ethtool functions */
2846
2847static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2848				      struct ethtool_link_ksettings *cmd)
2849{
2850	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2851	int rc;
2852
2853	rc = send_query_phys_parms(adapter);
2854	if (rc) {
2855		adapter->speed = SPEED_UNKNOWN;
2856		adapter->duplex = DUPLEX_UNKNOWN;
2857	}
2858	cmd->base.speed = adapter->speed;
2859	cmd->base.duplex = adapter->duplex;
2860	cmd->base.port = PORT_FIBRE;
2861	cmd->base.phy_address = 0;
2862	cmd->base.autoneg = AUTONEG_ENABLE;
2863
2864	return 0;
2865}
2866
2867static void ibmvnic_get_drvinfo(struct net_device *netdev,
2868				struct ethtool_drvinfo *info)
2869{
2870	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2871
2872	strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2873	strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2874	strscpy(info->fw_version, adapter->fw_version,
2875		sizeof(info->fw_version));
2876}
2877
2878static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2879{
2880	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2881
2882	return adapter->msg_enable;
2883}
2884
2885static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2886{
2887	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2888
2889	adapter->msg_enable = data;
2890}
2891
2892static u32 ibmvnic_get_link(struct net_device *netdev)
2893{
2894	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2895
	/* We don't need to send a query because we request a logical link up
	 * at init and then wait for link state indications
2898	 */
2899	return adapter->logical_link_state;
2900}
2901
2902static void ibmvnic_get_ringparam(struct net_device *netdev,
2903				  struct ethtool_ringparam *ring)
2904{
2905	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2906
2907	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2908		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2909		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2910	} else {
2911		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2912		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2913	}
2914	ring->rx_mini_max_pending = 0;
2915	ring->rx_jumbo_max_pending = 0;
2916	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2917	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2918	ring->rx_mini_pending = 0;
2919	ring->rx_jumbo_pending = 0;
2920}
2921
2922static int ibmvnic_set_ringparam(struct net_device *netdev,
2923				 struct ethtool_ringparam *ring)
2924{
2925	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2926	int ret;
2927
2928	ret = 0;
2929	adapter->desired.rx_entries = ring->rx_pending;
2930	adapter->desired.tx_entries = ring->tx_pending;
2931
2932	ret = wait_for_reset(adapter);
2933
2934	if (!ret &&
2935	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2936	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2937		netdev_info(netdev,
2938			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2939			    ring->rx_pending, ring->tx_pending,
2940			    adapter->req_rx_add_entries_per_subcrq,
2941			    adapter->req_tx_entries_per_subcrq);
2942	return ret;
2943}
2944
2945static void ibmvnic_get_channels(struct net_device *netdev,
2946				 struct ethtool_channels *channels)
2947{
2948	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2949
2950	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2951		channels->max_rx = adapter->max_rx_queues;
2952		channels->max_tx = adapter->max_tx_queues;
2953	} else {
2954		channels->max_rx = IBMVNIC_MAX_QUEUES;
2955		channels->max_tx = IBMVNIC_MAX_QUEUES;
2956	}
2957
2958	channels->max_other = 0;
2959	channels->max_combined = 0;
2960	channels->rx_count = adapter->req_rx_queues;
2961	channels->tx_count = adapter->req_tx_queues;
2962	channels->other_count = 0;
2963	channels->combined_count = 0;
2964}
2965
2966static int ibmvnic_set_channels(struct net_device *netdev,
2967				struct ethtool_channels *channels)
2968{
2969	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2970	int ret;
2971
2972	ret = 0;
2973	adapter->desired.rx_queues = channels->rx_count;
2974	adapter->desired.tx_queues = channels->tx_count;
2975
2976	ret = wait_for_reset(adapter);
2977
2978	if (!ret &&
2979	    (adapter->req_rx_queues != channels->rx_count ||
2980	     adapter->req_tx_queues != channels->tx_count))
2981		netdev_info(netdev,
2982			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2983			    channels->rx_count, channels->tx_count,
2984			    adapter->req_rx_queues, adapter->req_tx_queues);
2985	return ret;
2986}
2987
2988static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2989{
2990	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2991	int i;
2992
2993	switch (stringset) {
2994	case ETH_SS_STATS:
2995		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2996				i++, data += ETH_GSTRING_LEN)
2997			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2998
2999		for (i = 0; i < adapter->req_tx_queues; i++) {
3000			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
3001			data += ETH_GSTRING_LEN;
3002
3003			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
3004			data += ETH_GSTRING_LEN;
3005
3006			snprintf(data, ETH_GSTRING_LEN,
3007				 "tx%d_dropped_packets", i);
3008			data += ETH_GSTRING_LEN;
3009		}
3010
3011		for (i = 0; i < adapter->req_rx_queues; i++) {
3012			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
3013			data += ETH_GSTRING_LEN;
3014
3015			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3016			data += ETH_GSTRING_LEN;
3017
3018			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3019			data += ETH_GSTRING_LEN;
3020		}
3021		break;
3022
3023	case ETH_SS_PRIV_FLAGS:
3024		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
3025			strcpy(data + i * ETH_GSTRING_LEN,
3026			       ibmvnic_priv_flags[i]);
3027		break;
3028	default:
3029		return;
3030	}
3031}
3032
3033static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3034{
3035	struct ibmvnic_adapter *adapter = netdev_priv(dev);
3036
3037	switch (sset) {
3038	case ETH_SS_STATS:
3039		return ARRAY_SIZE(ibmvnic_stats) +
3040		       adapter->req_tx_queues * NUM_TX_STATS +
3041		       adapter->req_rx_queues * NUM_RX_STATS;
3042	case ETH_SS_PRIV_FLAGS:
3043		return ARRAY_SIZE(ibmvnic_priv_flags);
3044	default:
3045		return -EOPNOTSUPP;
3046	}
3047}
3048
3049static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3050				      struct ethtool_stats *stats, u64 *data)
3051{
3052	struct ibmvnic_adapter *adapter = netdev_priv(dev);
3053	union ibmvnic_crq crq;
3054	int i, j;
3055	int rc;
3056
3057	memset(&crq, 0, sizeof(crq));
3058	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3059	crq.request_statistics.cmd = REQUEST_STATISTICS;
3060	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3061	crq.request_statistics.len =
3062	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
3063
3064	/* Wait for data to be written */
3065	reinit_completion(&adapter->stats_done);
3066	rc = ibmvnic_send_crq(adapter, &crq);
3067	if (rc)
3068		return;
3069	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3070	if (rc)
3071		return;
3072
3073	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
3074		data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3075				      (adapter, ibmvnic_stats[i].offset));
3076
3077	for (j = 0; j < adapter->req_tx_queues; j++) {
3078		data[i] = adapter->tx_stats_buffers[j].packets;
3079		i++;
3080		data[i] = adapter->tx_stats_buffers[j].bytes;
3081		i++;
3082		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3083		i++;
3084	}
3085
3086	for (j = 0; j < adapter->req_rx_queues; j++) {
3087		data[i] = adapter->rx_stats_buffers[j].packets;
3088		i++;
3089		data[i] = adapter->rx_stats_buffers[j].bytes;
3090		i++;
3091		data[i] = adapter->rx_stats_buffers[j].interrupts;
3092		i++;
3093	}
3094}
3095
3096static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
3097{
3098	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3099
3100	return adapter->priv_flags;
3101}
3102
3103static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
3104{
3105	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3106	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
3107
3108	if (which_maxes)
3109		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
3110	else
3111		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
3112
3113	return 0;
3114}
3115
3116static const struct ethtool_ops ibmvnic_ethtool_ops = {
3117	.get_drvinfo		= ibmvnic_get_drvinfo,
3118	.get_msglevel		= ibmvnic_get_msglevel,
3119	.set_msglevel		= ibmvnic_set_msglevel,
3120	.get_link		= ibmvnic_get_link,
3121	.get_ringparam		= ibmvnic_get_ringparam,
3122	.set_ringparam		= ibmvnic_set_ringparam,
3123	.get_channels		= ibmvnic_get_channels,
3124	.set_channels		= ibmvnic_set_channels,
3125	.get_strings            = ibmvnic_get_strings,
3126	.get_sset_count         = ibmvnic_get_sset_count,
3127	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
3128	.get_link_ksettings	= ibmvnic_get_link_ksettings,
3129	.get_priv_flags		= ibmvnic_get_priv_flags,
3130	.set_priv_flags		= ibmvnic_set_priv_flags,
3131};
3132
/* Routines for managing CRQs/sCRQs */
3134
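/* Re-register one sub-CRQ in place: release its irq mapping, zero the
 * (order-2) message queue, reset the bookkeeping, and issue
 * h_reg_sub_crq() again with the same DMA token.
 */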
3135static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3136				   struct ibmvnic_sub_crq_queue *scrq)
3137{
3138	int rc;
3139
3140	if (!scrq) {
3141		netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3142		return -EINVAL;
3143	}
3144
3145	if (scrq->irq) {
3146		free_irq(scrq->irq, scrq);
3147		irq_dispose_mapping(scrq->irq);
3148		scrq->irq = 0;
3149	}
3150
3151	if (scrq->msgs) {
3152		memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3153		atomic_set(&scrq->used, 0);
3154		scrq->cur = 0;
3155		scrq->ind_buf.index = 0;
3156	} else {
3157		netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3158		return -EINVAL;
3159	}
3160
3161	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3162			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3163	return rc;
3164}
3165
3166static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3167{
3168	int i, rc;
3169
3170	if (!adapter->tx_scrq || !adapter->rx_scrq)
3171		return -EINVAL;
3172
3173	for (i = 0; i < adapter->req_tx_queues; i++) {
3174		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3175		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3176		if (rc)
3177			return rc;
3178	}
3179
3180	for (i = 0; i < adapter->req_rx_queues; i++) {
3181		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3182		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3183		if (rc)
3184			return rc;
3185	}
3186
3187	return rc;
3188}
3189
3190static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3191				  struct ibmvnic_sub_crq_queue *scrq,
3192				  bool do_h_free)
3193{
3194	struct device *dev = &adapter->vdev->dev;
3195	long rc;
3196
3197	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3198
3199	if (do_h_free) {
3200		/* Close the sub-crqs */
3201		do {
3202			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3203						adapter->vdev->unit_address,
3204						scrq->crq_num);
3205		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3206
3207		if (rc) {
3208			netdev_err(adapter->netdev,
3209				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
3210				   scrq->crq_num, rc);
3211		}
3212	}
3213
3214	dma_free_coherent(dev,
3215			  IBMVNIC_IND_ARR_SZ,
3216			  scrq->ind_buf.indir_arr,
3217			  scrq->ind_buf.indir_dma);
3218
3219	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3220			 DMA_BIDIRECTIONAL);
3221	free_pages((unsigned long)scrq->msgs, 2);
3222	kfree(scrq);
3223}
3224
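/* Allocate and register a single sub-CRQ: four pages of message queue
 * mapped for DMA, registered with the hypervisor through
 * h_reg_sub_crq(), plus a coherent indirect-descriptor buffer used to
 * batch tx descriptors. Returns NULL on failure after unwinding.
 */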
3225static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3226							*adapter)
3227{
3228	struct device *dev = &adapter->vdev->dev;
3229	struct ibmvnic_sub_crq_queue *scrq;
3230	int rc;
3231
3232	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3233	if (!scrq)
3234		return NULL;
3235
3236	scrq->msgs =
3237		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3238	if (!scrq->msgs) {
3239		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3240		goto zero_page_failed;
3241	}
3242
3243	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3244					 DMA_BIDIRECTIONAL);
3245	if (dma_mapping_error(dev, scrq->msg_token)) {
3246		dev_warn(dev, "Couldn't map crq queue messages page\n");
3247		goto map_failed;
3248	}
3249
3250	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3251			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3252
3253	if (rc == H_RESOURCE)
3254		rc = ibmvnic_reset_crq(adapter);
3255
3256	if (rc == H_CLOSED) {
3257		dev_warn(dev, "Partner adapter not ready, waiting.\n");
3258	} else if (rc) {
3259		dev_warn(dev, "Error %d registering sub-crq\n", rc);
3260		goto reg_failed;
3261	}
3262
3263	scrq->adapter = adapter;
3264	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3265	scrq->ind_buf.index = 0;
3266
3267	scrq->ind_buf.indir_arr =
3268		dma_alloc_coherent(dev,
3269				   IBMVNIC_IND_ARR_SZ,
3270				   &scrq->ind_buf.indir_dma,
3271				   GFP_KERNEL);
3272
3273	if (!scrq->ind_buf.indir_arr)
3274		goto indir_failed;
3275
3276	spin_lock_init(&scrq->lock);
3277
3278	netdev_dbg(adapter->netdev,
3279		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3280		   scrq->crq_num, scrq->hw_irq, scrq->irq);
3281
3282	return scrq;
3283
3284indir_failed:
3285	do {
3286		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3287					adapter->vdev->unit_address,
3288					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3290reg_failed:
3291	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3292			 DMA_BIDIRECTIONAL);
3293map_failed:
3294	free_pages((unsigned long)scrq->msgs, 2);
3295zero_page_failed:
3296	kfree(scrq);
3297
3298	return NULL;
3299}
3300
3301static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
3302{
3303	int i;
3304
3305	if (adapter->tx_scrq) {
3306		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
3307			if (!adapter->tx_scrq[i])
3308				continue;
3309
3310			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3311				   i);
3312			ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
3313			if (adapter->tx_scrq[i]->irq) {
3314				free_irq(adapter->tx_scrq[i]->irq,
3315					 adapter->tx_scrq[i]);
3316				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3317				adapter->tx_scrq[i]->irq = 0;
3318			}
3319
3320			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3321					      do_h_free);
3322		}
3323
3324		kfree(adapter->tx_scrq);
3325		adapter->tx_scrq = NULL;
3326		adapter->num_active_tx_scrqs = 0;
3327	}
3328
3329	if (adapter->rx_scrq) {
3330		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3331			if (!adapter->rx_scrq[i])
3332				continue;
3333
3334			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3335				   i);
3336			if (adapter->rx_scrq[i]->irq) {
3337				free_irq(adapter->rx_scrq[i]->irq,
3338					 adapter->rx_scrq[i]);
3339				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3340				adapter->rx_scrq[i]->irq = 0;
3341			}
3342
3343			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3344					      do_h_free);
3345		}
3346
3347		kfree(adapter->rx_scrq);
3348		adapter->rx_scrq = NULL;
3349		adapter->num_active_rx_scrqs = 0;
3350	}
3351}
3352
3353static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3354			    struct ibmvnic_sub_crq_queue *scrq)
3355{
3356	struct device *dev = &adapter->vdev->dev;
3357	unsigned long rc;
3358
3359	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3360				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3361	if (rc)
3362		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3363			scrq->hw_irq, rc);
3364	return rc;
3365}
3366
3367static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3368			   struct ibmvnic_sub_crq_queue *scrq)
3369{
3370	struct device *dev = &adapter->vdev->dev;
3371	unsigned long rc;
3372
3373	if (scrq->hw_irq > 0x100000000ULL) {
3374		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3375		return 1;
3376	}
3377
3378	if (test_bit(0, &adapter->resetting) &&
3379	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
3380		u64 val = (0xff000000) | scrq->hw_irq;
3381
3382		rc = plpar_hcall_norets(H_EOI, val);
		/* H_EOI fails with rc = H_FUNCTION when running
		 * in XIVE mode; this is expected and not an error.
3385		 */
3386		if (rc && (rc != H_FUNCTION))
3387			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3388				val, rc);
3389	}
3390
3391	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3392				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3393	if (rc)
3394		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3395			scrq->hw_irq, rc);
3396	return rc;
3397}
3398
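/* Reap tx completions on one sub-CRQ: return finished buffers to the
 * tx (or TSO) pool free map, update BQL accounting, and wake the
 * subqueue once usage drops to half the requested ring entries. After
 * re-enabling the interrupt, loop again if completions raced in.
 */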
3399static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3400			       struct ibmvnic_sub_crq_queue *scrq)
3401{
3402	struct device *dev = &adapter->vdev->dev;
3403	struct ibmvnic_tx_pool *tx_pool;
3404	struct ibmvnic_tx_buff *txbuff;
3405	struct netdev_queue *txq;
3406	union sub_crq *next;
3407	int index;
3408	int i;
3409
3410restart_loop:
3411	while (pending_scrq(adapter, scrq)) {
3412		unsigned int pool = scrq->pool_index;
3413		int num_entries = 0;
3414		int total_bytes = 0;
3415		int num_packets = 0;
3416
3417		next = ibmvnic_next_scrq(adapter, scrq);
3418		for (i = 0; i < next->tx_comp.num_comps; i++) {
3419			index = be32_to_cpu(next->tx_comp.correlators[i]);
3420			if (index & IBMVNIC_TSO_POOL_MASK) {
3421				tx_pool = &adapter->tso_pool[pool];
3422				index &= ~IBMVNIC_TSO_POOL_MASK;
3423			} else {
3424				tx_pool = &adapter->tx_pool[pool];
3425			}
3426
3427			txbuff = &tx_pool->tx_buff[index];
3428			num_packets++;
3429			num_entries += txbuff->num_entries;
3430			if (txbuff->skb) {
3431				total_bytes += txbuff->skb->len;
3432				if (next->tx_comp.rcs[i]) {
3433					dev_err(dev, "tx error %x\n",
3434						next->tx_comp.rcs[i]);
3435					dev_kfree_skb_irq(txbuff->skb);
3436				} else {
3437					dev_consume_skb_irq(txbuff->skb);
3438				}
3439				txbuff->skb = NULL;
3440			} else {
3441				netdev_warn(adapter->netdev,
3442					    "TX completion received with NULL socket buffer\n");
3443			}
3444			tx_pool->free_map[tx_pool->producer_index] = index;
3445			tx_pool->producer_index =
3446				(tx_pool->producer_index + 1) %
3447					tx_pool->num_buffers;
3448		}
		/* remove tx_comp scrq */
3450		next->tx_comp.first = 0;
3451
3452		txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3453		netdev_tx_completed_queue(txq, num_packets, total_bytes);
3454
3455		if (atomic_sub_return(num_entries, &scrq->used) <=
3456		    (adapter->req_tx_entries_per_subcrq / 2) &&
3457		    __netif_subqueue_stopped(adapter->netdev,
3458					     scrq->pool_index)) {
3459			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3460			netdev_dbg(adapter->netdev, "Started queue %d\n",
3461				   scrq->pool_index);
3462		}
3463	}
3464
3465	enable_scrq_irq(adapter, scrq);
3466
3467	if (pending_scrq(adapter, scrq)) {
3468		disable_scrq_irq(adapter, scrq);
3469		goto restart_loop;
3470	}
3471
3472	return 0;
3473}
3474
3475static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3476{
3477	struct ibmvnic_sub_crq_queue *scrq = instance;
3478	struct ibmvnic_adapter *adapter = scrq->adapter;
3479
3480	disable_scrq_irq(adapter, scrq);
3481	ibmvnic_complete_tx(adapter, scrq);
3482
3483	return IRQ_HANDLED;
3484}
3485
3486static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3487{
3488	struct ibmvnic_sub_crq_queue *scrq = instance;
3489	struct ibmvnic_adapter *adapter = scrq->adapter;
3490
3491	/* When booting a kdump kernel we can hit pending interrupts
3492	 * prior to completing driver initialization.
3493	 */
3494	if (unlikely(adapter->state != VNIC_OPEN))
3495		return IRQ_NONE;
3496
3497	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3498
3499	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3500		disable_scrq_irq(adapter, scrq);
3501		__napi_schedule(&adapter->napi[scrq->scrq_num]);
3502	}
3503
3504	return IRQ_HANDLED;
3505}
3506
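/* Map each sub-CRQ's hypervisor interrupt into the Linux irq domain
 * and request the tx/rx handlers. Any failure unwinds the irqs already
 * requested and releases all sub-CRQs.
 */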
3507static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3508{
3509	struct device *dev = &adapter->vdev->dev;
3510	struct ibmvnic_sub_crq_queue *scrq;
3511	int i = 0, j = 0;
3512	int rc = 0;
3513
3514	for (i = 0; i < adapter->req_tx_queues; i++) {
3515		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3516			   i);
3517		scrq = adapter->tx_scrq[i];
3518		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3519
3520		if (!scrq->irq) {
3521			rc = -EINVAL;
3522			dev_err(dev, "Error mapping irq\n");
3523			goto req_tx_irq_failed;
3524		}
3525
3526		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3527			 adapter->vdev->unit_address, i);
3528		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3529				 0, scrq->name, scrq);
3530
3531		if (rc) {
3532			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3533				scrq->irq, rc);
3534			irq_dispose_mapping(scrq->irq);
3535			goto req_tx_irq_failed;
3536		}
3537	}
3538
3539	for (i = 0; i < adapter->req_rx_queues; i++) {
3540		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3541			   i);
3542		scrq = adapter->rx_scrq[i];
3543		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3544		if (!scrq->irq) {
3545			rc = -EINVAL;
3546			dev_err(dev, "Error mapping irq\n");
3547			goto req_rx_irq_failed;
3548		}
3549		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3550			 adapter->vdev->unit_address, i);
3551		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3552				 0, scrq->name, scrq);
3553		if (rc) {
3554			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3555				scrq->irq, rc);
3556			irq_dispose_mapping(scrq->irq);
3557			goto req_rx_irq_failed;
3558		}
3559	}
3560	return rc;
3561
3562req_rx_irq_failed:
3563	for (j = 0; j < i; j++) {
3564		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3565		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3566	}
3567	i = adapter->req_tx_queues;
3568req_tx_irq_failed:
3569	for (j = 0; j < i; j++) {
3570		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3571		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3572	}
3573	release_sub_crqs(adapter, 1);
3574	return rc;
3575}
3576
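/* Allocate all requested tx + rx sub-CRQs in a single pass. If fewer
 * queues could be registered than requested (but at least the
 * negotiated minimums), the request counts are trimmed to match; the
 * i % 3 walk below alternates between trimming rx and tx and skips
 * every third step.
 */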
3577static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3578{
3579	struct device *dev = &adapter->vdev->dev;
3580	struct ibmvnic_sub_crq_queue **allqueues;
3581	int registered_queues = 0;
3582	int total_queues;
3583	int more = 0;
3584	int i;
3585
3586	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3587
3588	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3589	if (!allqueues)
3590		return -1;
3591
3592	for (i = 0; i < total_queues; i++) {
3593		allqueues[i] = init_sub_crq_queue(adapter);
3594		if (!allqueues[i]) {
3595			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3596			break;
3597		}
3598		registered_queues++;
3599	}
3600
3601	/* Make sure we were able to register the minimum number of queues */
3602	if (registered_queues <
3603	    adapter->min_tx_queues + adapter->min_rx_queues) {
3604		dev_err(dev, "Fatal: Couldn't init  min number of sub-crqs\n");
3605		goto tx_failed;
3606	}
3607
	/* Trim the requested queue counts to absorb the allocation shortfall */
3609	for (i = 0; i < total_queues - registered_queues + more ; i++) {
3610		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3611		switch (i % 3) {
3612		case 0:
3613			if (adapter->req_rx_queues > adapter->min_rx_queues)
3614				adapter->req_rx_queues--;
3615			else
3616				more++;
3617			break;
3618		case 1:
3619			if (adapter->req_tx_queues > adapter->min_tx_queues)
3620				adapter->req_tx_queues--;
3621			else
3622				more++;
3623			break;
3624		}
3625	}
3626
3627	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3628				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
3629	if (!adapter->tx_scrq)
3630		goto tx_failed;
3631
3632	for (i = 0; i < adapter->req_tx_queues; i++) {
3633		adapter->tx_scrq[i] = allqueues[i];
3634		adapter->tx_scrq[i]->pool_index = i;
3635		adapter->num_active_tx_scrqs++;
3636	}
3637
3638	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3639				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
3640	if (!adapter->rx_scrq)
3641		goto rx_failed;
3642
3643	for (i = 0; i < adapter->req_rx_queues; i++) {
3644		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3645		adapter->rx_scrq[i]->scrq_num = i;
3646		adapter->num_active_rx_scrqs++;
3647	}
3648
3649	kfree(allqueues);
3650	return 0;
3651
3652rx_failed:
3653	kfree(adapter->tx_scrq);
3654	adapter->tx_scrq = NULL;
3655tx_failed:
3656	for (i = 0; i < registered_queues; i++)
3657		release_sub_crq_queue(adapter, allqueues[i], 1);
3658	kfree(allqueues);
3659	return -1;
3660}
3661
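/* Send a REQUEST_CAPABILITY CRQ for each tunable (queue counts, ring
 * sizes, MTU, ...). On the first attempt the values are derived from
 * the user's 'desired' settings, clamped so that a full ring of
 * buffers fits within IBMVNIC_MAX_LTB_SIZE; on retry the previously
 * computed values are re-sent unchanged. running_cap_crqs counts the
 * responses still outstanding.
 */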
3662static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3663{
3664	struct device *dev = &adapter->vdev->dev;
3665	union ibmvnic_crq crq;
3666	int max_entries;
3667
3668	if (!retry) {
		/* Sub-CRQ entries are 32 bytes long */
3670		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3671
3672		if (adapter->min_tx_entries_per_subcrq > entries_page ||
3673		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
3674			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3675			return;
3676		}
3677
3678		if (adapter->desired.mtu)
3679			adapter->req_mtu = adapter->desired.mtu;
3680		else
3681			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3682
3683		if (!adapter->desired.tx_entries)
3684			adapter->desired.tx_entries =
3685					adapter->max_tx_entries_per_subcrq;
3686		if (!adapter->desired.rx_entries)
3687			adapter->desired.rx_entries =
3688					adapter->max_rx_add_entries_per_subcrq;
3689
3690		max_entries = IBMVNIC_MAX_LTB_SIZE /
3691			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3692
3693		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3694			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3695			adapter->desired.tx_entries = max_entries;
3696		}
3697
3698		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3699			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3700			adapter->desired.rx_entries = max_entries;
3701		}
3702
3703		if (adapter->desired.tx_entries)
3704			adapter->req_tx_entries_per_subcrq =
3705					adapter->desired.tx_entries;
3706		else
3707			adapter->req_tx_entries_per_subcrq =
3708					adapter->max_tx_entries_per_subcrq;
3709
3710		if (adapter->desired.rx_entries)
3711			adapter->req_rx_add_entries_per_subcrq =
3712					adapter->desired.rx_entries;
3713		else
3714			adapter->req_rx_add_entries_per_subcrq =
3715					adapter->max_rx_add_entries_per_subcrq;
3716
3717		if (adapter->desired.tx_queues)
3718			adapter->req_tx_queues =
3719					adapter->desired.tx_queues;
3720		else
3721			adapter->req_tx_queues =
3722					adapter->opt_tx_comp_sub_queues;
3723
3724		if (adapter->desired.rx_queues)
3725			adapter->req_rx_queues =
3726					adapter->desired.rx_queues;
3727		else
3728			adapter->req_rx_queues =
3729					adapter->opt_rx_comp_queues;
3730
3731		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3732	}
3733
3734	memset(&crq, 0, sizeof(crq));
3735	crq.request_capability.first = IBMVNIC_CRQ_CMD;
3736	crq.request_capability.cmd = REQUEST_CAPABILITY;
3737
3738	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3739	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3740	atomic_inc(&adapter->running_cap_crqs);
3741	ibmvnic_send_crq(adapter, &crq);
3742
3743	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3744	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3745	atomic_inc(&adapter->running_cap_crqs);
3746	ibmvnic_send_crq(adapter, &crq);
3747
3748	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3749	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3750	atomic_inc(&adapter->running_cap_crqs);
3751	ibmvnic_send_crq(adapter, &crq);
3752
3753	crq.request_capability.capability =
3754	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3755	crq.request_capability.number =
3756	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3757	atomic_inc(&adapter->running_cap_crqs);
3758	ibmvnic_send_crq(adapter, &crq);
3759
3760	crq.request_capability.capability =
3761	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3762	crq.request_capability.number =
3763	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3764	atomic_inc(&adapter->running_cap_crqs);
3765	ibmvnic_send_crq(adapter, &crq);
3766
3767	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3768	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3769	atomic_inc(&adapter->running_cap_crqs);
3770	ibmvnic_send_crq(adapter, &crq);
3771
3772	if (adapter->netdev->flags & IFF_PROMISC) {
3773		if (adapter->promisc_supported) {
3774			crq.request_capability.capability =
3775			    cpu_to_be16(PROMISC_REQUESTED);
3776			crq.request_capability.number = cpu_to_be64(1);
3777			atomic_inc(&adapter->running_cap_crqs);
3778			ibmvnic_send_crq(adapter, &crq);
3779		}
3780	} else {
3781		crq.request_capability.capability =
3782		    cpu_to_be16(PROMISC_REQUESTED);
3783		crq.request_capability.number = cpu_to_be64(0);
3784		atomic_inc(&adapter->running_cap_crqs);
3785		ibmvnic_send_crq(adapter, &crq);
3786	}
3787}
3788
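/* Return nonzero if the descriptor at the sub-CRQ cursor has been marked
 * valid by the server, i.e. there is work pending on this queue.
 */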
3789static int pending_scrq(struct ibmvnic_adapter *adapter,
3790			struct ibmvnic_sub_crq_queue *scrq)
3791{
3792	union sub_crq *entry = &scrq->msgs[scrq->cur];
3793	int rc;
3794
3795	rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
3796
3797	/* Ensure that the SCRQ valid flag is loaded prior to loading the
3798	 * contents of the SCRQ descriptor
3799	 */
3800	dma_rmb();
3801
3802	return rc;
3803}
3804
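/* Consume the next valid descriptor from a sub-CRQ, advancing (and
 * wrapping) the cursor under scrq->lock. Returns NULL if the queue is
 * empty.
 */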
3805static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3806					struct ibmvnic_sub_crq_queue *scrq)
3807{
3808	union sub_crq *entry;
3809	unsigned long flags;
3810
3811	spin_lock_irqsave(&scrq->lock, flags);
3812	entry = &scrq->msgs[scrq->cur];
3813	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3814		if (++scrq->cur == scrq->size)
3815			scrq->cur = 0;
3816	} else {
3817		entry = NULL;
3818	}
3819	spin_unlock_irqrestore(&scrq->lock, flags);
3820
3821	/* Ensure that the SCRQ valid flag is loaded prior to loading the
3822	 * contents of the SCRQ descriptor
3823	 */
3824	dma_rmb();
3825
3826	return entry;
3827}
3828
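/* Consume the next valid entry from the main CRQ, or return NULL if none
 * is pending. Serialization is left to the caller; the CRQ tasklet holds
 * queue->lock around this.
 */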
3829static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3830{
3831	struct ibmvnic_crq_queue *queue = &adapter->crq;
3832	union ibmvnic_crq *crq;
3833
3834	crq = &queue->msgs[queue->cur];
3835	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3836		if (++queue->cur == queue->size)
3837			queue->cur = 0;
3838	} else {
3839		crq = NULL;
3840	}
3841
3842	return crq;
3843}
3844
3845static void print_subcrq_error(struct device *dev, int rc, const char *func)
3846{
3847	switch (rc) {
3848	case H_PARAMETER:
3849		dev_warn_ratelimited(dev,
3850				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3851				     func, rc);
3852		break;
3853	case H_CLOSED:
3854		dev_warn_ratelimited(dev,
3855				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3856				     func, rc);
3857		break;
3858	default:
3859		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3860		break;
3861	}
3862}
3863
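/* Post num_entries descriptors, already laid out at DMA address ioba, to
 * the sub-CRQ identified by remote_handle using a single
 * H_SEND_SUB_CRQ_INDIRECT hypercall. The dma_wmb() orders the descriptor
 * writes ahead of the hypercall.
 */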
3864static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3865				u64 remote_handle, u64 ioba, u64 num_entries)
3866{
3867	unsigned int ua = adapter->vdev->unit_address;
3868	struct device *dev = &adapter->vdev->dev;
3869	int rc;
3870
3871	/* Make sure the hypervisor sees the complete request */
3872	dma_wmb();
3873	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3874				cpu_to_be64(remote_handle),
3875				ioba, num_entries);
3876
3877	if (rc)
3878		print_subcrq_error(dev, rc, __func__);
3879
3880	return rc;
3881}
3882
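/* Hand one 16-byte CRQ message to the hypervisor via H_SEND_CRQ. Only an
 * INIT command may be sent while the CRQ is marked inactive; anything else
 * is rejected with -EINVAL.
 */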
3883static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3884			    union ibmvnic_crq *crq)
3885{
3886	unsigned int ua = adapter->vdev->unit_address;
3887	struct device *dev = &adapter->vdev->dev;
3888	u64 *u64_crq = (u64 *)crq;
3889	int rc;
3890
3891	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3892		   (unsigned long)cpu_to_be64(u64_crq[0]),
3893		   (unsigned long)cpu_to_be64(u64_crq[1]));
3894
3895	if (!adapter->crq.active &&
3896	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3897		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3898		return -EINVAL;
3899	}
3900
3901	/* Make sure the hypervisor sees the complete request */
3902	dma_wmb();
3903
3904	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3905				cpu_to_be64(u64_crq[0]),
3906				cpu_to_be64(u64_crq[1]));
3907
3908	if (rc) {
3909		if (rc == H_CLOSED) {
3910			dev_warn(dev, "CRQ Queue closed\n");
3911			/* do not reset; report the failure and wait for passive init from the server */
3912		}
3913
3914		dev_warn(dev, "Send error (rc=%d)\n", rc);
3915	}
3916
3917	return rc;
3918}
3919
3920static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3921{
3922	struct device *dev = &adapter->vdev->dev;
3923	union ibmvnic_crq crq;
3924	int retries = 100;
3925	int rc;
3926
3927	memset(&crq, 0, sizeof(crq));
3928	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3929	crq.generic.cmd = IBMVNIC_CRQ_INIT;
3930	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3931
3932	do {
3933		rc = ibmvnic_send_crq(adapter, &crq);
3934		if (rc != H_CLOSED)
3935			break;
3936		retries--;
3937		msleep(50);
3938
3939	} while (retries > 0);
3940
3941	if (rc) {
3942		dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3943		return rc;
3944	}
3945
3946	return 0;
3947}
3948
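/* Client-data entries in the login buffer are packed type/length/value
 * triplets; 'len' covers the NUL-terminated 'name' string that follows.
 */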
3949struct vnic_login_client_data {
3950	u8	type;
3951	__be16	len;
3952	char	name[];
3953} __packed;
3954
3955static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3956{
3957	int len;
3958
3959	/* Calculate the amount of buffer space needed for the
3960	 * vnic client data in the login buffer. There are four entries:
3961	 * OS name, LPAR name, device name, and a null terminator entry.
3962	 */
3963	len = 4 * sizeof(struct vnic_login_client_data);
3964	len += 6; /* strlen("Linux") plus terminating NUL */
3965	len += strlen(utsname()->nodename) + 1;
3966	len += strlen(adapter->netdev->name) + 1;
3967
3968	return len;
3969}
3970
3971static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3972				 struct vnic_login_client_data *vlcd)
3973{
3974	const char *os_name = "Linux";
3975	int len;
3976
3977	/* Type 1 - LPAR OS */
3978	vlcd->type = 1;
3979	len = strlen(os_name) + 1;
3980	vlcd->len = cpu_to_be16(len);
3981	strscpy(vlcd->name, os_name, len);
3982	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3983
3984	/* Type 2 - LPAR name */
3985	vlcd->type = 2;
3986	len = strlen(utsname()->nodename) + 1;
3987	vlcd->len = cpu_to_be16(len);
3988	strscpy(vlcd->name, utsname()->nodename, len);
3989	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3990
3991	/* Type 3 - device name */
3992	vlcd->type = 3;
3993	len = strlen(adapter->netdev->name) + 1;
3994	vlcd->len = cpu_to_be16(len);
3995	strscpy(vlcd->name, adapter->netdev->name, len);
3996}
3997
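/* Build and send the LOGIN request. The DMA-mapped login buffer holds, in
 * order: the ibmvnic_login_buffer header, one u64 sub-CRQ number per tx
 * queue, one per rx queue, then the client-data TLVs filled in by
 * vnic_add_client_data(). The response buffer is mapped separately and is
 * written by the server when the LOGIN_RSP CRQ arrives.
 */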
3998static int send_login(struct ibmvnic_adapter *adapter)
3999{
4000	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4001	struct ibmvnic_login_buffer *login_buffer;
4002	struct device *dev = &adapter->vdev->dev;
4003	struct vnic_login_client_data *vlcd;
4004	dma_addr_t rsp_buffer_token;
4005	dma_addr_t buffer_token;
4006	size_t rsp_buffer_size;
4007	union ibmvnic_crq crq;
4008	int client_data_len;
4009	size_t buffer_size;
4010	__be64 *tx_list_p;
4011	__be64 *rx_list_p;
4012	int rc;
4013	int i;
4014
4015	if (!adapter->tx_scrq || !adapter->rx_scrq) {
4016		netdev_err(adapter->netdev,
4017			   "RX or TX queues are not allocated, device login failed\n");
4018		return -1;
4019	}
4020
4021	release_login_buffer(adapter);
4022	release_login_rsp_buffer(adapter);
4023
4024	client_data_len = vnic_client_data_len(adapter);
4025
4026	buffer_size =
4027	    sizeof(struct ibmvnic_login_buffer) +
4028	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4029	    client_data_len;
4030
4031	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4032	if (!login_buffer)
4033		goto buf_alloc_failed;
4034
4035	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4036				      DMA_TO_DEVICE);
4037	if (dma_mapping_error(dev, buffer_token)) {
4038		dev_err(dev, "Couldn't map login buffer\n");
4039		goto buf_map_failed;
4040	}
4041
4042	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4043			  sizeof(u64) * adapter->req_tx_queues +
4044			  sizeof(u64) * adapter->req_rx_queues +
4045			  sizeof(u64) * adapter->req_rx_queues +
4046			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
4047
4048	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4049	if (!login_rsp_buffer)
4050		goto buf_rsp_alloc_failed;
4051
4052	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4053					  rsp_buffer_size, DMA_FROM_DEVICE);
4054	if (dma_mapping_error(dev, rsp_buffer_token)) {
4055		dev_err(dev, "Couldn't map login rsp buffer\n");
4056		goto buf_rsp_map_failed;
4057	}
4058
4059	adapter->login_buf = login_buffer;
4060	adapter->login_buf_token = buffer_token;
4061	adapter->login_buf_sz = buffer_size;
4062	adapter->login_rsp_buf = login_rsp_buffer;
4063	adapter->login_rsp_buf_token = rsp_buffer_token;
4064	adapter->login_rsp_buf_sz = rsp_buffer_size;
4065
4066	login_buffer->len = cpu_to_be32(buffer_size);
4067	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4068	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4069	login_buffer->off_txcomp_subcrqs =
4070	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4071	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4072	login_buffer->off_rxcomp_subcrqs =
4073	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4074			sizeof(u64) * adapter->req_tx_queues);
4075	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4076	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4077
4078	tx_list_p = (__be64 *)((char *)login_buffer +
4079				      sizeof(struct ibmvnic_login_buffer));
4080	rx_list_p = (__be64 *)((char *)login_buffer +
4081				      sizeof(struct ibmvnic_login_buffer) +
4082				      sizeof(u64) * adapter->req_tx_queues);
4083
4084	for (i = 0; i < adapter->req_tx_queues; i++) {
4085		if (adapter->tx_scrq[i]) {
4086			tx_list_p[i] =
4087				cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4088		}
4089	}
4090
4091	for (i = 0; i < adapter->req_rx_queues; i++) {
4092		if (adapter->rx_scrq[i]) {
4093			rx_list_p[i] =
4094				cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4095		}
4096	}
4097
4098	/* Insert vNIC login client data */
4099	vlcd = (struct vnic_login_client_data *)
4100		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4101	login_buffer->client_data_offset =
4102			cpu_to_be32((char *)vlcd - (char *)login_buffer);
4103	login_buffer->client_data_len = cpu_to_be32(client_data_len);
4104
4105	vnic_add_client_data(adapter, vlcd);
4106
4107	netdev_dbg(adapter->netdev, "Login Buffer:\n");
4108	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4109		netdev_dbg(adapter->netdev, "%016lx\n",
4110			   ((unsigned long *)(adapter->login_buf))[i]);
4111	}
4112
4113	memset(&crq, 0, sizeof(crq));
4114	crq.login.first = IBMVNIC_CRQ_CMD;
4115	crq.login.cmd = LOGIN;
4116	crq.login.ioba = cpu_to_be32(buffer_token);
4117	crq.login.len = cpu_to_be32(buffer_size);
4118
4119	adapter->login_pending = true;
4120	rc = ibmvnic_send_crq(adapter, &crq);
4121	if (rc) {
4122		adapter->login_pending = false;
4123		netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4124		goto buf_rsp_map_failed;
4125	}
4126
4127	return 0;
4128
4129buf_rsp_map_failed:
4130	kfree(login_rsp_buffer);
4131	adapter->login_rsp_buf = NULL;
4132buf_rsp_alloc_failed:
4133	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4134buf_map_failed:
4135	kfree(login_buffer);
4136	adapter->login_buf = NULL;
4137buf_alloc_failed:
4138	return -1;
4139}
4140
4141static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4142			    u32 len, u8 map_id)
4143{
4144	union ibmvnic_crq crq;
4145
4146	memset(&crq, 0, sizeof(crq));
4147	crq.request_map.first = IBMVNIC_CRQ_CMD;
4148	crq.request_map.cmd = REQUEST_MAP;
4149	crq.request_map.map_id = map_id;
4150	crq.request_map.ioba = cpu_to_be32(addr);
4151	crq.request_map.len = cpu_to_be32(len);
4152	return ibmvnic_send_crq(adapter, &crq);
4153}
4154
4155static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
4156{
4157	union ibmvnic_crq crq;
4158
4159	memset(&crq, 0, sizeof(crq));
4160	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4161	crq.request_unmap.cmd = REQUEST_UNMAP;
4162	crq.request_unmap.map_id = map_id;
4163	return ibmvnic_send_crq(adapter, &crq);
4164}
4165
4166static void send_query_map(struct ibmvnic_adapter *adapter)
4167{
4168	union ibmvnic_crq crq;
4169
4170	memset(&crq, 0, sizeof(crq));
4171	crq.query_map.first = IBMVNIC_CRQ_CMD;
4172	crq.query_map.cmd = QUERY_MAP;
4173	ibmvnic_send_crq(adapter, &crq);
4174}
4175
4176/* Send a series of CRQs requesting various capabilities of the VNIC server */
4177static void send_query_cap(struct ibmvnic_adapter *adapter)
4178{
4179	union ibmvnic_crq crq;
4180
4181	atomic_set(&adapter->running_cap_crqs, 0);
4182	memset(&crq, 0, sizeof(crq));
4183	crq.query_capability.first = IBMVNIC_CRQ_CMD;
4184	crq.query_capability.cmd = QUERY_CAPABILITY;
4185
4186	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
4187	atomic_inc(&adapter->running_cap_crqs);
4188	ibmvnic_send_crq(adapter, &crq);
4189
4190	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
4191	atomic_inc(&adapter->running_cap_crqs);
4192	ibmvnic_send_crq(adapter, &crq);
4193
4194	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
4195	atomic_inc(&adapter->running_cap_crqs);
4196	ibmvnic_send_crq(adapter, &crq);
4197
4198	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
4199	atomic_inc(&adapter->running_cap_crqs);
4200	ibmvnic_send_crq(adapter, &crq);
4201
4202	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
4203	atomic_inc(&adapter->running_cap_crqs);
4204	ibmvnic_send_crq(adapter, &crq);
4205
4206	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
4207	atomic_inc(&adapter->running_cap_crqs);
4208	ibmvnic_send_crq(adapter, &crq);
4209
4210	crq.query_capability.capability =
4211	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
4212	atomic_inc(&adapter->running_cap_crqs);
4213	ibmvnic_send_crq(adapter, &crq);
4214
4215	crq.query_capability.capability =
4216	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
4217	atomic_inc(&adapter->running_cap_crqs);
4218	ibmvnic_send_crq(adapter, &crq);
4219
4220	crq.query_capability.capability =
4221	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
4222	atomic_inc(&adapter->running_cap_crqs);
4223	ibmvnic_send_crq(adapter, &crq);
4224
4225	crq.query_capability.capability =
4226	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
4227	atomic_inc(&adapter->running_cap_crqs);
4228	ibmvnic_send_crq(adapter, &crq);
4229
4230	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
4231	atomic_inc(&adapter->running_cap_crqs);
4232	ibmvnic_send_crq(adapter, &crq);
4233
4234	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
4235	atomic_inc(&adapter->running_cap_crqs);
4236	ibmvnic_send_crq(adapter, &crq);
4237
4238	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
4239	atomic_inc(&adapter->running_cap_crqs);
4240	ibmvnic_send_crq(adapter, &crq);
4241
4242	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
4243	atomic_inc(&adapter->running_cap_crqs);
4244	ibmvnic_send_crq(adapter, &crq);
4245
4246	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
4247	atomic_inc(&adapter->running_cap_crqs);
4248	ibmvnic_send_crq(adapter, &crq);
4249
4250	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
4251	atomic_inc(&adapter->running_cap_crqs);
4252	ibmvnic_send_crq(adapter, &crq);
4253
4254	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4255	atomic_inc(&adapter->running_cap_crqs);
4256	ibmvnic_send_crq(adapter, &crq);
4257
4258	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
4259	atomic_inc(&adapter->running_cap_crqs);
4260	ibmvnic_send_crq(adapter, &crq);
4261
4262	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
4263	atomic_inc(&adapter->running_cap_crqs);
4264	ibmvnic_send_crq(adapter, &crq);
4265
4266	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
4267	atomic_inc(&adapter->running_cap_crqs);
4268	ibmvnic_send_crq(adapter, &crq);
4269
4270	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
4271	atomic_inc(&adapter->running_cap_crqs);
4272	ibmvnic_send_crq(adapter, &crq);
4273
4274	crq.query_capability.capability =
4275			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
4276	atomic_inc(&adapter->running_cap_crqs);
4277	ibmvnic_send_crq(adapter, &crq);
4278
4279	crq.query_capability.capability =
4280			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
4281	atomic_inc(&adapter->running_cap_crqs);
4282	ibmvnic_send_crq(adapter, &crq);
4283
4284	crq.query_capability.capability =
4285			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
4286	atomic_inc(&adapter->running_cap_crqs);
4287	ibmvnic_send_crq(adapter, &crq);
4288
4289	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
4290	atomic_inc(&adapter->running_cap_crqs);
4291	ibmvnic_send_crq(adapter, &crq);
4292}
4293
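/* Map the adapter's ip_offload_buf for DMA and ask the server to fill it
 * with its checksum/TSO capabilities via a QUERY_IP_OFFLOAD CRQ.
 */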
4294static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4295{
4296	int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4297	struct device *dev = &adapter->vdev->dev;
4298	union ibmvnic_crq crq;
4299
4300	adapter->ip_offload_tok =
4301		dma_map_single(dev,
4302			       &adapter->ip_offload_buf,
4303			       buf_sz,
4304			       DMA_FROM_DEVICE);
4305
4306	if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4307		if (!firmware_has_feature(FW_FEATURE_CMO))
4308			dev_err(dev, "Couldn't map offload buffer\n");
4309		return;
4310	}
4311
4312	memset(&crq, 0, sizeof(crq));
4313	crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4314	crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4315	crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4316	crq.query_ip_offload.ioba =
4317	    cpu_to_be32(adapter->ip_offload_tok);
4318
4319	ibmvnic_send_crq(adapter, &crq);
4320}
4321
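/* Echo the offload capabilities we intend to use back to the server with a
 * CONTROL_IP_OFFLOAD CRQ, and update netdev->hw_features and features to
 * match. Outside of initial probing, the features are reconciled against
 * what the stack had previously enabled.
 */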
4322static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4323{
4324	struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4325	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4326	struct device *dev = &adapter->vdev->dev;
4327	netdev_features_t old_hw_features = 0;
4328	union ibmvnic_crq crq;
4329
4330	adapter->ip_offload_ctrl_tok =
4331		dma_map_single(dev,
4332			       ctrl_buf,
4333			       sizeof(adapter->ip_offload_ctrl),
4334			       DMA_TO_DEVICE);
4335
4336	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4337		dev_err(dev, "Couldn't map ip offload control buffer\n");
4338		return;
4339	}
4340
4341	ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4342	ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4343	ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4344	ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4345	ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4346	ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4347	ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4348	ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4349	ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4350	ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4351
4352	/* large_rx disabled for now, additional features needed */
4353	ctrl_buf->large_rx_ipv4 = 0;
4354	ctrl_buf->large_rx_ipv6 = 0;
4355
4356	if (adapter->state != VNIC_PROBING) {
4357		old_hw_features = adapter->netdev->hw_features;
4358		adapter->netdev->hw_features = 0;
4359	}
4360
4361	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4362
4363	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4364		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4365
4366	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4367		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4368
4369	if ((adapter->netdev->features &
4370	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4371		adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4372
4373	if (buf->large_tx_ipv4)
4374		adapter->netdev->hw_features |= NETIF_F_TSO;
4375	if (buf->large_tx_ipv6)
4376		adapter->netdev->hw_features |= NETIF_F_TSO6;
4377
4378	if (adapter->state == VNIC_PROBING) {
4379		adapter->netdev->features |= adapter->netdev->hw_features;
4380	} else if (old_hw_features != adapter->netdev->hw_features) {
4381		netdev_features_t tmp = 0;
4382
4383		/* disable features no longer supported */
4384		adapter->netdev->features &= adapter->netdev->hw_features;
4385		/* turn on features now supported if previously enabled */
4386		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4387			adapter->netdev->hw_features;
4388		adapter->netdev->features |=
4389				tmp & adapter->netdev->wanted_features;
4390	}
4391
4392	memset(&crq, 0, sizeof(crq));
4393	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4394	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4395	crq.control_ip_offload.len =
4396	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4397	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4398	ibmvnic_send_crq(adapter, &crq);
4399}
4400
4401static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4402				struct ibmvnic_adapter *adapter)
4403{
4404	struct device *dev = &adapter->vdev->dev;
4405
4406	if (crq->get_vpd_size_rsp.rc.code) {
4407		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4408			crq->get_vpd_size_rsp.rc.code);
4409		complete(&adapter->fw_done);
4410		return;
4411	}
4412
4413	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4414	complete(&adapter->fw_done);
4415}
4416
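/* Extract the firmware level from the VPD buffer. The level is stored
 * after the ASCII keyword "RM" as a one-byte length followed by the
 * version string; both are bounds-checked against the end of the buffer
 * before being copied into adapter->fw_version.
 */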
4417static void handle_vpd_rsp(union ibmvnic_crq *crq,
4418			   struct ibmvnic_adapter *adapter)
4419{
4420	struct device *dev = &adapter->vdev->dev;
4421	unsigned char *substr = NULL;
4422	u8 fw_level_len = 0;
4423
4424	memset(adapter->fw_version, 0, 32);
4425
4426	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4427			 DMA_FROM_DEVICE);
4428
4429	if (crq->get_vpd_rsp.rc.code) {
4430		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4431			crq->get_vpd_rsp.rc.code);
4432		goto complete;
4433	}
4434
4435	/* get the position of the firmware version info
4436	 * located after the ASCII 'RM' substring in the buffer
4437	 */
4438	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4439	if (!substr) {
4440		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
4441		goto complete;
4442	}
4443
4444	/* get length of firmware level ASCII substring */
4445	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4446		fw_level_len = *(substr + 2);
4447	} else {
4448		dev_info(dev, "FW level length byte is past the end of the VPD buffer\n");
4449		goto complete;
4450	}
4451
4452	/* copy firmware version string from vpd into adapter */
4453	if ((substr + 3 + fw_level_len) <
4454	    (adapter->vpd->buff + adapter->vpd->len)) {
4455		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4456	} else {
4457		dev_info(dev, "FW level substring runs past the end of the VPD buffer\n");
4458	}
4459
4460complete:
4461	if (adapter->fw_version[0] == '\0')
4462		strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
4463	complete(&adapter->fw_done);
4464}
4465
4466static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4467{
4468	struct device *dev = &adapter->vdev->dev;
4469	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4470	int i;
4471
4472	dma_unmap_single(dev, adapter->ip_offload_tok,
4473			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4474
4475	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4476	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4477		netdev_dbg(adapter->netdev, "%016lx\n",
4478			   ((unsigned long *)(buf))[i]);
4479
4480	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4481	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4482	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4483		   buf->tcp_ipv4_chksum);
4484	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4485		   buf->tcp_ipv6_chksum);
4486	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4487		   buf->udp_ipv4_chksum);
4488	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4489		   buf->udp_ipv6_chksum);
4490	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4491		   buf->large_tx_ipv4);
4492	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4493		   buf->large_tx_ipv6);
4494	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4495		   buf->large_rx_ipv4);
4496	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4497		   buf->large_rx_ipv6);
4498	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4499		   buf->max_ipv4_header_size);
4500	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4501		   buf->max_ipv6_header_size);
4502	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4503		   buf->max_tcp_header_size);
4504	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4505		   buf->max_udp_header_size);
4506	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4507		   buf->max_large_tx_size);
4508	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4509		   buf->max_large_rx_size);
4510	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4511		   buf->ipv6_extension_header);
4512	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4513		   buf->tcp_pseudosum_req);
4514	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4515		   buf->num_ipv6_ext_headers);
4516	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4517		   buf->off_ipv6_ext_headers);
4518
4519	send_control_ip_offload(adapter);
4520}
4521
4522static const char *ibmvnic_fw_err_cause(u16 cause)
4523{
4524	switch (cause) {
4525	case ADAPTER_PROBLEM:
4526		return "adapter problem";
4527	case BUS_PROBLEM:
4528		return "bus problem";
4529	case FW_PROBLEM:
4530		return "firmware problem";
4531	case DD_PROBLEM:
4532		return "device driver problem";
4533	case EEH_RECOVERY:
4534		return "EEH recovery";
4535	case FW_UPDATED:
4536		return "firmware updated";
4537	case LOW_MEMORY:
4538		return "low memory";
4539	default:
4540		return "unknown";
4541	}
4542}
4543
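/* Log the firmware-reported error cause and kick off a fatal or non-fatal
 * reset, depending on the IBMVNIC_FATAL_ERROR flag.
 */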
4544static void handle_error_indication(union ibmvnic_crq *crq,
4545				    struct ibmvnic_adapter *adapter)
4546{
4547	struct device *dev = &adapter->vdev->dev;
4548	u16 cause;
4549
4550	cause = be16_to_cpu(crq->error_indication.error_cause);
4551
4552	dev_warn_ratelimited(dev,
4553			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
4554			     crq->error_indication.flags
4555				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4556			     ibmvnic_fw_err_cause(cause));
4557
4558	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4559		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4560	else
4561		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4562}
4563
4564static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4565				 struct ibmvnic_adapter *adapter)
4566{
4567	struct net_device *netdev = adapter->netdev;
4568	struct device *dev = &adapter->vdev->dev;
4569	long rc;
4570
4571	rc = crq->change_mac_addr_rsp.rc.code;
4572	if (rc) {
4573		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
4574		goto out;
4575	}
4576	/* crq->change_mac_addr.mac_addr is the requested one
4577	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
4578	 */
4579	ether_addr_copy(netdev->dev_addr,
4580			&crq->change_mac_addr_rsp.mac_addr[0]);
4581	ether_addr_copy(adapter->mac_addr,
4582			&crq->change_mac_addr_rsp.mac_addr[0]);
4583out:
4584	complete(&adapter->fw_done);
4585	return rc;
4586}
4587
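/* Process a REQUEST_CAPABILITY response. On PARTIALSUCCESS the server
 * could not grant the requested value, so adopt the value it offered (or
 * the fallback MTU) and resend the capability requests. Once the last
 * outstanding response arrives, move on to querying IP offload support.
 */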
4588static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4589				   struct ibmvnic_adapter *adapter)
4590{
4591	struct device *dev = &adapter->vdev->dev;
4592	u64 *req_value;
4593	char *name;
4594
4595	atomic_dec(&adapter->running_cap_crqs);
4596	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4597	case REQ_TX_QUEUES:
4598		req_value = &adapter->req_tx_queues;
4599		name = "tx";
4600		break;
4601	case REQ_RX_QUEUES:
4602		req_value = &adapter->req_rx_queues;
4603		name = "rx";
4604		break;
4605	case REQ_RX_ADD_QUEUES:
4606		req_value = &adapter->req_rx_add_queues;
4607		name = "rx_add";
4608		break;
4609	case REQ_TX_ENTRIES_PER_SUBCRQ:
4610		req_value = &adapter->req_tx_entries_per_subcrq;
4611		name = "tx_entries_per_subcrq";
4612		break;
4613	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4614		req_value = &adapter->req_rx_add_entries_per_subcrq;
4615		name = "rx_add_entries_per_subcrq";
4616		break;
4617	case REQ_MTU:
4618		req_value = &adapter->req_mtu;
4619		name = "mtu";
4620		break;
4621	case PROMISC_REQUESTED:
4622		req_value = &adapter->promisc;
4623		name = "promisc";
4624		break;
4625	default:
4626		dev_err(dev, "Got invalid cap request rsp %d\n",
4627			be16_to_cpu(crq->request_capability_rsp.capability));
4628		return;
4629	}
4630
4631	switch (crq->request_capability_rsp.rc.code) {
4632	case SUCCESS:
4633		break;
4634	case PARTIALSUCCESS:
4635		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4636			 *req_value,
4637			 (long)be64_to_cpu(crq->request_capability_rsp.number),
4638			 name);
4639
4640		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4641		    REQ_MTU) {
4642			pr_err("mtu of %llu is not supported. Reverting.\n",
4643			       *req_value);
4644			*req_value = adapter->fallback.mtu;
4645		} else {
4646			*req_value =
4647				be64_to_cpu(crq->request_capability_rsp.number);
4648		}
4649
4650		send_request_cap(adapter, 1);
4651		return;
4652	default:
4653		dev_err(dev, "Error %d in request cap rsp\n",
4654			crq->request_capability_rsp.rc.code);
4655		return;
4656	}
4657
4658	/* Done receiving requested capabilities, query IP offload support */
4659	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4660		adapter->wait_capability = false;
4661		send_query_ip_offload(adapter);
4662	}
4663}
4664
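/* Process the LOGIN_RSP buffer filled in by the server: validate it
 * against what we asked for, record the negotiated rx buffer size and the
 * tx/rx sub-CRQ handles, then complete init_done so the initialization
 * path can proceed.
 */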
4665static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4666			    struct ibmvnic_adapter *adapter)
4667{
4668	struct device *dev = &adapter->vdev->dev;
4669	struct net_device *netdev = adapter->netdev;
4670	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4671	struct ibmvnic_login_buffer *login = adapter->login_buf;
4672	u64 *tx_handle_array;
4673	u64 *rx_handle_array;
4674	int num_tx_pools;
4675	int num_rx_pools;
4676	u64 *size_array;
4677	int i;
4678
4679	/* CHECK: Test/set of login_pending does not need to be atomic
4680	 * because only ibmvnic_tasklet tests/clears this.
4681	 */
4682	if (!adapter->login_pending) {
4683		netdev_warn(netdev, "Ignoring unexpected login response\n");
4684		return 0;
4685	}
4686	adapter->login_pending = false;
4687
4688	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4689			 DMA_TO_DEVICE);
4690	dma_unmap_single(dev, adapter->login_rsp_buf_token,
4691			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4692
4693	/* If the number of queues requested can't be allocated by the
4694	 * server, the login response will return with code 1. We will need
4695	 * to resend the login buffer with fewer queues requested.
4696	 */
4697	if (login_rsp_crq->generic.rc.code) {
4698		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4699		complete(&adapter->init_done);
4700		return 0;
4701	}
4702
4703	if (adapter->failover_pending) {
4704		adapter->init_done_rc = -EAGAIN;
4705		netdev_dbg(netdev, "Failover pending, ignoring login response\n");
4706		complete(&adapter->init_done);
4707		/* login response buffer will be released on reset */
4708		return 0;
4709	}
4710
4711	netdev->mtu = adapter->req_mtu - ETH_HLEN;
4712
4713	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4714	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4715		netdev_dbg(adapter->netdev, "%016lx\n",
4716			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
4717	}
4718
4719	/* Sanity checks */
4720	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4721	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
4722	     adapter->req_rx_add_queues !=
4723	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4724		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4725		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4726		return -EIO;
4727	}
4728	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4729		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4730	/* variable buffer sizes are not supported, so just read the
4731	 * first entry.
4732	 */
4733	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
4734
4735	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4736	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4737
4738	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4739				  be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4740	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4741				  be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4742
4743	for (i = 0; i < num_tx_pools; i++)
4744		adapter->tx_scrq[i]->handle = tx_handle_array[i];
4745
4746	for (i = 0; i < num_rx_pools; i++)
4747		adapter->rx_scrq[i]->handle = rx_handle_array[i];
4748
4749	adapter->num_active_tx_scrqs = num_tx_pools;
4750	adapter->num_active_rx_scrqs = num_rx_pools;
4751	release_login_rsp_buffer(adapter);
4752	release_login_buffer(adapter);
4753	complete(&adapter->init_done);
4754
4755	return 0;
4756}
4757
4758static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4759				     struct ibmvnic_adapter *adapter)
4760{
4761	struct device *dev = &adapter->vdev->dev;
4762	long rc;
4763
4764	rc = crq->request_unmap_rsp.rc.code;
4765	if (rc)
4766		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4767}
4768
4769static void handle_query_map_rsp(union ibmvnic_crq *crq,
4770				 struct ibmvnic_adapter *adapter)
4771{
4772	struct net_device *netdev = adapter->netdev;
4773	struct device *dev = &adapter->vdev->dev;
4774	long rc;
4775
4776	rc = crq->query_map_rsp.rc.code;
4777	if (rc) {
4778		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4779		return;
4780	}
4781	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4782		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4783		   crq->query_map_rsp.free_pages);
4784}
4785
4786static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4787				 struct ibmvnic_adapter *adapter)
4788{
4789	struct net_device *netdev = adapter->netdev;
4790	struct device *dev = &adapter->vdev->dev;
4791	long rc;
4792
4793	atomic_dec(&adapter->running_cap_crqs);
4794	netdev_dbg(netdev, "Outstanding queries: %d\n",
4795		   atomic_read(&adapter->running_cap_crqs));
4796	rc = crq->query_capability.rc.code;
4797	if (rc) {
4798		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4799		goto out;
4800	}
4801
4802	switch (be16_to_cpu(crq->query_capability.capability)) {
4803	case MIN_TX_QUEUES:
4804		adapter->min_tx_queues =
4805		    be64_to_cpu(crq->query_capability.number);
4806		netdev_dbg(netdev, "min_tx_queues = %lld\n",
4807			   adapter->min_tx_queues);
4808		break;
4809	case MIN_RX_QUEUES:
4810		adapter->min_rx_queues =
4811		    be64_to_cpu(crq->query_capability.number);
4812		netdev_dbg(netdev, "min_rx_queues = %lld\n",
4813			   adapter->min_rx_queues);
4814		break;
4815	case MIN_RX_ADD_QUEUES:
4816		adapter->min_rx_add_queues =
4817		    be64_to_cpu(crq->query_capability.number);
4818		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4819			   adapter->min_rx_add_queues);
4820		break;
4821	case MAX_TX_QUEUES:
4822		adapter->max_tx_queues =
4823		    be64_to_cpu(crq->query_capability.number);
4824		netdev_dbg(netdev, "max_tx_queues = %lld\n",
4825			   adapter->max_tx_queues);
4826		break;
4827	case MAX_RX_QUEUES:
4828		adapter->max_rx_queues =
4829		    be64_to_cpu(crq->query_capability.number);
4830		netdev_dbg(netdev, "max_rx_queues = %lld\n",
4831			   adapter->max_rx_queues);
4832		break;
4833	case MAX_RX_ADD_QUEUES:
4834		adapter->max_rx_add_queues =
4835		    be64_to_cpu(crq->query_capability.number);
4836		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4837			   adapter->max_rx_add_queues);
4838		break;
4839	case MIN_TX_ENTRIES_PER_SUBCRQ:
4840		adapter->min_tx_entries_per_subcrq =
4841		    be64_to_cpu(crq->query_capability.number);
4842		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4843			   adapter->min_tx_entries_per_subcrq);
4844		break;
4845	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4846		adapter->min_rx_add_entries_per_subcrq =
4847		    be64_to_cpu(crq->query_capability.number);
4848		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4849			   adapter->min_rx_add_entries_per_subcrq);
4850		break;
4851	case MAX_TX_ENTRIES_PER_SUBCRQ:
4852		adapter->max_tx_entries_per_subcrq =
4853		    be64_to_cpu(crq->query_capability.number);
4854		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4855			   adapter->max_tx_entries_per_subcrq);
4856		break;
4857	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4858		adapter->max_rx_add_entries_per_subcrq =
4859		    be64_to_cpu(crq->query_capability.number);
4860		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4861			   adapter->max_rx_add_entries_per_subcrq);
4862		break;
4863	case TCP_IP_OFFLOAD:
4864		adapter->tcp_ip_offload =
4865		    be64_to_cpu(crq->query_capability.number);
4866		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4867			   adapter->tcp_ip_offload);
4868		break;
4869	case PROMISC_SUPPORTED:
4870		adapter->promisc_supported =
4871		    be64_to_cpu(crq->query_capability.number);
4872		netdev_dbg(netdev, "promisc_supported = %lld\n",
4873			   adapter->promisc_supported);
4874		break;
4875	case MIN_MTU:
4876		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4877		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4878		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4879		break;
4880	case MAX_MTU:
4881		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4882		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4883		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4884		break;
4885	case MAX_MULTICAST_FILTERS:
4886		adapter->max_multicast_filters =
4887		    be64_to_cpu(crq->query_capability.number);
4888		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4889			   adapter->max_multicast_filters);
4890		break;
4891	case VLAN_HEADER_INSERTION:
4892		adapter->vlan_header_insertion =
4893		    be64_to_cpu(crq->query_capability.number);
4894		if (adapter->vlan_header_insertion)
4895			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4896		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4897			   adapter->vlan_header_insertion);
4898		break;
4899	case RX_VLAN_HEADER_INSERTION:
4900		adapter->rx_vlan_header_insertion =
4901		    be64_to_cpu(crq->query_capability.number);
4902		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4903			   adapter->rx_vlan_header_insertion);
4904		break;
4905	case MAX_TX_SG_ENTRIES:
4906		adapter->max_tx_sg_entries =
4907		    be64_to_cpu(crq->query_capability.number);
4908		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4909			   adapter->max_tx_sg_entries);
4910		break;
4911	case RX_SG_SUPPORTED:
4912		adapter->rx_sg_supported =
4913		    be64_to_cpu(crq->query_capability.number);
4914		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4915			   adapter->rx_sg_supported);
4916		break;
4917	case OPT_TX_COMP_SUB_QUEUES:
4918		adapter->opt_tx_comp_sub_queues =
4919		    be64_to_cpu(crq->query_capability.number);
4920		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4921			   adapter->opt_tx_comp_sub_queues);
4922		break;
4923	case OPT_RX_COMP_QUEUES:
4924		adapter->opt_rx_comp_queues =
4925		    be64_to_cpu(crq->query_capability.number);
4926		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4927			   adapter->opt_rx_comp_queues);
4928		break;
4929	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4930		adapter->opt_rx_bufadd_q_per_rx_comp_q =
4931		    be64_to_cpu(crq->query_capability.number);
4932		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4933			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
4934		break;
4935	case OPT_TX_ENTRIES_PER_SUBCRQ:
4936		adapter->opt_tx_entries_per_subcrq =
4937		    be64_to_cpu(crq->query_capability.number);
4938		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4939			   adapter->opt_tx_entries_per_subcrq);
4940		break;
4941	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4942		adapter->opt_rxba_entries_per_subcrq =
4943		    be64_to_cpu(crq->query_capability.number);
4944		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4945			   adapter->opt_rxba_entries_per_subcrq);
4946		break;
4947	case TX_RX_DESC_REQ:
4948		adapter->tx_rx_desc_req = crq->query_capability.number;
4949		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4950			   adapter->tx_rx_desc_req);
4951		break;
4952
4953	default:
4954		netdev_err(netdev, "Got invalid cap rsp %d\n",
4955			   be16_to_cpu(crq->query_capability.capability));
4956	}
4957
4958out:
4959	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4960		adapter->wait_capability = false;
4961		send_request_cap(adapter, 0);
4962	}
4963}
4964
4965static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4966{
4967	union ibmvnic_crq crq;
4968	int rc;
4969
4970	memset(&crq, 0, sizeof(crq));
4971	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4972	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
4973
4974	mutex_lock(&adapter->fw_lock);
4975	adapter->fw_done_rc = 0;
4976	reinit_completion(&adapter->fw_done);
4977
4978	rc = ibmvnic_send_crq(adapter, &crq);
4979	if (rc) {
4980		mutex_unlock(&adapter->fw_lock);
4981		return rc;
4982	}
4983
4984	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
4985	if (rc) {
4986		mutex_unlock(&adapter->fw_lock);
4987		return rc;
4988	}
4989
4990	mutex_unlock(&adapter->fw_lock);
4991	return adapter->fw_done_rc ? -EIO : 0;
4992}
4993
4994static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4995				       struct ibmvnic_adapter *adapter)
4996{
4997	struct net_device *netdev = adapter->netdev;
4998	int rc;
4999	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
5000
5001	rc = crq->query_phys_parms_rsp.rc.code;
5002	if (rc) {
5003		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
5004		return rc;
5005	}
5006	switch (rspeed) {
5007	case IBMVNIC_10MBPS:
5008		adapter->speed = SPEED_10;
5009		break;
5010	case IBMVNIC_100MBPS:
5011		adapter->speed = SPEED_100;
5012		break;
5013	case IBMVNIC_1GBPS:
5014		adapter->speed = SPEED_1000;
5015		break;
5016	case IBMVNIC_10GBPS:
5017		adapter->speed = SPEED_10000;
5018		break;
5019	case IBMVNIC_25GBPS:
5020		adapter->speed = SPEED_25000;
5021		break;
5022	case IBMVNIC_40GBPS:
5023		adapter->speed = SPEED_40000;
5024		break;
5025	case IBMVNIC_50GBPS:
5026		adapter->speed = SPEED_50000;
5027		break;
5028	case IBMVNIC_100GBPS:
5029		adapter->speed = SPEED_100000;
5030		break;
5031	case IBMVNIC_200GBPS:
5032		adapter->speed = SPEED_200000;
5033		break;
5034	default:
5035		if (netif_carrier_ok(netdev))
5036			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
5037		adapter->speed = SPEED_UNKNOWN;
5038	}
5039	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
5040		adapter->duplex = DUPLEX_FULL;
5041	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
5042		adapter->duplex = DUPLEX_HALF;
5043	else
5044		adapter->duplex = DUPLEX_UNKNOWN;
5045
5046	return rc;
5047}
5048
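/* Dispatch a single CRQ message from the server. Transport events (reset,
 * migration, failover) and the init handshake are handled in the first
 * switch on gen_crq->first; everything else is a command response fanned
 * out by gen_crq->cmd.
 */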
5049static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
5050			       struct ibmvnic_adapter *adapter)
5051{
5052	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
5053	struct net_device *netdev = adapter->netdev;
5054	struct device *dev = &adapter->vdev->dev;
5055	u64 *u64_crq = (u64 *)crq;
5056	long rc;
5057
5058	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
5059		   (unsigned long)cpu_to_be64(u64_crq[0]),
5060		   (unsigned long)cpu_to_be64(u64_crq[1]));
5061	switch (gen_crq->first) {
5062	case IBMVNIC_CRQ_INIT_RSP:
5063		switch (gen_crq->cmd) {
5064		case IBMVNIC_CRQ_INIT:
5065			dev_info(dev, "Partner initialized\n");
5066			adapter->from_passive_init = true;
5067			/* Discard any stale login responses from prev reset.
5068			 * CHECK: should we clear even on INIT_COMPLETE?
5069			 */
5070			adapter->login_pending = false;
5071
5072			if (!completion_done(&adapter->init_done)) {
5073				complete(&adapter->init_done);
5074				adapter->init_done_rc = -EIO;
5075			}
5076
5077			if (adapter->state == VNIC_DOWN)
5078				rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
5079			else
5080				rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5081
5082			if (rc && rc != -EBUSY) {
5083				/* We were unable to schedule the failover
5084				 * reset either because the adapter was still
5085				 * probing (eg: during kexec) or we could not
5086				 * allocate memory. Clear the failover_pending
5087				 * flag since no one else will. We ignore
5088				 * EBUSY because it means either FAILOVER reset
5089				 * is already scheduled or the adapter is
5090				 * being removed.
5091				 */
5092				netdev_err(netdev,
5093					   "Error %ld scheduling failover reset\n",
5094					   rc);
5095				adapter->failover_pending = false;
5096			}
5097			break;
5098		case IBMVNIC_CRQ_INIT_COMPLETE:
5099			dev_info(dev, "Partner initialization complete\n");
5100			adapter->crq.active = true;
5101			send_version_xchg(adapter);
5102			break;
5103		default:
5104			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
5105		}
5106		return;
5107	case IBMVNIC_CRQ_XPORT_EVENT:
5108		netif_carrier_off(netdev);
5109		adapter->crq.active = false;
5110		/* terminate any thread waiting for a response
5111		 * from the device
5112		 */
5113		if (!completion_done(&adapter->fw_done)) {
5114			adapter->fw_done_rc = -EIO;
5115			complete(&adapter->fw_done);
5116		}
5117		if (!completion_done(&adapter->stats_done))
5118			complete(&adapter->stats_done);
5119		if (test_bit(0, &adapter->resetting))
5120			adapter->force_reset_recovery = true;
5121		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
5122			dev_info(dev, "Migrated, re-enabling adapter\n");
5123			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
5124		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
5125			dev_info(dev, "Backing device failover detected\n");
5126			adapter->failover_pending = true;
5127		} else {
5128			/* The adapter lost the connection */
5129			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
5130				gen_crq->cmd);
5131			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5132		}
5133		return;
5134	case IBMVNIC_CRQ_CMD_RSP:
5135		break;
5136	default:
5137		dev_err(dev, "Got an invalid msg type 0x%02x\n",
5138			gen_crq->first);
5139		return;
5140	}
5141
5142	switch (gen_crq->cmd) {
5143	case VERSION_EXCHANGE_RSP:
5144		rc = crq->version_exchange_rsp.rc.code;
5145		if (rc) {
5146			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
5147			break;
5148		}
5149		ibmvnic_version =
5150			    be16_to_cpu(crq->version_exchange_rsp.version);
5151		dev_info(dev, "Partner protocol version is %d\n",
5152			 ibmvnic_version);
5153		send_query_cap(adapter);
5154		break;
5155	case QUERY_CAPABILITY_RSP:
5156		handle_query_cap_rsp(crq, adapter);
5157		break;
5158	case QUERY_MAP_RSP:
5159		handle_query_map_rsp(crq, adapter);
5160		break;
5161	case REQUEST_MAP_RSP:
5162		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
5163		complete(&adapter->fw_done);
5164		break;
5165	case REQUEST_UNMAP_RSP:
5166		handle_request_unmap_rsp(crq, adapter);
5167		break;
5168	case REQUEST_CAPABILITY_RSP:
5169		handle_request_cap_rsp(crq, adapter);
5170		break;
5171	case LOGIN_RSP:
5172		netdev_dbg(netdev, "Got Login Response\n");
5173		handle_login_rsp(crq, adapter);
5174		break;
5175	case LOGICAL_LINK_STATE_RSP:
5176		netdev_dbg(netdev,
5177			   "Got Logical Link State Response, state: %d rc: %d\n",
5178			   crq->logical_link_state_rsp.link_state,
5179			   crq->logical_link_state_rsp.rc.code);
5180		adapter->logical_link_state =
5181		    crq->logical_link_state_rsp.link_state;
5182		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
5183		complete(&adapter->init_done);
5184		break;
5185	case LINK_STATE_INDICATION:
5186		netdev_dbg(netdev, "Got Logical Link State Indication\n");
5187		adapter->phys_link_state =
5188		    crq->link_state_indication.phys_link_state;
5189		adapter->logical_link_state =
5190		    crq->link_state_indication.logical_link_state;
5191		if (adapter->phys_link_state && adapter->logical_link_state)
5192			netif_carrier_on(netdev);
5193		else
5194			netif_carrier_off(netdev);
5195		break;
5196	case CHANGE_MAC_ADDR_RSP:
5197		netdev_dbg(netdev, "Got MAC address change Response\n");
5198		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
5199		break;
5200	case ERROR_INDICATION:
5201		netdev_dbg(netdev, "Got Error Indication\n");
5202		handle_error_indication(crq, adapter);
5203		break;
5204	case REQUEST_STATISTICS_RSP:
5205		netdev_dbg(netdev, "Got Statistics Response\n");
5206		complete(&adapter->stats_done);
5207		break;
5208	case QUERY_IP_OFFLOAD_RSP:
5209		netdev_dbg(netdev, "Got Query IP offload Response\n");
5210		handle_query_ip_offload_rsp(adapter);
5211		break;
5212	case MULTICAST_CTRL_RSP:
5213		netdev_dbg(netdev, "Got multicast control Response\n");
5214		break;
5215	case CONTROL_IP_OFFLOAD_RSP:
5216		netdev_dbg(netdev, "Got Control IP offload Response\n");
5217		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
5218				 sizeof(adapter->ip_offload_ctrl),
5219				 DMA_TO_DEVICE);
5220		complete(&adapter->init_done);
5221		break;
5222	case COLLECT_FW_TRACE_RSP:
5223		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
5224		complete(&adapter->fw_done);
5225		break;
5226	case GET_VPD_SIZE_RSP:
5227		handle_vpd_size_rsp(crq, adapter);
5228		break;
5229	case GET_VPD_RSP:
5230		handle_vpd_rsp(crq, adapter);
5231		break;
5232	case QUERY_PHYS_PARMS_RSP:
5233		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
5234		complete(&adapter->fw_done);
5235		break;
5236	default:
5237		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
5238			   gen_crq->cmd);
5239	}
5240}
5241
5242static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
5243{
5244	struct ibmvnic_adapter *adapter = instance;
5245
5246	tasklet_schedule(&adapter->tasklet);
5247	return IRQ_HANDLED;
5248}
5249
5250static void ibmvnic_tasklet(struct tasklet_struct *t)
5251{
5252	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
5253	struct ibmvnic_crq_queue *queue = &adapter->crq;
5254	union ibmvnic_crq *crq;
5255	unsigned long flags;
5256	bool done = false;
5257
5258	spin_lock_irqsave(&queue->lock, flags);
5259	while (!done) {
5260		/* Pull all the valid messages off the CRQ */
5261		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
5262			/* This barrier makes sure ibmvnic_next_crq()'s
5263			 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
5264			 * before ibmvnic_handle_crq()'s
5265			 * switch(gen_crq->first) and switch(gen_crq->cmd).
5266			 */
5267			dma_rmb();
5268			ibmvnic_handle_crq(crq, adapter);
5269			crq->generic.first = 0;
5270		}
5271
5272		/* remain in the tasklet until all
5273		 * capability responses are received
5274		 */
5275		if (!adapter->wait_capability)
5276			done = true;
5277	}
5278	/* if capability CRQs were sent in this tasklet, the next tasklet run
5279	 * must wait until all responses are received
5280	 */
5281	if (atomic_read(&adapter->running_cap_crqs) != 0)
5282		adapter->wait_capability = true;
5283	spin_unlock_irqrestore(&queue->lock, flags);
5284}
5285
5286static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
5287{
5288	struct vio_dev *vdev = adapter->vdev;
5289	int rc;
5290
5291	do {
5292		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
5293	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
5294
5295	if (rc)
5296		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
5297
5298	return rc;
5299}
5300
5301static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
5302{
5303	struct ibmvnic_crq_queue *crq = &adapter->crq;
5304	struct device *dev = &adapter->vdev->dev;
5305	struct vio_dev *vdev = adapter->vdev;
5306	int rc;
5307
5308	/* Close the CRQ */
5309	do {
5310		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5311	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5312
5313	/* Clean out the queue */
5314	if (!crq->msgs)
5315		return -EINVAL;
5316
5317	memset(crq->msgs, 0, PAGE_SIZE);
5318	crq->cur = 0;
5319	crq->active = false;
5320
5321	/* And re-open it again */
5322	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5323				crq->msg_token, PAGE_SIZE);
5324
5325	if (rc == H_CLOSED)
5326		/* Adapter is good, but other end is not ready */
5327		dev_warn(dev, "Partner adapter not ready\n");
5328	else if (rc != 0)
5329		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
5330
5331	return rc;
5332}
5333
5334static void release_crq_queue(struct ibmvnic_adapter *adapter)
5335{
5336	struct ibmvnic_crq_queue *crq = &adapter->crq;
5337	struct vio_dev *vdev = adapter->vdev;
5338	long rc;
5339
5340	if (!crq->msgs)
5341		return;
5342
5343	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
5344	free_irq(vdev->irq, adapter);
5345	tasklet_kill(&adapter->tasklet);
5346	do {
5347		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5348	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5349
5350	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
5351			 DMA_BIDIRECTIONAL);
5352	free_page((unsigned long)crq->msgs);
5353	crq->msgs = NULL;
5354	crq->active = false;
5355}
5356
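/* Allocate and register the page-sized main CRQ with the hypervisor
 * (falling back to a CRQ reset if H_REG_CRQ reports H_RESOURCE, e.g. after
 * a kexec), then wire up the CRQ interrupt and tasklet.
 */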
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	/* ibmvnic_tasklet() already has the right signature, so no cast
	 * of the callback is needed here.
	 */
	tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

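/* (Re)start communication with the VNIC server: send the CRQ init
 * request, wait up to 20 seconds for the handshake to complete, then
 * initialize (or reset) the sub-CRQs and their interrupts. On a reset
 * that changed the requested queue counts, the sub-CRQs are reallocated
 * rather than just reset.
 */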
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	if (reset)
		reinit_completion(&adapter->init_done);

	adapter->init_done_rc = 0;
	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

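/* Probe entry point: fetch the MAC address from firmware, allocate the
 * net_device and adapter state, bring up the CRQ, and register the
 * netdev. If the VNIC server is not ready yet, probe still succeeds
 * with the adapter left in VNIC_DOWN; the passive init reset finishes
 * the setup once the partner comes up.
 */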
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);

	init_success = false;
	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
	} while (rc == EAGAIN);

	/* The error from ibmvnic_reset_init() is ignored here on the
	 * assumption that the partner is not ready and the CRQ is not
	 * active. Once the partner becomes ready, the passive init reset
	 * will finish bringing the adapter up.
	 */

	if (!rc)
		init_success = true;

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;
	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

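/* Remove entry point: mark the adapter VNIC_REMOVING so no further
 * resets are scheduled, flush any reset work already queued, then
 * unregister the netdev and release all queues, buffers, and tokens.
 */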
static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset(), which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

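/* Handler for the write-only "failover" sysfs attribute. Writing "1"
 * asks the hypervisor, via H_VIOCTL, to initiate a client failover for
 * this session; if the session token cannot be obtained, fall back to
 * scheduling a failover reset directly.
 */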
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		goto last_resort;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc)
		netdev_err(netdev,
			   "H_VIOCTL initiated failover failed, rc %ld\n",
			   rc);

last_resort:
	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

	return count;
}

static DEVICE_ATTR_WO(failover);

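/* Estimate the IO entitlement this device needs: one page for the CRQ,
 * the statistics buffer, four pages per sub-CRQ, and the long term
 * mapped receive buffers of every active rx pool.
 */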
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

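/* PM resume hook: if the adapter was open, kick the tasklet so any CRQ
 * messages that arrived while suspended get processed.
 */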
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", ""}
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);